hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
e2f7f329c1ce56cbf63b0c9e90b10f5e13a5c5a4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This file takes care of memory coloring in GPU. To do this, we make
* custom ioctl() calls to nvidia uvm driver. These ioctls have been added to
* vanilla uvm driver to expose certain functionality.
* This file traps some calls made by CUDA library using preload mechanisms.
* This is needed because CUDA library is closed source.
*/
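/*
* Illustrative note (added, not part of the original source): on Linux, an
* interposer like this is typically loaded via LD_PRELOAD, e.g.
* LD_PRELOAD=./libfgpu.so ./my_gpu_app
* The library name above is hypothetical; the dlsym(RTLD_NEXT, ...) calls
* below are what resolve the real open()/connect() symbols.
*/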
/* TODO: Use better error codes */
/* TODO: Half of colored memory is being wasted. Need to resolve this issue */
/*
* TODO: Make PTEs on GPU consistent (on memprefetch to CPU they are invalidated
* for uvm to work). But make sure data migrates when data changes (When user
* explicitly requests)
*/
/*
* TODO: There shouldn't be a need to memprefetch in case data hasn't changed
* between CPU and GPU. This should work when GPU TLBs are made persistent.
* Check what happens currently.
*/
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <iostream>
#include <inttypes.h>
#include <linux/ioctl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
/* CUDA/NVML */
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocm_smi/rocm_smi.h>
#include <hip/driver_types.h>
/* NVIDIA driver */
#include <uvm_minimal_init.h>
#include <nvCpuUuid.h>
#include <fractional_gpu.hpp>
#include <fractional_gpu_cuda.cuh>
#include <fgpu_internal_allocator.hpp>
#include <fgpu_internal_memory.hpp>
#ifdef FGPU_MEM_COLORING_ENABLED
#define NVIDIA_UVM_DEVICE_PATH "/dev/" NVIDIA_UVM_DEVICE_NAME
/* TODO: This path can be changed via environment variable */
#define NVIDIA_MPS_CONTROL_PATH "/tmp/nvidia-mps/control"
/* Ioctl codes */
#define IOCTL_GET_DEVICE_COLOR_INFO _IOC(0, 0, UVM_GET_DEVICE_COLOR_INFO, 0)
#define IOCTL_GET_PROCESS_COLOR_INFO _IOC(0, 0, UVM_GET_PROCESS_COLOR_INFO, 0)
#define IOCTL_SET_PROCESS_COLOR_INFO _IOC(0, 0, UVM_SET_PROCESS_COLOR_INFO, 0)
#define IOCTL_MEMCPY_COLORED _IOC(0, 0, UVM_MEMCPY_COLORED, 0)
#define IOCTL_MEMSET_COLORED _IOC(0, 0, UVM_MEMSET_COLORED, 0)
/* UVM device fd */
static int g_uvm_fd = -1;
typedef int (*orig_open_f_type)(const char *pathname, int flags, int mode);
orig_open_f_type g_orig_open;
typedef int (*orig_connect_f_type)(int sockfd, const struct sockaddr *addr,
socklen_t addrlen);
orig_connect_f_type g_orig_connect;
pthread_once_t g_pre_init_once = PTHREAD_ONCE_INIT;
pthread_once_t g_post_init_once = PTHREAD_ONCE_INIT;
bool g_init_failed;
/* All information needed for tracking memory */
struct {
bool is_initialized;
/* Start physical address of allocation */
void *base_phy_addr;
/* Actual memory available for coloring */
size_t reserved_len;
/* Actual memory allocation */
void *base_addr;
int color;
allocator_t *allocator;
} g_memory_ctx;
/* Does the most necessary initialization */
static void pre_initialization(void)
{
g_orig_open = (orig_open_f_type)dlsym(RTLD_NEXT,"open");
if (!g_orig_open) {
g_init_failed = true;
return;
}
g_orig_connect = (orig_connect_f_type)dlsym(RTLD_NEXT,"connect");
if (!g_orig_connect) {
g_init_failed = true;
return;
}
}
static void post_initialization(void)
{
rsmi_status_t ncode;
ncode = nvmlInit();
if (ncode != RSMI_STATUS_SUCCESS) {
g_init_failed = true;
return;
}
}
/* Does the initialization at most once */
static int init(bool do_post_init)
{
int ret;
ret = pthread_once(&g_pre_init_once, pre_initialization);
if (ret < 0)
return ret;
if (g_init_failed) {
fprintf(stderr, "FGPU:Initialization failed\n");
return -EINVAL;
}
if (!do_post_init)
return 0;
ret = pthread_once(&g_post_init_once, post_initialization);
if (ret < 0)
return ret;
if (g_init_failed) {
fprintf(stderr, "FGPU:Initialization failed\n");
return -EINVAL;
}
return 0;
}
/* Retrieve the device UUID from the CUDA device handle */
static int get_device_UUID(int device, NvProcessorUuid *uuid)
{
rsmi_status_t ncode;
hipError_t ccode;
char pciID[32];
uint32_t handle;
char buf[100];
char hex[3];
char *nbuf;
int cindex, hindex, uindex, needed_bytes;
char c;
int len;
std::string prefix = "GPU";
const char *gpu_prefix = prefix.c_str();
int gpu_prefix_len = strlen(gpu_prefix);
/* Get PCI ID from the device handle and then use NVML library to get UUID */
ccode = hipDeviceGetPCIBusId(pciID, sizeof(pciID), device);
if (ccode != hipSuccess) {
fprintf(stderr, "FGPU:Couldn't find PCI Bus ID\n");
return -EINVAL;
}
ncode = nvmlDeviceGetHandleByPciBusId(pciID, &handle);
if (ncode != RSMI_STATUS_SUCCESS){
fprintf(stderr, "FGPU:Couldn't get Device Handle\n");
return -EINVAL;
}
ncode = nvmlDeviceGetUUID(handle, buf, sizeof(buf));
if (ncode != RSMI_STATUS_SUCCESS){
fprintf(stderr, "FGPU:Couldn't find device UUID\n");
return -EINVAL;
}
if (strncmp(buf, gpu_prefix, gpu_prefix_len) != 0)
return 0;
nbuf = buf + gpu_prefix_len;
/*
* The UUID string contains separator characters and hexadecimal digits.
* We are only interested in the hexadecimal digits.
* Each pair of hexadecimal digits encodes 1 byte.
*/
needed_bytes = sizeof(NvProcessorUuid);
len = strlen(nbuf);
for (cindex = 0, hindex = 0, uindex = 0; cindex < len; cindex++) {
c = nbuf[cindex];
if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) {
hex[hindex] = c;
hindex++;
if (hindex == 2) {
hex[2] = '\0';
uuid->uuid[uindex] = (uint8_t)strtol(hex, NULL, 16);
uindex++;
hindex = 0;
if (uindex > needed_bytes) {
fprintf(stderr, "FGPU:Invalid device UUID\n");
return -EINVAL;
}
}
}
}
if (uindex != needed_bytes) {
fprintf(stderr, "FGPU:Invalid device UUID\n");
return -EINVAL;
}
return 0;
}
extern "C" {
/* Trap open() calls (interested in UVM device opened by CUDA) */
int open(const char *pathname, int flags, int mode)
{
int ret;
ret = init(false);
if (ret < 0)
return ret;
ret = g_orig_open(pathname,flags, mode);
if (g_uvm_fd < 0 &&
strncmp(pathname, NVIDIA_UVM_DEVICE_PATH, strlen(NVIDIA_UVM_DEVICE_PATH)) == 0) {
g_uvm_fd = ret;
}
return ret;
}
/* Trap connect() calls (interested in connection to MPS) */
int connect(int sockfd, const struct sockaddr *addr,
socklen_t addrlen)
{
int ret;
ret = init(false);
if (ret < 0)
return ret;
ret = g_orig_connect(sockfd, addr, addrlen);
if (ret >= 0 && g_uvm_fd < 0 && addr && addr->sa_family == AF_LOCAL &&
strncmp(addr->sa_data, NVIDIA_MPS_CONTROL_PATH, strlen(NVIDIA_MPS_CONTROL_PATH)) == 0) {
g_uvm_fd = sockfd;
}
return ret;
}
} /* extern "C" */
static int get_device_color_info(int device, int *num_colors, size_t *max_len)
{
UVM_GET_DEVICE_COLOR_INFO_PARAMS params;
int ret;
ret = get_device_UUID(device, &params.destinationUuid);
if (ret < 0)
return ret;
ret = ioctl(g_uvm_fd, IOCTL_GET_DEVICE_COLOR_INFO, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Couldn't get device color info\n");
return -EINVAL;
}
if (num_colors)
*num_colors = params.numColors;
if (max_len)
*max_len = params.maxLength;
return 0;
}
/* Get the numbers of colors supported by the memory and maximum memory that can be reserved */
int fgpu_memory_get_device_info(int *num_colors, size_t *max_len)
{
int ret;
ret = init(true);
if (ret < 0)
return ret;
if (g_uvm_fd < 0) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
return get_device_color_info(FGPU_DEVICE_NUMBER, num_colors, max_len);
}
static int get_process_color_info(int device, int *color, size_t *length)
{
UVM_GET_PROCESS_COLOR_INFO_PARAMS params;
int ret;
ret = get_device_UUID(device, &params.destinationUuid);
if (ret < 0)
return ret;
ret = ioctl(g_uvm_fd, IOCTL_GET_PROCESS_COLOR_INFO, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Couldn't get process color property\n");
return -EINVAL;
}
if (color)
*color = params.color;
if (length)
*length = params.length;
return 0;
}
/* Indicates the color set currently for the process and the length reserved */
int fgpu_process_get_colors_info(int device, int *color, size_t *length)
{
int ret;
ret = init(true);
if (ret < 0)
return ret;
if (g_uvm_fd < 0) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
return get_process_color_info(device, color, length);
}
/* Set memory color and also reserve memory */
static int set_process_color_info(int device, int color, size_t req_length,
hipStream_t stream)
{
UVM_SET_PROCESS_COLOR_INFO_PARAMS params;
size_t actual_length = req_length;
int ret;
/* Color can only be set once */
if (g_memory_ctx.is_initialized) {
fprintf(stderr, "FGPU:Process color already set\n");
return -EINVAL;
}
#if defined(FGPU_USER_MEM_COLORING_ENABLED)
int num_colors;
ret = get_device_color_info(device, &num_colors, NULL);
if (ret < 0)
return ret;
actual_length = req_length * num_colors;
#endif
ret = get_device_UUID(device, &params.destinationUuid);
if (ret < 0)
return ret;
params.color = color;
params.length = actual_length;
ret = ioctl(g_uvm_fd, IOCTL_SET_PROCESS_COLOR_INFO, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Couldn't set process color property\n");
return -EINVAL;
}
ret = gpuErrCheck(hipMallocManaged(&g_memory_ctx.base_addr, actual_length));
if (ret < 0)
return ret;
#if 1
/* Do the actual allocation on device */
ret = gpuErrCheck(hipMemPrefetchAsync(g_memory_ctx.base_addr, actual_length,
device, stream));
if (ret < 0) {
hipFree(g_memory_ctx.base_addr);
return ret;
}
#endif
ret = gpuErrCheck(hipStreamSynchronize(stream));
if (ret < 0) {
hipFree(g_memory_ctx.base_addr);
return ret;
}
g_memory_ctx.is_initialized = true;
g_memory_ctx.base_phy_addr = (void *)params.address;
g_memory_ctx.reserved_len = req_length;
g_memory_ctx.color = color;
g_memory_ctx.allocator = allocator_init(g_memory_ctx.base_addr,
req_length, FGPU_DEVICE_ADDRESS_ALIGNMENT);
if (!g_memory_ctx.allocator) {
fprintf(stderr, "FGPU:Allocator Initialization Failed\n");
return -EINVAL;
}
return 0;
}
/* Indicates the color set currently for the process and the length reserved */
int fgpu_memory_set_colors_info(int device, int color, size_t length,
hipStream_t stream)
{
int ret;
ret = init(true);
if (ret < 0)
return ret;
if (g_uvm_fd < 0) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
return set_process_color_info(device, color, length, stream);
}
void fgpu_memory_deinit(void)
{
if (!g_memory_ctx.is_initialized)
return;
if (g_memory_ctx.allocator)
allocator_deinit(g_memory_ctx.allocator);
hipFree(g_memory_ctx.base_addr);
g_memory_ctx.is_initialized = false;
}
int fgpu_memory_allocate(void **p, size_t len)
{
void *ret_addr;
if (!g_memory_ctx.is_initialized) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
ret_addr = allocator_alloc(g_memory_ctx.allocator, len);
if (!ret_addr) {
fprintf(stderr, "FGPU:Can't allocate device memory\n");
return -ENOMEM;
}
*p = ret_addr;
return 0;
}
int fgpu_memory_free(void *p)
{
if (!g_memory_ctx.is_initialized) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
allocator_free(g_memory_ctx.allocator, p);
return 0;
}
/* Useful only for reverse engineering */
void *fgpu_memory_get_phy_address(void *addr)
{
if (!g_memory_ctx.base_phy_addr)
return NULL;
return (void *)((uintptr_t)g_memory_ctx.base_phy_addr +
(uintptr_t)addr - (uintptr_t)g_memory_ctx.base_addr);
}
#else /* FGPU_MEM_COLORING_ENABLED */
int fgpu_memory_allocate(void **p, size_t len)
{
/*
* XXX: We are using hipMallocManaged() instead of just
* hipMalloc() to make the comparison fair between memory coloring
* enabled vs. disabled. Memcpy() is slower (for small sizes) with
* hipMallocManaged() vs. hipMalloc() (but faster for larger sizes > 8MB).
* We suspect this is because of code differences inside the Linux driver.
*/
int ret;
ret = gpuErrCheck(hipMallocManaged(p, len));
if (ret < 0)
return ret;
/* Do the actual allocation on device */
ret = gpuErrCheck(hipMemPrefetchAsync(*p, len, FGPU_DEVICE_NUMBER));
if (ret < 0) {
hipFree(*p);
return ret;
}
return gpuErrCheck(hipDeviceSynchronize());
}
int fgpu_memory_free(void *p)
{
return gpuErrCheck(hipFree(p));
}
void *fgpu_memory_get_phy_address(void *addr)
{
assert(0);
return NULL;
}
#endif /* FGPU_MEM_COLORING_ENABLED */
#if defined(FGPU_USER_MEM_COLORING_ENABLED)
int fgpu_get_memory_info(uintptr_t *start_virt_addr, uintptr_t *start_idx)
{
if (!g_memory_ctx.is_initialized) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
*start_virt_addr = (uintptr_t)g_memory_ctx.base_addr;
*start_idx = ((uintptr_t)g_memory_ctx.base_phy_addr) >> FGPU_DEVICE_COLOR_SHIFT;
return 0;
}
/*
* TODO: This might be slower to loop in userspace. Doing this inside kernel
* might be faster. So measure the reduction in bandwidth and if substantial,
* do inside kernel
*/
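/*
* Note (added for clarity; an assumption not stated in the original): the copy
* loops below never let a single hipMemcpyAsync() cross an
* FGPU_DEVICE_PAGE_SIZE boundary, presumably because the color-remapped
* virtual address returned by fgpu_color_device_true_virt_addr() is only
* contiguous within one device page, so it is recomputed for each page-sized chunk.
*/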
int fgpu_memory_copy_async_to_device_internal(void *dst, const void *src,
size_t count, hipStream_t stream)
{
size_t left = count;
int ret;
while (left) {
uintptr_t base = (uintptr_t)dst & FGPU_DEVICE_PAGE_MASK;
uintptr_t offset = (uintptr_t)dst - base;
size_t transfer = min(min(left, (size_t)FGPU_DEVICE_PAGE_SIZE),
(size_t)FGPU_DEVICE_PAGE_SIZE - (size_t)offset);
void *true_virt_addr_dest = fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
dst);
ret = gpuErrCheck(hipMemcpyAsync(true_virt_addr_dest, src, transfer, hipMemcpyHostToDevice, stream));
if (ret < 0)
return ret;
dst = (void *)((uintptr_t)dst + transfer);
src = (void *)((uintptr_t)src + transfer);
left -= transfer;
}
return 0;
}
int fgpu_memory_copy_async_to_host_internal(void *dst, const void *src,
size_t count, hipStream_t stream)
{
size_t left = count;
int ret;
while (left) {
uintptr_t base = (uintptr_t)src & FGPU_DEVICE_PAGE_MASK;
uintptr_t offset = (uintptr_t)src - base;
size_t transfer = min(min(left, (size_t)FGPU_DEVICE_PAGE_SIZE),
(size_t)FGPU_DEVICE_PAGE_SIZE - (size_t)offset);
void *true_virt_addr_src = fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
src);
ret = gpuErrCheck(hipMemcpyAsync(dst, true_virt_addr_src, transfer, hipMemcpyDeviceToHost, stream));
if (ret < 0)
return ret;
dst = (void *)((uintptr_t)dst + transfer);
src = (void *)((uintptr_t)src + transfer);
left -= transfer;
}
return 0;
}
/* Using kernel provided colored memcopy instead of doing it in userspace */
/*
int fgpu_memory_copy_async_internal(void *dst, const void *src, size_t count,
enum fgpu_memory_copy_type type,
hipStream_t stream)
{
switch (type) {
case FGPU_COPY_CPU_TO_GPU:
return fgpu_memory_copy_async_to_device_internal(dst, src, count, stream);
case FGPU_COPY_GPU_TO_CPU:
return fgpu_memory_copy_async_to_host_internal(dst, src, count, stream);
default:
return -1;
}
}
*/
/* Check if given address lies on GPU */
static bool is_address_on_gpu(const void *address)
{
if ((uintptr_t)address < (uintptr_t)g_memory_ctx.base_addr)
return false;
if ((uintptr_t)address >= (uintptr_t)g_memory_ctx.base_addr +
g_memory_ctx.reserved_len)
return false;
return true;
}
int fgpu_memory_copy_async_internal(void *dst, const void *src, size_t count,
enum fgpu_memory_copy_type type,
hipStream_t stream)
{
/* XXX: Currently, not sure how to use stream? */
UVM_MEMCPY_COLORED_PARAMS params;
int ret;
if (type == FGPU_COPY_CPU_TO_CPU) {
memcpy(dst, src, count);
return 0;
}
/* Source is GPU? */
if (type == FGPU_COPY_GPU_TO_CPU || type == FGPU_COPY_GPU_TO_GPU ||
(type == FGPU_COPY_DEFAULT && is_address_on_gpu(src))) {
ret = get_device_UUID(FGPU_DEVICE_NUMBER, &params.srcUuid);
if (ret < 0)
return ret;
params.srcBase = (NvU64)fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
src);
} else {
memcpy(&params.srcUuid, &NV_PROCESSOR_UUID_CPU_DEFAULT, sizeof(NvProcessorUuid));
params.srcBase = (NvU64)src;
}
/* Destination is GPU? */
if (type == FGPU_COPY_CPU_TO_GPU || type == FGPU_COPY_GPU_TO_GPU ||
(type == FGPU_COPY_DEFAULT && is_address_on_gpu(dst))) {
ret = get_device_UUID(FGPU_DEVICE_NUMBER, &params.destUuid);
if (ret < 0)
return ret;
params.destBase = (NvU64)fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
dst);
} else {
memcpy(&params.destUuid, &NV_PROCESSOR_UUID_CPU_DEFAULT, sizeof(NvProcessorUuid));
params.destBase = (NvU64)dst;
}
params.length = count;
ret = ioctl(g_uvm_fd, IOCTL_MEMCPY_COLORED, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Memcpy failed\n");
return -EINVAL;
}
return 0;
}
int fgpu_memory_memset_async_internal(void *address, int value, size_t count, hipStream_t stream)
{
/* XXX: Currently, not sure how to use stream? */
UVM_MEMSET_COLORED_PARAMS params;
int ret;
ret = get_device_UUID(FGPU_DEVICE_NUMBER, &params.uuid);
if (ret < 0)
return ret;
params.base = (NvU64)fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
address);
params.value = value;
params.length = count;
ret = ioctl(g_uvm_fd, IOCTL_MEMSET_COLORED, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Memcpy failed\n");
return -EINVAL;
}
return 0;
}
#else /* FGPU_USER_MEM_COLORING_ENABLED */
int fgpu_memory_copy_async_internal(void *dst, const void *src, size_t count, enum fgpu_memory_copy_type type, hipStream_t stream)
{
switch (type) {
case FGPU_COPY_CPU_TO_GPU:
return gpuErrCheck(hipMemcpyAsync(dst, src, count, hipMemcpyHostToDevice, stream));
case FGPU_COPY_GPU_TO_CPU:
return gpuErrCheck(hipMemcpyAsync(dst, src, count, hipMemcpyDeviceToHost, stream));
case FGPU_COPY_GPU_TO_GPU:
return gpuErrCheck(hipMemcpyAsync(dst, src, count, hipMemcpyDeviceToDevice, stream));
case FGPU_COPY_CPU_TO_CPU:
return gpuErrCheck(hipMemcpyAsync(dst, src, count, hipMemcpyHostToHost, stream));
case FGPU_COPY_DEFAULT:
return gpuErrCheck(hipMemcpyAsync(dst, src, count, hipMemcpyDefault, stream));
default:
assert(0);
return -1;
}
}
int fgpu_memory_memset_async_internal(void *address, int value, size_t count, hipStream_t stream)
{
return gpuErrCheck(hipMemsetAsync(address, value, count, stream));
}
#endif /* FGPU_USER_MEM_COLORING_ENABLED */
| e2f7f329c1ce56cbf63b0c9e90b10f5e13a5c5a4.cu | /*
* This file takes care of memory coloring in GPU. To do this, we make
* custom ioctl() calls to nvidia uvm driver. These ioctls have been added to
* vanilla uvm driver to expose certain functionality.
* This file traps some calls made by CUDA library using preload mechanisms.
* This is needed because CUDA library is closed source.
*/
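/*
* Illustrative note (added, not part of the original source): on Linux, an
* interposer like this is typically loaded via LD_PRELOAD, e.g.
* LD_PRELOAD=./libfgpu.so ./my_cuda_app
* The library name above is hypothetical; the dlsym(RTLD_NEXT, ...) calls
* below are what resolve the real open()/connect() symbols.
*/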
/* TODO: Use better error codes */
/* TODO: Half of colored memory is being wasted. Need to resolve this issue */
/*
* TODO: Make PTEs on GPU consistent (on memprefetch to CPU they are invalidated
* for uvm to work). But make sure data migrates when data changes (When user
* explicitly requests)
*/
/*
* TODO: There shouldn't be a need to memprefetch in case data hasn't changed
* between CPU and GPU. This should work when GPU TLBs are made persistent.
* Check what happens currently.
*/
#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <iostream>
#include <inttypes.h>
#include <linux/ioctl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
/* CUDA/NVML */
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <nvml.h>
#include <driver_types.h>
/* NVIDIA driver */
#include <uvm_minimal_init.h>
#include <nvCpuUuid.h>
#include <fractional_gpu.hpp>
#include <fractional_gpu_cuda.cuh>
#include <fgpu_internal_allocator.hpp>
#include <fgpu_internal_memory.hpp>
#ifdef FGPU_MEM_COLORING_ENABLED
#define NVIDIA_UVM_DEVICE_PATH "/dev/" NVIDIA_UVM_DEVICE_NAME
/* TODO: This path can be changed via environment variable */
#define NVIDIA_MPS_CONTROL_PATH "/tmp/nvidia-mps/control"
/* Ioctl codes */
#define IOCTL_GET_DEVICE_COLOR_INFO _IOC(0, 0, UVM_GET_DEVICE_COLOR_INFO, 0)
#define IOCTL_GET_PROCESS_COLOR_INFO _IOC(0, 0, UVM_GET_PROCESS_COLOR_INFO, 0)
#define IOCTL_SET_PROCESS_COLOR_INFO _IOC(0, 0, UVM_SET_PROCESS_COLOR_INFO, 0)
#define IOCTL_MEMCPY_COLORED _IOC(0, 0, UVM_MEMCPY_COLORED, 0)
#define IOCTL_MEMSET_COLORED _IOC(0, 0, UVM_MEMSET_COLORED, 0)
/* UVM device fd */
static int g_uvm_fd = -1;
typedef int (*orig_open_f_type)(const char *pathname, int flags, int mode);
orig_open_f_type g_orig_open;
typedef int (*orig_connect_f_type)(int sockfd, const struct sockaddr *addr,
socklen_t addrlen);
orig_connect_f_type g_orig_connect;
pthread_once_t g_pre_init_once = PTHREAD_ONCE_INIT;
pthread_once_t g_post_init_once = PTHREAD_ONCE_INIT;
bool g_init_failed;
/* All information needed for tracking memory */
struct {
bool is_initialized;
/* Start physical address of allocation */
void *base_phy_addr;
/* Actual memory available for coloring */
size_t reserved_len;
/* Actual memory allocation */
void *base_addr;
int color;
allocator_t *allocator;
} g_memory_ctx;
/* Does the most necessary initialization */
static void pre_initialization(void)
{
g_orig_open = (orig_open_f_type)dlsym(RTLD_NEXT,"open");
if (!g_orig_open) {
g_init_failed = true;
return;
}
g_orig_connect = (orig_connect_f_type)dlsym(RTLD_NEXT,"connect");
if (!g_orig_connect) {
g_init_failed = true;
return;
}
}
static void post_initialization(void)
{
nvmlReturn_t ncode;
ncode = nvmlInit();
if (ncode != NVML_SUCCESS) {
g_init_failed = true;
return;
}
}
/* Does the initialization at most once */
static int init(bool do_post_init)
{
int ret;
ret = pthread_once(&g_pre_init_once, pre_initialization);
if (ret < 0)
return ret;
if (g_init_failed) {
fprintf(stderr, "FGPU:Initialization failed\n");
return -EINVAL;
}
if (!do_post_init)
return 0;
ret = pthread_once(&g_post_init_once, post_initialization);
if (ret < 0)
return ret;
if (g_init_failed) {
fprintf(stderr, "FGPU:Initialization failed\n");
return -EINVAL;
}
return 0;
}
/* Retrieve the device UUID from the CUDA device handle */
static int get_device_UUID(int device, NvProcessorUuid *uuid)
{
nvmlReturn_t ncode;
cudaError_t ccode;
char pciID[32];
nvmlDevice_t handle;
char buf[100];
char hex[3];
char *nbuf;
int cindex, hindex, uindex, needed_bytes;
char c;
int len;
std::string prefix = "GPU";
const char *gpu_prefix = prefix.c_str();
int gpu_prefix_len = strlen(gpu_prefix);
/* Get PCI ID from the device handle and then use NVML library to get UUID */
ccode = cudaDeviceGetPCIBusId(pciID, sizeof(pciID), device);
if (ccode != cudaSuccess) {
fprintf(stderr, "FGPU:Couldn't find PCI Bus ID\n");
return -EINVAL;
}
ncode = nvmlDeviceGetHandleByPciBusId(pciID, &handle);
if (ncode != NVML_SUCCESS){
fprintf(stderr, "FGPU:Couldn't get Device Handle\n");
return -EINVAL;
}
ncode = nvmlDeviceGetUUID(handle, buf, sizeof(buf));
if (ncode != NVML_SUCCESS){
fprintf(stderr, "FGPU:Couldn't find device UUID\n");
return -EINVAL;
}
if (strncmp(buf, gpu_prefix, gpu_prefix_len) != 0)
return 0;
nbuf = buf + gpu_prefix_len;
/*
* The UUID string contains separator characters and hexadecimal digits.
* We are only interested in the hexadecimal digits.
* Each pair of hexadecimal digits encodes 1 byte.
*/
needed_bytes = sizeof(NvProcessorUuid);
len = strlen(nbuf);
for (cindex = 0, hindex = 0, uindex = 0; cindex < len; cindex++) {
c = nbuf[cindex];
if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) {
hex[hindex] = c;
hindex++;
if (hindex == 2) {
hex[2] = '\0';
uuid->uuid[uindex] = (uint8_t)strtol(hex, NULL, 16);
uindex++;
hindex = 0;
if (uindex > needed_bytes) {
fprintf(stderr, "FGPU:Invalid device UUID\n");
return -EINVAL;
}
}
}
}
if (uindex != needed_bytes) {
fprintf(stderr, "FGPU:Invalid device UUID\n");
return -EINVAL;
}
return 0;
}
extern "C" {
/* Trap open() calls (interested in UVM device opened by CUDA) */
int open(const char *pathname, int flags, int mode)
{
int ret;
ret = init(false);
if (ret < 0)
return ret;
ret = g_orig_open(pathname,flags, mode);
if (g_uvm_fd < 0 &&
strncmp(pathname, NVIDIA_UVM_DEVICE_PATH, strlen(NVIDIA_UVM_DEVICE_PATH)) == 0) {
g_uvm_fd = ret;
}
return ret;
}
/* Trap connect() calls (interested in connection to MPS) */
int connect(int sockfd, const struct sockaddr *addr,
socklen_t addrlen)
{
int ret;
ret = init(false);
if (ret < 0)
return ret;
ret = g_orig_connect(sockfd, addr, addrlen);
if (ret >= 0 && g_uvm_fd < 0 && addr && addr->sa_family == AF_LOCAL &&
strncmp(addr->sa_data, NVIDIA_MPS_CONTROL_PATH, strlen(NVIDIA_MPS_CONTROL_PATH)) == 0) {
g_uvm_fd = sockfd;
}
return ret;
}
} /* extern "C" */
static int get_device_color_info(int device, int *num_colors, size_t *max_len)
{
UVM_GET_DEVICE_COLOR_INFO_PARAMS params;
int ret;
ret = get_device_UUID(device, &params.destinationUuid);
if (ret < 0)
return ret;
ret = ioctl(g_uvm_fd, IOCTL_GET_DEVICE_COLOR_INFO, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Couldn't get device color info\n");
return -EINVAL;
}
if (num_colors)
*num_colors = params.numColors;
if (max_len)
*max_len = params.maxLength;
return 0;
}
/* Get the numbers of colors supported by the memory and maximum memory that can be reserved */
int fgpu_memory_get_device_info(int *num_colors, size_t *max_len)
{
int ret;
ret = init(true);
if (ret < 0)
return ret;
if (g_uvm_fd < 0) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
return get_device_color_info(FGPU_DEVICE_NUMBER, num_colors, max_len);
}
static int get_process_color_info(int device, int *color, size_t *length)
{
UVM_GET_PROCESS_COLOR_INFO_PARAMS params;
int ret;
ret = get_device_UUID(device, &params.destinationUuid);
if (ret < 0)
return ret;
ret = ioctl(g_uvm_fd, IOCTL_GET_PROCESS_COLOR_INFO, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Couldn't get process color property\n");
return -EINVAL;
}
if (color)
*color = params.color;
if (length)
*length = params.length;
return 0;
}
/* Indicates the color set currently for the process and the length reserved */
int fgpu_process_get_colors_info(int device, int *color, size_t *length)
{
int ret;
ret = init(true);
if (ret < 0)
return ret;
if (g_uvm_fd < 0) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
return get_process_color_info(device, color, length);
}
/* Set memory color and also reserve memory */
static int set_process_color_info(int device, int color, size_t req_length,
cudaStream_t stream)
{
UVM_SET_PROCESS_COLOR_INFO_PARAMS params;
size_t actual_length = req_length;
int ret;
/* Color can only be set once */
if (g_memory_ctx.is_initialized) {
fprintf(stderr, "FGPU:Process color already set\n");
return -EINVAL;
}
#if defined(FGPU_USER_MEM_COLORING_ENABLED)
int num_colors;
ret = get_device_color_info(device, &num_colors, NULL);
if (ret < 0)
return ret;
actual_length = req_length * num_colors;
#endif
ret = get_device_UUID(device, &params.destinationUuid);
if (ret < 0)
return ret;
params.color = color;
params.length = actual_length;
ret = ioctl(g_uvm_fd, IOCTL_SET_PROCESS_COLOR_INFO, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Couldn't set process color property\n");
return -EINVAL;
}
ret = gpuErrCheck(cudaMallocManaged(&g_memory_ctx.base_addr, actual_length));
if (ret < 0)
return ret;
#if 1
/* Do the actual allocation on device */
ret = gpuErrCheck(cudaMemPrefetchAsync(g_memory_ctx.base_addr, actual_length,
device, stream));
if (ret < 0) {
cudaFree(g_memory_ctx.base_addr);
return ret;
}
#endif
ret = gpuErrCheck(cudaStreamSynchronize(stream));
if (ret < 0) {
cudaFree(g_memory_ctx.base_addr);
return ret;
}
g_memory_ctx.is_initialized = true;
g_memory_ctx.base_phy_addr = (void *)params.address;
g_memory_ctx.reserved_len = req_length;
g_memory_ctx.color = color;
g_memory_ctx.allocator = allocator_init(g_memory_ctx.base_addr,
req_length, FGPU_DEVICE_ADDRESS_ALIGNMENT);
if (!g_memory_ctx.allocator) {
fprintf(stderr, "FGPU:Allocator Initialization Failed\n");
return -EINVAL;
}
return 0;
}
/* Indicates the color set currently for the process and the length reserved */
int fgpu_memory_set_colors_info(int device, int color, size_t length,
cudaStream_t stream)
{
int ret;
ret = init(true);
if (ret < 0)
return ret;
if (g_uvm_fd < 0) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
return set_process_color_info(device, color, length, stream);
}
void fgpu_memory_deinit(void)
{
if (!g_memory_ctx.is_initialized)
return;
if (g_memory_ctx.allocator)
allocator_deinit(g_memory_ctx.allocator);
cudaFree(g_memory_ctx.base_addr);
g_memory_ctx.is_initialized = false;
}
int fgpu_memory_allocate(void **p, size_t len)
{
void *ret_addr;
if (!g_memory_ctx.is_initialized) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
ret_addr = allocator_alloc(g_memory_ctx.allocator, len);
if (!ret_addr) {
fprintf(stderr, "FGPU:Can't allocate device memory\n");
return -ENOMEM;
}
*p = ret_addr;
return 0;
}
int fgpu_memory_free(void *p)
{
if (!g_memory_ctx.is_initialized) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
allocator_free(g_memory_ctx.allocator, p);
return 0;
}
/* Useful only for reverse engineering */
void *fgpu_memory_get_phy_address(void *addr)
{
if (!g_memory_ctx.base_phy_addr)
return NULL;
return (void *)((uintptr_t)g_memory_ctx.base_phy_addr +
(uintptr_t)addr - (uintptr_t)g_memory_ctx.base_addr);
}
#else /* FGPU_MEM_COLORING_ENABLED */
int fgpu_memory_allocate(void **p, size_t len)
{
/*
* XXX: We are using cudaMallocManaged() instead of just
* cudaMalloc() to make the comparison fair between memory coloring
* enabled vs. disabled. Memcpy() is slower (for small sizes) with
* cudaMallocManaged() vs. cudaMalloc() (but faster for larger sizes > 8MB).
* We suspect this is because of code differences inside the Linux driver.
*/
int ret;
ret = gpuErrCheck(cudaMallocManaged(p, len));
if (ret < 0)
return ret;
/* Do the actual allocation on device */
ret = gpuErrCheck(cudaMemPrefetchAsync(*p, len, FGPU_DEVICE_NUMBER));
if (ret < 0) {
cudaFree(*p);
return ret;
}
return gpuErrCheck(cudaDeviceSynchronize());
}
int fgpu_memory_free(void *p)
{
return gpuErrCheck(cudaFree(p));
}
void *fgpu_memory_get_phy_address(void *addr)
{
assert(0);
return NULL;
}
#endif /* FGPU_MEM_COLORING_ENABLED */
#if defined(FGPU_USER_MEM_COLORING_ENABLED)
int fgpu_get_memory_info(uintptr_t *start_virt_addr, uintptr_t *start_idx)
{
if (!g_memory_ctx.is_initialized) {
fprintf(stderr, "FGPU:Initialization not done\n");
return -EBADF;
}
*start_virt_addr = (uintptr_t)g_memory_ctx.base_addr;
*start_idx = ((uintptr_t)g_memory_ctx.base_phy_addr) >> FGPU_DEVICE_COLOR_SHIFT;
return 0;
}
/*
* TODO: This might be slower to loop in userspace. Doing this inside kernel
* might be faster. So measure the reduction in bandwidth and if substantial,
* do inside kernel
*/
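/*
* Note (added for clarity; an assumption not stated in the original): the copy
* loops below never let a single cudaMemcpyAsync() cross an
* FGPU_DEVICE_PAGE_SIZE boundary, presumably because the color-remapped
* virtual address returned by fgpu_color_device_true_virt_addr() is only
* contiguous within one device page, so it is recomputed for each page-sized chunk.
*/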
int fgpu_memory_copy_async_to_device_internal(void *dst, const void *src,
size_t count, cudaStream_t stream)
{
size_t left = count;
int ret;
while (left) {
uintptr_t base = (uintptr_t)dst & FGPU_DEVICE_PAGE_MASK;
uintptr_t offset = (uintptr_t)dst - base;
size_t transfer = min(min(left, (size_t)FGPU_DEVICE_PAGE_SIZE),
(size_t)FGPU_DEVICE_PAGE_SIZE - (size_t)offset);
void *true_virt_addr_dest = fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
dst);
ret = gpuErrCheck(cudaMemcpyAsync(true_virt_addr_dest, src, transfer, cudaMemcpyHostToDevice, stream));
if (ret < 0)
return ret;
dst = (void *)((uintptr_t)dst + transfer);
src = (void *)((uintptr_t)src + transfer);
left -= transfer;
}
return 0;
}
int fgpu_memory_copy_async_to_host_internal(void *dst, const void *src,
size_t count, cudaStream_t stream)
{
size_t left = count;
int ret;
while (left) {
uintptr_t base = (uintptr_t)src & FGPU_DEVICE_PAGE_MASK;
uintptr_t offset = (uintptr_t)src - base;
size_t transfer = min(min(left, (size_t)FGPU_DEVICE_PAGE_SIZE),
(size_t)FGPU_DEVICE_PAGE_SIZE - (size_t)offset);
void *true_virt_addr_src = fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
src);
ret = gpuErrCheck(cudaMemcpyAsync(dst, true_virt_addr_src, transfer, cudaMemcpyDeviceToHost, stream));
if (ret < 0)
return ret;
dst = (void *)((uintptr_t)dst + transfer);
src = (void *)((uintptr_t)src + transfer);
left -= transfer;
}
return 0;
}
/* Using kernel provided colored memcopy instead of doing it in userspace */
/*
int fgpu_memory_copy_async_internal(void *dst, const void *src, size_t count,
enum fgpu_memory_copy_type type,
cudaStream_t stream)
{
switch (type) {
case FGPU_COPY_CPU_TO_GPU:
return fgpu_memory_copy_async_to_device_internal(dst, src, count, stream);
case FGPU_COPY_GPU_TO_CPU:
return fgpu_memory_copy_async_to_host_internal(dst, src, count, stream);
default:
return -1;
}
}
*/
/* Check if given address lies on GPU */
static bool is_address_on_gpu(const void *address)
{
if ((uintptr_t)address < (uintptr_t)g_memory_ctx.base_addr)
return false;
if ((uintptr_t)address >= (uintptr_t)g_memory_ctx.base_addr +
g_memory_ctx.reserved_len)
return false;
return true;
}
int fgpu_memory_copy_async_internal(void *dst, const void *src, size_t count,
enum fgpu_memory_copy_type type,
cudaStream_t stream)
{
/* XXX: Currently, not sure how to use stream? */
UVM_MEMCPY_COLORED_PARAMS params;
int ret;
if (type == FGPU_COPY_CPU_TO_CPU) {
memcpy(dst, src, count);
return 0;
}
/* Source is GPU? */
if (type == FGPU_COPY_GPU_TO_CPU || type == FGPU_COPY_GPU_TO_GPU ||
(type == FGPU_COPY_DEFAULT && is_address_on_gpu(src))) {
ret = get_device_UUID(FGPU_DEVICE_NUMBER, &params.srcUuid);
if (ret < 0)
return ret;
params.srcBase = (NvU64)fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
src);
} else {
memcpy(&params.srcUuid, &NV_PROCESSOR_UUID_CPU_DEFAULT, sizeof(NvProcessorUuid));
params.srcBase = (NvU64)src;
}
/* Destination is GPU? */
if (type == FGPU_COPY_CPU_TO_GPU || type == FGPU_COPY_GPU_TO_GPU ||
(type == FGPU_COPY_DEFAULT && is_address_on_gpu(dst))) {
ret = get_device_UUID(FGPU_DEVICE_NUMBER, &params.destUuid);
if (ret < 0)
return ret;
params.destBase = (NvU64)fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
dst);
} else {
memcpy(&params.destUuid, &NV_PROCESSOR_UUID_CPU_DEFAULT, sizeof(NvProcessorUuid));
params.destBase = (NvU64)dst;
}
params.length = count;
ret = ioctl(g_uvm_fd, IOCTL_MEMCPY_COLORED, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Memcpy failed\n");
return -EINVAL;
}
return 0;
}
int fgpu_memory_memset_async_internal(void *address, int value, size_t count, cudaStream_t stream)
{
/* XXX: Currently, not sure how to use stream? */
UVM_MEMSET_COLORED_PARAMS params;
int ret;
ret = get_device_UUID(FGPU_DEVICE_NUMBER, &params.uuid);
if (ret < 0)
return ret;
params.base = (NvU64)fgpu_color_device_true_virt_addr((uint64_t)g_memory_ctx.base_addr,
(uint64_t)g_memory_ctx.base_phy_addr,
g_memory_ctx.color,
address);
params.value = value;
params.length = count;
ret = ioctl(g_uvm_fd, IOCTL_MEMSET_COLORED, &params);
if (ret < 0)
return ret;
if (params.rmStatus != NV_OK) {
fprintf(stderr, "FGPU:Memcpy failed\n");
return -EINVAL;
}
return 0;
}
#else /* FGPU_USER_MEM_COLORING_ENABLED */
int fgpu_memory_copy_async_internal(void *dst, const void *src, size_t count, enum fgpu_memory_copy_type type, cudaStream_t stream)
{
switch (type) {
case FGPU_COPY_CPU_TO_GPU:
return gpuErrCheck(cudaMemcpyAsync(dst, src, count, cudaMemcpyHostToDevice, stream));
case FGPU_COPY_GPU_TO_CPU:
return gpuErrCheck(cudaMemcpyAsync(dst, src, count, cudaMemcpyDeviceToHost, stream));
case FGPU_COPY_GPU_TO_GPU:
return gpuErrCheck(cudaMemcpyAsync(dst, src, count, cudaMemcpyDeviceToDevice, stream));
case FGPU_COPY_CPU_TO_CPU:
return gpuErrCheck(cudaMemcpyAsync(dst, src, count, cudaMemcpyHostToHost, stream));
case FGPU_COPY_DEFAULT:
return gpuErrCheck(cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream));
default:
assert(0);
return -1;
}
}
int fgpu_memory_memset_async_internal(void *address, int value, size_t count, cudaStream_t stream)
{
return gpuErrCheck(cudaMemsetAsync(address, value, count, stream));
}
#endif /* FGPU_USER_MEM_COLORING_ENABLED */
|
788d14f7f1685995fa470ab014f6543d57e4697e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zswapdblk.cu, normal z -> s, Sun Nov 20 20:20:29 2016
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
sswapdblk_kernel( int nb,
float *dA, int ldda, int inca,
float *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
float tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/***************************************************************************//**
Purpose
-------
sswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
*******************************************************************************/
extern "C" void
magmablas_sswapdblk(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
hipLaunchKernelGGL(( sswapdblk_kernel), dim3(nblocks), dim3(nb), 0, queue->cuda_stream() ,
nb, dA, ldda, inca,
dB, lddb, incb );
}
}
| 788d14f7f1685995fa470ab014f6543d57e4697e.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zswapdblk.cu, normal z -> s, Sun Nov 20 20:20:29 2016
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
sswapdblk_kernel( int nb,
float *dA, int ldda, int inca,
float *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
float tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/***************************************************************************//**
Purpose
-------
sswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
*******************************************************************************/
extern "C" void
magmablas_sswapdblk(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
sswapdblk_kernel<<< nblocks, nb, 0, queue->cuda_stream() >>>
( nb, dA, ldda, inca,
dB, lddb, incb );
}
}
|
994c6017d331abb0378e5ea6a907242c8ef94eed.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <algorithm>
#include <iostream>
#include <metrics/kl_divergence.cuh>
#include <raft/mr/device/allocator.hpp>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct klDivergenceParam {
int nElements;
double tolerance;
};
//test fixture class
template <typename DataT>
class klDivergenceTest : public ::testing::TestWithParam<klDivergenceParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<klDivergenceParam>::GetParam();
nElements = params.nElements;
//generating random value test input
std::vector<DataT> h_modelPDF(nElements, 0);
std::vector<DataT> h_candidatePDF(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_real_distribution<DataT> realGenerator(0.0, 1.0);
std::generate(h_modelPDF.begin(), h_modelPDF.end(),
[&]() { return realGenerator(dre); });
std::generate(h_candidatePDF.begin(), h_candidatePDF.end(),
[&]() { return realGenerator(dre); });
//allocating and initializing memory to the GPU
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(d_modelPDF, nElements, true);
raft::allocate(d_candidatePDF, nElements, true);
raft::update_device(d_modelPDF, &h_modelPDF[0], (int)nElements, stream);
raft::update_device(d_candidatePDF, &h_candidatePDF[0], (int)nElements,
stream);
std::shared_ptr<raft::mr::device::allocator> allocator(
new raft::mr::device::default_allocator);
//generating the golden output
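//(added note) this reference loop computes D_KL(P || Q) = sum_i p_i * log(p_i / q_i),
//with the convention that terms where p_i == 0 contribute 0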
for (int i = 0; i < nElements; ++i) {
if (h_modelPDF[i] == 0.0)
truthklDivergence += 0;
else
truthklDivergence +=
h_modelPDF[i] * log(h_modelPDF[i] / h_candidatePDF[i]);
}
//calling the kl_divergence CUDA implementation
computedklDivergence = MLCommon::Metrics::kl_divergence(
d_modelPDF, d_candidatePDF, nElements, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(hipFree(d_modelPDF));
CUDA_CHECK(hipFree(d_candidatePDF));
CUDA_CHECK(hipStreamDestroy(stream));
}
//declaring the data values
klDivergenceParam params;
DataT* d_modelPDF = nullptr;
DataT* d_candidatePDF = nullptr;
int nElements = 0;
DataT truthklDivergence = 0;
DataT computedklDivergence = 0;
hipStream_t stream;
};
//setting test parameter values
const std::vector<klDivergenceParam> inputs = {
{500, 0.000001}, {200, 0.001}, {5000, 0.000001}, {500000, 0.000001}
};
//writing the test suite
typedef klDivergenceTest<double> klDivergenceTestClass;
TEST_P(klDivergenceTestClass, Result) {
ASSERT_NEAR(computedklDivergence, truthklDivergence, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(klDivergence, klDivergenceTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
| 994c6017d331abb0378e5ea6a907242c8ef94eed.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <algorithm>
#include <iostream>
#include <metrics/kl_divergence.cuh>
#include <raft/mr/device/allocator.hpp>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct klDivergenceParam {
int nElements;
double tolerance;
};
//test fixture class
template <typename DataT>
class klDivergenceTest : public ::testing::TestWithParam<klDivergenceParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<klDivergenceParam>::GetParam();
nElements = params.nElements;
//generating random value test input
std::vector<DataT> h_modelPDF(nElements, 0);
std::vector<DataT> h_candidatePDF(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_real_distribution<DataT> realGenerator(0.0, 1.0);
std::generate(h_modelPDF.begin(), h_modelPDF.end(),
[&]() { return realGenerator(dre); });
std::generate(h_candidatePDF.begin(), h_candidatePDF.end(),
[&]() { return realGenerator(dre); });
//allocating and initializing memory to the GPU
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(d_modelPDF, nElements, true);
raft::allocate(d_candidatePDF, nElements, true);
raft::update_device(d_modelPDF, &h_modelPDF[0], (int)nElements, stream);
raft::update_device(d_candidatePDF, &h_candidatePDF[0], (int)nElements,
stream);
std::shared_ptr<raft::mr::device::allocator> allocator(
new raft::mr::device::default_allocator);
//generating the golden output
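//(added note) this reference loop computes D_KL(P || Q) = sum_i p_i * log(p_i / q_i),
//with the convention that terms where p_i == 0 contribute 0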
for (int i = 0; i < nElements; ++i) {
if (h_modelPDF[i] == 0.0)
truthklDivergence += 0;
else
truthklDivergence +=
h_modelPDF[i] * log(h_modelPDF[i] / h_candidatePDF[i]);
}
//calling the kl_divergence CUDA implementation
computedklDivergence = MLCommon::Metrics::kl_divergence(
d_modelPDF, d_candidatePDF, nElements, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(cudaFree(d_modelPDF));
CUDA_CHECK(cudaFree(d_candidatePDF));
CUDA_CHECK(cudaStreamDestroy(stream));
}
//declaring the data values
klDivergenceParam params;
DataT* d_modelPDF = nullptr;
DataT* d_candidatePDF = nullptr;
int nElements = 0;
DataT truthklDivergence = 0;
DataT computedklDivergence = 0;
cudaStream_t stream;
};
//setting test parameter values
const std::vector<klDivergenceParam> inputs = {
{500, 0.000001}, {200, 0.001}, {5000, 0.000001}, {500000, 0.000001}
};
//writing the test suite
typedef klDivergenceTest<double> klDivergenceTestClass;
TEST_P(klDivergenceTestClass, Result) {
ASSERT_NEAR(computedklDivergence, truthklDivergence, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(klDivergence, klDivergenceTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
|
6a3b4d122ab90f17c7ffb86d1d75db83aae28da5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************************
realize_dopscale.c
Implements the '=' state for Doppler scaling factors, and also checks that the values of
all such factors lie within the allowed range
For each delay-Doppler or Doppler dataset whose Doppler scaling factor has the '=' state,
go backwards in the datafile until we find a delay-Doppler or Doppler dataset whose
Doppler scaling factor has state 'f' and/or 'c', and copy its value.
Since the "vary_dopscale" parameter permits Doppler scaling factors to be varied jointly
with shape/spin parameters that are being fit, the "dopscale_mode" parameter to
realize_dopscale determines how to handle this possibility:
dopscale_mode = 0:
Doppler scaling factors are not being varied jointly with shape/spin parameters,
or else we aren't fitting a shape/spin parameter right now; just update each
dataset's dopscale_save by setting it equal to that dataset's Doppler scaling
factor, in case joint variation is needed later in the fit
dopscale_mode = 1:
Doppler scaling factors are being varied jointly with shape/spin parameters, and
we're in the process of fitting some shape/spin parameter p (i.e., we're realizing
the model for a trial value of p); set each dataset's Doppler scaling factor equal
to the product of the corresponding dopscale_save and "dopscale_factor"
dopscale_mode = 2:
Doppler scaling factors are being varied jointly with shape/spin parameters, we've
just obtained the best-fit value for shape/spin parameter p, and now we need to
set the Doppler scaling factors to their best-fit values (i.e., to the values that
"go with" the best-fit value of p); set each dataset's Doppler scaling factor
equal to the product of the corresponding dopscale_save and "dopscale_factor,"
then update dopscale_save by setting it equal to this same product
Modified 2016 July 7 by Matt Engels:
Adapted for use in shape-cuda.
Written 2012 March 24 by CM, based on the "realize_delcor" routine for implementing the
'=' state and on the "realize_photo" routine for checking for legal values
*****************************************************************************************/
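/*
* Illustrative example (added for clarity, not from the original header): if
* datasets 0, 1 and 2 have Doppler-scaling states 'f', '=' and '=', then
* datasets 1 and 2 simply copy dataset 0's scaling factor, and only dataset 0's
* value is range-checked against dopscale_min/dopscale_max.
*/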
extern "C" {
#include "head.h"
}
__device__ int realize_dopscale_nsets;
__device__ void dev_checkdopscale(double parval, double parmin, double parmax,
int mode, unsigned char *baddopscale, double *baddopscale_logfactor)
{
/* Flag Doppler scaling factor as bad if
* mode = 0: parval < parmin or parval > parmax
* mode = 1: parval <= parmin or parval > parmax
* mode = 2: parval < parmin or parval >= parmax
* mode = 3: parval <= parmin or parval >= parmax */
if (mode < 0 || mode > 3)
printf("realize_dopscale.c: checkdopscale mode must be between 0 and 3\n");
if (mode == 0 || mode == 2) {
if (parval < parmin) {
*baddopscale = 1;
*baddopscale_logfactor += log(1 + parmin - parval);
}
} else {
if (parval <= parmin) {
*baddopscale = 1;
*baddopscale_logfactor += log(1 + parmin - parval);
}
}
if (mode == 0 || mode == 1) {
if (parval > parmax) {
*baddopscale = 1;
*baddopscale_logfactor += log(1 + parval - parmax);
}
} else {
if (parval >= parmax) {
*baddopscale = 1;
*baddopscale_logfactor += log(1 + parval - parmax);
}
}
}
__global__ void realize_dopscale_devpar_init_krnl(struct par_t *dpar,
struct dat_t *ddat) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
dpar->baddopscale = 0;
dpar->baddopscale_logfactor = 0.0;
realize_dopscale_nsets = ddat->nsets;
}
}
__global__ void realize_dopscale_cuda_krnl(struct par_t *dpar, struct dat_t
*ddat, double dopscale_factor, int dopscale_mode) {
/* nset-threaded kernel */
int s = blockIdx.x * blockDim.x + threadIdx.x;
int s_dopscale = -1, type_dopscale = -1;
if (s < realize_dopscale_nsets) {
if (ddat->set[s].type == DELAY) {
if (ddat->set[s].desc.deldop.dopscale.state != '=') {
s_dopscale = s;
type_dopscale = DELAY;
if (ddat->set[s].desc.deldop.dopscale.state == 'f') {
if (dopscale_mode != 0)
ddat->set[s].desc.deldop.dopscale.val =
ddat->set[s].desc.deldop.dopscale_save * dopscale_factor;
if (dopscale_mode != 1)
ddat->set[s].desc.deldop.dopscale_save =
ddat->set[s].desc.deldop.dopscale.val;
}
dev_checkdopscale( ddat->set[s].desc.deldop.dopscale.val,
dpar->dopscale_min, dpar->dopscale_max, 3,
&dpar->baddopscale, &dpar->baddopscale_logfactor);
} else if (s_dopscale < 0) {
printf("can't use \"=\" state for the first (delay-)Doppler dataset\n");
} else if (type_dopscale == DELAY) {
ddat->set[s].desc.deldop.dopscale.val =
ddat->set[s_dopscale].desc.deldop.dopscale.val;
} else {
ddat->set[s].desc.deldop.dopscale.val =
ddat->set[s_dopscale].desc.doppler.dopscale.val;
}
} else if (ddat->set[s].type == DOPPLER) {
if (ddat->set[s].desc.doppler.dopscale.state != '=') {
s_dopscale = s;
type_dopscale = DOPPLER;
if (ddat->set[s].desc.doppler.dopscale.state == 'f') {
if (dopscale_mode != 0)
ddat->set[s].desc.doppler.dopscale.val =
ddat->set[s].desc.doppler.dopscale_save * dopscale_factor;
if (dopscale_mode != 1)
ddat->set[s].desc.doppler.dopscale_save =
ddat->set[s].desc.doppler.dopscale.val;
}
dev_checkdopscale( ddat->set[s].desc.doppler.dopscale.val,
dpar->dopscale_min, dpar->dopscale_max, 3,
&dpar->baddopscale, &dpar->baddopscale_logfactor);
} else if (s_dopscale < 0) {
printf("can't use \"=\" state for the first (delay-)Doppler dataset\n");
} else if (type_dopscale == DELAY) {
ddat->set[s].desc.doppler.dopscale.val =
ddat->set[s_dopscale].desc.deldop.dopscale.val;
} else {
ddat->set[s].desc.doppler.dopscale.val =
ddat->set[s_dopscale].desc.doppler.dopscale.val;
}
}
}
}
__host__ void realize_dopscale_cuda(struct par_t *dpar, struct dat_t
*ddat, double dopscale_factor, int dopscale_mode)
{
int nsets = 0;
dim3 BLK,THD;
/* Initialize the flag for illegal Doppler scaling factor values */
hipLaunchKernelGGL(( realize_dopscale_devpar_init_krnl), dim3(1),dim3(1), 0, 0, dpar, ddat);
checkErrorAfterKernelLaunch("realize_dopscale_devpar_init_krnl "
"(realize_dopscale)");
gpuErrchk(hipMemcpyFromSymbol(&nsets, realize_dopscale_nsets, sizeof(int),
0, hipMemcpyDeviceToHost));
/* If a dataset has a Doppler scaling factor with state = '=', go backwards
* in the datafile until we reach a delay-Doppler or Doppler dataset whose
* Doppler scaling factor has state 'f' or 'c' rather than '='.
* s_dopscale is the number of the dataset we find.
* type_dopscale tells whether that dataset is delay-Doppler or Doppler. */
/* Launch nset-threaded kernel */
THD.x = nsets;
hipLaunchKernelGGL(( realize_dopscale_cuda_krnl), dim3(BLK),dim3(THD), 0, 0, dpar, ddat, dopscale_factor,
dopscale_mode);
checkErrorAfterKernelLaunch("realize_dopscale_cuda_krnl (realize_dopscale)");
}
| 6a3b4d122ab90f17c7ffb86d1d75db83aae28da5.cu | /*****************************************************************************************
realize_dopscale.c
Implements the '=' state for Doppler scaling factors, and also checks that the values of
all such factors lie within the allowed range
For each delay-Doppler or Doppler dataset whose Doppler scaling factor has the '=' state,
go backwards in the datafile until we find a delay-Doppler or Doppler dataset whose
Doppler scaling factor has state 'f' and/or 'c', and copy its value.
Since the "vary_dopscale" parameter permits Doppler scaling factors to be varied jointly
with shape/spin parameters that are being fit, the "dopscale_mode" parameter to
realize_dopscale determines how to handle this possibility:
dopscale_mode = 0:
Doppler scaling factors are not being varied jointly with shape/spin parameters,
or else we aren't fitting a shape/spin parameter right now; just update each
dataset's dopscale_save by setting it equal to that dataset's Doppler scaling
factor, in case joint variation is needed later in the fit
dopscale_mode = 1:
Doppler scaling factors are being varied jointly with shape/spin parameters, and
we're in the process of fitting some shape/spin parameter p (i.e., we're realizing
the model for a trial value of p); set each dataset's Doppler scaling factor equal
to the product of the corresponding dopscale_save and "dopscale_factor"
dopscale_mode = 2:
Doppler scaling factors are being varied jointly with shape/spin parameters, we've
just obtained the best-fit value for shape/spin parameter p, and now we need to
set the Doppler scaling factors to their best-fit values (i.e., to the values that
"go with" the best-fit value of p); set each dataset's Doppler scaling factor
equal to the product of the corresponding dopscale_save and "dopscale_factor,"
then update dopscale_save by setting it equal to this same product
Modified 2016 July 7 by Matt Engels:
Adapted for use in shape-cuda.
Written 2012 March 24 by CM, based on the "realize_delcor" routine for implementing the
'=' state and on the "realize_photo" routine for checking for legal values
*****************************************************************************************/
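/* Editor's illustrative sketch, not part of the original shape-cuda source: a minimal,
   hypothetical helper showing how the three dopscale_mode cases described above update a
   single Doppler scaling factor. "val", "save", and "factor" stand in for dopscale.val,
   dopscale_save, and dopscale_factor; the kernel below applies the same two-step rule to
   each dataset whose scaling factor has state 'f'. */
static void dopscale_mode_sketch(double *val, double *save, double factor, int mode)
{
    if (mode != 0)  /* modes 1 and 2: realize the trial value that goes with parameter p */
        *val = *save * factor;
    if (mode != 1)  /* modes 0 and 2: record the value for later joint variation */
        *save = *val;
}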
extern "C" {
#include "head.h"
}
__device__ int realize_dopscale_nsets;
__device__ void dev_checkdopscale(double parval, double parmin, double parmax,
int mode, unsigned char *baddopscale, double *baddopscale_logfactor)
{
/* Flag Doppler scaling factor as bad if
* mode = 0: parval < parmin or parval > parmax
* mode = 1: parval <= parmin or parval > parmax
* mode = 2: parval < parmin or parval >= parmax
* mode = 3: parval <= parmin or parval >= parmax */
if (mode < 0 || mode > 3)
printf("realize_dopscale.c: checkdopscale mode must be between 0 and 3\n");
if (mode == 0 || mode == 2) {
if (parval < parmin) {
*baddopscale = 1;
*baddopscale_logfactor += log(1 + parmin - parval);
}
} else {
if (parval <= parmin) {
*baddopscale = 1;
*baddopscale_logfactor += log(1 + parmin - parval);
}
}
if (mode == 0 || mode == 1) {
if (parval > parmax) {
*baddopscale = 1;
*baddopscale_logfactor += log(1 + parval - parmax);
}
} else {
if (parval >= parmax) {
*baddopscale = 1;
*baddopscale_logfactor += log(1 + parval - parmax);
}
}
}
__global__ void realize_dopscale_devpar_init_krnl(struct par_t *dpar,
struct dat_t *ddat) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
dpar->baddopscale = 0;
dpar->baddopscale_logfactor = 0.0;
realize_dopscale_nsets = ddat->nsets;
}
}
__global__ void realize_dopscale_cuda_krnl(struct par_t *dpar, struct dat_t
*ddat, double dopscale_factor, int dopscale_mode) {
/* nset-threaded kernel */
int s = blockIdx.x * blockDim.x + threadIdx.x;
int s_dopscale = -1, type_dopscale = -1;
if (s < realize_dopscale_nsets) {
if (ddat->set[s].type == DELAY) {
if (ddat->set[s].desc.deldop.dopscale.state != '=') {
s_dopscale = s;
type_dopscale = DELAY;
if (ddat->set[s].desc.deldop.dopscale.state == 'f') {
if (dopscale_mode != 0)
ddat->set[s].desc.deldop.dopscale.val =
ddat->set[s].desc.deldop.dopscale_save * dopscale_factor;
if (dopscale_mode != 1)
ddat->set[s].desc.deldop.dopscale_save =
ddat->set[s].desc.deldop.dopscale.val;
}
dev_checkdopscale( ddat->set[s].desc.deldop.dopscale.val,
dpar->dopscale_min, dpar->dopscale_max, 3,
&dpar->baddopscale, &dpar->baddopscale_logfactor);
} else if (s_dopscale < 0) {
printf("can't use \"=\" state for the first (delay-)Doppler dataset\n");
} else if (type_dopscale == DELAY) {
ddat->set[s].desc.deldop.dopscale.val =
ddat->set[s_dopscale].desc.deldop.dopscale.val;
} else {
ddat->set[s].desc.deldop.dopscale.val =
ddat->set[s_dopscale].desc.doppler.dopscale.val;
}
} else if (ddat->set[s].type == DOPPLER) {
if (ddat->set[s].desc.doppler.dopscale.state != '=') {
s_dopscale = s;
type_dopscale = DOPPLER;
if (ddat->set[s].desc.doppler.dopscale.state == 'f') {
if (dopscale_mode != 0)
ddat->set[s].desc.doppler.dopscale.val =
ddat->set[s].desc.doppler.dopscale_save * dopscale_factor;
if (dopscale_mode != 1)
ddat->set[s].desc.doppler.dopscale_save =
ddat->set[s].desc.doppler.dopscale.val;
}
dev_checkdopscale( ddat->set[s].desc.doppler.dopscale.val,
dpar->dopscale_min, dpar->dopscale_max, 3,
&dpar->baddopscale, &dpar->baddopscale_logfactor);
} else if (s_dopscale < 0) {
printf("can't use \"=\" state for the first (delay-)Doppler dataset\n");
} else if (type_dopscale == DELAY) {
ddat->set[s].desc.doppler.dopscale.val =
ddat->set[s_dopscale].desc.deldop.dopscale.val;
} else {
ddat->set[s].desc.doppler.dopscale.val =
ddat->set[s_dopscale].desc.doppler.dopscale.val;
}
}
}
}
__host__ void realize_dopscale_cuda(struct par_t *dpar, struct dat_t
*ddat, double dopscale_factor, int dopscale_mode)
{
int nsets = 0;
dim3 BLK,THD;
/* Initialize the flag for illegal Doppler scaling factor values */
realize_dopscale_devpar_init_krnl<<<1,1>>>(dpar, ddat);
checkErrorAfterKernelLaunch("realize_dopscale_devpar_init_krnl "
"(realize_dopscale)");
gpuErrchk(cudaMemcpyFromSymbol(&nsets, realize_dopscale_nsets, sizeof(int),
0, cudaMemcpyDeviceToHost));
/* If a dataset has a Doppler scaling factor with state = '=', go backwards
* in the datafile until we reach a delay-Doppler or Doppler dataset whose
* Doppler scaling factor has state 'f' or 'c' rather than '='.
* s_dopscale is the number of the dataset we find.
* type_dopscale tells whether that dataset is delay-Doppler or Doppler. */
/* Launch nset-threaded kernel */
THD.x = nsets;
realize_dopscale_cuda_krnl<<<BLK,THD>>>(dpar, ddat, dopscale_factor,
dopscale_mode);
checkErrorAfterKernelLaunch("realize_dopscale_cuda_krnl (realize_dopscale)");
}
|
0afd114cce4a52ce277bde93384d3cd8ee1e8250.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ElementSum.h"
/**
* This boolean value defines whether debug information is printed within macros
*/
#define DEBUG true
/**
* The number of items in the partial sum array
*/
#define MAT_SIZE 512
#define SEED 41887
/**
* This macro checks the return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/**
* This macro changes the active device to the device with the provided integer index.
*/
#define SET_DEVICE(value) { \
hipDeviceProp_t devProp; \
hipGetDeviceProperties(&devProp, value); \
if(DEBUG)printf("Changing the gpu to device id: %i name: %s\n",value,devProp.name); \
CUDA_CHECK_RETURN(hipSetDevice(value)); \
\
}
__global__ void ElementSum_ColumnSums(int* matrix, int* result){
//get this threads column index
const unsigned int column = threadIdx.x;
//allocate shared memory for partial sums
__shared__ int columnSums[MAT_SIZE];
//loop through all elements in global memory and keep a running sum. Finally leave that total in the shared memory space.
int i;
int sum = 0;
for(i = 0; i < MAT_SIZE; i++){
sum += matrix[(i*MAT_SIZE) +column]; //only difference in computation is here, the data access pattern
}
columnSums[column] = sum;
__syncthreads();
if(column == 0){
int finalSum = 0;
for(i=0; i < MAT_SIZE; i++){
finalSum += columnSums[i];
}
*result = finalSum;
}
__syncthreads();
return;
}
__global__ void ElementSum_RowSums(int* matrix, int* result){
//get this threads row index
const unsigned int row = threadIdx.x;
//allocate shared memory for partial sums
__shared__ int rowSums[MAT_SIZE];
//loop through all elements in global memory and keep a running sum. Finally leave that total in the shared memory space.
int i;
int sum = 0;
for(i = 0; i < MAT_SIZE; i++){
sum += matrix[(row*MAT_SIZE) +i]; //only difference in computation is here, the data access pattern
}
rowSums[row] = sum;
__syncthreads();
if(row == 0){
int finalSum = 0;
for(i=0; i < MAT_SIZE; i++){
finalSum += rowSums[i];
}
*result = finalSum;
}
__syncthreads();
return;
}
/**
* Host function that prepares the data array and passes it to the CUDA kernel.
*/
int elementSum(bool improved) {
SET_DEVICE(0);
CUDA_CHECK_RETURN(hipDeviceReset()); //pre-clear the device
int A[MAT_SIZE*MAT_SIZE];
//populate both matrices with random values
srand(SEED);
for(int i = 0; i < MAT_SIZE; i++){
for(int j=0; j<MAT_SIZE; j++){
A[i*MAT_SIZE+j] = (int)(((float)rand()/RAND_MAX)*100);
}
}
//Device pointers for A and gpuResult
int *d_A, *d_gpuResult;
//allocate device side memory for d_A
CUDA_CHECK_RETURN(hipMalloc((void**) &d_A, sizeof(int)*MAT_SIZE*MAT_SIZE));
//allocate the array of size 1 for return value
CUDA_CHECK_RETURN(hipMalloc((void**) &d_gpuResult, sizeof(int)));
//Memcpy device side A matrix
CUDA_CHECK_RETURN(hipMemcpy((void*) d_A, A, sizeof(int)*MAT_SIZE*MAT_SIZE, hipMemcpyHostToDevice));
if(!improved){
printf("Calculating element sum by adding matrix row sums.\n");
hipLaunchKernelGGL(( ElementSum_ColumnSums), dim3(dim3(1,1,1)),dim3(dim3(MAT_SIZE,1,1)),0,0, d_A,d_gpuResult);
}
else{
printf("Calculating element sum by adding matrix column sums.\n");
hipLaunchKernelGGL(( ElementSum_RowSums), dim3(dim3(1,1,1)),dim3(dim3(MAT_SIZE,1,1)),0,0, d_A,d_gpuResult);
}
CUDA_CHECK_RETURN(hipDeviceSynchronize());
//Allocate local memory for GPU result and serial result
int GPU_Answer, Serial_Answer=0;
CUDA_CHECK_RETURN(hipMemcpy(&GPU_Answer, d_gpuResult, sizeof(int), hipMemcpyDeviceToHost));
for(int i =0; i < MAT_SIZE*MAT_SIZE; i++){
Serial_Answer += A[i];
}
printf("GPU Answer:\t%i\nSerial Answer:\t%i\n",GPU_Answer,Serial_Answer);
//Clean up
hipFree(d_A);
hipFree(d_gpuResult);
CUDA_CHECK_RETURN(hipDeviceReset()); //clear the device after all work is completed
return 0;
}
| 0afd114cce4a52ce277bde93384d3cd8ee1e8250.cu | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ElementSum.h"
/**
* This boolean value defines whether debug information is printed within macros
*/
#define DEBUG true
/**
* The number of items in the partial sum array
*/
#define MAT_SIZE 512
#define SEED 41887
/**
* This macro checks the return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/**
* This macro changes the active device to the device with the provided integer index.
*/
#define SET_DEVICE(value) { \
cudaDeviceProp devProp; \
cudaGetDeviceProperties(&devProp, value); \
if(DEBUG)printf("Changing the gpu to device id: %i name: %s\n",value,devProp.name); \
CUDA_CHECK_RETURN(cudaSetDevice(value)); \
\
}
__global__ void ElementSum_ColumnSums(int* matrix, int* result){
//get this threads column index
const unsigned int column = threadIdx.x;
//allocate shared memory for partial sums
__shared__ int columnSums[MAT_SIZE];
//loop through all elements in global memory and keep a running sum. Finally leave that total in the shared memory space.
int i;
int sum = 0;
for(i = 0; i < MAT_SIZE; i++){
sum += matrix[(i*MAT_SIZE) +column]; //only difference in computation is here, the data access pattern
}
columnSums[column] = sum;
__syncthreads();
if(column == 0){
int finalSum = 0;
for(i=0; i < MAT_SIZE; i++){
finalSum += columnSums[i];
}
*result = finalSum;
}
__syncthreads();
return;
}
__global__ void ElementSum_RowSums(int* matrix, int* result){
//get this threads row index
const unsigned int row = threadIdx.x;
//allocate shared memory for partial sums
__shared__ int rowSums[MAT_SIZE];
//loop through all elements in global memory and keep a running sum. Finally leave that total in the shared memory space.
int i;
int sum = 0;
for(i = 0; i < MAT_SIZE; i++){
sum += matrix[(row*MAT_SIZE) +i]; //only difference in computation is here, the data access pattern
}
rowSums[row] = sum;
__syncthreads();
if(row == 0){
int finalSum = 0;
for(i=0; i < MAT_SIZE; i++){
finalSum += rowSums[i];
}
*result = finalSum;
}
__syncthreads();
return;
}
/**
* Host function that prepares the data array and passes it to the CUDA kernel.
*/
int elementSum(bool improved) {
SET_DEVICE(0);
CUDA_CHECK_RETURN(cudaDeviceReset()); //pre-clear the device
int A[MAT_SIZE*MAT_SIZE];
//populate both matrices with random values
srand(SEED);
for(int i = 0; i < MAT_SIZE; i++){
for(int j=0; j<MAT_SIZE; j++){
A[i*MAT_SIZE+j] = (int)(((float)rand()/RAND_MAX)*100);
}
}
//Device pointers for A and gpuResult
int *d_A, *d_gpuResult;
//allocate device side memory for d_A
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_A, sizeof(int)*MAT_SIZE*MAT_SIZE));
//allocate the array of size 1 for return value
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_gpuResult, sizeof(int)));
//Memcpy device side A matrix
CUDA_CHECK_RETURN(cudaMemcpy((void*) d_A, A, sizeof(int)*MAT_SIZE*MAT_SIZE, cudaMemcpyHostToDevice));
if(!improved){
printf("Calculating element sum by adding matrix row sums.\n");
ElementSum_ColumnSums<<<dim3(1,1,1),dim3(MAT_SIZE,1,1),0,0>>>(d_A,d_gpuResult);
}
else{
printf("Calculating element sum by adding matrix column sums.\n");
ElementSum_RowSums<<<dim3(1,1,1),dim3(MAT_SIZE,1,1),0,0>>>(d_A,d_gpuResult);
}
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
//Allocate local memory for GPU result and serial result
int GPU_Answer, Serial_Answer=0;
CUDA_CHECK_RETURN(cudaMemcpy(&GPU_Answer, d_gpuResult, sizeof(int), cudaMemcpyDeviceToHost));
for(int i =0; i < MAT_SIZE*MAT_SIZE; i++){
Serial_Answer += A[i];
}
printf("GPU Answer:\t%i\nSerial Answer:\t%i\n",GPU_Answer,Serial_Answer);
//Clean up
cudaFree(d_A);
cudaFree(d_gpuResult);
CUDA_CHECK_RETURN(cudaDeviceReset()); //clear the device after all work is completed
return 0;
}
|
38ab717333e82694dcf2a07148b6c225696c892e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "normalization.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *glcm = NULL;
hipMalloc(&glcm, XSIZE*YSIZE);
float *norm = NULL;
hipMalloc(&norm, XSIZE*YSIZE);
int max = 1;
int sum = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((normalization), dim3(gridBlock), dim3(threadBlock), 0, 0, glcm, norm, max, sum);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((normalization), dim3(gridBlock), dim3(threadBlock), 0, 0, glcm, norm, max, sum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((normalization), dim3(gridBlock), dim3(threadBlock), 0, 0, glcm, norm, max, sum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 38ab717333e82694dcf2a07148b6c225696c892e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "normalization.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *glcm = NULL;
cudaMalloc(&glcm, XSIZE*YSIZE);
float *norm = NULL;
cudaMalloc(&norm, XSIZE*YSIZE);
int max = 1;
int sum = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
normalization<<<gridBlock,threadBlock>>>(glcm,norm,max,sum);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
normalization<<<gridBlock,threadBlock>>>(glcm,norm,max,sum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
normalization<<<gridBlock,threadBlock>>>(glcm,norm,max,sum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c23dac76635bac26ffaf40528e3dbe4886afcc63.hip | // !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
//fail
//--blockDim=64 --gridDim=64 --no-inline
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#define N 1
__device__ void baz (int p []){
int a;
p = &a;
}
__device__ void bar (int *p){
int a = 2;
p = &a;
}
__global__ void foo (int* p, int* q){
__shared__ int sharedArr [100];
__shared__ int sharedArr2 [50];
bar(p);
baz (sharedArr);
bar(q);
if (*q){
baz(sharedArr2);
}
//*p = 23; *q = 23; // remove this comment to see that the __device__ function does not work
}
int main(){
int *a, *b;
int *dev_a, *dev_b;
int size = N*sizeof(int);
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 1;
for (int i = 0; i < N; i++)
b[i] = 1;
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size, hipMemcpyHostToDevice);
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]);
// foo<<<1,2>>>(dev_a,dev_b);
ESBMC_verify_kernel(foo,1,2,dev_a,dev_b);
hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
hipMemcpy(b,dev_b,size,hipMemcpyDeviceToHost);
printf("\nnew a and new b:\n");
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]);
assert(a[0]!=1);
assert(b[0]!=1);
free(a); free(b);
hipFree(dev_a);
hipFree(dev_b);
return 0;
}
| c23dac76635bac26ffaf40528e3dbe4886afcc63.cu | #include <call_kernel.h>
//fail
//--blockDim=64 --gridDim=64 --no-inline
#include "cuda.h"
#include <stdio.h>
#include <cuda_runtime_api.h>
#define N 1
__device__ void baz (int p []){
int a;
p = &a;
}
__device__ void bar (int *p){
int a = 2;
p = &a;
}
__global__ void foo (int* p, int* q){
__shared__ int sharedArr [100];
__shared__ int sharedArr2 [50];
bar(p);
baz (sharedArr);
bar(q);
if (*q){
baz(sharedArr2);
}
//*p = 23; *q = 23; // remove this comment to see that the __device__ function does not work
}
int main(){
int *a, *b;
int *dev_a, *dev_b;
int size = N*sizeof(int);
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**)&dev_b, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 1;
for (int i = 0; i < N; i++)
b[i] = 1;
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size, cudaMemcpyHostToDevice);
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]);
// foo<<<1,2>>>(dev_a,dev_b);
ESBMC_verify_kernel(foo,1,2,dev_a,dev_b);
cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
cudaMemcpy(b,dev_b,size,cudaMemcpyDeviceToHost);
printf("\nnew a and new b:\n");
printf("a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
printf("\nb: ");
for (int i = 0; i < N; i++)
printf("%d ", b[i]);
assert(a[0]!=1);
assert(b[0]!=1);
free(a); free(b);
cudaFree(dev_a);
cudaFree(dev_b);
return 0;
}
|
c604be7a1ab227adbd6c17f6e35272c5de16983d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include <cfloat>
__device__ inline float getInterval(float sample,
int index,
int inputSize,
int outputSize,
int poolSize) {
float alpha = (float)(inputSize - poolSize) / (float) (outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return (int) ((index + sample) * alpha) - (int) (sample * alpha);
}
}
// We template on poolSizeW to allow the innermost loop to be unrolled
template <int PoolSizeWStatic>
__global__ void SpatialFractionalMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input,
THCDeviceTensor<float, 4> output,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 3> samples,
int poolSizeW, int poolSizeH) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.getSize(2) * output.getSize(3)) {
int outputW = ourOutputPoint % output.getSize(3);
int outputH = ourOutputPoint / output.getSize(3);
int poolW = getInterval(samples[batch][plane][0], outputW,
input.getSize(3), output.getSize(3), poolSizeW);
int poolH = getInterval(samples[batch][plane][1], outputH,
input.getSize(2), output.getSize(2), poolSizeH);
float maxVal = -FLT_MAX;
int maxIndex = -1;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
if (PoolSizeWStatic == -1) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
float val = input[batch][plane][h][w];
maxVal = fmaxf(val, maxVal);
maxIndex = (maxVal == val) ? (h * input.getSize(3) + w) : maxIndex;
}
} else {
#pragma unroll
for (int i = 0; i < PoolSizeWStatic; ++i) {
int w = i + poolW;
float val = input[batch][plane][h][w];
maxVal = fmaxf(val, maxVal);
maxIndex = (maxVal == val) ? (h * input.getSize(3) + w) : maxIndex;
}
}
}
assert(maxVal != -FLT_MAX);
assert(maxIndex != -1);
// +1 for Lua index
indices[batch][plane][outputH][outputW] = maxIndex + TH_INDEX_BASE;
output[batch][plane][outputH][outputW] = maxVal;
}
}
void THNN_CudaSpatialFractionalMaxPooling_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *output,
int outputW, int outputH,
int poolSizeW, int poolSizeH,
THCudaTensor *indices,
THCudaTensor *randomSamples)
{
int planeDim = 0;
int dimh = 1;
int dimw = 2;
long numBatch = 1;
long numInputDims = THCudaTensor_nDimension(state, input);
THArgCheck(numInputDims == 3 || numInputDims == 4, 2,
"3D or 4D (batch mode) tensor expected");
if (numInputDims == 4) {
numBatch = THCudaTensor_size(state, input, 0);
planeDim++;
dimh++;
dimw++;
}
/* sizes */
long numPlanes = THCudaTensor_size(state, input, planeDim);
long inputH = THCudaTensor_size(state, input, dimh);
long inputW = THCudaTensor_size(state, input, dimw);
THArgCheck(outputH + poolSizeH - 1 < inputH, 6,
"poolSizeH too large relative to input height");
THArgCheck(outputW + poolSizeW - 1 < inputW, 5,
"poolSizeW too large relative to input width");
THCDeviceTensor<float, 4> devInput;
THCDeviceTensor<float, 4> devOutput;
THCDeviceTensor<float, 4> devIndices;
THCDeviceTensor<float, 3> devSamples =
toDeviceTensor<float, 3>(state, randomSamples);
if (numInputDims == 3) {
/* resize output */
THCudaTensor_resize3d(state, output, numPlanes, outputH, outputW);
/* indices will contain the locations for each output point */
THCudaTensor_resize3d(state, indices, numPlanes, outputH, outputW);
devInput = toDeviceTensor<float, 3>(state, input).upcastOuter<4>();
devOutput = toDeviceTensor<float, 3>(state, output).upcastOuter<4>();
devIndices = toDeviceTensor<float, 3>(state, indices).upcastOuter<4>();
} else {
THCudaTensor_resize4d(state, output, numBatch, numPlanes, outputH, outputW);
/* indices will contain the locations for each output point */
THCudaTensor_resize4d(state, indices, numBatch, numPlanes, outputH, outputW);
devInput = toDeviceTensor<float, 4>(state, input);
devOutput = toDeviceTensor<float, 4>(state, output);
devIndices = toDeviceTensor<float, 4>(state, indices);
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
dim3 grid(THCCeilDiv(outputPlaneSize, 128),
devInput.getSize(1),
devInput.getSize(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
#define SFMP_UPDATE_OUTPUT(POOL_W) \
hipLaunchKernelGGL(( SpatialFractionalMaxPooling_updateOutput<POOL_W>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
devInput, devOutput, devIndices, devSamples, poolSizeW, poolSizeH);
#define SFMP_UPDATE_OUTPUT_CASE(POOL_W) \
case POOL_W: SFMP_UPDATE_OUTPUT(POOL_W); break
switch (poolSizeW) {
SFMP_UPDATE_OUTPUT_CASE(2);
SFMP_UPDATE_OUTPUT_CASE(3);
SFMP_UPDATE_OUTPUT_CASE(4);
SFMP_UPDATE_OUTPUT_CASE(5);
SFMP_UPDATE_OUTPUT_CASE(6);
SFMP_UPDATE_OUTPUT_CASE(7);
default:
// dynamic pool width
SFMP_UPDATE_OUTPUT_CASE(-1);
}
THCudaCheck(hipGetLastError());
}
__global__ void SpatialFractionalMaxPooling_updateGradInput(
THCDeviceTensor<float, 4> gradInput,
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.getSize(2) * gradOutput.getSize(3)) {
int outputW = ourOutputPoint % gradOutput.getSize(3);
int outputH = ourOutputPoint / gradOutput.getSize(3);
int index = indices[batch][plane][outputH][outputW] - TH_INDEX_BASE;
assert(index >= 0);
int inputW = index % gradInput.getSize(3);
int inputH = index / gradInput.getSize(3);
assert(inputH < gradInput.getSize(2));
atomicAdd(gradInput[batch][plane][inputH][inputW].data(),
gradOutput[batch][plane][outputH][outputW]);
}
}
void THNN_CudaSpatialFractionalMaxPooling_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
int outputW, int outputH,
int poolSizeW, int poolSizeH,
THCudaTensor *indices)
{
int dimh = 1;
int dimw = 2;
long numInputDims = THCudaTensor_nDimension(state, input);
if (numInputDims == 4) {
dimh++;
dimw++;
}
/* sizes */
long inputH = THCudaTensor_size(state, input, dimh);
long inputW = THCudaTensor_size(state, input, dimw);
THArgCheck(outputH == THCudaTensor_size(state, gradOutput, dimh), 3,
"gradOutput height unexpected");
THArgCheck(outputW == THCudaTensor_size(state, gradOutput, dimw), 3,
"gradOutput width unexpected");
/* resize */
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
THCDeviceTensor<float, 4> devGradInput;
THCDeviceTensor<float, 4> devGradOutput;
THCDeviceTensor<float, 4> devIndices;
/* backprop */
if (numInputDims == 3) {
devGradInput = toDeviceTensor<float, 3>(state, gradInput).upcastOuter<4>();
devGradOutput = toDeviceTensor<float, 3>(state, gradOutput).upcastOuter<4>();
devIndices = toDeviceTensor<float, 3>(state, indices).upcastOuter<4>();
} else {
devGradInput = toDeviceTensor<float, 4>(state, gradInput);
devGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
devIndices = toDeviceTensor<float, 4>(state, indices);
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
dim3 grid(THCCeilDiv(outputPlaneSize, 128),
devGradInput.getSize(1),
devGradInput.getSize(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
hipLaunchKernelGGL(( SpatialFractionalMaxPooling_updateGradInput)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
devGradInput, devGradOutput, devIndices);
THCudaCheck(hipGetLastError());
}
| c604be7a1ab227adbd6c17f6e35272c5de16983d.cu | #include "THCUNN.h"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include <cfloat>
__device__ inline float getInterval(float sample,
int index,
int inputSize,
int outputSize,
int poolSize) {
float alpha = (float)(inputSize - poolSize) / (float) (outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return (int) ((index + sample) * alpha) - (int) (sample * alpha);
}
}
// We template on poolSizeW to allow the innermost loop to be unrolled
template <int PoolSizeWStatic>
__global__ void SpatialFractionalMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input,
THCDeviceTensor<float, 4> output,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 3> samples,
int poolSizeW, int poolSizeH) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.getSize(2) * output.getSize(3)) {
int outputW = ourOutputPoint % output.getSize(3);
int outputH = ourOutputPoint / output.getSize(3);
int poolW = getInterval(samples[batch][plane][0], outputW,
input.getSize(3), output.getSize(3), poolSizeW);
int poolH = getInterval(samples[batch][plane][1], outputH,
input.getSize(2), output.getSize(2), poolSizeH);
float maxVal = -FLT_MAX;
int maxIndex = -1;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
if (PoolSizeWStatic == -1) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
float val = input[batch][plane][h][w];
maxVal = fmaxf(val, maxVal);
maxIndex = (maxVal == val) ? (h * input.getSize(3) + w) : maxIndex;
}
} else {
#pragma unroll
for (int i = 0; i < PoolSizeWStatic; ++i) {
int w = i + poolW;
float val = input[batch][plane][h][w];
maxVal = fmaxf(val, maxVal);
maxIndex = (maxVal == val) ? (h * input.getSize(3) + w) : maxIndex;
}
}
}
assert(maxVal != -FLT_MAX);
assert(maxIndex != -1);
// +1 for Lua index
indices[batch][plane][outputH][outputW] = maxIndex + TH_INDEX_BASE;
output[batch][plane][outputH][outputW] = maxVal;
}
}
void THNN_CudaSpatialFractionalMaxPooling_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *output,
int outputW, int outputH,
int poolSizeW, int poolSizeH,
THCudaTensor *indices,
THCudaTensor *randomSamples)
{
int planeDim = 0;
int dimh = 1;
int dimw = 2;
long numBatch = 1;
long numInputDims = THCudaTensor_nDimension(state, input);
THArgCheck(numInputDims == 3 || numInputDims == 4, 2,
"3D or 4D (batch mode) tensor expected");
if (numInputDims == 4) {
numBatch = THCudaTensor_size(state, input, 0);
planeDim++;
dimh++;
dimw++;
}
/* sizes */
long numPlanes = THCudaTensor_size(state, input, planeDim);
long inputH = THCudaTensor_size(state, input, dimh);
long inputW = THCudaTensor_size(state, input, dimw);
THArgCheck(outputH + poolSizeH - 1 < inputH, 6,
"poolSizeH too large relative to input height");
THArgCheck(outputW + poolSizeW - 1 < inputW, 5,
"poolSizeW too large relative to input width");
THCDeviceTensor<float, 4> devInput;
THCDeviceTensor<float, 4> devOutput;
THCDeviceTensor<float, 4> devIndices;
THCDeviceTensor<float, 3> devSamples =
toDeviceTensor<float, 3>(state, randomSamples);
if (numInputDims == 3) {
/* resize output */
THCudaTensor_resize3d(state, output, numPlanes, outputH, outputW);
/* indices will contain the locations for each output point */
THCudaTensor_resize3d(state, indices, numPlanes, outputH, outputW);
devInput = toDeviceTensor<float, 3>(state, input).upcastOuter<4>();
devOutput = toDeviceTensor<float, 3>(state, output).upcastOuter<4>();
devIndices = toDeviceTensor<float, 3>(state, indices).upcastOuter<4>();
} else {
THCudaTensor_resize4d(state, output, numBatch, numPlanes, outputH, outputW);
/* indices will contain the locations for each output point */
THCudaTensor_resize4d(state, indices, numBatch, numPlanes, outputH, outputW);
devInput = toDeviceTensor<float, 4>(state, input);
devOutput = toDeviceTensor<float, 4>(state, output);
devIndices = toDeviceTensor<float, 4>(state, indices);
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
dim3 grid(THCCeilDiv(outputPlaneSize, 128),
devInput.getSize(1),
devInput.getSize(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
#define SFMP_UPDATE_OUTPUT(POOL_W) \
SpatialFractionalMaxPooling_updateOutput<POOL_W> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
devInput, devOutput, devIndices, devSamples, poolSizeW, poolSizeH);
#define SFMP_UPDATE_OUTPUT_CASE(POOL_W) \
case POOL_W: SFMP_UPDATE_OUTPUT(POOL_W); break
switch (poolSizeW) {
SFMP_UPDATE_OUTPUT_CASE(2);
SFMP_UPDATE_OUTPUT_CASE(3);
SFMP_UPDATE_OUTPUT_CASE(4);
SFMP_UPDATE_OUTPUT_CASE(5);
SFMP_UPDATE_OUTPUT_CASE(6);
SFMP_UPDATE_OUTPUT_CASE(7);
default:
// dynamic pool width
SFMP_UPDATE_OUTPUT_CASE(-1);
}
THCudaCheck(cudaGetLastError());
}
__global__ void SpatialFractionalMaxPooling_updateGradInput(
THCDeviceTensor<float, 4> gradInput,
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.getSize(2) * gradOutput.getSize(3)) {
int outputW = ourOutputPoint % gradOutput.getSize(3);
int outputH = ourOutputPoint / gradOutput.getSize(3);
int index = indices[batch][plane][outputH][outputW] - TH_INDEX_BASE;
assert(index >= 0);
int inputW = index % gradInput.getSize(3);
int inputH = index / gradInput.getSize(3);
assert(inputH < gradInput.getSize(2));
atomicAdd(gradInput[batch][plane][inputH][inputW].data(),
gradOutput[batch][plane][outputH][outputW]);
}
}
void THNN_CudaSpatialFractionalMaxPooling_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
int outputW, int outputH,
int poolSizeW, int poolSizeH,
THCudaTensor *indices)
{
int dimh = 1;
int dimw = 2;
long numInputDims = THCudaTensor_nDimension(state, input);
if (numInputDims == 4) {
dimh++;
dimw++;
}
/* sizes */
long inputH = THCudaTensor_size(state, input, dimh);
long inputW = THCudaTensor_size(state, input, dimw);
THArgCheck(outputH == THCudaTensor_size(state, gradOutput, dimh), 3,
"gradOutput height unexpected");
THArgCheck(outputW == THCudaTensor_size(state, gradOutput, dimw), 3,
"gradOutput width unexpected");
/* resize */
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
THCDeviceTensor<float, 4> devGradInput;
THCDeviceTensor<float, 4> devGradOutput;
THCDeviceTensor<float, 4> devIndices;
/* backprop */
if (numInputDims == 3) {
devGradInput = toDeviceTensor<float, 3>(state, gradInput).upcastOuter<4>();
devGradOutput = toDeviceTensor<float, 3>(state, gradOutput).upcastOuter<4>();
devIndices = toDeviceTensor<float, 3>(state, indices).upcastOuter<4>();
} else {
devGradInput = toDeviceTensor<float, 4>(state, gradInput);
devGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
devIndices = toDeviceTensor<float, 4>(state, indices);
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
dim3 grid(THCCeilDiv(outputPlaneSize, 128),
devGradInput.getSize(1),
devGradInput.getSize(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
SpatialFractionalMaxPooling_updateGradInput
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
devGradInput, devGradOutput, devIndices);
THCudaCheck(cudaGetLastError());
}
|
316c2ace9ff1c67d44e031d1b88fde5e7eaa7ee8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
} | 316c2ace9ff1c67d44e031d1b88fde5e7eaa7ee8.cu | #include "includes.h"
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
} |
2d6ef2959a4cc8614decd4e6b96da3acffa97fe5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_kernels.cuh"
#include "cuda_kernel_sobel.cuh"
#include "cuda_bilinear.cuh"
#include "cuda_nearest_neighbor.cuh"
__global__ void pixel_precalculation_kernel(pixel_precalculation* precalculation, const double pixel_weight_increment, unsigned int N)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if ((i >= N))
{
return;
}
auto current_pixel_weight = pixel_weight_increment * i;
auto source_image_front_pixel = static_cast<unsigned int>(current_pixel_weight);
precalculation[i].front_pixel = source_image_front_pixel;
precalculation[i].rear_pixel = source_image_front_pixel + 1;
auto weight = current_pixel_weight - static_cast<double>(source_image_front_pixel);
precalculation[i].front_weight = 1 - weight;
precalculation[i].rear_weight = weight;
}
__global__ void bilinear_nn(d_scale_params f_params)
{
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x >= f_params.dimensions_info_p->result_image_width) || (y >= f_params.dimensions_info_p->result_image_height))
{
return;
}
if (y < f_params.dimensions_info_p->result_image_height / 2)
{
device::apply_bilinear_filter(f_params, x, y);
}
else
{
device::apply_nearest_neighbor(f_params, x, y);
}
}
__device__ unsigned int block_counter = 0;
__global__ void bilinear_nn_sobel(d_scale_params f_params, d_sobel_params s_params)
{
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x >= f_params.dimensions_info_p->result_image_width) || (y >= f_params.dimensions_info_p->result_image_height))
{
return;
}
if (y < f_params.dimensions_info_p->result_image_height / 2)
{
device::apply_bilinear_filter(f_params, x, y);
}
else
{
device::apply_nearest_neighbor(f_params, x, y);
}
__threadfence(); // make sure the data processed by the thread is written to global memory
__syncthreads(); // every thread needs to be done before we can report that the block is done
//first thread of every block reports that the block is done
if(threadIdx.x == 0 && threadIdx.y == 0)
{
auto grid_count = (gridDim.x * gridDim.y);
unsigned int done_grids = atomicInc(&block_counter, grid_count);
if(done_grids == grid_count - 1) // last done block starts the child kernel
{
dim3 block_size(32, 32);
auto bx = (f_params.dimensions_info_p->result_image_width + block_size.x - 1) / block_size.x;
auto by = (f_params.dimensions_info_p->result_image_height + block_size.y - 1) / block_size.y;
auto grid_size = dim3(bx, by);
hipLaunchKernelGGL(( apply_sobel_filter), dim3(grid_size), dim3(block_size), 0, 0, s_params, 0, f_params.dimensions_info_p->result_image_height);
}
// if (done_grids == grid_count - 1) // last done block starts the child kernel
// {
// dim3 block_size(32, 32);
//
// auto bx = (f_params.dimensions_info_p->result_image_width * 16 + block_size.x - 1) / block_size.x;
//
// auto by = (f_params.dimensions_info_p->result_image_height + block_size.y - 1) / block_size.y;
//
// auto grid_size = dim3(bx, by);
// sobel_cooperative_groups_tile16_8::apply_sobel_filter << <grid_size, block_size >> > (s_params);
// }
//
// if (done_grids == grid_count - 1) // last done block starts the child kernel
// {
// dim3 block_size(32, 32);
//
// auto bx = (f_params.dimensions_info_p->result_image_width * 2 + block_size.x - 1) / block_size.x;
//
// auto by = (f_params.dimensions_info_p->result_image_height + block_size.y - 1) / block_size.y;
//
// auto grid_size = dim3(bx, by);
// sobel_cooperative_groups_tile2::apply_sobel_filter << <grid_size, block_size >> > (s_params);
// }
}
}
| 2d6ef2959a4cc8614decd4e6b96da3acffa97fe5.cu | #include "cuda_kernels.cuh"
#include "cuda_kernel_sobel.cuh"
#include "cuda_bilinear.cuh"
#include "cuda_nearest_neighbor.cuh"
__global__ void pixel_precalculation_kernel(pixel_precalculation* precalculation, const double pixel_weight_increment, unsigned int N)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if ((i >= N))
{
return;
}
auto current_pixel_weight = pixel_weight_increment * i;
auto source_image_front_pixel = static_cast<unsigned int>(current_pixel_weight);
precalculation[i].front_pixel = source_image_front_pixel;
precalculation[i].rear_pixel = source_image_front_pixel + 1;
auto weight = current_pixel_weight - static_cast<double>(source_image_front_pixel);
precalculation[i].front_weight = 1 - weight;
precalculation[i].rear_weight = weight;
}
__global__ void bilinear_nn(d_scale_params f_params)
{
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x >= f_params.dimensions_info_p->result_image_width) || (y >= f_params.dimensions_info_p->result_image_height))
{
return;
}
if (y < f_params.dimensions_info_p->result_image_height / 2)
{
device::apply_bilinear_filter(f_params, x, y);
}
else
{
device::apply_nearest_neighbor(f_params, x, y);
}
}
__device__ unsigned int block_counter = 0;
__global__ void bilinear_nn_sobel(d_scale_params f_params, d_sobel_params s_params)
{
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x >= f_params.dimensions_info_p->result_image_width) || (y >= f_params.dimensions_info_p->result_image_height))
{
return;
}
if (y < f_params.dimensions_info_p->result_image_height / 2)
{
device::apply_bilinear_filter(f_params, x, y);
}
else
{
device::apply_nearest_neighbor(f_params, x, y);
}
__threadfence(); // make sure the data processed by the thread is written to global memory
__syncthreads(); // every thread needs to be done before we can report that the block is done
//first thread of every block reports that the block is done
if(threadIdx.x == 0 && threadIdx.y == 0)
{
auto grid_count = (gridDim.x * gridDim.y);
unsigned int done_grids = atomicInc(&block_counter, grid_count);
if(done_grids == grid_count - 1) // last done block starts the child kernel
{
dim3 block_size(32, 32);
auto bx = (f_params.dimensions_info_p->result_image_width + block_size.x - 1) / block_size.x;
auto by = (f_params.dimensions_info_p->result_image_height + block_size.y - 1) / block_size.y;
auto grid_size = dim3(bx, by);
apply_sobel_filter<<<grid_size, block_size>>>(s_params, 0, f_params.dimensions_info_p->result_image_height);
}
// if (done_grids == grid_count - 1) // last done block starts the child kernel
// {
// dim3 block_size(32, 32);
//
// auto bx = (f_params.dimensions_info_p->result_image_width * 16 + block_size.x - 1) / block_size.x;
//
// auto by = (f_params.dimensions_info_p->result_image_height + block_size.y - 1) / block_size.y;
//
// auto grid_size = dim3(bx, by);
// sobel_cooperative_groups_tile16_8::apply_sobel_filter << <grid_size, block_size >> > (s_params);
// }
//
// if (done_grids == grid_count - 1) // last done block starts the child kernel
// {
// dim3 block_size(32, 32);
//
// auto bx = (f_params.dimensions_info_p->result_image_width * 2 + block_size.x - 1) / block_size.x;
//
// auto by = (f_params.dimensions_info_p->result_image_height + block_size.y - 1) / block_size.y;
//
// auto grid_size = dim3(bx, by);
// sobel_cooperative_groups_tile2::apply_sobel_filter << <grid_size, block_size >> > (s_params);
// }
}
}
|
bab60c8123665415d6735d7bbf10918705b3ba45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define X 0
#define Y 1
#define SIZEA 1123
#define SIZEB 2223
#define N_BLOCKS 64
#define N_THREADS 32
__global__ void pathBig_k(const int *A, const int *B, int *Aindex, int *Bindex, const int sizeA, const int sizeB, const int morceaux){
if(blockIdx.x == 0){
Aindex[0] = 0;
Bindex[0] = 0;
Aindex[morceaux] = sizeA;
Bindex[morceaux] = sizeB;
return;
}
int i = (sizeA + sizeB)/morceaux * blockIdx.x;
int K[2];
int P[2];
int Q[2];
int offset;
if (i > sizeA) {
K[X] = i - sizeA;
K[Y] = sizeA;
P[X] = sizeA;
P[Y] = i - sizeA;
}
else {
K[X] = 0;
K[Y] = i;
P[X] = i;
P[Y] = 0;
}
while (1) {
offset = (abs(K[Y] - P[Y]))/2;
Q[X] = K[X] + offset;
Q[Y] = K[Y] - offset;
if (Q[Y] >= 0 && Q[X] <= sizeB && (Q[Y] == sizeA || Q[X] == 0 || A[Q[Y]] > B[Q[X]-1])) {
if (Q[X] == sizeB || Q[Y] == 0 || A[Q[Y]-1] <= B[Q[X]]) {
Aindex[blockIdx.x] = Q[Y];
Bindex[blockIdx.x] = Q[X];
break ;
}
else {
K[X] = Q[X] + 1;
K[Y] = Q[Y] - 1;
}
}
else {
P[X] = Q[X] - 1;
P[Y] = Q[Y] + 1;
}
}
}
__global__ void mergeBig_k(int *A, int *B, int *M, int *Aindex, int *Bindex){
int i = threadIdx.x;
// Shared memory we will work on
__shared__ int A_shared[N_THREADS];
__shared__ int B_shared[N_THREADS];
// Per-iteration offset contributed by one thread
int biaisAi; // Offset induced (or not) by the thread (0 or 1)
int biaisBi;
// Total offsets
__shared__ int biaisA;
__shared__ int biaisB;
int startABlock = Aindex[blockIdx.x];
int endABlock = Aindex[blockIdx.x+1];
int startBBlock = Bindex[blockIdx.x];
int endBBlock = Bindex[blockIdx.x+1];
// Sizes of the A and B partitions
int sABlock = endABlock - startABlock;
int sBBlock = endBBlock - startBBlock;
// Number of sliding windows
int nb_windows = (blockDim.x - 1 + sABlock + sBBlock) / blockDim.x;
biaisAi = 0;
biaisBi = 0;
biaisA = 0;
biaisB = 0;
// Merge window by window
for(int k=0; k < nb_windows; k++){
// Sum of the A and B offsets
biaisA += __syncthreads_count(biaisAi);
biaisB += __syncthreads_count(biaisBi);
// Reset the per-thread offsets
biaisAi = 0;
biaisBi = 0;
// Copy into shared memory
if (startABlock + biaisA + i < endABlock)
A_shared[i] = A[startABlock + biaisA + i];
if (startBBlock + biaisB + i < endBBlock)
B_shared[i] = B[startBBlock + biaisB + i];
// Synchronize shared memory
__syncthreads();
// Sizes of the sub-arrays in shared memory
int sizeAshared = min(blockDim.x, max(0, sABlock - biaisA));
int sizeBshared = min(blockDim.x, max(0, sBBlock - biaisB));
// Binary search
if (i < (sizeAshared + sizeBshared)){
int K[2];
int P[2];
if (i > sizeAshared) {
K[X] = i - sizeAshared;
K[Y] = sizeAshared;
P[X] = sizeAshared;
P[Y] = i - sizeAshared;
}
else {
K[X] = 0;
K[Y] = i;
P[X] = i;
P[Y] = 0;
}
while (1) {
int offset = (abs(K[Y] - P[Y]))/2;
int Q[2] = {K[X] + offset, K[Y] - offset};
if (Q[Y] >= 0 && Q[X] <= sizeBshared && (Q[Y] == sizeAshared || Q[X] == 0 || A_shared[Q[Y]] > B_shared[Q[X]-1])) {
if (Q[X] == sizeBshared || Q[Y] == 0 || A_shared[Q[Y]-1] <= B_shared[Q[X]]) {
if (Q[Y] < sizeAshared && (Q[X] == sizeBshared || A_shared[Q[Y]] <= B_shared[Q[X]]) ) {
M[i + startABlock + startBBlock + k * blockDim.x] = A_shared[Q[Y]];
biaisAi += 1;
}
else {
M[i + startABlock + startBBlock + k * blockDim.x] = B_shared[Q[X]];
biaisBi += 1;
}
break ;
}
else {
K[X] = Q[X] + 1;
K[Y] = Q[Y] - 1;
}
}
else {
P[X] = Q[X] - 1;
P[Y] = Q[Y] + 1 ;
}
}
}
}
}
int main(){
int i;
// Allocate and fill the input arrays
int A_cpu[SIZEA];
int B_cpu[SIZEB];
for (i = 0; i < SIZEA; i++)
A_cpu[i] = 2 * i;
for (i = 0; i < SIZEB; i++)
B_cpu[i] = 2 * i + 1;
// Allocate the output array
int M_cpu[SIZEA + SIZEB];
// Declare and allocate GPU memory
int *A_gpu, *B_gpu, *M_gpu, *Aindex, *Bindex;
hipMalloc( (void**) &A_gpu, SIZEA * sizeof(int) );
hipMalloc( (void**) &B_gpu, SIZEB * sizeof(int) );
hipMalloc( (void**) &M_gpu, (SIZEA+SIZEB) * sizeof(int) );
hipMalloc( (void**) &Aindex, (N_BLOCKS + 1) * sizeof(int) );
hipMalloc( (void**) &Bindex, (N_BLOCKS + 1) * sizeof(int) );
// Copy the arrays from CPU to GPU
hipMemcpy( A_gpu, A_cpu, SIZEA * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( B_gpu, B_cpu, SIZEB * sizeof(int), hipMemcpyHostToDevice );
// Kernel that partitions the arrays across blocks
hipLaunchKernelGGL(( pathBig_k), dim3(N_BLOCKS), dim3(1), 0, 0, A_gpu, B_gpu, Aindex, Bindex, SIZEA, SIZEB, N_BLOCKS);
// Kernel that merges the partitions within each block
hipLaunchKernelGGL(( mergeBig_k), dim3(N_BLOCKS), dim3(N_THREADS), 0, 0, A_gpu, B_gpu, M_gpu, Aindex, Bindex);
// Copy the result array from GPU to CPU and print it
hipMemcpy( M_cpu, M_gpu, (SIZEA+SIZEB) * sizeof(int), hipMemcpyDeviceToHost );
for (int i = 0; i < SIZEA+SIZEB; i ++)
printf("M[%d] = %d\n", i, M_cpu[i]);
// Free the memory
hipFree(A_gpu);
hipFree(B_gpu);
hipFree(M_gpu);
hipFree(Aindex);
hipFree(Bindex);
return 0;
}
| bab60c8123665415d6735d7bbf10918705b3ba45.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define X 0
#define Y 1
#define SIZEA 1123
#define SIZEB 2223
#define N_BLOCKS 64
#define N_THREADS 32
__global__ void pathBig_k(const int *A, const int *B, int *Aindex, int *Bindex, const int sizeA, const int sizeB, const int morceaux){
if(blockIdx.x == 0){
Aindex[0] = 0;
Bindex[0] = 0;
Aindex[morceaux] = sizeA;
Bindex[morceaux] = sizeB;
return;
}
int i = (sizeA + sizeB)/morceaux * blockIdx.x;
int K[2];
int P[2];
int Q[2];
int offset;
if (i > sizeA) {
K[X] = i - sizeA;
K[Y] = sizeA;
P[X] = sizeA;
P[Y] = i - sizeA;
}
else {
K[X] = 0;
K[Y] = i;
P[X] = i;
P[Y] = 0;
}
while (1) {
offset = (abs(K[Y] - P[Y]))/2;
Q[X] = K[X] + offset;
Q[Y] = K[Y] - offset;
if (Q[Y] >= 0 && Q[X] <= sizeB && (Q[Y] == sizeA || Q[X] == 0 || A[Q[Y]] > B[Q[X]-1])) {
if (Q[X] == sizeB || Q[Y] == 0 || A[Q[Y]-1] <= B[Q[X]]) {
Aindex[blockIdx.x] = Q[Y];
Bindex[blockIdx.x] = Q[X];
break ;
}
else {
K[X] = Q[X] + 1;
K[Y] = Q[Y] - 1;
}
}
else {
P[X] = Q[X] - 1;
P[Y] = Q[Y] + 1;
}
}
}
__global__ void mergeBig_k(int *A, int *B, int *M, int *Aindex, int *Bindex){
int i = threadIdx.x;
// Shared memory we will work on
__shared__ int A_shared[N_THREADS];
__shared__ int B_shared[N_THREADS];
// Per-iteration offset contributed by one thread
int biaisAi; // Offset induced (or not) by the thread (0 or 1)
int biaisBi;
// Total offsets
__shared__ int biaisA;
__shared__ int biaisB;
int startABlock = Aindex[blockIdx.x];
int endABlock = Aindex[blockIdx.x+1];
int startBBlock = Bindex[blockIdx.x];
int endBBlock = Bindex[blockIdx.x+1];
// Sizes of the A and B partitions
int sABlock = endABlock - startABlock;
int sBBlock = endBBlock - startBBlock;
// Number of sliding windows
int nb_windows = (blockDim.x - 1 + sABlock + sBBlock) / blockDim.x;
biaisAi = 0;
biaisBi = 0;
biaisA = 0;
biaisB = 0;
// Merge window by window
for(int k=0; k < nb_windows; k++){
// Sum of the A and B offsets
biaisA += __syncthreads_count(biaisAi);
biaisB += __syncthreads_count(biaisBi);
// Reset the per-thread offsets
biaisAi = 0;
biaisBi = 0;
// Copy into shared memory
if (startABlock + biaisA + i < endABlock)
A_shared[i] = A[startABlock + biaisA + i];
if (startBBlock + biaisB + i < endBBlock)
B_shared[i] = B[startBBlock + biaisB + i];
// Synchronize shared memory
__syncthreads();
// Sizes of the sub-arrays in shared memory
int sizeAshared = min(blockDim.x, max(0, sABlock - biaisA));
int sizeBshared = min(blockDim.x, max(0, sBBlock - biaisB));
// Binary search
if (i < (sizeAshared + sizeBshared)){
int K[2];
int P[2];
if (i > sizeAshared) {
K[X] = i - sizeAshared;
K[Y] = sizeAshared;
P[X] = sizeAshared;
P[Y] = i - sizeAshared;
}
else {
K[X] = 0;
K[Y] = i;
P[X] = i;
P[Y] = 0;
}
while (1) {
int offset = (abs(K[Y] - P[Y]))/2;
int Q[2] = {K[X] + offset, K[Y] - offset};
if (Q[Y] >= 0 && Q[X] <= sizeBshared && (Q[Y] == sizeAshared || Q[X] == 0 || A_shared[Q[Y]] > B_shared[Q[X]-1])) {
if (Q[X] == sizeBshared || Q[Y] == 0 || A_shared[Q[Y]-1] <= B_shared[Q[X]]) {
if (Q[Y] < sizeAshared && (Q[X] == sizeBshared || A_shared[Q[Y]] <= B_shared[Q[X]]) ) {
M[i + startABlock + startBBlock + k * blockDim.x] = A_shared[Q[Y]];
biaisAi += 1;
}
else {
M[i + startABlock + startBBlock + k * blockDim.x] = B_shared[Q[X]];
biaisBi += 1;
}
break ;
}
else {
K[X] = Q[X] + 1;
K[Y] = Q[Y] - 1;
}
}
else {
P[X] = Q[X] - 1;
P[Y] = Q[Y] + 1 ;
}
}
}
}
}
int main(){
int i;
// Allocate and fill the input arrays
int A_cpu[SIZEA];
int B_cpu[SIZEB];
for (i = 0; i < SIZEA; i++)
A_cpu[i] = 2 * i;
for (i = 0; i < SIZEB; i++)
B_cpu[i] = 2 * i + 1;
// Allocate the output array
int M_cpu[SIZEA + SIZEB];
// Declare and allocate GPU memory
int *A_gpu, *B_gpu, *M_gpu, *Aindex, *Bindex;
cudaMalloc( (void**) &A_gpu, SIZEA * sizeof(int) );
cudaMalloc( (void**) &B_gpu, SIZEB * sizeof(int) );
cudaMalloc( (void**) &M_gpu, (SIZEA+SIZEB) * sizeof(int) );
cudaMalloc( (void**) &Aindex, (N_BLOCKS + 1) * sizeof(int) );
cudaMalloc( (void**) &Bindex, (N_BLOCKS + 1) * sizeof(int) );
// Copy the arrays from CPU to GPU
cudaMemcpy( A_gpu, A_cpu, SIZEA * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( B_gpu, B_cpu, SIZEB * sizeof(int), cudaMemcpyHostToDevice );
// Kernel that partitions the arrays across blocks
pathBig_k<<<N_BLOCKS, 1>>>(A_gpu, B_gpu, Aindex, Bindex, SIZEA, SIZEB, N_BLOCKS);
// Kernel that merges the partitions within each block
mergeBig_k<<<N_BLOCKS, N_THREADS>>>(A_gpu, B_gpu, M_gpu, Aindex, Bindex);
// Copy the result array from GPU to CPU and print it
cudaMemcpy( M_cpu, M_gpu, (SIZEA+SIZEB) * sizeof(int), cudaMemcpyDeviceToHost );
for (int i = 0; i < SIZEA+SIZEB; i ++)
printf("M[%d] = %d\n", i, M_cpu[i]);
// Free the memory
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(M_gpu);
cudaFree(Aindex);
cudaFree(Bindex);
return 0;
}
|
9581bc45da91f2ae8ed388ed261ebcc2fc2a2347.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <math.h>
#include <stdbool.h>
#include <driver/cuda_helper.h>
#include <math_functions.h>
#include <scene/scene_data.h>
#include <shaders/brdf.cuh>
#include <shaders/intersection.cuh>
#include <shaders/post_process.cuh>
#include <utils/utils.h>
using post_process_t = float3 (*)(const float3&);
post_process_t h_post_process_table[4];
surface<void, cudaSurfaceType2D> surf;
texture<float4, hipTextureTypeCubemap> cubemap_ref;
union rgba_24
{
uint1 b32;
struct
{
unsigned r : 8;
unsigned g : 8;
unsigned b : 8;
unsigned a : 8;
};
};
#ifndef M_PI
#define M_PI 3.14159265359f
#endif
__device__ inline float3
radiance(scene::Ray& r, const struct scene::Scenes& scenes,
unsigned int scene_id, const scene::Camera* const cam,
hiprandState_t* rand_state, int is_static, int static_samples)
{
float3 acc = make_float3(0.0f);
// For energy compensation on Russian roulette
float3 throughput = make_float3(1.0f);
// Contains information about each intersection.
// This will be updated at each call to 'intersect'.
IntersectionData inter;
if (!is_static)
if (intersect(r, scenes, scene_id, inter))
return inter.diffuse_col;
else
{
// Accumulate Environment map's contribution (approximated as many far away lights)
auto val = texCubemap(cubemap_ref, r.dir.x, r.dir.y, -r.dir.z);
return make_float3(val.x, val.y, val.z);
}
// Max bounces
// Bounce more when the camera is not moving
const int max_bounces = 1 + is_static * (static_samples + 1);
for (int b = 0; b < max_bounces; b++) {
float3 oriented_normal;
float r1 = hiprand_uniform(rand_state);
if (intersect(r, scenes, scene_id, inter)) {
inter.normal = inter.normal;
float cos_theta = dot(inter.normal, r.dir);
oriented_normal =
inter.normal; // cos_theta < 0 ? inter.normal : inter.normal * -1.0f;
// return oriented_normal;
float3 up = make_float3(0.0, 1.0, 0.0);
float3 right = cross(up, inter.normal);
up = cross(inter.normal, right);
// Oren-Nayar diffuse
// BRDF = brdf_oren_nayar(cos_theta, cos_theta, light_dir, r.dir,
// oriented_normal, 0.5f, 0.5f, inter.diffuse_col);
// Specular ray
// Computed every time and then used to simulate roughness by concentrating
// rays towards it
float3 spec = normalize(reflect(r.dir, inter.normal));
float PDF = pdf_lambert(); // Divided by PI
// Lambert BRDF/PDF
float3 BRDF = brdf_lambert(inter.diffuse_col); // Divided by PI
float3 direct_light = BRDF / PDF;
// Default IOR (Index Of Refraction) is 1.0f
if (inter.ior == 1.0f || inter.light != NULL) {
// Accumulate light emission
if (inter.light != NULL) {
BRDF = make_float3(inter.light->color.x, inter.light->color.y,
inter.light->color.z);
acc += BRDF * inter.light->emission * throughput;
}
// Sample the hemisphere with a random ray
float phi = 2.0f * M_PI *
hiprand_uniform(rand_state); // glm::mix(1.0f -
// inter.specular_col,
// inter.specular_col, 0.1f);
float sin_t = __fsqrt_rn(r1);
float cos_t = __fsqrt_rn(1.f - r1);
// u, v and oriented_normal form the base of the hemisphere
float3 u = normalize(cross(fabs(oriented_normal.x) > .1
? make_float3(0.0f, 1.0f, 0.0f)
: make_float3(1.0f, 0.0f, 0.0f),
oriented_normal));
float3 v = cross(oriented_normal, u);
        // Diffuse hemisphere reflection
float3 d = normalize(v * sin_t * __cosf(phi) + u * __sinf(phi) * sin_t +
oriented_normal * cos_t);
r.origin += r.dir * inter.dist;
// Mix the specular and random diffuse ray by the "specular_col" amount
// to approximate roughness
r.dir = mix(d, spec, inter.specular_col);
// Avoids self intersection
r.origin += r.dir * 0.03f;
throughput *= direct_light;
} else {
        // Transmission
// n1: IOR of exterior medium
float n1 = 1.0f; // sin theta2
// n2: IOR of entering medium
float n2 = inter.ior; // sin theta1
oriented_normal = cos_theta < 0 ? inter.normal : inter.normal * -1.0f;
float c1 = dot(oriented_normal, r.dir);
bool entering = dot(inter.normal, oriented_normal) > 0;
// Snell's Law
float eta = entering ? n1 / n2 : n2 / n1;
float eta_2 = eta * eta;
float c2_term = 1.0f - eta_2 * (1.0f - c1 * c1);
// Total Internal Reflection
if (c2_term < 0.0f) {
r.origin += oriented_normal * inter.dist / 100.f;
// return make_float3(1.0f, 0.0f, 0.0f);
r.dir = spec;
} else {
// Schlick R0
float R0 = (n2 - n1) / (n1 + n2);
R0 *= R0;
float c2 = __fsqrt_rn(c2_term);
float3 T = normalize(eta * r.dir + (eta * c1 - c2) * oriented_normal);
float f_cos_theta = 1.0f - (entering ? -c1 : dot(T, inter.normal));
f_cos_theta = powf(cos_theta, 5.0f);
// Fresnel-Schlick approximation for the reflection amount
float f_r = R0 + (1.0f - R0) * f_cos_theta;
// If reflection
// Not exactly sure why "0.25f" works better than "f_r"...
if (hiprand_uniform(rand_state) < 0.25f) {
throughput *= f_r * direct_light;
r.origin += oriented_normal * inter.dist / 100.f;
r.dir = spec; // mix(d, spec, inter.specular_col);
// return make_float3(0.0f, 1.0f, 0.0f);
} else // Transmission
{
// Energy conservation
float f_t = 1.0f - f_r;
throughput *= f_t * direct_light;
// We're inside a mesh doing transmission, so we try to reduce the
// bias as much as possible
// or the ray could get outside of the mesh which makes no sense
r.origin += oriented_normal * inter.dist / 10000.f;
r.dir = T;
// return make_float3(0.0f, 0.0f, 1.0f);
}
}
}
} else {
// Accumulate Environment map's contribution (approximated as many far
// away lights)
auto val = texCubemap(cubemap_ref, r.dir.x, r.dir.y, -r.dir.z);
acc += make_float3(val.x, val.y, val.z) * throughput;
}
// Russian roulette for early path termination
float p = fmaxf(throughput.x, fmaxf(throughput.y, throughput.z));
if (r1 > p && b > 1)
return acc;
throughput *= __fdividef(1.0f, p);
}
return acc;
}
__global__ void
kernel(const unsigned int width, const unsigned int height,
const scene::Scenes scenes, unsigned int scene_id, scene::Camera cam,
unsigned int hash_seed, int frame_nb, float3* temporal_framebuffer,
bool moved, post_process_t post)
{
const unsigned int half_w = width / 2;
const unsigned int half_h = height / 2;
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= width || y >= height)
return;
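  // Flattened, globally unique thread index; used below to seed this thread's RNG state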
const unsigned int tid =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) +
(threadIdx.y * blockDim.x) + threadIdx.x;
union rgba_24 rgbx;
rgbx.a = 0.0;
hiprandState_t rand_state;
hiprand_init(hash_seed + tid, 0, 0, &rand_state);
scene::Ray r = generateRay(x, y, half_w, half_h, cam);
// Depth-Of-Field
camera_dof(r, cam, &rand_state);
int is_static = !moved;
int static_samples = 1;
float3 rad =
radiance(r, scenes, scene_id, &cam, &rand_state, is_static, static_samples);
rad = clamp(rad, 0.0f, 1.0f);
// Accumulation buffer for when the camera is static
// This makes the image converge
int i = (height - y - 1) * width + x;
// Zero-out if the camera is moving to reset the buffer
temporal_framebuffer[i] *= is_static;
temporal_framebuffer[i] += rad;
rad = temporal_framebuffer[i] / (float)frame_nb;
// Tone Mapping + White Balance
rad = exposure(rad);
// Gamma Correction
rad = pow(rad, 1.0f / 2.2f);
rad = (*post)(rad);
rgbx.r = rad.x * 255;
rgbx.g = rad.y * 255;
rgbx.b = rad.z * 255;
surf2Dwrite(rgbx.b32, surf, x * sizeof(rgbx), y, hipBoundaryModeZero);
}
// Very nice and fast PRNG
// Credit: Thomas Wang
inline unsigned int
WangHash(unsigned int a)
{
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
hipError_t
raytrace(hipArray_const_t array, const scene::Scenes& scenes,
unsigned int scene_id, const std::vector<scene::Cubemap>& cubemaps,
int cubemap_id, const scene::Camera* const cam,
const unsigned int width, const unsigned int height,
hipStream_t stream, float3* temporal_framebuffer, bool moved,
unsigned int post_id)
{
// Seed for the Wang Hash
static unsigned int seed = 0;
if (moved)
seed = 0;
seed++;
hipBindSurfaceToArray(surf, array);
const scene::Cubemap& cubemap = cubemaps[cubemap_id];
cubemap_ref.addressMode[0] = hipAddressModeWrap;
cubemap_ref.addressMode[1] = hipAddressModeWrap;
cubemap_ref.filterMode = hipFilterModeLinear;
cubemap_ref.normalized = true;
hipBindTextureToArray(cubemap_ref, cubemap.cubemap, cubemap.cubemap_desc);
// Register occupancy : nb_threads = regs_per_block / 32
// Shared memory occupancy : nb_threads = shared_mem / 32
// Block size occupancy
dim3 threads_per_block(16, 16);
dim3 nb_blocks(width / threads_per_block.x + 1,
height / threads_per_block.y + 1);
if (nb_blocks.x > 0 && nb_blocks.y > 0)
hipLaunchKernelGGL(( kernel), dim3(nb_blocks), dim3(threads_per_block), 0, stream,
width, height, scenes, scene_id, *cam, WangHash(seed), seed,
temporal_framebuffer, moved, h_post_process_table[post_id]);
return hipSuccess;
}
__device__ float3
no_post_process(const float3& color)
{
return color;
}
__device__ float3
grayscale(const float3& color)
{
const float gray = color.x * 0.3 + color.y * 0.59 + color.z * 0.11;
return make_float3(gray, gray, gray);
}
__device__ float3
sepia(const float3& color)
{
return make_float3(color.x * 0.393 + color.y * 0.769 + color.z * 0.189,
color.x * 0.349 + color.y * 0.686 + color.z * 0.168,
color.x * 0.272 + color.y * 0.534 + color.z * 0.131);
}
__device__ float3
invert(const float3& color)
{
return make_float3(1.0 - color.x, 1.0 - color.y, 1.0 - color.z);
}
__device__ post_process_t p_none = no_post_process;
__device__ post_process_t p_gray = grayscale;
__device__ post_process_t p_sepia = sepia;
__device__ post_process_t p_invert = invert;
// Copy the pointers from the function tables to the host side.
void
setupFunctionTables()
{
hipMemcpyFromSymbol(&h_post_process_table[0], p_none,
sizeof(post_process_t));
cudaCheckError();
hipMemcpyFromSymbol(&h_post_process_table[1], p_gray,
sizeof(post_process_t));
cudaCheckError();
hipMemcpyFromSymbol(&h_post_process_table[2], p_sepia,
sizeof(post_process_t));
cudaCheckError();
hipMemcpyFromSymbol(&h_post_process_table[3], p_invert,
sizeof(post_process_t));
cudaCheckError();
}
| 9581bc45da91f2ae8ed388ed261ebcc2fc2a2347.cu | #include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <math.h>
#include <stdbool.h>
#include <driver/cuda_helper.h>
#include <math_functions.h>
#include <scene/scene_data.h>
#include <shaders/brdf.cuh>
#include <shaders/intersection.cuh>
#include <shaders/post_process.cuh>
#include <utils/utils.h>
using post_process_t = float3 (*)(const float3&);
post_process_t h_post_process_table[4];
surface<void, cudaSurfaceType2D> surf;
texture<float4, cudaTextureTypeCubemap> cubemap_ref;
union rgba_24
{
uint1 b32;
struct
{
unsigned r : 8;
unsigned g : 8;
unsigned b : 8;
unsigned a : 8;
};
};
#ifndef M_PI
#define M_PI 3.14159265359f
#endif
__device__ inline float3
radiance(scene::Ray& r, const struct scene::Scenes& scenes,
unsigned int scene_id, const scene::Camera* const cam,
curandState* rand_state, int is_static, int static_samples)
{
float3 acc = make_float3(0.0f);
// For energy compensation on Russian roulette
float3 throughput = make_float3(1.0f);
// Contains information about each intersection.
// This will be updated at each call to 'intersect'.
IntersectionData inter;
if (!is_static)
if (intersect(r, scenes, scene_id, inter))
return inter.diffuse_col;
else
{
// Accumulate Environment map's contribution (approximated as many far away lights)
auto val = texCubemap(cubemap_ref, r.dir.x, r.dir.y, -r.dir.z);
return make_float3(val.x, val.y, val.z);
}
// Max bounces
// Bounce more when the camera is not moving
const int max_bounces = 1 + is_static * (static_samples + 1);
for (int b = 0; b < max_bounces; b++) {
float3 oriented_normal;
float r1 = curand_uniform(rand_state);
if (intersect(r, scenes, scene_id, inter)) {
inter.normal = inter.normal;
float cos_theta = dot(inter.normal, r.dir);
oriented_normal =
inter.normal; // cos_theta < 0 ? inter.normal : inter.normal * -1.0f;
// return oriented_normal;
float3 up = make_float3(0.0, 1.0, 0.0);
float3 right = cross(up, inter.normal);
up = cross(inter.normal, right);
// Oren-Nayar diffuse
// BRDF = brdf_oren_nayar(cos_theta, cos_theta, light_dir, r.dir,
// oriented_normal, 0.5f, 0.5f, inter.diffuse_col);
// Specular ray
      // Computed every time and then used to simulate roughness by concentrating
// rays towards it
float3 spec = normalize(reflect(r.dir, inter.normal));
float PDF = pdf_lambert(); // Divided by PI
// Lambert BRDF/PDF
float3 BRDF = brdf_lambert(inter.diffuse_col); // Divided by PI
float3 direct_light = BRDF / PDF;
// Default IOR (Index Of Refraction) is 1.0f
if (inter.ior == 1.0f || inter.light != NULL) {
// Accumulate light emission
if (inter.light != NULL) {
BRDF = make_float3(inter.light->color.x, inter.light->color.y,
inter.light->color.z);
acc += BRDF * inter.light->emission * throughput;
}
// Sample the hemisphere with a random ray
float phi = 2.0f * M_PI *
curand_uniform(rand_state); // glm::mix(1.0f -
// inter.specular_col,
// inter.specular_col, 0.1f);
float sin_t = __fsqrt_rn(r1);
float cos_t = __fsqrt_rn(1.f - r1);
// u, v and oriented_normal form the base of the hemisphere
float3 u = normalize(cross(fabs(oriented_normal.x) > .1
? make_float3(0.0f, 1.0f, 0.0f)
: make_float3(1.0f, 0.0f, 0.0f),
oriented_normal));
float3 v = cross(oriented_normal, u);
        // Diffuse hemisphere reflection
float3 d = normalize(v * sin_t * __cosf(phi) + u * __sinf(phi) * sin_t +
oriented_normal * cos_t);
r.origin += r.dir * inter.dist;
// Mix the specular and random diffuse ray by the "specular_col" amount
// to approximate roughness
r.dir = mix(d, spec, inter.specular_col);
// Avoids self intersection
r.origin += r.dir * 0.03f;
throughput *= direct_light;
} else {
        // Transmission
// n1: IOR of exterior medium
float n1 = 1.0f; // sin theta2
// n2: IOR of entering medium
float n2 = inter.ior; // sin theta1
oriented_normal = cos_theta < 0 ? inter.normal : inter.normal * -1.0f;
float c1 = dot(oriented_normal, r.dir);
bool entering = dot(inter.normal, oriented_normal) > 0;
// Snell's Law
float eta = entering ? n1 / n2 : n2 / n1;
float eta_2 = eta * eta;
float c2_term = 1.0f - eta_2 * (1.0f - c1 * c1);
// Total Internal Reflection
if (c2_term < 0.0f) {
r.origin += oriented_normal * inter.dist / 100.f;
// return make_float3(1.0f, 0.0f, 0.0f);
r.dir = spec;
} else {
// Schlick R0
float R0 = (n2 - n1) / (n1 + n2);
R0 *= R0;
float c2 = __fsqrt_rn(c2_term);
float3 T = normalize(eta * r.dir + (eta * c1 - c2) * oriented_normal);
float f_cos_theta = 1.0f - (entering ? -c1 : dot(T, inter.normal));
f_cos_theta = powf(cos_theta, 5.0f);
// Fresnel-Schlick approximation for the reflection amount
float f_r = R0 + (1.0f - R0) * f_cos_theta;
// If reflection
// Not exactly sure why "0.25f" works better than "f_r"...
if (curand_uniform(rand_state) < 0.25f) {
throughput *= f_r * direct_light;
r.origin += oriented_normal * inter.dist / 100.f;
r.dir = spec; // mix(d, spec, inter.specular_col);
// return make_float3(0.0f, 1.0f, 0.0f);
} else // Transmission
{
// Energy conservation
float f_t = 1.0f - f_r;
throughput *= f_t * direct_light;
// We're inside a mesh doing transmission, so we try to reduce the
// bias as much as possible
// or the ray could get outside of the mesh which makes no sense
r.origin += oriented_normal * inter.dist / 10000.f;
r.dir = T;
// return make_float3(0.0f, 0.0f, 1.0f);
}
}
}
} else {
// Accumulate Environment map's contribution (approximated as many far
// away lights)
auto val = texCubemap(cubemap_ref, r.dir.x, r.dir.y, -r.dir.z);
acc += make_float3(val.x, val.y, val.z) * throughput;
}
// Russian roulette for early path termination
float p = fmaxf(throughput.x, fmaxf(throughput.y, throughput.z));
if (r1 > p && b > 1)
return acc;
throughput *= __fdividef(1.0f, p);
}
return acc;
}
__global__ void
kernel(const unsigned int width, const unsigned int height,
const scene::Scenes scenes, unsigned int scene_id, scene::Camera cam,
unsigned int hash_seed, int frame_nb, float3* temporal_framebuffer,
bool moved, post_process_t post)
{
const unsigned int half_w = width / 2;
const unsigned int half_h = height / 2;
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= width || y >= height)
return;
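  // Flattened, globally unique thread index; used below to seed this thread's RNG state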
const unsigned int tid =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) +
(threadIdx.y * blockDim.x) + threadIdx.x;
union rgba_24 rgbx;
rgbx.a = 0.0;
curandState rand_state;
curand_init(hash_seed + tid, 0, 0, &rand_state);
scene::Ray r = generateRay(x, y, half_w, half_h, cam);
// Depth-Of-Field
camera_dof(r, cam, &rand_state);
int is_static = !moved;
int static_samples = 1;
float3 rad =
radiance(r, scenes, scene_id, &cam, &rand_state, is_static, static_samples);
rad = clamp(rad, 0.0f, 1.0f);
// Accumulation buffer for when the camera is static
// This makes the image converge
int i = (height - y - 1) * width + x;
// Zero-out if the camera is moving to reset the buffer
temporal_framebuffer[i] *= is_static;
temporal_framebuffer[i] += rad;
rad = temporal_framebuffer[i] / (float)frame_nb;
// Tone Mapping + White Balance
rad = exposure(rad);
// Gamma Correction
rad = pow(rad, 1.0f / 2.2f);
rad = (*post)(rad);
rgbx.r = rad.x * 255;
rgbx.g = rad.y * 255;
rgbx.b = rad.z * 255;
surf2Dwrite(rgbx.b32, surf, x * sizeof(rgbx), y, cudaBoundaryModeZero);
}
// Very nice and fast PRNG
// Credit: Thomas Wang
inline unsigned int
WangHash(unsigned int a)
{
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
cudaError_t
raytrace(cudaArray_const_t array, const scene::Scenes& scenes,
unsigned int scene_id, const std::vector<scene::Cubemap>& cubemaps,
int cubemap_id, const scene::Camera* const cam,
const unsigned int width, const unsigned int height,
cudaStream_t stream, float3* temporal_framebuffer, bool moved,
unsigned int post_id)
{
// Seed for the Wang Hash
static unsigned int seed = 0;
if (moved)
seed = 0;
seed++;
cudaBindSurfaceToArray(surf, array);
const scene::Cubemap& cubemap = cubemaps[cubemap_id];
cubemap_ref.addressMode[0] = cudaAddressModeWrap;
cubemap_ref.addressMode[1] = cudaAddressModeWrap;
cubemap_ref.filterMode = cudaFilterModeLinear;
cubemap_ref.normalized = true;
cudaBindTextureToArray(cubemap_ref, cubemap.cubemap, cubemap.cubemap_desc);
// Register occupancy : nb_threads = regs_per_block / 32
// Shared memory occupancy : nb_threads = shared_mem / 32
// Block size occupancy
dim3 threads_per_block(16, 16);
dim3 nb_blocks(width / threads_per_block.x + 1,
height / threads_per_block.y + 1);
if (nb_blocks.x > 0 && nb_blocks.y > 0)
kernel<<<nb_blocks, threads_per_block, 0, stream>>>(
width, height, scenes, scene_id, *cam, WangHash(seed), seed,
temporal_framebuffer, moved, h_post_process_table[post_id]);
return cudaSuccess;
}
__device__ float3
no_post_process(const float3& color)
{
return color;
}
__device__ float3
grayscale(const float3& color)
{
const float gray = color.x * 0.3 + color.y * 0.59 + color.z * 0.11;
return make_float3(gray, gray, gray);
}
__device__ float3
sepia(const float3& color)
{
return make_float3(color.x * 0.393 + color.y * 0.769 + color.z * 0.189,
color.x * 0.349 + color.y * 0.686 + color.z * 0.168,
color.x * 0.272 + color.y * 0.534 + color.z * 0.131);
}
__device__ float3
invert(const float3& color)
{
return make_float3(1.0 - color.x, 1.0 - color.y, 1.0 - color.z);
}
__device__ post_process_t p_none = no_post_process;
__device__ post_process_t p_gray = grayscale;
__device__ post_process_t p_sepia = sepia;
__device__ post_process_t p_invert = invert;
// Copy the pointers from the function tables to the host side.
void
setupFunctionTables()
{
cudaMemcpyFromSymbol(&h_post_process_table[0], p_none,
sizeof(post_process_t));
cudaCheckError();
cudaMemcpyFromSymbol(&h_post_process_table[1], p_gray,
sizeof(post_process_t));
cudaCheckError();
cudaMemcpyFromSymbol(&h_post_process_table[2], p_sepia,
sizeof(post_process_t));
cudaCheckError();
cudaMemcpyFromSymbol(&h_post_process_table[3], p_invert,
sizeof(post_process_t));
cudaCheckError();
}
|
4d14fbe3e57ddf30476c522ba71198400f2d3a4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "AffineTransform.h"
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
static void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
static void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
static void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
static void Print_matrix(float* mat, int n, int m, int r_c){
//const char nmfile[] = "out.txt";
float *data_host;
data_host=(float*)malloc(n*m*sizeof(float));
hipMemcpy(data_host, mat, n*m*sizeof(float), hipMemcpyDeviceToHost); // this won't work, will throw error
if(r_c==0){
for (int jj=0; jj<n; jj++)
{
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+jj*m+ii);
printf("%.10e ", *temp);
//if(jj==101) printf("%f ", *temp);
}
printf("\n");
}
}else{
for (int jj=0; jj<n; jj++)
{
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+ii*n+jj);
printf("%.10e ", *temp);
//if(jj==101) printf("%f ", *temp);
}
printf("\n");
}
}
free(data_host);
}
AffineTransform::AffineTransform(int input_dim, int output_dim) {
input_dim_= input_dim; // 640
output_dim_= output_dim; //640
cudaErrCheck(hipMalloc((void**)&wei_affine_, input_dim_ * output_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&bias_, output_dim_ * sizeof(float)));
}
AffineTransform::~AffineTransform(){
cudaErrCheck(hipFree(wei_affine_));
cudaErrCheck(hipFree(bias_));
}
float AffineTransform::Propagate(hipblasHandle_t handle, hipStream_t stream, float* in, float* out, int seqLength){
//continue
//hipStream_t stream=NULL;
float alpha = 1.f;
float beta = 0.f;
int frame;
hipEvent_t start, stop;
float elapsedTime=0.f;
dim3 blockDim, gridDim;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// hipEventRecord(start);
const hipblasOperation_t transa = HIPBLAS_OP_N;
const hipblasOperation_t transb = HIPBLAS_OP_N;
//x ncol outdim, y nrow seqLength
blockDim.x = 32;
gridDim.x = (output_dim_ + blockDim.x - 1) / blockDim.x;
blockDim.y = 32;
gridDim.y = (seqLength + blockDim.y - 1) / blockDim.y;
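  // Assumption: add_vec_to_rows_w broadcasts alpha * bias_ into every row of out
  // (beta = 0 overwrites); the GEMM below then accumulates the weight product on
  // top of it with beta = 1.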
add_vec_to_rows_w(gridDim, blockDim, stream, alpha, bias_, beta, out, seqLength, output_dim_);
beta=1.f;
hipblasSetStream(handle, stream);
cublasErrCheck(hipblasSgemm(handle,
transa, transb,
output_dim_, //m, number of rows of matrix op(A) and C.
seqLength, //n, number of cols of matrix op(B) and C.
input_dim_, //k, number of cols of matrix op(A) and rows of op(B).
&alpha,
wei_affine_,
output_dim_, //leading dimension = number of rows (I use the number of col because I do the transpose with transa)
in,
input_dim_,
&beta,
out,
output_dim_));
//hipEventRecord(stop);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&elapsedTime, start, stop);
return elapsedTime;
}
void AffineTransform::Init() {
float mean = 0.0;
float stdev = 1.0;
hiprandGenerator_t rng;
curandErrCheck(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(hiprandSetPseudoRandomGeneratorSeed(rng, 1337ull));
curandErrCheck(hiprandGenerateNormal(rng, wei_affine_, input_dim_ * output_dim_, mean, stdev));
curandErrCheck(hiprandGenerateNormal(rng, bias_, output_dim_, mean, stdev));
curandErrCheck(hiprandDestroyGenerator(rng));
// initialize layer
}
| 4d14fbe3e57ddf30476c522ba71198400f2d3a4e.cu | #include "AffineTransform.h"
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
static void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
static void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
static void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
static void Print_matrix(float* mat, int n, int m, int r_c){
//const char nmfile[] = "out.txt";
float *data_host;
data_host=(float*)malloc(n*m*sizeof(float));
cudaMemcpy(data_host, mat, n*m*sizeof(float), cudaMemcpyDeviceToHost); // this won't work, will throw error
if(r_c==0){
for (int jj=0; jj<n; jj++)
{
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+jj*m+ii);
printf("%.10e ", *temp);
//if(jj==101) printf("%f ", *temp);
}
printf("\n");
}
}else{
for (int jj=0; jj<n; jj++)
{
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+ii*n+jj);
printf("%.10e ", *temp);
//if(jj==101) printf("%f ", *temp);
}
printf("\n");
}
}
free(data_host);
}
AffineTransform::AffineTransform(int input_dim, int output_dim) {
input_dim_= input_dim; // 640
output_dim_= output_dim; //640
cudaErrCheck(cudaMalloc((void**)&wei_affine_, input_dim_ * output_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&bias_, output_dim_ * sizeof(float)));
}
AffineTransform::~AffineTransform(){
cudaErrCheck(cudaFree(wei_affine_));
cudaErrCheck(cudaFree(bias_));
}
float AffineTransform::Propagate(cublasHandle_t handle, cudaStream_t stream, float* in, float* out, int seqLength){
//continue
//cudaStream_t stream=NULL;
float alpha = 1.f;
float beta = 0.f;
int frame;
cudaEvent_t start, stop;
float elapsedTime=0.f;
dim3 blockDim, gridDim;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord(start);
const cublasOperation_t transa = CUBLAS_OP_N;
const cublasOperation_t transb = CUBLAS_OP_N;
//x ncol outdim, y nrow seqLength
blockDim.x = 32;
gridDim.x = (output_dim_ + blockDim.x - 1) / blockDim.x;
blockDim.y = 32;
gridDim.y = (seqLength + blockDim.y - 1) / blockDim.y;
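  // Assumption: add_vec_to_rows_w broadcasts alpha * bias_ into every row of out
  // (beta = 0 overwrites); the GEMM below then accumulates the weight product on
  // top of it with beta = 1.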
add_vec_to_rows_w(gridDim, blockDim, stream, alpha, bias_, beta, out, seqLength, output_dim_);
beta=1.f;
cublasSetStream(handle, stream);
cublasErrCheck(cublasSgemm(handle,
transa, transb,
output_dim_, //m, number of rows of matrix op(A) and C.
seqLength, //n, number of cols of matrix op(B) and C.
input_dim_, //k, number of cols of matrix op(A) and rows of op(B).
&alpha,
wei_affine_,
output_dim_, //leading dimension = number of rows (I use the number of col because I do the transpose with transa)
in,
input_dim_,
&beta,
out,
output_dim_));
//cudaEventRecord(stop);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&elapsedTime, start, stop);
return elapsedTime;
}
void AffineTransform::Init() {
float mean = 0.0;
float stdev = 1.0;
curandGenerator_t rng;
curandErrCheck(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(curandSetPseudoRandomGeneratorSeed(rng, 1337ull));
curandErrCheck(curandGenerateNormal(rng, wei_affine_, input_dim_ * output_dim_, mean, stdev));
curandErrCheck(curandGenerateNormal(rng, bias_, output_dim_, mean, stdev));
curandErrCheck(curandDestroyGenerator(rng));
// initialize layer
}
|
9445f35ff77e3ff8be76d8d02d751dd79ce2fe1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include "profiler.h"
#include <algorithm>
#include <fstream>
#include <sstream>
#include <random>     // std::mt19937, std::uniform_real_distribution
#include <functional> // std::bind
#include <numeric>    // std::accumulate
#include <cmath>      // fabs
const int block_size = 32 * 32;
__global__ void reduce_5(double* in_data, double* out_data)
{
__shared__ double buf[block_size];
int thread_rank = threadIdx.x + threadIdx.y * blockDim.x;
int block_rank = blockIdx.x + blockIdx.y * gridDim.x;
int idx = thread_rank + block_rank * blockDim.x * blockDim.y;
buf[thread_rank] = in_data[idx];
__syncthreads ();
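    // Tree reduction in shared memory: each pass halves the number of active
    // threads, and each surviving thread adds its partner's partial sum.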
for (int s = block_size / 2; s > 0; s /= 2)
{
if ( thread_rank < s )
buf[thread_rank] += buf[thread_rank + s];
__syncthreads();
}
if( thread_rank == 0 )
out_data[block_rank] = buf[thread_rank];
}
int main(int argc, char **argv)
{
profiler prof;
size_t n = 512 * 1024 * 1024;
std::vector<double> a(n);
std::uniform_real_distribution<double> distribution(1.0, 10.0);
std::mt19937 engine;
auto generator = std::bind(distribution, engine);
std::generate_n(a.begin(), n, generator);
prof.tic("reduction cpu");
double sum = std::accumulate(a.begin(), a.end(), 0.0);
prof.toc("reduction cpu");
double *a_dev[2];
hipError_t cuerr = hipMalloc( (void**)&a_dev[0], n * sizeof(double));
if (cuerr != hipSuccess)
{
std::cout << "Cannot allocate GPU memory for a_dev" << hipGetErrorString(cuerr) << std::endl;
return 1;
}
cuerr = hipMalloc( (void**)&a_dev[1], n * sizeof(double));
if (cuerr != hipSuccess)
{
std::cout << "Cannot allocate GPU memory for a_dev" << hipGetErrorString(cuerr) << std::endl;
return 1;
}
cuerr = hipMemcpy ( a_dev[0], a.data(), n * sizeof(double), hipMemcpyHostToDevice );
if (cuerr != hipSuccess)
{
std::cout << "Cannot copy data to device" << hipGetErrorString(cuerr) << std::endl;
return 1;
}
auto i = 0;
size_t j = n;
prof.tic("reduction gpu 5");
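    // Each pass shrinks the array by a factor of block_size, ping-ponging between
    // the two device buffers (i ^= 1); the remaining (< block_size) partial sums
    // are accumulated on the host below.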
for (j = n ; j >= block_size; j /= block_size, i ^= 1)
{
auto num_blocks = j / block_size;
auto b_x = num_blocks;
auto b_y = 1;
if( num_blocks > 65536 )
{
b_x = block_size;
b_y = num_blocks / b_x;
}
dim3 threads(32, 32, 1);
dim3 blocks(b_x, b_y, 1);
hipLaunchKernelGGL(( reduce_5), dim3(blocks), dim3(threads), 0, 0, a_dev[i], a_dev[i^1]);
cuerr = hipGetLastError();
if (cuerr != hipSuccess)
{
std::cout << "Cannot launch CUDA kernel " << hipGetErrorString(cuerr) << std::endl;
return 1;
}
cuerr = hipDeviceSynchronize();
if (cuerr != hipSuccess)
{
std::cout << "Cannot synchronize CUDA kernel " << hipGetErrorString(cuerr) << std::endl;
return 1;
}
}
std::cout << "j = " << j << std::endl;
std::vector<double> b(j);
cuerr = hipMemcpy( b.data(), a_dev[i], sizeof(double)*j, hipMemcpyDeviceToHost );
if (cuerr != hipSuccess)
{
std::cout << "Cannot copy data from device " << hipGetErrorString(cuerr) << std::endl;
return 1;
}
double sum_dev = std::accumulate(b.begin(), b.end(), 0.0);
prof.toc("reduction gpu 5");
std::cout << "error = " << fabs( sum - sum_dev) / n << std::endl;
prof.report();
hipFree(a_dev[0]);
hipFree(a_dev[1]);
return 0;
}
| 9445f35ff77e3ff8be76d8d02d751dd79ce2fe1d.cu | #include <iostream>
#include <vector>
#include "profiler.h"
#include <algorithm>
#include <fstream>
#include <sstream>
#include <random>     // std::mt19937, std::uniform_real_distribution
#include <functional> // std::bind
#include <numeric>    // std::accumulate
#include <cmath>      // fabs
const int block_size = 32 * 32;
__global__ void reduce_5(double* in_data, double* out_data)
{
__shared__ double buf[block_size];
int thread_rank = threadIdx.x + threadIdx.y * blockDim.x;
int block_rank = blockIdx.x + blockIdx.y * gridDim.x;
int idx = thread_rank + block_rank * blockDim.x * blockDim.y;
buf[thread_rank] = in_data[idx];
__syncthreads ();
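    // Tree reduction in shared memory: each pass halves the number of active
    // threads, and each surviving thread adds its partner's partial sum.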
for (int s = block_size / 2; s > 0; s /= 2)
{
if ( thread_rank < s )
buf[thread_rank] += buf[thread_rank + s];
__syncthreads();
}
if( thread_rank == 0 )
out_data[block_rank] = buf[thread_rank];
}
int main(int argc, char **argv)
{
profiler prof;
size_t n = 512 * 1024 * 1024;
std::vector<double> a(n);
std::uniform_real_distribution<double> distribution(1.0, 10.0);
std::mt19937 engine;
auto generator = std::bind(distribution, engine);
std::generate_n(a.begin(), n, generator);
prof.tic("reduction cpu");
double sum = std::accumulate(a.begin(), a.end(), 0.0);
prof.toc("reduction cpu");
double *a_dev[2];
cudaError_t cuerr = cudaMalloc( (void**)&a_dev[0], n * sizeof(double));
if (cuerr != cudaSuccess)
{
std::cout << "Cannot allocate GPU memory for a_dev" << cudaGetErrorString(cuerr) << std::endl;
return 1;
}
cuerr = cudaMalloc( (void**)&a_dev[1], n * sizeof(double));
if (cuerr != cudaSuccess)
{
std::cout << "Cannot allocate GPU memory for a_dev" << cudaGetErrorString(cuerr) << std::endl;
return 1;
}
cuerr = cudaMemcpy ( a_dev[0], a.data(), n * sizeof(double), cudaMemcpyHostToDevice );
if (cuerr != cudaSuccess)
{
std::cout << "Cannot copy data to device" << cudaGetErrorString(cuerr) << std::endl;
return 1;
}
auto i = 0;
size_t j = n;
prof.tic("reduction gpu 5");
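    // Each pass shrinks the array by a factor of block_size, ping-ponging between
    // the two device buffers (i ^= 1); the remaining (< block_size) partial sums
    // are accumulated on the host below.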
for (j = n ; j >= block_size; j /= block_size, i ^= 1)
{
auto num_blocks = j / block_size;
auto b_x = num_blocks;
auto b_y = 1;
if( num_blocks > 65536 )
{
b_x = block_size;
b_y = num_blocks / b_x;
}
dim3 threads(32, 32, 1);
dim3 blocks(b_x, b_y, 1);
reduce_5<<<blocks, threads>>>(a_dev[i], a_dev[i^1]);
cuerr = cudaGetLastError();
if (cuerr != cudaSuccess)
{
std::cout << "Cannot launch CUDA kernel " << cudaGetErrorString(cuerr) << std::endl;
return 1;
}
cuerr = cudaDeviceSynchronize();
if (cuerr != cudaSuccess)
{
std::cout << "Cannot synchronize CUDA kernel " << cudaGetErrorString(cuerr) << std::endl;
return 1;
}
}
std::cout << "j = " << j << std::endl;
std::vector<double> b(j);
cuerr = cudaMemcpy( b.data(), a_dev[i], sizeof(double)*j, cudaMemcpyDeviceToHost );
if (cuerr != cudaSuccess)
{
std::cout << "Cannot copy data from device " << cudaGetErrorString(cuerr) << std::endl;
return 1;
}
double sum_dev = std::accumulate(b.begin(), b.end(), 0.0);
prof.toc("reduction gpu 5");
std::cout << "error = " << fabs( sum - sum_dev) / n << std::endl;
prof.report();
cudaFree(a_dev[0]);
cudaFree(a_dev[1]);
return 0;
}
|
2e6f36c90c451b203f107b4fed2d3dc27e624303.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <unistd.h>
#include <omp.h>
#include <sys/times.h>
#include <time.h>
#include <sys/times.h>
#include <stdio.h>
const int threadsPerBlock = 16;
// Function that computes the neighborhood sum on the CPU
void suma2d_cpu(float *A, float *B, int N, int V){
for(int posX = 0; posX < N; posX++){
for(int posY = 0; posY < N; posY++){
for(int i = (posX-V); i<=(posX+V); i++){
for(int j = (posY-V); j<=(posY+V); j++){
if(!(i < 0 || i >= N || j < 0 || j >= N)){
B[posX*N+posY] = B[posX*N+posY] + A[i*N+j];
}
}
}
}
}
}
// Shared memory kernel
__global__ void suma2dshm(float *A, float *B, int N, int V)
{
    // Shared-memory tile: one accumulator per thread of the block
    __shared__ float temp[threadsPerBlock*threadsPerBlock];
    // x position of the thread
    int posX = blockDim.x*blockIdx.x + threadIdx.x;
    // y position of the thread
    int posY = blockDim.y*blockIdx.y + threadIdx.y;
    // Zero this thread's accumulator before summing the neighborhood
    // (shared memory starts out uninitialized)
    temp[threadIdx.x*threadsPerBlock+threadIdx.y] = 0;
for(int i = (posX-V); i<=(posX+V); i++){
for(int j = (posY-V); j<=(posY+V); j++){
if(!(i < 0 || i >= N || j < 0 || j >= N)){
temp[threadIdx.x*threadsPerBlock+threadIdx.y] = temp[threadIdx.x*threadsPerBlock+threadIdx.y] + A[i*N+j];
}
}
}
B[posX*N+posY] = temp[threadIdx.x*threadsPerBlock+threadIdx.y];
}
int main(int argc, char **argv){
    int N, V, Bs, aux;
while ((aux = getopt (argc, argv, ":N:B:V:")) != -1){
switch (aux){
case 'N':
N = atoi(optarg);
break;
case 'V':
V = atoi(optarg);
break;
case '?':
fprintf(stderr, "ERROR, Opcion invalida: -%c\n", optopt);
return EXIT_FAILURE;
case ':':
fprintf(stderr, "ERROR, Falta el argumento de la opcion: -%c\n", optopt);
return EXIT_FAILURE;
}
}
Bs = threadsPerBlock;
int size = N*N*sizeof(float);
srand(time(NULL));
    //HOST MEMORY
float *A = (float *)malloc(size);
float *B = (float *)malloc(size);
    // INITIALIZE THE ARRAYS
for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
A[i*N+j] = (float)rand()/RAND_MAX;
B[i*N+j] = 0;
}
}
    //COPY FROM HOST MEMORY TO DEVICE
float *d_a;
float *d_b;
hipMalloc((void **) &d_a, size);
hipMalloc((void **) &d_b, size);
hipError_t err = hipMemcpy(d_a, A, size, hipMemcpyHostToDevice);
if(err!=hipSuccess) {
printf("Error al copiar hacia device arreglo A: %s\n", hipGetErrorString(err));
}
err = hipMemcpy(d_b, B, size, hipMemcpyHostToDevice);
if(err!=hipSuccess) {
printf("Error al copiar hacia device arreglo B: %s\n", hipGetErrorString(err));
}
    //BLOCK SIZES
dim3 gridSize = dim3(N/Bs, N/Bs);
dim3 blockSize = dim3(Bs, Bs);
    //KERNEL LAUNCH
    // Note: program timing measurement is commented out
//hipEvent_t start, stop;
//float elapsedTime = 0;
//hipEventCreate(&start);
//hipEventCreate(&stop);
//hipEventRecord(start, 0);
hipLaunchKernelGGL(( suma2dshm), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, N, V);
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&elapsedTime, start, stop);
//printf("El tiempo de ejecucion en GPU fue de: %f segundos\n", elapsedTime/1000);
    //COPY RESULT FROM DEVICE TO HOST
err = hipMemcpy(B, d_b, size, hipMemcpyDeviceToHost);
if(err!=hipSuccess) {
printf("Error al copiar hacia host arreglo B: %s\n", hipGetErrorString(err));
}
printf("Resultado CUDA:\n");
for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
printf("%f ", B[i*N+j]);
}
printf("\n");
}
hipFree(d_a);
hipFree(d_b);
///////
//CPU//
///////
float *B_GPU = (float *)malloc(size);
for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
B_GPU[i*N+j] = 0;
}
}
    // Note: program timing measurement is commented out
// clock_t t;
// t = clock();
suma2d_cpu(A, B_GPU, N,V);
//t = clock() - t;
//double time_taken = ((double)t)/CLOCKS_PER_SEC; // in seconds
//printf("El tiempo de ejecucion en CPU fue de: %f segundos\n", time_taken);
printf("Resultado CPU:\n");
for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
printf("%f ", B_GPU[i*N+j]);
}
printf("\n");
}
free(A);
free(B);
free(B_GPU);
return 0;
}
| 2e6f36c90c451b203f107b4fed2d3dc27e624303.cu | #include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <unistd.h>
#include <omp.h>
#include <sys/times.h>
#include <time.h>
#include <sys/times.h>
#include <stdio.h>
const int threadsPerBlock = 16;
// Function that computes the neighborhood sum on the CPU
void suma2d_cpu(float *A, float *B, int N, int V){
for(int posX = 0; posX < N; posX++){
for(int posY = 0; posY < N; posY++){
for(int i = (posX-V); i<=(posX+V); i++){
for(int j = (posY-V); j<=(posY+V); j++){
if(!(i < 0 || i >= N || j < 0 || j >= N)){
B[posX*N+posY] = B[posX*N+posY] + A[i*N+j];
}
}
}
}
}
}
// Shared memory kernel
__global__ void suma2dshm(float *A, float *B, int N, int V)
{
    // Shared-memory tile: one accumulator per thread of the block
    __shared__ float temp[threadsPerBlock*threadsPerBlock];
    // x position of the thread
    int posX = blockDim.x*blockIdx.x + threadIdx.x;
    // y position of the thread
    int posY = blockDim.y*blockIdx.y + threadIdx.y;
    // Zero this thread's accumulator before summing the neighborhood
    // (shared memory starts out uninitialized)
    temp[threadIdx.x*threadsPerBlock+threadIdx.y] = 0;
for(int i = (posX-V); i<=(posX+V); i++){
for(int j = (posY-V); j<=(posY+V); j++){
if(!(i < 0 || i >= N || j < 0 || j >= N)){
temp[threadIdx.x*threadsPerBlock+threadIdx.y] = temp[threadIdx.x*threadsPerBlock+threadIdx.y] + A[i*N+j];
}
}
}
B[posX*N+posY] = temp[threadIdx.x*threadsPerBlock+threadIdx.y];
}
int main(int argc, char **argv){
    int N, V, Bs, aux;
while ((aux = getopt (argc, argv, ":N:B:V:")) != -1){
switch (aux){
case 'N':
N = atoi(optarg);
break;
case 'V':
V = atoi(optarg);
break;
case '?':
fprintf(stderr, "ERROR, Opcion invalida: -%c\n", optopt);
return EXIT_FAILURE;
case ':':
fprintf(stderr, "ERROR, Falta el argumento de la opcion: -%c\n", optopt);
return EXIT_FAILURE;
}
}
Bs = threadsPerBlock;
int size = N*N*sizeof(float);
srand(time(NULL));
    //HOST MEMORY
float *A = (float *)malloc(size);
float *B = (float *)malloc(size);
    // INITIALIZE THE ARRAYS
for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
A[i*N+j] = (float)rand()/RAND_MAX;
B[i*N+j] = 0;
}
}
    //COPY FROM HOST MEMORY TO DEVICE
float *d_a;
float *d_b;
cudaMalloc((void **) &d_a, size);
cudaMalloc((void **) &d_b, size);
cudaError err = cudaMemcpy(d_a, A, size, cudaMemcpyHostToDevice);
if(err!=cudaSuccess) {
printf("Error al copiar hacia device arreglo A: %s\n", cudaGetErrorString(err));
}
err = cudaMemcpy(d_b, B, size, cudaMemcpyHostToDevice);
if(err!=cudaSuccess) {
printf("Error al copiar hacia device arreglo B: %s\n", cudaGetErrorString(err));
}
    //BLOCK SIZES
dim3 gridSize = dim3(N/Bs, N/Bs);
dim3 blockSize = dim3(Bs, Bs);
    //KERNEL LAUNCH
    // Note: program timing measurement is commented out
//cudaEvent_t start, stop;
//float elapsedTime = 0;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
//cudaEventRecord(start, 0);
suma2dshm<<<gridSize, blockSize>>>(d_a, d_b, N, V);
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&elapsedTime, start, stop);
//printf("El tiempo de ejecucion en GPU fue de: %f segundos\n", elapsedTime/1000);
    //COPY RESULT FROM DEVICE TO HOST
err = cudaMemcpy(B, d_b, size, cudaMemcpyDeviceToHost);
if(err!=cudaSuccess) {
printf("Error al copiar hacia host arreglo B: %s\n", cudaGetErrorString(err));
}
printf("Resultado CUDA:\n");
for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
printf("%f ", B[i*N+j]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_b);
///////
//CPU//
///////
float *B_GPU = (float *)malloc(size);
for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
B_GPU[i*N+j] = 0;
}
}
    // Note: program timing measurement is commented out
// clock_t t;
// t = clock();
suma2d_cpu(A, B_GPU, N,V);
//t = clock() - t;
//double time_taken = ((double)t)/CLOCKS_PER_SEC; // in seconds
//printf("El tiempo de ejecucion en CPU fue de: %f segundos\n", time_taken);
printf("Resultado CPU:\n");
for(int i = 0; i<N; i++){
for(int j = 0; j<N; j++){
printf("%f ", B_GPU[i*N+j]);
}
printf("\n");
}
free(A);
free(B);
free(B_GPU);
return 0;
}
|
d075b356cb65d6ec665dc4465aa8072845e14d2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gauge_field_order.h>
namespace quda {
/**
Kernel argument struct
*/
template <typename OutOrder, typename InOrder>
struct CopyGaugeArg {
OutOrder out;
const InOrder in;
int volume;
int faceVolumeCB[QUDA_MAX_DIM];
int nDim;
int geometry;
CopyGaugeArg(const OutOrder &out, const InOrder &in, int volume,
const int *faceVolumeCB, int nDim, int geometry)
: out(out), in(in), volume(volume), nDim(nDim), geometry(geometry) {
for (int d=0; d<nDim; d++) this->faceVolumeCB[d] = faceVolumeCB[d];
}
};
/**
Generic CPU gauge reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGauge(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
for (int x=0; x<arg.volume/2; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, d, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, d, parity);
}
}
}
}
/**
Check whether the field contains Nans
*/
template <typename Float, int length, typename Arg>
void checkNan(Arg arg) {
typedef typename mapper<Float>::type RegType;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
for (int x=0; x<arg.volume/2; x++) {
RegType u[length];
arg.in.load(u, x, d, parity);
for (int i=0; i<length; i++)
if (isnan(u[i]))
errorQuda("Nan detected at parity=%d, dir=%d, x=%d, i=%d", parity, d, x, i);
}
}
}
}
/**
Generic CUDA gauge reordering and packing. Adopts a similar form as
the CPU version, using the same inlined functions.
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
__global__ void copyGaugeKernel(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= arg.volume/2) return;
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, d, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, d, parity);
}
}
}
/**
Generic CPU gauge ghost reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGhost(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.nDim; d++) {
for (int x=0; x<arg.faceVolumeCB[d]; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.loadGhost(in, x, d, parity); // assumes we are loading
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.saveGhost(out, x, d, parity);
}
}
}
}
/**
Generic CUDA kernel for copying the ghost zone. Adopts a similar form as
the CPU version, using the same inlined functions.
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
__global__ void copyGhostKernel(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
int x = blockIdx.x * blockDim.x + threadIdx.x;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.nDim; d++) {
if (x < arg.faceVolumeCB[d]) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.loadGhost(in, x, d, parity); // assumes we are loading
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.saveGhost(out, x, d, parity);
}
}
}
}
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool isGhost>
class CopyGauge : Tunable {
CopyGaugeArg<OutOrder,InOrder> arg;
int size;
const GaugeField &meta;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0 ;}
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
public:
CopyGauge(CopyGaugeArg<OutOrder,InOrder> &arg, const GaugeField &meta) : arg(arg), meta(meta) {
int faceMax = 0;
for (int d=0; d<arg.nDim; d++) {
faceMax = (arg.faceVolumeCB[d] > faceMax ) ? arg.faceVolumeCB[d] : faceMax;
}
size = isGhost ? faceMax : arg.volume/2;
writeAuxString("out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride);
}
virtual ~CopyGauge() { ; }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (!isGhost) {
hipLaunchKernelGGL(( copyGaugeKernel<FloatOut, FloatIn, length, OutOrder, InOrder>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else {
hipLaunchKernelGGL(( copyGhostKernel<FloatOut, FloatIn, length, OutOrder, InOrder>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const {
int sites = 4*arg.volume/2;
if (isGhost) {
sites = 0;
for (int d=0; d<4; d++) sites += arg.faceVolumeCB[d];
}
return 2 * sites * ( arg.in.Bytes() + arg.in.hasPhase*sizeof(FloatIn)
+ arg.out.Bytes() + arg.out.hasPhase*sizeof(FloatOut) );
}
};
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGauge(OutOrder outOrder, const InOrder inOrder, int volume, const int *faceVolumeCB,
int nDim, int geometry, const GaugeField &out, QudaFieldLocation location, int type) {
CopyGaugeArg<OutOrder,InOrder> arg(outOrder, inOrder, volume, faceVolumeCB, nDim, geometry);
if (location == QUDA_CPU_FIELD_LOCATION) {
#ifdef HOST_DEBUG
checkNan<FloatIn, length>(arg);
#endif
if (type == 0 || type == 2) {
copyGauge<FloatOut, FloatIn, length>(arg);
}
#ifdef MULTI_GPU // only copy the ghost zone if doing multi-gpu
if (type == 0 || type == 1) {
if (geometry == QUDA_VECTOR_GEOMETRY) copyGhost<FloatOut, FloatIn, length>(arg);
//else warningQuda("Cannot copy for %d geometry gauge field", geometry);
}
#endif
} else if (location == QUDA_CUDA_FIELD_LOCATION) {
// first copy body
if (type == 0 || type == 2) {
CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 0> gaugeCopier(arg, out);
gaugeCopier.apply(0);
}
#ifdef MULTI_GPU
if (type == 0 || type == 1) {
if (geometry == QUDA_VECTOR_GEOMETRY) {
// now copy ghost
CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 1> ghostCopier(arg, out);
ghostCopier.apply(0);
} else {
//warningQuda("Cannot copy for %d geometry gauge field", geometry);
}
}
#endif
} else {
errorQuda("Undefined field location %d for copyGauge", location);
}
}
template <typename FloatOut, typename FloatIn, int length, typename InOrder>
void copyGauge(const InOrder &inOrder, GaugeField &out, QudaFieldLocation location,
FloatOut *Out, FloatOut **outGhost, int type) {
int faceVolumeCB[QUDA_MAX_DIM];
for (int i=0; i<4; i++) faceVolumeCB[i] = out.SurfaceCB(i) * out.Nface();
if (out.isNative()) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(FloatOut)==typeid(short) && out.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
copyGauge<short,FloatIn,length>
(FloatNOrder<short,length,2,19>(out, (short*)Out, (short**)outGhost), inOrder,
out.Volume(), faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
} else {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_NO>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out,Out,outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
}
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_12>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out,Out,outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_8>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out,Out,outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
#ifdef GPU_STAGGERED_DIRAC
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_13>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_9>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order());
}
} else if (out.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(QDPOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (out.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(QDPJITOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (out.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(CPSOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(MILCOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (out.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(BQCDOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(TIFROrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", out.Order());
}
}
template <typename FloatOut, typename FloatIn, int length>
void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location,
FloatOut *Out, FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) {
// reconstruction only supported on FloatN fields currently
if (in.isNative()) {
if (in.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(FloatIn)==typeid(short) && in.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
copyGauge<FloatOut,short,length> (FloatNOrder<short,length,2,19>
(in,(short*)In,(short**)inGhost),
out, location, Out, outGhost, type);
} else {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_NO>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
}
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_12>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_8>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
#ifdef GPU_STAGGERED_DIRAC
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_13>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_9>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order());
}
} else if (in.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
copyGauge<FloatOut,FloatIn,length>(QDPOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (in.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyGauge<FloatOut,FloatIn,length>(QDPJITOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (in.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
copyGauge<FloatOut,FloatIn,length>(CPSOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
copyGauge<FloatOut,FloatIn,length>(MILCOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (in.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyGauge<FloatOut,FloatIn,length>(BQCDOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
copyGauge<FloatOut,FloatIn,length>(TIFROrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", in.Order());
}
}
void checkMomOrder(const GaugeField &u);
template <typename FloatOut, typename FloatIn>
void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out,
FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) {
if (in.Ncolor() != 3 && out.Ncolor() != 3) {
errorQuda("Unsupported number of colors; out.Nc=%d, in.Nc=%d", out.Ncolor(), in.Ncolor());
}
if (out.Geometry() != in.Geometry()) {
errorQuda("Field geometries %d %d do not match", out.Geometry(), in.Geometry());
}
if (in.LinkType() != QUDA_ASQTAD_MOM_LINKS && out.LinkType() != QUDA_ASQTAD_MOM_LINKS) {
// we are doing gauge field packing
copyGauge<FloatOut,FloatIn,18>(out, in, location, Out, In, outGhost, inGhost, type);
} else {
if (location != QUDA_CPU_FIELD_LOCATION) errorQuda("Location %d not supported", location);
if (out.Geometry() != QUDA_VECTOR_GEOMETRY) errorQuda("Unsupported geometry %d", out.Geometry());
checkMomOrder(in);
checkMomOrder(out);
int faceVolumeCB[QUDA_MAX_DIM];
for (int d=0; d<in.Ndim(); d++) faceVolumeCB[d] = in.SurfaceCB(d) * in.Nface();
// momentum only currently supported on MILC (10), TIFR (18) and Float2 (10) fields currently
if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, FloatNOrder<FloatIn,10,2,10> >
arg(FloatNOrder<FloatOut,10,2,10>(out, Out),
FloatNOrder<FloatIn,10,2,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, MILCOrder<FloatIn,10> >
arg(FloatNOrder<FloatOut,10,2,10>(out, Out), MILCOrder<FloatIn,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
CopyGaugeArg<FloatNOrder<FloatOut,18,2,11>, TIFROrder<FloatIn,18> >
arg(FloatNOrder<FloatOut,18,2,11>(out, Out), TIFROrder<FloatIn,18>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
CopyGaugeArg<MILCOrder<FloatOut,10>, FloatNOrder<FloatIn,10,2,10> >
arg(MILCOrder<FloatOut,10>(out, Out), FloatNOrder<FloatIn,10,2,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
CopyGaugeArg<MILCOrder<FloatOut,10>, MILCOrder<FloatIn,10> >
arg(MILCOrder<FloatOut,10>(out, Out), MILCOrder<FloatIn,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
// FIX ME - 11 is a misnomer to avoid confusion in template instantiation
CopyGaugeArg<TIFROrder<FloatOut,18>, FloatNOrder<FloatIn,18,2,11> >
arg(TIFROrder<FloatOut,18>(out, Out), FloatNOrder<FloatIn,18,2,11>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
CopyGaugeArg<TIFROrder<FloatOut,18>, TIFROrder<FloatIn,18> >
arg(TIFROrder<FloatOut,18>(out, Out), TIFROrder<FloatIn,18>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
	  copyGauge<FloatOut,FloatIn,18>(arg); // length 18 to match the TIFROrder<.,18> accessors above
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field orders %d not supported", out.Order());
}
}
}
} // namespace quda
| d075b356cb65d6ec665dc4465aa8072845e14d2d.cu | #include <gauge_field_order.h>
namespace quda {
/**
Kernel argument struct
*/
template <typename OutOrder, typename InOrder>
struct CopyGaugeArg {
OutOrder out;
const InOrder in;
int volume;
int faceVolumeCB[QUDA_MAX_DIM];
int nDim;
int geometry;
CopyGaugeArg(const OutOrder &out, const InOrder &in, int volume,
const int *faceVolumeCB, int nDim, int geometry)
: out(out), in(in), volume(volume), nDim(nDim), geometry(geometry) {
for (int d=0; d<nDim; d++) this->faceVolumeCB[d] = faceVolumeCB[d];
}
};
/**
Generic CPU gauge reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGauge(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
for (int x=0; x<arg.volume/2; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, d, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, d, parity);
}
}
}
}
/**
Check whether the field contains Nans
*/
template <typename Float, int length, typename Arg>
void checkNan(Arg arg) {
typedef typename mapper<Float>::type RegType;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
for (int x=0; x<arg.volume/2; x++) {
RegType u[length];
arg.in.load(u, x, d, parity);
for (int i=0; i<length; i++)
if (isnan(u[i]))
errorQuda("Nan detected at parity=%d, dir=%d, x=%d, i=%d", parity, d, x, i);
}
}
}
}
/**
Generic CUDA gauge reordering and packing. Adopts a similar form as
the CPU version, using the same inlined functions.
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
__global__ void copyGaugeKernel(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= arg.volume/2) return;
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, d, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, d, parity);
}
}
}
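  /**
     Note (illustrative, not from the original source): the kernel above assigns
     one thread per checkerboard site x and loops over parity and geometry inside
     the thread, so a launch needs at least arg.volume/2 threads along x, e.g.

       dim3 block(256, 1, 1);   // hypothetical; the real block size is picked by the autotuner
       dim3 grid((arg.volume/2 + block.x - 1) / block.x, 1, 1);
       copyGaugeKernel<FloatOut, FloatIn, length, OutOrder, InOrder><<<grid, block>>>(arg);

     which is effectively what the CopyGauge/Tunable wrapper below produces.
  */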
/**
Generic CPU gauge ghost reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGhost(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.nDim; d++) {
for (int x=0; x<arg.faceVolumeCB[d]; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.loadGhost(in, x, d, parity); // assumes we are loading
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.saveGhost(out, x, d, parity);
}
}
}
}
/**
Generic CUDA kernel for copying the ghost zone. Adopts a similar form as
the CPU version, using the same inlined functions.
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
__global__ void copyGhostKernel(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
int x = blockIdx.x * blockDim.x + threadIdx.x;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.nDim; d++) {
if (x < arg.faceVolumeCB[d]) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.loadGhost(in, x, d, parity); // assumes we are loading
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.saveGhost(out, x, d, parity);
}
}
}
}
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool isGhost>
class CopyGauge : Tunable {
CopyGaugeArg<OutOrder,InOrder> arg;
int size;
const GaugeField &meta;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0 ;}
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
public:
CopyGauge(CopyGaugeArg<OutOrder,InOrder> &arg, const GaugeField &meta) : arg(arg), meta(meta) {
int faceMax = 0;
for (int d=0; d<arg.nDim; d++) {
faceMax = (arg.faceVolumeCB[d] > faceMax ) ? arg.faceVolumeCB[d] : faceMax;
}
size = isGhost ? faceMax : arg.volume/2;
writeAuxString("out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride);
}
virtual ~CopyGauge() { ; }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (!isGhost) {
copyGaugeKernel<FloatOut, FloatIn, length, OutOrder, InOrder>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else {
copyGhostKernel<FloatOut, FloatIn, length, OutOrder, InOrder>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const {
int sites = 4*arg.volume/2;
if (isGhost) {
sites = 0;
for (int d=0; d<4; d++) sites += arg.faceVolumeCB[d];
}
return 2 * sites * ( arg.in.Bytes() + arg.in.hasPhase*sizeof(FloatIn)
+ arg.out.Bytes() + arg.out.hasPhase*sizeof(FloatOut) );
}
};
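  /**
     Note (interpretation, hedged): in bytes() above, sites = 4*volume/2 counts the
     links of a single parity assuming four dimensions, and the leading factor of 2
     restores both parities; multiplying by in.Bytes() + out.Bytes() (plus phase
     bytes) therefore gives the total data read plus written, which is presumably
     what the autotuner uses to report an effective bandwidth for this copy.
  */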
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGauge(OutOrder outOrder, const InOrder inOrder, int volume, const int *faceVolumeCB,
int nDim, int geometry, const GaugeField &out, QudaFieldLocation location, int type) {
CopyGaugeArg<OutOrder,InOrder> arg(outOrder, inOrder, volume, faceVolumeCB, nDim, geometry);
if (location == QUDA_CPU_FIELD_LOCATION) {
#ifdef HOST_DEBUG
checkNan<FloatIn, length>(arg);
#endif
if (type == 0 || type == 2) {
copyGauge<FloatOut, FloatIn, length>(arg);
}
#ifdef MULTI_GPU // only copy the ghost zone if doing multi-gpu
if (type == 0 || type == 1) {
if (geometry == QUDA_VECTOR_GEOMETRY) copyGhost<FloatOut, FloatIn, length>(arg);
//else warningQuda("Cannot copy for %d geometry gauge field", geometry);
}
#endif
} else if (location == QUDA_CUDA_FIELD_LOCATION) {
// first copy body
if (type == 0 || type == 2) {
CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 0> gaugeCopier(arg, out);
gaugeCopier.apply(0);
}
#ifdef MULTI_GPU
if (type == 0 || type == 1) {
if (geometry == QUDA_VECTOR_GEOMETRY) {
// now copy ghost
CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 1> ghostCopier(arg, out);
ghostCopier.apply(0);
} else {
//warningQuda("Cannot copy for %d geometry gauge field", geometry);
}
}
#endif
} else {
errorQuda("Undefined field location %d for copyGauge", location);
}
}
template <typename FloatOut, typename FloatIn, int length, typename InOrder>
void copyGauge(const InOrder &inOrder, GaugeField &out, QudaFieldLocation location,
FloatOut *Out, FloatOut **outGhost, int type) {
int faceVolumeCB[QUDA_MAX_DIM];
for (int i=0; i<4; i++) faceVolumeCB[i] = out.SurfaceCB(i) * out.Nface();
if (out.isNative()) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(FloatOut)==typeid(short) && out.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
copyGauge<short,FloatIn,length>
(FloatNOrder<short,length,2,19>(out, (short*)Out, (short**)outGhost), inOrder,
out.Volume(), faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
} else {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_NO>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out,Out,outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
}
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_12>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out,Out,outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_8>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out,Out,outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
#ifdef GPU_STAGGERED_DIRAC
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_13>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) {
typedef typename gauge_mapper<FloatOut,QUDA_RECONSTRUCT_9>::type G;
copyGauge<FloatOut,FloatIn,length>
(G(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB,
out.Ndim(), out.Geometry(), out, location, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order());
}
} else if (out.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(QDPOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (out.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(QDPJITOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (out.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(CPSOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(MILCOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (out.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(BQCDOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(TIFROrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), out, location, type);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", out.Order());
}
}
template <typename FloatOut, typename FloatIn, int length>
void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location,
FloatOut *Out, FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) {
// reconstruction only supported on FloatN fields currently
if (in.isNative()) {
if (in.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(FloatIn)==typeid(short) && in.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
copyGauge<FloatOut,short,length> (FloatNOrder<short,length,2,19>
(in,(short*)In,(short**)inGhost),
out, location, Out, outGhost, type);
} else {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_NO>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
}
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_12>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_8>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
#ifdef GPU_STAGGERED_DIRAC
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_13>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) {
typedef typename gauge_mapper<FloatIn,QUDA_RECONSTRUCT_9>::type G;
copyGauge<FloatOut,FloatIn,length> (G(in,In,inGhost), out, location, Out, outGhost, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order());
}
} else if (in.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
copyGauge<FloatOut,FloatIn,length>(QDPOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (in.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyGauge<FloatOut,FloatIn,length>(QDPJITOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (in.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
copyGauge<FloatOut,FloatIn,length>(CPSOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
copyGauge<FloatOut,FloatIn,length>(MILCOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (in.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyGauge<FloatOut,FloatIn,length>(BQCDOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
copyGauge<FloatOut,FloatIn,length>(TIFROrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", in.Order());
}
}
void checkMomOrder(const GaugeField &u);
template <typename FloatOut, typename FloatIn>
void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out,
FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) {
if (in.Ncolor() != 3 && out.Ncolor() != 3) {
errorQuda("Unsupported number of colors; out.Nc=%d, in.Nc=%d", out.Ncolor(), in.Ncolor());
}
if (out.Geometry() != in.Geometry()) {
errorQuda("Field geometries %d %d do not match", out.Geometry(), in.Geometry());
}
if (in.LinkType() != QUDA_ASQTAD_MOM_LINKS && out.LinkType() != QUDA_ASQTAD_MOM_LINKS) {
// we are doing gauge field packing
copyGauge<FloatOut,FloatIn,18>(out, in, location, Out, In, outGhost, inGhost, type);
} else {
if (location != QUDA_CPU_FIELD_LOCATION) errorQuda("Location %d not supported", location);
if (out.Geometry() != QUDA_VECTOR_GEOMETRY) errorQuda("Unsupported geometry %d", out.Geometry());
checkMomOrder(in);
checkMomOrder(out);
int faceVolumeCB[QUDA_MAX_DIM];
for (int d=0; d<in.Ndim(); d++) faceVolumeCB[d] = in.SurfaceCB(d) * in.Nface();
      // momentum is currently only supported on MILC (10), TIFR (18) and Float2 (10) fields
if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, FloatNOrder<FloatIn,10,2,10> >
arg(FloatNOrder<FloatOut,10,2,10>(out, Out),
FloatNOrder<FloatIn,10,2,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, MILCOrder<FloatIn,10> >
arg(FloatNOrder<FloatOut,10,2,10>(out, Out), MILCOrder<FloatIn,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
CopyGaugeArg<FloatNOrder<FloatOut,18,2,11>, TIFROrder<FloatIn,18> >
arg(FloatNOrder<FloatOut,18,2,11>(out, Out), TIFROrder<FloatIn,18>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
CopyGaugeArg<MILCOrder<FloatOut,10>, FloatNOrder<FloatIn,10,2,10> >
arg(MILCOrder<FloatOut,10>(out, Out), FloatNOrder<FloatIn,10,2,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
CopyGaugeArg<MILCOrder<FloatOut,10>, MILCOrder<FloatIn,10> >
arg(MILCOrder<FloatOut,10>(out, Out), MILCOrder<FloatIn,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
// FIX ME - 11 is a misnomer to avoid confusion in template instantiation
CopyGaugeArg<TIFROrder<FloatOut,18>, FloatNOrder<FloatIn,18,2,11> >
arg(TIFROrder<FloatOut,18>(out, Out), FloatNOrder<FloatIn,18,2,11>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
CopyGaugeArg<TIFROrder<FloatOut,18>, TIFROrder<FloatIn,18> >
arg(TIFROrder<FloatOut,18>(out, Out), TIFROrder<FloatIn,18>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
	  copyGauge<FloatOut,FloatIn,18>(arg); // length 18 to match the TIFROrder<.,18> accessors above
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field orders %d not supported", out.Order());
}
}
}
} // namespace quda
|
8d55eecea90032ee6f78c59f8ce33a1b858a1aaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "define.h" //must be on top for Windows compilation
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include "host.h"
#include "ISO.h"
#include "voigt.h"
#include "resample.h"
/*
// runs with bilinear interpolation
// texDescr.filterMode = hipFilterModeLinear;
__global__ void Voigt_texture_kernel(hipTextureObject_t K2dtex, float *K_d, int Nx, int Ny, int Nxtex, int Nytex, size_t pitch){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < Nx && idy < Ny){
float x = idx * Nxtex / float(Nx);
float y = idy * Nytex / float(Ny);
//float x = idx / float(Nx);
//float y = idy / float(Ny);
float K = tex2D <float> (K2dtex, x + 0.5f , y + 0.5f);
float *row = (float *)(((char *)K_d)+(idy*pitch));
row[idx] = K;
//if(idy == 0) printf("%d %d %f %f %f\n", idx, idy, x * 10.0f, y * 10.0f, K);
}
}
// runs with manual bilinear interpolation
// texDescr.filterMode = hipFilterModePoint;
__global__ void Voigt_textureb_kernel(hipTextureObject_t K2dtex, float *K_d, int Nx, int Ny, int Nxtex, int Nytex, size_t pitch){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < Nx && idy < Ny){
float x = idx * Nxtex / float(Nx);
float y = idy * Nytex / float(Ny);
float K00 = tex2D <float> (K2dtex, x, y);
float K10 = tex2D <float> (K2dtex, x + 1.0f, y);
float K01 = tex2D <float> (K2dtex, x, y + 1.0f);
float K11 = tex2D <float> (K2dtex, x + 1.0f, y + 1.0f);
float xx = (idx % (Nx / Nxtex)) * Nxtex / float(Nx);
float yy = (idy % (Ny / Nytex)) * Nytex / float(Ny);
float K = (1.0f - xx) * ( 1.0f - yy) * K00 + xx * (1.0f - yy) * K10 + (1.0f - xx) * yy * K01 + xx * yy * K11;
float *row = (float *)(((char *)K_d)+(idy*pitch));
row[idx] = K;
//if(idy == 0) printf("%d %d %f %f | %f %f | %f %f %f %f %f\n", idx, idy, x * 10.0f / Nx, y * 10.0f / Ny, xx, yy, K00, K10, K01, K11, K);
}
}
// runs with manual bilinear interpolation
// texDescr.filterMode = hipFilterModePoint;
__global__ void Voigt_b_kernel(float *K2d_d, float *K_d, int Nx, int Ny, int Nxtex, int Nytex, size_t pitch){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < Nx && idy < Ny){
int x = floor(idx * Nxtex / float(Nx));
int y = floor(idy * Nytex / float(Ny));
float *row1 = (float *)(((char *)K_d)+(y*pitch)) + x;
float K00 = *row1;
float *row2 = (float *)(((char *)K_d)+(y*pitch)) + x + 1;
float K10 = *row2;
float *row3 = (float *)(((char *)K_d)+((y + 1)*pitch)) + x;
float K01 = *row3;
float *row4 = (float *)(((char *)K_d)+((y + 1)*pitch)) + x + 1;
float K11 = *row4;
float xx = (idx % (Nx / Nxtex)) * Nxtex / float(Nx);
float yy = (idy % (Ny / Nytex)) * Nytex / float(Ny);
float K = (1.0f - xx) * ( 1.0f - yy) * K00 + xx * (1.0f - yy) * K10 + (1.0f - xx) * yy * K01 + xx * yy * K11;
float *row = (float *)(((char *)K_d)+(idy*pitch));
row[idx] = K;
//if(idy == 0) printf("%d %d %f %f | %f %f | %f %f %f %f %f\n", idx, idy, x * 10.0f / Nx, y * 10.0f / Ny, xx, yy, K00, K10, K01, K11, K);
}
}
//https://stackoverflow.com/questions/34622717/bicubic-interpolation-in-c
__device__ float cubic_hermite(float A, float B, float C, float D, float t){
float a = -A / 2.0f + (3.0f * B) / 2.0f - (3.0f * C) / 2.0f + D / 2.0f;
float b = A - (5.0f * B) / 2.0f + 2.0f * C - D / 2.0f;
float c = -A / 2.0f + C / 2.0f;
float d = B;
float tt = t * t;
return a * t* tt + b * tt + c * t + d;
}
// runs with manual bicubic interpolation
// texDescr.filterMode = hipFilterModePoint;
__global__ void Voigt_bicubic_kernel(hipTextureObject_t K2dtex, float *K_d, int Nx, int Ny, int Nxtex, int Nytex, size_t pitch){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx > 0 && idy > 0 && idx < Nx - 1&& idy < Ny - 1){
float x = idx * Nxtex / float(Nx);
float y = idy * Nytex / float(Ny);
float K00 = tex2D <float> (K2dtex, x - 1.0f, y - 1.0f);
float K10 = tex2D <float> (K2dtex, x , y - 1.0f);
float K20 = tex2D <float> (K2dtex, x + 1.0f, y - 1.0f);
float K30 = tex2D <float> (K2dtex, x + 2.0f, y - 1.0f);
float K01 = tex2D <float> (K2dtex, x - 1.0f, y);
float K11 = tex2D <float> (K2dtex, x , y);
float K21 = tex2D <float> (K2dtex, x + 1.0f, y);
float K31 = tex2D <float> (K2dtex, x + 2.0f, y);
float K02 = tex2D <float> (K2dtex, x - 1.0f, y + 1.0f);
float K12 = tex2D <float> (K2dtex, x , y + 1.0f);
float K22 = tex2D <float> (K2dtex, x + 1.0f, y + 1.0f);
float K32 = tex2D <float> (K2dtex, x + 2.0f, y + 1.0f);
float K03 = tex2D <float> (K2dtex, x - 1.0f, y + 2.0f);
float K13 = tex2D <float> (K2dtex, x , y + 2.0f);
float K23 = tex2D <float> (K2dtex, x + 1.0f, y + 2.0f);
float K33 = tex2D <float> (K2dtex, x + 2.0f, y + 2.0f);
float xx = (idx % (Nx / Nxtex)) * Nxtex / float(Nx);
float yy = (idy % (Ny / Nytex)) * Nytex / float(Ny);
float K0 = cubic_hermite(K00, K10, K20, K30, xx);
float K1 = cubic_hermite(K01, K11, K21, K31, xx);
float K2 = cubic_hermite(K02, K12, K22, K32, xx);
float K3 = cubic_hermite(K03, K13, K23, K33, xx);
float K = cubic_hermite(K0, K1, K2, K3, yy);
if(idx == 15 && idy == 15) printf("%d %d %g %g %g %g %g %g %g\n", idx, idy, x, y, K00, K10, K20, K30, K0, K);
float *row = (float *)(((char *)K_d)+(idy*pitch));
row[idx] = K;
//if(idy == 0) printf("%d %d %f %f | %f %f | %f %f %f %f %f\n", idx, idy, x * 10.0f / Nx, y * 10.0f / Ny, xx, yy, K00, K10, K01, K11, K);
}
}
*/
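// Note on the block above (and the similar commented-out block inside main()):
// it appears to be a disabled test harness that tabulates the Voigt function on a
// coarse grid, resamples it through a texture with bilinear or cubic-Hermite
// (bicubic) interpolation, and compares the result against the directly computed
// table, printing entries whose absolute difference exceeds 5e-7.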
int main(int argc, char*argv[]){
hipError_t error;
int er;
int devCount = 0;
hipGetDeviceCount(&devCount);
if(devCount == 0){
printf("Error: No valid cuda device!\n");
return 0;
}
if(devCount == 1) printf("There is %d CUDA Device\n", devCount);
else printf("There are %d CUDA Devices\n", devCount);
/*
{
double xMax = 10.0;
double yMax = 10.0;
int Nx = 1000;
int Ny = 1000;
int Nxtex = Nx + 1;
int Nytex = Ny + 1;
int Nxtexf = Nx / 10 + 1;
int Nytexf = Ny / 10 + 1;
double *K2d_h, *K2d_d;
size_t pitch;
	//with pitch, the 2d memory is extended in one dimension for memory alignment; pitch is the new Nxtex
K2d_h = (double*)malloc( Nxtex * Nytex * sizeof(double));
hipMallocPitch((void **) &K2d_d, &pitch, Nxtex * sizeof(double), Nytex);
//printf("%d %d %lu\n", Nxtex, Nytex, pitch);
{
double a = (double)(M_PI * sqrt(-1.0 / log(def_TOLF * 0.5)));
double b = (double)(1.0 / sqrt(M_PI));
double c = (double)(2.0 * a / M_PI);
Voigt_2d_kernel <<< dim3((Nxtex + 31) / 32, (Nytex + 31) / 32), dim3(32, 32, 1) >>> (a, b, c, K2d_d, Nxtex, Nytex, pitch, xMax, xMax);
hipMemcpy2D(K2d_h, Nxtex * sizeof(double), K2d_d, pitch, Nxtex * sizeof(double), Nytex, hipMemcpyDeviceToHost);
}
// / *
for(int i = 0; i < Nxtex - 1; ++i){
for(int j = 0; j < Nytex - 1; ++j){
//x and y arrays from 0.1 to 2000
double x = exp(-2.3 + i * xMax / double(Nxtex - 1));
double y = exp(-2.3 + j * yMax / double(Nytex - 1));
//if( x < xMax && y < yMax){
printf("%g %g %.15g\n", x, y, K2d_h[j * Nxtex + i]);
//}
}
}
// * /
return 0;
}
/ *
float *K2df_h, *K2df_d;
size_t pitchf;
	//with pitchf, the 2d memory is extended in one dimension for memory alignment; pitchf is the new Nxtexf
K2df_h = (float*)malloc( Nxtexf * Nytexf * sizeof(float));
hipMallocPitch((void **) &K2df_d, &pitchf, Nxtexf * sizeof(float), Nytexf);
//printf("%d %d %lu\n", Nxtexf, Nytexf, pitchf);
{
float a = (float)(M_PI * sqrt(-1.0f / log(def_TOLF * 0.5f)));
float b = (float)(1.0f / sqrt(M_PI));
float c = (float)(2.0f * a / M_PI);
Voigt_2df_kernel <<< dim3((Nxtexf + 31) / 32, (Nytexf + 31) / 32), dim3(32, 32, 1) >>> (a, b, c, K2df_d, Nxtexf, Nytexf, pitchf, xMax, xMax);
hipMemcpy2D(K2df_h, Nxtexf * sizeof(float), K2df_d, pitchf, Nxtexf * sizeof(float), Nytexf, hipMemcpyDeviceToHost);
}
/ *
for(int i = 0; i < Nxtexf - 1; ++i){
for(int j = 0; j < Nytexf -1; ++j){
float x = i * xMax / float(Nxtexf - 1);
float y = j * yMax / float(Nytexf - 1);
if( x < xMax && y < yMax){
printf("%g %g %.15g\n", x, y, K2df_h[j * Nxtexf + i]);
}
}
}
return 0;
* /
//https://stackoverflow.com/questions/41749024/edit-cuda-texture-object
hipTextureObject_t K2dtex;
hipResourceDesc resDescr;
memset(&resDescr, 0, sizeof(hipResourceDesc));
resDescr.resType = hipResourceTypePitch2D;
resDescr.res.pitch2D.desc = hipCreateChannelDesc<float>();
resDescr.res.pitch2D.devPtr = K2df_d;
resDescr.res.pitch2D.height = Nytexf;
resDescr.res.pitch2D.pitchInBytes = pitchf;
resDescr.res.pitch2D.width = Nxtexf;
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = 0;
//texDescr.filterMode = hipFilterModeLinear;
texDescr.filterMode = hipFilterModePoint;
texDescr.addressMode[0] = hipAddressModeClamp;
texDescr.addressMode[1] = hipAddressModeClamp;
texDescr.addressMode[2] = hipAddressModeClamp;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&K2dtex, &resDescr, &texDescr, NULL);
float *K_h, *K_d;
K_h = (float*)malloc( Nx * Ny * sizeof(float));
	//with pitch, the 2d memory is extended in one dimension for memory alignment; pitch is the new Nx
hipMallocPitch((void **) &K_d, &pitch, Nx * sizeof(float), Ny);
for(int t = 0; t < 1; ++t){
//Voigt_texture_kernel <<< dim3((Nx + 31) / 32, (Ny + 31) / 32), dim3(32, 32, 1) >>> (K2dtex, K_d, Nx, Ny, Nxtexf - 1, Nytexf - 1, pitch);
//Voigt_textureb_kernel <<< dim3((Nx + 31) / 32, (Ny + 31) / 32), dim3(32, 32, 1) >>> (K2dtex, K_d, Nx, Ny, Nxtexf -1, Nytexf - 1, pitch);
//Voigt_b_kernel <<< dim3((Nx + 31) / 32, (Ny + 31) / 32), dim3(32, 32, 1) >>> (K2d_d, K_d, Nx, Ny, Nxtex - 1, Nytex - 1, pitch);
Voigt_bicubic_kernel <<< dim3((Nx + 31) / 32, (Ny + 31) / 32), dim3(32, 32, 1) >>> (K2dtex, K_d, Nx, Ny, Nxtexf - 1, Nytexf - 1, pitch);
}
hipMemcpy2D(K_h, Nx * sizeof(float), K_d, pitch, Nx * sizeof(float), Ny, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for(int i = 0; i < Nx; ++i){
for(int j = 0; j < Ny; ++j){
double x = i * xMax / double(Nx);
double y = j * yMax / double(Ny);
if( x < xMax && y < yMax){
double diff = fabs(K2d_h[j * Nxtex + i] - K_h[j * Nx + i]);
if(diff > 5.0e-7){
printf("%g %g %.15g %.15g %.15g\n", x, y, K2d_h[j * Nxtex + i], K_h[j * Nx + i], diff);
}
}
}
}
return 0;
}
*/
	char qFilename[15][160]; //for at most 15 isotopologues
char paramFilename[160];
sprintf(paramFilename, "%s", "param.dat");
	//Read parameters
Param param;
sprintf(param.PFilename, "%s", "-");
sprintf(param.SpeciesFilename, "%s", "-");
sprintf(param.edges, "%s", "-");
sprintf(param.bins, "%s", "-");
sprintf(param.ciaSystem, "%s", "-");
sprintf(param.subLorentzianFilename, "%s", "-");
param.dev = 0;
param.useIndividualBins = 0;
param.useOutputEdges = 0;
param.nedges = 0;
param.nP = 1;
param.usePFile = 0;
param.useIndividualX = 0;
param.useCia = 0;
param.path[0] = 0;
param.pathK[0] = 0;
param.nSpecies = 1;
param.useSpeciesFile = 0;
param.useSubLorentzian = 0;
param.T = 0.0;
param.P = 0.0;
param.mParamFilename[0] = 0;
param.dataBase = 0;
param.numin = 0.0;
param.numax = 0.0;
param.dnu = 0.0;
param.Nxb = 0;
param.cutMode = 0;
param.cut = 0.0;
param.doResampling = 0;
param.nC = 0;
param.doTransmission = 0;
param.nTr = 0;
param.dTr = 0.0;
param.doStoreFullK = 0;
param.doStoreK = 0;
param.nbins = 0;
param.kmin = 0.0;
param.qalphaL = def_qALPHA_L;
param.gammaF = def_gammaF;
param.doMean = 0;
param.units = 0;
param.replaceFiles = 0;
param.profile = def_PROFILE;
param.doTuning = def_doTuning;
param.removePlinth = def_removePlinth;
er = read_parameters(param, paramFilename, argc, argv);
if(er == 0){
return 0;
}
if(param.dev >= devCount || param.dev < 0){
printf("Error: Device Number is not allowed\n");
return 0;
}
char filemode[16];
if(param.replaceFiles == 0){
sprintf(filemode, "a");
}
else{
sprintf(filemode, "w");
}
FILE *InfoFile;
char InfoFilename[300];
sprintf(InfoFilename, "Info_%s.dat", param.name);
InfoFile = fopen(InfoFilename, filemode);
int runtimeVersion;
int driverVersion;
hipRuntimeGetVersion(&runtimeVersion);
hipDriverGetVersion(&driverVersion);
hipSetDevice(param.dev);
hipDeviceProp_t devProp;
for(int i = 0; i < 2; ++i){
FILE *infofile;
if(i == 0) infofile = InfoFile;
if(i == 1) infofile = stdout;
for(int j = 0; j < devCount; ++j){
hipGetDeviceProperties(&devProp, j);
fprintf(infofile,"Name:%s, Major:%d, Minor:%d, Max threads per Block:%d, Max x dim:%d\n, #Multiprocessors:%d, Clock Rate:%d, Memory Clock Rate:%d, Global Memory:%lu, Shared memory per block: %lu\n",
devProp.name, devProp.major, devProp.minor, devProp.maxThreadsPerBlock, devProp.maxThreadsDim[0],
devProp.multiProcessorCount, devProp.clockRate, devProp.memoryClockRate, (long unsigned int)(devProp.totalGlobalMem), (long unsigned int)(devProp.sharedMemPerBlock));
}
}
if(param.Nxb != 0){
param.useIndividualX = 1;
}
if(param.removePlinth == 1 && param.profile == 4){
printf("Error, remove plinth is not supported for profile 4\n");
return 0;
}
subLorentzianConstantCopy(param.useSubLorentzian);
//If the bin file is used, store the boundaries of the bins
double *binBoundaries_h, *binBoundaries_d;
binBoundaries_h = (double*)malloc((param.nbins + 1) * sizeof(double));
hipMalloc((void **) &binBoundaries_d, (param.nbins + 1) * sizeof(double));
if(param.useIndividualBins == 1){
er = readBinFile(param, binBoundaries_h);
if(er == 0) return 0;
param.numin = binBoundaries_h[0];
param.numax = binBoundaries_h[param.nbins];
if(param.doResampling > 0){
printf("Error: The resampling function is not supported for the bin-file option\n");
return 0;
}
if(param.doTransmission > 0){
printf("Error: The transmission function is not supported for the bin-file option\n");
return 0;
}
}
else{
for(int i = 0; i < param.nbins; ++i){
binBoundaries_h[i] = param.numin + i * (param.numax - param.numin) / ((double)(param.nbins));
}
binBoundaries_h[param.nbins] = param.numax;
}
hipMemcpy(binBoundaries_d, binBoundaries_h, (param.nbins + 1) * sizeof(double), hipMemcpyHostToDevice);
//for(int i = 0; i < param.nbins + 1; ++i){
// printf("binboundaries %d %g\n", i, binBoundaries_h[i]);
//}
int Nx;
if(param.useIndividualX == 0){
Nx = (int)((param.numax - param.numin) / param.dnu + 0.5); //+ 0.5 to round correctly between double and int
if((param.numax - param.numin) / param.dnu + 0.5 >= 2147483647){
printf("Error: Nx too large, integer overflow. %d %g\n", Nx, (param.numax - param.numin) / param.dnu);
return 0;
}
printf("%g %g %g %g\n", param.numax, param.numin, param.dnu, (param.numax - param.numin) / param.dnu + 0.5);
param.Nxb = Nx / param.nbins;
if(Nx % param.nbins != 0){
printf("Error: range cannot be divided evenly in bins. %d %d %g\n", Nx, param.nbins, Nx / ((double)(param.nbins)));
return 0;
}
}
else{
Nx = param.nbins * param.Nxb;
if(param.nbins * param.Nxb >= 2147483647){
printf("Error: Nx too large, integer overflow. %d %g\n", Nx, (double)(param.nbins) * (double)(param.Nxb));
return 0;
}
if(param.doResampling > 0){
printf("Error: The resampling function is not supported for unequal spacing option\n");
return 0;
}
if(param.doTransmission > 0){
printf("Error: The transmission function is not supported for unequal spacing option\n");
return 0;
}
}
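	//Illustrative example (hypothetical numbers, not from any parameter file):
	//with numin = 0, numax = 30000 cm^-1 and dnu = 0.01 cm^-1 the grid has
	//Nx = 30000 / 0.01 = 3000000 points; with nbins = 300 this gives
	//Nxb = Nx / nbins = 10000 points per bin and the divisibility check passes.
	//If nbins does not divide Nx evenly, the run aborts above.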
if(param.useSubLorentzian == 1){
subLorentzianB(param.T);
param.useIndividualX = 1;
//this is needed because of the nu/nu0 factor
}
//If the output edges file is used store the edges
double *outputEdges_h;
if(param.useOutputEdges == 1){
outputEdges_h = (double*)malloc((param.nedges + 1) * sizeof(double));
er = readEdgesFile(param, outputEdges_h);
if(er == 0) return 0;
}
else{
outputEdges_h = NULL;
}
//Allocate P array
double *P_h;
P_h = (double*)malloc((param.nP) * sizeof(double));
P_h[0] = param.P;
if(param.usePFile == 1){
er = readPFile(param, P_h);
if(er == 0) return 0;
}
//Allocate Species array
double *SpeciesA_h; //abundance
char **SpeciesN_h;
SpeciesA_h = (double*)malloc(param.nSpecies * sizeof(double));
SpeciesN_h = (char**)malloc(param.nSpecies * sizeof(char*));
for(int i = 0; i < param.nSpecies; ++i){
SpeciesN_h[i] = (char*)malloc(160 * sizeof(char));
}
if(param.useSpeciesFile == 1){
er = readSpeciesFile(param, SpeciesN_h, SpeciesA_h);
if(er == 0) return 0;
}
double time[9];
double timeT[3];
for(int i = 0; i < 9; ++i){
time[i] = 0.0;
}
for(int i = 0; i < 3; ++i){
timeT[i] = 0.0;
}
//Allocate Molecule properties
for(int i = 0; i < 2; ++i){
FILE *infofile;
if(i == 0) infofile = InfoFile;
if(i == 1) infofile = stdout;
fprintf(infofile, "\nVersion: %g\n", VERSION);
fprintf(infofile, "Using device %d\n\n", param.dev);
fprintf(infofile, "Runtime Version %d\n", runtimeVersion);
fprintf(infofile, "Driver Version %d\n", driverVersion);
fprintf(infofile, "GIT Describe: %s\n", GIT_DESCRIBE);
fprintf(infofile, "Build Date: %s\n", BUILD_DATE);
fprintf(infofile, "Build Path: %s\n", BUILD_PATH);
fprintf(infofile, "Build System: %s\n", BUILD_SYSTEM);
fprintf(infofile, "Build Compute Capability: SM=%s\n", BUILD_SM);
fprintf(infofile, "\n");
if(param.Nxb < param.nC && i == 0){
printf("Number of points per bin smaller than the number of Chebyshev coefficients: Changed nC to %d\n", param.Nxb);
fprintf(infofile, "Number of points per bin smaller than the number of Chebyshev coefficients: Changed nC to %d\n", param.Nxb);
param.nC = param.Nxb;
}
fprintf(infofile, "name = %s\n", param.name);
fprintf(infofile, "T = %g\n", param.T);
if(param.usePFile == 0){
fprintf(infofile, "P = %g\n", P_h[0]);
}
else{
fprintf(infofile, "P in file: %s\n", param.PFilename);
fprintf(infofile, "Number of P values: %d\n", param.nP);
}
if(param.useSpeciesFile > 0){
fprintf(infofile, "Species in file: %s\n", param.SpeciesFilename);
fprintf(infofile, "Number of Species: %d\n", param.nSpecies);
}
if(param.useSubLorentzian > 0){
fprintf(infofile, "sub-Lorentzian file: %s\n", param.subLorentzianFilename);
}
fprintf(infofile, "cia System = %s\n", param.ciaSystem);
fprintf(infofile, "pathToData = %s\n", param.path);
fprintf(infofile, "numin = %g\n", param.numin);
fprintf(infofile, "numax = %g\n", param.numax);
fprintf(infofile, "dnu = %g\n", param.dnu);
fprintf(infofile, "Nnu per bin = %d\n", param.Nxb);
fprintf(infofile, "Number of points: %d\n", Nx);
fprintf(infofile, "cutMode = %d\n", param.cutMode);
fprintf(infofile, "cut = %g\n", param.cut);
fprintf(infofile, "doResampling = %d\n", param.doResampling);
fprintf(infofile, "nC = %d\n", param.nC);
fprintf(infofile, "doTransmission = %d\n", param.doTransmission);
fprintf(infofile, "nTr = %d\n", param.nTr);
fprintf(infofile, "dTr = %g\n", param.dTr);
fprintf(infofile, "doStoreFullK = %d\n", param.doStoreFullK);
fprintf(infofile, "pathToK = %s\n", param.pathK);
fprintf(infofile, "dostoreK = %d\n", param.doStoreK);
fprintf(infofile, "nbins = %d\n", param.nbins);
if(param.useIndividualBins == 1){
fprintf(infofile, "use Individual bins: %s\n", param.bins);
}
fprintf(infofile, "kmin = %g\n", param.kmin);
fprintf(infofile, "qalphaL = %g\n", param.qalphaL);
fprintf(infofile, "gammaF = %g\n", param.gammaF);
fprintf(infofile, "doMean = %d\n", param.doMean);
fprintf(infofile, "Units = %d\n", param.units);
fprintf(infofile, "Replace files = %d\n", param.replaceFiles);
fprintf(infofile, "profile = %d\n", param.profile);
fprintf(infofile, "doTuning = %d\n", param.doTuning);
fprintf(infofile, "def_TOL = %g\n", def_TOL);
fprintf(infofile, "def_TOLf = %g\n", def_TOLF);
fprintf(infofile, "def_nthmax = %d\n", def_nthmax);
fprintf(infofile, "def_nlmax = %d\n", def_nlmax);
fprintf(infofile, "def_maxlines = %lld\n", def_maxlines);
fprintf(infofile, "def_maxfiles = %d\n", def_maxfiles);
fprintf(infofile, "def_NmaxSample = %d\n", def_NmaxSample);
if(param.useOutputEdges == 1){
fprintf(infofile, "use output edges: %s\n", param.edges);
}
fprintf(infofile, "\n");
}
fclose(InfoFile);
hipEvent_t tt1; //start time
hipEvent_t tt2; //end time
hipEventCreate(&tt1);
hipEventCreate(&tt2);
hipEvent_t ReadStart, ReadStop;
hipEventCreate(&ReadStart);
hipEventCreate(&ReadStop);
hipEvent_t KStart, KStop;
hipEventCreate(&KStart);
hipEventCreate(&KStop);
hipEvent_t LineStart, LineStop;
hipEventCreate(&LineStart);
hipEventCreate(&LineStop);
hipEvent_t TuneStart, TuneStop;
hipEventCreate(&TuneStart);
hipEventCreate(&TuneStop);
hipEvent_t iiLimitsEvent;
hipEventCreate(&iiLimitsEvent);
hipEvent_t AEvent;
hipEventCreate(&AEvent);
hipEvent_t ALEvent;
hipEventCreate(&ALEvent);
hipEvent_t AREvent;
hipEventCreate(&AREvent);
hipEvent_t BEvent;
hipEventCreate(&BEvent);
float milliseconds;
hipStream_t VStream[def_KSn];
for(int i = 0; i < def_KSn; ++i){
hipStreamCreate(&VStream[i]);
}
hipStream_t CStream[def_rBs];
for(int i = 0; i < def_rBs; ++i){
hipStreamCreate(&CStream[i]);
}
hipStream_t tuneStream[2];
for(int i = 0; i < 2; ++i){
hipStreamCreate(&tuneStream[i]);
}
hipStream_t nuLimitsStream[5];
for(int i = 0; i < 5; ++i){
hipStreamCreate(&nuLimitsStream[i]);
}
// ************************************************************
//calculate mean mass before starting the opacity calculation
//needed to set kmin
// ************************************************************
double meanMass = 0.0;
for(int iSpecies = 0; iSpecies < param.nSpecies; ++iSpecies){
double Sscale = 1.0;
if(param.nSpecies > 1){
sprintf(param.mParamFilename, "%s", SpeciesN_h[iSpecies]);
Sscale = SpeciesA_h[iSpecies];
}
Molecule m;
if(param.useCia == 0){
int er = Init(m, param, qFilename);
if(er == 0) return 0;
}
//compute the mean mass
for(int i = 0; i < m.nISO; ++i){
			//include mixture abundances here
meanMass += m.ISO[i].Ab * m.ISO[i].m * Sscale; //mean Molar Mass (g)
}
}
printf("mean mass %g\n", meanMass);
//needed here already to get the cia.mass1. Initialize it again later in the main species loop
ciaSystem cia;
if(param.useCia == 1){
Molecule m;
er = InitCia(m, cia, param);
if(er == 0) return 0;
}
double unitScale = 1.0;
if(param.units == 1){
unitScale = 1.0 / def_NA * meanMass;
if(param.useCia == 1){
unitScale = 1.0 / def_NA * cia.mass1;
}
param.kmin /= unitScale;
printf("kmin %g\n", param.kmin);
}
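	//Dimensional note (interpretation): meanMass is an abundance-weighted molar
	//mass in g/mol, so unitScale = meanMass / def_NA is the mass of one "mean"
	//molecule in grams. Multiplying an opacity in cm^2 g^-1 by unitScale gives
	//cm^2 molecule^-1; kmin is divided by unitScale here, presumably so the
	//user-supplied threshold matches the per-molecule units selected by units == 1.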
// ************************************************************
// ****************************************************************************
// Allocate and initialize K and x arrays
// ****************************************************************************
double *K_h, *K_d;
double *KS_d; //used in multiple y blocks
double *x_h, *x_d;
int *binKey_d;
int *binIndex_h, *binIndex_d;
K_h = (double*)malloc(Nx * sizeof(double));
x_h = (double*)malloc(Nx * sizeof(double));
binIndex_h = (int*)malloc((param.nbins + 2) * sizeof(int));
hipMalloc((void **) &K_d, param.nP * Nx * sizeof(double));
hipMalloc((void **) &KS_d, def_KSn * Nx * sizeof(double));
hipMalloc((void **) &x_d, Nx * sizeof(double));
hipMalloc((void **) &binKey_d, Nx * sizeof(int));
hipMalloc((void **) &binIndex_d, (param.nbins + 2) * sizeof(int));
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("K alloc error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
for(int k = 0; k < param.nP * Nx; k += def_nthmax){
int Nk = min(def_nthmax, param.nP * Nx - k);
hipLaunchKernelGGL(( InitialK_kernel) , dim3((Nk + 511) / 512), dim3(512) , 0, 0, K_d, param.nP * Nx, param.kmin, k);
}
for(int k = 0; k < def_KSn * Nx; k += def_nthmax){
int Nk = min(def_nthmax, def_KSn * Nx - k);
		//kmin must always be zero here, because the different streams are added to K_d later
hipLaunchKernelGGL(( InitialK_kernel) , dim3((Nk + 511) / 512), dim3(512) , 0, 0, KS_d, def_KSn * Nx, 0.0, k);
}
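	//Note: kernels here are launched in chunks of at most def_nthmax threads; the
	//loop offset k is passed into the kernel so that each launch covers the index
	//range [k, k + Nk). The same chunking pattern recurs below for setX_kernel,
	//binKey_kernel and binIndex_kernel.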
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("bin1 error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
for(int k = 0; k < Nx; k += def_nthmax){
int Nk = min(def_nthmax, Nx - k);
hipLaunchKernelGGL(( setX_kernel) , dim3((Nk + 511) / 512), dim3(512) , 0, 0, x_d, Nx, param.numin, param.dnu, param.Nxb, param.useIndividualX, binBoundaries_d, k);
}
hipMemcpy(x_h, x_d, Nx * sizeof(double), hipMemcpyDeviceToHost);
for(int k = 0; k < Nx; k += def_nthmax){
int Nk = min(def_nthmax, Nx - k);
hipLaunchKernelGGL(( binKey_kernel) , dim3((Nk + 511) / 512), dim3(512) , 0, 0, binKey_d, Nx, param.Nxb, binBoundaries_d, param.nbins, param.numax, x_d, param.useIndividualX, k);
}
for(int k = 0; k < Nx; k += def_nthmax){
int Nk = min(def_nthmax, Nx - k);
hipLaunchKernelGGL(( binIndex_kernel) , dim3((Nk + 511) / 512), dim3(512) , 0, 0, binKey_d, binIndex_d, Nx, param.nbins, k);
}
hipMemcpy(binIndex_h, binIndex_d, (param.nbins + 2) * sizeof(int), hipMemcpyDeviceToHost);
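	//Note: after these kernels, binKey_d[i] holds the bin index of wavenumber grid
	//point i, and binIndex_h[b] .. binIndex_h[b + 1] bracket the grid points that
	//fall into bin b, as the commented-out check below illustrates.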
/*
int *binKey_h; //only needed to check the key
binKey_h = (int*)malloc(Nx * sizeof(int));
hipMemcpy(binKey_h, binKey_d, Nx * sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < Nx; ++i){
int bin = binKey_h[i];
printf("%d %.10g %d %d %d\n", i, x_h[i], bin, binIndex_h[bin], binIndex_h[bin + 1]);
}
*/
// ****************************************************************************
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("K and x alloc error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
//start species loop here
for(int iSpecies = 0; iSpecies < param.nSpecies; ++iSpecies){
double Sscale = 1.0; //Abundance scale for mixtures
if(param.nSpecies > 1){
sprintf(param.mParamFilename, "%s", SpeciesN_h[iSpecies]);
Sscale = SpeciesA_h[iSpecies];
}
Molecule m;
m.id=0;
m.NL[0] = 0;
m.nISO = 0;
m.defaultL = 0.0;
m.defaultn = 0.0;
//Initialize the Isotopologue properties for ISO.h
if(param.useCia == 0){
int er = Init(m, param, qFilename);
if(er == 0) return 0;
}
//print species dependent information
InfoFile = fopen(InfoFilename, "a");
for(int i = 0; i < 2; ++i){
FILE *infofile;
if(i == 0) infofile = InfoFile;
if(i == 1) infofile = stdout;
fprintf(infofile, "Species Name = %s\n", m.mName);
fprintf(infofile, "dataBase = %d\n", param.dataBase);
fprintf(infofile, "Molecule Number = %d\n", m.id);
fprintf(infofile, "default L = %g\n", m.defaultL);
fprintf(infofile, "default n = %g\n", m.defaultn);
fprintf(infofile, "\n");
}
fclose(InfoFile);
//Read partition function
Partition part;
er = readPartition(param, qFilename, part, param.T, m);
if(er == 0){
return 0;
}
printf("mean mass %g, Sscale %g\n", meanMass, Sscale);
//Set cia System properties
ciaSystem cia;
if(param.useCia == 1){
er = InitCia(m, cia, param);
if(er == 0) return 0;
}
if(param.useCia == 1 && m.id != 0){
printf("Error, not allowed to use a cia system with a molecule\n");
return 0;
}
double *readBuffer_h, *readBuffer_d;
int readBufferSize = 8192;
int readBufferN = 0;
int readBufferCount = 0;
int rbvs = 0;
if(param.dataBase == 2){
//Exomol nu, S, El, A
//Kurucz Molecules
readBufferN = 4;
}
if(param.dataBase == 20){
//Exomol super lines nu, S
readBufferN = 2;
}
if(param.dataBase == 30){
//Kurucz Atoms nu, S, El, A, Gamma_nat
readBufferN = 5;
}
if(param.dataBase == 31){
//NIST Atoms
readBufferN = 5;
}
if(param.dataBase == 32){
//VALD Atoms
readBufferN = 5;
}
hipHostMalloc((void **) &readBuffer_h, def_rBs * readBufferSize * readBufferN * sizeof(double), hipHostMallocDefault);
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Buffer host alloc error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipMalloc((void **) &readBuffer_d, def_maxlines * readBufferN * sizeof(double));
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Buffer device alloc error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
//printf("Allocate read Buffer %d %d %d %lld | %d %lld\n", def_rBs, readBufferSize, readBufferN, m.NLmax, def_rBs * readBufferSize * readBufferN, m.NLmax * readBufferN);
Line L;
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Initial error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
//Allocate memory for Line properties
if(param.dataBase < 2 || param.dataBase == 3){
// 0 1 3
Alloc_Line(L, m, param);
}
else{
// 2 20 30 31 32
Alloc2_Line(L, m, param);
}
if(param.useCia == 1){
for(int iP = 0; iP < param.nP; ++iP){
int er = readCiaFile(param, cia, x_h, K_h, Nx, param.T, P_h[iP]);
hipMemcpy(K_d + iP * Nx, K_h, Nx * sizeof(double), hipMemcpyHostToDevice);
if(er == 0){
return 0;
}
}
}
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Line alloc error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
if(m.id > 0 && param.doStoreFullK >= 0){
// **************************************
// Starting the loop around the datafiles
// **************************************
int fi0 = m.nFiles;
int fi1 = 0;
if(param.cut == 0.0) param.cut = 1.0e30;
if(param.cutMode == 0 && param.cut){
for(int fi = 0; fi < m.nFiles; ++fi){
if(m.fileLimit[fi] - param.cut <= param.numax) fi1 = fi + 1;
else break;
}
for(int fi = m.nFiles - 1; fi >= 0; --fi){
if(m.fileLimit[fi + 1] + param.cut >= param.numin) fi0 = fi;
else break;
}
}
else{
fi0 = 0;
fi1 = m.nFiles;
}
printf("File range %d to %d\n", fi0, fi1 - 1);
int fi;
FILE *dataFile;
char dataFilename[180];
timeT[0] += time[0];
time[0] = 0.0;
//Tuning parameters for Line Kernels
int ntAOld = 0;
int ntA = 0;
int ntALOld = 0;
int ntAL = 0;
int ntAROld = 0;
int ntAR = 0;
int ntBOld = 0;
int ntB = 0;
int ntCOld = 0;
int ntC = 0;
int nkA = 8;
int nkAL = 8;
int nkAR = 8;
int nkB = 4;
int nkC = 2;
double c1 = def_h * def_c / (def_kB * param.T);
double T1 = def_T0 / param.T;
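		//Reference (standard HITRAN-style relations, stated here for orientation;
		//the actual use is inside the Sf and S2 kernels): c1 = h*c/(kB*T) has units
		//of cm, so exp(-c1 * E) is the Boltzmann factor for a lower-state energy E
		//given in cm^-1, and T1 = def_T0 / T enters the temperature scaling of the
		//pressure broadening, gammaL(T,P) ~ gammaL(T0,P0) * T1^n * P / P0.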
for(fi = fi0; fi < fi1; ++fi){
timeT[1] += time[1];
timeT[2] += time[2];
time[1] = 0.0;
time[2] = 0.0;
int NL;
int NL1;
long long lPart;
// read the first block of files outside the loop
// the remaining reads are called at the end of the loop
// to allow overlapping execution
// **************************read0
if(fi == fi0){
sprintf(dataFilename, "%sbin", m.dataFilename[fi]);
dataFile = fopen(dataFilename, "rb");
if(dataFile == NULL){
printf("Error: line list file not found: %s\n", dataFilename);
return 0;
}
printf("Reading Line file %d of %d: %s\n", fi, fi1 - 1, dataFilename);
printf("Number of lines: %lld\n", m.NL[fi]);
NL = min(def_maxlines, m.NL[fi] - 0);
lPart = (0 + def_maxlines - 1) / def_maxlines;
hipEventRecord(ReadStart);
printf("Reading Line file %d of %d; part %lld of %lld with %d lines\n", fi, fi1 - 1, lPart, (m.NL[fi] + def_maxlines - 1) / def_maxlines - 1, NL);
// **************************
// Read the Line list
// **************************
if(param.dataBase < 2 || param.dataBase == 3){
//0 1 3
er = readFile(param, m, part, L, param.qalphaL, NL, dataFile, Sscale, meanMass);
}
else {
// 2 20 30 31 32
int vs = 0;
for(int i = 0; i < NL; i += readBufferSize){
er = readFileExomol(L, NL, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, vs, CStream);
readBufferCount += readBufferSize;
++vs;
}
}
if(er == 0){
return 0;
}
hipEventRecord(ReadStop);
hipEventSynchronize(ReadStop);
hipEventElapsedTime(&milliseconds, ReadStart, ReadStop);
time[0] += milliseconds * 0.001;
printf("Reading Line file %d, part %lld complete\n", fi, lPart);
printf("Time for input, %d %lld: %g seconds\n", fi, lPart, time[0]);
}
// **************************read0
for(long long int iL = 0LL; iL < m.NL[fi]; iL += def_maxlines){
//start the loop around the Pressure values. only 1 iteration if no Pressure file is given
for(int iP = 0; iP < param.nP; ++iP){
//Copy Line data to the device
hipEventRecord(LineStart);
if(param.dataBase < 2 || param.dataBase == 3){
//0 1 3
Copy_Line(L, m, NL);
}
else{
//2 30 31 32
double mass = m.ISO[0].m / def_NA;
double Abundance = m.ISO[0].Ab;
if(param.units == 0){
Abundance *= m.ISO[0].m / meanMass;
Sscale *= m.ISO[0].m / meanMass;
}
double Q = part.Q[0];
int vs = 0;
for(int k = 0; k < NL; k += def_nthmax / 4){
int Nk = min(def_nthmax / 4, NL - k);
if(Nk > 0){
// ***************************
// Compute Line properties 1
// ***************************
if(param.dataBase == 2){
hipLaunchKernelGGL(( L_kernelExomol) , dim3((Nk + 127) / 128), dim3(128), 0, VStream[vs % def_KSn] , readBuffer_d, L.nu_d, L.S_d, L.EL_d, L.ialphaD_d, L.A_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Q, Abundance, Sscale, NL, k);
}
if(param.dataBase == 20){
hipLaunchKernelGGL(( L_kernelExomolSuper) , dim3((Nk + 127) / 128), dim3(128), 0, VStream[vs % def_KSn] , readBuffer_d, L.nu_d, L.S_d, L.ialphaD_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Abundance, Sscale, NL, k);
}
if(param.dataBase == 30){
hipLaunchKernelGGL(( L_kernelKurucz) , dim3((Nk + 127) / 128), dim3(128), 0, VStream[vs % def_KSn] , readBuffer_d, L.nu_d, L.S_d, L.EL_d, L.ialphaD_d, L.A_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Q, Abundance, Sscale, NL, k);
}
if(param.dataBase == 31){
hipLaunchKernelGGL(( L_kernelNIST) , dim3((Nk + 127) / 128), dim3(128), 0, VStream[vs % def_KSn] , readBuffer_d, L.nu_d, L.S_d, L.EL_d, L.ialphaD_d, L.A_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Q, Abundance, Sscale, NL, k);
}
if(param.dataBase == 32){
hipLaunchKernelGGL(( L_kernelVALD) , dim3((Nk + 127) / 128), dim3(128), 0, VStream[vs % def_KSn] , readBuffer_d, L.nu_d, L.S_d, L.EL_d, L.ialphaD_d, L.A_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Q, Abundance, Sscale, NL, k);
}
// ***************************
// Compute Line properties 2
// ***************************
if(param.dataBase != 20){
hipLaunchKernelGGL(( Sf_kernel) , dim3((Nk + 127) / 128), dim3(128), 0, VStream[vs % def_KSn] , L.nu_d, L.S_d, L.A_d, L.vy_d, L.ialphaD_d, L.n_d, L.EL_d, L.ID_d, NL, c1, T1, P_h[iP], k);
}
else{
// 2 30 31 32
hipLaunchKernelGGL(( SfSuper_kernel) , dim3((Nk + 127) / 128), dim3(128), 0, VStream[vs % def_KSn] , L.nu_d, L.S_d, L.vy_d, L.ialphaD_d, L.n_d, L.ID_d, NL, T1, P_h[iP], k);
}
}
++vs;
}
}
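				//Note: the line-property kernels above are issued in chunks of def_nthmax/4
				//lines, cycling round-robin over the def_KSn streams in VStream (vs % def_KSn),
				//presumably so independent chunks can overlap; the device synchronization below
				//joins them again before the sort.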
hipDeviceSynchronize();
// ************************
// ***************************
// Compute Line properties
// ***************************
if(param.dataBase < 2 || param.dataBase == 3){
// 0 1 3
for(int k = 0; k < NL; k += def_nthmax){
int Nk = min(def_nthmax, NL - k);
if(Nk > 0)hipLaunchKernelGGL(( S2_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.nu_d, L.S_d, L.A_d, L.vy_d, L.ialphaD_d, L.n_d, L.delta_d, L.EL_d, L.ID_d, NL, param.T, P_h[iP], k);
}
/* // *************
				//uncomment this only when no Pressure file is given
//print number of lines per bin
hipMemcpy(L.nu_h, L.nu_d, NL * sizeof(double), hipMemcpyDeviceToHost);
int nLb[param.nbins];
for(int i = 0; i < param.nbins; ++i){
nLb[i] = 0;
}
double binWidth = (param.numax - param.numin) / ((double)(param.nbins));
printf("%g\n", binWidth);
for(int i = 0; i < NL; ++i){
int b = int(L.nu_h[i] / binWidth);
nLb[b] += 1;
}
for(int i = 0; i < param.nbins; ++i){
printf("%d, ", nLb[i]);
}
printf("\n");
*/
}
//print_kernel <<< 1, 1 >>> (L.nu_d, L.ialphaD_d, L.vy_d, L.ID_d, 500, 0);
//Sort the data along nu
thrust::device_ptr<double> nu_dt = thrust::device_pointer_cast(L.nu_d);
thrust::device_ptr<int> ID_dt = thrust::device_pointer_cast(L.ID_d);
thrust::sort_by_key(nu_dt, nu_dt + NL, ID_dt);
//Use Sort_d and ID_d to sort S_d, vy_d and ialphaD_d
int Nk = min(def_nthmax, NL);
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0)hipLaunchKernelGGL(( Copy_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.S_d, L.Sort_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0)hipLaunchKernelGGL(( Sort_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.Sort_d, L.S_d, L.ID_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0)hipLaunchKernelGGL(( Copy_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.vy_d, L.Sort_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0)hipLaunchKernelGGL(( Sort_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.Sort_d, L.vy_d, L.ID_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0)hipLaunchKernelGGL(( Copy_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.ialphaD_d, L.Sort_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0)hipLaunchKernelGGL(( Sort_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.Sort_d, L.ialphaD_d, L.ID_d, NL, k);
}
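				//Note: thrust::sort_by_key sorts nu_d in place and carries the original line
				//indices along in ID_d; each Copy_kernel/Sort_kernel pair then applies that
				//permutation to one payload array (S_d, vy_d, ialphaD_d), presumably as
				//Sort_d = A followed by A[i] = Sort_d[ID_d[i]], i.e. an argsort-and-gather.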
// ********************************
for(int k = 0; k < NL; k += def_nthmax){
int Nk = min(def_nthmax, NL - k);
if(Nk > 0){
hipLaunchKernelGGL(( S3_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.nu_d, L.S_d, L.S1_d, L.vy_d, L.ialphaD_d, L.Sf_d, L.S1f_d, L.vyf_d, L.vcut2_d, L.va_d, L.vb_d, param.cut, param.cutMode, param.profile, param.numin, param.dnu, param.useIndividualX, NL, k);
if(param.removePlinth == 1 && param.cut != 0.0){
float a = (float)(M_PI * sqrt(-1.0 / log(def_TOLF * 0.5)));
float b = (float)(1.0 / sqrt(M_PI));
float c = (float)(2.0 * a / M_PI);
hipLaunchKernelGGL(( Plinth_kernel) , dim3((Nk + 127) / 128), dim3(128) , 0, 0, L.S1f_d, L.Sf_d, L.vyf_d, L.vcut2_d, L.plinth_d, NL, a, b, c, param.profile);
//printPlinth_kernel <<< (Nk + 127) / 128, 128 >>> (L.plinth_d, L.nu_d, NL);
}
}
}
hipEventRecord(LineStop);
hipEventSynchronize(LineStop);
error = hipGetLastError();
if(error != 0){
printf("Line error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipEventElapsedTime(&milliseconds, LineStart, LineStop);
time[1] += milliseconds * 0.001;
printf("Time for Lines: %d %lld %d: %g seconds\n", fi, lPart, iP, time[1]);
hipEventRecord(KStart);
// ************************************
// Compute the opacity function K(x)
// ************************************
int nlLimitsA = (NL + def_nlA - 1) / def_nlA;
int nlLimitsB = (NL + def_nlB - 1) / def_nlB;
int nlLimitsC = (NL + def_nlC - 1) / def_nlC;
//A
hipLaunchKernelGGL(( nuLimits_kernel), dim3(nlLimitsA), dim3(min(def_nlA, 1024)), 0, nuLimitsStream[0] , L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsA0_d, L.nuLimitsA1_d, param.numin, param.numax, def_nlA, NL, param.profile, 10);
hipLaunchKernelGGL(( iiLimits_kernel) , dim3((nlLimitsA + 127) / 128), dim3(128), 0, nuLimitsStream[0] , L.nuLimitsA0_d, L.nuLimitsA1_d, L.iiLimitsA0_d, L.iiLimitsA1_d, binBoundaries_d, nlLimitsA, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 10);
hipLaunchKernelGGL(( iiLimitsMax_kernel< 512 >) , dim3(1), dim3(512) , 0, 0, L.iiLimitsA0_d, L.iiLimitsA1_d, L.iiLimitsAT_d, Nx, nlLimitsA);
if(param.profile == 1){ //only for voigt profiles
//AL
hipLaunchKernelGGL(( nuLimits_kernel), dim3(nlLimitsA), dim3(min(def_nlA, 1024)), 0, nuLimitsStream[1] , L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsAL0_d, L.nuLimitsAL1_d, param.numin, param.numax, def_nlA, NL, param.profile, 11);
hipLaunchKernelGGL(( iiLimits_kernel) , dim3((nlLimitsA + 127) / 128), dim3(128), 0, nuLimitsStream[1] , L.nuLimitsAL0_d, L.nuLimitsAL1_d, L.iiLimitsAL0_d, L.iiLimitsAL1_d, binBoundaries_d, nlLimitsA, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 11);
hipLaunchKernelGGL(( iiLimitsMax_kernel< 512 >) , dim3(1), dim3(512) , 0, 0, L.iiLimitsAL0_d, L.iiLimitsAL1_d, L.iiLimitsALT_d, Nx, nlLimitsA);
//AR
hipLaunchKernelGGL(( nuLimits_kernel), dim3(nlLimitsA), dim3(min(def_nlA, 1024)), 0, nuLimitsStream[2] , L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsAR0_d, L.nuLimitsAR1_d, param.numin, param.numax, def_nlA, NL, param.profile, 12);
hipLaunchKernelGGL(( iiLimits_kernel) , dim3((nlLimitsA + 127) / 128), dim3(128), 0, nuLimitsStream[2] , L.nuLimitsAR0_d, L.nuLimitsAR1_d, L.iiLimitsAR0_d, L.iiLimitsAR1_d, binBoundaries_d, nlLimitsA, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 12);
hipLaunchKernelGGL(( iiLimitsMax_kernel< 512 >) , dim3(1), dim3(512) , 0, 0, L.iiLimitsAR0_d, L.iiLimitsAR1_d, L.iiLimitsART_d, Nx, nlLimitsA);
//B
hipLaunchKernelGGL(( nuLimits_kernel), dim3(nlLimitsB), dim3(min(def_nlB, 1024)), 0, nuLimitsStream[3] , L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsB0_d, L.nuLimitsB1_d, param.numin, param.numax, def_nlB, NL, param.profile, 20);
hipLaunchKernelGGL(( iiLimits_kernel) , dim3((nlLimitsB + 127) / 128), dim3(128), 0, nuLimitsStream[3] , L.nuLimitsB0_d, L.nuLimitsB1_d, L.iiLimitsB0_d, L.iiLimitsB1_d, binBoundaries_d, nlLimitsB, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 20);
hipLaunchKernelGGL(( iiLimitsMax_kernel< 512 >) , dim3(1), dim3(512) , 0, 0, L.iiLimitsB0_d, L.iiLimitsB1_d, L.iiLimitsBT_d, Nx, nlLimitsB);
//C
hipLaunchKernelGGL(( nuLimits_kernel), dim3(nlLimitsC), dim3(min(def_nlC, 1024)), 0, nuLimitsStream[4] , L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsC0_d, L.nuLimitsC1_d, param.numin, param.numax, def_nlC, NL, param.profile, 30);
hipLaunchKernelGGL(( iiLimits_kernel) , dim3((nlLimitsC + 127) / 128), dim3(128), 0, nuLimitsStream[4] , L.nuLimitsC0_d, L.nuLimitsC1_d, L.iiLimitsC0_d, L.iiLimitsC1_d, binBoundaries_d, nlLimitsC, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 30);
hipLaunchKernelGGL(( iiLimitsMax_kernel< 512 >) , dim3(1), dim3(512) , 0, 0, L.iiLimitsC0_d, L.iiLimitsC1_d, L.iiLimitsCT_d, Nx, nlLimitsC);
}
hipEventRecord(iiLimitsEvent);
hipEventSynchronize(iiLimitsEvent);
hipLaunchKernelGGL(( iiLimitsCheck) , dim3((nlLimitsA + 127) / 128), dim3(128) , 0, 0, L.iiLimitsA0_d, L.iiLimitsA1_d, L.iiLimitsAL0_d, L.iiLimitsAL1_d, L.iiLimitsAR0_d, L.iiLimitsAR1_d, nlLimitsA);
hipEventRecord(iiLimitsEvent);
hipEventSynchronize(iiLimitsEvent);
long long int nTA = L.iiLimitsAT_m[1] - L.iiLimitsAT_m[0];
long long int nTAL = L.iiLimitsALT_m[1] - L.iiLimitsALT_m[0];
long long int nTAR = L.iiLimitsART_m[1] - L.iiLimitsART_m[0];
long long int nTB = L.iiLimitsBT_m[1] - L.iiLimitsBT_m[0];
long long int nTC = L.iiLimitsCT_m[1] - L.iiLimitsCT_m[0];
if(nTA < 0) nTA = 0ll;
if(nTAL < 0) nTAL = 0ll;
if(nTAR < 0) nTAR = 0ll;
if(nTB < 0) nTB = 0ll;
if(nTC < 0) nTC = 0ll;
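// nTA, nTAL, nTAR, nTB and nTC are the total numbers of grid points that can be affected by the
// A, AL, AR, B and C regimes; regimes with zero extent are skipped entirely below.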
printf("A Limits %lld %lld | %lld\n", L.iiLimitsAT_m[0], L.iiLimitsAT_m[1], nTA);
printf("AL Limits %lld %lld | %lld\n", L.iiLimitsALT_m[0], L.iiLimitsALT_m[1], nTAL);
printf("AR Limits %lld %lld | %lld\n", L.iiLimitsART_m[0], L.iiLimitsART_m[1], nTAR);
printf("B Limits %lld %lld | %lld\n", L.iiLimitsBT_m[0], L.iiLimitsBT_m[1], nTB);
printf("C Limits %lld %lld | %lld\n", L.iiLimitsCT_m[0], L.iiLimitsCT_m[1], nTC);
if(nTA > 0){
hipMemcpyAsync(L.iiLimitsA0_h, L.iiLimitsA0_d, nlLimitsA * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[0]);
hipMemcpyAsync(L.iiLimitsA1_h, L.iiLimitsA1_d, nlLimitsA * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[0]);
}
if(nTAL > 0){
hipMemcpyAsync(L.iiLimitsAL0_h, L.iiLimitsAL0_d, nlLimitsA * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[1]);
hipMemcpyAsync(L.iiLimitsAL1_h, L.iiLimitsAL1_d, nlLimitsA * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[1]);
}
if(nTAR > 0){
hipMemcpyAsync(L.iiLimitsAR0_h, L.iiLimitsAR0_d, nlLimitsA * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[2]);
hipMemcpyAsync(L.iiLimitsAR1_h, L.iiLimitsAR1_d, nlLimitsA * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[2]);
}
if(nTB > 0){
hipMemcpyAsync(L.iiLimitsB0_h, L.iiLimitsB0_d, nlLimitsB * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[3]);
hipMemcpyAsync(L.iiLimitsB1_h, L.iiLimitsB1_d, nlLimitsB * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[3]);
}
if(nTC > 0){
hipMemcpyAsync(L.iiLimitsC0_h, L.iiLimitsC0_d, nlLimitsC * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[4]);
hipMemcpyAsync(L.iiLimitsC1_h, L.iiLimitsC1_d, nlLimitsC * sizeof(long long int), hipMemcpyDeviceToHost, nuLimitsStream[4]);
}
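// The per-block index limits are copied to the host asynchronously on the nuLimits streams;
// they are needed on the host to set up the Line6*_Call kernel launches below.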
double timeOld = time[0];
long long lPartOld = lPart;
int fii = fi;
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
//read the next line file while calculating the K kernels of the current file
// **************************read iL + 1
int iLL = iL + def_maxlines;
if(iL >= m.NL[fi] - def_maxlines){
iLL = 0;
fii = fi + 1;
timeT[0] += time[0];
time[0] = 0.0;
fclose(dataFile);
sprintf(dataFilename, "%sbin", m.dataFilename[fii]);
dataFile = fopen(dataFilename, "rb");
if(dataFile == NULL){
printf("Error: line list file not found: %s\n", dataFilename);
return 0;
}
printf("Reading Line file %d of %d: %s\n", fii, fi1 - 1, dataFilename);
printf("Number of lines: %lld\n", m.NL[fii]);
}
NL1 = min(def_maxlines, m.NL[fii] - iLL);
lPart = (iLL + def_maxlines - 1) / def_maxlines;
hipEventRecord(ReadStart);
printf("Reading Line file %d of %d; part %lld of %lld with %d lines\n", fii, fi1 - 1, lPart, (m.NL[fii] + def_maxlines - 1) / def_maxlines - 1, NL);
readBufferCount = 0;
rbvs = 0;
}
hipDeviceSynchronize();
/*
for(int i = 0; i < nlLimitsA; ++i){
int ni = L.iiLimitsA1_h[i] - L.iiLimitsA0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal A %d %lld %lld | %d\n", i, L.iiLimitsA0_h[i], L.iiLimitsA1_h[i], ni);
}
for(int i = 0; i < nlLimitsA; ++i){
int ni = L.iiLimitsAL1_h[i] - L.iiLimitsAL0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal AL %d %lld %lld | %d\n", i, L.iiLimitsAL0_h[i], L.iiLimitsAL1_h[i], ni);
}
for(int i = 0; i < nlLimitsA; ++i){
int ni = L.iiLimitsAR1_h[i] - L.iiLimitsAR0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal AR %d %lld %lld | %d\n", i, L.iiLimitsAR0_h[i], L.iiLimitsAR1_h[i], ni);
}
for(int i = 0; i < nlLimitsB; ++i){
int ni = L.iiLimitsB1_h[i] - L.iiLimitsB0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal B %d %lld %lld | %d\n", i, L.iiLimitsB0_h[i], L.iiLimitsB1_h[i], ni);
}
for(int i = 0; i < nlLimitsC; ++i){
int ni = L.iiLimitsC1_h[i] - L.iiLimitsC0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal C %d %lld %lld | %d\n", i, L.iiLimitsC0_h[i], L.iiLimitsC1_h[i], ni);
}
*/
if(nTA > 0){
//A
const int nntt = 128;
for(int il = 0; il < NL; il += def_KSn * def_nlB){ //loop over lines
// *************************************
// Call A Line kernels
ntA = Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkA, Nx, tuneStream[0], 10, 1);
// *************************************
if(param.doTuning == 1){
if(ntA > 0 && ntA < 0.6 * ntAOld || ntA > 1.6 * ntAOld){
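// Retune only when the number of affected points changed substantially compared to the last tuning.
// The search below doubles (k == 0) and halves (k == 1) the per-kernel line count nk and keeps the
// fastest configuration found.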
ntAOld = ntA;
int nkt;
int nktt = nkA;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkA;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
hipEventRecord(TuneStart);
Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkt, Nx, tuneStream[1], 10, 0);
hipEventRecord(TuneStop);
hipEventSynchronize(TuneStop);
hipEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune A %d %d %d %d %g\n", il, ntA, ntAOld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkA = nktt;
printf("Selftune A %d\n", nktt);
}
}
}
hipEventRecord(AEvent, tuneStream[0]);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list A
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
// 2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
//check if the A kernels have finished, otherwise use host to read more data
int ev = hipEventQuery(AEvent);
if(ev == 0) break;
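// hipEventQuery returns hipSuccess (0) once the kernels recorded before AEvent have finished;
// in that case stop reading here so that the next kernel stage is not delayed.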
//printf("read A %d %d\n", readBufferCount, ev);
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
}
if(param.profile == 1){ //only for voigt profiles
if(nTAL > 0){
//AL
const int nntt = 128;
for(int il = 0; il < NL; il += def_KSn * def_nlB){ //loop over lines
// *************************************
// Call AL Line kernels
ntAL = Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkAL, Nx, tuneStream[0], 11, 1);
// *************************************
if(param.doTuning == 1){
if(ntAL > 0 && ntAL < 0.6 * ntALOld || ntAL > 1.6 * ntALOld){
ntALOld = ntAL;
int nkt;
int nktt = nkAL;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkAL;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
hipEventRecord(TuneStart);
Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkt, Nx, tuneStream[1], 11, 0);
hipEventRecord(TuneStop);
hipEventSynchronize(TuneStop);
hipEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune AL %d %d %d %d %g\n", il, ntAL, ntALOld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkAL = nktt;
printf("Selftune AL %d\n", nktt);
}
}
}
hipEventRecord(ALEvent, tuneStream[0]);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list AL
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
//2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
//check if the AL kernels have finished, otherwise use host to read more data
int ev = hipEventQuery(ALEvent);
if(ev == 0) break;
//printf("read AL %d %d\n", readBufferCount, ev);
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
}
if(nTAR > 0){
//AR
const int nntt = 128;
for(int il = 0; il < NL; il += def_KSn * def_nlB){ //loop over lines
// *************************************
// Call AR Line kernels
ntAR = Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkAR, Nx, tuneStream[0], 12, 1);
// *************************************
if(param.doTuning == 1){
if(ntAR > 0 && ntAR < 0.6 * ntAROld || ntAR > 1.6 * ntAROld){
ntAROld = ntAR;
int nkt;
int nktt = nkAR;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkAR;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
hipEventRecord(TuneStart);
Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkt, Nx, tuneStream[1], 12, 0);
hipEventRecord(TuneStop);
hipEventSynchronize(TuneStop);
hipEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune AR %d %d %d %d %g\n", il, ntAR, ntAROld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkAR = nktt;
printf("Selftune AR %d\n", nktt);
}
}
}
hipEventRecord(AREvent, tuneStream[0]);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list AR
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
//2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
//check if the AR kernels have finished, otherwise use host to read more data
int ev = hipEventQuery(AREvent);
if(ev == 0) break;
//printf("read AR %d %d\n", readBufferCount, ev);
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
}
//hipDeviceSynchronize();
if(nTB > 0){
// B
const int nntt2 = 128;
for(int il = 0; il < NL; il += def_KSn * def_nlB){ //loop over lines
// *************************************
// Call B Line kernels
ntB = Line6B_Call(L, param, KS_d, x_d, il, NL, nntt2, nkB, Nx, tuneStream[0], 1);
// *************************************
if(param.doTuning == 1){
if(ntB > 0 && ntB < 0.6 * ntBOld || ntB > 1.6 * ntBOld){
ntBOld = ntB;
int nkt;
int nktt = nkB;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkB;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
hipEventRecord(TuneStart);
Line6B_Call(L, param, KS_d, x_d, il, NL, nntt2, nkt, Nx, tuneStream[1], 0);
hipEventRecord(TuneStop);
hipEventSynchronize(TuneStop);
hipEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune B %d %d %d %d %g\n", il, ntB, ntBOld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkB = nktt;
printf("Selftune B %d\n", nktt);
}
}
}
hipEventRecord(BEvent, tuneStream[0]);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list B
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
//2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
//check if the B kernels have finished, otherwise use host to read more data
int ev = hipEventQuery(BEvent);
if(ev == 0) break;
//printf("read B %d %d\n", readBufferCount, ev);
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
}
//C
if(nTC > 0){
//search higher order regimes of the Voigt profile
const int nntt3 = 128;
float a = (float)(M_PI * sqrt(-1.0 / log(def_TOLF * 0.5)));
float b = (float)(1.0 / sqrt(M_PI));
float c = (float)(2.0 * a / M_PI);
for(int il = 0; il < NL; il += def_KSn * def_nlC){ //loop over lines
// *************************************
// Call C Line kernels
ntC = Line6C_Call(L, param, KS_d, x_d, il, NL, nntt3, nkC, Nx, a, b, c, tuneStream[0],1);
// *************************************
if(param.doTuning == 1){
if(ntC > 0 && ntC < 0.6 * ntCOld || ntC > 1.6 * ntCOld){
ntCOld = ntC;
int nkt;
int nktt = nkC;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkC;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
hipEventRecord(TuneStart);
Line6C_Call(L, param, KS_d, x_d, il, NL, nntt3, nkt, Nx, a, b, c, tuneStream[1],0);
hipEventRecord(TuneStop);
hipEventSynchronize(TuneStop);
hipEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune C %d %d %d %d %g\n", il, ntC, ntCOld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkC = nktt;
printf("Selftune C %d\n", nktt);
}
}
}
}
} //end profile 1
//Add now all streams together
printf("Add streams A\n");
error = hipGetLastError();
if(error != 0){
printf("K error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipEventRecord(KStop);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list end
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
//2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
if(param.dataBase < 2 || param.dataBase == 3){
// 0 1 3
er = readFile(param, m, part, L, param.qalphaL, NL1, dataFile, Sscale, meanMass);
if(er == 0){
return 0;
}
}
printf("Reading Line file %d, part %lld complete\n", fii, lPart);
hipEventRecord(ReadStop);
hipEventSynchronize(ReadStop);
hipEventElapsedTime(&milliseconds, ReadStart, ReadStop);
time[0] += milliseconds * 0.001;
printf("Time for input, %d %lld: %g seconds\n", fii, lPart, time[0]);
// **************************read iL + 1
NL = NL1;
for(int i = 0; i < def_rBs; ++i){
hipStreamSynchronize(CStream[i]);
}
}
//wait until all KS streams are complete
hipDeviceSynchronize();
//collect streams and store all KS_d into K_d
//set KS_d to zero
hipLaunchKernelGGL(( AddKStreams_kernel) , dim3((Nx + 511) / 512), dim3(512) , 0, 0, K_d + iP * Nx, KS_d, def_KSn, Nx);
printf("Add streams B\n");
// *************************************
//synchronize here only if no more data has to be read from the disk.
//otherwise read data before synchronization
hipEventSynchronize(KStop);
hipEventElapsedTime(&milliseconds, KStart, KStop);
time[2] += milliseconds * 0.001;
printf("Time for K(x): %d %lld %d: %g seconds\n", fi, lPartOld, iP, time[2]);
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Kb error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
if(iL >= m.NL[fi] - def_maxlines && iP == param.nP - 1){
InfoFile = fopen(InfoFilename, "a");
fprintf(InfoFile,"File %d of %d\n", fi, fi1);
fprintf(InfoFile,"Number of lines: %lld\n", m.NL[fi]);
fprintf(InfoFile,"Time for input: %g seconds\n", timeOld);
fprintf(InfoFile,"Time for Lines: %g seconds\n", time[1]);
fprintf(InfoFile,"Time for K(x): %g seconds\n", time[2]);
fclose(InfoFile);
}
} // End of pressure loop
} // End of maxLines loop
} // End of linefile loop
if(fi1 > fi0){
fclose(dataFile);
}
}
if(param.dataBase < 2 || param.dataBase == 3){
// 0 1 3
free_Line(L, param);
}
else{
// 2 20 30 31 32
free2_Line(L, param);
}
} //end species loop
printf("\n");
printf("Time for input total: %g seconds\n", timeT[0]);
printf("Time for Lines total: %g seconds\n", timeT[1]);
printf("Time for K(x) total: %g seconds\n", timeT[2]);
free(binBoundaries_h);
hipFree(binIndex_d);
hipFree(binBoundaries_d);
hipFree(KS_d);
hipEventRecord(tt1, 0);
for(int i = 0; i < def_KSn; ++i){
hipStreamDestroy(VStream[i]);
}
for(int i = 0; i < def_rBs; ++i){
hipStreamDestroy(CStream[i]);
}
for(int i = 0; i < 2; ++i){
hipStreamDestroy(tuneStream[i]);
}
for(int i = 0; i < 5; ++i){
hipStreamDestroy(nuLimitsStream[i]);
}
// ****************************
// Write the full line profile
// ****************************
if(param.doStoreFullK == 1){
FILE *OutFile;
char OutFilename[300];
sprintf(OutFilename, "Out_%s.dat", param.name);
OutFile = fopen(OutFilename, filemode);
for(int iP = 0; iP < param.nP; ++iP){
hipMemcpy(K_h, K_d + iP * Nx, Nx * sizeof(double), hipMemcpyDeviceToHost);
for(int j = 0; j < Nx; ++j){
if(param.nP == 1){
fprintf(OutFile, "%.20g %.20g\n", x_h[j], K_h[j] * unitScale);
}
else{
fprintf(OutFile, "%.20g %.20g %.20g %.20g\n", x_h[j], K_h[j] * unitScale, param.T, P_h[iP]);
}
}
fprintf(OutFile, "\n\n");
}
fclose(OutFile);
}
if(param.doStoreFullK == -1){
FILE *OutFile;
char OutFilename[500];
sprintf(OutFilename, "%sOut_%s.dat", param.pathK, param.name);
OutFile = fopen(OutFilename, "r");
if(OutFile == NULL){
printf("Error: Input file not found %s\n", OutFilename);
return 0;
}
for(int iP = 0; iP < param.nP; ++iP){
for(int j = 0; j < Nx; ++j){
if(param.nP == 1){
double k;
fscanf(OutFile, "%lf %lf\n", &x_h[j], &k);
K_h[j] = k / unitScale;
}
else{
double k, t, p;
fscanf(OutFile, "%lf %lf %lf %lf\n", &x_h[j], &k, &t, &p);
K_h[j] = k / unitScale;
}
}
hipMemcpy(K_d + iP * Nx, K_h, Nx * sizeof(double), hipMemcpyHostToDevice);
fscanf(OutFile, "\n\n");
}
fclose(OutFile);
}
if(param.doStoreFullK == 2){
//write a binary file in single precision
FILE *OutFile;
char OutFilename[300];
sprintf(OutFilename, "Out_%s.bin", param.name);
if(param.replaceFiles == 0){
OutFile = fopen(OutFilename, "ab");
}
else{
OutFile = fopen(OutFilename, "wb");
}
for(int iP = 0; iP < param.nP; ++iP){
hipMemcpy(K_h, K_d + iP * Nx, Nx * sizeof(double), hipMemcpyDeviceToHost);
for(int j = 0; j < Nx; ++j){
float Kf = (float)(K_h[j] * unitScale);
fwrite(&Kf, sizeof(float), 1, OutFile);
}
}
fclose(OutFile);
}
if(param.doStoreFullK == -2){
//read a binary file
FILE *OutFile;
char OutFilename[500];
sprintf(OutFilename, "%sOut_%s.bin", param.pathK, param.name);
OutFile = fopen(OutFilename, "rb");
if(OutFile == NULL){
printf("Error: Input file not found %s\n", OutFilename);
return 0;
}
for(int iP = 0; iP < param.nP; ++iP){
for(int j = 0; j < Nx; ++j){
float Kf;
fread(&Kf, sizeof(float), 1, OutFile);
K_h[j] = (double)(Kf) / unitScale;
}
hipMemcpy(K_d + iP * Nx, K_h, Nx * sizeof(double), hipMemcpyHostToDevice);
}
fclose(OutFile);
}
// *******************************
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Write error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipEventRecord(tt2, 0);
hipEventSynchronize(tt2);
hipEventElapsedTime(&milliseconds, tt1, tt2);
time[3] += milliseconds * 0.001;
printf("Time for write K(x): %g seconds\n", time[3]);
hipEventRecord(tt1, 0);
// **************************************
// compute the Planck and Rosseland means
// **************************************
if(param.doMean > 0){
double *Pmn_d;
double *Rmn_d;
hipMalloc((void **) &Pmn_d, Nx * sizeof(double));
hipMalloc((void **) &Rmn_d, Nx * sizeof(double));
double *means_h, *means_d;
means_h = (double*)malloc(4 * sizeof(double));
hipMalloc((void **) &means_d, 4 * sizeof(double));
FILE *Out4File;
char Out4Filename[300];
sprintf(Out4Filename, "Out_%s_mean.dat", param.name);
Out4File = fopen(Out4Filename, filemode);
for(int iP = 0; iP < param.nP; ++iP){
hipLaunchKernelGGL(( Mean_kernel) , dim3((Nx + 511) / 512), dim3(512) , 0, 0, x_d, Pmn_d, Rmn_d, param.T, Nx);
/*
printf("\n\n");
hipMemcpy(K_h, Pmn_d, Nx * sizeof(double), hipMemcpyDeviceToHost);
for(int i = 0; i < Nx; ++i){
printf("%g %g\n", param.numin + i * param.dnu, K_h[i]);
}
printf("\n\n");
hipMemcpy(K_h, Rmn_d, Nx * sizeof(double), hipMemcpyDeviceToHost);
for(int i = 0; i < Nx; ++i){
printf("%g %g\n", param.numin + i * param.dnu, K_h[i]);
}
printf("\n\n");
*/
hipLaunchKernelGGL(( IntegrateMean_kernel <512>) , dim3(4), dim3(512) , 0, 0, Pmn_d, Rmn_d, x_d, K_d + iP * Nx, means_d, Nx, param.useIndividualX);
double sigma = 2.0 * def_kB * def_kB * def_kB * def_kB / ( def_h * def_h * def_h * def_c * def_c * 15.0) * M_PI * M_PI * M_PI * M_PI * M_PI;
double integral1 = sigma * param.T * param.T * param.T * param.T / M_PI;
double integral2 = M_PI / (4.0 * sigma * param.T * param.T * param.T);
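// integral1 = sigma * T^4 / pi is the frequency-integrated Planck function (Planck-mean normalization);
// integral2 = pi / (4 * sigma * T^3) is the inverse of the frequency-integrated dB/dT (Rosseland-mean normalization).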
hipMemcpy(means_h, means_d, 4 * sizeof(double), hipMemcpyDeviceToHost);
if(param.nP == 1){
fprintf(Out4File, "%.20g\n", means_h[0] / means_h[2]);
fprintf(Out4File, "%.20g\n", means_h[3] / means_h[1]);
fprintf(Out4File, "%.20g\n", means_h[2] * param.dnu);
fprintf(Out4File, "%.20g\n", integral1);
fprintf(Out4File, "%.20g\n", means_h[3] * param.dnu);
fprintf(Out4File, "%.20g\n", 1.0 / integral2);
}
else{
fprintf(Out4File, "%.20g %.20g %.20g\n", means_h[0] / means_h[2], param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", means_h[3] / means_h[1], param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", means_h[2], param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", integral1, param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", means_h[3], param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", 1.0 / integral2, param.T, P_h[iP]);
}
//fprintf(Out4File, "\n\n");
}
fclose(Out4File);
free(means_h);
hipFree(means_d);
hipFree(Pmn_d);
hipFree(Rmn_d);
}
hipFree(x_d);
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("maen error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipEventRecord(tt2, 0);
hipEventSynchronize(tt2);
hipEventElapsedTime(&milliseconds, tt1, tt2);
time[4] += milliseconds * 0.001;
printf("Time for mean K(x): %g seconds\n", time[4]);
hipEventRecord(tt1, 0);
// ***************************************
// Do the sorting of K for all bins
// ***************************************
thrust::device_ptr<double> K_dt = thrust::device_pointer_cast(K_d);
thrust::device_ptr<int> binKey_dt = thrust::device_pointer_cast(binKey_d);
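// Two-pass sort per pressure level: sort all opacity values globally (carrying their bin keys),
// then stable-sort by bin key, which leaves the values sorted in ascending order within each bin.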
for(int iP = 0; iP < param.nP; ++iP){
thrust::sort_by_key(K_dt + iP * Nx, K_dt + Nx + iP * Nx, binKey_dt);
thrust::stable_sort_by_key(binKey_dt, binKey_dt + Nx, K_dt + iP * Nx);
}
hipFree(binKey_d);
// ****************************************
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Sort error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipEventRecord(tt2, 0);
hipEventSynchronize(tt2);
hipEventElapsedTime(&milliseconds, tt1, tt2);
time[5] += milliseconds * 0.001;
printf("Time for sort K(x): %g seconds\n", time[5]);
hipEventRecord(tt1, 0);
// *********************************
// Prepare Resampling and do QR factorization, the same for all bins
// this doesn't work with individual bins
// *********************************
//size_t free_byte;
//size_t total_byte;
//hipMemGetInfo( &free_byte, &total_byte );
//printf("***MEMORY %g %g %g\n", (double)(free_byte), (double)(total_byte), (double)(total_byte) - (double)(free_byte));
int *Nxmin_h, *Nxmin_d;
Nxmin_h = (int*)malloc(param.nbins * sizeof(int));
hipMalloc((void **) &Nxmin_d, param.nbins * sizeof(int));
for(int i = 0; i < param.nbins; ++i){
Nxmin_h[i] = 0;
}
hipMemset(Nxmin_d, 0, param.nbins * sizeof(int));
if(param.doResampling > 0){
double *K2_h, *K2_d;
K2_h = (double*)malloc(Nx * sizeof(double));
hipMalloc((void **) &K2_d, Nx * sizeof(double));
//hipMemGetInfo( &free_byte, &total_byte );
//printf("***MEMORY %g %g %g\n", (double)(free_byte), (double)(total_byte), (double)(total_byte) - (double)(free_byte));
double *V_d; //Vandermonde-like matrix for least squares
double *C_d, *D_d;
hipMalloc((void **) &V_d, param.nC * param.Nxb * sizeof(double));
hipMalloc((void **) &C_d, param.nC * sizeof(double));
hipMalloc((void **) &D_d, param.nC * sizeof(double));
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Resampling Allocation error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipLaunchKernelGGL(( Vandermonde_kernel) , dim3((param.Nxb + 511) / 512), dim3(512) , 0, 0, V_d, (double)(param.Nxb), param.nC);
hipLaunchKernelGGL(( QR_kernel <512>) , dim3(1), dim3(512) , 0, 0, V_d, C_d, D_d, param.Nxb, param.nC);
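// The Vandermonde-like design matrix and its QR factorization depend only on Nxb and nC,
// so they are computed once here and reused for the least-squares fit in every bin and pressure level.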
FILE *Out3File;
char Out3Filename[300];
if(param.doResampling == 1){
sprintf(Out3Filename, "Out_%s_cbin.dat", param.name);
Out3File = fopen(Out3Filename, filemode);
}
if(param.doResampling == 2){
if(param.replaceFiles == 1){
for(int i = 0; i < param.nbins; ++i){
sprintf(Out3Filename, "Out_%s_cbin%.4d.dat", param.name, i);
Out3File = fopen(Out3Filename, "w");
fclose(Out3File);
}
}
sprintf(Out3Filename, "Out_%s_cbin%.4d.dat", param.name, 0);
Out3File = fopen(Out3Filename, "a");
}
for(int iP = 0; iP < param.nP; ++iP){
if(param.doResampling == 2 && iP > 0){
fclose(Out3File);
sprintf(Out3Filename, "Out_%s_cbin%.4d.dat", param.name, 0);
Out3File = fopen(Out3Filename, "a");
}
hipMemset(K2_d, 0, Nx * sizeof(double));
hipMemset(Nxmin_d, 0, param.nbins * sizeof(int));
hipLaunchKernelGGL(( findCut_kernel) , dim3((Nx + 511) / 512), dim3(512) , 0, 0, K_d + iP * Nx, Nx, param.Nxb, param.kmin, Nxmin_d, param.nbins);
hipLaunchKernelGGL(( rescale_kernel < 512 >) , dim3(param.nbins), dim3(512) , 0, 0, Nxmin_d, K_d + iP * Nx, K2_d, param.Nxb, param.kmin, 1);
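// findCut determines per bin how many of the sorted points lie at the kmin floor (stored in Nxmin_d);
// rescale (direction +1) then appears to stretch the remaining points across the bin before the fit;
// the inverse rescale (direction -1) is applied again further below.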
/*
hipMemcpy(K2_h, K2_d, Nx * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(K_h, K_d + iP * Nx, Nx * sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//printf only cut and empty bins
for(int i = 0; i < param.nbins; ++i){
int il = i * param.Nxb;
if(K_h[il] == param.kmin){
for(int j = 0; j < param.Nxb; ++j){
// printf("%g %.20g\n", j / (double)(param.Nxb), K2_h[j + il]);
}
// printf("\n\n");
}
}
//print all bins
for(int i = 0; i < Nx; ++i){
printf("%d %.20g %.20g\n", i, K_h[i], K2_h[i]);
}
*/
hipLaunchKernelGGL(( copyK2_kernel< 512 >) , dim3(param.nbins), dim3(512) , 0, 0, Nxmin_d, K_d + iP * Nx, K2_d, param.Nxb);
hipMemcpy(Nxmin_h, Nxmin_d, param.nbins * sizeof(int), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( lnK_kernel) , dim3((Nx + 511) / 512), dim3(512) , 0, 0, K_d + iP * Nx, Nx);
hipLaunchKernelGGL(( leastSquare_kernel <512>) , dim3(param.nbins), dim3(512) , 0, 0, V_d, C_d, D_d, K_d + iP * Nx, param.Nxb, param.nC);
for(int i = 0; i < param.nbins; ++i){
int il = i * param.Nxb;
hipMemcpy(K_h + il, K_d + il + iP * Nx, param.nC * sizeof(double), hipMemcpyDeviceToHost);
fprintf(Out3File, "%.20g %.20g ", param.kmin, fmin(Nxmin_h[i] / ((double)(param.Nxb - 1)), 1.0));
for(int ic = 0; ic < param.nC; ++ic){
if(Nxmin_h[i] != param.Nxb) fprintf(Out3File, "%.20g ", K_h[il + ic]);
else fprintf(Out3File, "0.0 ");
}
if(param.nP > 1){
fprintf(Out3File, "%.20g %.20g ", param.T, P_h[iP]);
}
if(param.doResampling == 1){
fprintf(Out3File, "\n\n");
}
if(param.doResampling == 2 && i < param.nbins - 1){
fprintf(Out3File, "\n");
fclose(Out3File);
sprintf(Out3Filename, "Out_%s_cbin%.4d.dat", param.name, i + 1);
Out3File = fopen(Out3Filename, "a");
}
}
//fprintf(Out3File, "\n\n");
if(param.doTransmission > 0 || param.doStoreK > 0){
hipLaunchKernelGGL(( expfx_kernel) , dim3(param.nbins), dim3(512) , 0, 0, K_d + iP * Nx, param.nC, param.Nxb);
hipLaunchKernelGGL(( rescale_kernel < 512 >) , dim3(param.nbins), dim3(512) , 0, 0, Nxmin_d, K_d + iP * Nx, K2_d, param.Nxb, param.kmin, -1);
hipLaunchKernelGGL(( copyK2_kernel< 512 >) , dim3(param.nbins), dim3(512) , 0, 0, Nxmin_d, K_d + iP * Nx, K2_d, param.Nxb);
}
}
fclose(Out3File);
hipFree(V_d);
hipFree(C_d);
hipFree(D_d);
hipFree(K2_d);
free(K2_h);
}
// **********************************
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Resampling error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipEventRecord(tt2, 0);
hipEventSynchronize(tt2);
hipEventElapsedTime(&milliseconds, tt1, tt2);
time[6] += milliseconds * 0.001;
printf("Time for Resampling: %g seconds\n", time[6]);
hipEventRecord(tt1, 0);
// *****************************
// Write K per bin output
// *****************************
if(param.doStoreK > 0){
FILE *Out2File;
char Out2Filename[300];
if(param.doStoreK == 1){
sprintf(Out2Filename, "Out_%s_bin.dat", param.name);
Out2File = fopen(Out2Filename, filemode);
}
if(param.doStoreK == 2){
if(param.replaceFiles == 1){
for(int i = 0; i < param.nbins; ++i){
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, i);
Out2File = fopen(Out2Filename, "w");
fclose(Out2File);
}
}
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, 0);
Out2File = fopen(Out2Filename, "a");
}
for(int iP = 0; iP < param.nP; ++iP){
if(param.doStoreK == 2 && iP > 0){
fclose(Out2File);
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, 0);
Out2File = fopen(Out2Filename, "a");
}
hipMemcpy(K_h, K_d + iP * Nx, Nx * sizeof(double), hipMemcpyDeviceToHost);
if(param.useIndividualBins == 0){
for(int i = 0; i < param.nbins; ++i){
int Nxb = param.Nxb;
int il = i * Nxb;
int iedge = 0; //index of edge
int nedge = 0; //number of points per edge interval
double sedge = 0.0; //sum of points in edge interval
for(int j = 0; j < Nxb; ++j){
double y = j / ((double)(Nxb - 1));
double y1 = (j + 1) / ((double)(Nxb - 1));
if(param.useOutputEdges == 0){
if(param.nP == 1){
fprintf(Out2File, "%g %.20g\n", y, K_h[j + il] * unitScale);
}
else{
fprintf(Out2File, "%g %.20g %g %g %d\n", y, K_h[j + il] * unitScale, param.T, P_h[iP], j);
}
}
else{
double edge = outputEdges_h[iedge];
++nedge;
sedge += K_h[j + il] * unitScale;
if(y <= edge && edge <= y1 && iedge < param.nedges){
if(param.nP == 1){
if(iedge > 0) fprintf(Out2File, "%g %.20g\n", 0.5 * (edge + outputEdges_h[iedge - 1]), sedge / ((double)(nedge)));
}
else{
if(iedge > 0) fprintf(Out2File, "%g %.20g %g %g %d\n", 0.5 * (edge + outputEdges_h[iedge - 1]), sedge / ((double)(nedge)), param.T, P_h[iP], iedge - 1);
}
++iedge;
nedge = 0;
sedge = 0.0;
}
}
}
if(param.doStoreK == 1){
fprintf(Out2File,"\n\n");
}
if(param.doStoreK == 2 && i < param.nbins - 1){
fclose(Out2File);
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, i + 1);
Out2File = fopen(Out2Filename, "a");
}
}
}
else{
int ib = 0;
int j = 0;
int iedge = 0; //index of edge
int nedge = 0; //number of points per edge interval
double sedge = 0.0; //sum of points in edge interval
for(int i = 0; i < Nx; ++i){
int il = binIndex_h[ib];
int ir = binIndex_h[ib + 1];
int Nxb = ir - il;
double y = j / ((double)(Nxb - 1));
double y1 = (j + 1) / ((double)(Nxb - 1));
if(param.useOutputEdges == 0){
if(param.nP == 1){
fprintf(Out2File, "%g %.20g\n", y, K_h[i] * unitScale);
}
else{
fprintf(Out2File, "%g %.20g %.20g %.20g %d\n", y, K_h[i] * unitScale, param.T, P_h[iP], j);
}
}
else{
double edge = outputEdges_h[iedge];
++nedge;
sedge += K_h[i] * unitScale;
if(y <= edge && edge <= y1 && iedge < param.nedges){
if(param.nP == 1){
if(iedge > 0) fprintf(Out2File, "%g %.20g\n", 0.5 * (edge + outputEdges_h[iedge - 1]), sedge / ((double)(nedge)));
}
else{
if(iedge > 0) fprintf(Out2File, "%g %.20g %.20g %.20g %d\n", 0.5 * (edge + outputEdges_h[iedge - 1]), sedge / ((double)(nedge)), param.T, P_h[iP], iedge - 1);
}
++iedge;
nedge = 0;
sedge = 0.0;
}
}
++j;
if(i >= ir - 1){
//printf("%d %d %d %d\n", ib, il, ir, Nxb);
++ib;
j = 0;
if(param.doStoreK == 1){
fprintf(Out2File,"\n\n");
}
if(param.doStoreK == 2 && ib < param.nbins){
fclose(Out2File);
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, ib);
Out2File = fopen(Out2Filename, "a");
}
iedge = 0;
}
if(ib >= param.nbins){
break;
}
}
}
}//end of P loop
fclose(Out2File);
}
// ******************************
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Write error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipEventRecord(tt2, 0);
hipEventSynchronize(tt2);
hipEventElapsedTime(&milliseconds, tt1, tt2);
time[7] += milliseconds * 0.001;
printf("Time for write K(y): %g seconds\n", time[7]);
hipEventRecord(tt1, 0);
//set correction factor for Simpson's rule needed for resampling
SimpsonCoefficient();
// *********************************
// Calculate the Transmission function
// *********************************
if(param.doTransmission > 0 ){
double *Tr_h, *Tr_d;
Tr_h = (double*)malloc(param.nbins * param.nTr * sizeof(double));
hipMalloc((void **) &Tr_d, param.nbins * param.nTr * sizeof(double));
FILE *Out3File;
char Out3Filename[300];
if(param.doTransmission == 1){
sprintf(Out3Filename, "Out_%s_tr.dat", param.name);
Out3File = fopen(Out3Filename, filemode);
}
if(param.doTransmission == 2){
if(param.replaceFiles == 1){
for(int i = 0; i < param.nbins; ++i){
sprintf(Out3Filename, "Out_%s_tr%.4d.dat", param.name, i);
Out3File = fopen(Out3Filename, "w");
fclose(Out3File);
}
}
sprintf(Out3Filename, "Out_%s_tr%.4d.dat", param.name, 0);
Out3File = fopen(Out3Filename, "a");
}
for(int iP = 0; iP < param.nP; ++iP){
if(param.doTransmission == 2 && iP > 0){
fclose(Out3File);
sprintf(Out3Filename, "Out_%s_tr%.4d.dat", param.name, 0);
Out3File = fopen(Out3Filename, "a");
}
hipLaunchKernelGGL(( Integrate_kernel < 512 >) , dim3(param.nbins), dim3(512) , 0, 0, K_d + iP * Nx, Tr_d, param.Nxb, param.nTr, param.dTr, Nxmin_d, param.kmin);
hipMemcpy(Tr_h, Tr_d, param.nbins * param.nTr * sizeof(double), hipMemcpyDeviceToHost);
for(int i = 0; i < param.nbins; ++i){
for(int j = 0; j < param.nTr; ++j){
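// m is the column-mass grid of the transmission function: nTr points,
// logarithmically spaced around 1 with spacing dTr in log space.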
double m = exp((j - param.nTr/2) * param.dTr);
if(param.nP == 1){
fprintf(Out3File, "%.20g %.20g\n", m, Tr_h[i * param.nTr + j]);
}
else{
fprintf(Out3File, "%.20g %.20g %.20g %.20g %d\n", m, Tr_h[i * param.nTr + j], param.T, P_h[iP], j);
}
}
if(param.doTransmission == 1){
fprintf(Out3File, "\n\n");
}
if(param.doTransmission == 2 && i < param.nbins - 1){
fclose(Out3File);
sprintf(Out3Filename, "Out_%s_tr%.4d.dat", param.name, i + 1);
Out3File = fopen(Out3Filename, "a");
}
}
}
fclose(Out3File);
free(Tr_h);
hipFree(Tr_d);
}
hipDeviceSynchronize();
error = hipGetLastError();
if(error != 0){
printf("Transmission error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
hipEventRecord(tt2, 0);
hipEventSynchronize(tt2);
hipEventElapsedTime(&milliseconds, tt1, tt2);
time[8] += milliseconds * 0.001;
printf("Time for Transmission: %g seconds\n", time[8]);
InfoFile = fopen(InfoFilename, "a");
fprintf(InfoFile,"\n");
fprintf(InfoFile,"Time for input total: %g seconds\n", timeT[0]);
fprintf(InfoFile,"Time for Lines total: %g seconds\n", timeT[1]);
fprintf(InfoFile,"Time for K(x) total: %g seconds\n", timeT[2]);
fprintf(InfoFile,"Time for write K(x): %g seconds\n", time[3]);
fprintf(InfoFile,"Time for mean K(x): %g seconds\n", time[4]);
fprintf(InfoFile,"Time for sort K(x): %g seconds\n", time[5]);
fprintf(InfoFile,"Time for Resampling: %g seconds\n", time[6]);
fprintf(InfoFile,"Time for write K(y): %g seconds\n", time[7]);
fprintf(InfoFile,"Time for Transmission: %g seconds\n", time[8]);
fclose(InfoFile);
free(K_h);
free(x_h);
free(Nxmin_h);
free(outputEdges_h);
free(binIndex_h);
hipFree(K_d);
hipFree(Nxmin_d);
error = hipGetLastError();
printf("Final error = %d = %s\n",error, hipGetErrorString(error));
return 0;
}
| 8d55eecea90032ee6f78c59f8ce33a1b858a1aaf.cu | #include "define.h" //must be on top for Windows compilation
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include "host.h"
#include "ISO.h"
#include "voigt.h"
#include "resample.h"
/*
// runs with bilinear interpolation
// texDescr.filterMode = cudaFilterModeLinear;
__global__ void Voigt_texture_kernel(cudaTextureObject_t K2dtex, float *K_d, int Nx, int Ny, int Nxtex, int Nytex, size_t pitch){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < Nx && idy < Ny){
float x = idx * Nxtex / float(Nx);
float y = idy * Nytex / float(Ny);
//float x = idx / float(Nx);
//float y = idy / float(Ny);
float K = tex2D <float> (K2dtex, x + 0.5f , y + 0.5f);
float *row = (float *)(((char *)K_d)+(idy*pitch));
row[idx] = K;
//if(idy == 0) printf("%d %d %f %f %f\n", idx, idy, x * 10.0f, y * 10.0f, K);
}
}
// runs with manual bilinear interpolation
// texDescr.filterMode = cudaFilterModePoint;
__global__ void Voigt_textureb_kernel(cudaTextureObject_t K2dtex, float *K_d, int Nx, int Ny, int Nxtex, int Nytex, size_t pitch){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < Nx && idy < Ny){
float x = idx * Nxtex / float(Nx);
float y = idy * Nytex / float(Ny);
float K00 = tex2D <float> (K2dtex, x, y);
float K10 = tex2D <float> (K2dtex, x + 1.0f, y);
float K01 = tex2D <float> (K2dtex, x, y + 1.0f);
float K11 = tex2D <float> (K2dtex, x + 1.0f, y + 1.0f);
float xx = (idx % (Nx / Nxtex)) * Nxtex / float(Nx);
float yy = (idy % (Ny / Nytex)) * Nytex / float(Ny);
float K = (1.0f - xx) * ( 1.0f - yy) * K00 + xx * (1.0f - yy) * K10 + (1.0f - xx) * yy * K01 + xx * yy * K11;
float *row = (float *)(((char *)K_d)+(idy*pitch));
row[idx] = K;
//if(idy == 0) printf("%d %d %f %f | %f %f | %f %f %f %f %f\n", idx, idy, x * 10.0f / Nx, y * 10.0f / Ny, xx, yy, K00, K10, K01, K11, K);
}
}
// runs with manual bilinear interpolation
// texDescr.filterMode = cudaFilterModePoint;
__global__ void Voigt_b_kernel(float *K2d_d, float *K_d, int Nx, int Ny, int Nxtex, int Nytex, size_t pitch){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx < Nx && idy < Ny){
int x = floor(idx * Nxtex / float(Nx));
int y = floor(idy * Nytex / float(Ny));
float *row1 = (float *)(((char *)K_d)+(y*pitch)) + x;
float K00 = *row1;
float *row2 = (float *)(((char *)K_d)+(y*pitch)) + x + 1;
float K10 = *row2;
float *row3 = (float *)(((char *)K_d)+((y + 1)*pitch)) + x;
float K01 = *row3;
float *row4 = (float *)(((char *)K_d)+((y + 1)*pitch)) + x + 1;
float K11 = *row4;
float xx = (idx % (Nx / Nxtex)) * Nxtex / float(Nx);
float yy = (idy % (Ny / Nytex)) * Nytex / float(Ny);
float K = (1.0f - xx) * ( 1.0f - yy) * K00 + xx * (1.0f - yy) * K10 + (1.0f - xx) * yy * K01 + xx * yy * K11;
float *row = (float *)(((char *)K_d)+(idy*pitch));
row[idx] = K;
//if(idy == 0) printf("%d %d %f %f | %f %f | %f %f %f %f %f\n", idx, idy, x * 10.0f / Nx, y * 10.0f / Ny, xx, yy, K00, K10, K01, K11, K);
}
}
//https://stackoverflow.com/questions/34622717/bicubic-interpolation-in-c
__device__ float cubic_hermite(float A, float B, float C, float D, float t){
float a = -A / 2.0f + (3.0f * B) / 2.0f - (3.0f * C) / 2.0f + D / 2.0f;
float b = A - (5.0f * B) / 2.0f + 2.0f * C - D / 2.0f;
float c = -A / 2.0f + C / 2.0f;
float d = B;
float tt = t * t;
return a * t* tt + b * tt + c * t + d;
}
// runs with manual bilinear interpolation
// texDescr.filterMode = cudaFilterModePoint;
__global__ void Voigt_bicubic_kernel(cudaTextureObject_t K2dtex, float *K_d, int Nx, int Ny, int Nxtex, int Nytex, size_t pitch){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx > 0 && idy > 0 && idx < Nx - 1&& idy < Ny - 1){
float x = idx * Nxtex / float(Nx);
float y = idy * Nytex / float(Ny);
float K00 = tex2D <float> (K2dtex, x - 1.0f, y - 1.0f);
float K10 = tex2D <float> (K2dtex, x , y - 1.0f);
float K20 = tex2D <float> (K2dtex, x + 1.0f, y - 1.0f);
float K30 = tex2D <float> (K2dtex, x + 2.0f, y - 1.0f);
float K01 = tex2D <float> (K2dtex, x - 1.0f, y);
float K11 = tex2D <float> (K2dtex, x , y);
float K21 = tex2D <float> (K2dtex, x + 1.0f, y);
float K31 = tex2D <float> (K2dtex, x + 2.0f, y);
float K02 = tex2D <float> (K2dtex, x - 1.0f, y + 1.0f);
float K12 = tex2D <float> (K2dtex, x , y + 1.0f);
float K22 = tex2D <float> (K2dtex, x + 1.0f, y + 1.0f);
float K32 = tex2D <float> (K2dtex, x + 2.0f, y + 1.0f);
float K03 = tex2D <float> (K2dtex, x - 1.0f, y + 2.0f);
float K13 = tex2D <float> (K2dtex, x , y + 2.0f);
float K23 = tex2D <float> (K2dtex, x + 1.0f, y + 2.0f);
float K33 = tex2D <float> (K2dtex, x + 2.0f, y + 2.0f);
float xx = (idx % (Nx / Nxtex)) * Nxtex / float(Nx);
float yy = (idy % (Ny / Nytex)) * Nytex / float(Ny);
float K0 = cubic_hermite(K00, K10, K20, K30, xx);
float K1 = cubic_hermite(K01, K11, K21, K31, xx);
float K2 = cubic_hermite(K02, K12, K22, K32, xx);
float K3 = cubic_hermite(K03, K13, K23, K33, xx);
float K = cubic_hermite(K0, K1, K2, K3, yy);
if(idx == 15 && idy == 15) printf("%d %d %g %g %g %g %g %g %g\n", idx, idy, x, y, K00, K10, K20, K30, K0, K);
float *row = (float *)(((char *)K_d)+(idy*pitch));
row[idx] = K;
//if(idy == 0) printf("%d %d %f %f | %f %f | %f %f %f %f %f\n", idx, idy, x * 10.0f / Nx, y * 10.0f / Ny, xx, yy, K00, K10, K01, K11, K);
}
}
*/
int main(int argc, char*argv[]){
cudaError_t error;
int er;
int devCount = 0;
cudaGetDeviceCount(&devCount);
if(devCount == 0){
printf("Error: No valid cuda device!\n");
return 0;
}
if(devCount == 1) printf("There is %d CUDA Device\n", devCount);
else printf("There are %d CUDA Devices\n", devCount);
/*
{
double xMax = 10.0;
double yMax = 10.0;
int Nx = 1000;
int Ny = 1000;
int Nxtex = Nx + 1;
int Nytex = Ny + 1;
int Nxtexf = Nx / 10 + 1;
int Nytexf = Ny / 10 + 1;
double *K2d_h, *K2d_d;
size_t pitch;
//with pitch, the 2d memory is extended in one dimension to set memory alignment, pitch is the new Nxtex
K2d_h = (double*)malloc( Nxtex * Nytex * sizeof(double));
cudaMallocPitch((void **) &K2d_d, &pitch, Nxtex * sizeof(double), Nytex);
//printf("%d %d %lu\n", Nxtex, Nytex, pitch);
{
double a = (double)(M_PI * sqrt(-1.0 / log(def_TOLF * 0.5)));
double b = (double)(1.0 / sqrt(M_PI));
double c = (double)(2.0 * a / M_PI);
Voigt_2d_kernel <<< dim3((Nxtex + 31) / 32, (Nytex + 31) / 32), dim3(32, 32, 1) >>> (a, b, c, K2d_d, Nxtex, Nytex, pitch, xMax, xMax);
cudaMemcpy2D(K2d_h, Nxtex * sizeof(double), K2d_d, pitch, Nxtex * sizeof(double), Nytex, cudaMemcpyDeviceToHost);
}
// / *
for(int i = 0; i < Nxtex - 1; ++i){
for(int j = 0; j < Nytex - 1; ++j){
//x and y arrays from 0.1 to 2000
double x = exp(-2.3 + i * xMax / double(Nxtex - 1));
double y = exp(-2.3 + j * yMax / double(Nytex - 1));
//if( x < xMax && y < yMax){
printf("%g %g %.15g\n", x, y, K2d_h[j * Nxtex + i]);
//}
}
}
// * /
return 0;
}
/ *
float *K2df_h, *K2df_d;
size_t pitchf;
//with pitchf, the 2d memory is extended in one dimension to set memory alignment, pitchf is the new Nxtexf
K2df_h = (float*)malloc( Nxtexf * Nytexf * sizeof(float));
cudaMallocPitch((void **) &K2df_d, &pitchf, Nxtexf * sizeof(float), Nytexf);
//printf("%d %d %lu\n", Nxtexf, Nytexf, pitchf);
{
float a = (float)(M_PI * sqrt(-1.0f / log(def_TOLF * 0.5f)));
float b = (float)(1.0f / sqrt(M_PI));
float c = (float)(2.0f * a / M_PI);
Voigt_2df_kernel <<< dim3((Nxtexf + 31) / 32, (Nytexf + 31) / 32), dim3(32, 32, 1) >>> (a, b, c, K2df_d, Nxtexf, Nytexf, pitchf, xMax, xMax);
cudaMemcpy2D(K2df_h, Nxtexf * sizeof(float), K2df_d, pitchf, Nxtexf * sizeof(float), Nytexf, cudaMemcpyDeviceToHost);
}
/ *
for(int i = 0; i < Nxtexf - 1; ++i){
for(int j = 0; j < Nytexf -1; ++j){
float x = i * xMax / float(Nxtexf - 1);
float y = j * yMax / float(Nytexf - 1);
if( x < xMax && y < yMax){
printf("%g %g %.15g\n", x, y, K2df_h[j * Nxtexf + i]);
}
}
}
return 0;
* /
//https://stackoverflow.com/questions/41749024/edit-cuda-texture-object
cudaTextureObject_t K2dtex;
cudaResourceDesc resDescr;
memset(&resDescr, 0, sizeof(cudaResourceDesc));
resDescr.resType = cudaResourceTypePitch2D;
resDescr.res.pitch2D.desc = cudaCreateChannelDesc<float>();
resDescr.res.pitch2D.devPtr = K2df_d;
resDescr.res.pitch2D.height = Nytexf;
resDescr.res.pitch2D.pitchInBytes = pitchf;
resDescr.res.pitch2D.width = Nxtexf;
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = 0;
//texDescr.filterMode = cudaFilterModeLinear;
texDescr.filterMode = cudaFilterModePoint;
texDescr.addressMode[0] = cudaAddressModeClamp;
texDescr.addressMode[1] = cudaAddressModeClamp;
texDescr.addressMode[2] = cudaAddressModeClamp;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&K2dtex, &resDescr, &texDescr, NULL);
float *K_h, *K_d;
K_h = (float*)malloc( Nx * Ny * sizeof(float));
//with pitch, the 2d memory is extended in one dimension to set memory alignment, pitch is the new Nx
cudaMallocPitch((void **) &K_d, &pitch, Nx * sizeof(float), Ny);
for(int t = 0; t < 1; ++t){
//Voigt_texture_kernel <<< dim3((Nx + 31) / 32, (Ny + 31) / 32), dim3(32, 32, 1) >>> (K2dtex, K_d, Nx, Ny, Nxtexf - 1, Nytexf - 1, pitch);
//Voigt_textureb_kernel <<< dim3((Nx + 31) / 32, (Ny + 31) / 32), dim3(32, 32, 1) >>> (K2dtex, K_d, Nx, Ny, Nxtexf -1, Nytexf - 1, pitch);
//Voigt_b_kernel <<< dim3((Nx + 31) / 32, (Ny + 31) / 32), dim3(32, 32, 1) >>> (K2d_d, K_d, Nx, Ny, Nxtex - 1, Nytex - 1, pitch);
Voigt_bicubic_kernel <<< dim3((Nx + 31) / 32, (Ny + 31) / 32), dim3(32, 32, 1) >>> (K2dtex, K_d, Nx, Ny, Nxtexf - 1, Nytexf - 1, pitch);
}
cudaMemcpy2D(K_h, Nx * sizeof(float), K_d, pitch, Nx * sizeof(float), Ny, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int i = 0; i < Nx; ++i){
for(int j = 0; j < Ny; ++j){
double x = i * xMax / double(Nx);
double y = j * yMax / double(Ny);
if( x < xMax && y < yMax){
double diff = fabs(K2d_h[j * Nxtex + i] - K_h[j * Nx + i]);
if(diff > 5.0e-7){
printf("%g %g %.15g %.15g %.15g\n", x, y, K2d_h[j * Nxtex + i], K_h[j * Nx + i], diff);
}
}
}
}
return 0;
}
*/
char qFilename[15][160]; //for maximal 15 isotopologues
char paramFilename[160];
sprintf(paramFilename, "%s", "param.dat");
//Read parameters
Param param;
sprintf(param.PFilename, "%s", "-");
sprintf(param.SpeciesFilename, "%s", "-");
sprintf(param.edges, "%s", "-");
sprintf(param.bins, "%s", "-");
sprintf(param.ciaSystem, "%s", "-");
sprintf(param.subLorentzianFilename, "%s", "-");
param.dev = 0;
param.useIndividualBins = 0;
param.useOutputEdges = 0;
param.nedges = 0;
param.nP = 1;
param.usePFile = 0;
param.useIndividualX = 0;
param.useCia = 0;
param.path[0] = 0;
param.pathK[0] = 0;
param.nSpecies = 1;
param.useSpeciesFile = 0;
param.useSubLorentzian = 0;
param.T = 0.0;
param.P = 0.0;
param.mParamFilename[0] = 0;
param.dataBase = 0;
param.numin = 0.0;
param.numax = 0.0;
param.dnu = 0.0;
param.Nxb = 0;
param.cutMode = 0;
param.cut = 0.0;
param.doResampling = 0;
param.nC = 0;
param.doTransmission = 0;
param.nTr = 0;
param.dTr = 0.0;
param.doStoreFullK = 0;
param.doStoreK = 0;
param.nbins = 0;
param.kmin = 0.0;
param.qalphaL = def_qALPHA_L;
param.gammaF = def_gammaF;
param.doMean = 0;
param.units = 0;
param.replaceFiles = 0;
param.profile = def_PROFILE;
param.doTuning = def_doTuning;
param.removePlinth = def_removePlinth;
er = read_parameters(param, paramFilename, argc, argv);
if(er == 0){
return 0;
}
if(param.dev >= devCount || param.dev < 0){
printf("Error: Device Number is not allowed\n");
return 0;
}
char filemode[16];
if(param.replaceFiles == 0){
sprintf(filemode, "a");
}
else{
sprintf(filemode, "w");
}
FILE *InfoFile;
char InfoFilename[300];
sprintf(InfoFilename, "Info_%s.dat", param.name);
InfoFile = fopen(InfoFilename, filemode);
int runtimeVersion;
int driverVersion;
cudaRuntimeGetVersion(&runtimeVersion);
cudaDriverGetVersion(&driverVersion);
cudaSetDevice(param.dev);
cudaDeviceProp devProp;
for(int i = 0; i < 2; ++i){
FILE *infofile;
if(i == 0) infofile = InfoFile;
if(i == 1) infofile = stdout;
for(int j = 0; j < devCount; ++j){
cudaGetDeviceProperties(&devProp, j);
fprintf(infofile,"Name:%s, Major:%d, Minor:%d, Max threads per Block:%d, Max x dim:%d\n, #Multiprocessors:%d, Clock Rate:%d, Memory Clock Rate:%d, Global Memory:%lu, Shared memory per block: %lu\n",
devProp.name, devProp.major, devProp.minor, devProp.maxThreadsPerBlock, devProp.maxThreadsDim[0],
devProp.multiProcessorCount, devProp.clockRate, devProp.memoryClockRate, (long unsigned int)(devProp.totalGlobalMem), (long unsigned int)(devProp.sharedMemPerBlock));
}
}
if(param.Nxb != 0){
param.useIndividualX = 1;
}
if(param.removePlinth == 1 && param.profile == 4){
printf("Error, remove plinth is not supported for profile 4\n");
return 0;
}
subLorentzianConstantCopy(param.useSubLorentzian);
//If the bin file is used, store the boundaries of the bins
double *binBoundaries_h, *binBoundaries_d;
binBoundaries_h = (double*)malloc((param.nbins + 1) * sizeof(double));
cudaMalloc((void **) &binBoundaries_d, (param.nbins + 1) * sizeof(double));
if(param.useIndividualBins == 1){
er = readBinFile(param, binBoundaries_h);
if(er == 0) return 0;
param.numin = binBoundaries_h[0];
param.numax = binBoundaries_h[param.nbins];
if(param.doResampling > 0){
printf("Error: The resampling function is not supported for the bin-file option\n");
return 0;
}
if(param.doTransmission > 0){
printf("Error: The transmission function is not supported for the bin-file option\n");
return 0;
}
}
else{
for(int i = 0; i < param.nbins; ++i){
binBoundaries_h[i] = param.numin + i * (param.numax - param.numin) / ((double)(param.nbins));
}
binBoundaries_h[param.nbins] = param.numax;
}
cudaMemcpy(binBoundaries_d, binBoundaries_h, (param.nbins + 1) * sizeof(double), cudaMemcpyHostToDevice);
//for(int i = 0; i < param.nbins + 1; ++i){
// printf("binboundaries %d %g\n", i, binBoundaries_h[i]);
//}
int Nx;
if(param.useIndividualX == 0){
Nx = (int)((param.numax - param.numin) / param.dnu + 0.5); //+ 0.5 to round correctly between double and int
if((param.numax - param.numin) / param.dnu + 0.5 >= 2147483647){
printf("Error: Nx too large, integer overflow. %d %g\n", Nx, (param.numax - param.numin) / param.dnu);
return 0;
}
printf("%g %g %g %g\n", param.numax, param.numin, param.dnu, (param.numax - param.numin) / param.dnu + 0.5);
param.Nxb = Nx / param.nbins;
if(Nx % param.nbins != 0){
printf("Error: range cannot be divided evenly in bins. %d %d %g\n", Nx, param.nbins, Nx / ((double)(param.nbins)));
return 0;
}
}
else{
Nx = param.nbins * param.Nxb;
if(param.nbins * param.Nxb >= 2147483647){
printf("Error: Nx too large, integer overflow. %d %g\n", Nx, (double)(param.nbins) * (double)(param.Nxb));
return 0;
}
if(param.doResampling > 0){
printf("Error: The resampling function is not supported for unequal spacing option\n");
return 0;
}
if(param.doTransmission > 0){
printf("Error: The transmission function is not supported for unequal spacing option\n");
return 0;
}
}
if(param.useSubLorentzian == 1){
subLorentzianB(param.T);
param.useIndividualX = 1;
//this is needed because of the nu/nu0 factor
}
//If the output edges file is used store the edges
double *outputEdges_h;
if(param.useOutputEdges == 1){
outputEdges_h = (double*)malloc((param.nedges + 1) * sizeof(double));
er = readEdgesFile(param, outputEdges_h);
if(er == 0) return 0;
}
else{
outputEdges_h = NULL;
}
//Allocate P array
double *P_h;
P_h = (double*)malloc((param.nP) * sizeof(double));
P_h[0] = param.P;
if(param.usePFile == 1){
er = readPFile(param, P_h);
if(er == 0) return 0;
}
//Allocate Species array
double *SpeciesA_h; //abundance
char **SpeciesN_h;
SpeciesA_h = (double*)malloc(param.nSpecies * sizeof(double));
SpeciesN_h = (char**)malloc(param.nSpecies * sizeof(char*));
for(int i = 0; i < param.nSpecies; ++i){
SpeciesN_h[i] = (char*)malloc(160 * sizeof(char));
}
if(param.useSpeciesFile == 1){
er = readSpeciesFile(param, SpeciesN_h, SpeciesA_h);
if(er == 0) return 0;
}
double time[9];
double timeT[3];
for(int i = 0; i < 9; ++i){
time[i] = 0.0;
}
for(int i = 0; i < 3; ++i){
timeT[i] = 0.0;
}
//Allocate Molecule properties
for(int i = 0; i < 2; ++i){
FILE *infofile;
if(i == 0) infofile = InfoFile;
if(i == 1) infofile = stdout;
fprintf(infofile, "\nVersion: %g\n", VERSION);
fprintf(infofile, "Using device %d\n\n", param.dev);
fprintf(infofile, "Runtime Version %d\n", runtimeVersion);
fprintf(infofile, "Driver Version %d\n", driverVersion);
fprintf(infofile, "GIT Describe: %s\n", GIT_DESCRIBE);
fprintf(infofile, "Build Date: %s\n", BUILD_DATE);
fprintf(infofile, "Build Path: %s\n", BUILD_PATH);
fprintf(infofile, "Build System: %s\n", BUILD_SYSTEM);
fprintf(infofile, "Build Compute Capability: SM=%s\n", BUILD_SM);
fprintf(infofile, "\n");
if(param.Nxb < param.nC && i == 0){
printf("Number of points per bin smaller than the number of Chebyshev coefficients: Changed nC to %d\n", param.Nxb);
fprintf(infofile, "Number of points per bin smaller than the number of Chebyshev coefficients: Changed nC to %d\n", param.Nxb);
param.nC = param.Nxb;
}
fprintf(infofile, "name = %s\n", param.name);
fprintf(infofile, "T = %g\n", param.T);
if(param.usePFile == 0){
fprintf(infofile, "P = %g\n", P_h[0]);
}
else{
fprintf(infofile, "P in file: %s\n", param.PFilename);
fprintf(infofile, "Number of P values: %d\n", param.nP);
}
if(param.useSpeciesFile > 0){
fprintf(infofile, "Species in file: %s\n", param.SpeciesFilename);
fprintf(infofile, "Number of Species: %d\n", param.nSpecies);
}
if(param.useSubLorentzian > 0){
fprintf(infofile, "sub-Lorentzian file: %s\n", param.subLorentzianFilename);
}
fprintf(infofile, "cia System = %s\n", param.ciaSystem);
fprintf(infofile, "pathToData = %s\n", param.path);
fprintf(infofile, "numin = %g\n", param.numin);
fprintf(infofile, "numax = %g\n", param.numax);
fprintf(infofile, "dnu = %g\n", param.dnu);
fprintf(infofile, "Nnu per bin = %d\n", param.Nxb);
fprintf(infofile, "Number of points: %d\n", Nx);
fprintf(infofile, "cutMode = %d\n", param.cutMode);
fprintf(infofile, "cut = %g\n", param.cut);
fprintf(infofile, "doResampling = %d\n", param.doResampling);
fprintf(infofile, "nC = %d\n", param.nC);
fprintf(infofile, "doTransmission = %d\n", param.doTransmission);
fprintf(infofile, "nTr = %d\n", param.nTr);
fprintf(infofile, "dTr = %g\n", param.dTr);
fprintf(infofile, "doStoreFullK = %d\n", param.doStoreFullK);
fprintf(infofile, "pathToK = %s\n", param.pathK);
fprintf(infofile, "dostoreK = %d\n", param.doStoreK);
fprintf(infofile, "nbins = %d\n", param.nbins);
if(param.useIndividualBins == 1){
fprintf(infofile, "use Individual bins: %s\n", param.bins);
}
fprintf(infofile, "kmin = %g\n", param.kmin);
fprintf(infofile, "qalphaL = %g\n", param.qalphaL);
fprintf(infofile, "gammaF = %g\n", param.gammaF);
fprintf(infofile, "doMean = %d\n", param.doMean);
fprintf(infofile, "Units = %d\n", param.units);
fprintf(infofile, "Replace files = %d\n", param.replaceFiles);
fprintf(infofile, "profile = %d\n", param.profile);
fprintf(infofile, "doTuning = %d\n", param.doTuning);
fprintf(infofile, "def_TOL = %g\n", def_TOL);
fprintf(infofile, "def_TOLf = %g\n", def_TOLF);
fprintf(infofile, "def_nthmax = %d\n", def_nthmax);
fprintf(infofile, "def_nlmax = %d\n", def_nlmax);
fprintf(infofile, "def_maxlines = %lld\n", def_maxlines);
fprintf(infofile, "def_maxfiles = %d\n", def_maxfiles);
fprintf(infofile, "def_NmaxSample = %d\n", def_NmaxSample);
if(param.useOutputEdges == 1){
fprintf(infofile, "use output edges: %s\n", param.edges);
}
fprintf(infofile, "\n");
}
fclose(InfoFile);
cudaEvent_t tt1; //start time
cudaEvent_t tt2; //end time
cudaEventCreate(&tt1);
cudaEventCreate(&tt2);
cudaEvent_t ReadStart, ReadStop;
cudaEventCreate(&ReadStart);
cudaEventCreate(&ReadStop);
cudaEvent_t KStart, KStop;
cudaEventCreate(&KStart);
cudaEventCreate(&KStop);
cudaEvent_t LineStart, LineStop;
cudaEventCreate(&LineStart);
cudaEventCreate(&LineStop);
cudaEvent_t TuneStart, TuneStop;
cudaEventCreate(&TuneStart);
cudaEventCreate(&TuneStop);
cudaEvent_t iiLimitsEvent;
cudaEventCreate(&iiLimitsEvent);
cudaEvent_t AEvent;
cudaEventCreate(&AEvent);
cudaEvent_t ALEvent;
cudaEventCreate(&ALEvent);
cudaEvent_t AREvent;
cudaEventCreate(&AREvent);
cudaEvent_t BEvent;
cudaEventCreate(&BEvent);
float milliseconds;
cudaStream_t VStream[def_KSn];
for(int i = 0; i < def_KSn; ++i){
cudaStreamCreate(&VStream[i]);
}
cudaStream_t CStream[def_rBs];
for(int i = 0; i < def_rBs; ++i){
cudaStreamCreate(&CStream[i]);
}
cudaStream_t tuneStream[2];
for(int i = 0; i < 2; ++i){
cudaStreamCreate(&tuneStream[i]);
}
cudaStream_t nuLimitsStream[5];
for(int i = 0; i < 5; ++i){
cudaStreamCreate(&nuLimitsStream[i]);
}
// ************************************************************
//calculate mean mass before starting the opacity calculation
//needed to set kmin
// ************************************************************
double meanMass = 0.0;
for(int iSpecies = 0; iSpecies < param.nSpecies; ++iSpecies){
double Sscale = 1.0;
if(param.nSpecies > 1){
sprintf(param.mParamFilename, "%s", SpeciesN_h[iSpecies]);
Sscale = SpeciesA_h[iSpecies];
}
Molecule m;
if(param.useCia == 0){
int er = Init(m, param, qFilename);
if(er == 0) return 0;
}
//compute the mean mass
for(int i = 0; i < m.nISO; ++i){
//include here mixture abundances
meanMass += m.ISO[i].Ab * m.ISO[i].m * Sscale; //mean Molar Mass (g)
}
}
printf("mean mass %g\n", meanMass);
//needed here already to get the cia.mass1. Initialize it again later in the main species loop
ciaSystem cia;
if(param.useCia == 1){
Molecule m;
er = InitCia(m, cia, param);
if(er == 0) return 0;
}
double unitScale = 1.0;
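//unitScale presumably converts between opacity per gram and cross-section per molecule
//using the mean molar mass and Avogadro's number (unitScale = meanMass / N_A when param.units == 1);
//K is multiplied by unitScale on output and kmin is divided by it so the floor is applied in internal units.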
if(param.units == 1){
unitScale = 1.0 / def_NA * meanMass;
if(param.useCia == 1){
unitScale = 1.0 / def_NA * cia.mass1;
}
param.kmin /= unitScale;
printf("kmin %g\n", param.kmin);
}
// ************************************************************
// ****************************************************************************
// Allocate and initialize K and x arrays
// ****************************************************************************
double *K_h, *K_d;
double *KS_d; //used in multiple y blocks
double *x_h, *x_d;
int *binKey_d;
int *binIndex_h, *binIndex_d;
K_h = (double*)malloc(Nx * sizeof(double));
x_h = (double*)malloc(Nx * sizeof(double));
binIndex_h = (int*)malloc((param.nbins + 2) * sizeof(int));
cudaMalloc((void **) &K_d, param.nP * Nx * sizeof(double));
cudaMalloc((void **) &KS_d, def_KSn * Nx * sizeof(double));
cudaMalloc((void **) &x_d, Nx * sizeof(double));
cudaMalloc((void **) &binKey_d, Nx * sizeof(int));
cudaMalloc((void **) &binIndex_d, (param.nbins + 2) * sizeof(int));
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("K alloc error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
for(int k = 0; k < param.nP * Nx; k += def_nthmax){
int Nk = min(def_nthmax, param.nP * Nx - k);
InitialK_kernel <<< (Nk + 511) / 512, 512 >>> (K_d, param.nP * Nx, param.kmin, k);
}
for(int k = 0; k < def_KSn * Nx; k += def_nthmax){
int Nk = min(def_nthmax, def_KSn * Nx - k);
//kmin must be here always zero, because the different streams are added later to K_d
InitialK_kernel <<< (Nk + 511) / 512, 512 >>> (KS_d, def_KSn * Nx, 0.0, k);
}
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("bin1 error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
for(int k = 0; k < Nx; k += def_nthmax){
int Nk = min(def_nthmax, Nx - k);
setX_kernel <<< (Nk + 511) / 512, 512 >>> (x_d, Nx, param.numin, param.dnu, param.Nxb, param.useIndividualX, binBoundaries_d, k);
}
cudaMemcpy(x_h, x_d, Nx * sizeof(double), cudaMemcpyDeviceToHost);
for(int k = 0; k < Nx; k += def_nthmax){
int Nk = min(def_nthmax, Nx - k);
binKey_kernel <<< (Nk + 511) / 512, 512 >>> (binKey_d, Nx, param.Nxb, binBoundaries_d, param.nbins, param.numax, x_d, param.useIndividualX, k);
}
for(int k = 0; k < Nx; k += def_nthmax){
int Nk = min(def_nthmax, Nx - k);
binIndex_kernel <<< (Nk + 511) / 512, 512 >>> (binKey_d, binIndex_d, Nx, param.nbins, k);
}
cudaMemcpy(binIndex_h, binIndex_d, (param.nbins + 2) * sizeof(int), cudaMemcpyDeviceToHost);
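//binKey_d[i] appears to hold the bin number of wavenumber point i, and
//binIndex_d[b] .. binIndex_d[b+1] the index range of bin b on the x grid
//(the commented-out check below prints exactly this layout).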
/*
int *binKey_h; //only needed to check the key
binKey_h = (int*)malloc(Nx * sizeof(int));
cudaMemcpy(binKey_h, binKey_d, Nx * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < Nx; ++i){
int bin = binKey_h[i];
printf("%d %.10g %d %d %d\n", i, x_h[i], bin, binIndex_h[bin], binIndex_h[bin + 1]);
}
*/
// ****************************************************************************
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("K and x alloc error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
//start species loop here
for(int iSpecies = 0; iSpecies < param.nSpecies; ++iSpecies){
double Sscale = 1.0; //Abundance scale for mixtures
if(param.nSpecies > 1){
sprintf(param.mParamFilename, "%s", SpeciesN_h[iSpecies]);
Sscale = SpeciesA_h[iSpecies];
}
Molecule m;
m.id=0;
m.NL[0] = 0;
m.nISO = 0;
m.defaultL = 0.0;
m.defaultn = 0.0;
//Initialize the Isotopologue properties for ISO.h
if(param.useCia == 0){
int er = Init(m, param, qFilename);
if(er == 0) return 0;
}
//print species dependent information
InfoFile = fopen(InfoFilename, "a");
for(int i = 0; i < 2; ++i){
FILE *infofile;
if(i == 0) infofile = InfoFile;
if(i == 1) infofile = stdout;
fprintf(infofile, "Species Name = %s\n", m.mName);
fprintf(infofile, "dataBase = %d\n", param.dataBase);
fprintf(infofile, "Molecule Number = %d\n", m.id);
fprintf(infofile, "default L = %g\n", m.defaultL);
fprintf(infofile, "default n = %g\n", m.defaultn);
fprintf(infofile, "\n");
}
fclose(InfoFile);
//Read partition function
Partition part;
er = readPartition(param, qFilename, part, param.T, m);
if(er == 0){
return 0;
}
printf("mean mass %g, Sscale %g\n", meanMass, Sscale);
//Set cia System properties
ciaSystem cia;
if(param.useCia == 1){
er = InitCia(m, cia, param);
if(er == 0) return 0;
}
if(param.useCia == 1 && m.id != 0){
printf("Error, not allowed to use a cia system with a molecule\n");
return 0;
}
double *readBuffer_h, *readBuffer_d;
int readBufferSize = 8192;
int readBufferN = 0;
int readBufferCount = 0;
int rbvs = 0;
if(param.dataBase == 2){
//Exomol nu, S, El, A
//Kurucz Molecules
readBufferN = 4;
}
if(param.dataBase == 20){
//Exomol super lines nu, S
readBufferN = 2;
}
if(param.dataBase == 30){
//Kurucz Atoms nu, S, El, A, Gamma_nat
readBufferN = 5;
}
if(param.dataBase == 31){
//NIST Atoms
readBufferN = 5;
}
if(param.dataBase == 32){
//VALD Atoms
readBufferN = 5;
}
cudaHostAlloc((void **) &readBuffer_h, def_rBs * readBufferSize * readBufferN * sizeof(double), cudaHostAllocDefault);
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Buffer host alloc error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaMalloc((void **) &readBuffer_d, def_maxlines * readBufferN * sizeof(double));
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Buffer device alloc error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
//printf("Allocate read Buffer %d %d %d %lld | %d %lld\n", def_rBs, readBufferSize, readBufferN, m.NLmax, def_rBs * readBufferSize * readBufferN, m.NLmax * readBufferN);
Line L;
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Initial error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
//Allocate memory for Line properties
if(param.dataBase < 2 || param.dataBase == 3){
// 0 1 3
Alloc_Line(L, m, param);
}
else{
// 2 20 30 31 32
Alloc2_Line(L, m, param);
}
if(param.useCia == 1){
for(int iP = 0; iP < param.nP; ++iP){
int er = readCiaFile(param, cia, x_h, K_h, Nx, param.T, P_h[iP]);
cudaMemcpy(K_d + iP * Nx, K_h, Nx * sizeof(double), cudaMemcpyHostToDevice);
if(er == 0){
return 0;
}
}
}
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Line alloc error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
if(m.id > 0 && param.doStoreFullK >= 0){
// **************************************
// Starting the loop around the datafiles
// **************************************
int fi0 = m.nFiles;
int fi1 = 0;
if(param.cut == 0.0) param.cut = 1.0e30;
if(param.cutMode == 0 && param.cut){
for(int fi = 0; fi < m.nFiles; ++fi){
if(m.fileLimit[fi] - param.cut <= param.numax) fi1 = fi + 1;
else break;
}
for(int fi = m.nFiles - 1; fi >= 0; --fi){
if(m.fileLimit[fi + 1] + param.cut >= param.numin) fi0 = fi;
else break;
}
}
else{
fi0 = 0;
fi1 = m.nFiles;
}
printf("File range %d to %d\n", fi0, fi1 - 1);
int fi;
FILE *dataFile;
char dataFilename[180];
timeT[0] += time[0];
time[0] = 0.0;
//Tuning parameters for Line Kernels
int ntAOld = 0;
int ntA = 0;
int ntALOld = 0;
int ntAL = 0;
int ntAROld = 0;
int ntAR = 0;
int ntBOld = 0;
int ntB = 0;
int ntCOld = 0;
int ntC = 0;
int nkA = 8;
int nkAL = 8;
int nkAR = 8;
int nkB = 4;
int nkC = 2;
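//Self-tuning bookkeeping: nt* store the number of grid points treated by each kernel class in
//the last tuned call; when this changes by roughly more than +-60%, the nk* tuning factors are
//re-timed by repeatedly doubling and halving them and the fastest value is kept
//(see the "Selftune" blocks further down).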
double c1 = def_h * def_c / (def_kB * param.T);
double T1 = def_T0 / param.T;
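//c1 = h*c/(kB*T) is the second radiation constant divided by T, presumably used in the
//Boltzmann factor exp(-c1*E'') and the stimulated-emission factor (1 - exp(-c1*nu)) of the line
//strength; T1 = T0/T presumably enters the temperature scaling of the pressure-broadened
//half width, gamma(T) ~ gamma(T0) * (T0/T)^n.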
for(fi = fi0; fi < fi1; ++fi){
timeT[1] += time[1];
timeT[2] += time[2];
time[1] = 0.0;
time[2] = 0.0;
int NL;
int NL1;
long long lPart;
// read the first block of files outside the loop
// the remaining reads are called at the end of the loop
// to allow overlapping execution
// **************************read0
if(fi == fi0){
sprintf(dataFilename, "%sbin", m.dataFilename[fi]);
dataFile = fopen(dataFilename, "rb");
if(dataFile == NULL){
printf("Error: line list file not found: %s\n", dataFilename);
return 0;
}
printf("Reading Line file %d of %d: %s\n", fi, fi1 - 1, dataFilename);
printf("Number of lines: %lld\n", m.NL[fi]);
NL = min(def_maxlines, m.NL[fi] - 0);
lPart = (0 + def_maxlines - 1) / def_maxlines;
cudaEventRecord(ReadStart);
printf("Reading Line file %d of %d; part %lld of %lld with %d lines\n", fi, fi1 - 1, lPart, (m.NL[fi] + def_maxlines - 1) / def_maxlines - 1, NL);
// **************************
// Read the Line list
// **************************
if(param.dataBase < 2 || param.dataBase == 3){
//0 1 3
er = readFile(param, m, part, L, param.qalphaL, NL, dataFile, Sscale, meanMass);
}
else {
// 2 20 30 31 32
int vs = 0;
for(int i = 0; i < NL; i += readBufferSize){
er = readFileExomol(L, NL, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, vs, CStream);
readBufferCount += readBufferSize;
++vs;
}
}
if(er == 0){
return 0;
}
cudaEventRecord(ReadStop);
cudaEventSynchronize(ReadStop);
cudaEventElapsedTime(&milliseconds, ReadStart, ReadStop);
time[0] += milliseconds * 0.001;
printf("Reading Line file %d, part %lld complete\n", fi, lPart);
printf("Time for input, %d %lld: %g seconds\n", fi, lPart, time[0]);
}
// **************************read0
for(long long int iL = 0LL; iL < m.NL[fi]; iL += def_maxlines){
//start the loop around the Pressure values. only 1 iteration if no Pressure file is given
for(int iP = 0; iP < param.nP; ++iP){
//Copy Line data to the device
cudaEventRecord(LineStart);
if(param.dataBase < 2 || param.dataBase == 3){
//0 1 3
Copy_Line(L, m, NL);
}
else{
//2 20 30 31 32
double mass = m.ISO[0].m / def_NA;
double Abundance = m.ISO[0].Ab;
if(param.units == 0){
Abundance *= m.ISO[0].m / meanMass;
Sscale *= m.ISO[0].m / meanMass;
}
double Q = part.Q[0];
int vs = 0;
for(int k = 0; k < NL; k += def_nthmax / 4){
int Nk = min(def_nthmax / 4, NL - k);
if(Nk > 0){
// ***************************
// Compute Line properties 1
// ***************************
if(param.dataBase == 2){
L_kernelExomol <<< (Nk + 127) / 128, 128, 0, VStream[vs % def_KSn] >>> (readBuffer_d, L.nu_d, L.S_d, L.EL_d, L.ialphaD_d, L.A_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Q, Abundance, Sscale, NL, k);
}
if(param.dataBase == 20){
L_kernelExomolSuper <<< (Nk + 127) / 128, 128, 0, VStream[vs % def_KSn] >>> (readBuffer_d, L.nu_d, L.S_d, L.ialphaD_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Abundance, Sscale, NL, k);
}
if(param.dataBase == 30){
L_kernelKurucz <<< (Nk + 127) / 128, 128, 0, VStream[vs % def_KSn] >>> (readBuffer_d, L.nu_d, L.S_d, L.EL_d, L.ialphaD_d, L.A_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Q, Abundance, Sscale, NL, k);
}
if(param.dataBase == 31){
L_kernelNIST <<< (Nk + 127) / 128, 128, 0, VStream[vs % def_KSn] >>> (readBuffer_d, L.nu_d, L.S_d, L.EL_d, L.ialphaD_d, L.A_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Q, Abundance, Sscale, NL, k);
}
if(param.dataBase == 32){
L_kernelVALD <<< (Nk + 127) / 128, 128, 0, VStream[vs % def_KSn] >>> (readBuffer_d, L.nu_d, L.S_d, L.EL_d, L.ialphaD_d, L.A_d, L.vy_d, L.n_d, m.defaultL, m.defaultn, param.gammaF, mass, param.T, Q, Abundance, Sscale, NL, k);
}
// ***************************
// Compute Line properties 2
// ***************************
if(param.dataBase != 20){
Sf_kernel <<< (Nk + 127) / 128, 128, 0, VStream[vs % def_KSn] >>> (L.nu_d, L.S_d, L.A_d, L.vy_d, L.ialphaD_d, L.n_d, L.EL_d, L.ID_d, NL, c1, T1, P_h[iP], k);
}
else{
// 20 (Exomol super lines)
SfSuper_kernel <<< (Nk + 127) / 128, 128, 0, VStream[vs % def_KSn] >>> (L.nu_d, L.S_d, L.vy_d, L.ialphaD_d, L.n_d, L.ID_d, NL, T1, P_h[iP], k);
}
}
++vs;
}
}
cudaDeviceSynchronize();
// ************************
// ***************************
// Compute Line properties
// ***************************
if(param.dataBase < 2 || param.dataBase == 3){
// 0 1 3
for(int k = 0; k < NL; k += def_nthmax){
int Nk = min(def_nthmax, NL - k);
if(Nk > 0) S2_kernel <<< (Nk + 127) / 128, 128 >>> (L.nu_d, L.S_d, L.A_d, L.vy_d, L.ialphaD_d, L.n_d, L.delta_d, L.EL_d, L.ID_d, NL, param.T, P_h[iP], k);
}
/* // *************
//uncomment this only when no Pressure file is given
//print number of lines per bin
cudaMemcpy(L.nu_h, L.nu_d, NL * sizeof(double), cudaMemcpyDeviceToHost);
int nLb[param.nbins];
for(int i = 0; i < param.nbins; ++i){
nLb[i] = 0;
}
double binWidth = (param.numax - param.numin) / ((double)(param.nbins));
printf("%g\n", binWidth);
for(int i = 0; i < NL; ++i){
int b = int(L.nu_h[i] / binWidth);
nLb[b] += 1;
}
for(int i = 0; i < param.nbins; ++i){
printf("%d, ", nLb[i]);
}
printf("\n");
*/
}
//print_kernel <<< 1, 1 >>> (L.nu_d, L.ialphaD_d, L.vy_d, L.ID_d, 500, 0);
//Sort the data along nu
thrust::device_ptr<double> nu_dt = thrust::device_pointer_cast(L.nu_d);
thrust::device_ptr<int> ID_dt = thrust::device_pointer_cast(L.ID_d);
thrust::sort_by_key(nu_dt, nu_dt + NL, ID_dt);
//Use Sort_d and ID_d to sort S_d, vy_d and ialphaD_d
int Nk = min(def_nthmax, NL);
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0) Copy_kernel <<< (Nk + 127) / 128, 128 >>> (L.S_d, L.Sort_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0) Sort_kernel <<< (Nk + 127) / 128, 128 >>> (L.Sort_d, L.S_d, L.ID_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0) Copy_kernel <<< (Nk + 127) / 128, 128 >>> (L.vy_d, L.Sort_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0) Sort_kernel <<< (Nk + 127) / 128, 128 >>> (L.Sort_d, L.vy_d, L.ID_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0) Copy_kernel <<< (Nk + 127) / 128, 128 >>> (L.ialphaD_d, L.Sort_d, NL, k);
}
for(int k = 0; k < NL; k += def_nthmax){
if(Nk > 0) Sort_kernel <<< (Nk + 127) / 128, 128 >>> (L.Sort_d, L.ialphaD_d, L.ID_d, NL, k);
}
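//thrust::sort_by_key above sorts nu_d and carries ID_d along as the permutation; each
//Copy_kernel/Sort_kernel pair then applies that permutation to one of the remaining per-line
//arrays (S_d, vy_d, ialphaD_d), using Sort_d as scratch space (Sort_kernel presumably gathers
//through ID_d).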
// ********************************
for(int k = 0; k < NL; k += def_nthmax){
int Nk = min(def_nthmax, NL - k);
if(Nk > 0){
S3_kernel <<< (Nk + 127) / 128, 128 >>> (L.nu_d, L.S_d, L.S1_d, L.vy_d, L.ialphaD_d, L.Sf_d, L.S1f_d, L.vyf_d, L.vcut2_d, L.va_d, L.vb_d, param.cut, param.cutMode, param.profile, param.numin, param.dnu, param.useIndividualX, NL, k);
if(param.removePlinth == 1 && param.cut != 0.0){
float a = (float)(M_PI * sqrt(-1.0 / log(def_TOLF * 0.5)));
float b = (float)(1.0 / sqrt(M_PI));
float c = (float)(2.0 * a / M_PI);
Plinth_kernel <<< (Nk + 127) / 128, 128 >>> (L.S1f_d, L.Sf_d, L.vyf_d, L.vcut2_d, L.plinth_d, NL, a, b, c, param.profile);
//printPlinth_kernel <<< (Nk + 127) / 128, 128 >>> (L.plinth_d, L.nu_d, NL);
}
}
}
cudaEventRecord(LineStop);
cudaEventSynchronize(LineStop);
error = cudaGetLastError();
if(error != 0){
printf("Line error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaEventElapsedTime(&milliseconds, LineStart, LineStop);
time[1] += milliseconds * 0.001;
printf("Time for Lines: %d %lld %d: %g seconds\n", fi, lPart, iP, time[1]);
cudaEventRecord(KStart);
// ************************************
// Compute the opacity function K(x)
// ************************************
int nlLimitsA = (NL + def_nlA - 1) / def_nlA;
int nlLimitsB = (NL + def_nlB - 1) / def_nlB;
int nlLimitsC = (NL + def_nlC - 1) / def_nlC;
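//The A/AL/AR/B/C kernels below appear to treat different regimes of the Voigt profile (line
//core and wings with decreasing accuracy requirements). For each class, nuLimits_kernel finds
//the wavenumber range covered by a block of lines, iiLimits_kernel maps it to an index range on
//the output grid, and iiLimitsMax_kernel reduces these to one total range per class
//(iiLimits*T), which is used further down to skip empty classes.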
//A
nuLimits_kernel<<< nlLimitsA, min(def_nlA, 1024), 0, nuLimitsStream[0] >>> (L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsA0_d, L.nuLimitsA1_d, param.numin, param.numax, def_nlA, NL, param.profile, 10);
iiLimits_kernel <<< (nlLimitsA + 127) / 128, 128, 0, nuLimitsStream[0] >>> (L.nuLimitsA0_d, L.nuLimitsA1_d, L.iiLimitsA0_d, L.iiLimitsA1_d, binBoundaries_d, nlLimitsA, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 10);
iiLimitsMax_kernel< 512 > <<< 1, 512 >>> (L.iiLimitsA0_d, L.iiLimitsA1_d, L.iiLimitsAT_d, Nx, nlLimitsA);
if(param.profile == 1){ //only for voigt profiles
//AL
nuLimits_kernel<<< nlLimitsA, min(def_nlA, 1024), 0, nuLimitsStream[1] >>> (L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsAL0_d, L.nuLimitsAL1_d, param.numin, param.numax, def_nlA, NL, param.profile, 11);
iiLimits_kernel <<< (nlLimitsA + 127) / 128, 128, 0, nuLimitsStream[1] >>> (L.nuLimitsAL0_d, L.nuLimitsAL1_d, L.iiLimitsAL0_d, L.iiLimitsAL1_d, binBoundaries_d, nlLimitsA, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 11);
iiLimitsMax_kernel< 512 > <<< 1, 512 >>> (L.iiLimitsAL0_d, L.iiLimitsAL1_d, L.iiLimitsALT_d, Nx, nlLimitsA);
//AR
nuLimits_kernel<<< nlLimitsA, min(def_nlA, 1024), 0, nuLimitsStream[2] >>> (L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsAR0_d, L.nuLimitsAR1_d, param.numin, param.numax, def_nlA, NL, param.profile, 12);
iiLimits_kernel <<< (nlLimitsA + 127) / 128, 128, 0, nuLimitsStream[2] >>> (L.nuLimitsAR0_d, L.nuLimitsAR1_d, L.iiLimitsAR0_d, L.iiLimitsAR1_d, binBoundaries_d, nlLimitsA, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 12);
iiLimitsMax_kernel< 512 > <<< 1, 512 >>> (L.iiLimitsAR0_d, L.iiLimitsAR1_d, L.iiLimitsART_d, Nx, nlLimitsA);
//B
nuLimits_kernel<<< nlLimitsB, min(def_nlB, 1024), 0, nuLimitsStream[3] >>> (L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsB0_d, L.nuLimitsB1_d, param.numin, param.numax, def_nlB, NL, param.profile, 20);
iiLimits_kernel <<< (nlLimitsB + 127) / 128, 128, 0, nuLimitsStream[3] >>> (L.nuLimitsB0_d, L.nuLimitsB1_d, L.iiLimitsB0_d, L.iiLimitsB1_d, binBoundaries_d, nlLimitsB, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 20);
iiLimitsMax_kernel< 512 > <<< 1, 512 >>> (L.iiLimitsB0_d, L.iiLimitsB1_d, L.iiLimitsBT_d, Nx, nlLimitsB);
//C
nuLimits_kernel<<< nlLimitsC, min(def_nlC, 1024), 0, nuLimitsStream[4] >>> (L.nu_d, L.ialphaD_d, L.vy_d, L.vcut2_d, L.nuLimitsC0_d, L.nuLimitsC1_d, param.numin, param.numax, def_nlC, NL, param.profile, 30);
iiLimits_kernel <<< (nlLimitsC + 127) / 128, 128, 0, nuLimitsStream[4] >>> (L.nuLimitsC0_d, L.nuLimitsC1_d, L.iiLimitsC0_d, L.iiLimitsC1_d, binBoundaries_d, nlLimitsC, param.numin, param.dnu, Nx, param.useIndividualX, param.nbins, param.Nxb, 30);
iiLimitsMax_kernel< 512 > <<< 1, 512 >>> (L.iiLimitsC0_d, L.iiLimitsC1_d, L.iiLimitsCT_d, Nx, nlLimitsC);
}
cudaEventRecord(iiLimitsEvent);
cudaEventSynchronize(iiLimitsEvent);
iiLimitsCheck <<< (nlLimitsA + 127) / 128, 128 >>> (L.iiLimitsA0_d, L.iiLimitsA1_d, L.iiLimitsAL0_d, L.iiLimitsAL1_d, L.iiLimitsAR0_d, L.iiLimitsAR1_d, nlLimitsA);
cudaEventRecord(iiLimitsEvent);
cudaEventSynchronize(iiLimitsEvent);
long long int nTA = L.iiLimitsAT_m[1] - L.iiLimitsAT_m[0];
long long int nTAL = L.iiLimitsALT_m[1] - L.iiLimitsALT_m[0];
long long int nTAR = L.iiLimitsART_m[1] - L.iiLimitsART_m[0];
long long int nTB = L.iiLimitsBT_m[1] - L.iiLimitsBT_m[0];
long long int nTC = L.iiLimitsCT_m[1] - L.iiLimitsCT_m[0];
if(nTA < 0) nTA = 0ll;
if(nTAL < 0) nTAL = 0ll;
if(nTAR < 0) nTAR = 0ll;
if(nTB < 0) nTB = 0ll;
if(nTC < 0) nTC = 0ll;
printf("A Limits %lld %lld | %lld\n", L.iiLimitsAT_m[0], L.iiLimitsAT_m[1], nTA);
printf("AL Limits %lld %lld | %lld\n", L.iiLimitsALT_m[0], L.iiLimitsALT_m[1], nTAL);
printf("AR Limits %lld %lld | %lld\n", L.iiLimitsART_m[0], L.iiLimitsART_m[1], nTAR);
printf("B Limits %lld %lld | %lld\n", L.iiLimitsBT_m[0], L.iiLimitsBT_m[1], nTB);
printf("C Limits %lld %lld | %lld\n", L.iiLimitsCT_m[0], L.iiLimitsCT_m[1], nTC);
if(nTA > 0){
cudaMemcpyAsync(L.iiLimitsA0_h, L.iiLimitsA0_d, nlLimitsA * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[0]);
cudaMemcpyAsync(L.iiLimitsA1_h, L.iiLimitsA1_d, nlLimitsA * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[0]);
}
if(nTAL > 0){
cudaMemcpyAsync(L.iiLimitsAL0_h, L.iiLimitsAL0_d, nlLimitsA * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[1]);
cudaMemcpyAsync(L.iiLimitsAL1_h, L.iiLimitsAL1_d, nlLimitsA * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[1]);
}
if(nTAR > 0){
cudaMemcpyAsync(L.iiLimitsAR0_h, L.iiLimitsAR0_d, nlLimitsA * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[2]);
cudaMemcpyAsync(L.iiLimitsAR1_h, L.iiLimitsAR1_d, nlLimitsA * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[2]);
}
if(nTB > 0){
cudaMemcpyAsync(L.iiLimitsB0_h, L.iiLimitsB0_d, nlLimitsB * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[3]);
cudaMemcpyAsync(L.iiLimitsB1_h, L.iiLimitsB1_d, nlLimitsB * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[3]);
}
if(nTC > 0){
cudaMemcpyAsync(L.iiLimitsC0_h, L.iiLimitsC0_d, nlLimitsC * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[4]);
cudaMemcpyAsync(L.iiLimitsC1_h, L.iiLimitsC1_d, nlLimitsC * sizeof(long long int), cudaMemcpyDeviceToHost, nuLimitsStream[4]);
}
double timeOld = time[0];
long long lPartOld = lPart;
int fii = fi;
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
//read the next line file while calculating the K kernels of the current file
// **************************read iL + 1
int iLL = iL + def_maxlines;
if(iL >= m.NL[fi] - def_maxlines){
iLL = 0;
fii = fi + 1;
timeT[0] += time[0];
time[0] = 0.0;
fclose(dataFile);
sprintf(dataFilename, "%sbin", m.dataFilename[fii]);
dataFile = fopen(dataFilename, "rb");
if(dataFile == NULL){
printf("Error: line list file not found: %s\n", dataFilename);
return 0;
}
printf("Reading Line file %d of %d: %s\n", fii, fi1 - 1, dataFilename);
printf("Number of lines: %lld\n", m.NL[fii]);
}
NL1 = min(def_maxlines, m.NL[fii] - iLL);
lPart = (iLL + def_maxlines - 1) / def_maxlines;
cudaEventRecord(ReadStart);
printf("Reading Line file %d of %d; part %lld of %lld with %d lines\n", fii, fi1 - 1, lPart, (m.NL[fii] + def_maxlines - 1) / def_maxlines - 1, NL);
readBufferCount = 0;
rbvs = 0;
}
cudaDeviceSynchronize();
/*
for(int i = 0; i < nlLimitsA; ++i){
int ni = L.iiLimitsA1_h[i] - L.iiLimitsA0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal A %d %lld %lld | %d\n", i, L.iiLimitsA0_h[i], L.iiLimitsA1_h[i], ni);
}
for(int i = 0; i < nlLimitsA; ++i){
int ni = L.iiLimitsAL1_h[i] - L.iiLimitsAL0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal AL %d %lld %lld | %d\n", i, L.iiLimitsAL0_h[i], L.iiLimitsAL1_h[i], ni);
}
for(int i = 0; i < nlLimitsA; ++i){
int ni = L.iiLimitsAR1_h[i] - L.iiLimitsAR0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal AR %d %lld %lld | %d\n", i, L.iiLimitsAR0_h[i], L.iiLimitsAR1_h[i], ni);
}
for(int i = 0; i < nlLimitsB; ++i){
int ni = L.iiLimitsB1_h[i] - L.iiLimitsB0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal B %d %lld %lld | %d\n", i, L.iiLimitsB0_h[i], L.iiLimitsB1_h[i], ni);
}
for(int i = 0; i < nlLimitsC; ++i){
int ni = L.iiLimitsC1_h[i] - L.iiLimitsC0_h[i];
if(ni > 0 || i < 10) printf("iiLimitsTotal C %d %lld %lld | %d\n", i, L.iiLimitsC0_h[i], L.iiLimitsC1_h[i], ni);
}
*/
if(nTA > 0){
//A
const int nntt = 128;
for(int il = 0; il < NL; il += def_KSn * def_nlB){ //loop over lines
// *************************************
// Call A Line kernels
ntA = Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkA, Nx, tuneStream[0], 10, 1);
// *************************************
if(param.doTuning == 1){
if(ntA > 0 && ntA < 0.6 * ntAOld || ntA > 1.6 * ntAOld){
ntAOld = ntA;
int nkt;
int nktt = nkA;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkA;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
cudaEventRecord(TuneStart);
Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkt, Nx, tuneStream[1], 10, 0);
cudaEventRecord(TuneStop);
cudaEventSynchronize(TuneStop);
cudaEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune A %d %d %d %d %g\n", il, ntA, ntAOld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkA = nktt;
printf("Selftune A %d\n", nktt);
}
}
}
cudaEventRecord(AEvent, tuneStream[0]);
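//While the A kernels run asynchronously on tuneStream[0], the host reads ahead in the line
//list; cudaEventQuery(AEvent) returns cudaSuccess (0) once the kernels have finished, at which
//point reading is interrupted so the host can go on launching the following kernels; the
//remaining records are read later in the "Read the Line list end" block.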
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list A
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
// 2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
//check if the A kernels have finished, otherwise use host to read more data
int ev = cudaEventQuery(AEvent);
if(ev == 0) break;
//printf("read A %d %d\n", readBufferCount, ev);
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
}
if(param.profile == 1){ //only for voigt profiles
if(nTAL > 0){
//AL
const int nntt = 128;
for(int il = 0; il < NL; il += def_KSn * def_nlB){ //loop over lines
// *************************************
// Call AL Line kernels
ntAL = Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkAL, Nx, tuneStream[0], 11, 1);
// *************************************
if(param.doTuning == 1){
if(ntAL > 0 && ntAL < 0.6 * ntALOld || ntAL > 1.6 * ntALOld){
ntALOld = ntAL;
int nkt;
int nktt = nkAL;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkAL;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
cudaEventRecord(TuneStart);
Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkt, Nx, tuneStream[1], 11, 0);
cudaEventRecord(TuneStop);
cudaEventSynchronize(TuneStop);
cudaEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune AL %d %d %d %d %g\n", il, ntAL, ntALOld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkAL = nktt;
printf("Selftune AL %d\n", nktt);
}
}
}
cudaEventRecord(ALEvent, tuneStream[0]);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list AL
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
//2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
//check if the AL kernels have finished, otherwise use host to read more data
int ev = cudaEventQuery(ALEvent);
if(ev == 0) break;
//printf("read AL %d %d\n", readBufferCount, ev);
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
}
if(nTAR > 0){
//AR
const int nntt = 128;
for(int il = 0; il < NL; il += def_KSn * def_nlB){ //loop over lines
// *************************************
// Call AR Line kernels
ntAR = Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkAR, Nx, tuneStream[0], 12, 1);
// *************************************
if(param.doTuning == 1){
if(ntAR > 0 && ntAR < 0.6 * ntAROld || ntAR > 1.6 * ntAROld){
ntAROld = ntAR;
int nkt;
int nktt = nkAR;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkAR;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
cudaEventRecord(TuneStart);
Line6A_Call(L, param, KS_d, x_d, il, NL, nntt, nkt, Nx, tuneStream[1], 12, 0);
cudaEventRecord(TuneStop);
cudaEventSynchronize(TuneStop);
cudaEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune AR %d %d %d %d %g\n", il, ntAR, ntAROld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkAR = nktt;
printf("Selftune AR %d\n", nktt);
}
}
}
cudaEventRecord(AREvent, tuneStream[0]);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list AR
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
//2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
//check if the AR kernels have finished, otherwise use host to read more data
int ev = cudaEventQuery(AREvent);
if(ev == 0) break;
//printf("read AR %d %d\n", readBufferCount, ev);
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
}
//cudaDeviceSynchronize();
if(nTB > 0){
// B
const int nntt2 = 128;
for(int il = 0; il < NL; il += def_KSn * def_nlB){ //loop over lines
// *************************************
// Call B Line kernels
ntB = Line6B_Call(L, param, KS_d, x_d, il, NL, nntt2, nkB, Nx, tuneStream[0], 1);
// *************************************
if(param.doTuning == 1){
if(ntB > 0 && ntB < 0.6 * ntBOld || ntB > 1.6 * ntBOld){
ntBOld = ntB;
int nkt;
int nktt = nkB;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkB;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
cudaEventRecord(TuneStart);
Line6B_Call(L, param, KS_d, x_d, il, NL, nntt2, nkt, Nx, tuneStream[1], 0);
cudaEventRecord(TuneStop);
cudaEventSynchronize(TuneStop);
cudaEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune B %d %d %d %d %g\n", il, ntB, ntBOld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkB = nktt;
printf("Selftune B %d\n", nktt);
}
}
}
cudaEventRecord(BEvent, tuneStream[0]);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list B
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
//2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
//check if the B kernels have finished, otherwise use host to read more data
int ev = cudaEventQuery(BEvent);
if(ev == 0) break;
//printf("read B %d %d\n", readBufferCount, ev);
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
}
//C
if(nTC > 0){
//search higher order regimes of the Voigt profile
const int nntt3 = 128;
float a = (float)(M_PI * sqrt(-1.0 / log(def_TOLF * 0.5)));
float b = (float)(1.0 / sqrt(M_PI));
float c = (float)(2.0 * a / M_PI);
for(int il = 0; il < NL; il += def_KSn * def_nlC){ //loop over lines
// *************************************
// Call C Line kernels
ntC = Line6C_Call(L, param, KS_d, x_d, il, NL, nntt3, nkC, Nx, a, b, c, tuneStream[0],1);
// *************************************
if(param.doTuning == 1){
if(ntC > 0 && ntC < 0.6 * ntCOld || ntC > 1.6 * ntCOld){
ntCOld = ntC;
int nkt;
int nktt = nkC;
float time0;
for(int k = 0; k < 2; ++k){
for(int j = 0; j < 8; ++j){
if(j == 0) nkt = nkC;
if(j > 0){
if(k == 0) nkt = nkt * 2;
else nkt = nkt / 2;
}
if(nkt > 128 || nkt < 1) break;
cudaEventRecord(TuneStart);
Line6C_Call(L, param, KS_d, x_d, il, NL, nntt3, nkt, Nx, a, b, c, tuneStream[1],0);
cudaEventRecord(TuneStop);
cudaEventSynchronize(TuneStop);
cudaEventElapsedTime(&milliseconds, TuneStart, TuneStop);
printf("Selftune C %d %d %d %d %g\n", il, ntC, ntCOld, nkt, milliseconds);
if(j == 0 && k == 0) time0 = milliseconds;
else{
if(milliseconds > time0) break;
else{
nktt = nkt;
time0 = milliseconds;
}
}
}
}
nkC = nktt;
printf("Selftune C %d\n", nktt);
}
}
}
}
} //end profile 1
//Add now all streams together
printf("Add streams A\n");
error = cudaGetLastError();
if(error != 0){
printf("K error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaEventRecord(KStop);
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
// **************************
//Read the Line list end
// **************************
if(param.dataBase == 2 || param.dataBase >= 20){
//2 20 30 31 32
for(int i = readBufferCount; i < NL1; i += readBufferSize){
er = readFileExomol(L, NL1, dataFile, readBuffer_h, readBuffer_d, readBufferSize, readBufferN, readBufferCount, rbvs, CStream);
readBufferCount += readBufferSize;
++rbvs;
}
}
}
if((iL < m.NL[fi] - def_maxlines || fi < fi1 - 1) && iP == param.nP - 1){
if(param.dataBase < 2 || param.dataBase == 3){
// 0 1 3
er = readFile(param, m, part, L, param.qalphaL, NL1, dataFile, Sscale, meanMass);
if(er == 0){
return 0;
}
}
printf("Reading Line file %d, part %lld complete\n", fii, lPart);
cudaEventRecord(ReadStop);
cudaEventSynchronize(ReadStop);
cudaEventElapsedTime(&milliseconds, ReadStart, ReadStop);
time[0] += milliseconds * 0.001;
printf("Time for input, %d %lld: %g seconds\n", fii, lPart, time[0]);
// **************************read iL + 1
NL = NL1;
for(int i = 0; i < def_rBs; ++i){
cudaStreamSynchronize(CStream[i]);
}
}
//wait until all KS streams are complete
cudaDeviceSynchronize();
//collect streams and store all KS_d into K_d
//set KS_d to zero
AddKStreams_kernel <<< (Nx + 511) / 512, 512 >>> (K_d + iP * Nx, KS_d, def_KSn, Nx);
printf("Add streams B\n");
// *************************************
//synchronize here only if no more data has to be read from the disk.
//otherwise read data before synchronization
cudaEventSynchronize(KStop);
cudaEventElapsedTime(&milliseconds, KStart, KStop);
time[2] += milliseconds * 0.001;
printf("Time for K(x): %d %lld %d: %g seconds\n", fi, lPartOld, iP, time[2]);
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Kb error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
if(iL >= m.NL[fi] - def_maxlines && iP == param.nP - 1){
InfoFile = fopen(InfoFilename, "a");
fprintf(InfoFile,"File %d of %d\n", fi, fi1);
fprintf(InfoFile,"Number of lines: %lld\n", m.NL[fi]);
fprintf(InfoFile,"Time for input: %g seconds\n", timeOld);
fprintf(InfoFile,"Time for Lines: %g seconds\n", time[1]);
fprintf(InfoFile,"Time for K(x): %g seconds\n", time[2]);
fclose(InfoFile);
}
} // End of pressure loop
} // End of maxLines loop
} // End of linefile loop
if(fi1 > fi0){
fclose(dataFile);
}
}
if(param.dataBase < 2 || param.dataBase == 3){
// 0 1 3
free_Line(L, param);
}
else{
// 2 20 30 31 32
free2_Line(L, param);
}
} //end species loop
printf("\n");
printf("Time for input total: %g seconds\n", timeT[0]);
printf("Time for Lines total: %g seconds\n", timeT[1]);
printf("Time for K(x) total: %g seconds\n", timeT[2]);
free(binBoundaries_h);
cudaFree(binIndex_d);
cudaFree(binBoundaries_d);
cudaFree(KS_d);
cudaEventRecord(tt1, 0);
for(int i = 0; i < def_KSn; ++i){
cudaStreamDestroy(VStream[i]);
}
for(int i = 0; i < def_rBs; ++i){
cudaStreamDestroy(CStream[i]);
}
for(int i = 0; i < 2; ++i){
cudaStreamDestroy(tuneStream[i]);
}
for(int i = 0; i < 5; ++i){
cudaStreamDestroy(nuLimitsStream[i]);
}
// ****************************
// Write the full line profile
// ****************************
if(param.doStoreFullK == 1){
FILE *OutFile;
char OutFilename[300];
sprintf(OutFilename, "Out_%s.dat", param.name);
OutFile = fopen(OutFilename, filemode);
for(int iP = 0; iP < param.nP; ++iP){
cudaMemcpy(K_h, K_d + iP * Nx, Nx * sizeof(double), cudaMemcpyDeviceToHost);
for(int j = 0; j < Nx; ++j){
if(param.nP == 1){
fprintf(OutFile, "%.20g %.20g\n", x_h[j], K_h[j] * unitScale);
}
else{
fprintf(OutFile, "%.20g %.20g %.20g %.20g\n", x_h[j], K_h[j] * unitScale, param.T, P_h[iP]);
}
}
fprintf(OutFile, "\n\n");
}
fclose(OutFile);
}
if(param.doStoreFullK == -1){
FILE *OutFile;
char OutFilename[500];
sprintf(OutFilename, "%sOut_%s.dat", param.pathK, param.name);
OutFile = fopen(OutFilename, "r");
if(OutFile == NULL){
printf("Error: Input file not found %s\n", OutFilename);
return 0;
}
for(int iP = 0; iP < param.nP; ++iP){
for(int j = 0; j < Nx; ++j){
if(param.nP == 1){
double k;
fscanf(OutFile, "%lf %lf\n", &x_h[j], &k);
K_h[j] = k / unitScale;
}
else{
double k, t, p;
fscanf(OutFile, "%lf %lf %lf %lf\n", &x_h[j], &k, &t, &p);
K_h[j] = k / unitScale;
}
}
cudaMemcpy(K_d + iP * Nx, K_h, Nx * sizeof(double), cudaMemcpyHostToDevice);
fscanf(OutFile, "\n\n");
}
fclose(OutFile);
}
if(param.doStoreFullK == 2){
//write a binary file in single precision
FILE *OutFile;
char OutFilename[300];
sprintf(OutFilename, "Out_%s.bin", param.name);
if(param.replaceFiles == 0){
OutFile = fopen(OutFilename, "ab");
}
else{
OutFile = fopen(OutFilename, "wb");
}
for(int iP = 0; iP < param.nP; ++iP){
cudaMemcpy(K_h, K_d + iP * Nx, Nx * sizeof(double), cudaMemcpyDeviceToHost);
for(int j = 0; j < Nx; ++j){
float Kf = (float)(K_h[j] * unitScale);
fwrite(&Kf, sizeof(float), 1, OutFile);
}
}
fclose(OutFile);
}
if(param.doStoreFullK == -2){
//read a binary file
FILE *OutFile;
char OutFilename[500];
sprintf(OutFilename, "%sOut_%s.bin", param.pathK, param.name);
OutFile = fopen(OutFilename, "rb");
if(OutFile == NULL){
printf("Error: Input file not found %s\n", OutFilename);
return 0;
}
for(int iP = 0; iP < param.nP; ++iP){
for(int j = 0; j < Nx; ++j){
float Kf;
fread(&Kf, sizeof(float), 1, OutFile);
K_h[j] = (double)(Kf) / unitScale;
}
cudaMemcpy(K_d + iP * Nx, K_h, Nx * sizeof(double), cudaMemcpyHostToDevice);
}
fclose(OutFile);
}
// *******************************
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Write error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaEventRecord(tt2, 0);
cudaEventSynchronize(tt2);
cudaEventElapsedTime(&milliseconds, tt1, tt2);
time[3] += milliseconds * 0.001;
printf("Time for write K(x): %g seconds\n", time[3]);
cudaEventRecord(tt1, 0);
// **************************************
// compute the Planck and Rosseland means
// **************************************
if(param.doMean > 0){
double *Pmn_d;
double *Rmn_d;
cudaMalloc((void **) &Pmn_d, Nx * sizeof(double));
cudaMalloc((void **) &Rmn_d, Nx * sizeof(double));
double *means_h, *means_d;
means_h = (double*)malloc(4 * sizeof(double));
cudaMalloc((void **) &means_d, 4 * sizeof(double));
FILE *Out4File;
char Out4Filename[300];
sprintf(Out4Filename, "Out_%s_mean.dat", param.name);
Out4File = fopen(Out4Filename, filemode);
for(int iP = 0; iP < param.nP; ++iP){
Mean_kernel <<< (Nx + 511) / 512, 512 >>> (x_d, Pmn_d, Rmn_d, param.T, Nx);
/*
printf("\n\n");
cudaMemcpy(K_h, Pmn_d, Nx * sizeof(double), cudaMemcpyDeviceToHost);
for(int i = 0; i < Nx; ++i){
printf("%g %g\n", param.numin + i * param.dnu, K_h[i]);
}
printf("\n\n");
cudaMemcpy(K_h, Rmn_d, Nx * sizeof(double), cudaMemcpyDeviceToHost);
for(int i = 0; i < Nx; ++i){
printf("%g %g\n", param.numin + i * param.dnu, K_h[i]);
}
printf("\n\n");
*/
IntegrateMean_kernel <512> <<< 4, 512 >>> (Pmn_d, Rmn_d, x_d, K_d + iP * Nx, means_d, Nx, param.useIndividualX);
double sigma = 2.0 * def_kB * def_kB * def_kB * def_kB / ( def_h * def_h * def_h * def_c * def_c * 15.0) * M_PI * M_PI * M_PI * M_PI * M_PI;
double integral1 = sigma * param.T * param.T * param.T * param.T / M_PI;
double integral2 = M_PI / (4.0 * sigma * param.T * param.T * param.T);
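//sigma is the Stefan-Boltzmann constant, sigma = 2*pi^5*kB^4 / (15*h^3*c^2).
//integral1 = sigma*T^4/pi is the analytic value of Int(B_nu dnu) and 1/integral2 = 4*sigma*T^3/pi
//the analytic value of Int(dB_nu/dT dnu); both are written out below as a check on the
//numerically integrated weighting functions. The means themselves are presumably the Planck mean
//Int(kappa_nu B_nu dnu) / Int(B_nu dnu) and the Rosseland mean
//[Int(kappa_nu^-1 dB_nu/dT dnu) / Int(dB_nu/dT dnu)]^-1, with the means_h entries mapped accordingly.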
cudaMemcpy(means_h, means_d, 4 * sizeof(double), cudaMemcpyDeviceToHost);
if(param.nP == 1){
fprintf(Out4File, "%.20g\n", means_h[0] / means_h[2]);
fprintf(Out4File, "%.20g\n", means_h[3] / means_h[1]);
fprintf(Out4File, "%.20g\n", means_h[2] * param.dnu);
fprintf(Out4File, "%.20g\n", integral1);
fprintf(Out4File, "%.20g\n", means_h[3] * param.dnu);
fprintf(Out4File, "%.20g\n", 1.0 / integral2);
}
else{
fprintf(Out4File, "%.20g %.20g %.20g\n", means_h[0] / means_h[2], param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", means_h[3] / means_h[1], param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", means_h[2], param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", integral1, param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", means_h[3], param.T, P_h[iP]);
fprintf(Out4File, "%.20g %.20g %.20g\n", 1.0 / integral2, param.T, P_h[iP]);
}
//fprintf(Out4File, "\n\n");
}
fclose(Out4File);
free(means_h);
cudaFree(means_d);
cudaFree(Pmn_d);
cudaFree(Rmn_d);
}
cudaFree(x_d);
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("maen error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaEventRecord(tt2, 0);
cudaEventSynchronize(tt2);
cudaEventElapsedTime(&milliseconds, tt1, tt2);
time[4] += milliseconds * 0.001;
printf("Time for mean K(x): %g seconds\n", time[4]);
cudaEventRecord(tt1, 0);
// ***************************************
// Do the sorting of K for all bins
// ***************************************
thrust::device_ptr<double> K_dt = thrust::device_pointer_cast(K_d);
thrust::device_ptr<int> binKey_dt = thrust::device_pointer_cast(binKey_d);
for(int iP = 0; iP < param.nP; ++iP){
thrust::sort_by_key(K_dt + iP * Nx, K_dt + Nx + iP * Nx, binKey_dt);
thrust::stable_sort_by_key(binKey_dt, binKey_dt + Nx, K_dt + iP * Nx);
}
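//The global sort_by_key orders all opacity values and drags the bin keys along; the stable sort
//by bin key then restores the bin grouping while preserving the ascending order inside each bin,
//so every bin ends up with its values sorted, i.e. the cumulative k-distribution k(y).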
cudaFree(binKey_d);
// ****************************************
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Sort error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaEventRecord(tt2, 0);
cudaEventSynchronize(tt2);
cudaEventElapsedTime(&milliseconds, tt1, tt2);
time[5] += milliseconds * 0.001;
printf("Time for sort K(x): %g seconds\n", time[5]);
cudaEventRecord(tt1, 0);
// *********************************
// Prepare Resampling and do QR factorization, the same for all bins
// this doesn't work with individual bins
// *********************************
//size_t free_byte;
//size_t total_byte;
//cudaMemGetInfo( &free_byte, &total_byte );
//printf("***MEMORY %g %g %g\n", (double)(free_byte), (double)(total_byte), (double)(total_byte) - (double)(free_byte));
int *Nxmin_h, *Nxmin_d;
Nxmin_h = (int*)malloc(param.nbins * sizeof(int));
cudaMalloc((void **) &Nxmin_d, param.nbins * sizeof(int));
for(int i = 0; i < param.nbins; ++i){
Nxmin_h[i] = 0;
}
cudaMemset(Nxmin_d, 0, param.nbins * sizeof(int));
if(param.doResampling > 0){
double *K2_h, *K2_d;
K2_h = (double*)malloc(Nx * sizeof(double));
cudaMalloc((void **) &K2_d, Nx * sizeof(double));
//cudaMemGetInfo( &free_byte, &total_byte );
//printf("***MEMORY %g %g %g\n", (double)(free_byte), (double)(total_byte), (double)(total_byte) - (double)(free_byte));
double *V_d; //Vandermonde like matrix for least squares
double *C_d, *D_d;
cudaMalloc((void **) &V_d, param.nC * param.Nxb * sizeof(double));
cudaMalloc((void **) &C_d, param.nC * sizeof(double));
cudaMalloc((void **) &D_d, param.nC * sizeof(double));
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Resampling Allocation error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
Vandermonde_kernel <<< (param.Nxb + 511) / 512, 512 >>> (V_d, (double)(param.Nxb), param.nC);
QR_kernel <512> <<< 1, 512 >>> (V_d, C_d, D_d, param.Nxb, param.nC);
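//The resampling fits ln k(y) in every bin with param.nC coefficients by linear least squares.
//V_d holds the basis functions evaluated at the Nxb sample points and is QR-factorized once
//(the same factorization is reused for all bins); C_d and D_d presumably store the Householder
//coefficients and the diagonal of R. findCut/rescale below handle the values clamped at kmin so
//that the fit only spans the part of the bin above the floor.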
FILE *Out3File;
char Out3Filename[300];
if(param.doResampling == 1){
sprintf(Out3Filename, "Out_%s_cbin.dat", param.name);
Out3File = fopen(Out3Filename, filemode);
}
if(param.doResampling == 2){
if(param.replaceFiles == 1){
for(int i = 0; i < param.nbins; ++i){
sprintf(Out3Filename, "Out_%s_cbin%.4d.dat", param.name, i);
Out3File = fopen(Out3Filename, "w");
fclose(Out3File);
}
}
sprintf(Out3Filename, "Out_%s_cbin%.4d.dat", param.name, 0);
Out3File = fopen(Out3Filename, "a");
}
for(int iP = 0; iP < param.nP; ++iP){
if(param.doResampling == 2 && iP > 0){
fclose(Out3File);
sprintf(Out3Filename, "Out_%s_cbin%.4d.dat", param.name, 0);
Out3File = fopen(Out3Filename, "a");
}
cudaMemset(K2_d, 0, Nx * sizeof(double));
cudaMemset(Nxmin_d, 0, param.nbins * sizeof(int));
findCut_kernel <<< (Nx + 511) / 512, 512 >>> (K_d + iP * Nx, Nx, param.Nxb, param.kmin, Nxmin_d, param.nbins);
rescale_kernel < 512 > <<< param.nbins, 512 >>> (Nxmin_d, K_d + iP * Nx, K2_d, param.Nxb, param.kmin, 1);
/*
cudaMemcpy(K2_h, K2_d, Nx * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(K_h, K_d + iP * Nx, Nx * sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//printf only cut and empty bins
for(int i = 0; i < param.nbins; ++i){
int il = i * param.Nxb;
if(K_h[il] == param.kmin){
for(int j = 0; j < param.Nxb; ++j){
// printf("%g %.20g\n", j / (double)(param.Nxb), K2_h[j + il]);
}
// printf("\n\n");
}
}
//print all bins
for(int i = 0; i < Nx; ++i){
printf("%d %.20g %.20g\n", i, K_h[i], K2_h[i]);
}
*/
copyK2_kernel< 512 > <<< param.nbins, 512 >>> (Nxmin_d, K_d + iP * Nx, K2_d, param.Nxb);
cudaMemcpy(Nxmin_h, Nxmin_d, param.nbins * sizeof(int), cudaMemcpyDeviceToHost);
lnK_kernel <<< (Nx + 511) / 512, 512 >>> (K_d + iP * Nx, Nx);
leastSquare_kernel <512> <<< param.nbins, 512 >>> (V_d, C_d, D_d, K_d + iP * Nx, param.Nxb, param.nC);
for(int i = 0; i < param.nbins; ++i){
int il = i * param.Nxb;
cudaMemcpy(K_h + il, K_d + il + iP * Nx, param.nC * sizeof(double), cudaMemcpyDeviceToHost);
fprintf(Out3File, "%.20g %.20g ", param.kmin, fmin(Nxmin_h[i] / ((double)(param.Nxb - 1)), 1.0));
for(int ic = 0; ic < param.nC; ++ic){
if(Nxmin_h[i] != param.Nxb) fprintf(Out3File, "%.20g ", K_h[il + ic]);
else fprintf(Out3File, "0.0 ");
}
if(param.nP > 1){
fprintf(Out3File, "%.20g %.20g ", param.T, P_h[iP]);
}
if(param.doResampling == 1){
fprintf(Out3File, "\n\n");
}
if(param.doResampling == 2 && i < param.nbins - 1){
fprintf(Out3File, "\n");
fclose(Out3File);
sprintf(Out3Filename, "Out_%s_cbin%.4d.dat", param.name, i + 1);
Out3File = fopen(Out3Filename, "a");
}
}
//fprintf(Out3File, "\n\n");
if(param.doTransmission > 0 || param.doStoreK > 0){
expfx_kernel <<< param.nbins, 512 >>> (K_d + iP * Nx, param.nC, param.Nxb);
rescale_kernel < 512 > <<< param.nbins, 512 >>> (Nxmin_d, K_d + iP * Nx, K2_d, param.Nxb, param.kmin, -1);
copyK2_kernel< 512 > <<< param.nbins, 512 >>> (Nxmin_d, K_d + iP * Nx, K2_d, param.Nxb);
}
}
fclose(Out3File);
cudaFree(V_d);
cudaFree(C_d);
cudaFree(D_d);
cudaFree(K2_d);
free(K2_h);
}
// **********************************
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Resampling error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaEventRecord(tt2, 0);
cudaEventSynchronize(tt2);
cudaEventElapsedTime(&milliseconds, tt1, tt2);
time[6] += milliseconds * 0.001;
printf("Time for Resampling: %g seconds\n", time[6]);
cudaEventRecord(tt1, 0);
// *****************************
// Write K per bin output
// *****************************
if(param.doStoreK > 0){
FILE *Out2File;
char Out2Filename[300];
if(param.doStoreK == 1){
sprintf(Out2Filename, "Out_%s_bin.dat", param.name);
Out2File = fopen(Out2Filename, filemode);
}
if(param.doStoreK == 2){
if(param.replaceFiles == 1){
for(int i = 0; i < param.nbins; ++i){
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, i);
Out2File = fopen(Out2Filename, "w");
fclose(Out2File);
}
}
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, 0);
Out2File = fopen(Out2Filename, "a");
}
for(int iP = 0; iP < param.nP; ++iP){
if(param.doStoreK == 2 && iP > 0){
fclose(Out2File);
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, 0);
Out2File = fopen(Out2Filename, "a");
}
cudaMemcpy(K_h, K_d + iP * Nx, Nx * sizeof(double), cudaMemcpyDeviceToHost);
if(param.useIndividualBins == 0){
for(int i = 0; i < param.nbins; ++i){
int Nxb = param.Nxb;
int il = i * Nxb;
int iedge = 0; //index of edge
int nedge = 0; //number of points per edge interval
double sedge = 0.0; //sum of points in edge interval
for(int j = 0; j < Nxb; ++j){
double y = j / ((double)(Nxb - 1));
double y1 = (j + 1) / ((double)(Nxb - 1));
if(param.useOutputEdges == 0){
if(param.nP == 1){
fprintf(Out2File, "%g %.20g\n", y, K_h[j + il] * unitScale);
}
else{
fprintf(Out2File, "%g %.20g %g %g %d\n", y, K_h[j + il] * unitScale, param.T, P_h[iP], j);
}
}
else{
double edge = outputEdges_h[iedge];
++nedge;
sedge += K_h[j + il] * unitScale;
if(y <= edge && edge <= y1 && iedge < param.nedges){
if(param.nP == 1){
if(iedge > 0) fprintf(Out2File, "%g %.20g\n", 0.5 * (edge + outputEdges_h[iedge - 1]), sedge / ((double)(nedge)));
}
else{
if(iedge > 0) fprintf(Out2File, "%g %.20g %g %g %d\n", 0.5 * (edge + outputEdges_h[iedge - 1]), sedge / ((double)(nedge)), param.T, P_h[iP], iedge - 1);
}
++iedge;
nedge = 0;
sedge = 0.0;
}
}
}
if(param.doStoreK == 1){
fprintf(Out2File,"\n\n");
}
if(param.doStoreK == 2 && i < param.nbins - 1){
fclose(Out2File);
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, i + 1);
Out2File = fopen(Out2Filename, "a");
}
}
}
else{
int ib = 0;
int j = 0;
int iedge = 0; //index of edge
int nedge = 0; //number of points per edge interval
double sedge = 0.0; //sum of points in edge interval
for(int i = 0; i < Nx; ++i){
int il = binIndex_h[ib];
int ir = binIndex_h[ib + 1];
int Nxb = ir - il;
double y = j / ((double)(Nxb - 1));
double y1 = (j + 1) / ((double)(Nxb - 1));
if(param.useOutputEdges == 0){
if(param.nP == 1){
fprintf(Out2File, "%g %.20g\n", y, K_h[i] * unitScale);
}
else{
fprintf(Out2File, "%g %.20g %.20g %.20g %d\n", y, K_h[i] * unitScale, param.T, P_h[iP], j);
}
}
else{
double edge = outputEdges_h[iedge];
++nedge;
sedge += K_h[i] * unitScale;
if(y <= edge && edge <= y1 && iedge < param.nedges){
if(param.nP == 1){
if(iedge > 0) fprintf(Out2File, "%g %.20g\n", 0.5 * (edge + outputEdges_h[iedge - 1]), sedge / ((double)(nedge)));
}
else{
if(iedge > 0) fprintf(Out2File, "%g %.20g %.20g %.20g %d\n", 0.5 * (edge + outputEdges_h[iedge - 1]), sedge / ((double)(nedge)), param.T, P_h[iP], iedge - 1);
}
++iedge;
nedge = 0;
sedge = 0.0;
}
}
++j;
if(i >= ir - 1){
//printf("%d %d %d %d\n", ib, il, ir, Nxb);
++ib;
j = 0;
if(param.doStoreK == 1){
fprintf(Out2File,"\n\n");
}
if(param.doStoreK == 2 && ib < param.nbins){
fclose(Out2File);
sprintf(Out2Filename, "Out_%s_bin%.4d.dat", param.name, ib);
Out2File = fopen(Out2Filename, "a");
}
iedge = 0;
}
if(ib >= param.nbins){
break;
}
}
}
}//end of P loop
fclose(Out2File);
}
// ******************************
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Write error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaEventRecord(tt2, 0);
cudaEventSynchronize(tt2);
cudaEventElapsedTime(&milliseconds, tt1, tt2);
time[7] += milliseconds * 0.001;
printf("Time for write K(y): %g seconds\n", time[7]);
cudaEventRecord(tt1, 0);
//set correction factor for Simpson's rule, needed for resampling
SimpsonCoefficient();
// *********************************
// Calculate the Transmission function
// *********************************
if(param.doTransmission > 0 ){
double *Tr_h, *Tr_d;
Tr_h = (double*)malloc(param.nbins * param.nTr * sizeof(double));
cudaMalloc((void **) &Tr_d, param.nbins * param.nTr * sizeof(double));
FILE *Out3File;
char Out3Filename[300];
if(param.doTransmission == 1){
sprintf(Out3Filename, "Out_%s_tr.dat", param.name);
Out3File = fopen(Out3Filename, filemode);
}
if(param.doTransmission == 2){
if(param.replaceFiles == 1){
for(int i = 0; i < param.nbins; ++i){
sprintf(Out3Filename, "Out_%s_tr%.4d.dat", param.name, i);
Out3File = fopen(Out3Filename, "w");
fclose(Out3File);
}
}
sprintf(Out3Filename, "Out_%s_tr%.4d.dat", param.name, 0);
Out3File = fopen(Out3Filename, "a");
}
for(int iP = 0; iP < param.nP; ++iP){
if(param.doTransmission == 2 && iP > 0){
fclose(Out3File);
sprintf(Out3Filename, "Out_%s_tr%.4d.dat", param.name, 0);
Out3File = fopen(Out3Filename, "a");
}
Integrate_kernel < 512 > <<< param.nbins, 512 >>> (K_d + iP * Nx, Tr_d, param.Nxb, param.nTr, param.dTr, Nxmin_d, param.kmin);
cudaMemcpy(Tr_h, Tr_d, param.nbins * param.nTr * sizeof(double), cudaMemcpyDeviceToHost);
for(int i = 0; i < param.nbins; ++i){
for(int j = 0; j < param.nTr; ++j){
double m = exp((j - param.nTr/2) * param.dTr);
if(param.nP == 1){
fprintf(Out3File, "%.20g %.20g\n", m, Tr_h[i * param.nTr + j]);
}
else{
fprintf(Out3File, "%.20g %.20g %.20g %.20g %d\n", m, Tr_h[i * param.nTr + j], param.T, P_h[iP], j);
}
}
if(param.doTransmission == 1){
fprintf(Out3File, "\n\n");
}
if(param.doTransmission == 2 && i < param.nbins - 1){
fclose(Out3File);
sprintf(Out3Filename, "Out_%s_tr%.4d.dat", param.name, i + 1);
Out3File = fopen(Out3Filename, "a");
}
}
}
fclose(Out3File);
free(Tr_h);
cudaFree(Tr_d);
}
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != 0){
printf("Transmission error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
cudaEventRecord(tt2, 0);
cudaEventSynchronize(tt2);
cudaEventElapsedTime(&milliseconds, tt1, tt2);
time[8] += milliseconds * 0.001;
printf("Time for Transmission: %g seconds\n", time[8]);
InfoFile = fopen(InfoFilename, "a");
fprintf(InfoFile,"\n");
fprintf(InfoFile,"Time for input total: %g seconds\n", timeT[0]);
fprintf(InfoFile,"Time for Lines total: %g seconds\n", timeT[1]);
fprintf(InfoFile,"Time for K(x) total: %g seconds\n", timeT[2]);
fprintf(InfoFile,"Time for write K(x): %g seconds\n", time[3]);
fprintf(InfoFile,"Time for mean K(x): %g seconds\n", time[4]);
fprintf(InfoFile,"Time for sort K(x): %g seconds\n", time[5]);
fprintf(InfoFile,"Time for Resampling: %g seconds\n", time[6]);
fprintf(InfoFile,"Time for write K(y): %g seconds\n", time[7]);
fprintf(InfoFile,"Time for Transmission: %g seconds\n", time[8]);
fclose(InfoFile);
free(K_h);
free(x_h);
free(Nxmin_h);
free(outputEdges_h);
free(binIndex_h);
cudaFree(K_d);
cudaFree(Nxmin_d);
error = cudaGetLastError();
printf("Final error = %d = %s\n",error, cudaGetErrorString(error));
return 0;
}
|
3f072ad0ddf01f340038bc35e103d1b2cea7d6cc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/structs/struct_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include "cudf/utilities/type_dispatcher.hpp"
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/detail/column_utilities.hpp>
#include <jit/type.h>
#include <thrust/equal.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/logical.h>
#include <numeric>
#include <sstream>
namespace cudf {
namespace test {
namespace {
template <bool check_exact_equality>
struct column_property_comparator {
bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs)
{
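// For fixed-point (decimal) types, "equivalent" ignores the scale and compares only the type
// id; all other types must match exactly.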
return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs;
}
void compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
if (check_exact_equality) {
EXPECT_EQ(lhs.type(), rhs.type());
} else {
EXPECT_TRUE(types_equivalent(lhs.type(), rhs.type()));
}
EXPECT_EQ(lhs.size(), rhs.size());
if (lhs.size() > 0 && check_exact_equality) { EXPECT_EQ(lhs.nullable(), rhs.nullable()); }
// equivalent, but not exactly equal columns can have a different number of children if their
// sizes are both 0. Specifically, empty string columns may or may not have children.
if (check_exact_equality || lhs.size() > 0) {
EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
}
template <typename T, std::enable_if_t<!std::is_same<T, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
compare_common(lhs, rhs);
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
compare_common(lhs, rhs);
cudf::lists_column_view lhs_l(lhs);
cudf::lists_column_view rhs_l(rhs);
// recurse
cudf::type_dispatcher(lhs_l.child().type(),
column_property_comparator<check_exact_equality>{},
lhs_l.get_sliced_child(0),
rhs_l.get_sliced_child(0));
}
};
class corresponding_rows_unequal {
public:
corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs) : comp(d_lhs, d_rhs)
{
}
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index) { return !comp(index, index); }
};
class corresponding_rows_not_equivalent {
table_device_view d_lhs;
table_device_view d_rhs;
public:
corresponding_rows_not_equivalent(table_device_view d_lhs, table_device_view d_rhs)
: d_lhs(d_lhs), d_rhs(d_rhs), comp(d_lhs, d_rhs)
{
CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1,
"Unsupported number of columns");
}
struct typed_element_not_equivalent {
template <typename T>
__device__ std::enable_if_t<std::is_floating_point<T>::value, bool> operator()(
column_device_view const& lhs, column_device_view const& rhs, size_type index)
{
if (lhs.is_valid(index) and rhs.is_valid(index)) {
int ulp = 4; // value taken from google test
T x = lhs.element<T>(index);
T y = rhs.element<T>(index);
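        // treat the values as different only when |x - y| exceeds a relative
        // tolerance of ulp machine epsilons (scaled by |x + y|) and is also at
        // least the smallest normal value, which guards the near-zero case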
return std::abs(x - y) > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp &&
std::abs(x - y) >= std::numeric_limits<T>::min();
} else {
// if either is null, then the inequality was checked already
return true;
}
}
template <typename T, typename... Args>
__device__ std::enable_if_t<not std::is_floating_point<T>::value, bool> operator()(Args... args)
{
// Non-floating point inequality is checked already
return true;
}
};
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index)
{
if (not comp(index, index)) {
auto lhs_col = this->d_lhs.column(0);
auto rhs_col = this->d_rhs.column(0);
return type_dispatcher(
lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, index);
}
return false;
}
};
std::string differences_message(thrust::device_vector<int> const& differences,
column_view const& lhs,
column_view const& rhs,
bool all_differences,
int depth)
{
CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty");
std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : "";
if (all_differences) {
std::ostringstream buffer;
buffer << depth_str << "differences:" << std::endl;
auto source_table = cudf::table_view({lhs, rhs});
auto diff_column = fixed_width_column_wrapper<int32_t>(differences.begin(), differences.end());
auto diff_table = cudf::gather(source_table, diff_column);
// Need to pull back the differences
auto const h_left_strings = to_strings(diff_table->get_column(0));
auto const h_right_strings = to_strings(diff_table->get_column(1));
for (size_t i = 0; i < differences.size(); ++i)
buffer << depth_str << "lhs[" << differences[i] << "] = " << h_left_strings[i] << ", rhs["
<< differences[i] << "] = " << h_right_strings[i] << std::endl;
return buffer.str();
} else {
int index = differences[0]; // only stringify first difference
auto diff_lhs = cudf::detail::slice(lhs, index, index + 1);
auto diff_rhs = cudf::detail::slice(rhs, index, index + 1);
return depth_str + "first difference: " + "lhs[" + std::to_string(index) +
"] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) +
"] = " + to_string(diff_rhs, "");
}
}
// non-nested column types
template <typename T, bool check_exact_equality>
struct column_comparator_impl {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});
using ComparatorType = std::conditional_t<check_exact_equality,
corresponding_rows_unequal,
corresponding_rows_not_equivalent>;
auto differences = thrust::device_vector<int>(lhs.size()); // worst case: everything different
auto diff_iter = thrust::copy_if(thrust::device,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs.size()),
differences.begin(),
ComparatorType(*d_lhs, *d_rhs));
differences.resize(thrust::distance(differences.begin(), diff_iter)); // shrink back down
if (not differences.empty())
GTEST_FAIL() << differences_message(differences, lhs, rhs, print_all_differences, depth);
}
};
// forward declaration for nested-type recursion.
template <bool check_exact_equality>
struct column_comparator;
// specialization for list columns
template <bool check_exact_equality>
struct column_comparator_impl<list_view, check_exact_equality> {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
lists_column_view lhs_l(lhs);
lists_column_view rhs_l(rhs);
CUDF_EXPECTS(lhs_l.size() == rhs_l.size(), "List column size mismatch");
if (lhs_l.is_empty()) { return; }
// worst case - everything is different
thrust::device_vector<int> differences(lhs.size());
// TODO : determine how equals/equivalency should work for columns with divergent underlying
// data, but equivalent null masks. Example:
//
// List<int32_t>:
// Length : 3
// Offsets : 0, 3, 5, 5
// Nulls: 011
// Children :
// 1, 2, 3, 4, 5
//
// List<int32_t>:
// Length : 3
// Offsets : 0, 3, 5, 7
// Nulls: 011
// Children :
// 1, 2, 3, 4, 5, 7, 8
//
// These two columns are seemingly equivalent, since their top level rows are the same, with
// just the last element being null. However, pyArrow will say these are -not- equal and
// does not appear to have an equivalent() check. So the question is : should we be handling
// this case when someone calls expect_columns_equivalent()?
// compare offsets, taking slicing into account
// left side
size_type lhs_shift = cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), 0);
auto lhs_offsets = thrust::make_transform_iterator(
lhs_l.offsets().begin<size_type>() + lhs_l.offset(),
[lhs_shift] __device__(size_type offset) { return offset - lhs_shift; });
auto lhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
// right side
size_type rhs_shift = cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), 0);
auto rhs_offsets = thrust::make_transform_iterator(
rhs_l.offsets().begin<size_type>() + rhs_l.offset(),
[rhs_shift] __device__(size_type offset) { return offset - rhs_shift; });
auto rhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
auto diff_iter = thrust::copy_if(
thrust::device,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs_l.size() + 1),
differences.begin(),
[lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, num_rows = lhs_l.size()] __device__(
size_type index) {
// last offset has no validity associated with it
if (index < num_rows - 1) {
if (lhs_valids[index] != rhs_valids[index]) { return true; }
// if validity matches -and- is false, we can ignore the actual values. this
// is technically not checking "equal()", but it's how the non-list code path handles it
if (!lhs_valids[index]) { return false; }
}
return lhs_offsets[index] == rhs_offsets[index] ? false : true;
});
differences.resize(thrust::distance(differences.begin(), diff_iter)); // shrink back down
if (not differences.empty())
GTEST_FAIL() << differences_message(differences, lhs, rhs, print_all_differences, depth);
// recurse
auto lhs_child = lhs_l.get_sliced_child(0);
auto rhs_child = rhs_l.get_sliced_child(0);
cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
print_all_differences,
depth + 1);
}
};
template <bool check_exact_equality>
struct column_comparator_impl<struct_view, check_exact_equality> {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
structs_column_view l_scv(lhs);
structs_column_view r_scv(rhs);
std::for_each(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + lhs.num_children(),
[&](auto i) {
column_view lhs_child = l_scv.get_sliced_child(i);
column_view rhs_child = r_scv.get_sliced_child(i);
cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
print_all_differences,
depth + 1);
});
}
};
template <bool check_exact_equality>
struct column_comparator {
template <typename T>
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth = 0)
{
// compare properties
cudf::type_dispatcher(lhs.type(), column_property_comparator<check_exact_equality>{}, lhs, rhs);
// compare values
column_comparator_impl<T, check_exact_equality> comparator{};
comparator(lhs, rhs, print_all_differences, depth);
}
};
} // namespace
/**
* @copydoc cudf::test::expect_column_properties_equal
*
*/
void expect_column_properties_equal(column_view const& lhs, column_view const& rhs)
{
cudf::type_dispatcher(lhs.type(), column_property_comparator<true>{}, lhs, rhs);
}
/**
* @copydoc cudf::test::expect_column_properties_equivalent
*
*/
void expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs)
{
cudf::type_dispatcher(lhs.type(), column_property_comparator<false>{}, lhs, rhs);
}
/**
* @copydoc cudf::test::expect_columns_equal
*
*/
void expect_columns_equal(cudf::column_view const& lhs,
cudf::column_view const& rhs,
bool print_all_differences)
{
cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, print_all_differences);
}
/**
* @copydoc cudf::test::expect_columns_equivalent
*
*/
void expect_columns_equivalent(cudf::column_view const& lhs,
cudf::column_view const& rhs,
bool print_all_differences)
{
cudf::type_dispatcher(lhs.type(), column_comparator<false>{}, lhs, rhs, print_all_differences);
}
/**
* @copydoc cudf::test::expect_equal_buffers
*
*/
void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes)
{
if (size_bytes > 0) {
EXPECT_NE(nullptr, lhs);
EXPECT_NE(nullptr, rhs);
}
auto typed_lhs = static_cast<char const*>(lhs);
auto typed_rhs = static_cast<char const*>(rhs);
EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs));
}
/**
* @copydoc cudf::test::bitmask_to_host
*
*/
std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c)
{
if (c.nullable()) {
auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type);
std::vector<bitmask_type> host_bitmask(num_bitmasks);
if (c.offset() == 0) {
CUDA_TRY(hipMemcpy(host_bitmask.data(),
c.null_mask(),
num_bitmasks * sizeof(bitmask_type),
hipMemcpyDeviceToHost));
} else {
auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size());
CUDA_TRY(hipMemcpy(host_bitmask.data(),
mask.data(),
num_bitmasks * sizeof(bitmask_type),
hipMemcpyDeviceToHost));
}
return host_bitmask;
} else {
return std::vector<bitmask_type>{};
}
}
namespace {
template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
return std::to_string(value);
}
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
std::ostringstream o;
o << std::setprecision(std::numeric_limits<T>::max_digits10) << value;
return o.str();
}
static auto duration_suffix(cudf::duration_D) { return " days"; }
static auto duration_suffix(cudf::duration_s) { return " seconds"; }
static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; }
static auto duration_suffix(cudf::duration_us) { return " microseconds"; }
static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; }
std::string get_nested_type_str(cudf::column_view const& view)
{
if (view.type().id() == cudf::type_id::LIST) {
lists_column_view lcv(view);
return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">";
}
if (view.type().id() == cudf::type_id::STRUCT) {
std::ostringstream out;
out << cudf::jit::get_type_name(view.type()) + "<";
std::transform(view.child_begin(),
view.child_end(),
std::ostream_iterator<std::string>(out, ","),
[&out](auto const col) { return get_nested_type_str(col); });
out << ">";
return out.str();
}
return cudf::jit::get_type_name(view.type());
}
template <typename NestedColumnView>
std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ")
{
column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index);
CUDF_EXPECTS(offsets.type().id() == type_id::INT32,
"Column does not appear to be an offsets column");
CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!");
size_type output_size = c.size() + 1;
// the first offset value to normalize everything against
size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), 0);
rmm::device_vector<size_type> shifted_offsets(output_size);
// normalize the offset values for the column offset
size_type const* d_offsets = offsets.head<size_type>() + c.offset();
thrust::transform(
rmm::exec_policy(0)->on(0),
d_offsets,
d_offsets + output_size,
shifted_offsets.begin(),
[first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); });
thrust::host_vector<size_type> h_shifted_offsets(shifted_offsets);
std::ostringstream buffer;
for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) {
buffer << h_shifted_offsets[idx];
if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; }
}
return buffer.str();
}
struct column_view_printer {
template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el);
});
}
}
template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
//
// For timestamps, convert timestamp column to column of strings, then
// call string version
//
auto col_as_strings = cudf::strings::from_timestamps(col);
if (col_as_strings->size() == 0) { return; }
this->template operator()<cudf::string_view>(*col_as_strings, out, indent);
}
template <typename Element, typename std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto const h_data = cudf::test::to_host<Element>(col);
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
std::back_inserter(out),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? static_cast<std::string>(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(std::cbegin(h_data.first),
std::cend(h_data.first),
std::back_inserter(out),
[col](auto const& fp) { return static_cast<std::string>(fp); });
}
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
//
// Implementation for strings, call special to_host variant
//
if (col.is_empty()) return;
auto h_data = cudf::test::to_host<std::string>(col);
out.resize(col.size());
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? h_data.first[idx]
: std::string("NULL");
});
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::dictionary32>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
cudf::dictionary_column_view dictionary(col);
if (col.is_empty()) return;
std::vector<std::string> keys = to_strings(dictionary.keys());
std::vector<std::string> indices = to_strings({dictionary.indices().type(),
dictionary.size(),
dictionary.indices().head(),
dictionary.null_mask(),
dictionary.null_count(),
dictionary.offset()});
out.insert(out.end(), keys.begin(), keys.end());
if (!indices.empty()) {
std::string first = "\x08 : " + indices.front(); // use : as delimiter
out.push_back(first); // between keys and indices
out.insert(out.end(), indices.begin() + 1, indices.end());
}
}
// Print the tick counts with the units
template <typename Element, typename std::enable_if_t<is_duration<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx].count()) +
duration_suffix(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el.count()) + duration_suffix(el);
});
}
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
lists_column_view lcv(col);
    // propagate slicing to the child if necessary
column_view child = lcv.get_sliced_child(0);
bool const is_sliced = lcv.offset() > 0 || child.offset() > 0;
std::string tmp =
get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent +
"Length : " + std::to_string(lcv.size()) + "\n" + indent +
"Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" +
(lcv.has_nulls() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" +
detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n"
: "") +
indent + "Children :\n" +
(child.type().id() != type_id::LIST && child.has_nulls()
? indent + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n"
: "") +
(detail::to_string(child, ", ", indent + " ")) + "\n";
out.push_back(tmp);
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::struct_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
structs_column_view view{col};
std::ostringstream out_stream;
out_stream << get_nested_type_str(col) << ":\n"
<< indent << "Length : " << view.size() << ":\n";
if (view.has_nulls()) {
out_stream << indent << "Null count: " << view.null_count() << "\n"
<< detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n";
}
auto iter = thrust::make_counting_iterator(0);
std::transform(iter,
iter + view.num_children(),
std::ostream_iterator<std::string>(out_stream, "\n"),
[&](size_type index) {
return detail::to_string(view.get_sliced_child(index), ", ", indent + " ");
});
out.push_back(out_stream.str());
}
};
} // namespace
namespace detail {
/**
* @copydoc cudf::test::detail::to_strings
*
*/
std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent)
{
std::vector<std::string> reply;
cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent);
return reply;
}
/**
* @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string)
*
* @param indent Indentation for all output
*/
std::string to_string(cudf::column_view const& col,
std::string const& delimiter,
std::string const& indent)
{
std::ostringstream buffer;
std::vector<std::string> h_data = to_strings(col, indent);
buffer << indent;
std::copy(h_data.begin(),
h_data.end() - (!h_data.empty()),
std::ostream_iterator<std::string>(buffer, delimiter.c_str()));
if (!h_data.empty()) buffer << h_data.back();
return buffer.str();
}
/**
* @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string)
*
* @param indent Indentation for all output. See comment in `to_strings` for
* a detailed description.
*/
std::string to_string(std::vector<bitmask_type> const& null_mask,
size_type null_mask_size,
std::string const& indent)
{
std::ostringstream buffer;
buffer << indent;
for (int idx = null_mask_size - 1; idx >= 0; idx--) {
buffer << (cudf::bit_is_set(null_mask.data(), idx) ? "1" : "0");
}
return buffer.str();
}
} // namespace detail
/**
* @copydoc cudf::test::to_strings
*
*/
std::vector<std::string> to_strings(cudf::column_view const& col)
{
return detail::to_strings(col);
}
/**
* @copydoc cudf::test::to_string(cudf::column_view, std::string)
*
*/
std::string to_string(cudf::column_view const& col, std::string const& delimiter)
{
return detail::to_string(col, delimiter);
}
/**
* @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type)
*
*/
std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size)
{
return detail::to_string(null_mask, null_mask_size);
}
/**
* @copydoc cudf::test::print
*
*/
void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter)
{
os << to_string(col, delimiter) << std::endl;
}
/**
* @copydoc cudf::test::validate_host_masks
*
*/
bool validate_host_masks(std::vector<bitmask_type> const& expected_mask,
std::vector<bitmask_type> const& got_mask,
size_type number_of_elements)
{
return std::all_of(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(number_of_elements),
[&expected_mask, &got_mask](auto index) {
return cudf::bit_is_set(expected_mask.data(), index) ==
cudf::bit_is_set(got_mask.data(), index);
});
}
} // namespace test
} // namespace cudf
| 3f072ad0ddf01f340038bc35e103d1b2cea7d6cc.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/structs/struct_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include "cudf/utilities/type_dispatcher.hpp"
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/detail/column_utilities.hpp>
#include <jit/type.h>
#include <thrust/equal.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/logical.h>
#include <numeric>
#include <sstream>
namespace cudf {
namespace test {
namespace {
template <bool check_exact_equality>
struct column_property_comparator {
bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs)
{
return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs;
}
void compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
if (check_exact_equality) {
EXPECT_EQ(lhs.type(), rhs.type());
} else {
EXPECT_TRUE(types_equivalent(lhs.type(), rhs.type()));
}
EXPECT_EQ(lhs.size(), rhs.size());
if (lhs.size() > 0 && check_exact_equality) { EXPECT_EQ(lhs.nullable(), rhs.nullable()); }
// equivalent, but not exactly equal columns can have a different number of children if their
// sizes are both 0. Specifically, empty string columns may or may not have children.
if (check_exact_equality || lhs.size() > 0) {
EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
}
template <typename T, std::enable_if_t<!std::is_same<T, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
compare_common(lhs, rhs);
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs)
{
compare_common(lhs, rhs);
cudf::lists_column_view lhs_l(lhs);
cudf::lists_column_view rhs_l(rhs);
// recurse
cudf::type_dispatcher(lhs_l.child().type(),
column_property_comparator<check_exact_equality>{},
lhs_l.get_sliced_child(0),
rhs_l.get_sliced_child(0));
}
};
class corresponding_rows_unequal {
public:
corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs) : comp(d_lhs, d_rhs)
{
}
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index) { return !comp(index, index); }
};
class corresponding_rows_not_equivalent {
table_device_view d_lhs;
table_device_view d_rhs;
public:
corresponding_rows_not_equivalent(table_device_view d_lhs, table_device_view d_rhs)
: d_lhs(d_lhs), d_rhs(d_rhs), comp(d_lhs, d_rhs)
{
CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1,
"Unsupported number of columns");
}
struct typed_element_not_equivalent {
template <typename T>
__device__ std::enable_if_t<std::is_floating_point<T>::value, bool> operator()(
column_device_view const& lhs, column_device_view const& rhs, size_type index)
{
if (lhs.is_valid(index) and rhs.is_valid(index)) {
int ulp = 4; // value taken from google test
T x = lhs.element<T>(index);
T y = rhs.element<T>(index);
return std::abs(x - y) > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp &&
std::abs(x - y) >= std::numeric_limits<T>::min();
} else {
// if either is null, then the inequality was checked already
return true;
}
}
template <typename T, typename... Args>
__device__ std::enable_if_t<not std::is_floating_point<T>::value, bool> operator()(Args... args)
{
// Non-floating point inequality is checked already
return true;
}
};
cudf::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index)
{
if (not comp(index, index)) {
auto lhs_col = this->d_lhs.column(0);
auto rhs_col = this->d_rhs.column(0);
return type_dispatcher(
lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, index);
}
return false;
}
};
std::string differences_message(thrust::device_vector<int> const& differences,
column_view const& lhs,
column_view const& rhs,
bool all_differences,
int depth)
{
CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty");
std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : "";
if (all_differences) {
std::ostringstream buffer;
buffer << depth_str << "differences:" << std::endl;
auto source_table = cudf::table_view({lhs, rhs});
auto diff_column = fixed_width_column_wrapper<int32_t>(differences.begin(), differences.end());
auto diff_table = cudf::gather(source_table, diff_column);
// Need to pull back the differences
auto const h_left_strings = to_strings(diff_table->get_column(0));
auto const h_right_strings = to_strings(diff_table->get_column(1));
for (size_t i = 0; i < differences.size(); ++i)
buffer << depth_str << "lhs[" << differences[i] << "] = " << h_left_strings[i] << ", rhs["
<< differences[i] << "] = " << h_right_strings[i] << std::endl;
return buffer.str();
} else {
int index = differences[0]; // only stringify first difference
auto diff_lhs = cudf::detail::slice(lhs, index, index + 1);
auto diff_rhs = cudf::detail::slice(rhs, index, index + 1);
return depth_str + "first difference: " + "lhs[" + std::to_string(index) +
"] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) +
"] = " + to_string(diff_rhs, "");
}
}
// non-nested column types
template <typename T, bool check_exact_equality>
struct column_comparator_impl {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});
using ComparatorType = std::conditional_t<check_exact_equality,
corresponding_rows_unequal,
corresponding_rows_not_equivalent>;
auto differences = thrust::device_vector<int>(lhs.size()); // worst case: everything different
auto diff_iter = thrust::copy_if(thrust::device,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs.size()),
differences.begin(),
ComparatorType(*d_lhs, *d_rhs));
differences.resize(thrust::distance(differences.begin(), diff_iter)); // shrink back down
if (not differences.empty())
GTEST_FAIL() << differences_message(differences, lhs, rhs, print_all_differences, depth);
}
};
// forward declaration for nested-type recursion.
template <bool check_exact_equality>
struct column_comparator;
// specialization for list columns
template <bool check_exact_equality>
struct column_comparator_impl<list_view, check_exact_equality> {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
lists_column_view lhs_l(lhs);
lists_column_view rhs_l(rhs);
CUDF_EXPECTS(lhs_l.size() == rhs_l.size(), "List column size mismatch");
if (lhs_l.is_empty()) { return; }
// worst case - everything is different
thrust::device_vector<int> differences(lhs.size());
// TODO : determine how equals/equivalency should work for columns with divergent underlying
// data, but equivalent null masks. Example:
//
// List<int32_t>:
// Length : 3
// Offsets : 0, 3, 5, 5
// Nulls: 011
// Children :
// 1, 2, 3, 4, 5
//
// List<int32_t>:
// Length : 3
// Offsets : 0, 3, 5, 7
// Nulls: 011
// Children :
// 1, 2, 3, 4, 5, 7, 8
//
// These two columns are seemingly equivalent, since their top level rows are the same, with
// just the last element being null. However, pyArrow will say these are -not- equal and
// does not appear to have an equivalent() check. So the question is : should we be handling
// this case when someone calls expect_columns_equivalent()?
// compare offsets, taking slicing into account
// left side
size_type lhs_shift = cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), 0);
auto lhs_offsets = thrust::make_transform_iterator(
lhs_l.offsets().begin<size_type>() + lhs_l.offset(),
[lhs_shift] __device__(size_type offset) { return offset - lhs_shift; });
auto lhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
// right side
size_type rhs_shift = cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), 0);
auto rhs_offsets = thrust::make_transform_iterator(
rhs_l.offsets().begin<size_type>() + rhs_l.offset(),
[rhs_shift] __device__(size_type offset) { return offset - rhs_shift; });
auto rhs_valids = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) {
return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset);
});
auto diff_iter = thrust::copy_if(
thrust::device,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs_l.size() + 1),
differences.begin(),
[lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, num_rows = lhs_l.size()] __device__(
size_type index) {
// last offset has no validity associated with it
if (index < num_rows - 1) {
if (lhs_valids[index] != rhs_valids[index]) { return true; }
// if validity matches -and- is false, we can ignore the actual values. this
// is technically not checking "equal()", but it's how the non-list code path handles it
if (!lhs_valids[index]) { return false; }
}
return lhs_offsets[index] == rhs_offsets[index] ? false : true;
});
differences.resize(thrust::distance(differences.begin(), diff_iter)); // shrink back down
if (not differences.empty())
GTEST_FAIL() << differences_message(differences, lhs, rhs, print_all_differences, depth);
// recurse
auto lhs_child = lhs_l.get_sliced_child(0);
auto rhs_child = rhs_l.get_sliced_child(0);
cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
print_all_differences,
depth + 1);
}
};
template <bool check_exact_equality>
struct column_comparator_impl<struct_view, check_exact_equality> {
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth)
{
structs_column_view l_scv(lhs);
structs_column_view r_scv(rhs);
std::for_each(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + lhs.num_children(),
[&](auto i) {
column_view lhs_child = l_scv.get_sliced_child(i);
column_view rhs_child = r_scv.get_sliced_child(i);
cudf::type_dispatcher(lhs_child.type(),
column_comparator<check_exact_equality>{},
lhs_child,
rhs_child,
print_all_differences,
depth + 1);
});
}
};
template <bool check_exact_equality>
struct column_comparator {
template <typename T>
void operator()(column_view const& lhs,
column_view const& rhs,
bool print_all_differences,
int depth = 0)
{
// compare properties
cudf::type_dispatcher(lhs.type(), column_property_comparator<check_exact_equality>{}, lhs, rhs);
// compare values
column_comparator_impl<T, check_exact_equality> comparator{};
comparator(lhs, rhs, print_all_differences, depth);
}
};
} // namespace
/**
* @copydoc cudf::test::expect_column_properties_equal
*
*/
void expect_column_properties_equal(column_view const& lhs, column_view const& rhs)
{
cudf::type_dispatcher(lhs.type(), column_property_comparator<true>{}, lhs, rhs);
}
/**
* @copydoc cudf::test::expect_column_properties_equivalent
*
*/
void expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs)
{
cudf::type_dispatcher(lhs.type(), column_property_comparator<false>{}, lhs, rhs);
}
/**
* @copydoc cudf::test::expect_columns_equal
*
*/
void expect_columns_equal(cudf::column_view const& lhs,
cudf::column_view const& rhs,
bool print_all_differences)
{
cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, print_all_differences);
}
/**
* @copydoc cudf::test::expect_columns_equivalent
*
*/
void expect_columns_equivalent(cudf::column_view const& lhs,
cudf::column_view const& rhs,
bool print_all_differences)
{
cudf::type_dispatcher(lhs.type(), column_comparator<false>{}, lhs, rhs, print_all_differences);
}
/**
* @copydoc cudf::test::expect_equal_buffers
*
*/
void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes)
{
if (size_bytes > 0) {
EXPECT_NE(nullptr, lhs);
EXPECT_NE(nullptr, rhs);
}
auto typed_lhs = static_cast<char const*>(lhs);
auto typed_rhs = static_cast<char const*>(rhs);
EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs));
}
/**
* @copydoc cudf::test::bitmask_to_host
*
*/
std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c)
{
if (c.nullable()) {
auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type);
std::vector<bitmask_type> host_bitmask(num_bitmasks);
if (c.offset() == 0) {
CUDA_TRY(cudaMemcpy(host_bitmask.data(),
c.null_mask(),
num_bitmasks * sizeof(bitmask_type),
cudaMemcpyDeviceToHost));
} else {
auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size());
CUDA_TRY(cudaMemcpy(host_bitmask.data(),
mask.data(),
num_bitmasks * sizeof(bitmask_type),
cudaMemcpyDeviceToHost));
}
return host_bitmask;
} else {
return std::vector<bitmask_type>{};
}
}
namespace {
template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
return std::to_string(value);
}
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
static auto numeric_to_string_precise(T value)
{
std::ostringstream o;
o << std::setprecision(std::numeric_limits<T>::max_digits10) << value;
return o.str();
}
static auto duration_suffix(cudf::duration_D) { return " days"; }
static auto duration_suffix(cudf::duration_s) { return " seconds"; }
static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; }
static auto duration_suffix(cudf::duration_us) { return " microseconds"; }
static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; }
std::string get_nested_type_str(cudf::column_view const& view)
{
if (view.type().id() == cudf::type_id::LIST) {
lists_column_view lcv(view);
return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">";
}
if (view.type().id() == cudf::type_id::STRUCT) {
std::ostringstream out;
out << cudf::jit::get_type_name(view.type()) + "<";
std::transform(view.child_begin(),
view.child_end(),
std::ostream_iterator<std::string>(out, ","),
[&out](auto const col) { return get_nested_type_str(col); });
out << ">";
return out.str();
}
return cudf::jit::get_type_name(view.type());
}
template <typename NestedColumnView>
std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ")
{
column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index);
CUDF_EXPECTS(offsets.type().id() == type_id::INT32,
"Column does not appear to be an offsets column");
CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!");
size_type output_size = c.size() + 1;
// the first offset value to normalize everything against
size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), 0);
rmm::device_vector<size_type> shifted_offsets(output_size);
// normalize the offset values for the column offset
size_type const* d_offsets = offsets.head<size_type>() + c.offset();
thrust::transform(
rmm::exec_policy(0)->on(0),
d_offsets,
d_offsets + output_size,
shifted_offsets.begin(),
[first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); });
thrust::host_vector<size_type> h_shifted_offsets(shifted_offsets);
std::ostringstream buffer;
for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) {
buffer << h_shifted_offsets[idx];
if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; }
}
return buffer.str();
}
struct column_view_printer {
template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el);
});
}
}
template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
//
// For timestamps, convert timestamp column to column of strings, then
// call string version
//
auto col_as_strings = cudf::strings::from_timestamps(col);
if (col_as_strings->size() == 0) { return; }
this->template operator()<cudf::string_view>(*col_as_strings, out, indent);
}
template <typename Element, typename std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto const h_data = cudf::test::to_host<Element>(col);
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
std::back_inserter(out),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? static_cast<std::string>(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(std::cbegin(h_data.first),
std::cend(h_data.first),
std::back_inserter(out),
[col](auto const& fp) { return static_cast<std::string>(fp); });
}
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
//
// Implementation for strings, call special to_host variant
//
if (col.is_empty()) return;
auto h_data = cudf::test::to_host<std::string>(col);
out.resize(col.size());
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return h_data.second.empty() || bit_is_set(h_data.second.data(), idx)
? h_data.first[idx]
: std::string("NULL");
});
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::dictionary32>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
cudf::dictionary_column_view dictionary(col);
if (col.is_empty()) return;
std::vector<std::string> keys = to_strings(dictionary.keys());
std::vector<std::string> indices = to_strings({dictionary.indices().type(),
dictionary.size(),
dictionary.indices().head(),
dictionary.null_mask(),
dictionary.null_count(),
dictionary.offset()});
out.insert(out.end(), keys.begin(), keys.end());
if (!indices.empty()) {
std::string first = "\x08 : " + indices.front(); // use : as delimiter
out.push_back(first); // between keys and indices
out.insert(out.end(), indices.begin() + 1, indices.end());
}
}
// Print the tick counts with the units
template <typename Element, typename std::enable_if_t<is_duration<Element>()>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
std::transform(thrust::make_counting_iterator(size_type{0}),
thrust::make_counting_iterator(col.size()),
out.begin(),
[&h_data](auto idx) {
return bit_is_set(h_data.second.data(), idx)
? numeric_to_string_precise(h_data.first[idx].count()) +
duration_suffix(h_data.first[idx])
: std::string("NULL");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return numeric_to_string_precise(el.count()) + duration_suffix(el);
});
}
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::list_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
lists_column_view lcv(col);
    // propagate slicing to the child if necessary
column_view child = lcv.get_sliced_child(0);
bool const is_sliced = lcv.offset() > 0 || child.offset() > 0;
std::string tmp =
get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent +
"Length : " + std::to_string(lcv.size()) + "\n" + indent +
"Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" +
(lcv.has_nulls() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" +
detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n"
: "") +
indent + "Children :\n" +
(child.type().id() != type_id::LIST && child.has_nulls()
? indent + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n"
: "") +
(detail::to_string(child, ", ", indent + " ")) + "\n";
out.push_back(tmp);
}
template <typename Element,
typename std::enable_if_t<std::is_same<Element, cudf::struct_view>::value>* = nullptr>
void operator()(cudf::column_view const& col,
std::vector<std::string>& out,
std::string const& indent)
{
structs_column_view view{col};
std::ostringstream out_stream;
out_stream << get_nested_type_str(col) << ":\n"
<< indent << "Length : " << view.size() << ":\n";
if (view.has_nulls()) {
out_stream << indent << "Null count: " << view.null_count() << "\n"
<< detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n";
}
auto iter = thrust::make_counting_iterator(0);
std::transform(iter,
iter + view.num_children(),
std::ostream_iterator<std::string>(out_stream, "\n"),
[&](size_type index) {
return detail::to_string(view.get_sliced_child(index), ", ", indent + " ");
});
out.push_back(out_stream.str());
}
};
} // namespace
namespace detail {
/**
* @copydoc cudf::test::detail::to_strings
*
*/
std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent)
{
std::vector<std::string> reply;
cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent);
return reply;
}
/**
* @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string)
*
* @param indent Indentation for all output
*/
std::string to_string(cudf::column_view const& col,
std::string const& delimiter,
std::string const& indent)
{
std::ostringstream buffer;
std::vector<std::string> h_data = to_strings(col, indent);
buffer << indent;
std::copy(h_data.begin(),
h_data.end() - (!h_data.empty()),
std::ostream_iterator<std::string>(buffer, delimiter.c_str()));
if (!h_data.empty()) buffer << h_data.back();
return buffer.str();
}
/**
* @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string)
*
* @param indent Indentation for all output. See comment in `to_strings` for
* a detailed description.
*/
std::string to_string(std::vector<bitmask_type> const& null_mask,
size_type null_mask_size,
std::string const& indent)
{
std::ostringstream buffer;
buffer << indent;
for (int idx = null_mask_size - 1; idx >= 0; idx--) {
buffer << (cudf::bit_is_set(null_mask.data(), idx) ? "1" : "0");
}
return buffer.str();
}
} // namespace detail
/**
* @copydoc cudf::test::to_strings
*
*/
std::vector<std::string> to_strings(cudf::column_view const& col)
{
return detail::to_strings(col);
}
/**
* @copydoc cudf::test::to_string(cudf::column_view, std::string)
*
*/
std::string to_string(cudf::column_view const& col, std::string const& delimiter)
{
return detail::to_string(col, delimiter);
}
/**
* @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type)
*
*/
std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size)
{
return detail::to_string(null_mask, null_mask_size);
}
/**
* @copydoc cudf::test::print
*
*/
void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter)
{
os << to_string(col, delimiter) << std::endl;
}
/**
* @copydoc cudf::test::validate_host_masks
*
*/
bool validate_host_masks(std::vector<bitmask_type> const& expected_mask,
std::vector<bitmask_type> const& got_mask,
size_type number_of_elements)
{
return std::all_of(thrust::make_counting_iterator(0),
thrust::make_counting_iterator(number_of_elements),
[&expected_mask, &got_mask](auto index) {
return cudf::bit_is_set(expected_mask.data(), index) ==
cudf::bit_is_set(got_mask.data(), index);
});
}
} // namespace test
} // namespace cudf
|
8a0fa3ff68ee0a1d27ed39fc42dc3a5015370e00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <kappa/core.hpp>
__global__
void prescan_blelloch_kernel(uint32_t* a, uint32_t* sum, uint32_t* bsum, int n)
{
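    // work-efficient Blelloch exclusive scan: each block scans 2 * blockDim.x
    // elements in shared memory; when `bsum` is non-null the block's total is
    // recorded there so the host can stitch the per-block scans together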
extern __shared__ uint32_t ssum[];
int tid = threadIdx.x;
ssum[tid] = 0;
ssum[tid + blockDim.x] = 0;
__syncthreads();
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) ssum[tid] = a[i];
if (i + blockDim.x < n) ssum[tid + blockDim.x] = a[i + blockDim.x];
__syncthreads();
int offset = 1;
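    // up-sweep (reduce) phase: build partial sums in place, forming a balanced
    // tree whose root (last shared-memory element) holds the block total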
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (tid < d) {
int ai = offset * (2 * tid + 1) - 1;
int bi = offset * (2 * tid + 2) - 1;
ssum[bi] += ssum[ai];
}
offset <<= 1;
}
if (tid == 0) {
if (bsum) bsum[blockIdx.x] = ssum[2 * blockDim.x - 1];
ssum[2 * blockDim.x - 1] = 0;
}
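    // down-sweep phase: after zeroing the root, walk back down the tree swapping
    // and adding partial sums so each slot ends up with its exclusive prefix sum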
for (int d = 1; d <= blockDim.x; d <<= 1) {
offset >>= 1;
__syncthreads();
if (tid < d) {
int ai = offset * (2 * tid + 1) - 1;
int bi = offset * (2 * tid + 2) - 1;
uint32_t t = ssum[ai];
ssum[ai] = ssum[bi];
ssum[bi] += t;
}
}
__syncthreads();
if (i < n) sum[i] = ssum[tid];
if (i + blockDim.x < n) sum[i + blockDim.x] = ssum[tid + blockDim.x];
}
__global__
void prescan_add_block_kernel(uint32_t* a, uint32_t* sum, uint32_t* bsum, int n)
{
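    // second pass: add the scanned block offset to both elements this thread
    // wrote during the per-block scan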
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;
uint32_t v = bsum[blockIdx.x];
if (i < n) sum[i] += v;
if (i + blockDim.x < n) sum[i + blockDim.x] += v;
}
uint32_t prescan(uint32_t* a, uint32_t* sum, int n)
{
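    // exclusive prefix sum over `a` into `sum` in three stages:
    //   1. Blelloch-scan each block of 2 * block_size elements, saving block totals in bsum
    //   2. scan the block totals themselves (recursively if they span multiple blocks)
    //   3. add the scanned block totals back onto every element of the corresponding block
    // the value read back from sum[n - 1] is returned to the caller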
static int block_size = 512;
static int elems_per_block = 2 * block_size;
int grid_size = divup(n, elems_per_block) + (n % elems_per_block != 0);
uint32_t* bsum = nullptr;
if (bsum == nullptr)
hipMalloc((void**)&bsum, sizeof(uint32_t) * grid_size);
hipMemset(bsum, 0, sizeof(uint32_t) * grid_size);
hipLaunchKernelGGL(( prescan_blelloch_kernel), dim3(grid_size), dim3(block_size),
sizeof(uint32_t) * elems_per_block, 0, a, sum, bsum, n);
if (grid_size <= elems_per_block) {
hipLaunchKernelGGL(( prescan_blelloch_kernel), dim3(1), dim3(block_size),
sizeof(uint32_t) * elems_per_block, 0,
bsum, bsum, nullptr, grid_size);
}
else {
prescan(bsum, bsum, grid_size);
}
hipLaunchKernelGGL(( prescan_add_block_kernel), dim3(grid_size), dim3(block_size), 0, 0, a, sum, bsum, n);
hipFree(bsum);
uint32_t size;
hipMemcpy(&size, &sum[n - 1], sizeof(uint32_t), hipMemcpyDeviceToHost);
return size;
}
| 8a0fa3ff68ee0a1d27ed39fc42dc3a5015370e00.cu | #include <kappa/core.hpp>
__global__
void prescan_blelloch_kernel(uint32_t* a, uint32_t* sum, uint32_t* bsum, int n)
{
extern __shared__ uint32_t ssum[];
int tid = threadIdx.x;
ssum[tid] = 0;
ssum[tid + blockDim.x] = 0;
__syncthreads();
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) ssum[tid] = a[i];
if (i + blockDim.x < n) ssum[tid + blockDim.x] = a[i + blockDim.x];
__syncthreads();
int offset = 1;
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (tid < d) {
int ai = offset * (2 * tid + 1) - 1;
int bi = offset * (2 * tid + 2) - 1;
ssum[bi] += ssum[ai];
}
offset <<= 1;
}
if (tid == 0) {
if (bsum) bsum[blockIdx.x] = ssum[2 * blockDim.x - 1];
ssum[2 * blockDim.x - 1] = 0;
}
for (int d = 1; d <= blockDim.x; d <<= 1) {
offset >>= 1;
__syncthreads();
if (tid < d) {
int ai = offset * (2 * tid + 1) - 1;
int bi = offset * (2 * tid + 2) - 1;
uint32_t t = ssum[ai];
ssum[ai] = ssum[bi];
ssum[bi] += t;
}
}
__syncthreads();
if (i < n) sum[i] = ssum[tid];
if (i + blockDim.x < n) sum[i + blockDim.x] = ssum[tid + blockDim.x];
}
__global__
void prescan_add_block_kernel(uint32_t* a, uint32_t* sum, uint32_t* bsum, int n)
{
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;
uint32_t v = bsum[blockIdx.x];
if (i < n) sum[i] += v;
if (i + blockDim.x < n) sum[i + blockDim.x] += v;
}
uint32_t prescan(uint32_t* a, uint32_t* sum, int n)
{
static int block_size = 512;
static int elems_per_block = 2 * block_size;
int grid_size = divup(n, elems_per_block) + (n % elems_per_block != 0);
uint32_t* bsum = nullptr;
if (bsum == nullptr)
cudaMalloc((void**)&bsum, sizeof(uint32_t) * grid_size);
cudaMemset(bsum, 0, sizeof(uint32_t) * grid_size);
prescan_blelloch_kernel<<<grid_size, block_size,
sizeof(uint32_t) * elems_per_block>>>(a, sum, bsum, n);
if (grid_size <= elems_per_block) {
prescan_blelloch_kernel<<<1, block_size,
sizeof(uint32_t) * elems_per_block>>>(
bsum, bsum, nullptr, grid_size);
}
else {
prescan(bsum, bsum, grid_size);
}
prescan_add_block_kernel<<<grid_size, block_size>>>(a, sum, bsum, n);
cudaFree(bsum);
uint32_t size;
cudaMemcpy(&size, &sum[n - 1], sizeof(uint32_t), cudaMemcpyDeviceToHost);
return size;
}
|
3f944eb9cdd73c2ffddd3a96bbd80ea7b1d60c08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <stdio.h>
#include <math.h>
#define RADIUS 3000
#define NUM_ELEMENTS 1000000
static void handleError(hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ ))
__global__ void stencil_1d(int *in, int *out) {
//PUT YOUR CODE HERE
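    // each thread owns one output element and sums the input over the
    // half-open window [i - RADIUS, i + RADIUS), clamped to the array bounds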
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < NUM_ELEMENTS) {
int total = 0;
for (int j = max(0, i - RADIUS); j < min(NUM_ELEMENTS, i + RADIUS); j++) {
total += in[j];
}
out[i] = total;
}
}
void cpu_stencil_1d(int *in, int *out) {
for (int i = 0; i < NUM_ELEMENTS; i++) {
int total = 0;
for (int j = max(0, i - RADIUS); j < min(NUM_ELEMENTS, i + RADIUS); j++) {
total += in[j];
}
out[i] = total;
}
}
int main() {
//PUT YOUR CODE HERE - INPUT AND OUTPUT ARRAYS
int *in, *out, *d_in, *d_out;
in = (int*)malloc(sizeof(int) * NUM_ELEMENTS);
out = (int*)malloc(sizeof(int) * NUM_ELEMENTS);
for (int i = 0; i < NUM_ELEMENTS; i++) {
in[i] = 1;
out[i] = 0;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
//PUT YOUR CODE HERE - DEVICE MEMORY ALLOCATION
cudaCheck(hipMalloc((void**)&d_in, sizeof(int) * NUM_ELEMENTS));
cudaCheck(hipMalloc((void**)&d_out, sizeof(int) * NUM_ELEMENTS));
cudaCheck(hipMemcpy(d_in, in, sizeof(int) * NUM_ELEMENTS, hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(d_out, in, sizeof(int) * NUM_ELEMENTS, hipMemcpyHostToDevice));
hipEvent_t start_k, stop_k;
hipEventCreate(&start_k);
hipEventCreate(&stop_k);
hipEventRecord( start_k, 0 );
//PUT YOUR CODE HERE - KERNEL EXECUTION
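    // (NUM_ELEMENTS + 1024) / 1024 blocks of 1024 threads: enough blocks to cover
    // every element, with the in-kernel bounds check handling the overshoot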
hipLaunchKernelGGL(( stencil_1d), dim3(((NUM_ELEMENTS+1024)/1024)), dim3(1024), 0, 0, d_in, d_out);
hipEventRecord(stop_k, 0);
hipEventSynchronize(stop_k);
float elapsedTime_k;
hipEventElapsedTime( &elapsedTime_k, start_k, stop_k);
printf("GPU kernel execution time: %3.1f ms\n", elapsedTime_k);
hipEventDestroy(start_k);
hipEventDestroy(stop_k);
cudaCheck(hipPeekAtLastError());
//PUT YOUR CODE HERE - COPY RESULT FROM DEVICE TO HOST
cudaCheck(hipMemcpy(out, d_out, sizeof(int) * NUM_ELEMENTS, hipMemcpyDeviceToHost));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, stop);
printf("Total GPU execution time: %3.1f ms\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
//PUT YOUR CODE HERE - FREE DEVICE MEMORY
cudaCheck(hipFree(d_in));
cudaCheck(hipFree(d_out));
for (int i = 0; i < NUM_ELEMENTS; i++) {
in[i] = 1;
out[i] = 0;
}
struct timespec cpu_start, cpu_stop;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_start);
cpu_stencil_1d(in, out);
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_stop);
double result = (cpu_stop.tv_sec - cpu_start.tv_sec) * 1e3 + (cpu_stop.tv_nsec - cpu_start.tv_nsec) / 1e6;
printf( "CPU execution time: %3.1f ms\n", result);
return 0;
}
| 3f944eb9cdd73c2ffddd3a96bbd80ea7b1d60c08.cu | #include <time.h>
#include <stdio.h>
#include <math.h>
#define RADIUS 3000
#define NUM_ELEMENTS 1000000
static void handleError(cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ ))
__global__ void stencil_1d(int *in, int *out) {
//PUT YOUR CODE HERE
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < NUM_ELEMENTS) {
int total = 0;
for (int j = max(0, i - RADIUS); j < min(NUM_ELEMENTS, i + RADIUS); j++) {
total += in[j];
}
out[i] = total;
}
}
void cpu_stencil_1d(int *in, int *out) {
for (int i = 0; i < NUM_ELEMENTS; i++) {
int total = 0;
for (int j = max(0, i - RADIUS); j < min(NUM_ELEMENTS, i + RADIUS); j++) {
total += in[j];
}
out[i] = total;
}
}
int main() {
//PUT YOUR CODE HERE - INPUT AND OUTPUT ARRAYS
int *in, *out, *d_in, *d_out;
in = (int*)malloc(sizeof(int) * NUM_ELEMENTS);
out = (int*)malloc(sizeof(int) * NUM_ELEMENTS);
for (int i = 0; i < NUM_ELEMENTS; i++) {
in[i] = 1;
out[i] = 0;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
//PUT YOUR CODE HERE - DEVICE MEMORY ALLOCATION
cudaCheck(cudaMalloc((void**)&d_in, sizeof(int) * NUM_ELEMENTS));
cudaCheck(cudaMalloc((void**)&d_out, sizeof(int) * NUM_ELEMENTS));
cudaCheck(cudaMemcpy(d_in, in, sizeof(int) * NUM_ELEMENTS, cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_out, in, sizeof(int) * NUM_ELEMENTS, cudaMemcpyHostToDevice));
cudaEvent_t start_k, stop_k;
cudaEventCreate(&start_k);
cudaEventCreate(&stop_k);
cudaEventRecord( start_k, 0 );
//PUT YOUR CODE HERE - KERNEL EXECUTION
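// Launch geometry note: (NUM_ELEMENTS+1024)/1024 blocks of 1024 threads rounds the grid up;
// for NUM_ELEMENTS = 1000000 that is 977 blocks (~1000448 threads), and the kernel's
// i < NUM_ELEMENTS bounds check discards the surplus threads.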
stencil_1d<<<((NUM_ELEMENTS+1024)/1024), 1024>>>(d_in, d_out);
cudaEventRecord(stop_k, 0);
cudaEventSynchronize(stop_k);
float elapsedTime_k;
cudaEventElapsedTime( &elapsedTime_k, start_k, stop_k);
printf("GPU kernel execution time: %3.1f ms\n", elapsedTime_k);
cudaEventDestroy(start_k);
cudaEventDestroy(stop_k);
cudaCheck(cudaPeekAtLastError());
//PUT YOUR CODE HERE - COPY RESULT FROM DEVICE TO HOST
cudaCheck(cudaMemcpy(out, d_out, sizeof(int) * NUM_ELEMENTS, cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop);
printf("Total GPU execution time: %3.1f ms\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//PUT YOUR CODE HERE - FREE DEVICE MEMORY
cudaCheck(cudaFree(d_in));
cudaCheck(cudaFree(d_out));
for (int i = 0; i < NUM_ELEMENTS; i++) {
in[i] = 1;
out[i] = 0;
}
struct timespec cpu_start, cpu_stop;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_start);
cpu_stencil_1d(in, out);
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_stop);
double result = (cpu_stop.tv_sec - cpu_start.tv_sec) * 1e3 + (cpu_stop.tv_nsec - cpu_start.tv_nsec) / 1e6;
printf( "CPU execution time: %3.1f ms\n", result);
return 0;
}
|
6b6f851eef79b22b38c4e36d19e0af24696d1c22.hip | // !!! This is a file automatically generated by hipify!!!
#include "stable.hpp"
#include "census.hpp"
#include "../util.hpp"
#include "../util_opencv.hpp"
#include <opencv2/imgproc.hpp>
#include <opencv2/core.hpp>
#include <random>
#include <chrono>
#include <hip/hip_runtime.h>
namespace algorithms {
template<int NBITS>
struct Stable {
__host__ __device__ inline void window(const int y, const int x, uint64_t* __restrict__ out) {
int32_t accumulator[NBITS] = {0};
uint16_t i = 0;
for (int wy = -WINY/2; wy <= WINY/2; wy++) {
for (int wx = -WINX/2; wx <= WINX/2; wx++) {
const int16_t value = im(min(height,max(0,int(float(y)*scaleY) + wy)), min(width,max(0,int(float(x)*scaleX) + wx)));
const int16_t filter = filter_mask(0, i++);
const int16_t sign = filter > 0 ? 1 : -1;
// NOTE: indexing starts from 1
const int16_t index = int(abs(filter)) - 1;
accumulator[index] += sign*value;
}
}
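// Binarise the NBITS accumulated responses: each contributes one bit (1 if its sum is
// positive), shifted in so that earlier responses end up in higher-order bits, packed
// into consecutive 64-bit words of the descriptor.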
for (i = 0; i < NBITS;) {
// zero if first value, otherwise shift to left
if (i % 64 == 0) { *out = 0; }
else { *out = (*out << 1); }
*out |= ((accumulator[i] > 0) ? 1 : 0);
i += 1;
// if all bits set, continue to next element
if (i % 64 == 0) { out++; }
}
}
__host__ __device__ void operator()(ushort2 thread, ushort2 stride, ushort2 size) {
for (int y = thread.y+WINY/2; y<size.y-WINY/2-1; y+=stride.y) {
for (int x = thread.x+WINX/2; x<size.x-WINX/2-1; x+=stride.x) {
window(y, x, &(out(y, x*WSTEP)));
}
}
}
const Array2D<uchar>::Data im;
const Array2D<int16_t>::Data filter_mask;
Array2D<uint64_t>::Data out;
const int WINX;
const int WINY;
float scaleX;
float scaleY;
const int width;
const int height;
// number of uint64_t values for each window
const int WSTEP = (NBITS - 1)/(sizeof(uint64_t)*8) + 1;
};
}
#include <iostream>
void StableMatchingCost::generateFilterMask(const int wsize, const int bits) {
if (bits > 127) {
// TODO: hardcoded in HammingCost template parameters
throw std::exception();
}
cv::Mat mask(cv::Size(wsize*wsize, 1), CV_16SC1, cv::Scalar(0));
if (!mask.isContinuous()) { throw std::exception(); }
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<int16_t> distribution(1, bits*2);
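// Each draw from [1, 2*bits] encodes a signed feature index: values <= bits keep a
// positive sign, values above bits map to feature (val - bits) with a negative sign,
// and out-of-range draws near the window edges are clamped. Stable::window() decodes
// this back into sign = +/-1 and index = abs(filter) - 1.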
for (int i = 0; i < mask.total(); i++) {
// index from 1 to bits (instead of 0 to bits-1) value truncated if
// outside window
int16_t val = distribution(generator);
if (val <= bits) {
mask.at<int16_t>(i) = ((i + val) > mask.total()) ? bits - 1 : val;
}
else {
val = -(val - bits);
mask.at<int16_t>(i) = ((i + val) < 0) ? 0 : val;
}
}
wsize_ = wsize;
filter_mask_.create(wsize*wsize, 1);
filter_mask_.toGpuMat().upload(mask);
}
void StableMatchingCost::set(const Array2D<uchar> &l, const Array2D<uchar> &r) {
parallel2D<algorithms::Stable<16>>({l.data(), filter_mask_.data(), stable_l_.data(), wsize_, wsize_, 1.0f, 1.0f, l.width, l.height}, l.width, l.height);
parallel2D<algorithms::Stable<16>>({r.data(), filter_mask_.data(), stable_r_.data(), wsize_, wsize_, 1.0f, 1.0f, r.width, r.height}, r.width, r.height);
}
void StableMatchingCost::set(const Array2D<uchar> &l, const Array2D<uchar> &r, size_t w, size_t h) {
float scaleX = float(l.width) / float(w);
float scaleY = float(l.height) / float(h);
parallel2D<algorithms::Stable<16>>({l.data(), filter_mask_.data(), stable_l_.data(), wsize_, wsize_, scaleX, scaleY, l.width, l.height}, w, h);
parallel2D<algorithms::Stable<16>>({r.data(), filter_mask_.data(), stable_r_.data(), wsize_, wsize_, scaleX, scaleY, r.width, r.height}, w, h);
}
void StableMatchingCost::set(cv::InputArray l, cv::InputArray r) {
if (l.type() != CV_8UC1 || r.type() != CV_8UC1) { throw std::exception(); }
if (l.rows() != r.rows() || l.cols() != r.cols() || l.rows() != height() || l.cols() != width()) {
throw std::exception();
}
if (l.isGpuMat() && r.isGpuMat()) {
auto ml = l.getGpuMat();
auto mr = r.getGpuMat();
set(Array2D<uchar>(ml), Array2D<uchar>(mr));
}
else if (l.isMat() && r.isMat()) {
auto ml = l.getMat();
auto mr = r.getMat();
set(Array2D<uchar>(ml), Array2D<uchar>(mr));
}
else {
throw std::exception();
}
}
| 6b6f851eef79b22b38c4e36d19e0af24696d1c22.cu | #include "stable.hpp"
#include "census.hpp"
#include "../util.hpp"
#include "../util_opencv.hpp"
#include <opencv2/imgproc.hpp>
#include <opencv2/core.hpp>
#include <random>
#include <chrono>
#include <cuda_runtime.h>
namespace algorithms {
template<int NBITS>
struct Stable {
__host__ __device__ inline void window(const int y, const int x, uint64_t* __restrict__ out) {
int32_t accumulator[NBITS] = {0};
uint16_t i = 0;
for (int wy = -WINY/2; wy <= WINY/2; wy++) {
for (int wx = -WINX/2; wx <= WINX/2; wx++) {
const int16_t value = im(min(height,max(0,int(float(y)*scaleY) + wy)), min(width,max(0,int(float(x)*scaleX) + wx)));
const int16_t filter = filter_mask(0, i++);
const int16_t sign = filter > 0 ? 1 : -1;
// NOTE: indexing starts from 1
const int16_t index = int(abs(filter)) - 1;
accumulator[index] += sign*value;
}
}
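// Binarise the NBITS accumulated responses: each contributes one bit (1 if its sum is
// positive), shifted in so that earlier responses end up in higher-order bits, packed
// into consecutive 64-bit words of the descriptor.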
for (i = 0; i < NBITS;) {
// zero if first value, otherwise shift to left
if (i % 64 == 0) { *out = 0; }
else { *out = (*out << 1); }
*out |= ((accumulator[i] > 0) ? 1 : 0);
i += 1;
// if all bits set, continue to next element
if (i % 64 == 0) { out++; }
}
}
__host__ __device__ void operator()(ushort2 thread, ushort2 stride, ushort2 size) {
for (int y = thread.y+WINY/2; y<size.y-WINY/2-1; y+=stride.y) {
for (int x = thread.x+WINX/2; x<size.x-WINX/2-1; x+=stride.x) {
window(y, x, &(out(y, x*WSTEP)));
}
}
}
const Array2D<uchar>::Data im;
const Array2D<int16_t>::Data filter_mask;
Array2D<uint64_t>::Data out;
const int WINX;
const int WINY;
float scaleX;
float scaleY;
const int width;
const int height;
// number of uint64_t values for each window
const int WSTEP = (NBITS - 1)/(sizeof(uint64_t)*8) + 1;
};
}
#include <iostream>
void StableMatchingCost::generateFilterMask(const int wsize, const int bits) {
if (bits > 127) {
// TODO: hardcoded in HammingCost template parameters
throw std::exception();
}
cv::Mat mask(cv::Size(wsize*wsize, 1), CV_16SC1, cv::Scalar(0));
if (!mask.isContinuous()) { throw std::exception(); }
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<int16_t> distribution(1, bits*2);
for (int i = 0; i < mask.total(); i++) {
// index from 1 to bits (instead of 0 to bits-1) value truncated if
// outside window
int16_t val = distribution(generator);
if (val <= bits) {
mask.at<int16_t>(i) = ((i + val) > mask.total()) ? bits - 1 : val;
}
else {
val = -(val - bits);
mask.at<int16_t>(i) = ((i + val) < 0) ? 0 : val;
}
}
wsize_ = wsize;
filter_mask_.create(wsize*wsize, 1);
filter_mask_.toGpuMat().upload(mask);
}
void StableMatchingCost::set(const Array2D<uchar> &l, const Array2D<uchar> &r) {
parallel2D<algorithms::Stable<16>>({l.data(), filter_mask_.data(), stable_l_.data(), wsize_, wsize_, 1.0f, 1.0f, l.width, l.height}, l.width, l.height);
parallel2D<algorithms::Stable<16>>({r.data(), filter_mask_.data(), stable_r_.data(), wsize_, wsize_, 1.0f, 1.0f, r.width, r.height}, r.width, r.height);
}
void StableMatchingCost::set(const Array2D<uchar> &l, const Array2D<uchar> &r, size_t w, size_t h) {
float scaleX = float(l.width) / float(w);
float scaleY = float(l.height) / float(h);
parallel2D<algorithms::Stable<16>>({l.data(), filter_mask_.data(), stable_l_.data(), wsize_, wsize_, scaleX, scaleY, l.width, l.height}, w, h);
parallel2D<algorithms::Stable<16>>({r.data(), filter_mask_.data(), stable_r_.data(), wsize_, wsize_, scaleX, scaleY, r.width, r.height}, w, h);
}
void StableMatchingCost::set(cv::InputArray l, cv::InputArray r) {
if (l.type() != CV_8UC1 || r.type() != CV_8UC1) { throw std::exception(); }
if (l.rows() != r.rows() || l.cols() != r.cols() || l.rows() != height() || l.cols() != width()) {
throw std::exception();
}
if (l.isGpuMat() && r.isGpuMat()) {
auto ml = l.getGpuMat();
auto mr = r.getGpuMat();
set(Array2D<uchar>(ml), Array2D<uchar>(mr));
}
else if (l.isMat() && r.isMat()) {
auto ml = l.getMat();
auto mr = r.getMat();
set(Array2D<uchar>(ml), Array2D<uchar>(mr));
}
else {
throw std::exception();
}
}
|
ac58a1ec95f4d87af71ef475e6ea066e65b0634a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define BLOCK_SIZE 1024
// kernel
__global__ void tiledConvolution_1D_Kernel(float* d_m, const float* __restrict__ d_mask, float* d_n, size_t length, size_t maskLength, int N_TILE_LENGTH)
{
float result = 0;
// indexing variables
int n_index = blockIdx.x * N_TILE_LENGTH + threadIdx.x;
int m_index = n_index - maskLength / 2;
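// Each block computes N_TILE_LENGTH outputs but loads a full BLOCK_SIZE-wide input
// window starting maskLength/2 elements to the left of its first output, so every
// output finds its whole neighbourhood (left and right halo) in the shared-memory tile.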
__shared__ float tile_m[BLOCK_SIZE];
// thread boundary check for loading input tiles
if(m_index >= 0 && m_index < length)
{
tile_m[threadIdx.x] = d_m[m_index];
}
else
{
tile_m[threadIdx.x] = 0;
}
__syncthreads();
// thread boundary check for calculation
if(threadIdx.x < N_TILE_LENGTH && n_index < length)
{
for(int i = 0; i < maskLength; ++i)
{
result += d_mask[i] * tile_m[threadIdx.x + i];
}
// write result
d_n[n_index] = result;
}
}
// CUDA error checking
void errorCheck(unsigned int line)
{
hipError_t err = hipGetLastError();
if(err != hipSuccess)
{
printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// host function containing kernel call
void convolution_1D(float* m, float* mask, float* n, size_t length, size_t maskLength, int N_TILE_LENGTH)
{
dim3 numOfBlocks(ceil(length / (float) N_TILE_LENGTH), 1, 1);
dim3 numOfThreads(BLOCK_SIZE, 1, 1);
size_t bytes_m = length * sizeof(float);
size_t bytes_mask = maskLength * sizeof(float);
float* d_m;
float* d_mask;
float* d_n;
hipMalloc((void**) &d_m, bytes_m);
errorCheck(__LINE__);
hipMalloc((void**) &d_mask, bytes_mask);
errorCheck(__LINE__);
hipMalloc((void**) &d_n, bytes_m);
errorCheck(__LINE__);
hipMemcpy(d_m, m, bytes_m, hipMemcpyHostToDevice);
errorCheck(__LINE__);
hipMemcpy(d_mask, mask, bytes_mask, hipMemcpyHostToDevice);
errorCheck(__LINE__);
hipLaunchKernelGGL(( tiledConvolution_1D_Kernel), dim3(numOfBlocks), dim3(numOfThreads), 0, 0, d_m, d_mask, d_n, length, maskLength, N_TILE_LENGTH);
errorCheck(__LINE__);
hipMemcpy(n, d_n, bytes_m, hipMemcpyDeviceToHost);
errorCheck(__LINE__);
hipFree(d_m);
errorCheck(__LINE__);
hipFree(d_mask);
errorCheck(__LINE__);
hipFree(d_n);
errorCheck(__LINE__);
}
int main()
{
struct timespec start, end;
srand(time(NULL));
size_t length = rand() % 1048577 + 15728640;
size_t maskLength = 121;
int N_TILE_LENGTH = BLOCK_SIZE - (maskLength - 1);
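// With BLOCK_SIZE = 1024 and maskLength = 121, N_TILE_LENGTH = 1024 - 120 = 904:
// each block loads 1024 inputs (output tile plus a halo of maskLength - 1 elements)
// and produces 904 outputs.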
float* m = (float*) malloc(length * sizeof(float));
float* mask = (float*) malloc(maskLength * sizeof(float));
float* n = (float*) malloc(length * sizeof(float));
for(int i = 0; i < length; ++i)
{
m[i] = rand() % 129 - 64;
}
for(int j = 0; j < maskLength; ++j)
{
mask[j] = rand() % 1001 / 1000.0;
}
clock_gettime(CLOCK_REALTIME, &start);
// do convolution
convolution_1D(m, mask, n, length, maskLength, N_TILE_LENGTH);
clock_gettime(CLOCK_REALTIME, &end);
time_t execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Execution time: %ld microseconds.\n", (long) execTime);
return 0;
}
| ac58a1ec95f4d87af71ef475e6ea066e65b0634a.cu | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define BLOCK_SIZE 1024
// kernel
__global__ void tiledConvolution_1D_Kernel(float* d_m, const float* __restrict__ d_mask, float* d_n, size_t length, size_t maskLength, int N_TILE_LENGTH)
{
float result = 0;
// indexing variables
int n_index = blockIdx.x * N_TILE_LENGTH + threadIdx.x;
int m_index = n_index - maskLength / 2;
__shared__ float tile_m[BLOCK_SIZE];
// thread boundary check for loading input tiles
if(m_index >= 0 && m_index < length)
{
tile_m[threadIdx.x] = d_m[m_index];
}
else
{
tile_m[threadIdx.x] = 0;
}
__syncthreads();
// thread boundary check for calculation
if(threadIdx.x < N_TILE_LENGTH && n_index < length)
{
for(int i = 0; i < maskLength; ++i)
{
result += d_mask[i] * tile_m[threadIdx.x + i];
}
// write result
d_n[n_index] = result;
}
}
// CUDA error checking
void errorCheck(unsigned int line)
{
cudaError_t cudaError = cudaGetLastError();
if(cudaError != cudaSuccess)
{
printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
}
// host function containing kernel call
void convolution_1D(float* m, float* mask, float* n, size_t length, size_t maskLength, int N_TILE_LENGTH)
{
dim3 numOfBlocks(ceil(length / (float) N_TILE_LENGTH), 1, 1);
dim3 numOfThreads(BLOCK_SIZE, 1, 1);
size_t bytes_m = length * sizeof(float);
size_t bytes_mask = maskLength * sizeof(float);
float* d_m;
float* d_mask;
float* d_n;
cudaMalloc((void**) &d_m, bytes_m);
errorCheck(__LINE__);
cudaMalloc((void**) &d_mask, bytes_mask);
errorCheck(__LINE__);
cudaMalloc((void**) &d_n, bytes_m);
errorCheck(__LINE__);
cudaMemcpy(d_m, m, bytes_m, cudaMemcpyHostToDevice);
errorCheck(__LINE__);
cudaMemcpy(d_mask, mask, bytes_mask, cudaMemcpyHostToDevice);
errorCheck(__LINE__);
tiledConvolution_1D_Kernel<<<numOfBlocks, numOfThreads>>>(d_m, d_mask, d_n, length, maskLength, N_TILE_LENGTH);
errorCheck(__LINE__);
cudaMemcpy(n, d_n, bytes_m, cudaMemcpyDeviceToHost);
errorCheck(__LINE__);
cudaFree(d_m);
errorCheck(__LINE__);
cudaFree(d_mask);
errorCheck(__LINE__);
cudaFree(d_n);
errorCheck(__LINE__);
}
int main()
{
struct timespec start, end;
srand(time(NULL));
size_t length = rand() % 1048577 + 15728640;
size_t maskLength = 121;
int N_TILE_LENGTH = BLOCK_SIZE - (maskLength - 1);
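// With BLOCK_SIZE = 1024 and maskLength = 121, N_TILE_LENGTH = 1024 - 120 = 904:
// each block loads 1024 inputs (output tile plus a halo of maskLength - 1 elements)
// and produces 904 outputs.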
float* m = (float*) malloc(length * sizeof(float));
float* mask = (float*) malloc(maskLength * sizeof(float));
float* n = (float*) malloc(length * sizeof(float));
for(int i = 0; i < length; ++i)
{
m[i] = rand() % 129 - 64;
}
for(int j = 0; j < maskLength; ++j)
{
mask[j] = rand() % 1001 / 1000.0;
}
clock_gettime(CLOCK_REALTIME, &start);
// do convolution
convolution_1D(m, mask, n, length, maskLength, N_TILE_LENGTH);
clock_gettime(CLOCK_REALTIME, &end);
time_t execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("Execution time: %ld microseconds.\n", (long) execTime);
return 0;
}
|
a7615c7af47848f44219cb46d1307437b61c2075.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
#include <iostream>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the element-wise addition of vectors x and y, writing the result into y. The 2 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(int numElements, float *x, float *y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
y[i] = x[i] + y[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
// Print the vector length to be used, and compute its size
unsigned long long numElements = 2<<29;
size_t size = numElements * sizeof(float);
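// 2<<29 is 2^30 (~1.07e9) elements, so each float vector occupies 4 GiB; the check
// below compares this single-vector size against the device's total global memory.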
std::cout<<deviceProp.name<<std::endl;
std::cout<<"[Vector addition of "<<numElements<<" elements]\n";
std::cout<<size<<std::endl;
if(size > deviceProp.totalGlobalMem)
{
std::cout<<"NOT ENOUGH MEMORY!\n Total memory: "<<deviceProp.totalGlobalMem<<std::endl;
return 0;
}
else
{
std::cout<<"You got the memory :)\n";
}
///
float *x, *y;
hipMallocManaged(&x, size);
hipMallocManaged(&y, size);
///
// Initialize the host input vectors
for (int i = 0; i < numElements; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, numElements, x, y);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < numElements; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
printf("Test PASSED\n");
///
hipFree(x);
hipFree(y);
printf("Done\n");
return 0;
}
| a7615c7af47848f44219cb46d1307437b61c2075.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
#include <iostream>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the element-wise addition of vectors x and y, writing the result into y. The 2 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(int numElements, float *x, float *y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
y[i] = x[i] + y[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
// Print the vector length to be used, and compute its size
unsigned long long numElements = 2<<29;
size_t size = numElements * sizeof(float);
std::cout<<deviceProp.name<<std::endl;
std::cout<<"[Vector addition of "<<numElements<<" elements]\n";
std::cout<<size<<std::endl;
if(size > deviceProp.totalGlobalMem)
{
std::cout<<"NOT ENOUGH MEMORY!\n Total memory: "<<deviceProp.totalGlobalMem<<std::endl;
return 0;
}
else
{
std::cout<<"You got the memory :)\n";
}
///
float *x, *y;
cudaMallocManaged(&x, size);
cudaMallocManaged(&y, size);
///
// Initialize the host input vectors
for (int i = 0; i < numElements; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(numElements, x, y);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < numElements; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
printf("Test PASSED\n");
///
cudaFree(x);
cudaFree(y);
printf("Done\n");
return 0;
}
|
c4b8257e483ec5ac6fbe4e3a00016791b82e4a4c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id$
*
*/
#include "pcl/cuda/sample_consensus/sac_model_plane.h"
#include "pcl/cuda/cutil_math.h"
#include <hip/hip_vector_types.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <stdio.h>
#include <limits>
namespace pcl
{
namespace cuda
{
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
SampleConsensusModelPlane<Storage>::SampleConsensusModelPlane (
const PointCloudConstPtr &cloud) :
SampleConsensusModel<Storage> (cloud)
{
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> void
SampleConsensusModelPlane<Storage>::getSamples (int &iterations, Indices &samples)
{
samples.resize (3);
float trand = indices_->size () / (RAND_MAX + 1.0f);
for (int i = 0; i < 3; ++i)
{
int idx = (int)(rngl_ () * trand);
samples[i] = (*indices_)[idx];
}
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModelPlane<Storage>::computeModelCoefficients (
const Indices &samples, Coefficients &model_coefficients)
{
if (samples.size () != 3)
{
return (false);
}
// Compute the segment values (in 3d) between p1 and p0
float3 p1p0 = ((PointXYZRGB)input_->points[samples[1]]).xyz - ((PointXYZRGB)input_->points[samples[0]]).xyz;
// Compute the segment values (in 3d) between p2 and p0
float3 p2p0 = ((PointXYZRGB)input_->points[samples[2]]).xyz - ((PointXYZRGB)input_->points[samples[0]]).xyz;
// Avoid some crashes by checking for collinearity here
float3 dy1dy2 = p1p0 / p2p0;
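// dy1dy2 is the component-wise ratio (p1-p0)/(p2-p0); if all three components are
// equal, the two edge vectors are parallel and the three samples do not define a plane.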
if ( (dy1dy2.x == dy1dy2.y) && (dy1dy2.z == dy1dy2.y) ) // Check for collinearity
return (false);
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (cross (p1p0, p2p0));
if (model_coefficients.size () != 4)
model_coefficients.resize (4);
model_coefficients[0] = mc.x;
model_coefficients[1] = mc.y;
model_coefficients[2] = mc.z;
// ... + d = 0
model_coefficients[3] = -1 * dot (mc, ((PointXYZRGB)input_->points[samples[0]]).xyz);
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
float4
CreatePlaneHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = bad_value;
int3 samples;
float trand = nr_indices / (RAND_MAX + 1.0f);
thrust::default_random_engine rng (t);
// rng.discard (10);
samples.x = indices[(int)(rng () * trand)];
// rng.discard (20);
samples.y = indices[(int)(rng () * trand)];
// rng.discard (30);
samples.z = indices[(int)(rng () * trand)];
/* samples.x = indices[(int)(thrust::get<0>(t) * trand)];
samples.y = indices[(int)(thrust::get<1>(t) * trand)];
samples.z = indices[(int)(thrust::get<2>(t) * trand)];*/
if (isnan (input[samples.x].x) ||
isnan (input[samples.y].x) ||
isnan (input[samples.z].x))
return (coeff);
// Compute the segment values (in 3d) between p1 and p0
float3 p1p0 = input[samples.y].xyz - input[samples.x].xyz;
// Compute the segment values (in 3d) between p2 and p0
float3 p2p0 = input[samples.z].xyz - input[samples.x].xyz;
// Avoid some crashes by checking for collinearity here
float3 dy1dy2 = p1p0 / p2p0;
if ( (dy1dy2.x == dy1dy2.y) && (dy1dy2.z == dy1dy2.y) ) // Check for collinearity
return (coeff);
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (cross (p1p0, p2p0));
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[samples.x].xyz);
return (coeff);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModelPlane<Storage>::generateModelHypotheses (
Hypotheses &h, int max_iterations)
{
using namespace thrust;
// Create a vector of how many samples/coefficients do we want to get
h.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator ((int) time (0)));
thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
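// Each random seed below is mapped by CreatePlaneHypothesis to one candidate plane
// stored as a float4 (normal x, y, z, d); degenerate samples (NaN points or collinear
// triples) yield a hypothesis filled with the quiet-NaN bad_value instead.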
transform (//first, first + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
h.begin (),
CreatePlaneHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
(int) indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> bool
CountPlanarInlier::operator () (const Tuple &t)
{
if (!isfinite (thrust::raw_reference_cast(thrust::get<0>(t)).x))
return (false);
return (fabs (thrust::raw_reference_cast(thrust::get<0>(t)).x * coefficients.x +
thrust::raw_reference_cast(thrust::get<0>(t)).y * coefficients.y +
thrust::raw_reference_cast(thrust::get<0>(t)).z * coefficients.z + coefficients.w) < threshold);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> int
CheckPlanarInlier::operator () (const Tuple &t)
{
if (isnan (thrust::get<0>(t).x))
return (-1);
// Fill in XYZ (and copy NaNs with it)
float4 pt;
pt.x = thrust::get<0>(t).x;
pt.y = thrust::get<0>(t).y;
pt.z = thrust::get<0>(t).z;
pt.w = 1;
if (fabs (dot (pt, coefficients)) < threshold)
// If inlier, return its position in the vector
return (thrust::get<1>(t));
else
// If outlier, return -1
return (-1);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::countWithinDistance (
const Coefficients &model_coefficients, float threshold)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::countWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
return (int) count_if (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (coefficients, threshold));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::countWithinDistance (
const Hypotheses &h, int idx, float threshold)
{
if (isnan (((float4)h[idx]).x))
return (0);
return (int)
(thrust::count_if (
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())),
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (h[idx], threshold)));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::selectWithinDistance (
const Coefficients &model_coefficients, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
int nr_points = (int) indices_->size ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
typename Indices::iterator it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
// Copy data
//it = remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), -1);
inliers->resize (it - inliers->begin ());
return (int) inliers->size();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::selectWithinDistance (
const Hypotheses &h, int idx, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
/* if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return;
}*/
int nr_points = (int) indices_->size ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
// Copy data
typename Indices::iterator it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
inliers->resize (it - inliers->begin ());
return (int) inliers->size ();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::selectWithinDistance (
Hypotheses &h, int idx, float threshold, IndicesPtr &inliers_stencil, float3 & centroid)
{
using namespace thrust;
// Needs a valid set of model coefficients
/* if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return;
}*/
int nr_points = (int) indices_->size ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
return nr_points - (int) thrust::count (inliers_stencil->begin (), inliers_stencil->end (), -1);
}
template class SampleConsensusModelPlane<Device>;
template class SampleConsensusModelPlane<Host>;
} // namespace
} // namespace
| c4b8257e483ec5ac6fbe4e3a00016791b82e4a4c.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id$
*
*/
#include "pcl/cuda/sample_consensus/sac_model_plane.h"
#include "pcl/cuda/cutil_math.h"
#include <vector_types.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <stdio.h>
#include <limits>
namespace pcl
{
namespace cuda
{
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
SampleConsensusModelPlane<Storage>::SampleConsensusModelPlane (
const PointCloudConstPtr &cloud) :
SampleConsensusModel<Storage> (cloud)
{
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> void
SampleConsensusModelPlane<Storage>::getSamples (int &iterations, Indices &samples)
{
samples.resize (3);
float trand = indices_->size () / (RAND_MAX + 1.0f);
for (int i = 0; i < 3; ++i)
{
int idx = (int)(rngl_ () * trand);
samples[i] = (*indices_)[idx];
}
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModelPlane<Storage>::computeModelCoefficients (
const Indices &samples, Coefficients &model_coefficients)
{
if (samples.size () != 3)
{
return (false);
}
// Compute the segment values (in 3d) between p1 and p0
float3 p1p0 = ((PointXYZRGB)input_->points[samples[1]]).xyz - ((PointXYZRGB)input_->points[samples[0]]).xyz;
// Compute the segment values (in 3d) between p2 and p0
float3 p2p0 = ((PointXYZRGB)input_->points[samples[2]]).xyz - ((PointXYZRGB)input_->points[samples[0]]).xyz;
// Avoid some crashes by checking for collinearity here
float3 dy1dy2 = p1p0 / p2p0;
if ( (dy1dy2.x == dy1dy2.y) && (dy1dy2.z == dy1dy2.y) ) // Check for collinearity
return (false);
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (cross (p1p0, p2p0));
if (model_coefficients.size () != 4)
model_coefficients.resize (4);
model_coefficients[0] = mc.x;
model_coefficients[1] = mc.y;
model_coefficients[2] = mc.z;
// ... + d = 0
model_coefficients[3] = -1 * dot (mc, ((PointXYZRGB)input_->points[samples[0]]).xyz);
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage>
//template <typename Tuple>
float4
CreatePlaneHypothesis<Storage>::operator () (int t)
{
float4 coeff;
coeff.x = coeff.y = coeff.z = coeff.w = bad_value;
int3 samples;
float trand = nr_indices / (RAND_MAX + 1.0f);
thrust::default_random_engine rng (t);
// rng.discard (10);
samples.x = indices[(int)(rng () * trand)];
// rng.discard (20);
samples.y = indices[(int)(rng () * trand)];
// rng.discard (30);
samples.z = indices[(int)(rng () * trand)];
/* samples.x = indices[(int)(thrust::get<0>(t) * trand)];
samples.y = indices[(int)(thrust::get<1>(t) * trand)];
samples.z = indices[(int)(thrust::get<2>(t) * trand)];*/
if (isnan (input[samples.x].x) ||
isnan (input[samples.y].x) ||
isnan (input[samples.z].x))
return (coeff);
// Compute the segment values (in 3d) between p1 and p0
float3 p1p0 = input[samples.y].xyz - input[samples.x].xyz;
// Compute the segment values (in 3d) between p2 and p0
float3 p2p0 = input[samples.z].xyz - input[samples.x].xyz;
// Avoid some crashes by checking for collinearity here
float3 dy1dy2 = p1p0 / p2p0;
if ( (dy1dy2.x == dy1dy2.y) && (dy1dy2.z == dy1dy2.y) ) // Check for collinearity
return (coeff);
// Compute the plane coefficients from the 3 given points in a straightforward manner
// calculate the plane normal n = (p2-p1) x (p3-p1) = cross (p2-p1, p3-p1)
float3 mc = normalize (cross (p1p0, p2p0));
coeff.x = mc.x;
coeff.y = mc.y;
coeff.z = mc.z;
// ... + d = 0
coeff.w = -1 * dot (mc, input[samples.x].xyz);
return (coeff);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> bool
SampleConsensusModelPlane<Storage>::generateModelHypotheses (
Hypotheses &h, int max_iterations)
{
using namespace thrust;
// Create a vector of how many samples/coefficients do we want to get
h.resize (max_iterations);
typename Storage<int>::type randoms (max_iterations);
// a sequence counting up from 0
thrust::counting_iterator<int> index_sequence_begin (0);
// transform the range [0,1,2,...N]
// to a range of random numbers
thrust::transform (index_sequence_begin,
index_sequence_begin + max_iterations,
randoms.begin (),
parallel_random_generator ((int) time (0)));
thrust::counting_iterator<int> first (0);
// Input: Point Cloud, Indices
// Output: Hypotheses
transform (//first, first + max_iterations,
randoms.begin (), randoms.begin () + max_iterations,
h.begin (),
CreatePlaneHypothesis<Storage> (thrust::raw_pointer_cast (&input_->points[0]),
thrust::raw_pointer_cast (&(*indices_)[0]),
(int) indices_->size (), std::numeric_limits<float>::quiet_NaN ()));
return (true);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> bool
CountPlanarInlier::operator () (const Tuple &t)
{
if (!isfinite (thrust::raw_reference_cast(thrust::get<0>(t)).x))
return (false);
return (fabs (thrust::raw_reference_cast(thrust::get<0>(t)).x * coefficients.x +
thrust::raw_reference_cast(thrust::get<0>(t)).y * coefficients.y +
thrust::raw_reference_cast(thrust::get<0>(t)).z * coefficients.z + coefficients.w) < threshold);
}
//////////////////////////////////////////////////////////////////////////
template <typename Tuple> int
CheckPlanarInlier::operator () (const Tuple &t)
{
if (isnan (thrust::get<0>(t).x))
return (-1);
// Fill in XYZ (and copy NaNs with it)
float4 pt;
pt.x = thrust::get<0>(t).x;
pt.y = thrust::get<0>(t).y;
pt.z = thrust::get<0>(t).z;
pt.w = 1;
if (fabs (dot (pt, coefficients)) < threshold)
// If inlier, return its position in the vector
return (thrust::get<1>(t));
else
// If outlier, return -1
return (-1);
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::countWithinDistance (
const Coefficients &model_coefficients, float threshold)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::countWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
return (int) count_if (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (coefficients, threshold));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::countWithinDistance (
const Hypotheses &h, int idx, float threshold)
{
if (isnan (((float4)h[idx]).x))
return (0);
return (int)
(thrust::count_if (
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())),
thrust::make_zip_iterator (thrust::make_tuple (input_->points.begin (), indices_->begin ())) +
indices_->size (),
CountPlanarInlier (h[idx], threshold)));
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::selectWithinDistance (
const Coefficients &model_coefficients, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return 0;
}
int nr_points = (int) indices_->size ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
float4 coefficients;
coefficients.x = model_coefficients[0];
coefficients.y = model_coefficients[1];
coefficients.z = model_coefficients[2];
coefficients.w = model_coefficients[3];
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
typename Indices::iterator it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
// Copy data
//it = remove_copy (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), -1);
inliers->resize (it - inliers->begin ());
return (int) inliers->size();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::selectWithinDistance (
const Hypotheses &h, int idx, float threshold, IndicesPtr &inliers, IndicesPtr &inliers_stencil)
{
using namespace thrust;
// Needs a valid set of model coefficients
/* if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return;
}*/
int nr_points = (int) indices_->size ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
// Send the data to the device
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
if (!inliers)
inliers.reset (new Indices());
inliers->resize (nr_points);
// Copy data
typename Indices::iterator it = copy_if (inliers_stencil->begin (), inliers_stencil->end (), inliers->begin (), isInlier ());
inliers->resize (it - inliers->begin ());
return (int) inliers->size ();
}
//////////////////////////////////////////////////////////////////////////
template <template <typename> class Storage> int
SampleConsensusModelPlane<Storage>::selectWithinDistance (
Hypotheses &h, int idx, float threshold, IndicesPtr &inliers_stencil, float3 & centroid)
{
using namespace thrust;
// Needs a valid set of model coefficients
/* if (model_coefficients.size () != 4)
{
fprintf (stderr, "[pcl::cuda::SampleConsensusModelPlane::selectWithinDistance] Invalid number of model coefficients given (%lu)!\n", (unsigned long) model_coefficients.size ());
return;
}*/
int nr_points = (int) indices_->size ();
if (!inliers_stencil)
inliers_stencil.reset (new Indices());
inliers_stencil->resize (nr_points);
float4 coefficients;
coefficients.x = ((float4)h[idx]).x;
coefficients.y = ((float4)h[idx]).y;
coefficients.z = ((float4)h[idx]).z;
coefficients.w = ((float4)h[idx]).w;
transform (
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())),
make_zip_iterator (make_tuple (input_->points.begin (), indices_->begin ())) +
nr_points,
inliers_stencil->begin (),
CheckPlanarInlier (coefficients, threshold));
return nr_points - (int) thrust::count (inliers_stencil->begin (), inliers_stencil->end (), -1);
}
template class SampleConsensusModelPlane<Device>;
template class SampleConsensusModelPlane<Host>;
} // namespace
} // namespace
|
d91c1c36f9c29ff61851a958adec595257c0cfe9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "windows.h"
#include "src_hip.cuh"
#include "kernel_hip.cuh"
int main()
{
//Just an example here - you are free to modify them
int I_width, I_height, T_width, T_height;
float *I, *T;
int x1, y1, x2, y2;
//set the file location of I, T, and Output
char I_path[] = "img.bmp";
char T_path[] = "img_t.bmp";
char out_path[] = "output.bmp";
I = ReadBMP(I_path, &I_width, &I_height);
T = ReadBMP(T_path, &T_width, &T_height);
//-----------------------------------------------------
float *l1_dev, *l2_dev, *lx_dev, *ly_dev, *I_dev;
float *l1_host, *l2_host, *lx_host, *ly_host;
float *l1_shadow, *l2_shadow, *lx_shadow, *ly_shadow;
size_t memsize_in;
memsize_in = I_height * I_width * sizeof(float);
l1_host = (float *)malloc(memsize_in);
l2_host = (float *)malloc(memsize_in);
lx_host = (float *)malloc(memsize_in);
ly_host = (float *)malloc(memsize_in);
l1_shadow = (float *)malloc(memsize_in);
l2_shadow = (float *)malloc(memsize_in);
lx_shadow = (float *)malloc(memsize_in);
ly_shadow = (float *)malloc(memsize_in);
hipMalloc((void **)&l1_dev, memsize_in);
hipMalloc((void **)&l2_dev, memsize_in);
hipMalloc((void **)&lx_dev, memsize_in);
hipMalloc((void **)&ly_dev, memsize_in);
hipMalloc((void **)&I_dev, memsize_in);
hipMemcpy(I_dev, I, memsize_in, hipMemcpyHostToDevice);
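// sum_row/sum_col are defined in kernel.cuh (not shown here); they presumably build
// row- and column-wise running sums of the four per-pixel quantities l1, l2, lx, ly
// that compute_feature later combines into per-window feature vectors.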
// Invoke kernel sum_row
int nblock_h = I_height / BLOCK_HEIGHT + (I_height%BLOCK_HEIGHT >0);
int nblock_w = 1;
dim3 nblocks(nblock_w, nblock_h);
dim3 nthreads(BLOCK_WIDTH, BLOCK_HEIGHT);
sum_row << <nblocks, nthreads >> >(I_dev, l1_dev, l2_dev, lx_dev, ly_dev, I_width, I_height);
// Sum column -------------------------
// Invoke kernel sum_col
int nblock_h_col = 1;
int nblock_w_col = I_width / BLOCK_WIDTH2 + (I_width%BLOCK_WIDTH2 > 0);
dim3 nblocks_col(nblock_w_col, nblock_h_col);
dim3 nthreads_col(BLOCK_WIDTH2, BLOCK_HEIGHT2);
sum_col << <nblocks_col, nthreads_col >> > (l1_dev, l2_dev, lx_dev, ly_dev, I_width, I_height);
// Since the template is small, use CPU instead of CUDA kernel
sum_row_cpu(T, l1_host, l2_host, lx_host, ly_host, T_width, T_height);
sum_col_cpu(l1_host, l2_host, lx_host, ly_host, T_width, T_height);
hipMemcpy(l1_shadow, l1_dev, memsize_in, hipMemcpyDeviceToHost);
hipMemcpy(l2_shadow, l2_dev, memsize_in, hipMemcpyDeviceToHost);
hipMemcpy(lx_shadow, lx_dev, memsize_in, hipMemcpyDeviceToHost);
hipMemcpy(ly_shadow, ly_dev, memsize_in, hipMemcpyDeviceToHost);
//Compute feature...................................
float *v1_dev, *v2_dev, *v3_dev, *v4_dev, *X_dev;
float S1, S2, Sx, Sy;
float *v1_shadow, *v2_shadow, *v3_shadow, *v4_shadow, *X_shadow;
float vt1, vt2, vt3, vt4;
size_t memsize_in_t, memsize_output;
int NI = I_width * I_height;
int NO = (I_width - T_width + 1) * (I_height - T_width + 1);
memsize_output = NO * sizeof(float);
memsize_in_t = T_height * T_width * sizeof(float);
v1_shadow = (float *)malloc(memsize_output);
v2_shadow = (float *)malloc(memsize_output);
v3_shadow = (float *)malloc(memsize_output);
v4_shadow = (float *)malloc(memsize_output);
X_shadow = (float *)malloc(memsize_output);
hipMalloc((void **)&v1_dev, memsize_output);
hipMalloc((void **)&v2_dev, memsize_output);
hipMalloc((void **)&v3_dev, memsize_output);
hipMalloc((void **)&v4_dev, memsize_output);
hipMalloc((void **)&X_dev, memsize_output);
// Since the template is small, use CPU instead of CUDA kernel
compute_template_feature_cpu(&S1, &S2, &Sx, &Sy, &vt1, &vt2, &vt3, &vt4, l1_host, l2_host, lx_host, ly_host, T_width, T_width, T_height);
// Invoke kernel to compute feature vectors and square of Euclidean distance
int nblock_w_f = (I_width - T_width + 1) / BLOCK_WIDTH3 + ((I_width - T_width + 1) % BLOCK_WIDTH3 > 0);
int nblock_h_f = (I_height - T_width + 1) / BLOCK_HEIGHT3 + ((I_height - T_width + 1) % BLOCK_HEIGHT3>0);
dim3 nblocks_f(nblock_w_f, nblock_h_f);
dim3 nthreads_f(BLOCK_WIDTH3, BLOCK_HEIGHT3);
compute_feature << <nblocks_f, nthreads_f >> >(vt1, vt2, vt3, vt4, v1_dev, v2_dev, v3_dev, v4_dev, X_dev, l1_dev, l2_dev, lx_dev, ly_dev, T_width, I_width, I_height);
hipMemcpy(v1_shadow, v1_dev, memsize_output, hipMemcpyDeviceToHost);
hipMemcpy(v2_shadow, v2_dev, memsize_output, hipMemcpyDeviceToHost);
hipMemcpy(v3_shadow, v3_dev, memsize_output, hipMemcpyDeviceToHost);
hipMemcpy(v4_shadow, v4_dev, memsize_output, hipMemcpyDeviceToHost);
hipMemcpy(X_shadow, X_dev, memsize_output, hipMemcpyDeviceToHost);
// Find the coordinates of bounding box
find_min(X_shadow, x1, x2, y1, y2, I_width, I_height, T_width);
printf("x1 is %d, x2 is %d, y1 is %d, y2 is %d\n", x1, x2, y1, y2);
free(v1_shadow);
free(v2_shadow);
free(v3_shadow);
free(v4_shadow);
free(X_shadow);
hipFree(v1_dev);
hipFree(v2_dev);
hipFree(v3_dev);
hipFree(v4_dev);
hipFree(X_dev);
hipFree(l1_dev);
hipFree(l2_dev);
hipFree(lx_dev);
hipFree(ly_dev);
hipFree(I_dev);
free(l1_host);
free(l2_host);
free(lx_host);
free(ly_host);
free(l1_shadow);
free(l2_shadow);
free(lx_shadow);
free(ly_shadow);
// Assuming that the best match patch is enclosed by vertices (x1,y1)(x2,y1)(x1,y2)(x2,y2)
MarkAndSave(I_path, x1, y1, x2, y2, out_path);
free(I); free(T);
return 0;
} | d91c1c36f9c29ff61851a958adec595257c0cfe9.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include "cuda_runtime.h"
#include "windows.h"
#include "src.cuh"
#include "kernel.cuh"
int main()
{
//Just an example here - you are free to modify them
int I_width, I_height, T_width, T_height;
float *I, *T;
int x1, y1, x2, y2;
//set the file location of I, T, and Output
char I_path[] = "img.bmp";
char T_path[] = "img_t.bmp";
char out_path[] = "output.bmp";
I = ReadBMP(I_path, &I_width, &I_height);
T = ReadBMP(T_path, &T_width, &T_height);
//-----------------------------------------------------
float *l1_dev, *l2_dev, *lx_dev, *ly_dev, *I_dev;
float *l1_host, *l2_host, *lx_host, *ly_host;
float *l1_shadow, *l2_shadow, *lx_shadow, *ly_shadow;
size_t memsize_in;
memsize_in = I_height * I_width * sizeof(float);
l1_host = (float *)malloc(memsize_in);
l2_host = (float *)malloc(memsize_in);
lx_host = (float *)malloc(memsize_in);
ly_host = (float *)malloc(memsize_in);
l1_shadow = (float *)malloc(memsize_in);
l2_shadow = (float *)malloc(memsize_in);
lx_shadow = (float *)malloc(memsize_in);
ly_shadow = (float *)malloc(memsize_in);
cudaMalloc((void **)&l1_dev, memsize_in);
cudaMalloc((void **)&l2_dev, memsize_in);
cudaMalloc((void **)&lx_dev, memsize_in);
cudaMalloc((void **)&ly_dev, memsize_in);
cudaMalloc((void **)&I_dev, memsize_in);
cudaMemcpy(I_dev, I, memsize_in, cudaMemcpyHostToDevice);
// Invoke kernel sum_row
int nblock_h = I_height / BLOCK_HEIGHT + (I_height%BLOCK_HEIGHT >0);
int nblock_w = 1;
dim3 nblocks(nblock_w, nblock_h);
dim3 nthreads(BLOCK_WIDTH, BLOCK_HEIGHT);
sum_row << <nblocks, nthreads >> >(I_dev, l1_dev, l2_dev, lx_dev, ly_dev, I_width, I_height);
// Sum column -------------------------
// Invoke kernel sum_col
int nblock_h_col = 1;
int nblock_w_col = I_width / BLOCK_WIDTH2 + (I_width%BLOCK_WIDTH2 > 0);
dim3 nblocks_col(nblock_w_col, nblock_h_col);
dim3 nthreads_col(BLOCK_WIDTH2, BLOCK_HEIGHT2);
sum_col << <nblocks_col, nthreads_col >> > (l1_dev, l2_dev, lx_dev, ly_dev, I_width, I_height);
// Since the template is small, use CPU instead of CUDA kernel
sum_row_cpu(T, l1_host, l2_host, lx_host, ly_host, T_width, T_height);
sum_col_cpu(l1_host, l2_host, lx_host, ly_host, T_width, T_height);
cudaMemcpy(l1_shadow, l1_dev, memsize_in, cudaMemcpyDeviceToHost);
cudaMemcpy(l2_shadow, l2_dev, memsize_in, cudaMemcpyDeviceToHost);
cudaMemcpy(lx_shadow, lx_dev, memsize_in, cudaMemcpyDeviceToHost);
cudaMemcpy(ly_shadow, ly_dev, memsize_in, cudaMemcpyDeviceToHost);
//Compute feature...................................
float *v1_dev, *v2_dev, *v3_dev, *v4_dev, *X_dev;
float S1, S2, Sx, Sy;
float *v1_shadow, *v2_shadow, *v3_shadow, *v4_shadow, *X_shadow;
float vt1, vt2, vt3, vt4;
size_t memsize_in_t, memsize_output;
int NI = I_width * I_height;
int NO = (I_width - T_width + 1) * (I_height - T_width + 1);
memsize_output = NO * sizeof(float);
memsize_in_t = T_height * T_width * sizeof(float);
v1_shadow = (float *)malloc(memsize_output);
v2_shadow = (float *)malloc(memsize_output);
v3_shadow = (float *)malloc(memsize_output);
v4_shadow = (float *)malloc(memsize_output);
X_shadow = (float *)malloc(memsize_output);
cudaMalloc((void **)&v1_dev, memsize_output);
cudaMalloc((void **)&v2_dev, memsize_output);
cudaMalloc((void **)&v3_dev, memsize_output);
cudaMalloc((void **)&v4_dev, memsize_output);
cudaMalloc((void **)&X_dev, memsize_output);
// Since the template is small, use CPU instead of CUDA kernel
compute_template_feature_cpu(&S1, &S2, &Sx, &Sy, &vt1, &vt2, &vt3, &vt4, l1_host, l2_host, lx_host, ly_host, T_width, T_width, T_height);
// Invoke kernel to compute feature vectors and square of Euclidean distance
int nblock_w_f = (I_width - T_width + 1) / BLOCK_WIDTH3 + ((I_width - T_width + 1) % BLOCK_HEIGHT3>0);
int nblock_h_f = (I_height - T_width + 1) / BLOCK_HEIGHT3 + ((I_height - T_width + 1) % BLOCK_HEIGHT3>0);
dim3 nblocks_f(nblock_w_f, nblock_h_f);
dim3 nthreads_f(BLOCK_WIDTH3, BLOCK_HEIGHT3);
compute_feature << <nblocks_f, nthreads_f >> >(vt1, vt2, vt3, vt4, v1_dev, v2_dev, v3_dev, v4_dev, X_dev, l1_dev, l2_dev, lx_dev, ly_dev, T_width, I_width, I_height);
cudaMemcpy(v1_shadow, v1_dev, memsize_output, cudaMemcpyDeviceToHost);
cudaMemcpy(v2_shadow, v2_dev, memsize_output, cudaMemcpyDeviceToHost);
cudaMemcpy(v3_shadow, v3_dev, memsize_output, cudaMemcpyDeviceToHost);
cudaMemcpy(v4_shadow, v4_dev, memsize_output, cudaMemcpyDeviceToHost);
cudaMemcpy(X_shadow, X_dev, memsize_output, cudaMemcpyDeviceToHost);
// Find the coordinates of bounding box
find_min(X_shadow, x1, x2, y1, y2, I_width, I_height, T_width);
printf("x1 is %d, x2 is %d, y1 is %d, y2 is %d\n", x1, x2, y1, y2);
free(v1_shadow);
free(v2_shadow);
free(v3_shadow);
free(v4_shadow);
free(X_shadow);
cudaFree(v1_dev);
cudaFree(v2_dev);
cudaFree(v3_dev);
cudaFree(v4_dev);
cudaFree(X_dev);
cudaFree(l1_dev);
cudaFree(l2_dev);
cudaFree(lx_dev);
cudaFree(ly_dev);
cudaFree(I_dev);
free(l1_host);
free(l2_host);
free(lx_host);
free(ly_host);
free(l1_shadow);
free(l2_shadow);
free(lx_shadow);
free(ly_shadow);
// Assuming that the best match patch is enclosed by vertices (x1,y1)(x2,y1)(x1,y2)(x2,y2)
MarkAndSave(I_path, x1, y1, x2, y2, out_path);
free(I); free(T);
return 0;
} |
b3273be7920f5c2e59c0b696d20beaf921db5fe8.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void atan2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::atan2(a, b);
});
});
}
void hypot_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "hypot_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::hypot(a, b);
});
});
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);
REGISTER_DISPATCH(hypot_stub, &hypot_kernel_cuda);
}} // namespace at::native
| b3273be7920f5c2e59c0b696d20beaf921db5fe8.cu | #include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void atan2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::atan2(a, b);
});
});
}
void hypot_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "hypot_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return ::hypot(a, b);
});
});
}
REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);
REGISTER_DISPATCH(hypot_stub, &hypot_kernel_cuda);
}} // namespace at::native
|
dc8539c69c1bbe669b278fb0b7b1b8bbb11165d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cmath>
#include <iostream>
#include <random>
#include <ctime>
/**
* generate random double with range: @fMin ~ @fMax
*/
double fRand(double fMin, double fMax)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(fMin, fMax);
double a = dis(gen);
return a;
}
/**
* create balls with radius @r, coordinate (@_x, @_y), velocity vector <@v_x, @v_y>
*/
struct Obstacle
{
public:
double _x, _y, v_x, v_y, r;
Obstacle()
{
_x = fRand(-100.0, 100.0);
_y = fRand(-100.0, 100.0);
v_x = fRand(0.0, 5.0);
v_y = fRand(0.0, 5.0);
r = 1.0;
}
};
__device__ double infty(void)
{
const unsigned long long ieee754inf = 0x7ff0000000000000;
return __longlong_as_double(ieee754inf);
}
/**
* @n obstacles
* for each obstacle, return time elapsed when collision starts @t_s and ends @t_e
* stored in @list[]
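* Layout: @list holds 2*@n values; obstacle j writes t_s to list[2*j] and t_e to list[2*j+1]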
*/
__global__ void intersectTime_g(int n, Obstacle points[], double list[])
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// process each obstacle
for(int j = index; j < n; j += stride)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a._x * a._x + a._y * a._y);
double t_s = 0;
double t_e = 0;
//Case 1: object already collides with the scooter
if(d <= 1)
{
t_s = 0;
t_e = infty();
}
//Case 2: object moves in the opposite direction w.r.t. the scooter
else if(a._x * a.v_x >= 0 || a._y * a.v_y >= 0)
{
t_s = infty();
t_e = infty();
} else
{
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
double delta_t = 2 * sqrt((double)3.0) / v;
t_s = (sqrt(d * d -1.0) / v) - 0.5 * delta_t;
t_e = t_s + delta_t;
}
//store t_s, t_e in list[2*j], list[2*j+1]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
//for test output
//printf("GPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n", a._x, a._y, v, t_s, t_e);
}
}
void intersectTime_c(int n, Obstacle points[], double list[])
{
for(int j = 0; j < n; j++)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a._x * a._x + a._y * a._y);
//distance travelled when collision starts @d_s and ends @d_e
double d_s = d - 2.0;
double d_e = d + 2.0;
//velocity @v of obstacle
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
//time elapsed when collision starts @t_s and ends @t_e
double t_s = d_s / v;
double t_e = d_e / v;
//store t_s, t_e in list[2*j], list[2*j+1]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
// for test output
//printf("CPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n",a._x, a._y, v, t_s, t_e);
}
}
int main()
{
//(@n*10) obstacles
for(int n = 0; n < 100; n++)
{
double total_time_c = 0.0;
double total_time_g = 0.0;
Obstacle* points_g;
hipMalloc(&points_g, n * 10 * sizeof(Obstacle));
double* list_g;
hipMalloc(&list_g, n * 10 * 2 * sizeof(double));
for(int s = 0; s < 1000; s++)
{
//create same set of points for both CPU and GPU
Obstacle * points = new Obstacle[n * 10];
for(int i = 0; i < n * 10; i++)
{
points[i] = Obstacle();
}
//GPU
//copy points to GPU
hipMemcpy(points_g, points, n * 10 * sizeof(Obstacle), hipMemcpyHostToDevice);
//initialize list: two time values are stored per obstacle
//process obstacles
int blockSize = 256;
int numBlocks = (n * 10 + blockSize - 1) / blockSize;
//timing
clock_t time = clock();
hipLaunchKernelGGL(( intersectTime_g), dim3(numBlocks), dim3(blockSize), 0, 0, n * 10, points_g, list_g);
hipDeviceSynchronize();
hipMemcpy(points, points_g, n * 10 * sizeof(Obstacle), hipMemcpyDeviceToHost);
time = clock() - time;
double elapsed_g = time / (double) CLOCKS_PER_SEC;
total_time_g += elapsed_g;
//CPU
double* list_c = new double[n * 10 * 2];
clock_t e = clock();
intersectTime_c(n * 10, points, list_c);
e = clock() - e;
double elapsed_c = e / (double) CLOCKS_PER_SEC;
total_time_c += elapsed_c;
}
printf("%d GPU: %.8lf s ", (n * 10), total_time_g);
printf("CPU: %.8lf s ", total_time_c);
printf("%.2lf \n", total_time_c / total_time_g);
hipFree(points_g);
hipFree(list_g);
}
}
| dc8539c69c1bbe669b278fb0b7b1b8bbb11165d4.cu | #include <cuda.h>
#include <cmath>
#include <iostream>
#include <random>
#include <ctime>
/**
* generate random double with range: @fMin ~ @fMax
*/
double fRand(double fMin, double fMax)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(fMin, fMax);
double a = dis(gen);
return a;
}
/**
* create balls with radius @r, coordinate (@_x, @_y), velocity vector <@v_x, @v_y>
*/
struct Obstacle
{
public:
double _x, _y, v_x, v_y, r;
Obstacle()
{
_x = fRand(-100.0, 100.0);
_y = fRand(-100.0, 100.0);
v_x = fRand(0.0, 5.0);
v_y = fRand(0.0, 5.0);
r = 1.0;
}
};
__device__ double infty(void)
{
const unsigned long long ieee754inf = 0x7ff0000000000000;
return __longlong_as_double(ieee754inf);
}
/**
* @n obstacles
* for each obstacle, return time elapsed when collision starts @t_s and ends @t_e
* stored in @list[]
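* Layout: @list holds 2*@n values; obstacle j writes t_s to list[2*j] and t_e to list[2*j+1]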
*/
__global__ void intersectTime_g(int n, Obstacle points[], double list[])
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// process each obstacle
for(int j = index; j < n; j += stride)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a._x * a._x + a._y * a._y);
double t_s = 0;
double t_e = 0;
//Case 1: object already collides with the scooter
if(d <= 1)
{
t_s = 0;
t_e = infty();
}
//Case 2: object moves in the opposite direction w.r.t. the scooter
else if(a._x * a.v_x >= 0 || a._y * a.v_y >= 0)
{
t_s = infty();
t_e = infty();
} else
{
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
double delta_t = 2 * sqrt((double)3.0) / v;
t_s = (sqrt(d * d -1.0) / v) - 0.5 * delta_t;
t_e = t_s + delta_t;
}
//store t_s, t_e in list[2*j], list[2*j+1]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
//for test output
//printf("GPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n", a._x, a._y, v, t_s, t_e);
}
}
void intersectTime_c(int n, Obstacle points[], double list[])
{
for(int j = 0; j < n; j++)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a._x * a._x + a._y * a._y);
//distance travelled when collision starts @d_s and ends @d_e
double d_s = d - 2.0;
double d_e = d + 2.0;
//velocity @v of obstacle
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
//time elapsed when collision starts @t_s and ends @t_e
double t_s = d_s / v;
double t_e = d_e / v;
//store t_s, t_e in list[2*j], list[2*j+1]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
// for test output
//printf("CPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n",a._x, a._y, v, t_s, t_e);
}
}
int main()
{
//(@n*10) obstacles
for(int n = 0; n < 100; n++)
{
double total_time_c = 0.0;
double total_time_g = 0.0;
Obstacle* points_g;
cudaMalloc(&points_g, n * 10 * sizeof(Obstacle));
double* list_g;
cudaMalloc(&list_g, n * 10 * 2 * sizeof(double));
for(int s = 0; s < 1000; s++)
{
//create same set of points for both CPU and GPU
Obstacle * points = new Obstacle[n * 10];
for(int i = 0; i < n * 10; i++)
{
points[i] = Obstacle();
}
//GPU
//copy points to GPU
cudaMemcpy(points_g, points, n * 10 * sizeof(Obstacle), cudaMemcpyHostToDevice);
//initialize list: two time values are stored per obstacle
//process obstacles
int blockSize = 256;
int numBlocks = (n * 10 + blockSize - 1) / blockSize;
//timing
clock_t time = clock();
intersectTime_g<<<numBlocks, blockSize>>>(n * 10, points_g, list_g);
cudaDeviceSynchronize();
cudaMemcpy(points, points_g, n * 10 * sizeof(Obstacle), cudaMemcpyDeviceToHost);
time = clock() - time;
double elapsed_g = time / (double) CLOCKS_PER_SEC;
total_time_g += elapsed_g;
//CPU
double* list_c = new double[n * 10 * 2];
clock_t e = clock();
intersectTime_c(n * 10, points, list_c);
e = clock() - e;
double elapsed_c = e / (double) CLOCKS_PER_SEC;
total_time_c += elapsed_c;
}
printf("%d GPU: %.8lf s ", (n * 10), total_time_g);
printf("CPU: %.8lf s ", total_time_c);
printf("%.2lf \n", total_time_c / total_time_g);
cudaFree(points_g);
cudaFree(list_g);
}
}
|
84bbf724a8d2c440b43722c15a647185c8749602.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <hip/hip_runtime.h>
#include <armadillo>
#include <GL/glut.h>
using namespace arma;
using namespace std;
#define N (2042 * 2042)
#define THREADS_PER_BLOCK 512
GLfloat xRotated, yRotated, zRotated;
void init(void);
void DrawCube(void);
void animation(void);
void reshape(int x, int y);
void randomInts(int *a, int n)
{
int i;
for (i = 0; i < n; i++)
{
a[i] = rand() % (10000 - 100 + 1) + 100;
}
}
void saveToFile(FILE *fp, int *a, int *b, int *c)
{
for (int i = 0; i < 10; i++)
{
fprintf(fp, "%d + %d = %d\n", a[i], b[i], c[i]);
}
}
__global__ void add(int *a, int *b, int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
int main(int argc, char **argv)
{
cout << "Armadillo version: " << arma_version::as_string() << endl;
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
// Setup input values
a = (int *)malloc(size);
randomInts(a, N);
b = (int *)malloc(size);
randomInts(b, N);
c = (int *)malloc(size);
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
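// Note: the grid size below uses integer division and N = 2042*2042 is not a
// multiple of THREADS_PER_BLOCK, so the last N % THREADS_PER_BLOCK elements of c
// are never computed by add(), which has no bounds check. A ceil-divided grid
// plus an "index < N" guard in the kernel would cover the tail.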
hipLaunchKernelGGL(( add), dim3(N / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
FILE *fp;
fp = fopen("result.txt", "w");
saveToFile(fp, a, b, c);
fclose(fp);
// Cleanup
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
glutInit(&argc, argv);
//initialize the GLUT functions
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowPosition(100, 100);
glutCreateWindow("glut openGl and cuda");
//print GPU / GLSL version info
cout << "***** Info GPU *****" << std::endl;
cout << "Fabricant : " << glGetString(GL_VENDOR) << std::endl;
cout << "Carte graphique: " << glGetString(GL_RENDERER) << std::endl;
cout << "Version : " << glGetString(GL_VERSION) << std::endl;
cout << "Version GLSL : " << glGetString(GL_SHADING_LANGUAGE_VERSION) << std::endl;
init();
glutDisplayFunc(DrawCube);
glutReshapeFunc(reshape);
//Set the function for the animation.
glutIdleFunc(animation);
glutMainLoop();
return 0;
}
void init(void)
{
glClearColor(0, 0, 0, 0);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
}
void DrawCube(void)
{
glMatrixMode(GL_MODELVIEW);
// clear the drawing buffer.
glClear(GL_COLOR_BUFFER_BIT);
glLoadIdentity();
glTranslatef(0.0, 0.0, -10.5);
glRotatef(xRotated, 1.0, 0.0, 0.0);
// rotation about Y axis
glRotatef(yRotated, 0.0, 1.0, 0.0);
// rotation about Z axis
glRotatef(zRotated, 0.0, 0.0, 1.0);
glBegin(GL_QUADS); // Draw The Cube Using quads
glColor3f(0.0f, 1.0f, 0.0f); // Color Green
glVertex3f(1.0f, 1.0f, -1.0f); // Top Right Of The Quad (Top)
glVertex3f(-1.0f, 1.0f, -1.0f); // Top Left Of The Quad (Top)
glVertex3f(-1.0f, 1.0f, 1.0f); // Bottom Left Of The Quad (Top)
glVertex3f(1.0f, 1.0f, 1.0f); // Bottom Right Of The Quad (Top)
glColor3f(1.0f, 0.5f, 0.0f); // Color Orange
glVertex3f(1.0f, -1.0f, 1.0f); // Top Right Of The Quad (Bottom)
glVertex3f(-1.0f, -1.0f, 1.0f); // Top Left Of The Quad (Bottom)
glVertex3f(-1.0f, -1.0f, -1.0f); // Bottom Left Of The Quad (Bottom)
glVertex3f(1.0f, -1.0f, -1.0f); // Bottom Right Of The Quad (Bottom)
glColor3f(1.0f, 0.0f, 0.0f); // Color Red
glVertex3f(1.0f, 1.0f, 1.0f); // Top Right Of The Quad (Front)
glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Quad (Front)
glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Quad (Front)
glVertex3f(1.0f, -1.0f, 1.0f); // Bottom Right Of The Quad (Front)
glColor3f(1.0f, 1.0f, 0.0f); // Color Yellow
glVertex3f(1.0f, -1.0f, -1.0f); // Top Right Of The Quad (Back)
glVertex3f(-1.0f, -1.0f, -1.0f); // Top Left Of The Quad (Back)
glVertex3f(-1.0f, 1.0f, -1.0f); // Bottom Left Of The Quad (Back)
glVertex3f(1.0f, 1.0f, -1.0f); // Bottom Right Of The Quad (Back)
glColor3f(0.0f, 0.0f, 1.0f); // Color Blue
glVertex3f(-1.0f, 1.0f, 1.0f); // Top Right Of The Quad (Left)
glVertex3f(-1.0f, 1.0f, -1.0f); // Top Left Of The Quad (Left)
glVertex3f(-1.0f, -1.0f, -1.0f); // Bottom Left Of The Quad (Left)
glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Right Of The Quad (Left)
glColor3f(1.0f, 0.0f, 1.0f); // Color Violet
glVertex3f(1.0f, 1.0f, -1.0f); // Top Right Of The Quad (Right)
glVertex3f(1.0f, 1.0f, 1.0f); // Top Left Of The Quad (Right)
glVertex3f(1.0f, -1.0f, 1.0f); // Bottom Left Of The Quad (Right)
glVertex3f(1.0f, -1.0f, -1.0f); // Bottom Right Of The Quad (Right)
glEnd(); // End Drawing The Cube
glFlush();
}
void animation(void)
{
yRotated += 0.01;
xRotated += 0.02;
DrawCube();
}
void reshape(int x, int y)
{
if (y == 0 || x == 0)
return; //Nothing is visible then, so return
//Set a new projection matrix
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//Angle of view:40 degrees
//Near clipping plane distance: 0.5
//Far clipping plane distance: 20.0
gluPerspective(40.0, (GLdouble)x / (GLdouble)y, 0.5, 20.0);
glMatrixMode(GL_MODELVIEW);
glViewport(0, 0, x, y); //Use the whole window for rendering
} | 84bbf724a8d2c440b43722c15a647185c8749602.cu | #include <cstdlib>
#include <cstdio>
#include <iostream>
#include <cuda.h>
#include <armadillo>
#include <GL/glut.h>
using namespace arma;
using namespace std;
#define N (2042 * 2042)
#define THREADS_PER_BLOCK 512
GLfloat xRotated, yRotated, zRotated;
void init(void);
void DrawCube(void);
void animation(void);
void reshape(int x, int y);
void randomInts(int *a, int n)
{
int i;
for (i = 0; i < n; i++)
{
a[i] = rand() % (10000 - 100 + 1) + 100;
}
}
void saveToFile(FILE *fp, int *a, int *b, int *c)
{
for (int i = 0; i < 10; i++)
{
fprintf(fp, "%d + %d = %d\n", a[i], b[i], c[i]);
}
}
__global__ void add(int *a, int *b, int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
int main(int argc, char **argv)
{
cout << "Armadillo version: " << arma_version::as_string() << endl;
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
// Setup input values
a = (int *)malloc(size);
randomInts(a, N);
b = (int *)malloc(size);
randomInts(b, N);
c = (int *)malloc(size);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
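// Note: the grid size below uses integer division and N = 2042*2042 is not a
// multiple of THREADS_PER_BLOCK, so the last N % THREADS_PER_BLOCK elements of c
// are never computed by add(), which has no bounds check. A ceil-divided grid
// plus an "index < N" guard in the kernel would cover the tail.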
add<<<N / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
FILE *fp;
fp = fopen("result.txt", "w");
saveToFile(fp, a, b, c);
fclose(fp);
// Cleanup
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
glutInit(&argc, argv);
//initialize the GLUT functions
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowPosition(100, 100);
glutCreateWindow("glut openGl and cuda");
//print GPU / GLSL version info
cout << "***** Info GPU *****" << std::endl;
cout << "Fabricant : " << glGetString(GL_VENDOR) << std::endl;
cout << "Carte graphique: " << glGetString(GL_RENDERER) << std::endl;
cout << "Version : " << glGetString(GL_VERSION) << std::endl;
cout << "Version GLSL : " << glGetString(GL_SHADING_LANGUAGE_VERSION) << std::endl;
init();
glutDisplayFunc(DrawCube);
glutReshapeFunc(reshape);
//Set the function for the animation.
glutIdleFunc(animation);
glutMainLoop();
return 0;
}
void init(void)
{
glClearColor(0, 0, 0, 0);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
}
void DrawCube(void)
{
glMatrixMode(GL_MODELVIEW);
// clear the drawing buffer.
glClear(GL_COLOR_BUFFER_BIT);
glLoadIdentity();
glTranslatef(0.0, 0.0, -10.5);
glRotatef(xRotated, 1.0, 0.0, 0.0);
// rotation about Y axis
glRotatef(yRotated, 0.0, 1.0, 0.0);
// rotation about Z axis
glRotatef(zRotated, 0.0, 0.0, 1.0);
glBegin(GL_QUADS); // Draw The Cube Using quads
glColor3f(0.0f, 1.0f, 0.0f); // Color Green
glVertex3f(1.0f, 1.0f, -1.0f); // Top Right Of The Quad (Top)
glVertex3f(-1.0f, 1.0f, -1.0f); // Top Left Of The Quad (Top)
glVertex3f(-1.0f, 1.0f, 1.0f); // Bottom Left Of The Quad (Top)
glVertex3f(1.0f, 1.0f, 1.0f); // Bottom Right Of The Quad (Top)
glColor3f(1.0f, 0.5f, 0.0f); // Color Orange
glVertex3f(1.0f, -1.0f, 1.0f); // Top Right Of The Quad (Bottom)
glVertex3f(-1.0f, -1.0f, 1.0f); // Top Left Of The Quad (Bottom)
glVertex3f(-1.0f, -1.0f, -1.0f); // Bottom Left Of The Quad (Bottom)
glVertex3f(1.0f, -1.0f, -1.0f); // Bottom Right Of The Quad (Bottom)
glColor3f(1.0f, 0.0f, 0.0f); // Color Red
glVertex3f(1.0f, 1.0f, 1.0f); // Top Right Of The Quad (Front)
glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Quad (Front)
glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Quad (Front)
glVertex3f(1.0f, -1.0f, 1.0f); // Bottom Right Of The Quad (Front)
glColor3f(1.0f, 1.0f, 0.0f); // Color Yellow
glVertex3f(1.0f, -1.0f, -1.0f); // Top Right Of The Quad (Back)
glVertex3f(-1.0f, -1.0f, -1.0f); // Top Left Of The Quad (Back)
glVertex3f(-1.0f, 1.0f, -1.0f); // Bottom Left Of The Quad (Back)
glVertex3f(1.0f, 1.0f, -1.0f); // Bottom Right Of The Quad (Back)
glColor3f(0.0f, 0.0f, 1.0f); // Color Blue
glVertex3f(-1.0f, 1.0f, 1.0f); // Top Right Of The Quad (Left)
glVertex3f(-1.0f, 1.0f, -1.0f); // Top Left Of The Quad (Left)
glVertex3f(-1.0f, -1.0f, -1.0f); // Bottom Left Of The Quad (Left)
glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Right Of The Quad (Left)
glColor3f(1.0f, 0.0f, 1.0f); // Color Violet
glVertex3f(1.0f, 1.0f, -1.0f); // Top Right Of The Quad (Right)
glVertex3f(1.0f, 1.0f, 1.0f); // Top Left Of The Quad (Right)
glVertex3f(1.0f, -1.0f, 1.0f); // Bottom Left Of The Quad (Right)
glVertex3f(1.0f, -1.0f, -1.0f); // Bottom Right Of The Quad (Right)
glEnd(); // End Drawing The Cube
glFlush();
}
void animation(void)
{
yRotated += 0.01;
xRotated += 0.02;
DrawCube();
}
void reshape(int x, int y)
{
if (y == 0 || x == 0)
return; //Nothing is visible then, so return
//Set a new projection matrix
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//Angle of view:40 degrees
//Near clipping plane distance: 0.5
//Far clipping plane distance: 20.0
gluPerspective(40.0, (GLdouble)x / (GLdouble)y, 0.5, 20.0);
glMatrixMode(GL_MODELVIEW);
glViewport(0, 0, x, y); //Use the whole window for rendering
} |
5d89a73b67965e8ee15fe376ad81f02e3328fa06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
zsymv.cu is nearly identical to zhemv.cu, just change names and drop MAGMA_Z_CONJ.
zhemv_kernel_U (upper) in zhemv_upper.cu is very similar to
zhemv_kernel_L (lower) in zhemv.cu; diff the two files to compare.
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
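// Concrete sizing example (illustrative only): with n = 100 and NB_X = 64 the
// launcher below uses gridDim.x = 2; block 0 covers rows 0..63 and block 1 covers
// rows 64..99, so the last block runs with partial = n % NB_X = 36 valid rows.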
__global__ void
zhemv_kernel_L(
int n,
magmaDoubleComplex const * __restrict__ A, int lda,
magmaDoubleComplex const * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) || defined(HAVE_HIP)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaDoubleComplex psum, psum_t;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_Z_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_Z_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_Z_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) || defined(HAVE_HIP) */
}
// end zhemv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
zhemv_kernel_L_sum(
int n,
magmaDoubleComplex alpha,
int lda,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ y, int incy,
magmaDoubleComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaDoubleComplex Ax = MAGMA_Z_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_zhemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16. Otherwise
performance would deteriorate as the memory accesses
would not be fully coalesced.
@param[in]
dx COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX_16 array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements zhemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_zhemv_work requires users to provide a workspace, while
magmablas_zhemv is a wrapper routine allocating the workspace inside the
routine and provides the same interface as cublas.
If users need to call zhemv frequently, we suggest using
magmablas_zhemv_work instead of magmablas_zhemv. As the overhead to
allocate and free in device memory in magmablas_zhemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
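A minimal calling sketch (illustrative only; the setup of dA, dx, dy, alpha,
beta and the queue is assumed to be done by the caller and is not shown):

    magma_int_t blocks = magma_ceildiv( n, 64 );   // NB_X = 64
    magma_int_t lwork  = ldda * blocks;
    magmaDoubleComplex_ptr dwork;
    magma_zmalloc( &dwork, lwork );
    magmablas_zhemv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
                          beta, dy, 1, dwork, lwork, queue );
    // ... repeated calls can reuse the same dwork ...
    magma_free( dwork );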
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zhemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dx, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy, magma_int_t incy,
magmaDoubleComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
hipLaunchKernelGGL(( zhemv_kernel_U), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( zhemv_kernel_U_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
else {
hipLaunchKernelGGL(( zhemv_kernel_L), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( zhemv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_zhemv_work
/***************************************************************************//**
Purpose
-------
magmablas_zhemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16. Otherwise
performance would deteriorate as the memory accesses
would not be fully coalesced.
@param[in]
dx COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zhemv(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dx, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
magmaDoubleComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_zmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_zhemv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_zhemv
| 5d89a73b67965e8ee15fe376ad81f02e3328fa06.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
zsymv.cu is nearly identical to zhemv.cu, just change names and drop MAGMA_Z_CONJ.
zhemv_kernel_U (upper) in zhemv_upper.cu is very similar to
zhemv_kernel_L (lower) in zhemv.cu; diff the two files to compare.
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
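// Concrete sizing example (illustrative only): with n = 100 and NB_X = 64 the
// launcher below uses gridDim.x = 2; block 0 covers rows 0..63 and block 1 covers
// rows 64..99, so the last block runs with partial = n % NB_X = 36 valid rows.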
__global__ void
zhemv_kernel_L(
int n,
magmaDoubleComplex const * __restrict__ A, int lda,
magmaDoubleComplex const * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) || defined(HAVE_HIP)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaDoubleComplex psum, psum_t;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_Z_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_Z_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_Z_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) || defined(HAVE_HIP) */
}
// end zhemv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
zhemv_kernel_L_sum(
int n,
magmaDoubleComplex alpha,
int lda,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ y, int incy,
magmaDoubleComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaDoubleComplex Ax = MAGMA_Z_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
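// --------------------------------------------------------------------------
// Hedged host-side sketch (not part of the original MAGMA source): a serial
// reference of the reduction zhemv_kernel_L_sum performs over the work
// matrix described above. The helper name ref_zhemv_L_sum and the explicit
// NB parameter are illustrative assumptions; the body mirrors the kernel's
// own expressions so the work-matrix layout can be sanity-checked on the host.
#if 0
static void ref_zhemv_L_sum(
int n, int NB, magmaDoubleComplex alpha, int lda, magmaDoubleComplex beta,
magmaDoubleComplex *y, int incy, magmaDoubleComplex const *work )
{
int blocks = (n + NB - 1) / NB;
for (int ind = 0; ind < n; ++ind) {
int blk = ind / NB; // block row that owns row ind
magmaDoubleComplex Ax = MAGMA_Z_ZERO;
for (int j = blk; j < blocks; ++j) // walk the stored column blocks
Ax += work[ind + j*lda];
y[ind*incy] = beta*y[ind*incy] + alpha*Ax;
}
}
#endif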
/***************************************************************************//**
Purpose
-------
magmablas_zhemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise
performance deteriorates because the memory accesses
are not fully coalesced.
@param[in]
dx COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX_16 array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements zhemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_zhemv_work requires users to provide a workspace, while
magmablas_zhemv is a wrapper routine that allocates the workspace inside the
routine and provides the same interface as cublas.
If users need to call zhemv frequently, we suggest using
magmablas_zhemv_work instead of magmablas_zhemv, as the overhead of
allocating and freeing device memory in magmablas_zhemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zhemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dx, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy, magma_int_t incy,
magmaDoubleComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
zhemv_kernel_U<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
zhemv_kernel_U_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
else {
zhemv_kernel_L<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
zhemv_kernel_L_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_zhemv_work
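// --------------------------------------------------------------------------
// Hedged usage sketch (not part of the original MAGMA source): how a caller
// could size the workspace documented above, LWORK >= LDDA * ceil(N / NB_X)
// with NB_X = 64. The function name example_zhemv_work_call is an
// illustrative assumption; incx = incy = 1 is assumed for brevity.
#if 0
static void example_zhemv_work_call(
magma_int_t n, magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
magma_int_t blocks = magma_ceildiv( n, NB_X ); // ceil(n / 64)
magma_int_t lwork = ldda * blocks;
magmaDoubleComplex_ptr dwork;
magma_zmalloc( &dwork, lwork );
magmablas_zhemv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
beta, dy, 1, dwork, lwork, queue );
magma_free( dwork ); // keep and reuse dwork across calls to amortize the cost
}
#endif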
/***************************************************************************//**
Purpose
-------
magmablas_zhemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX_16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise
performance deteriorates because the memory accesses
are not fully coalesced.
@param[in]
dx COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX_16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX_16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zhemv(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_const_ptr dx, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
magmaDoubleComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_zmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_zhemv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_zhemv
|
14dec09097b1e0fbbc351c902188ff279ac99798.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "int2lin_resmpl_messy_gpu_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dev_in_img = NULL;
hipMalloc(&dev_in_img, XSIZE*YSIZE);
float *dev_out_img = NULL;
hipMalloc(&dev_out_img, XSIZE*YSIZE);
float *dev_C0_tmp = NULL;
hipMalloc(&dev_C0_tmp, XSIZE*YSIZE);
float *dev_C1_tmp = NULL;
hipMalloc(&dev_C1_tmp, XSIZE*YSIZE);
float *dev_C2_tmp = NULL;
hipMalloc(&dev_C2_tmp, XSIZE*YSIZE);
int org_wd = 1;
int org_ht = 1;
int dst_wd = 1;
int dst_ht = 1;
int n_channels = 1;
float r = 1;
int hn = 1;
int wn = 1;
int xbd0 = 1;
int xbd1 = 1;
int ybd0 = 1;
int ybd1 = 1;
int *xas_const = NULL;
hipMalloc(&xas_const, XSIZE*YSIZE);
int *xbs_const = NULL;
hipMalloc(&xbs_const, XSIZE*YSIZE);
float *xwts_const = NULL;
hipMalloc(&xwts_const, XSIZE*YSIZE);
int *yas_const = NULL;
hipMalloc(&yas_const, XSIZE*YSIZE);
int *ybs_const = NULL;
hipMalloc(&ybs_const, XSIZE*YSIZE);
float *ywts_const = NULL;
hipMalloc(&ywts_const, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((int2lin_resmpl_messy_gpu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_in_img,dev_out_img,dev_C0_tmp,dev_C1_tmp,dev_C2_tmp,org_wd,org_ht,dst_wd,dst_ht,n_channels,r,hn,wn,xbd0,xbd1,ybd0,ybd1,xas_const,xbs_const,xwts_const,yas_const,ybs_const,ywts_const);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((int2lin_resmpl_messy_gpu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_in_img,dev_out_img,dev_C0_tmp,dev_C1_tmp,dev_C2_tmp,org_wd,org_ht,dst_wd,dst_ht,n_channels,r,hn,wn,xbd0,xbd1,ybd0,ybd1,xas_const,xbs_const,xwts_const,yas_const,ybs_const,ywts_const);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((int2lin_resmpl_messy_gpu_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_in_img,dev_out_img,dev_C0_tmp,dev_C1_tmp,dev_C2_tmp,org_wd,org_ht,dst_wd,dst_ht,n_channels,r,hn,wn,xbd0,xbd1,ybd0,ybd1,xas_const,xbs_const,xwts_const,yas_const,ybs_const,ywts_const);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 14dec09097b1e0fbbc351c902188ff279ac99798.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "int2lin_resmpl_messy_gpu_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dev_in_img = NULL;
cudaMalloc(&dev_in_img, XSIZE*YSIZE);
float *dev_out_img = NULL;
cudaMalloc(&dev_out_img, XSIZE*YSIZE);
float *dev_C0_tmp = NULL;
cudaMalloc(&dev_C0_tmp, XSIZE*YSIZE);
float *dev_C1_tmp = NULL;
cudaMalloc(&dev_C1_tmp, XSIZE*YSIZE);
float *dev_C2_tmp = NULL;
cudaMalloc(&dev_C2_tmp, XSIZE*YSIZE);
int org_wd = 1;
int org_ht = 1;
int dst_wd = 1;
int dst_ht = 1;
int n_channels = 1;
float r = 1;
int hn = 1;
int wn = 1;
int xbd0 = 1;
int xbd1 = 1;
int ybd0 = 1;
int ybd1 = 1;
int *xas_const = NULL;
cudaMalloc(&xas_const, XSIZE*YSIZE);
int *xbs_const = NULL;
cudaMalloc(&xbs_const, XSIZE*YSIZE);
float *xwts_const = NULL;
cudaMalloc(&xwts_const, XSIZE*YSIZE);
int *yas_const = NULL;
cudaMalloc(&yas_const, XSIZE*YSIZE);
int *ybs_const = NULL;
cudaMalloc(&ybs_const, XSIZE*YSIZE);
float *ywts_const = NULL;
cudaMalloc(&ywts_const, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
int2lin_resmpl_messy_gpu_kernel<<<gridBlock,threadBlock>>>(dev_in_img,dev_out_img,dev_C0_tmp,dev_C1_tmp,dev_C2_tmp,org_wd,org_ht,dst_wd,dst_ht,n_channels,r,hn,wn,xbd0,xbd1,ybd0,ybd1,xas_const,xbs_const,xwts_const,yas_const,ybs_const,ywts_const);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
int2lin_resmpl_messy_gpu_kernel<<<gridBlock,threadBlock>>>(dev_in_img,dev_out_img,dev_C0_tmp,dev_C1_tmp,dev_C2_tmp,org_wd,org_ht,dst_wd,dst_ht,n_channels,r,hn,wn,xbd0,xbd1,ybd0,ybd1,xas_const,xbs_const,xwts_const,yas_const,ybs_const,ywts_const);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
int2lin_resmpl_messy_gpu_kernel<<<gridBlock,threadBlock>>>(dev_in_img,dev_out_img,dev_C0_tmp,dev_C1_tmp,dev_C2_tmp,org_wd,org_ht,dst_wd,dst_ht,n_channels,r,hn,wn,xbd0,xbd1,ybd0,ybd1,xas_const,xbs_const,xwts_const,yas_const,ybs_const,ywts_const);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
98c3ef94ced6e6061cf5b9545e67cea61324ec38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) "2019, by Stanford University
// Developer: Mario Di Renzo
// Affiliation: Center for Turbulence Research, Stanford University
// URL: https://ctr.stanford.edu
// Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020).
// HTR solver: An open-source exascale-oriented task-based
// multi-GPU high-order code for hypersonic aerothermodynamics.
// Computer Physics Communications 255, 107262"
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "prometeo_variables.hpp"
#include "cuda_utils.hpp"
// Declare a constant memory that will hold the Mixture struct (initialized in prometeo_mixture.cu)
extern __device__ __constant__ Mix mix;
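// Hedged sketch (not part of this file): how a __constant__ symbol like "mix"
// is typically filled from the host before any kernel that reads it runs.
// The actual initialization lives in prometeo_mixture.cu; the host-side value
// h_mix and the helper name initMixtureConstant are illustrative assumptions.
#if 0
static void initMixtureConstant(const Mix &h_mix) {
// Copy the host-side Mixture struct into the device constant symbol "mix"
hipMemcpyToSymbol(mix, &h_mix, sizeof(Mix));
}
#endif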
//-----------------------------------------------------------------------------
// KERNELS FOR UpdatePropertiesFromPrimitiveTask
//-----------------------------------------------------------------------------
__global__
void UpdatePropertiesFromPrimitive_kernel(const AccessorRO<double, 3> pressure,
const AccessorRO<double, 3> temperature,
const AccessorRO<VecNSp, 3> MolarFracs,
const AccessorRO< Vec3, 3> velocity,
const AccessorWO<VecNSp, 3> MassFracs,
const AccessorWO<double, 3> rho,
const AccessorWO<double, 3> mu,
const AccessorWO<double, 3> lam,
const AccessorWO<VecNSp, 3> Di,
const AccessorWO<double, 3> SoS,
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
const AccessorWO<VecNIo, 3> Ki,
#endif
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
// Mixture check
assert(mix.CheckMixture(MolarFracs[p]));
UpdatePropertiesFromPrimitiveTask::UpdateProperties(
pressure, temperature, MolarFracs, velocity,
MassFracs,
rho, mu, lam, Di, SoS,
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
Ki,
#endif
p, mix);
}
}
__host__
void UpdatePropertiesFromPrimitiveTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessors for primitive variables
const AccessorRO<double, 3> acc_pressure (regions[0], FID_pressure);
const AccessorRO<double, 3> acc_temperature (regions[0], FID_temperature);
const AccessorRO<VecNSp, 3> acc_MolarFracs (regions[0], FID_MolarFracs);
const AccessorRO< Vec3, 3> acc_velocity (regions[0], FID_velocity);
const AccessorWO<VecNSp, 3> acc_MassFracs (regions[1], FID_MassFracs);
// Accessors for properties
const AccessorWO<double, 3> acc_rho (regions[1], FID_rho);
const AccessorWO<double, 3> acc_mu (regions[1], FID_mu);
const AccessorWO<double, 3> acc_lam (regions[1], FID_lam);
const AccessorWO<VecNSp, 3> acc_Di (regions[1], FID_Di);
const AccessorWO<double, 3> acc_SoS (regions[1], FID_SoS);
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
const AccessorWO<VecNIo, 3> acc_Ki (regions[1], FID_Ki);
#endif
// Extract execution domains
Rect<3> r_Fluid = runtime->get_index_space_domain(ctx, regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_Fluid);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_Fluid) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_Fluid) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_Fluid) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( UpdatePropertiesFromPrimitive_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
acc_pressure, acc_temperature, acc_MolarFracs,
acc_velocity, acc_MassFracs,
acc_rho, acc_mu, acc_lam, acc_Di, acc_SoS,
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
acc_Ki,
#endif
r_Fluid, getSize<Xdir>(r_Fluid), getSize<Ydir>(r_Fluid), getSize<Zdir>(r_Fluid));
}
//-----------------------------------------------------------------------------
// KERNELS FOR UpdateConservedFromPrimitiveTask
//-----------------------------------------------------------------------------
__global__
void UpdateConservedFromPrimitive_kernel(const AccessorRO<VecNSp, 3> MassFracs,
const AccessorRO<double, 3> temperature,
const AccessorRO< Vec3, 3> velocity,
const AccessorRO<double, 3> rho,
const AccessorWO<VecNEq, 3> Conserved,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
// Mixture check
assert(mix.CheckMixture(MassFracs[p]));
UpdateConservedFromPrimitiveTask::UpdateConserved(
MassFracs, temperature, velocity,
rho, Conserved,
p, mix);
}
}
__host__
void UpdateConservedFromPrimitiveTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessors for primitive variables
const AccessorRO<VecNSp, 3> acc_MassFracs (regions[0], FID_MassFracs);
const AccessorRO<double, 3> acc_temperature (regions[0], FID_temperature);
const AccessorRO< Vec3, 3> acc_velocity (regions[0], FID_velocity);
// Accessors for properties
const AccessorRO<double, 3> acc_rho (regions[0], FID_rho);
// Accessors for conserved variables
const AccessorWO<VecNEq, 3> acc_Conserved (regions[1], FID_Conserved);
// Extract execution domains
Domain r_Fluid = runtime->get_index_space_domain(ctx, regions[1].get_logical_region().get_index_space());
// Launch the kernel (launch domain might be composed by multiple rectangles)
for (RectInDomainIterator<3> Rit(r_Fluid); Rit(); Rit++) {
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, (*Rit));
const dim3 num_blocks_3d = dim3((getSize<Xdir>(*Rit) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(*Rit) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(*Rit) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( UpdateConservedFromPrimitive_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
acc_MassFracs, acc_temperature, acc_velocity,
acc_rho, acc_Conserved, (*Rit),
getSize<Xdir>(*Rit), getSize<Ydir>(*Rit), getSize<Zdir>(*Rit));
}
}
//-----------------------------------------------------------------------------
// KERNELS FOR UpdatePrimitiveFromConservedTask
//-----------------------------------------------------------------------------
__global__
void UpdatePrimitiveFromConserved_kernel(const AccessorRO<VecNEq, 3> Conserved,
const AccessorRW<double, 3> temperature,
const AccessorWO<double, 3> pressure,
const AccessorWO<VecNSp, 3> MolarFracs,
const AccessorWO< Vec3, 3> velocity,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
UpdatePrimitiveFromConservedTask::UpdatePrimitive(
Conserved, temperature, pressure,
MolarFracs, velocity,
p, mix);
}
}
__host__
void UpdatePrimitiveFromConservedTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessors for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessors for temperature variables
const AccessorRW<double, 3> acc_temperature (regions[1], FID_temperature);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Extract execution domains
Rect<3> r_Fluid = runtime->get_index_space_domain(ctx, regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_Fluid);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_Fluid) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_Fluid) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_Fluid) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( UpdatePrimitiveFromConserved_kernel), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
acc_Conserved, acc_temperature, acc_pressure,
acc_MolarFracs, acc_velocity, r_Fluid,
getSize<Xdir>(r_Fluid), getSize<Ydir>(r_Fluid), getSize<Zdir>(r_Fluid));
}
| 98c3ef94ced6e6061cf5b9545e67cea61324ec38.cu | // Copyright (c) "2019, by Stanford University
// Developer: Mario Di Renzo
// Affiliation: Center for Turbulence Research, Stanford University
// URL: https://ctr.stanford.edu
// Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020).
// HTR solver: An open-source exascale-oriented task-based
// multi-GPU high-order code for hypersonic aerothermodynamics.
// Computer Physics Communications 255, 107262"
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "prometeo_variables.hpp"
#include "cuda_utils.hpp"
// Declare a constant memory that will hold the Mixture struct (initialized in prometeo_mixture.cu)
extern __device__ __constant__ Mix mix;
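// Hedged sketch (not part of this file): how a __constant__ symbol like "mix"
// is typically filled from the host before any kernel that reads it runs.
// The actual initialization lives in prometeo_mixture.cu; the host-side value
// h_mix and the helper name initMixtureConstant are illustrative assumptions.
#if 0
static void initMixtureConstant(const Mix &h_mix) {
// Copy the host-side Mixture struct into the device constant symbol "mix"
cudaMemcpyToSymbol(mix, &h_mix, sizeof(Mix));
}
#endif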
//-----------------------------------------------------------------------------
// KERNELS FOR UpdatePropertiesFromPrimitiveTask
//-----------------------------------------------------------------------------
__global__
void UpdatePropertiesFromPrimitive_kernel(const AccessorRO<double, 3> pressure,
const AccessorRO<double, 3> temperature,
const AccessorRO<VecNSp, 3> MolarFracs,
const AccessorRO< Vec3, 3> velocity,
const AccessorWO<VecNSp, 3> MassFracs,
const AccessorWO<double, 3> rho,
const AccessorWO<double, 3> mu,
const AccessorWO<double, 3> lam,
const AccessorWO<VecNSp, 3> Di,
const AccessorWO<double, 3> SoS,
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
const AccessorWO<VecNIo, 3> Ki,
#endif
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
// Mixture check
assert(mix.CheckMixture(MolarFracs[p]));
UpdatePropertiesFromPrimitiveTask::UpdateProperties(
pressure, temperature, MolarFracs, velocity,
MassFracs,
rho, mu, lam, Di, SoS,
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
Ki,
#endif
p, mix);
}
}
__host__
void UpdatePropertiesFromPrimitiveTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessors for primitive variables
const AccessorRO<double, 3> acc_pressure (regions[0], FID_pressure);
const AccessorRO<double, 3> acc_temperature (regions[0], FID_temperature);
const AccessorRO<VecNSp, 3> acc_MolarFracs (regions[0], FID_MolarFracs);
const AccessorRO< Vec3, 3> acc_velocity (regions[0], FID_velocity);
const AccessorWO<VecNSp, 3> acc_MassFracs (regions[1], FID_MassFracs);
// Accessors for properties
const AccessorWO<double, 3> acc_rho (regions[1], FID_rho);
const AccessorWO<double, 3> acc_mu (regions[1], FID_mu);
const AccessorWO<double, 3> acc_lam (regions[1], FID_lam);
const AccessorWO<VecNSp, 3> acc_Di (regions[1], FID_Di);
const AccessorWO<double, 3> acc_SoS (regions[1], FID_SoS);
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
const AccessorWO<VecNIo, 3> acc_Ki (regions[1], FID_Ki);
#endif
// Extract execution domains
Rect<3> r_Fluid = runtime->get_index_space_domain(ctx, regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_Fluid);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_Fluid) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_Fluid) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_Fluid) + (TPB_3d.z - 1)) / TPB_3d.z);
UpdatePropertiesFromPrimitive_kernel<<<num_blocks_3d, TPB_3d>>>(
acc_pressure, acc_temperature, acc_MolarFracs,
acc_velocity, acc_MassFracs,
acc_rho, acc_mu, acc_lam, acc_Di, acc_SoS,
#if (defined(ELECTRIC_FIELD) && (nIons > 0))
acc_Ki,
#endif
r_Fluid, getSize<Xdir>(r_Fluid), getSize<Ydir>(r_Fluid), getSize<Zdir>(r_Fluid));
}
//-----------------------------------------------------------------------------
// KERNELS FOR UpdateConservedFromPrimitiveTask
//-----------------------------------------------------------------------------
__global__
void UpdateConservedFromPrimitive_kernel(const AccessorRO<VecNSp, 3> MassFracs,
const AccessorRO<double, 3> temperature,
const AccessorRO< Vec3, 3> velocity,
const AccessorRO<double, 3> rho,
const AccessorWO<VecNEq, 3> Conserved,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
// Mixture check
assert(mix.CheckMixture(MassFracs[p]));
UpdateConservedFromPrimitiveTask::UpdateConserved(
MassFracs, temperature, velocity,
rho, Conserved,
p, mix);
}
}
__host__
void UpdateConservedFromPrimitiveTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessors for primitive variables
const AccessorRO<VecNSp, 3> acc_MassFracs (regions[0], FID_MassFracs);
const AccessorRO<double, 3> acc_temperature (regions[0], FID_temperature);
const AccessorRO< Vec3, 3> acc_velocity (regions[0], FID_velocity);
// Accessors for properties
const AccessorRO<double, 3> acc_rho (regions[0], FID_rho);
// Accessors for conserved variables
const AccessorWO<VecNEq, 3> acc_Conserved (regions[1], FID_Conserved);
// Extract execution domains
Domain r_Fluid = runtime->get_index_space_domain(ctx, regions[1].get_logical_region().get_index_space());
// Launch the kernel (launch domain might be composed by multiple rectangles)
for (RectInDomainIterator<3> Rit(r_Fluid); Rit(); Rit++) {
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, (*Rit));
const dim3 num_blocks_3d = dim3((getSize<Xdir>(*Rit) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(*Rit) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(*Rit) + (TPB_3d.z - 1)) / TPB_3d.z);
UpdateConservedFromPrimitive_kernel<<<num_blocks_3d, TPB_3d>>>(
acc_MassFracs, acc_temperature, acc_velocity,
acc_rho, acc_Conserved, (*Rit),
getSize<Xdir>(*Rit), getSize<Ydir>(*Rit), getSize<Zdir>(*Rit));
}
}
//-----------------------------------------------------------------------------
// KERNELS FOR UpdatePrimitiveFromConservedTask
//-----------------------------------------------------------------------------
__global__
void UpdatePrimitiveFromConserved_kernel(const AccessorRO<VecNEq, 3> Conserved,
const AccessorRW<double, 3> temperature,
const AccessorWO<double, 3> pressure,
const AccessorWO<VecNSp, 3> MolarFracs,
const AccessorWO< Vec3, 3> velocity,
const Rect<3> my_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
UpdatePrimitiveFromConservedTask::UpdatePrimitive(
Conserved, temperature, pressure,
MolarFracs, velocity,
p, mix);
}
}
__host__
void UpdatePrimitiveFromConservedTask::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(futures.size() == 0);
// Accessors for conserved variables
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
// Accessors for temperature variables
const AccessorRW<double, 3> acc_temperature (regions[1], FID_temperature);
// Accessors for primitive variables
const AccessorWO<double, 3> acc_pressure (regions[1], FID_pressure);
const AccessorWO<VecNSp, 3> acc_MolarFracs (regions[1], FID_MolarFracs);
const AccessorWO< Vec3, 3> acc_velocity (regions[1], FID_velocity);
// Extract execution domains
Rect<3> r_Fluid = runtime->get_index_space_domain(ctx, regions[1].get_logical_region().get_index_space());
// Launch the kernel
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<Xdir>(threads_per_block, r_Fluid);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_Fluid) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_Fluid) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_Fluid) + (TPB_3d.z - 1)) / TPB_3d.z);
UpdatePrimitiveFromConserved_kernel<<<num_blocks_3d, TPB_3d>>>(
acc_Conserved, acc_temperature, acc_pressure,
acc_MolarFracs, acc_velocity, r_Fluid,
getSize<Xdir>(r_Fluid), getSize<Ydir>(r_Fluid), getSize<Zdir>(r_Fluid));
}
|
291f5de862ebd7266d8523341496e06dd24def0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void square(float * d_out, float *d_in) {
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
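// Hedged variant (not part of the original file): with a single block the
// kernel above handles at most 1024 elements. For larger arrays the same
// squaring can use a global index over several blocks; the kernel name
// square_multi and the 256-thread block size are illustrative assumptions.
#if 0
__global__ void square_multi(float * d_out, float * d_in, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) { // guard the partially filled last block
float f = d_in[idx];
d_out[idx] = f * f;
}
}
// launch: hipLaunchKernelGGL(square_multi, dim3((n + 255) / 256), dim3(256), 0, 0, d_out, d_in, n);
#endif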
int main(int argc, char ** argv){
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
//declare GPU memory pointers
float * d_in;
float * d_out;
//allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
//transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
//launch the kernel
hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
//copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
//print out the resulting array
for ( int i =0 ; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 291f5de862ebd7266d8523341496e06dd24def0d.cu |
#include <stdio.h>
__global__ void square(float * d_out, float *d_in) {
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
}
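// Hedged variant (not part of the original file): with a single block the
// kernel above handles at most 1024 elements. For larger arrays the same
// squaring can use a global index over several blocks; the kernel name
// square_multi and the 256-thread block size are illustrative assumptions.
#if 0
__global__ void square_multi(float * d_out, float * d_in, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) { // guard the partially filled last block
float f = d_in[idx];
d_out[idx] = f * f;
}
}
// launch: square_multi<<<(n + 255) / 256, 256>>>(d_out, d_in, n);
#endif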
int main(int argc, char ** argv){
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
//declare GPU memory pointers
float * d_in;
float * d_out;
//allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
//transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
//launch the kernel
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
//copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//print out the resulting array
for ( int i =0 ; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
53e4f740b4e157ea0d594d48f41b54bd47a68ca4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "mpi.h"
#include <stdlib.h> // added for rand
#include <time.h> // added for srand
// Host function that calls the matrix multiplication kernel
hipError_t multiWithCuda(float* c, float* a, float* b, unsigned int size);
// CUDA matrix multiplication kernel
__global__ void multiKernel(float* c, float* a, float* b, unsigned int size)
{
int i = threadIdx.x;
int j = blockIdx.x;
// Matrix multiplication -> computed by accessing each thread's index values
for(int x=0; x<size; x++)
c[size*j + i] += a[size*j + x] * b[size*x + i];
}
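// Hedged host-side reference (not part of the original file): a plain CPU
// matrix multiply that the kernel result can be checked against. The function
// name multiCpuRef is an illustrative assumption; it mirrors the indexing of
// the kernel above (c is assumed to start zeroed, as in main()).
#if 0
static void multiCpuRef(float* c, const float* a, const float* b, unsigned int size)
{
for (unsigned int j = 0; j < size; j++) // row of c (blockIdx.x in the kernel)
for (unsigned int i = 0; i < size; i++) // column of c (threadIdx.x in the kernel)
for (unsigned int x = 0; x < size; x++)
c[size*j + i] += a[size*j + x] * b[size*x + i];
}
#endif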
int main(int argc, char **argv)
{
int rank,size;
const int arraySize = 5;
srand(time(NULL));
/* Initialize the MPI environment */
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
// Matrices a, b, c are laid out as 1-D arrays of size 32 * 32
float a[arraySize*arraySize] = {0}; // matrix size is 1024
float b[arraySize*arraySize] = {0}; // matrix size is 1024
float c[arraySize*arraySize] = {0}; // matrix size is 1024
// values from 0 to 1024 are filled in
for(int i=0; i<arraySize*arraySize; i++)
{
a[i]=2;
b[i]=2;
//a[i] = static_cast<float>(i);
//b[i] = static_cast<float>(i);
//a[i] = rand() % 1024;
//b[i] = rand() % 1024;
}
// Call the function that does the work.
hipError_t cudaStatus = multiWithCuda(c, a, b, arraySize);
if(cudaStatus != hipSuccess)
{
fprintf(stderr, "multiWithCuda failed!");
// MPI_Finalize(); // tear down the MPI environment
return -1;
}
/* Print the results using send and recv */
// Consider the case where node0 is the master and node1 is the slave
// Note: there may be multiple slaves, so many-to-many connections should be possible
for(int i=0; i<arraySize*arraySize; i++)
{
if(rank == 0)
{
//MPI_Send(address, count, datatype, destination, tag, comm)
MPI_Send(&c[i], 0, MPI_FLOAT, 1, 0, MPI_COMM_WORLD);
printf("ping c[%d] = %8.1f\n",i,c[i]);
MPI_Recv(&c[i], 1, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
else
{
//MPI_Recv(address, maxcount, datatype, source, tag, comm, status)
MPI_Recv(&c[i], 1, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("c[%d] = %8.1f \n",i,c[i]);
MPI_Send(&c[i], 0, MPI_FLOAT,1,0,MPI_COMM_WORLD);
}
// All work is done, so
// reset the device.
cudaStatus = hipDeviceReset();
if(cudaStatus != hipSuccess)
{
fprintf(stderr,"hipDeviceReset, failed!");
//MPI_Finalize();
return 1;
}
//MPI_Finalize();
}
return 0;
}
// Define the helper function multiWithCuda that calls the kernel
hipError_t multiWithCuda(float* c, float* a, float* b, unsigned int size)
{
// Declare variables that hold the addresses of the GPU allocations.
float* dev_a = 0;
float* dev_b = 0;
float* dev_c = 0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if(cudaStatus != hipSuccess)
{
fprintf(stderr, "CudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate memory on the GPU.
// Allocate as much as the matrix size.
cudaStatus = hipMalloc((void**)&dev_c, size*size*sizeof(float));
if(cudaStatus != hipSuccess)
{
fprintf(stderr,"hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size*size*sizeof(float));
if(cudaStatus != hipSuccess)
{
fprintf(stderr,"hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size*size*sizeof(float));
if(cudaStatus != hipSuccess)
{
fprintf(stderr,"hipMalloc failed!");
goto Error;
}
// Copy the values in host memory to device memory.
cudaStatus = hipMemcpy(dev_a, a, size*size*sizeof(float), hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size*size*sizeof(float), hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_c, c, size*size*sizeof(float), hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch the kernel.
hipLaunchKernelGGL(( multiKernel), dim3(size), dim3(size), 0, 0, dev_c, dev_a, dev_b, size);
// Check for errors after launching the kernel
cudaStatus = hipGetLastError();
if(cudaStatus != hipSuccess)
{
fprintf(stderr, "multiKernel launch failed : %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// Make sure the kernel has finished
cudaStatus = hipDeviceSynchronize();
if(cudaStatus!= hipSuccess)
{
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy the result back to host memory
cudaStatus = hipMemcpy(c, dev_c, size*size*sizeof(float), hipMemcpyDeviceToHost);
if(cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
// Free the memory allocated on the GPU
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
// release MPI
// MPI_Finalize();
return cudaStatus;
}
| 53e4f740b4e157ea0d594d48f41b54bd47a68ca4.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "mpi.h"
#include <stdlib.h> // added for rand
#include <time.h> // added for srand
// Host function that calls the matrix multiplication kernel
cudaError_t multiWithCuda(float* c, float* a, float* b, unsigned int size);
// CUDA matrix multiplication kernel
__global__ void multiKernel(float* c, float* a, float* b, unsigned int size)
{
int i = threadIdx.x;
int j = blockIdx.x;
// Matrix multiplication -> computed by accessing each thread's index values
for(int x=0; x<size; x++)
c[size*j + i] += a[size*j + x] * b[size*x + i];
}
int main(int argc, char **argv)
{
int rank,size;
const int arraySize = 5;
srand(time(NULL));
/* Initialize the MPI environment */
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
// Matrices a, b, c are laid out as 1-D arrays of size 32 * 32
float a[arraySize*arraySize] = {0}; // matrix size is 1024
float b[arraySize*arraySize] = {0}; // matrix size is 1024
float c[arraySize*arraySize] = {0}; // matrix size is 1024
// values from 0 to 1024 are filled in
for(int i=0; i<arraySize*arraySize; i++)
{
a[i]=2;
b[i]=2;
//a[i] = static_cast<float>(i);
//b[i] = static_cast<float>(i);
//a[i] = rand() % 1024;
//b[i] = rand() % 1024;
}
// Call the function that does the work.
cudaError_t cudaStatus = multiWithCuda(c, a, b, arraySize);
if(cudaStatus != cudaSuccess)
{
fprintf(stderr, "multiWithCuda failed!");
// MPI_Finalize(); // tear down the MPI environment
return -1;
}
/* Print the results using send and recv */
// Consider the case where node0 is the master and node1 is the slave
// Note: there may be multiple slaves, so many-to-many connections should be possible
for(int i=0; i<arraySize*arraySize; i++)
{
if(rank == 0)
{
//MPI_Send(address, count, datatype, destination, tag, comm)
MPI_Send(&c[i], 0, MPI_FLOAT, 1, 0, MPI_COMM_WORLD);
printf("ping c[%d] = %8.1f\n",i,c[i]);
MPI_Recv(&c[i], 1, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
else
{
//MPI_Recv(address, maxcount, datatype, source, tag, comm, status)
MPI_Recv(&c[i], 1, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("c[%d] = %8.1f \n",i,c[i]);
MPI_Send(&c[i], 0, MPI_FLOAT,1,0,MPI_COMM_WORLD);
}
// All work is done, so
// reset the device.
cudaStatus = cudaDeviceReset();
if(cudaStatus != cudaSuccess)
{
fprintf(stderr,"cudaDeviceReset, failed!");
//MPI_Finalize();
return 1;
}
//MPI_Finalize();
}
return 0;
}
// Define the helper function multiWithCuda that calls the kernel
cudaError_t multiWithCuda(float* c, float* a, float* b, unsigned int size)
{
// Declare variables that hold the addresses of the GPU allocations.
float* dev_a = 0;
float* dev_b = 0;
float* dev_c = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if(cudaStatus != cudaSuccess)
{
fprintf(stderr, "CudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate memory on the GPU.
// Allocate as much as the matrix size.
cudaStatus = cudaMalloc((void**)&dev_c, size*size*sizeof(float));
if(cudaStatus != cudaSuccess)
{
fprintf(stderr,"cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size*size*sizeof(float));
if(cudaStatus != cudaSuccess)
{
fprintf(stderr,"cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size*size*sizeof(float));
if(cudaStatus != cudaSuccess)
{
fprintf(stderr,"cudaMalloc failed!");
goto Error;
}
// Copy the values in host memory to device memory.
cudaStatus = cudaMemcpy(dev_a, a, size*size*sizeof(float), cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size*size*sizeof(float), cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_c, c, size*size*sizeof(float), cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch the kernel.
multiKernel<<<size, size>>>(dev_c, dev_a, dev_b, size);
// Check for errors after launching the kernel
cudaStatus = cudaGetLastError();
if(cudaStatus != cudaSuccess)
{
fprintf(stderr, "multiKernel launch failed : %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// Make sure the kernel has finished
cudaStatus = cudaDeviceSynchronize();
if(cudaStatus!= cudaSuccess)
{
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy the result back to host memory
cudaStatus = cudaMemcpy(c, dev_c, size*size*sizeof(float), cudaMemcpyDeviceToHost);
if(cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
// Free the memory allocated on the GPU
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
// release MPI
// MPI_Finalize();
return cudaStatus;
}
|
2f64ea79991696226d7d439e3a881636fac96d96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Cuda accelerated motion estimation for VP8 libvpx encoder
by Pietro Paglierani, Giuliano Grossi, Federico Pedersini and Alessandro Petrini
for Italtel and Universita' degli Studi di Milano
2015-2016, Milano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <wchar.h>
#include <locale.h>
#include "vpx_config.h"
#include "cuda/typedef_cuda.h"
#include "cuda/me_cuda.h"
#ifdef __cplusplus
extern "C" {
#endif
#if HAVE_CUDA_ENABLED_DEVICE
__constant__ int MV_offset4[128];
__constant__ int MV_offset_refin[32];
void setup_constant_mem(int img_stride) {
int I = img_stride;
int MV_off4[128] = { -22*I,
-20*I,
-18*I-4,-18*I,-18*I+4,
-16*I-4,-16*I,-16*I+4,
-14*I-8,-14*I-4,-14*I,-14*I+4,-14*I+8,
-12*I-8,-12*I-4,-12*I,-12*I+4,-12*I+8,
-10*I-12,-10*I-8,-10*I-4,-10*I,-10*I+4,-10*I+8,-10*I+12,
-8*I-12, -8*I-8, -8*I-4, -8*I, -8*I+4, -8*I+8, -8*I+12,
-6*I-12, -6*I-8, -6*I-4, -6*I, -6*I+4, -6*I+8, -6*I+12,
-4*I-16, -4*I-12, -4*I-8, -4*I-4, -4*I, -4*I+4, -4*I+8, -4*I+12, -4*I+16,
-2*I-16, -2*I-12, -2*I-8, -2*I-4, -2*I, -2*I+4, -2*I+8, -2*I+12, -2*I+16,
-24, -20, -16, -12, -8, -4, 0, 4, 8, 12, 16, 20, 24,
2*I-16, 2*I-12, 2*I-8, 2*I-4, 2*I, 2*I+4, 2*I+8, 2*I+12, 2*I+16,
4*I-16, 4*I-12, 4*I-8, 4*I-4, 4*I, 4*I+4, 4*I+8, 4*I+12, 4*I+16,
6*I-12, 6*I-8, 6*I-4, 6*I, 6*I+4, 6*I+8, 6*I+12,
8*I-12, 8*I-8, 8*I-4, 8*I, 8*I+4, 8*I+8, 8*I+12,
10*I-12,10*I-8, 10*I-4, 10*I, 10*I+4, 10*I+8, 10*I+12,
12*I-8, 12*I-4, 12*I, 12*I+4, 12*I+8,
14*I-8, 14*I-4, 14*I, 14*I+4, 14*I+8,
16*I-4, 16*I, 16*I+4,
18*I-4, 18*I, 18*I+4,
20*I,
22*I, 22*I+4,
};
int MV_refin[32] = {
-3*I,
-2*I-2, -2*I-1, -2*I, -2*I+1, -2*I+2,
-I-3, -I-2, -I-1, -I, -I+1, -I+2, -I+3,
-3, -2, -1, 1, 2, 3,
I-3, I-2, I-1, I, I+1, I+2, I+3,
2*I-2, 2*I-1, 2*I, 2*I+1, 2*I+2,
3*I
};
CHECK(hipMemcpyToSymbol(MV_offset4, MV_off4, 128*sizeof(int)));
CHECK(hipMemcpyToSymbol(MV_offset_refin, MV_refin, 32*sizeof(int)));
}
__device__ __constant__ MV MV_lookup4[128] = { // Unit: pixel
{-22,0},
{-20,0},
{-18,-4},{-18,0},{-18,4},
{-16,-4},{-16,0},{-16,4},
{-14,-8},{-14,-4},{-14,0},{-14,4},{-14,8},
{-12,-8},{-12,-4},{-12,0},{-12,4},{-12,8},
{-10,-12},{-10,-8},{-10,-4},{-10,0},{-10,4},{-10,8},{-10,12},
{ -8,-12},{ -8,-8},{ -8,-4},{ -8,0},{ -8,4},{ -8,8},{ -8,12},
{ -6,-12},{ -6,-8},{ -6,-4},{ -6,0},{ -6,4},{ -6,8},{ -6,12},
{-4,-16},{ -4,-12},{ -4,-8},{ -4,-4},{ -4,0},{ -4,4},{ -4,8},{ -4,12},{ -4,16},
{-2,-16},{ -2,-12},{ -2,-8},{ -2,-4},{ -2,0},{ -2,4},{ -2,8},{ -2,12},{ -2,16},
{0,-24},{0,-20},{ 0,-16},{ 0,-12},{ 0,-8},{ 0,-4},{ 0,0},{ 0,4},{ 0,8},{ 0,12},{ 0,16},{ 0,20},{ 0,24},
{ 2,-16},{ 2,-12},{ 2,-8},{ 2,-4},{ 2,0},{ 2,4},{ 2,8},{ 2,12},{ 2,16},
{ 4,-16},{ 4,-12},{ 4,-8},{ 4,-4},{ 4,0},{ 4,4},{ 4,8},{ 4,12},{ 4,16},
{ 6,-12},{ 6,-8},{ 6,-4},{ 6,0},{ 6,4},{ 6,8},{ 6,12},
{ 8,-12},{ 8,-8},{ 8,-4},{ 8,0},{ 8,4},{ 8,8},{ 8,12},
{ 10,-12},{ 10,-8},{ 10,-4},{ 10,0},{ 10,4},{ 10,8},{ 10,12},
{ 12,-8},{ 12,-4},{ 12,0},{ 12,4},{ 12,8},
{ 14,-8},{ 14,-4},{ 14,0},{ 14,4},{ 14,8},
{ 16,-4},{ 16,0},{ 16,4},
{ 18,-4},{ 18,0},{ 18,4},
{ 20,0},
{ 22,0},{ 22,4}
};
// Many fewer entries would be enough (17, to be precise), but this way we fill a warp
__device__ __constant__ MV MV_lookup_refin[32] = {
{-3, 0},
{-2, -2}, {-2, -1}, {-2, 0}, {-2, 1}, {-2, 2},
{-1, -3}, {-1, -2}, {-1, -1}, {-1, 0}, {-1, 1}, {-1, 2}, {-1, 3},
{ 0, -3}, { 0, -2}, { 0, -1}, { 0, 1}, { 0, 2}, { 0, 3},
{ 1, -3}, { 1, -2}, { 1, -1}, { 1, 0}, { 1, 1}, { 1, 2}, { 1, 3},
{ 2, -2}, { 2, -1}, { 2, 0}, { 2, 1}, { 2, 2},
{ 3, 0}
};
__inline__ __device__ uint32_t __vabsdiff4( uint32_t u, uint32_t v )
{
uint32_t w = 0;
uint32_t ww = 0;
asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(ww) : "r"(u), "r"(v), "r"(w));
return ww;
}
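// Hedged worked example (not part of the original file): __vabsdiff4 treats
// each 32-bit word as four packed bytes and returns the sum of the four
// per-byte absolute differences. For instance, with u = 0x0A051003 and
// v = 0x01060F00 it yields |0x0A-0x01| + |0x05-0x06| + |0x10-0x0F| + |0x03-0x00|
// = 9 + 1 + 1 + 3 = 14, i.e. the SAD of one 4-pixel quad in a single instruction.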
// Called with a (ROWS,COLS,1) GRID of (8x16x1) blocks
// block: 4x8x1, each block computes the motion estimation for one MB
// grid: 16 ?
__global__ void ME_CUDA_p ( const uint8_t * const in_frame, const uint8_t * const ref_frame,
int const streamID, int const stride, int const width, int const num_MB_width,
int_mv * const MVs_g, int * const MV_vars_g )
{
__shared__ uint32_t sad[128][32];
__shared__ int32_t minpos[128];
uint32_t i;
int32_t TID = threadIdx.y * blockDim.x + threadIdx.x;
// 16 blocks per grid (16,1,1)
int32_t MBoffset = streamID * 16 + blockIdx.x;
int32_t blockY = MBoffset / num_MB_width;
int32_t blockX = MBoffset % num_MB_width;
//if (( MBoffset == 3010 ) && ( TID == 10 ))
// printf( "%d %d ", blockX, blockY);
// Watch the dimensions: ref_frame has the border, raw_frame does not
int32_t img_offset = 16 * (blockY * width + blockX) + 2 * threadIdx.y * width + threadIdx.x;
int32_t ref_offset = 16 * (blockY * stride + blockX) + 2 * threadIdx.y * stride + threadIdx.x;
uint8_t * img = (uint8_t *) in_frame + img_offset;
uint8_t * ref = (uint8_t *) ref_frame + ref_offset + 32 * (stride + 1);
// one thread loads two quad pixels, one 4x8 block covers one img MB
// ind0: 0-31, relative position of the first quad with respect to the first MB pixel
int32_t delta_img = (1 * width);
int32_t delta_ref = (1 * stride);
// Senor... I don't understand, Senor...
// Input image values
// Each thread loads 4 pixels (packed in one int) of the reference MB
uint32_t img0 = (uint32_t) ( (*img << 24) | (*(img + 1) << 16) | (*(img + 2) << 8) | *(img + 3) ); //*img;
uint32_t img1 = (uint32_t) ( (*(img + delta_img) << 24) | (*(img + delta_img + 1) << 16) | (*(img + delta_img + 2) << 8) | *(img + delta_img + 3) ); //*(img + delta_img);
//uint8_t *imgd = img + delta_img;
//uint32_t img0 = *( (uint32_t *)(img) );
//uint32_t img1 = *( (uint32_t *)(img) );// + delta_img) );
// Pointers and values for the reference image (not initialized)
//uint8_t *refp;
//uint8_t *refpd;
uint32_t ref0;
uint32_t ref1;
// Output values computed from the SADs
uint32_t result;
//uint32_t result1;
// ref0=0x01020304;
// ref1=0x05060708;
// img0=0x01010101;
// img1=0x01010101;
// Compute pixel differences: //
asm(".reg .u64 ss<4>;"::);
asm(".reg .u32 st<4>;"::);
asm(".reg .u32 rr<2>;"::);
asm(" mov.u32 st0, %0;"::"r"(img0));
//asm( " mov.u32 st1, %0;"::"r"(img1));
//asm(" mov.u64 ss0, %0;"::"l"(img));
//asm(" mov.u64 ss1, %0;"::"l"(img));
//asm(" ld.global.u32 st0, [ss0];"::);
//asm(" ld.global.u32 st1, [ss1];"::);
// ss0 : *img0
// ss1 : *img1
// ss2 : *ref0
// ss3 : *ref1
// st0 : img0
// st1 : img1
// st2 : ref0
// st3 : ref1
// rr0 : result
// rr1 : result1
for(i=0; i < 128; i++)
{
const uint8_t *refp = ref + MV_offset4[i];
//refpd = refp + delta_ref;
//result = abs( refp[0] - img[0] ) + abs( refp[1] - img[1] ) + abs( refp[2] - img[2] ) + abs( refp[3] - img[3] );
//result += abs( refpd[0] - imgd[0] ) + abs( refpd[1] - imgd[1] ) + abs( refpd[2] - imgd[2] ) + abs( refpd[3] - imgd[3] );
ref0 = (uint32_t)( *(refp) << 24 | *(refp + 1) << 16 | *(refp + 2) << 8 | *(refp + 3) );
ref1 = (uint32_t)( *(refp + delta_ref) << 24 | *(refp + delta_ref + 1) << 16 | *(refp + delta_ref + 2) << 8 | *(refp + delta_ref + 3) );
//asm(" mov.u64 ss2, %0;"::"l"(ref));
//asm(" mov.u64 ss3, %0;"::"l"(ref));
//asm(" mov.u32 rr0, 0;"::);
//asm(" mov.u32 rr1, 0;"::);
//asm(" ld.global.u32 st2, [ss2];"::);
//asm(" ld.global.u32 st3, [ss3];"::);
//asm(" mov.u32 st2, %0;"::"r"(ref0));
//asm(" mov.u32 st3, %0;"::"r"(ref1));
//asm(" vabsdiff4.u32.u32.u32.add rr0, st0, st2, rr1;"::);
//asm(" vabsdiff4.u32.u32.u32.add rr1, st1, st3, rr0;"::);
//uint32_t result1;
//asm(" mov.u32 %0, rr0;":"=r"(result):);
//ref0 = *( (uint32_t *)(ref) );// + MV_offset4[i]
//ref1 = *( (uint32_t *)(ref) );// + MV_offset4[i] + delta_ref) );
//result = 0;
//result1 = 0;
//asm(" .reg .u32 r1;\n\t");
//asm(" vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;\n\t": "=r"(result) : "r" (img0), "r" (ref0), "r" (result1));
//asm(" vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;\n\t": "=r"(result1) : "r" (img1), "r" (ref1), "r" (result));
//" vabsdiff4.u32.u32.u32.add %0, %3, %4, r1;\n\t"
//" vabsdiff4.u32.u32.u32.add r1, %3, %4, r1;\n\t"
//" mov.u32 %0, r1;\n\t"
result = 0;
result += abs( *(refp) - *(img));
result += abs( *(refp + 1) - *(img + 1));
result += abs( *(refp + 2) - *(img + 2));
result += abs( *(refp + 3) - *(img + 3));
result += abs( *(refp + delta_ref) - *(img + delta_img));
result += abs( *(refp + 1 + delta_ref) - *(img + 1 + delta_img));
result += abs( *(refp + 2 + delta_ref) - *(img + 2 + delta_img));
result += abs( *(refp + 3 + delta_ref) - *(img + 3 + delta_img));
//result = __vabsdiff4( img0, ref0 );
//result += __vabsdiff4( img1, ref1 );
sad[i][TID] = result;
}
__syncthreads();
// accumulate diff, 32 -> 16: 128 rows and 32 columns
for (i=0; i<16; i++)
sad[TID][i]+=sad[TID][i+16];
__syncthreads();
for (i=0; i<16; i++)
sad[TID+32][i]+=sad[TID + 32][i+16];
__syncthreads();
for (i=0; i<16; i++)
sad[TID+64][i]+=sad[TID+64][i+16];
__syncthreads();
for (i=0; i<16; i++)
sad[TID+96][i]+=sad[TID+96][i+16];
__syncthreads();
// accumulate diff, 16 -> 8: 128 rows and 16 columns
for (i=0; i<8; i++)
sad[TID][i]+=sad[TID][i+8];
__syncthreads();
for (i=0; i<8; i++)
sad[TID+32][i]+=sad[TID+32][i+8];
__syncthreads();
for (i=0; i<8; i++)
sad[TID+64][i]+=sad[TID+64][i+8];
__syncthreads();
for (i=0; i<8; i++)
sad[TID+96][i]+=sad[TID+96][i+8];
__syncthreads();
// accumulate diff, 8 -> 4: 128 rows and 8 columns
for (i=0; i<4; i++)
sad[TID][i]+=sad[TID][i+4];
__syncthreads();
for (i=0; i<4; i++)
sad[TID+32][i]+=sad[TID+32][i+4];
__syncthreads();
for (i=0; i<4; i++)
sad[TID+64][i]+=sad[TID+64][i+4];
__syncthreads();
for (i=0; i<4; i++)
sad[TID+96][i]+=sad[TID+96][i+4];
__syncthreads();
// accumulate diff, 4 -> 2: 128 rows and 4 columns
for (i=0; i<2; i++)
sad[TID][i]+=sad[TID][i+2];
__syncthreads();
for (i=0; i<2; i++)
sad[TID+32][i]+=sad[TID+32][i+2];
__syncthreads();
for (i=0; i<2; i++)
sad[TID+64][i]+=sad[TID+64][i+2];
__syncthreads();
for (i=0; i<2; i++)
sad[TID+96][i]+=sad[TID+96][i+2];
__syncthreads();
// accumulate diff, 2 -> 1
for (i=0; i<1; i++)
sad[TID][i]+=sad[TID][i+1];
__syncthreads();
for (i=0; i<1; i++)
sad[TID+32][i]+=sad[TID+32][i+1];
__syncthreads();
for (i=0; i<1; i++)
sad[TID+64][i]+=sad[TID+64][i+1];
__syncthreads();
for (i=0; i<1; i++)
sad[TID+96][i]+=sad[TID+96][i+1];
__syncthreads();
// Find MINIMUM (and corresponding best MV) of 128 Pts - 32 threads //
//
minpos[TID] = TID;
//__syncthreads(); // needed?
minpos[32+TID]=32+TID;
//__syncthreads(); // NEEDED?
minpos[64+TID]=64+TID;
//__syncthreads(); // NEEDED??????
minpos[96+TID]=96+TID;
__syncthreads();
if( sad[TID][0] < sad[TID+32][0] )
{
sad[TID][0] = sad[TID+32][0];
minpos[TID] = minpos[TID+32];
}
__syncthreads();
if( sad[TID][0] < sad[TID+64][0] )
{
sad[TID][0] = sad[TID+64][0];
minpos[TID] = minpos[TID+64];
}
__syncthreads();
if( sad[TID][0] < sad[TID+96][0] )
{
sad[TID][0] = sad[TID+96][0];
minpos[TID] = minpos[TID+96];
}
__syncthreads();
if( TID < 16 ) // 16 threads
if( sad[TID][0] < sad[TID+16][0] ) {
sad[TID][0] = sad[TID+16][0];
minpos[TID] = minpos[TID+16];
}
__syncthreads();
if( TID < 8 ) // 8 threads
if( sad[TID][0] < sad[TID+8][0] ) {
sad[TID][0] = sad[TID+8][0];
minpos[TID] = minpos[TID+8];
}
__syncthreads();
if( TID < 4 ) // 4 threads
if( sad[TID][0] < sad[TID + 4][0] ) {
sad[TID][0] = sad[TID + 4][0];
minpos[TID] = minpos[TID + 4];
}
__syncthreads();
if( TID < 2 ) // 2 threads
if( sad[TID][0] < sad[TID + 2][0] ) {
sad[TID][0] = sad[TID + 2][0];
minpos[TID] = minpos[TID + 2];
}
__syncthreads();
int minsad;
if( TID == 0 ) // Only thread 0
{
if( sad[0][0] < sad[1][0] ) {
sad[0][0] = sad[1][0];
minpos[0] = minpos[1];
}
// And finally assign resulting MV
//MVs_g[MBoffset].as_mv = MV_lookup4[ minpos[0] ];
MVs_g[MBoffset].as_mv.row = MV_lookup4[ minpos[0] ].row;
MVs_g[MBoffset].as_mv.col = MV_lookup4[ minpos[0] ].col;
minsad = sad[0][0];
}
// Refinement search on the inner diamond.
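// All 32 threads re-center the reference pointer on the best coarse-search
// candidate; minpos[0] was produced by thread 0 in the reduction above.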
ref += MV_offset4[minpos[0]];
// compute the SAD matrix
for(i=0; i < 16; i++) {
const uint8_t *refp = ref + MV_offset_refin[i];
//ref0 = (uint32_t)( *(refp) << 24 | *(refp + 1) << 16 | *(refp + 2) << 8 | *(refp + 3) );
//ref1 = (uint32_t)( *(refp + delta_ref) << 24 | *(refp + delta_ref + 1) << 16 | *(refp + delta_ref + 2) << 8 | *(refp + delta_ref + 3) );
//result = __vabsdiff4( img0, ref0 );
//result += __vabsdiff4( img1, ref1 );
result = 0;
result += abs( *(refp) - *(img));
result += abs( *(refp + 1) - *(img + 1));
result += abs( *(refp + 2) - *(img + 2));
result += abs( *(refp + 3) - *(img + 3));
result += abs( *(refp + delta_ref) - *(img + delta_img));
result += abs( *(refp + 1 + delta_ref) - *(img + 1 + delta_img));
result += abs( *(refp + 2 + delta_ref) - *(img + 2 + delta_img));
result += abs( *(refp + 3 + delta_ref) - *(img + 3 + delta_img));
sad[i][TID] = result;
}
__syncthreads();
// Accumulation
// no need for an "if TID < 32" guard because there are always exactly 32 threads
for (i=0; i<16; i++)
sad[TID][i]+=sad[TID][i+16];
__syncthreads();
for (i=0; i<8; i++)
sad[TID][i]+=sad[TID][i+8];
__syncthreads();
for (i=0; i<4; i++)
sad[TID][i]+=sad[TID][i+4];
__syncthreads();
sad[TID][0] += ( sad[TID][1] + sad[TID][2] + sad[TID][3] );
__syncthreads();
// Search for the minimum
minpos[TID] = TID;
__syncthreads();
if( TID < 16 ) // 16 threads
if( sad[TID][0] < sad[TID+16][0] ) {
sad[TID][0] = sad[TID+16][0];
minpos[TID] = minpos[TID+16];
}
__syncthreads();
if( TID < 8 ) // 8 threads
if( sad[TID][0] < sad[TID+8][0] ) {
sad[TID][0] = sad[TID+8][0];
minpos[TID] = minpos[TID+8];
}
__syncthreads();
if( TID < 4 ) // 4 threads
if( sad[TID][0] < sad[TID + 4][0] ) {
sad[TID][0] = sad[TID + 4][0];
minpos[TID] = minpos[TID + 4];
}
__syncthreads();
if( TID < 2 ) // 2 threads
if( sad[TID][0] < sad[TID + 2][0] ) {
sad[TID][0] = sad[TID + 2][0];
minpos[TID] = minpos[TID + 2];
}
__syncthreads();
if( TID == 0 ) // Only thread 0
{
if( sad[0][0] < sad[1][0] ) {
sad[0][0] = sad[1][0];
minpos[0] = minpos[1];
}
if ( sad[0][0] < minsad ) {
MVs_g[MBoffset].as_mv.row += MV_lookup_refin[ minpos[0] ].row;
MVs_g[MBoffset].as_mv.col += MV_lookup_refin[ minpos[0] ].col;
}
}
}
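// Host-side wrapper: launches the motion-estimation kernel for one strip of 16
// macroblocks on the stream selected by streamID.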
void me_kernel_launch( VP8_COMMON * const common, const uint8_t * const in_frame, const uint8_t * const ref_frame,
int const streamID, int_mv * const MVs, int * const MV_vars ) {
hipLaunchKernelGGL(( ME_CUDA_p) , dim3(common->GPU.gridDim), dim3(common->GPU.blockDim), 0, common->GPU.streams.frame[streamID] , in_frame, ref_frame,
streamID, common->gpu_frame.stride, common->gpu_frame.width, common->gpu_frame.num_MB_width, MVs, MV_vars );
}
void me_cuda_launch_interleaved( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) {
int MV_size_16 = 16*sizeof(int_mv);
int MV_vars_size_16 = 16*sizeof(int);
// for printing information about reference frame flags and their usage, I left a commented printf at line 3625
// at the beginning of encode_frame_to_data_rate(..) in onyx_if.c
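// Interleaved variant: for each 16-MB strip, the kernel launch for every active
// reference (last / gold / altref) is immediately followed by the asynchronous
// copy of its motion vectors back to the host on the same stream.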
for (int s = 0; s < cm->GPU.num_mb16th; s++) {
int offset = 16*s;
// bugfix for images whose number of MBs is not divisible by 16
// previously too many processes were launched and hipMemcpyAsync would read past the end of the arrays
if (offset + 16 > cm->gpu_frame.num_mv) {
MV_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int_mv );
MV_vars_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int );
}
if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, (cm->gpu_frame.MVs_g)[0], (cm->gpu_frame.MV_vars_g)[0] );
CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(hipMemcpyAsync( &(cm->host_frame.MV_vars_h)[0][offset], &(cm->gpu_frame.MV_vars_g)[0][offset], MV_vars_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
// If ref_frame_flags indicates that a gold frame is present and the flag of the fb pointed to by gld_fb_idx indicates it is gold, then...
if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, (cm->gpu_frame.MVs_g)[1], (cm->gpu_frame.MV_vars_g)[1] );
CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(hipMemcpyAsync( &(cm->host_frame.MV_vars_h)[1][offset], &(cm->gpu_frame.MV_vars_g)[1][offset], MV_vars_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
// If ref_frame_flags indicates that an altref frame is present and the flag of the fb pointed to by alt_fb_idx indicates it is altref, then...
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, (cm->gpu_frame.MVs_g)[2], (cm->gpu_frame.MV_vars_g)[2] );
CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(hipMemcpyAsync( &(cm->host_frame.MV_vars_h)[2][offset], &(cm->gpu_frame.MV_vars_g)[2][offset], MV_vars_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
}
}
void me_cuda_launch_not_interleaved( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) {
int MV_size_16 = 16*sizeof(int_mv);
int MV_vars_size_16 = 16*sizeof(int);
// for printing information about reference frame flags and their usage, I left a commented printf at line 3625
// at the beginning of encode_frame_to_data_rate(..) in onyx_if.c
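// Non-interleaved variant: all kernel launches for the strip are issued first,
// and the device-to-host copies of the resulting motion vectors are queued
// afterwards.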
for (int s = 0; s < cm->GPU.num_mb16th; s++) {
int offset = 16*s;
// bugfix for images whose number of MBs is not divisible by 16
// previously too many processes were launched and hipMemcpyAsync would read past the end of the arrays
if (offset + 16 > cm->gpu_frame.num_mv) {
MV_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int_mv );
MV_vars_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int );
}
if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, (cm->gpu_frame.MVs_g)[0], (cm->gpu_frame.MV_vars_g)[0] );
}
// If ref_frame_flags indicates that a gold frame is present and the flag of the fb pointed to by gld_fb_idx indicates it is gold, then...
if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, (cm->gpu_frame.MVs_g)[1], (cm->gpu_frame.MV_vars_g)[1] );
}
// If ref_frame_flags indicates that an altref frame is present and the flag of the fb pointed to by alt_fb_idx indicates it is altref, then...
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, (cm->gpu_frame.MVs_g)[2], (cm->gpu_frame.MV_vars_g)[2] );
}
if (ref_frame_flags & GPUFLAG_LAST_FRAME) {
CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(hipMemcpyAsync( &(cm->host_frame.MV_vars_h)[0][offset], &(cm->gpu_frame.MV_vars_g)[0][offset], MV_vars_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) {
CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(hipMemcpyAsync( &(cm->host_frame.MV_vars_h)[1][offset], &(cm->gpu_frame.MV_vars_g)[1][offset], MV_vars_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) {
CHECK(hipMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(hipMemcpyAsync( &(cm->host_frame.MV_vars_h)[2][offset], &(cm->gpu_frame.MV_vars_g)[2][offset], MV_vars_size_16, hipMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
}
}
#endif /* HAVE_CUDA_ENABLED_DEVICE */
#ifdef __cplusplus
}
#endif
| 2f64ea79991696226d7d439e3a881636fac96d96.cu | /*
Cuda accelerated motion estimation for VP8 libvpx encoder
by Pietro Paglierani, Giuliano Grossi, Federico Pedersini and Alessandro Petrini
for Italtel and Universita' degli Studi di Milano
2015-2016, Milano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <wchar.h>
#include <locale.h>
#include "vpx_config.h"
#include "cuda/typedef_cuda.h"
#include "cuda/me_cuda.h"
#ifdef __cplusplus
extern "C" {
#endif
#if HAVE_CUDA_ENABLED_DEVICE
__constant__ int MV_offset4[128];
__constant__ int MV_offset_refin[32];
void setup_constant_mem(int img_stride) {
int I = img_stride;
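// Offsets (in pixels, with I = image stride) of the 128 coarse-search candidates
// and of the 32 refinement candidates, copied to constant memory below.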
int MV_off4[128] = { -22*I,
-20*I,
-18*I-4,-18*I,-18*I+4,
-16*I-4,-16*I,-16*I+4,
-14*I-8,-14*I-4,-14*I,-14*I+4,-14*I+8,
-12*I-8,-12*I-4,-12*I,-12*I+4,-12*I+8,
-10*I-12,-10*I-8,-10*I-4,-10*I,-10*I+4,-10*I+8,-10*I+12,
-8*I-12, -8*I-8, -8*I-4, -8*I, -8*I+4, -8*I+8, -8*I+12,
-6*I-12, -6*I-8, -6*I-4, -6*I, -6*I+4, -6*I+8, -6*I+12,
-4*I-16, -4*I-12, -4*I-8, -4*I-4, -4*I, -4*I+4, -4*I+8, -4*I+12, -4*I+16,
-2*I-16, -2*I-12, -2*I-8, -2*I-4, -2*I, -2*I+4, -2*I+8, -2*I+12, -2*I+16,
-24, -20, -16, -12, -8, -4, 0, 4, 8, 12, 16, 20, 24,
2*I-16, 2*I-12, 2*I-8, 2*I-4, 2*I, 2*I+4, 2*I+8, 2*I+12, 2*I+16,
4*I-16, 4*I-12, 4*I-8, 4*I-4, 4*I, 4*I+4, 4*I+8, 4*I+12, 4*I+16,
6*I-12, 6*I-8, 6*I-4, 6*I, 6*I+4, 6*I+8, 6*I+12,
8*I-12, 8*I-8, 8*I-4, 8*I, 8*I+4, 8*I+8, 8*I+12,
10*I-12,10*I-8, 10*I-4, 10*I, 10*I+4, 10*I+8, 10*I+12,
12*I-8, 12*I-4, 12*I, 12*I+4, 12*I+8,
14*I-8, 14*I-4, 14*I, 14*I+4, 14*I+8,
16*I-4, 16*I, 16*I+4,
18*I-4, 18*I, 18*I+4,
20*I,
22*I, 22*I+4,
};
int MV_refin[32] = {
-3*I,
-2*I-2, -2*I-1, -2*I, -2*I+1, -2*I+2,
-I-3, -I-2, -I-1, -I, -I+1, -I+2, -I+3,
-3, -2, -1, 1, 2, 3,
I-3, I-2, I-1, I, I+1, I+2, I+3,
2*I-2, 2*I-1, 2*I, 2*I+1, 2*I+2,
3*I
};
CHECK(cudaMemcpyToSymbol(MV_offset4, MV_off4, 128*sizeof(int)));
CHECK(cudaMemcpyToSymbol(MV_offset_refin, MV_refin, 32*sizeof(int)));
}
__device__ __constant__ MV MV_lookup4[128] = { // Unit: pixel
{-22,0},
{-20,0},
{-18,-4},{-18,0},{-18,4},
{-16,-4},{-16,0},{-16,4},
{-14,-8},{-14,-4},{-14,0},{-14,4},{-14,8},
{-12,-8},{-12,-4},{-12,0},{-12,4},{-12,8},
{-10,-12},{-10,-8},{-10,-4},{-10,0},{-10,4},{-10,8},{-10,12},
{ -8,-12},{ -8,-8},{ -8,-4},{ -8,0},{ -8,4},{ -8,8},{ -8,12},
{ -6,-12},{ -6,-8},{ -6,-4},{ -6,0},{ -6,4},{ -6,8},{ -6,12},
{-4,-16},{ -4,-12},{ -4,-8},{ -4,-4},{ -4,0},{ -4,4},{ -4,8},{ -4,12},{ -4,16},
{-2,-16},{ -2,-12},{ -2,-8},{ -2,-4},{ -2,0},{ -2,4},{ -2,8},{ -2,12},{ -2,16},
{0,-24},{0,-20},{ 0,-16},{ 0,-12},{ 0,-8},{ 0,-4},{ 0,0},{ 0,4},{ 0,8},{ 0,12},{ 0,16},{ 0,20},{ 0,24},
{ 2,-16},{ 2,-12},{ 2,-8},{ 2,-4},{ 2,0},{ 2,4},{ 2,8},{ 2,12},{ 2,16},
{ 4,-16},{ 4,-12},{ 4,-8},{ 4,-4},{ 4,0},{ 4,4},{ 4,8},{ 4,12},{ 4,16},
{ 6,-12},{ 6,-8},{ 6,-4},{ 6,0},{ 6,4},{ 6,8},{ 6,12},
{ 8,-12},{ 8,-8},{ 8,-4},{ 8,0},{ 8,4},{ 8,8},{ 8,12},
{ 10,-12},{ 10,-8},{ 10,-4},{ 10,0},{ 10,4},{ 10,8},{ 10,12},
{ 12,-8},{ 12,-4},{ 12,0},{ 12,4},{ 12,8},
{ 14,-8},{ 14,-4},{ 14,0},{ 14,4},{ 14,8},
{ 16,-4},{ 16,0},{ 16,4},
{ 18,-4},{ 18,0},{ 18,4},
{ 20,0},
{ 22,0},{ 22,4}
};
// Far fewer would suffice (17, to be precise), but this way we fill a whole warp
__device__ __constant__ MV MV_lookup_refin[32] = {
{-3, 0},
{-2, -2}, {-2, -1}, {-2, 0}, {-2, 1}, {-2, 2},
{-1, -3}, {-1, -2}, {-1, -1}, {-1, 0}, {-1, 1}, {-1, 2}, {-1, 3},
{ 0, -3}, { 0, -2}, { 0, -1}, { 0, 1}, { 0, 2}, { 0, 3},
{ 1, -3}, { 1, -2}, { 1, -1}, { 1, 0}, { 1, 1}, { 1, 2}, { 1, 3},
{ 2, -2}, { 2, -1}, { 2, 0}, { 2, 1}, { 2, 2},
{ 3, 0}
};
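// SIMD sum of absolute differences over the four packed bytes of u and v
// (PTX vabsdiff4 with a zero accumulator).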
__inline__ __device__ uint32_t __vabsdiff4( uint32_t u, uint32_t v )
{
uint32_t w = 0;
uint32_t ww = 0;
asm volatile("vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(ww) : "r"(u), "r"(v), "r"(w));
return ww;
}
// Called with a (ROWS,COLS,1) GRID of (8x16x1) blocks
// block: 4x8x1, ogni blocco calcola la me per un MB
// grid: 16 ?
__global__ void ME_CUDA_p ( const uint8_t * const in_frame, const uint8_t * const ref_frame,
int const streamID, int const stride, int const width, int const num_MB_width,
int_mv * const MVs_g, int * const MV_vars_g )
{
__shared__ uint32_t sad[128][32];
__shared__ int32_t minpos[128];
uint32_t i;
int32_t TID = threadIdx.y * blockDim.x + threadIdx.x;
// 16 blocks per grid (16,1,1)
int32_t MBoffset = streamID * 16 + blockIdx.x;
int32_t blockY = MBoffset / num_MB_width;
int32_t blockX = MBoffset % num_MB_width;
//if (( MBoffset == 3010 ) && ( TID == 10 ))
// printf( "%d %d ", blockX, blockY);
// Watch the dimensions: ref_frame has the border, raw_frame does not
int32_t img_offset = 16 * (blockY * width + blockX) + 2 * threadIdx.y * width + threadIdx.x;
int32_t ref_offset = 16 * (blockY * stride + blockX) + 2 * threadIdx.y * stride + threadIdx.x;
uint8_t * img = (uint8_t *) in_frame + img_offset;
uint8_t * ref = (uint8_t *) ref_frame + ref_offset + 32 * (stride + 1);
// one thread loads two quad pixels, one 4x8 block covers one img MB
// ind0: 0-31, relative position of the first quad with respect to the first MB pixel
int32_t delta_img = (1 * width);
int32_t delta_ref = (1 * stride);
// Senor... no understand, Senor...
// Input image values
// Each thread loads 4 pixels (packed into one int) of the reference MB
uint32_t img0 = (uint32_t) ( (*img << 24) | (*(img + 1) << 16) | (*(img + 2) << 8) | *(img + 3) ); //*img;
uint32_t img1 = (uint32_t) ( (*(img + delta_img) << 24) | (*(img + delta_img + 1) << 16) | (*(img + delta_img + 2) << 8) | *(img + delta_img + 3) ); //*(img + delta_img);
//uint8_t *imgd = img + delta_img;
//uint32_t img0 = *( (uint32_t *)(img) );
//uint32_t img1 = *( (uint32_t *)(img) );// + delta_img) );
// Pointers and values for the reference image (not initialized)
//uint8_t *refp;
//uint8_t *refpd;
uint32_t ref0;
uint32_t ref1;
// Output values computed from the SADs
uint32_t result;
//uint32_t result1;
// ref0=0x01020304;
// ref1=0x05060708;
// img0=0x01010101;
// img1=0x01010101;
// Compute pixel differences: //
asm(".reg .u64 ss<4>;"::);
asm(".reg .u32 st<4>;"::);
asm(".reg .u32 rr<2>;"::);
asm(" mov.u32 st0, %0;"::"r"(img0));
//asm( " mov.u32 st1, %0;"::"r"(img1));
//asm(" mov.u64 ss0, %0;"::"l"(img));
//asm(" mov.u64 ss1, %0;"::"l"(img));
//asm(" ld.global.u32 st0, [ss0];"::);
//asm(" ld.global.u32 st1, [ss1];"::);
// ss0 : *img0
// ss1 : *img1
// ss2 : *ref0
// ss3 : *ref1
// st0 : img0
// st1 : img1
// st2 : ref0
// st3 : ref1
// rr0 : result
// rr1 : result1
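// Main search loop: each of the 32 threads accumulates the partial SAD of its own
// 4x2-pixel patch against all 128 candidate positions; sad[i][TID] holds the
// partial SAD of candidate i computed by thread TID.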
for(i=0; i < 128; i++)
{
const uint8_t *refp = ref + MV_offset4[i];
//refpd = refp + delta_ref;
//result = abs( refp[0] - img[0] ) + abs( refp[1] - img[1] ) + abs( refp[2] - img[2] ) + abs( refp[3] - img[3] );
//result += abs( refpd[0] - imgd[0] ) + abs( refpd[1] - imgd[1] ) + abs( refpd[2] - imgd[2] ) + abs( refpd[3] - imgd[3] );
ref0 = (uint32_t)( *(refp) << 24 | *(refp + 1) << 16 | *(refp + 2) << 8 | *(refp + 3) );
ref1 = (uint32_t)( *(refp + delta_ref) << 24 | *(refp + delta_ref + 1) << 16 | *(refp + delta_ref + 2) << 8 | *(refp + delta_ref + 3) );
//asm(" mov.u64 ss2, %0;"::"l"(ref));
//asm(" mov.u64 ss3, %0;"::"l"(ref));
//asm(" mov.u32 rr0, 0;"::);
//asm(" mov.u32 rr1, 0;"::);
//asm(" ld.global.u32 st2, [ss2];"::);
//asm(" ld.global.u32 st3, [ss3];"::);
//asm(" mov.u32 st2, %0;"::"r"(ref0));
//asm(" mov.u32 st3, %0;"::"r"(ref1));result
//asm(" vabsdiff4.u32.u32.u32.add rr0, st0, st2, rr1;"::);
//asm(" vabsdiff4.u32.u32.u32.add rr1, st1, st3, rr0;"::);
//uint32_t result1;
//asm(" mov.u32 %0, rr0;":"=r"(result):);
//ref0 = *( (uint32_t *)(ref) );// + MV_offset4[i]
//ref1 = *( (uint32_t *)(ref) );// + MV_offset4[i] + delta_ref) );
//result = 0;
//result1 = 0;
//asm(" .reg .u32 r1;\n\t");
//asm(" vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;\n\t": "=r"(result) : "r" (img0), "r" (ref0), "r" (result1));
//asm(" vabsdiff4.u32.u32.u32.add %0, %1, %2, %3;\n\t": "=r"(result1) : "r" (img1), "r" (ref1), "r" (result));
//" vabsdiff4.u32.u32.u32.add %0, %3, %4, r1;\n\t"
//" vabsdiff4.u32.u32.u32.add r1, %3, %4, r1;\n\t"
//" mov.u32 %0, r1;\n\t"
result = 0;
result += abs( *(refp) - *(img));
result += abs( *(refp + 1) - *(img + 1));
result += abs( *(refp + 2) - *(img + 2));
result += abs( *(refp + 3) - *(img + 3));
result += abs( *(refp + delta_ref) - *(img + delta_img));
result += abs( *(refp + 1 + delta_ref) - *(img + 1 + delta_img));
result += abs( *(refp + 2 + delta_ref) - *(img + 2 + delta_img));
result += abs( *(refp + 3 + delta_ref) - *(img + 3 + delta_img));
//result = __vabsdiff4( img0, ref0 );
//result += __vabsdiff4( img1, ref1 );
sad[i][TID] = result;
}
__syncthreads();
// accumulate diff, 32 -> 16: 128 rows and 32 columns
for (i=0; i<16; i++)
sad[TID][i]+=sad[TID][i+16];
__syncthreads();
for (i=0; i<16; i++)
sad[TID+32][i]+=sad[TID + 32][i+16];
__syncthreads();
for (i=0; i<16; i++)
sad[TID+64][i]+=sad[TID+64][i+16];
__syncthreads();
for (i=0; i<16; i++)
sad[TID+96][i]+=sad[TID+96][i+16];
__syncthreads();
// accumulate diff, 16 -> 8: 128 rows and 16 columns
for (i=0; i<8; i++)
sad[TID][i]+=sad[TID][i+8];
__syncthreads();
for (i=0; i<8; i++)
sad[TID+32][i]+=sad[TID+32][i+8];
__syncthreads();
for (i=0; i<8; i++)
sad[TID+64][i]+=sad[TID+64][i+8];
__syncthreads();
for (i=0; i<8; i++)
sad[TID+96][i]+=sad[TID+96][i+8];
__syncthreads();
// accumulate diff, 8 -> 4: 128 rows and 8 columns
for (i=0; i<4; i++)
sad[TID][i]+=sad[TID][i+4];
__syncthreads();
for (i=0; i<4; i++)
sad[TID+32][i]+=sad[TID+32][i+4];
__syncthreads();
for (i=0; i<4; i++)
sad[TID+64][i]+=sad[TID+64][i+4];
__syncthreads();
for (i=0; i<4; i++)
sad[TID+96][i]+=sad[TID+96][i+4];
__syncthreads();
// accumulate diff, 4 -> 2: 128 rows and 4 columns
for (i=0; i<2; i++)
sad[TID][i]+=sad[TID][i+2];
__syncthreads();
for (i=0; i<2; i++)
sad[TID+32][i]+=sad[TID+32][i+2];
__syncthreads();
for (i=0; i<2; i++)
sad[TID+64][i]+=sad[TID+64][i+2];
__syncthreads();
for (i=0; i<2; i++)
sad[TID+96][i]+=sad[TID+96][i+2];
__syncthreads();
// accumulate diff, 2 -> 1
for (i=0; i<1; i++)
sad[TID][i]+=sad[TID][i+1];
__syncthreads();
for (i=0; i<1; i++)
sad[TID+32][i]+=sad[TID+32][i+1];
__syncthreads();
for (i=0; i<1; i++)
sad[TID+64][i]+=sad[TID+64][i+1];
__syncthreads();
for (i=0; i<1; i++)
sad[TID+96][i]+=sad[TID+96][i+1];
__syncthreads();
// Find MINIMUM (and corresponding best MV) of 128 Pts - 32 threads //
//
minpos[TID] = TID;
//__syncthreads(); // needed?
minpos[32+TID]=32+TID;
//__syncthreads(); // NEEDED?
minpos[64+TID]=64+TID;
//__syncthreads(); // NEEDED??????
minpos[96+TID]=96+TID;
__syncthreads();
if( sad[TID][0] < sad[TID+32][0] )
{
sad[TID][0] = sad[TID+32][0];
minpos[TID] = minpos[TID+32];
}
__syncthreads();
if( sad[TID][0] < sad[TID+64][0] )
{
sad[TID][0] = sad[TID+64][0];
minpos[TID] = minpos[TID+64];
}
__syncthreads();
if( sad[TID][0] < sad[TID+96][0] )
{
sad[TID][0] = sad[TID+96][0];
minpos[TID] = minpos[TID+96];
}
__syncthreads();
if( TID < 16 ) // 16 threads
if( sad[TID][0] < sad[TID+16][0] ) {
sad[TID][0] = sad[TID+16][0];
minpos[TID] = minpos[TID+16];
}
__syncthreads();
if( TID < 8 ) // 8 threads
if( sad[TID][0] < sad[TID+8][0] ) {
sad[TID][0] = sad[TID+8][0];
minpos[TID] = minpos[TID+8];
}
__syncthreads();
if( TID < 4 ) // 4 threads
if( sad[TID][0] < sad[TID + 4][0] ) {
sad[TID][0] = sad[TID + 4][0];
minpos[TID] = minpos[TID + 4];
}
__syncthreads();
if( TID < 2 ) // 2 threads
if( sad[TID][0] < sad[TID + 2][0] ) {
sad[TID][0] = sad[TID + 2][0];
minpos[TID] = minpos[TID + 2];
}
__syncthreads();
int minsad;
if( TID == 0 ) // Only thread 0
{
if( sad[0][0] < sad[1][0] ) {
sad[0][0] = sad[1][0];
minpos[0] = minpos[1];
}
// And finally assign resulting MV
//MVs_g[MBoffset].as_mv = MV_lookup4[ minpos[0] ];
MVs_g[MBoffset].as_mv.row = MV_lookup4[ minpos[0] ].row;
MVs_g[MBoffset].as_mv.col = MV_lookup4[ minpos[0] ].col;
minsad = sad[0][0];
}
// Refinement search on the inner diamond.
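// All 32 threads re-center the reference pointer on the best coarse-search
// candidate; minpos[0] was produced by thread 0 in the reduction above.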
ref += MV_offset4[minpos[0]];
// compute the SAD matrix
for(i=0; i < 16; i++) {
const uint8_t *refp = ref + MV_offset_refin[i];
//ref0 = (uint32_t)( *(refp) << 24 | *(refp + 1) << 16 | *(refp + 2) << 8 | *(refp + 3) );
//ref1 = (uint32_t)( *(refp + delta_ref) << 24 | *(refp + delta_ref + 1) << 16 | *(refp + delta_ref + 2) << 8 | *(refp + delta_ref + 3) );
//result = __vabsdiff4( img0, ref0 );
//result += __vabsdiff4( img1, ref1 );
result = 0;
result += abs( *(refp) - *(img));
result += abs( *(refp + 1) - *(img + 1));
result += abs( *(refp + 2) - *(img + 2));
result += abs( *(refp + 3) - *(img + 3));
result += abs( *(refp + delta_ref) - *(img + delta_img));
result += abs( *(refp + 1 + delta_ref) - *(img + 1 + delta_img));
result += abs( *(refp + 2 + delta_ref) - *(img + 2 + delta_img));
result += abs( *(refp + 3 + delta_ref) - *(img + 3 + delta_img));
sad[i][TID] = result;
}
__syncthreads();
// Accumulation
// no need for an "if TID < 32" guard because there are always exactly 32 threads
for (i=0; i<16; i++)
sad[TID][i]+=sad[TID][i+16];
__syncthreads();
for (i=0; i<8; i++)
sad[TID][i]+=sad[TID][i+8];
__syncthreads();
for (i=0; i<4; i++)
sad[TID][i]+=sad[TID][i+4];
__syncthreads();
sad[TID][0] += ( sad[TID][1] + sad[TID][2] + sad[TID][3] );
__syncthreads();
// Search for the minimum
minpos[TID] = TID;
__syncthreads();
if( TID < 16 ) // 16 threads
if( sad[TID][0] < sad[TID+16][0] ) {
sad[TID][0] = sad[TID+16][0];
minpos[TID] = minpos[TID+16];
}
__syncthreads();
if( TID < 8 ) // 8 threads
if( sad[TID][0] < sad[TID+8][0] ) {
sad[TID][0] = sad[TID+8][0];
minpos[TID] = minpos[TID+8];
}
__syncthreads();
if( TID < 4 ) // 4 threads
if( sad[TID][0] < sad[TID + 4][0] ) {
sad[TID][0] = sad[TID + 4][0];
minpos[TID] = minpos[TID + 4];
}
__syncthreads();
if( TID < 2 ) // 2 threads
if( sad[TID][0] < sad[TID + 2][0] ) {
sad[TID][0] = sad[TID + 2][0];
minpos[TID] = minpos[TID + 2];
}
__syncthreads();
if( TID == 0 ) // Only thread 0
{
if( sad[0][0] < sad[1][0] ) {
sad[0][0] = sad[1][0];
minpos[0] = minpos[1];
}
if ( sad[0][0] < minsad ) {
MVs_g[MBoffset].as_mv.row += MV_lookup_refin[ minpos[0] ].row;
MVs_g[MBoffset].as_mv.col += MV_lookup_refin[ minpos[0] ].col;
}
}
}
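// Host-side wrapper: launches the motion-estimation kernel for one strip of 16
// macroblocks on the stream selected by streamID.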
void me_kernel_launch( VP8_COMMON * const common, const uint8_t * const in_frame, const uint8_t * const ref_frame,
int const streamID, int_mv * const MVs, int * const MV_vars ) {
ME_CUDA_p <<< common->GPU.gridDim, common->GPU.blockDim, 0, common->GPU.streams.frame[streamID] >>> (in_frame, ref_frame,
streamID, common->gpu_frame.stride, common->gpu_frame.width, common->gpu_frame.num_MB_width, MVs, MV_vars );
}
void me_cuda_launch_interleaved( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) {
int MV_size_16 = 16*sizeof(int_mv);
int MV_vars_size_16 = 16*sizeof(int);
// for printing information about reference frame flags and their usage, I left a commented printf at line 3625
// at the beginning of encode_frame_to_data_rate(..) in onyx_if.c
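// Interleaved variant: for each 16-MB strip, the kernel launch for every active
// reference (last / gold / altref) is immediately followed by the asynchronous
// copy of its motion vectors back to the host on the same stream.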
for (int s = 0; s < cm->GPU.num_mb16th; s++) {
int offset = 16*s;
// bugfix for images whose number of MBs is not divisible by 16
// previously too many processes were launched and cudaMemcpyAsync would read past the end of the arrays
if (offset + 16 > cm->gpu_frame.num_mv) {
MV_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int_mv );
MV_vars_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int );
}
if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, (cm->gpu_frame.MVs_g)[0], (cm->gpu_frame.MV_vars_g)[0] );
CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(cudaMemcpyAsync( &(cm->host_frame.MV_vars_h)[0][offset], &(cm->gpu_frame.MV_vars_g)[0][offset], MV_vars_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
// If ref_frame_flags indicates that a gold frame is present and the flag of the fb pointed to by gld_fb_idx indicates it is gold, then...
if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, (cm->gpu_frame.MVs_g)[1], (cm->gpu_frame.MV_vars_g)[1] );
CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(cudaMemcpyAsync( &(cm->host_frame.MV_vars_h)[1][offset], &(cm->gpu_frame.MV_vars_g)[1][offset], MV_vars_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
// If ref_frame_flags indicates that an altref frame is present and the flag of the fb pointed to by alt_fb_idx indicates it is altref, then...
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, (cm->gpu_frame.MVs_g)[2], (cm->gpu_frame.MV_vars_g)[2] );
CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(cudaMemcpyAsync( &(cm->host_frame.MV_vars_h)[2][offset], &(cm->gpu_frame.MV_vars_g)[2][offset], MV_vars_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
}
}
void me_cuda_launch_not_interleaved( VP8_COMMON * const cm, int fb_idx, int ref_frame_flags ) {
int MV_size_16 = 16*sizeof(int_mv);
int MV_vars_size_16 = 16*sizeof(int);
// for printing information about reference frame flags and their usage, I left a commented printf at line 3625
// at the beginning of encode_frame_to_data_rate(..) in onyx_if.c
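// Non-interleaved variant: all kernel launches for the strip are issued first,
// and the device-to-host copies of the resulting motion vectors are queued
// afterwards.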
for (int s = 0; s < cm->GPU.num_mb16th; s++) {
int offset = 16*s;
// bugfix for images whose number of MBs is not divisible by 16
// previously too many processes were launched and cudaMemcpyAsync would read past the end of the arrays
if (offset + 16 > cm->gpu_frame.num_mv) {
MV_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int_mv );
MV_vars_size_16 = ( offset + 16 - cm->gpu_frame.num_mv ) * sizeof( int );
}
if ((ref_frame_flags & GPUFLAG_LAST_FRAME) && (cm->yv12_fb[cm->lst_fb_idx].flags & GPUFLAG_LAST_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->lst_fb_idx], s, (cm->gpu_frame.MVs_g)[0], (cm->gpu_frame.MV_vars_g)[0] );
}
// If ref_frame_flags indicates that a gold frame is present and the flag of the fb pointed to by gld_fb_idx indicates it is gold, then...
if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->gld_fb_idx], s, (cm->gpu_frame.MVs_g)[1], (cm->gpu_frame.MV_vars_g)[1] );
}
// If ref_frame_flags indicates that an altref frame is present and the flag of the fb pointed to by alt_fb_idx indicates it is altref, then...
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) {
me_kernel_launch(cm, cm->gpu_frame.raw_current_fb_g, (cm->gpu_frame.yv12_fb_g)[cm->alt_fb_idx], s, (cm->gpu_frame.MVs_g)[2], (cm->gpu_frame.MV_vars_g)[2] );
}
if (ref_frame_flags & GPUFLAG_LAST_FRAME) {
CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[0][offset], &(cm->gpu_frame.MVs_g)[0][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(cudaMemcpyAsync( &(cm->host_frame.MV_vars_h)[0][offset], &(cm->gpu_frame.MV_vars_g)[0][offset], MV_vars_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
if ((ref_frame_flags & GPUFLAG_GOLD_FRAME) && (cm->yv12_fb[cm->gld_fb_idx].flags & GPUFLAG_GOLD_FRAME)) {
CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[1][offset], &(cm->gpu_frame.MVs_g)[1][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(cudaMemcpyAsync( &(cm->host_frame.MV_vars_h)[1][offset], &(cm->gpu_frame.MV_vars_g)[1][offset], MV_vars_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
if ((ref_frame_flags & GPUFLAG_ALTR_FRAME) && (cm->yv12_fb[cm->alt_fb_idx].flags & GPUFLAG_ALTR_FRAME)) {
CHECK(cudaMemcpyAsync( &(cm->host_frame.MVs_h)[2][offset], &(cm->gpu_frame.MVs_g)[2][offset], MV_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
//CHECK(cudaMemcpyAsync( &(cm->host_frame.MV_vars_h)[2][offset], &(cm->gpu_frame.MV_vars_g)[2][offset], MV_vars_size_16, cudaMemcpyDeviceToHost, cm->GPU.streams.frame[s]));
}
}
}
#endif /* HAVE_CUDA_ENABLED_DEVICE */
#ifdef __cplusplus
}
#endif
|
5fe1ddd74ab6c85fa2e1fdac63baee9ac26582e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BlockDim = 16x16
//GridDim = w/16*h/16
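// Interleaves the separate Cb/Cr planes into the NV12 chroma plane, one thread
// per chroma sample; the #if blocks below select alternative write patterns.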
extern "C" __global__ void InterleaveUV( unsigned char *yuv_cb, unsigned char *yuv_cr, unsigned char *nv12_chroma,
int chroma_width, int chroma_height, int cb_pitch, int cr_pitch, int nv12_pitch )
{
int x,y;
unsigned char *pCb;
unsigned char *pCr;
unsigned char *pDst;
x = blockIdx.x*blockDim.x+threadIdx.x;
y = blockIdx.y*blockDim.y+threadIdx.y;
if ((x < chroma_width) && (y < chroma_height))
{
#if 0
pCb = yuv_cb + (y*cb_pitch);
pCr = yuv_cr + (y*cr_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x << 1] = pCb[x];
pDst[(x << 1) + 1] = pCr[x];
#elif 1
pCb = yuv_cb + (y*cb_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x] = pCb[x];
pCr = yuv_cr + (y*cr_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x] = pCr[x];
#else
pCb = yuv_cb + (y*cb_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x] = pCb[x];
#endif
}
}
| 5fe1ddd74ab6c85fa2e1fdac63baee9ac26582e2.cu |
// BlockDim = 16x16
//GridDim = w/16*h/16
extern "C" __global__ void InterleaveUV( unsigned char *yuv_cb, unsigned char *yuv_cr, unsigned char *nv12_chroma,
int chroma_width, int chroma_height, int cb_pitch, int cr_pitch, int nv12_pitch )
{
int x,y;
unsigned char *pCb;
unsigned char *pCr;
unsigned char *pDst;
x = blockIdx.x*blockDim.x+threadIdx.x;
y = blockIdx.y*blockDim.y+threadIdx.y;
if ((x < chroma_width) && (y < chroma_height))
{
#if 0
pCb = yuv_cb + (y*cb_pitch);
pCr = yuv_cr + (y*cr_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x << 1] = pCb[x];
pDst[(x << 1) + 1] = pCr[x];
#elif 1
pCb = yuv_cb + (y*cb_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x] = pCb[x];
pCr = yuv_cr + (y*cr_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x] = pCr[x];
#else
pCb = yuv_cb + (y*cb_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x] = pCb[x];
#endif
}
}
|
1af2c19812ddd38586670a24a3682348efca6363.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__constant__ int dim;
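// Minimal check that a value copied into __constant__ memory with
// hipMemcpyToSymbol is visible from inside a kernel.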
__global__ void test(int *gpu_Num){
*gpu_Num = dim;
}
int main(int argc, char* argv[])
{
int num = 25;
hipMemcpyToSymbol(dim,&num,sizeof(int),0,hipMemcpyHostToDevice);
int *gpu_Num;
hipMalloc(&gpu_Num,sizeof(int));
hipLaunchKernelGGL(( test), dim3(1),dim3(1), 0, 0, gpu_Num);
int hostResult;
hipMemcpy(&hostResult,gpu_Num,sizeof(int),hipMemcpyDefault);
printf("Result: %i\n",hostResult);
}
| 1af2c19812ddd38586670a24a3682348efca6363.cu | #include "stdio.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
__constant__ int dim;
__global__ void test(int *gpu_Num){
*gpu_Num = dim;
}
int main(int argc, char* argv[])
{
int num = 25;
cudaMemcpyToSymbol(dim,&num,sizeof(int),0,cudaMemcpyHostToDevice);
int *gpu_Num;
cudaMalloc(&gpu_Num,sizeof(int));
test<<<1,1>>>(gpu_Num);
int hostResult;
cudaMemcpy(&hostResult,gpu_Num,sizeof(int),cudaMemcpyDefault);
printf("Result: %i\n",hostResult);
}
|
9bc8b91f6fa62b5895d418c2f0409726608bfde5.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "../utils/SyncedMemory.h"
#include "lab1.h"
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
Lab1VideoGenerator g;
Lab1VideoInfo i;
g.get_info(i);
if (i.w == 0 or i.h == 0 or i.n_frame == 0 or i.fps_n == 0 or i.fps_d == 0) {
puts("Cannot be zero");
abort();
} else if (i.w%2 != 0 or i.h%2 != 0) {
puts("Only even frame size is supported");
abort();
}
unsigned FRAME_SIZE = i.w*i.h*3/2;
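// YUV 4:2:0 frame: one full-resolution luma plane plus two quarter-resolution
// chroma planes, i.e. 3/2 bytes per pixel.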
MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
auto frames = frameb.CreateSync(FRAME_SIZE);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
}
fclose(fp);
return 0;
}
| 9bc8b91f6fa62b5895d418c2f0409726608bfde5.cu | #include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "../utils/SyncedMemory.h"
#include "lab1.h"
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
Lab1VideoGenerator g;
Lab1VideoInfo i;
g.get_info(i);
if (i.w == 0 or i.h == 0 or i.n_frame == 0 or i.fps_n == 0 or i.fps_d == 0) {
puts("Cannot be zero");
abort();
} else if (i.w%2 != 0 or i.h%2 != 0) {
puts("Only even frame size is supported");
abort();
}
unsigned FRAME_SIZE = i.w*i.h*3/2;
MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
auto frames = frameb.CreateSync(FRAME_SIZE);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
fwrite(frames.get_cpu_ro(), sizeof(uint8_t), FRAME_SIZE, fp);
}
fclose(fp);
return 0;
}
|
28920da695dc79c32a2fbb3d514348ad6233eed4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "j2d9pt-gol-256-10-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 18
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
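// Temporal tiling: full tiles of __side0Len = 10 time steps go through kernel0_10;
// the remaining steps (and the parity adjustment __c0Padr) are handled below by
// the smaller-tile kernels.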
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
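      /* Launch parity already matches: a single launch of the matching kernel0_<r>
         handles the remaining (__c0Len % __side0LenMax) time steps. */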
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
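    // Reference CPU path (scop == false): OpenMP-parallel weighted 9-point stencil.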
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
          A[(t+1)%2][i][j] =
( 7.1f * A[t%2][i-1][j-1] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i-1][j+1] +
12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] +
9.1f * A[t%2][i+1][j-1] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+1][j+1]) / 118;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 28920da695dc79c32a2fbb3d514348ad6233eed4.cu | #include <assert.h>
#include <stdio.h>
#include "j2d9pt-gol-256-10-512_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 18
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
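      /* Handle the leftover (__c0Len % __side0LenMax) time steps: if the required launch
         parity does not match, the remainder is split across two smaller launches (the
         paired cases below); otherwise a single launch of the matching kernel0_<r>
         finishes the stream. */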
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
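    // Reference CPU path (scop == false): OpenMP-parallel weighted 9-point stencil.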
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
          A[(t+1)%2][i][j] =
( 7.1f * A[t%2][i-1][j-1] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i-1][j+1] +
12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] +
9.1f * A[t%2][i+1][j-1] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+1][j+1]) / 118;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
a7059fb2983ddd69dc4c64ef30c34b8bc8812348.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <vector>
#include "Ray.cuh.cu"
#include "HitRecord.cuh.cu"
#include "dummy_helper.cuh.cu"
#include "TriangleFace.cuh.cu"
#include "Material.cuh.cu"
#include "aabb.cuh.cu"
namespace RayTracing
{
template<bool isGPU>
class PolygonsManager
{
};
template<>
class PolygonsManager<false>
{
protected:
std::vector<std::vector<MappedTriangleFace>> m_faces;
std::vector<aabb> m_boxes;
public:
PolygonsManager() {}
void AddFigure(const aabb &box)
{
m_faces.emplace_back();
m_boxes.emplace_back(box);
}
void AddPolygon(const TriangleFace &face)
{
m_faces[m_boxes.size() - 1].emplace_back(face);
}
void AddPolygon(const MappedTriangleFace &face)
{
m_faces[m_boxes.size() - 1].emplace_back(face);
}
void ConstructQuad(
const Point3 &A,
const Point3 &B,
const Point3 &C,
const Point3 &D,
const Point3 &origin,
const Material * const * const material
)
{
AddPolygon(
TriangleFace{
A, B, C, origin, material
}
);
AddPolygon(
TriangleFace{
A, D, C, origin, material
}
);
}
void ConstructQuad(
const Point3 &A,
const Point3 &B,
const Point3 &C,
const Point3 &D,
const Point3 &origin,
const Material * const * const material,
const TriangleMapping &mapping1,
const TriangleMapping &mapping2
)
{
AddPolygon(
MappedTriangleFace{
A, B, C, origin, material, mapping1
}
);
AddPolygon(
MappedTriangleFace{
A, D, C, origin, material, mapping2
}
);
}
bool Hit(
const Ray &ray,
const float tMin,
HitRecord &hitRecord
) const
{
bool hitAtLeastOnce = false;
for (int j = 0; j < m_boxes.size(); ++j)
{
if (!m_boxes[j].Hit(ray, tMin, hitRecord.t))
continue;
for (int i = 0; i < m_faces[j].size(); ++i)
hitAtLeastOnce |= m_faces[j][i].Hit(ray, tMin, hitRecord);
}
return hitAtLeastOnce;
}
void CompleteAdding() {}
void Deinit() {}
};
template<>
class PolygonsManager<true> : public PolygonsManager<false>
{
private:
CudaMemoryLogic<CudaMemoryLogic<MappedTriangleFace>> m_faces_d;
CudaMemoryLogic<aabb> m_boxes_d;
std::vector<CudaMemoryLogic<MappedTriangleFace>> m_faces_keeper;
public:
using PolygonsManager<false>::PolygonsManager;
using PolygonsManager<false>::AddPolygon;
using PolygonsManager<false>::ConstructQuad;
__device__
bool Hit(
const Ray &ray,
const float tMin,
HitRecord &hitRecord
) const
{
bool hitAtLeastOnce = false;
for (int j = 0; j < m_boxes_d.count; ++j)
{
if (!m_boxes_d.get()[j].Hit(ray, tMin, hitRecord.t))
continue;
for (int i = 0; i < m_faces_d.get()[j].count; ++i)
hitAtLeastOnce |= m_faces_d.get()[j].get()[i].Hit(ray, tMin, hitRecord);
}
return hitAtLeastOnce;
}
void CompleteAdding()
{
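            // Upload each figure's face list to its own device array, then upload
            // the array of per-figure device arrays and the bounding boxes; the
            // host-side vectors are cleared since only the device copies are used afterwards.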
for (int i = 0; i < m_faces.size(); ++i)
{
m_faces_keeper.emplace_back();
m_faces_keeper.back().alloc(this->m_faces[i].size());
m_faces_keeper.back().memcpy(this->m_faces[i].data(), hipMemcpyHostToDevice);
}
m_faces_d.alloc(m_faces_keeper.size());
m_faces_d.memcpy(m_faces_keeper.data(), hipMemcpyHostToDevice);
m_boxes_d.alloc(this->m_boxes.size());
m_boxes_d.memcpy(this->m_boxes.data(), hipMemcpyHostToDevice);
this->m_faces.clear();
this->m_boxes.clear();
}
void Deinit()
{
for (int i = 0; i < m_faces_keeper.size(); ++i)
m_faces_keeper[i].dealloc();
m_faces_d.dealloc();
m_boxes_d.dealloc();
}
};
} // namespace RayTracing
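// A minimal host-side usage sketch (illustrative only; `box`, the quad corners
// A/B/C/D, `origin`, `material` and the mappings are placeholders that are not
// defined in this header):
//
//   RayTracing::PolygonsManager<true> manager;
//   manager.AddFigure(box); // axis-aligned bounding box of the figure
//   manager.ConstructQuad(A, B, C, D, origin, material, mapping1, mapping2);
//   manager.CompleteAdding(); // uploads the faces and boxes to the device
//   // ... pass `manager` by value into a kernel, which calls manager.Hit(ray, tMin, rec)
//   manager.Deinit(); // releases the device allocations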
| a7059fb2983ddd69dc4c64ef30c34b8bc8812348.cu | #pragma once
#include <vector>
#include "Ray.cuh.cu"
#include "HitRecord.cuh.cu"
#include "dummy_helper.cuh.cu"
#include "TriangleFace.cuh.cu"
#include "Material.cuh.cu"
#include "aabb.cuh.cu"
namespace RayTracing
{
template<bool isGPU>
class PolygonsManager
{
};
template<>
class PolygonsManager<false>
{
protected:
std::vector<std::vector<MappedTriangleFace>> m_faces;
std::vector<aabb> m_boxes;
public:
PolygonsManager() {}
void AddFigure(const aabb &box)
{
m_faces.emplace_back();
m_boxes.emplace_back(box);
}
void AddPolygon(const TriangleFace &face)
{
m_faces[m_boxes.size() - 1].emplace_back(face);
}
void AddPolygon(const MappedTriangleFace &face)
{
m_faces[m_boxes.size() - 1].emplace_back(face);
}
void ConstructQuad(
const Point3 &A,
const Point3 &B,
const Point3 &C,
const Point3 &D,
const Point3 &origin,
const Material * const * const material
)
{
AddPolygon(
TriangleFace{
A, B, C, origin, material
}
);
AddPolygon(
TriangleFace{
A, D, C, origin, material
}
);
}
void ConstructQuad(
const Point3 &A,
const Point3 &B,
const Point3 &C,
const Point3 &D,
const Point3 &origin,
const Material * const * const material,
const TriangleMapping &mapping1,
const TriangleMapping &mapping2
)
{
AddPolygon(
MappedTriangleFace{
A, B, C, origin, material, mapping1
}
);
AddPolygon(
MappedTriangleFace{
A, D, C, origin, material, mapping2
}
);
}
bool Hit(
const Ray &ray,
const float tMin,
HitRecord &hitRecord
) const
{
bool hitAtLeastOnce = false;
for (int j = 0; j < m_boxes.size(); ++j)
{
if (!m_boxes[j].Hit(ray, tMin, hitRecord.t))
continue;
for (int i = 0; i < m_faces[j].size(); ++i)
hitAtLeastOnce |= m_faces[j][i].Hit(ray, tMin, hitRecord);
}
return hitAtLeastOnce;
}
void CompleteAdding() {}
void Deinit() {}
};
template<>
class PolygonsManager<true> : public PolygonsManager<false>
{
private:
CudaMemoryLogic<CudaMemoryLogic<MappedTriangleFace>> m_faces_d;
CudaMemoryLogic<aabb> m_boxes_d;
std::vector<CudaMemoryLogic<MappedTriangleFace>> m_faces_keeper;
public:
using PolygonsManager<false>::PolygonsManager;
using PolygonsManager<false>::AddPolygon;
using PolygonsManager<false>::ConstructQuad;
__device__
bool Hit(
const Ray &ray,
const float tMin,
HitRecord &hitRecord
) const
{
bool hitAtLeastOnce = false;
for (int j = 0; j < m_boxes_d.count; ++j)
{
if (!m_boxes_d.get()[j].Hit(ray, tMin, hitRecord.t))
continue;
for (int i = 0; i < m_faces_d.get()[j].count; ++i)
hitAtLeastOnce |= m_faces_d.get()[j].get()[i].Hit(ray, tMin, hitRecord);
}
return hitAtLeastOnce;
}
void CompleteAdding()
{
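            // Upload each figure's face list to its own device array, then upload
            // the array of per-figure device arrays and the bounding boxes; the
            // host-side vectors are cleared since only the device copies are used afterwards.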
for (int i = 0; i < m_faces.size(); ++i)
{
m_faces_keeper.emplace_back();
m_faces_keeper.back().alloc(this->m_faces[i].size());
m_faces_keeper.back().memcpy(this->m_faces[i].data(), cudaMemcpyHostToDevice);
}
m_faces_d.alloc(m_faces_keeper.size());
m_faces_d.memcpy(m_faces_keeper.data(), cudaMemcpyHostToDevice);
m_boxes_d.alloc(this->m_boxes.size());
m_boxes_d.memcpy(this->m_boxes.data(), cudaMemcpyHostToDevice);
this->m_faces.clear();
this->m_boxes.clear();
}
void Deinit()
{
for (int i = 0; i < m_faces_keeper.size(); ++i)
m_faces_keeper[i].dealloc();
m_faces_d.dealloc();
m_boxes_d.dealloc();
}
};
} // namespace RayTracing
|
01530764be7d89f4dcedc0ea8686ed78308171be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//********************************************************//
// CUDA SIFT extractor by Marten Bjorkman aka Celebrandil //
//********************************************************//
///////////////////////////////////////////////////////////////////////////////
// Kernel configuration
///////////////////////////////////////////////////////////////////////////////
__constant__ float d_Threshold[2];
__constant__ float d_Scales[8], d_Factor;
__constant__ float d_EdgeLimit;
__constant__ int d_MaxNumPoints;
__device__ unsigned int d_PointCounter[1];
__constant__ float d_Kernel1[5];
__constant__ float d_Kernel2[12*16];
///////////////////////////////////////////////////////////////////////////////
// Lowpass filter and subsample image
///////////////////////////////////////////////////////////////////////////////
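// Summary of the approach (the per-step details are in the inline comments below):
//   1) every thread loads one pixel of a padded (halo of 2) source row into `inrow`,
//   2) SCALEDOWN_W/2 threads convolve that row horizontally with the 5-tap kernel
//      at even columns, storing the result into one of five row buffers in `brow`,
//   3) once five filtered rows are buffered, the same threads convolve them
//      vertically and write one output row for every other source row.
// A possible launch configuration (illustrative sketch only; SCALEDOWN_W/SCALEDOWN_H
// are assumed to be compile-time tile sizes with SCALEDOWN_W >= SCALEDOWN_H, and
// iDivUp is a hypothetical round-up division helper):
//   dim3 blocks(iDivUp(width, SCALEDOWN_W), iDivUp(height, SCALEDOWN_H));
//   dim3 threads(SCALEDOWN_W + 4);
//   hipLaunchKernelGGL(ScaleDown_D, blocks, threads, 0, 0,
//                      d_Result, d_Data, width, pitch, height, newpitch);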
__global__ void ScaleDown_D(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) {
// TODO: one element per thread in a block?
__shared__ float inrow[SCALEDOWN_W + 4];
__shared__ float brow[5 * (SCALEDOWN_W / 2)];
//
__shared__ int yRead[SCALEDOWN_H + 4];
__shared__ int yWrite[SCALEDOWN_H + 4];
// Get thread index, which ranges from 0 to SCALEDOWN_W + 4
const int tx = threadIdx.x;
// Get indices in brow
// TODO: move this out?
#define dx2 (SCALEDOWN_W / 2)
const int tx0 = tx + 0 * dx2;
const int tx1 = tx + 1 * dx2;
const int tx2 = tx + 2 * dx2;
const int tx3 = tx + 3 * dx2;
const int tx4 = tx + 4 * dx2;
// TODO: x and y pixel index
const int xStart = blockIdx.x * SCALEDOWN_W;
const int yStart = blockIdx.y * SCALEDOWN_H;
// TODO: x coordinate to write to?
const int xWrite = xStart / 2 + tx;
int xRead = xStart + tx - 2;
xRead = (xRead < 0 ? 0 : xRead);
xRead = (xRead >= width ? width - 1 : xRead);
const float *k = d_Kernel1;
  // Identify y read and write indices; note that threads with
  // SCALEDOWN_H + 4 <= tx < SCALEDOWN_W + 4 do no work in this section
if (tx < SCALEDOWN_H + 4) {
// TODO: tx = 0 and tx = 1 are the same; why?
int y = yStart + tx - 1;
// Clamp at 0 and height - 1
y = (y < 0 ? 0 : y);
y = (y >= height ? height - 1 : y);
// Read start index
yRead[tx] = y * pitch;
// Write start index
yWrite[tx] = (yStart + tx - 4) / 2 * newpitch;
}
// Synchronize threads to ensure we have yRead and yWrite filled for current
// warp
__syncthreads();
// For each thread (which runs 0 to SCALEDOWN_W + 4 - 1), loop through 0 to
// SCALEDOWN_H + 4 - 1 by kernel size.
for (int dy = 0; dy < SCALEDOWN_H + 4; dy += 5) {
// yRead[dy + 0] is the y index to 0th row of data from source image (may
// be the same as 1st, 2nd, etc row, depending on how close we are to the
// edge of image). xRead is determined by thread id and starts from size
// of kernel / 2 + 1 to the left of our current pixel
inrow[tx] = d_Data[yRead[dy + 0] + xRead];
// Once we synchronize, inrow should contain the data from the source
// image corresponding to the first row in the current block. It is length
// SCALEDOWN_W + 4.
__syncthreads();
// For the SCALEDOWN_W / 2 threads in block, compute the first of 5
// indices for this thread. Convolve the 1-D kernel k with every other
// 'pixel' in the block via 2 * tx
if (tx < dx2) {
brow[tx0] = k[0] * (inrow[2 * tx] + inrow[2 * tx + 4]) +
k[1] * (inrow[2 * tx + 1] + inrow[2 * tx + 3]) +
k[2] * inrow[2 * tx + 2];
}
    // Once we synchronize, the first dx2 entries of brow hold the horizontally
    // filtered (even-column) samples of source row dy + 0
__syncthreads();
// Compute for SCALEDOWN_W / 2 threads in block. dy & 1 is true if dy is
// odd. We require that dy is even and after we've completed at least one
// iteration
if (tx < dx2 && dy >= 4 && !(dy & 1)) {
d_Result[yWrite[dy + 0] + xWrite] = k[2] * brow[tx2] +
k[0] * (brow[tx0] + brow[tx4]) +
k[1] * (brow[tx1] + brow[tx3]);
}
// And...this is all just the same as above. One big unrolled for loop.
if (dy < (SCALEDOWN_H + 3)) {
      // yRead[dy + 1] is the y index to the 1st row of data from source image
// (may be the same as 1st, 2nd, etc row, depending on how close we are
// to the edge of image). xRead is determined by thread id and starts
// from size of kernel / 2 + 1 to the left of our current pixel
inrow[tx] = d_Data[yRead[dy + 1] + xRead];
__syncthreads();
if (tx < dx2) {
brow[tx1] = k[0] * (inrow[2 * tx] + inrow[2 * tx + 4]) +
k[1] * (inrow[2 * tx + 1] + inrow[2 * tx + 3]) +
k[2] * inrow[2 * tx + 2];
}
__syncthreads();
if (tx<dx2 && dy>=3 && (dy&1)) {
d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
}
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<dx2) {
brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
}
__syncthreads();
if (tx<dx2 && dy>=2 && !(dy&1)) {
d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
}
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<dx2) {
brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
}
__syncthreads();
if (tx<dx2 && dy>=1 && (dy&1)) {
d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
}
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2) {
brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
}
__syncthreads();
if (tx<dx2 && !(dy&1)) {
d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
}
}
__syncthreads();
}
}
 | 01530764be7d89f4dcedc0ea8686ed78308171be.cu | #include "includes.h"
//********************************************************//
// CUDA SIFT extractor by Marten Bjorkman aka Celebrandil //
//********************************************************//
///////////////////////////////////////////////////////////////////////////////
// Kernel configuration
///////////////////////////////////////////////////////////////////////////////
__constant__ float d_Threshold[2];
__constant__ float d_Scales[8], d_Factor;
__constant__ float d_EdgeLimit;
__constant__ int d_MaxNumPoints;
__device__ unsigned int d_PointCounter[1];
__constant__ float d_Kernel1[5];
__constant__ float d_Kernel2[12*16];
///////////////////////////////////////////////////////////////////////////////
// Lowpass filter and subsample image
///////////////////////////////////////////////////////////////////////////////
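// Summary of the approach (the per-step details are in the inline comments below):
//   1) every thread loads one pixel of a padded (halo of 2) source row into `inrow`,
//   2) SCALEDOWN_W/2 threads convolve that row horizontally with the 5-tap kernel
//      at even columns, storing the result into one of five row buffers in `brow`,
//   3) once five filtered rows are buffered, the same threads convolve them
//      vertically and write one output row for every other source row.
// A possible launch configuration (illustrative sketch only; SCALEDOWN_W/SCALEDOWN_H
// are assumed to be compile-time tile sizes with SCALEDOWN_W >= SCALEDOWN_H, and
// iDivUp is a hypothetical round-up division helper):
//   dim3 blocks(iDivUp(width, SCALEDOWN_W), iDivUp(height, SCALEDOWN_H));
//   dim3 threads(SCALEDOWN_W + 4);
//   ScaleDown_D<<<blocks, threads>>>(d_Result, d_Data, width, pitch, height, newpitch);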
__global__ void ScaleDown_D(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch) {
// TODO: one element per thread in a block?
__shared__ float inrow[SCALEDOWN_W + 4];
__shared__ float brow[5 * (SCALEDOWN_W / 2)];
//
__shared__ int yRead[SCALEDOWN_H + 4];
__shared__ int yWrite[SCALEDOWN_H + 4];
// Get thread index, which ranges from 0 to SCALEDOWN_W + 4
const int tx = threadIdx.x;
// Get indices in brow
// TODO: move this out?
#define dx2 (SCALEDOWN_W / 2)
const int tx0 = tx + 0 * dx2;
const int tx1 = tx + 1 * dx2;
const int tx2 = tx + 2 * dx2;
const int tx3 = tx + 3 * dx2;
const int tx4 = tx + 4 * dx2;
// TODO: x and y pixel index
const int xStart = blockIdx.x * SCALEDOWN_W;
const int yStart = blockIdx.y * SCALEDOWN_H;
// TODO: x coordinate to write to?
const int xWrite = xStart / 2 + tx;
int xRead = xStart + tx - 2;
xRead = (xRead < 0 ? 0 : xRead);
xRead = (xRead >= width ? width - 1 : xRead);
const float *k = d_Kernel1;
  // Identify y read and write indices; note that threads with
  // SCALEDOWN_H + 4 <= tx < SCALEDOWN_W + 4 do no work in this section
if (tx < SCALEDOWN_H + 4) {
// TODO: tx = 0 and tx = 1 are the same; why?
int y = yStart + tx - 1;
// Clamp at 0 and height - 1
y = (y < 0 ? 0 : y);
y = (y >= height ? height - 1 : y);
// Read start index
yRead[tx] = y * pitch;
// Write start index
yWrite[tx] = (yStart + tx - 4) / 2 * newpitch;
}
// Synchronize threads to ensure we have yRead and yWrite filled for current
// warp
__syncthreads();
// For each thread (which runs 0 to SCALEDOWN_W + 4 - 1), loop through 0 to
// SCALEDOWN_H + 4 - 1 by kernel size.
for (int dy = 0; dy < SCALEDOWN_H + 4; dy += 5) {
// yRead[dy + 0] is the y index to 0th row of data from source image (may
// be the same as 1st, 2nd, etc row, depending on how close we are to the
// edge of image). xRead is determined by thread id and starts from size
// of kernel / 2 + 1 to the left of our current pixel
inrow[tx] = d_Data[yRead[dy + 0] + xRead];
// Once we synchronize, inrow should contain the data from the source
// image corresponding to the first row in the current block. It is length
// SCALEDOWN_W + 4.
__syncthreads();
// For the SCALEDOWN_W / 2 threads in block, compute the first of 5
// indices for this thread. Convolve the 1-D kernel k with every other
// 'pixel' in the block via 2 * tx
if (tx < dx2) {
brow[tx0] = k[0] * (inrow[2 * tx] + inrow[2 * tx + 4]) +
k[1] * (inrow[2 * tx + 1] + inrow[2 * tx + 3]) +
k[2] * inrow[2 * tx + 2];
}
    // Once we synchronize, the first dx2 entries of brow hold the horizontally
    // filtered (even-column) samples of source row dy + 0
__syncthreads();
// Compute for SCALEDOWN_W / 2 threads in block. dy & 1 is true if dy is
// odd. We require that dy is even and after we've completed at least one
// iteration
if (tx < dx2 && dy >= 4 && !(dy & 1)) {
d_Result[yWrite[dy + 0] + xWrite] = k[2] * brow[tx2] +
k[0] * (brow[tx0] + brow[tx4]) +
k[1] * (brow[tx1] + brow[tx3]);
}
// And...this is all just the same as above. One big unrolled for loop.
if (dy < (SCALEDOWN_H + 3)) {
      // yRead[dy + 1] is the y index to the 1st row of data from source image
// (may be the same as 1st, 2nd, etc row, depending on how close we are
// to the edge of image). xRead is determined by thread id and starts
// from size of kernel / 2 + 1 to the left of our current pixel
inrow[tx] = d_Data[yRead[dy + 1] + xRead];
__syncthreads();
if (tx < dx2) {
brow[tx1] = k[0] * (inrow[2 * tx] + inrow[2 * tx + 4]) +
k[1] * (inrow[2 * tx + 1] + inrow[2 * tx + 3]) +
k[2] * inrow[2 * tx + 2];
}
__syncthreads();
if (tx<dx2 && dy>=3 && (dy&1)) {
d_Result[yWrite[dy+1] + xWrite] = k[2]*brow[tx3] + k[0]*(brow[tx1]+brow[tx0]) + k[1]*(brow[tx2]+brow[tx4]);
}
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<dx2) {
brow[tx2] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
}
__syncthreads();
if (tx<dx2 && dy>=2 && !(dy&1)) {
d_Result[yWrite[dy+2] + xWrite] = k[2]*brow[tx4] + k[0]*(brow[tx2]+brow[tx1]) + k[1]*(brow[tx3]+brow[tx0]);
}
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<dx2) {
brow[tx3] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
}
__syncthreads();
if (tx<dx2 && dy>=1 && (dy&1)) {
d_Result[yWrite[dy+3] + xWrite] = k[2]*brow[tx0] + k[0]*(brow[tx3]+brow[tx2]) + k[1]*(brow[tx4]+brow[tx1]);
}
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2) {
brow[tx4] = k[0]*(inrow[2*tx]+inrow[2*tx+4]) + k[1]*(inrow[2*tx+1]+inrow[2*tx+3]) + k[2]*inrow[2*tx+2];
}
__syncthreads();
if (tx<dx2 && !(dy&1)) {
d_Result[yWrite[dy+4] + xWrite] = k[2]*brow[tx1] + k[0]*(brow[tx4]+brow[tx3]) + k[1]*(brow[tx0]+brow[tx2]);
}
}
__syncthreads();
}
} |
b8f4aa56d538f013641d740635040c16cbef9b89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from Ken Perlin's implementation of the Perlin Noise
// https://mrl.nyu.edu/~perlin/noise/
#include <cmath>
#include "PerlinNoise.cuh"
#include <iostream>
#ifndef GLOBAL_H
#define GLOBAL_H
#include "globals.h"
#endif
using namespace std;
const int Permutation[512] = { 151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180,
151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
};
__device__ float fade(float t) { return t * t * t * (t * (t * 6 - 15) + 10); }
__device__ float lerp(float t, float a, float b) { return a + t * (b - a); }
__device__ float grad(int hash, float x, float y, float z) {
int h = hash & 15; // CONVERT LO 4 BITS OF HASH CODE
float u = h<8 ? x : y, // INTO 12 GRADIENT DIRECTIONS.
v = h<4 ? y : h==12||h==14 ? x : z;
return ((h&1) == 0 ? u : -u) + ((h&2) == 0 ? v : -v);
}
__device__ float noise(float x, float y, float z, int* permutation) {
int X = (int)floor(x) & 255; // FIND UNIT CUBE THAT
int Y = (int)floor(y) & 255; // CONTAINS POINT.
int Z = (int)floor(z) & 255;
x -= floor(x); // FIND RELATIVE X,Y,Z
y -= floor(y); // OF POINT IN CUBE.
z -= floor(z);
float u = fade(x); // COMPUTE FADE CURVES
float v = fade(y); // FOR EACH OF X,Y,Z.
float w = fade(z);
int A = permutation[X ]+Y, AA = permutation[A]+Z, AB = permutation[A+1]+Z, // HASH COORDINATES OF
B = permutation[X+1]+Y, BA = permutation[B]+Z, BB = permutation[B+1]+Z; // THE 8 CUBE CORNERS,
return lerp(w, lerp(v, lerp(u, grad(permutation[AA ], x , y , z ), // AND ADD
grad(permutation[BA ], x-1, y , z )), // BLENDED
lerp(u, grad(permutation[AB ], x , y-1, z ), // RESULTS
grad(permutation[BB ], x-1, y-1, z ))),// FROM 8
lerp(v, lerp(u, grad(permutation[AA+1], x , y , z-1 ), // CORNERS
grad(permutation[BA+1], x-1, y , z-1 )), // OF CUBE
lerp(u, grad(permutation[AB+1], x , y-1, z-1 ),
grad(permutation[BB+1], x-1, y-1, z-1 ))));
}
__global__ void PerlinKernel(Vector3<float>* points, const int* permu, const size_t size, const int octaves, const float persistence, const float divided) {
__shared__ int permutation[512];
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
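  // Note: the copy below assumes blockDim.x (THREAD_PER_BLOCK) is at least 512; with fewer threads the shared table would only be partially filled.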
if (threadIdx.x < 512) permutation[threadIdx.x] = permu[threadIdx.x];
__syncthreads();
float addedtotal = 0;
float frequency = 1;
float amplitude = 1;
float maxValue = 0;
  if (pos < size){
    // Read the point only after the bounds check so threads past the end never touch points[pos].
    float x = points[pos].x;
    float y = points[pos].y;
    float z = points[pos].z;
    for(int i=0;i<octaves;i++) {
addedtotal += noise(x * frequency, y * frequency, z * frequency, permutation) * amplitude;
maxValue += amplitude;
amplitude *= persistence;
frequency *= 2;
}
points[pos].y += addedtotal/maxValue / divided;
}
}
__host__ void randomize(OutputObject Out) {
size_t objectCount = Out.otherObjectCount + Out.streetCount;
// cout<<"now in CUDA, with "<<objectCount<<" objects\n" << std::flush;
Vector3<float> **host_vertex = new Vector3<float>*[objectCount];
Vector3<float> **device_vertex = new Vector3<float>*[objectCount];
hipStream_t stream[objectCount];
// cout<<"allocate done\n" << std::flush;
int *permu;
// hipError_t code;
hipMallocManaged(&permu, 512*sizeof(int));
// cout<<hipGetErrorString(code);
// cout<<Permutation[10]<<"\n"<< std::flush;
// cout<<"stg"<< std::flush;
for (int i = 0; i < 512; i++) {
permu[i] = Permutation[i];
// cout<<permu[i]<<"\n"<< std::flush;
}
// cout<<permu[2]<<" "<<permu[511]<<"\n"<< std::flush;
// cout<<Out.objects[0].vertice_count<<"\n"<< std::flush;
for(size_t i = 0; i < objectCount; ++i) {
hipStreamCreate(&stream[i]);
hipHostMalloc(&host_vertex[i], Out.objects[i].vertice_count * sizeof(Vector3<float>), hipHostMallocDefault);
hipMalloc(&device_vertex[i], Out.objects[i].vertice_count * sizeof(Vector3<float>));
for (size_t j = 0; j < Out.objects[i].vertice_count; j++){
host_vertex[i][j] = Out.objects[i].vertices[j];
}
}
// cout<<host_vertex[0][2]<<" "<<host_vertex[0][9999]<<"\n"<< std::flush;;
for (size_t i = 0; i < objectCount; ++i) {
hipMemcpyAsync( device_vertex[i], host_vertex[i], Out.objects[i].vertice_count * sizeof(Vector3<float>), hipMemcpyHostToDevice, stream[i]);
unsigned int blockNum = (Out.objects[i].vertice_count + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
int octaves = i < Out.streetCount? 4 : 2;
float persistence = i < Out.streetCount? 0.75 : 0.5;
float divided = i < Out.streetCount? 20 : 40;
hipLaunchKernelGGL(( PerlinKernel), dim3(blockNum), dim3(THREAD_PER_BLOCK), 0, stream[i], device_vertex[i],permu, Out.objects[i].vertice_count, octaves, persistence, divided);
hipMemcpyAsync( host_vertex[i], device_vertex[i], Out.objects[i].vertice_count * sizeof(Vector3<float>), hipMemcpyDeviceToHost, stream[i]);
}
for(size_t i = 0; i < objectCount; ++i) hipStreamSynchronize( stream[i]);
// cout<<host_vertex[0][2]<<" "<<host_vertex[0][9999]<<"\n"<< std::flush;;
for(size_t i = 0; i < objectCount; ++i) {
for (size_t j = 0; j < Out.objects[i].vertice_count; j++){
Out.objects[i].vertices[j] = host_vertex[i][j];
}
hipStreamDestroy(stream[i]);
hipHostFree(host_vertex[i]);
hipFree(device_vertex[i]);
}
} | b8f4aa56d538f013641d740635040c16cbef9b89.cu | // Modified from Ken Perlin's implementation of the Perlin Noise
// https://mrl.nyu.edu/~perlin/noise/
#include <cmath>
#include "PerlinNoise.cuh"
#include <iostream>
#ifndef GLOBAL_H
#define GLOBAL_H
#include "globals.h"
#endif
using namespace std;
const int Permutation[512] = { 151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180,
151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180
};
__device__ float fade(float t) { return t * t * t * (t * (t * 6 - 15) + 10); }
__device__ float lerp(float t, float a, float b) { return a + t * (b - a); }
__device__ float grad(int hash, float x, float y, float z) {
int h = hash & 15; // CONVERT LO 4 BITS OF HASH CODE
float u = h<8 ? x : y, // INTO 12 GRADIENT DIRECTIONS.
v = h<4 ? y : h==12||h==14 ? x : z;
return ((h&1) == 0 ? u : -u) + ((h&2) == 0 ? v : -v);
}
__device__ float noise(float x, float y, float z, int* permutation) {
int X = (int)floor(x) & 255; // FIND UNIT CUBE THAT
int Y = (int)floor(y) & 255; // CONTAINS POINT.
int Z = (int)floor(z) & 255;
x -= floor(x); // FIND RELATIVE X,Y,Z
y -= floor(y); // OF POINT IN CUBE.
z -= floor(z);
float u = fade(x); // COMPUTE FADE CURVES
float v = fade(y); // FOR EACH OF X,Y,Z.
float w = fade(z);
int A = permutation[X ]+Y, AA = permutation[A]+Z, AB = permutation[A+1]+Z, // HASH COORDINATES OF
B = permutation[X+1]+Y, BA = permutation[B]+Z, BB = permutation[B+1]+Z; // THE 8 CUBE CORNERS,
return lerp(w, lerp(v, lerp(u, grad(permutation[AA ], x , y , z ), // AND ADD
grad(permutation[BA ], x-1, y , z )), // BLENDED
lerp(u, grad(permutation[AB ], x , y-1, z ), // RESULTS
grad(permutation[BB ], x-1, y-1, z ))),// FROM 8
lerp(v, lerp(u, grad(permutation[AA+1], x , y , z-1 ), // CORNERS
grad(permutation[BA+1], x-1, y , z-1 )), // OF CUBE
lerp(u, grad(permutation[AB+1], x , y-1, z-1 ),
grad(permutation[BB+1], x-1, y-1, z-1 ))));
}
__global__ void PerlinKernel(Vector3<float>* points, const int* permu, const size_t size, const int octaves, const float persistence, const float divided) {
__shared__ int permutation[512];
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
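  // Note: the copy below assumes blockDim.x (THREAD_PER_BLOCK) is at least 512; with fewer threads the shared table would only be partially filled.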
if (threadIdx.x < 512) permutation[threadIdx.x] = permu[threadIdx.x];
__syncthreads();
float addedtotal = 0;
float frequency = 1;
float amplitude = 1;
float maxValue = 0;
  if (pos < size){
    // Read the point only after the bounds check so threads past the end never touch points[pos].
    float x = points[pos].x;
    float y = points[pos].y;
    float z = points[pos].z;
    for(int i=0;i<octaves;i++) {
addedtotal += noise(x * frequency, y * frequency, z * frequency, permutation) * amplitude;
maxValue += amplitude;
amplitude *= persistence;
frequency *= 2;
}
points[pos].y += addedtotal/maxValue / divided;
}
}
__host__ void randomize(OutputObject Out) {
size_t objectCount = Out.otherObjectCount + Out.streetCount;
// cout<<"now in CUDA, with "<<objectCount<<" objects\n" << std::flush;
Vector3<float> **host_vertex = new Vector3<float>*[objectCount];
Vector3<float> **device_vertex = new Vector3<float>*[objectCount];
cudaStream_t stream[objectCount];
// cout<<"allocate done\n" << std::flush;
int *permu;
// cudaError_t code;
cudaMallocManaged(&permu, 512*sizeof(int));
// cout<<cudaGetErrorString(code);
// cout<<Permutation[10]<<"\n"<< std::flush;
// cout<<"stg"<< std::flush;
for (int i = 0; i < 512; i++) {
permu[i] = Permutation[i];
// cout<<permu[i]<<"\n"<< std::flush;
}
// cout<<permu[2]<<" "<<permu[511]<<"\n"<< std::flush;
// cout<<Out.objects[0].vertice_count<<"\n"<< std::flush;
for(size_t i = 0; i < objectCount; ++i) {
cudaStreamCreate(&stream[i]);
cudaHostAlloc(&host_vertex[i], Out.objects[i].vertice_count * sizeof(Vector3<float>), cudaHostAllocDefault);
cudaMalloc(&device_vertex[i], Out.objects[i].vertice_count * sizeof(Vector3<float>));
for (size_t j = 0; j < Out.objects[i].vertice_count; j++){
host_vertex[i][j] = Out.objects[i].vertices[j];
}
}
// cout<<host_vertex[0][2]<<" "<<host_vertex[0][9999]<<"\n"<< std::flush;;
for (size_t i = 0; i < objectCount; ++i) {
cudaMemcpyAsync( device_vertex[i], host_vertex[i], Out.objects[i].vertice_count * sizeof(Vector3<float>), cudaMemcpyHostToDevice, stream[i]);
unsigned int blockNum = (Out.objects[i].vertice_count + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
int octaves = i < Out.streetCount? 4 : 2;
float persistence = i < Out.streetCount? 0.75 : 0.5;
float divided = i < Out.streetCount? 20 : 40;
PerlinKernel<<<blockNum, THREAD_PER_BLOCK, 0, stream[i]>>>(device_vertex[i],permu, Out.objects[i].vertice_count, octaves, persistence, divided);
cudaMemcpyAsync( host_vertex[i], device_vertex[i], Out.objects[i].vertice_count * sizeof(Vector3<float>), cudaMemcpyDeviceToHost, stream[i]);
}
for(size_t i = 0; i < objectCount; ++i) cudaStreamSynchronize( stream[i]);
// cout<<host_vertex[0][2]<<" "<<host_vertex[0][9999]<<"\n"<< std::flush;;
for(size_t i = 0; i < objectCount; ++i) {
for (size_t j = 0; j < Out.objects[i].vertice_count; j++){
Out.objects[i].vertices[j] = host_vertex[i][j];
}
cudaStreamDestroy(stream[i]);
cudaFreeHost(host_vertex[i]);
cudaFree(device_vertex[i]);
}
} |
da5ec023dbff6c369b0923d9af9ef929bdf2fea3.hip | // !!! This is a file automatically generated by hipify!!!
// ******************************
//
// compile with nvcc
// - remember to link against the relevant libraries
//
// for API documentation: see docs.nvidia.com
//
// ******************************
#include <stdio.h>
#include "hipfft.h"
#include "rocblas.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
#ifdef DOUBLE
#define Complex hipfftDoubleComplex
#define Real double
#define Transform HIPFFT_Z2Z
#define TransformExec hipfftExecZ2Z
#else
#define Complex hipfftComplex
#define Real float
#define Transform HIPFFT_C2C
#define TransformExec hipfftExecC2C
#endif
#define NX 5
#define NY 5
#define NZ 4
#define BATCH 1
#define NRANK 3
// initialize data on the CPU
void initData(Complex* h_data) {
int i, j, k, b;
for(b=0; b<BATCH; ++b)
for(k=0; k<NZ; ++k)
for(j=0; j<NY; ++j)
for(i=0; i<NX; ++i){
h_data[b * NX * NY * NZ + k*NX*NY + j*NX + i].x = 42.;
h_data[b * NX * NY * NZ + k*NX*NY + j*NX + i].y = 0.;
}
}
// get results back from the device and print it out
void reportGPUData(Complex *h_data, Complex* d_data) {
int i, j, k;
hipMemcpy(h_data, d_data, sizeof(Complex)*NX*NY*NZ,
hipMemcpyDeviceToHost);
for(k=0;k<NZ; ++k)
for(j=0; j<NY; ++j)
for(i=0; i<NX; ++i){
int ind= k * NX * NY + j * NX + i;
printf("data[%d] = (%g , %g)\n", ind, h_data[ind].x, h_data[ind].y);
}
}
int main(int argc, char** argv)
{
hipfftHandle plan;
Complex *h_data;
Complex *d_data;
Complex *d_result;
hipSetDevice(0);
// *******************************
// Exercise 3: get the name of the device we are running on
// *******************************
// initialize data and transfer to GPU
h_data = (Complex*) malloc(sizeof(Complex)*NX*NY*NZ*BATCH);
initData(h_data);
hipMalloc((void**)&d_data, sizeof(Complex)*NX*NY*NZ*BATCH);
if( hipGetLastError() != hipSuccess)
printf("d_data allocate error\n");
hipMalloc((void**)&d_result, sizeof(Complex)*NX*NY*NZ*BATCH);
if( hipGetLastError() != hipSuccess)
printf("d_result allocate error\n");
hipMemcpy(d_data, h_data, sizeof(Complex)*NX*NY*NZ*BATCH,
hipMemcpyHostToDevice);
if( hipGetLastError() != hipSuccess)
printf("transfer error\n");
// *******************************
// Exercise 1: create plan for the FFT
// *******************************
// *******************************
// Exercise 3: set up timers
// *******************************
// *******************************
// Exercise 1: Perform the transform
// *******************************
// *******************************
// Exercise 3: report the time
// *******************************
// report result
reportGPUData(h_data, d_result);
// *******************************
// Exercise 2: use cublas to report norm
// *******************************
// *******************************
// Exercise 1, 2, 3: cleanup
// *******************************
}
| da5ec023dbff6c369b0923d9af9ef929bdf2fea3.cu | // ******************************
//
// compile with nvcc
// - remember to link against the relevant libraries
//
// for API documentation: see docs.nvidia.com
//
// ******************************
#include <stdio.h>
#include "cufft.h"
#include "cublas.h"
#include "cuda.h"
#include "cuda_runtime_api.h"
#ifdef DOUBLE
#define Complex cufftDoubleComplex
#define Real double
#define Transform CUFFT_Z2Z
#define TransformExec cufftExecZ2Z
#else
#define Complex cufftComplex
#define Real float
#define Transform CUFFT_C2C
#define TransformExec cufftExecC2C
#endif
#define NX 5
#define NY 5
#define NZ 4
#define BATCH 1
#define NRANK 3
// initialize data on the CPU
void initData(Complex* h_data) {
int i, j, k, b;
for(b=0; b<BATCH; ++b)
for(k=0; k<NZ; ++k)
for(j=0; j<NY; ++j)
for(i=0; i<NX; ++i){
h_data[b * NX * NY * NZ + k*NX*NY + j*NX + i].x = 42.;
h_data[b * NX * NY * NZ + k*NX*NY + j*NX + i].y = 0.;
}
}
// get results back from the device and print it out
void reportGPUData(Complex *h_data, Complex* d_data) {
int i, j, k;
cudaMemcpy(h_data, d_data, sizeof(Complex)*NX*NY*NZ,
cudaMemcpyDeviceToHost);
for(k=0;k<NZ; ++k)
for(j=0; j<NY; ++j)
for(i=0; i<NX; ++i){
int ind= k * NX * NY + j * NX + i;
printf("data[%d] = (%g , %g)\n", ind, h_data[ind].x, h_data[ind].y);
}
}
int main(int argc, char** argv)
{
cufftHandle plan;
Complex *h_data;
Complex *d_data;
Complex *d_result;
cudaSetDevice(0);
// *******************************
// Exercise 3: get the name of the device we are running on
// *******************************
// initialize data and transfer to GPU
h_data = (Complex*) malloc(sizeof(Complex)*NX*NY*NZ*BATCH);
initData(h_data);
cudaMalloc((void**)&d_data, sizeof(Complex)*NX*NY*NZ*BATCH);
if( cudaGetLastError() != cudaSuccess)
printf("d_data allocate error\n");
cudaMalloc((void**)&d_result, sizeof(Complex)*NX*NY*NZ*BATCH);
if( cudaGetLastError() != cudaSuccess)
printf("d_result allocate error\n");
cudaMemcpy(d_data, h_data, sizeof(Complex)*NX*NY*NZ*BATCH,
cudaMemcpyHostToDevice);
if( cudaGetLastError() != cudaSuccess)
printf("transfer error\n");
// *******************************
// Exercise 1: create plan for the FFT
// *******************************
// *******************************
// Exercise 3: set up timers
// *******************************
// *******************************
// Exercise 1: Perform the transform
// *******************************
// *******************************
// Exercise 3: report the time
// *******************************
// report result
reportGPUData(h_data, d_result);
// *******************************
// Exercise 2: use cublas to report norm
// *******************************
// *******************************
// Exercise 1, 2, 3: cleanup
// *******************************
}
|
0900fef77673c7f7ba20d32a5b6e768c5a5ec346.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BOOST_THREAD_VERSION 4
#include <boost/thread/future.hpp>
#include <boost/thread.hpp>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <CUDA/HPP/InitData.hpp>
#include <CUDA/HPP/sumArraysOnDevice.hpp>
typedef float value_type;
const int N = 8;
std::size_t NBytes = N * sizeof(value_type);
void SpawnKernel(hipStream_t stream = nullptr) {
std::cout << boost::this_thread::get_id() << "\n";
value_type* h_a;
value_type* h_b;
value_type* h_c;
h_a = (value_type*)malloc(NBytes);
h_b = (value_type*)malloc(NBytes);
h_c = (value_type*)malloc(NBytes);
InitData(h_a, N);
InitData(h_b, N);
value_type* d_a;
value_type* d_b;
value_type* d_c;
hipMalloc(&d_a, NBytes);
hipMalloc(&d_b, NBytes);
hipMalloc(&d_c, NBytes);
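  // Note: the three synchronous copies below are redundant; both branches that follow issue their own host-to-device copies.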
hipMemcpy(d_a, h_a, NBytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, NBytes, hipMemcpyHostToDevice);
hipMemcpy(d_c, h_c, NBytes, hipMemcpyHostToDevice);
dim3 Block(N);
dim3 Grid((N + Block.x - 1) / Block.x);
if (stream) {
hipMemcpyAsync(d_a, h_a, NBytes, hipMemcpyHostToDevice, stream);
hipMemcpyAsync(d_b, h_b, NBytes, hipMemcpyHostToDevice, stream);
hipMemcpyAsync(d_c, h_c, NBytes, hipMemcpyHostToDevice, stream);
hipLaunchKernelGGL(( sumArraysOnDevice), dim3(Grid), dim3(Block), 0, stream,
d_a, d_b, d_c, N);
hipMemcpyAsync(h_c, d_c, NBytes, hipMemcpyDeviceToHost, stream);
} else {
hipMemcpy(d_a, h_a, NBytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, NBytes, hipMemcpyHostToDevice);
hipMemcpy(d_c, h_c, NBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sumArraysOnDevice), dim3(Grid), dim3(Block), 0, 0, d_a, d_b, d_c, N);
hipDeviceSynchronize();
}
free(h_a);
free(h_b);
free(h_c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
void DoStream() {
hipStream_t s[N];
value_type* Data[N];
for (int i = 0; i < N; ++i) {
hipStreamCreate(&s[i]);
hipMalloc(&Data[i], NBytes);
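    // Note: Data[i] is allocated per stream but never used or freed in this demo path.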
SpawnKernel(s[i]);
}
for (int i = 0; i < N; ++i) {
hipStreamSynchronize(s[i]);
hipStreamDestroy(s[i]);
}
}
void DoFuture() {
boost::future<void> f[N];
for (auto& f_ : f) {
boost::packaged_task<void(hipStream_t)> t(SpawnKernel);
f_ = t.get_future();
boost::thread(boost::move(t), nullptr).detach();
}
for (auto& f_ : f) {
f_.get();
assert(f_.is_ready());
assert(f_.has_value());
assert(!f_.has_exception());
assert(f_.get_state() == boost::future_state::ready);
}
}
void DoAsync() {
boost::future<void> f[N];
for (int i = 0; i < N; ++i) {
f[i] = boost::async(boost::launch::async, []() mutable {
SpawnKernel(nullptr);
});
}
for (auto& f_ : f) {
f_.get();
assert(f_.is_ready());
assert(f_.has_value());
assert(!f_.has_exception());
assert(f_.get_state() == boost::future_state::ready);
}
}
void Dummy() {
std::cout << boost::this_thread::get_id() << "\n";
}
void Job1(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
assert(f.valid());
f.get();
assert(!f.valid());
SpawnKernel(nullptr);
std::cout << __func__ << ": Done!" << "\n";
}
void Job2(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
assert(f.valid());
f.get();
assert(!f.valid());
SpawnKernel(nullptr);
std::cout << __func__ << ": Done!" << "\n";
}
void Job3(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
SpawnKernel(nullptr);
assert(f.valid());
f.get();
assert(!f.valid());
std::cout << __func__ << ": Done!" << "\n";
}
void Job4(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
SpawnKernel(nullptr);
assert(f.valid());
f.get();
assert(!f.valid());
std::cout << __func__ << ": Done!" << "\n";
}
void Job5(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
SpawnKernel(nullptr);
assert(f.valid());
f.get();
assert(!f.valid());
std::cout << __func__ << ": Done!" << "\n";
}
void DoContinuation() {
boost::future<void> f1 = boost::async(boost::launch::async, &Dummy);
assert(f1.valid());
boost::future<void> f2 =
f1.then(boost::launch::async, &Job1).
then(boost::launch::async, &Job3).
then(boost::launch::async, &Job5).
then(boost::launch::async, &Job2).
then(boost::launch::async, &Job4);
assert(f2.valid());
assert(!f1.valid());
f2.get();
assert(!f2.valid());
}
auto main() -> decltype(0) {
// DoStream();
// DoFuture();
// DoAsync();
// DoContinuation();
return 0;
}
| 0900fef77673c7f7ba20d32a5b6e768c5a5ec346.cu | #define BOOST_THREAD_VERSION 4
#include <boost/thread/future.hpp>
#include <boost/thread.hpp>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <CUDA/HPP/InitData.hpp>
#include <CUDA/HPP/sumArraysOnDevice.hpp>
typedef float value_type;
const int N = 8;
std::size_t NBytes = N * sizeof(value_type);
void SpawnKernel(cudaStream_t stream = nullptr) {
std::cout << boost::this_thread::get_id() << "\n";
value_type* h_a;
value_type* h_b;
value_type* h_c;
h_a = (value_type*)malloc(NBytes);
h_b = (value_type*)malloc(NBytes);
h_c = (value_type*)malloc(NBytes);
InitData(h_a, N);
InitData(h_b, N);
value_type* d_a;
value_type* d_b;
value_type* d_c;
cudaMalloc(&d_a, NBytes);
cudaMalloc(&d_b, NBytes);
cudaMalloc(&d_c, NBytes);
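  // Note: the three synchronous copies below are redundant; both branches that follow issue their own host-to-device copies.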
cudaMemcpy(d_a, h_a, NBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, NBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, h_c, NBytes, cudaMemcpyHostToDevice);
dim3 Block(N);
dim3 Grid((N + Block.x - 1) / Block.x);
if (stream) {
cudaMemcpyAsync(d_a, h_a, NBytes, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(d_b, h_b, NBytes, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(d_c, h_c, NBytes, cudaMemcpyHostToDevice, stream);
sumArraysOnDevice<<<Grid, Block, 0, stream>>>(
d_a, d_b, d_c, N);
cudaMemcpyAsync(h_c, d_c, NBytes, cudaMemcpyDeviceToHost, stream);
} else {
cudaMemcpy(d_a, h_a, NBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, NBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, h_c, NBytes, cudaMemcpyHostToDevice);
sumArraysOnDevice<<<Grid, Block>>>(d_a, d_b, d_c, N);
cudaDeviceSynchronize();
}
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
void DoStream() {
cudaStream_t s[N];
value_type* Data[N];
for (int i = 0; i < N; ++i) {
cudaStreamCreate(&s[i]);
cudaMalloc(&Data[i], NBytes);
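    // Note: Data[i] is allocated per stream but never used or freed in this demo path.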
SpawnKernel(s[i]);
}
for (int i = 0; i < N; ++i) {
cudaStreamSynchronize(s[i]);
cudaStreamDestroy(s[i]);
}
}
void DoFuture() {
boost::future<void> f[N];
for (auto& f_ : f) {
boost::packaged_task<void(cudaStream_t)> t(SpawnKernel);
f_ = t.get_future();
boost::thread(boost::move(t), nullptr).detach();
}
for (auto& f_ : f) {
f_.get();
assert(f_.is_ready());
assert(f_.has_value());
assert(!f_.has_exception());
assert(f_.get_state() == boost::future_state::ready);
}
}
void DoAsync() {
boost::future<void> f[N];
for (int i = 0; i < N; ++i) {
f[i] = boost::async(boost::launch::async, []() mutable {
SpawnKernel(nullptr);
});
}
for (auto& f_ : f) {
f_.get();
assert(f_.is_ready());
assert(f_.has_value());
assert(!f_.has_exception());
assert(f_.get_state() == boost::future_state::ready);
}
}
void Dummy() {
std::cout << boost::this_thread::get_id() << "\n";
}
void Job1(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
assert(f.valid());
f.get();
assert(!f.valid());
SpawnKernel(nullptr);
std::cout << __func__ << ": Done!" << "\n";
}
void Job2(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
assert(f.valid());
f.get();
assert(!f.valid());
SpawnKernel(nullptr);
std::cout << __func__ << ": Done!" << "\n";
}
void Job3(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
SpawnKernel(nullptr);
assert(f.valid());
f.get();
assert(!f.valid());
std::cout << __func__ << ": Done!" << "\n";
}
void Job4(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
SpawnKernel(nullptr);
assert(f.valid());
f.get();
assert(!f.valid());
std::cout << __func__ << ": Done!" << "\n";
}
void Job5(boost::future<void> f) {
std::cout << __func__ << ": Start!" << "\n";
SpawnKernel(nullptr);
assert(f.valid());
f.get();
assert(!f.valid());
std::cout << __func__ << ": Done!" << "\n";
}
void DoContinuation() {
boost::future<void> f1 = boost::async(boost::launch::async, &Dummy);
assert(f1.valid());
boost::future<void> f2 =
f1.then(boost::launch::async, &Job1).
then(boost::launch::async, &Job3).
then(boost::launch::async, &Job5).
then(boost::launch::async, &Job2).
then(boost::launch::async, &Job4);
assert(f2.valid());
assert(!f1.valid());
f2.get();
assert(!f2.valid());
}
auto main() -> decltype(0) {
// DoStream();
// DoFuture();
// DoAsync();
// DoContinuation();
return 0;
}
|
8945afb43b4679e0b77f5e58129e9ad5a1e80dba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
/*
* The crypt application implements IDEA encryption and decryption of a single
* input file using the secret key provided.
*/
// Chunking size for IDEA, in bytes
#define CHUNK_SIZE 8
// Length of the encryption/decryption keys, in bytes
#define KEY_LENGTH 52
#define BLOCK_SIZE_IN_CHUNKS 1024000
// Length of the secret key, in bytes
#define USERKEY_LENGTH 8
#define BITS_PER_BYTE 8
typedef struct _device_context
{
signed char *dPlain, *dCrypt;
hipStream_t *streams;
int nBlocks;
} device_context;
typedef enum { ENCRYPT, DECRYPT } action;
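// Expanded IDEA key, copied to constant memory so every thread reads it through the constant cache.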
__constant__ int dkey[KEY_LENGTH];
/*
* doCrypt implements the core logic of IDEA. It iterates over the byte
 * chunks stored in plain and outputs their encrypted/decrypted form to the
 * corresponding chunk in crypt using the key provided.
*/
__host__ __device__ void doCrypt(int chunk, signed char *plain,
signed char *crypt, int *key)
{
long x1, x2, x3, x4, t1, t2, ik, r;
x1 = (((unsigned int)plain[chunk * CHUNK_SIZE]) & 0xff);
x1 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 1]) & 0xff) <<
BITS_PER_BYTE);
x2 = (((unsigned int)plain[chunk * CHUNK_SIZE + 2]) & 0xff);
x2 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 3]) & 0xff) <<
BITS_PER_BYTE);
x3 = (((unsigned int)plain[chunk * CHUNK_SIZE + 4]) & 0xff);
x3 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 5]) & 0xff) <<
BITS_PER_BYTE);
x4 = (((unsigned int)plain[chunk * CHUNK_SIZE + 6]) & 0xff);
x4 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 7]) & 0xff) <<
BITS_PER_BYTE);
ik = 0;
r = CHUNK_SIZE;
do
{
x1 = (int)((((long)x1 * key[ik++]) % 0x10001L) & 0xffff);
x2 = ((x2 + key[ik++]) & 0xffff);
x3 = ((x3 + key[ik++]) & 0xffff);
x4 = (int)((((long)x4 * key[ik++]) % 0x10001L) & 0xffff);
t2 = (x1 ^ x3);
t2 = (int)((((long)t2 * key[ik++]) % 0x10001L) & 0xffff);
t1 = ((t2 + (x2 ^ x4)) & 0xffff);
t1 = (int)((((long)t1 * key[ik++]) % 0x10001L) & 0xffff);
t2 = (t1 + t2 & 0xffff);
x1 = (x1 ^ t1);
x4 = (x4 ^ t2);
t2 = (t2 ^ x2);
x2 = (x3 ^ t1);
x3 = t2;
}
while(--r != 0);
x1 = (int)((((long)x1 * key[ik++]) % 0x10001L) & 0xffff);
x3 = ((x3 + key[ik++]) & 0xffff);
x2 = ((x2 + key[ik++]) & 0xffff);
x4 = (int)((((long)x4 * key[ik++]) % 0x10001L) & 0xffff);
crypt[chunk * CHUNK_SIZE] = (signed char) x1;
crypt[chunk * CHUNK_SIZE + 1] = (signed char) ((unsigned long)x1 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 2] = (signed char) x3;
crypt[chunk * CHUNK_SIZE + 3] = (signed char) ((unsigned long)x3 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 4] = (signed char) x2;
crypt[chunk * CHUNK_SIZE + 5] = (signed char) ((unsigned long)x2 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 6] = (signed char) x4;
crypt[chunk * CHUNK_SIZE + 7] = (signed char) ((unsigned long)x4 >>
BITS_PER_BYTE);
}
__global__ void d_encrypt_decrypt(signed char *plain, signed char *crypt,
int nChunks)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nthreads = blockDim.x * gridDim.x;
for ( ; tid < nChunks; tid += nthreads)
{
doCrypt(tid, plain, crypt, dkey);
}
}
static void h_encrypt_decrypt(signed char *plain, signed char *crypt, int *key,
int plainLength)
{
int c;
int nChunks = plainLength / CHUNK_SIZE;
for (c = 0; c < nChunks; c++)
{
doCrypt(c, plain, crypt, key);
}
}
static void init_context(device_context *ctx, int plainLength)
{
signed char *dPlain, *dCrypt;
hipStream_t *streams;
int nBlocks, b;
if (plainLength % CHUNK_SIZE != 0)
{
fprintf(stderr, "Invalid encryption: length of plain must be an even "
"multiple of %d but is %d\n", CHUNK_SIZE, plainLength);
exit(-1);
}
CHECK(hipMalloc((void **)&dPlain,
plainLength * sizeof(signed char)));
CHECK(hipMalloc((void **)&dCrypt,
plainLength * sizeof(signed char)));
int nChunks = plainLength / CHUNK_SIZE;
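    // One stream per BLOCK_SIZE_IN_CHUNKS-sized block of chunks so host-device copies and kernel launches can overlap across streams.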
nBlocks = (nChunks + BLOCK_SIZE_IN_CHUNKS - 1) / BLOCK_SIZE_IN_CHUNKS;
streams = (hipStream_t *)malloc(sizeof(hipStream_t) * nBlocks);
for (b = 0; b < nBlocks; b++)
{
CHECK(hipStreamCreate(streams + b));
}
ctx->dPlain = dPlain;
ctx->dCrypt = dCrypt;
ctx->streams = streams;
ctx->nBlocks = nBlocks;
}
static void encrypt_decrypt_driver(signed char *plain, signed char *crypt,
int *key,
int plainLength, int nThreadsPerBlock,
device_context *ctx)
{
int b;
hipDeviceProp_t info;
CHECK(hipGetDeviceProperties(&info, 0));
int nChunks = plainLength / CHUNK_SIZE;
int nThreadBlocks = (nChunks + nThreadsPerBlock - 1) / nThreadsPerBlock;
if (nThreadBlocks > info.maxGridSize[0])
{
nThreadBlocks = info.maxGridSize[0];
}
CHECK(hipMemcpyToSymbolAsync(dkey, key, KEY_LENGTH * sizeof(int), 0,
hipMemcpyHostToDevice, (ctx->streams)[0]));
CHECK(hipStreamSynchronize((ctx->streams)[0]));
for (b = 0; b < ctx->nBlocks; b++)
{
int blockOffset = b * BLOCK_SIZE_IN_CHUNKS * CHUNK_SIZE;
int localChunks = BLOCK_SIZE_IN_CHUNKS;
if (b * BLOCK_SIZE_IN_CHUNKS + localChunks > nChunks)
{
localChunks = nChunks - b * BLOCK_SIZE_IN_CHUNKS;
}
CHECK(hipMemcpyAsync(ctx->dPlain + blockOffset, plain + blockOffset,
localChunks * CHUNK_SIZE * sizeof(signed char),
hipMemcpyHostToDevice, (ctx->streams)[b]));
hipLaunchKernelGGL(( d_encrypt_decrypt), dim3(nThreadBlocks), dim3(nThreadsPerBlock), 0,
(ctx->streams)[b], ctx->dPlain + blockOffset,
ctx->dCrypt + blockOffset, localChunks);
CHECK(hipMemcpyAsync(crypt + blockOffset, ctx->dCrypt + blockOffset,
localChunks * CHUNK_SIZE * sizeof(signed char),
hipMemcpyDeviceToHost, (ctx->streams)[b]));
}
}
static void cleanup_context(device_context *ctx)
{
int b;
for (b = 0; b < ctx->nBlocks; b++)
{
CHECK(hipStreamDestroy(ctx->streams[b]));
}
free(ctx->streams);
CHECK(hipFree(ctx->dPlain));
CHECK(hipFree(ctx->dCrypt));
}
/*
* Get the length of a file on disk.
*/
static size_t getFileLength(FILE *fp)
{
fseek(fp, 0L, SEEK_END);
size_t fileLen = ftell(fp);
fseek(fp, 0L, SEEK_SET);
return (fileLen);
}
/*
 * inv computes the multiplicative inverse of x modulo 0x10001 (2^16 + 1) using the
 * extended Euclidean algorithm; it is used when deriving the decryption key from the secret key.
*/
static int inv(int x)
{
int t0, t1;
int q, y;
if (x <= 1) // Assumes positive x.
return (x); // 0 and 1 are self-inverse.
t1 = 0x10001 / x; // (2**16+1)/x; x is >= 2, so fits 16 bits.
y = 0x10001 % x;
if (y == 1)
return ((1 - t1) & 0xffff);
t0 = 1;
do
{
q = x / y;
x = x % y;
t0 += q * t1;
if (x == 1) return (t0);
q = y / x;
y = y % x;
t1 += q * t0;
}
while (y != 1);
return ((1 - t1) & 0xffff);
}
/*
* Generate the key to be used for encryption, based on the user key read from
* disk.
*/
static int *generateEncryptKey(int16_t *userkey)
{
int i, j;
int *key;
CHECK(hipHostMalloc(&key, KEY_LENGTH * sizeof(int)));
memset(key, 0x00, sizeof(int) * KEY_LENGTH);
for (i = 0; i < CHUNK_SIZE; i++)
{
key[i] = (userkey[i] & 0xffff);
}
for (i = CHUNK_SIZE; i < KEY_LENGTH; i++)
{
j = i % CHUNK_SIZE;
if (j < 6)
{
key[i] = ((key[i - 7] >> 9) | (key[i - 6] << 7))
& 0xffff;
continue;
}
if (j == 6)
{
key[i] = ((key[i - 7] >> 9) | (key[i - 14] << 7))
& 0xffff;
continue;
}
key[i] = ((key[i - 15] >> 9) | (key[i - 14] << 7))
& 0xffff;
}
return (key);
}
/*
* Generate the key to be used for decryption, based on the user key read from
* disk.
*/
static int *generateDecryptKey(int16_t *userkey)
{
int *key;
int i, j, k;
int t1, t2, t3;
CHECK(hipHostMalloc(&key, KEY_LENGTH * sizeof(int)));
int *Z = generateEncryptKey(userkey);
t1 = inv(Z[0]);
t2 = - Z[1] & 0xffff;
t3 = - Z[2] & 0xffff;
key[51] = inv(Z[3]);
key[50] = t3;
key[49] = t2;
key[48] = t1;
j = 47;
k = 4;
for (i = 0; i < 7; i++)
{
t1 = Z[k++];
key[j--] = Z[k++];
key[j--] = t1;
t1 = inv(Z[k++]);
t2 = -Z[k++] & 0xffff;
t3 = -Z[k++] & 0xffff;
key[j--] = inv(Z[k++]);
key[j--] = t2;
key[j--] = t3;
key[j--] = t1;
}
t1 = Z[k++];
key[j--] = Z[k++];
key[j--] = t1;
t1 = inv(Z[k++]);
t2 = -Z[k++] & 0xffff;
t3 = -Z[k++] & 0xffff;
key[j--] = inv(Z[k++]);
key[j--] = t3;
key[j--] = t2;
key[j--] = t1;
CHECK(hipHostFree(Z));
return (key);
}
void readInputData(FILE *in, size_t textLen, signed char **text,
signed char **crypt)
{
CHECK(hipHostMalloc(text, textLen * sizeof(signed char)));
CHECK(hipHostMalloc(crypt, textLen * sizeof(signed char)));
if (fread(*text, sizeof(signed char), textLen, in) != textLen)
{
fprintf(stderr, "Failed reading text from input file\n");
exit(1);
}
}
void cleanup(signed char *text, signed char *crypt, int *key,
int16_t *userkey)
{
free(userkey);
CHECK(hipHostFree(key));
CHECK(hipHostFree(text));
CHECK(hipHostFree(crypt));
}
/*
* Initialize application state by reading inputs from the disk and
 * pre-allocating memory. Hand off to encrypt_decrypt to perform the actual
* encryption or decryption. Then, write the encrypted/decrypted results to
* disk.
*/
int main(int argc, char **argv)
{
FILE *in, *out, *keyfile;
signed char *text, *crypt;
size_t textLen, keyFileLength;
int16_t *userkey;
int *key;
action a;
hipEvent_t startEvent, finishEvent;
if (argc != 6)
{
printf("usage: %s <encrypt|decrypt> <file.in> <file.out> <key.file> "
"<threads-per-block>\n", argv[0]);
return (1);
}
// Are we encrypting or decrypting?
if (strncmp(argv[1], "encrypt", 7) == 0)
{
a = ENCRYPT;
}
else if (strncmp(argv[1], "decrypt", 7) == 0)
{
a = DECRYPT;
}
else
{
fprintf(stderr, "The action specified ('%s') is not valid. Must be "
"either 'encrypt' or 'decrypt'\n", argv[1]);
return (1);
}
// Input file
in = fopen(argv[2], "r");
if (in == NULL)
{
fprintf(stderr, "Unable to open %s for reading\n", argv[2]);
return (1);
}
// Output file
out = fopen(argv[3], "w");
if (out == NULL)
{
fprintf(stderr, "Unable to open %s for writing\n", argv[3]);
return (1);
}
// Key file
keyfile = fopen(argv[4], "r");
if (keyfile == NULL)
{
fprintf(stderr, "Unable to open key file %s for reading\n", argv[4]);
return (1);
}
int nThreadsPerBlock = atoi(argv[5]);
keyFileLength = getFileLength(keyfile);
if (keyFileLength != sizeof(*userkey) * USERKEY_LENGTH)
{
fprintf(stderr, "Invalid user key file length %lu, must be %lu\n",
keyFileLength, sizeof(*userkey) * USERKEY_LENGTH);
return (1);
}
userkey = (int16_t *)malloc(sizeof(int16_t) * USERKEY_LENGTH);
if (userkey == NULL)
{
fprintf(stderr, "Error allocating user key\n");
return (1);
}
if (fread(userkey, sizeof(*userkey), USERKEY_LENGTH, keyfile) !=
USERKEY_LENGTH)
{
fprintf(stderr, "Error reading user key\n");
return (1);
}
if (a == ENCRYPT)
{
key = generateEncryptKey(userkey);
}
else
{
key = generateDecryptKey(userkey);
}
textLen = getFileLength(in);
if (textLen % CHUNK_SIZE != 0)
{
fprintf(stderr, "Invalid input file length %lu, must be evenly "
"divisible by %d\n", textLen, CHUNK_SIZE);
return (1);
}
readInputData(in, textLen, &text, &crypt);
fclose(in);
int nDevices;
if (hipGetDeviceCount(&nDevices) == hipErrorNoDevice) {
// If no devices are found, run all computation on the CPU.
double overall_start = seconds();
h_encrypt_decrypt(text, crypt, key, textLen);
double overall_finish = seconds();
double overall_ms = 1000.0 * (overall_finish - overall_start);
printf("Processed %d bytes in %.3f s on CPU ( %.4f KB/ms )\n",
textLen, overall_ms,
((float)textLen / overall_ms) / 1024.0f);
}
else
{
int d;
int nTotalChunks = textLen / CHUNK_SIZE;
int chunksPerDevice = (nTotalChunks + nDevices - 1) / nDevices;
device_context *ctxs = (device_context *)malloc(nDevices *
sizeof(device_context));
for (d = 0; d < nDevices; d++)
{
CHECK(hipSetDevice(d));
int start = d * chunksPerDevice * CHUNK_SIZE;
int len = chunksPerDevice * CHUNK_SIZE;
if (start + len > textLen)
{
len = textLen - start;
}
init_context(ctxs + d, len);
}
CHECK(hipEventCreate(&startEvent));
CHECK(hipEventCreate(&finishEvent));
/*
* Iterate over each device, launching a subset of the total chunks at
* a time.
*/
double overall_start = seconds();
CHECK(hipEventRecord(startEvent));
for (d = 0; d < nDevices; d++)
{
CHECK(hipSetDevice(d));
int start = d * chunksPerDevice * CHUNK_SIZE;
int len = chunksPerDevice * CHUNK_SIZE;
if (start + len > textLen)
{
len = textLen - start;
}
encrypt_decrypt_driver(text + start, crypt + start, key, len,
nThreadsPerBlock, ctxs + d);
}
CHECK(hipEventRecord(finishEvent));
// Wait for each device to finish its work.
for (d = 0; d < nDevices; d++)
{
CHECK(hipSetDevice(d));
CHECK(hipDeviceSynchronize());
}
double overall_finish = seconds();
for (d = 0; d < nDevices; d++)
{
// Clean up any CUDA resource allocated for this device.
CHECK(hipSetDevice(d));
cleanup_context(ctxs + d);
}
float gpuElapsed;
CHECK(hipEventElapsedTime(&gpuElapsed, startEvent, finishEvent));
printf("Processed %d bytes in %.3f ms on GPUs ( %.4f KB/ms )\n",
textLen, gpuElapsed, ((float)textLen / gpuElapsed) / 1024.0f);
// Display the aggregate performance of all devices.
double overall_elapsed_ms = 1000.0 * (overall_finish - overall_start);
printf("In total, processed %d bytes in %.3f ms on %d devices\n",
textLen, overall_elapsed_ms, nDevices);
printf("Aggregate bandwith = %.4f KB/ms\n",
(float)(textLen / 1024) / overall_elapsed_ms);
free(ctxs);
}
if (fwrite(crypt, sizeof(signed char), textLen, out) != textLen)
{
fprintf(stderr, "Failed writing crypt to %s\n", argv[3]);
return (1);
}
fclose(out);
cleanup(text, crypt, key, userkey);
return (0);
}
| 8945afb43b4679e0b77f5e58129e9ad5a1e80dba.cu | #include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
/*
* The crypt application implements IDEA encryption and decryption of a single
* input file using the secret key provided.
*/
// Chunking size for IDEA, in bytes
#define CHUNK_SIZE 8
// Length of the encryption/decryption keys, in bytes
#define KEY_LENGTH 52
#define BLOCK_SIZE_IN_CHUNKS 1024000
// Length of the secret key, in bytes
#define USERKEY_LENGTH 8
#define BITS_PER_BYTE 8
typedef struct _device_context
{
signed char *dPlain, *dCrypt;
cudaStream_t *streams;
int nBlocks;
} device_context;
typedef enum { ENCRYPT, DECRYPT } action;
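// Expanded IDEA key, copied to constant memory so every thread reads it through the constant cache.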
__constant__ int dkey[KEY_LENGTH];
/*
* doCrypt implements the core logic of IDEA. It iterates over the byte
* chunks stored in plainList and outputs their encrypted/decrypted form to the
* corresponding element in cryptList using the secret key provided.
*/
__host__ __device__ void doCrypt(int chunk, signed char *plain,
signed char *crypt, int *key)
{
long x1, x2, x3, x4, t1, t2, ik, r;
x1 = (((unsigned int)plain[chunk * CHUNK_SIZE]) & 0xff);
x1 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 1]) & 0xff) <<
BITS_PER_BYTE);
x2 = (((unsigned int)plain[chunk * CHUNK_SIZE + 2]) & 0xff);
x2 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 3]) & 0xff) <<
BITS_PER_BYTE);
x3 = (((unsigned int)plain[chunk * CHUNK_SIZE + 4]) & 0xff);
x3 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 5]) & 0xff) <<
BITS_PER_BYTE);
x4 = (((unsigned int)plain[chunk * CHUNK_SIZE + 6]) & 0xff);
x4 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 7]) & 0xff) <<
BITS_PER_BYTE);
ik = 0;
r = CHUNK_SIZE;
do
{
x1 = (int)((((long)x1 * key[ik++]) % 0x10001L) & 0xffff);
x2 = ((x2 + key[ik++]) & 0xffff);
x3 = ((x3 + key[ik++]) & 0xffff);
x4 = (int)((((long)x4 * key[ik++]) % 0x10001L) & 0xffff);
t2 = (x1 ^ x3);
t2 = (int)((((long)t2 * key[ik++]) % 0x10001L) & 0xffff);
t1 = ((t2 + (x2 ^ x4)) & 0xffff);
t1 = (int)((((long)t1 * key[ik++]) % 0x10001L) & 0xffff);
t2 = (t1 + t2 & 0xffff);
x1 = (x1 ^ t1);
x4 = (x4 ^ t2);
t2 = (t2 ^ x2);
x2 = (x3 ^ t1);
x3 = t2;
}
while(--r != 0);
x1 = (int)((((long)x1 * key[ik++]) % 0x10001L) & 0xffff);
x3 = ((x3 + key[ik++]) & 0xffff);
x2 = ((x2 + key[ik++]) & 0xffff);
x4 = (int)((((long)x4 * key[ik++]) % 0x10001L) & 0xffff);
crypt[chunk * CHUNK_SIZE] = (signed char) x1;
crypt[chunk * CHUNK_SIZE + 1] = (signed char) ((unsigned long)x1 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 2] = (signed char) x3;
crypt[chunk * CHUNK_SIZE + 3] = (signed char) ((unsigned long)x3 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 4] = (signed char) x2;
crypt[chunk * CHUNK_SIZE + 5] = (signed char) ((unsigned long)x2 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 6] = (signed char) x4;
crypt[chunk * CHUNK_SIZE + 7] = (signed char) ((unsigned long)x4 >>
BITS_PER_BYTE);
}
__global__ void d_encrypt_decrypt(signed char *plain, signed char *crypt,
int nChunks)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int nthreads = blockDim.x * gridDim.x;
for ( ; tid < nChunks; tid += nthreads)
{
doCrypt(tid, plain, crypt, dkey);
}
}
static void h_encrypt_decrypt(signed char *plain, signed char *crypt, int *key,
int plainLength)
{
int c;
int nChunks = plainLength / CHUNK_SIZE;
for (c = 0; c < nChunks; c++)
{
doCrypt(c, plain, crypt, key);
}
}
static void init_context(device_context *ctx, int plainLength)
{
signed char *dPlain, *dCrypt;
cudaStream_t *streams;
int nBlocks, b;
if (plainLength % CHUNK_SIZE != 0)
{
fprintf(stderr, "Invalid encryption: length of plain must be an even "
"multiple of %d but is %d\n", CHUNK_SIZE, plainLength);
exit(-1);
}
CHECK(cudaMalloc((void **)&dPlain,
plainLength * sizeof(signed char)));
CHECK(cudaMalloc((void **)&dCrypt,
plainLength * sizeof(signed char)));
int nChunks = plainLength / CHUNK_SIZE;
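    // One stream per BLOCK_SIZE_IN_CHUNKS-sized block of chunks so host-device copies and kernel launches can overlap across streams.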
nBlocks = (nChunks + BLOCK_SIZE_IN_CHUNKS - 1) / BLOCK_SIZE_IN_CHUNKS;
streams = (cudaStream_t *)malloc(sizeof(cudaStream_t) * nBlocks);
for (b = 0; b < nBlocks; b++)
{
CHECK(cudaStreamCreate(streams + b));
}
ctx->dPlain = dPlain;
ctx->dCrypt = dCrypt;
ctx->streams = streams;
ctx->nBlocks = nBlocks;
}
static void encrypt_decrypt_driver(signed char *plain, signed char *crypt,
int *key,
int plainLength, int nThreadsPerBlock,
device_context *ctx)
{
int b;
cudaDeviceProp info;
CHECK(cudaGetDeviceProperties(&info, 0));
int nChunks = plainLength / CHUNK_SIZE;
int nThreadBlocks = (nChunks + nThreadsPerBlock - 1) / nThreadsPerBlock;
if (nThreadBlocks > info.maxGridSize[0])
{
nThreadBlocks = info.maxGridSize[0];
}
CHECK(cudaMemcpyToSymbolAsync(dkey, key, KEY_LENGTH * sizeof(int), 0,
cudaMemcpyHostToDevice, (ctx->streams)[0]));
CHECK(cudaStreamSynchronize((ctx->streams)[0]));
for (b = 0; b < ctx->nBlocks; b++)
{
int blockOffset = b * BLOCK_SIZE_IN_CHUNKS * CHUNK_SIZE;
int localChunks = BLOCK_SIZE_IN_CHUNKS;
if (b * BLOCK_SIZE_IN_CHUNKS + localChunks > nChunks)
{
localChunks = nChunks - b * BLOCK_SIZE_IN_CHUNKS;
}
CHECK(cudaMemcpyAsync(ctx->dPlain + blockOffset, plain + blockOffset,
localChunks * CHUNK_SIZE * sizeof(signed char),
cudaMemcpyHostToDevice, (ctx->streams)[b]));
d_encrypt_decrypt<<<nThreadBlocks, nThreadsPerBlock, 0,
(ctx->streams)[b]>>>(ctx->dPlain + blockOffset,
ctx->dCrypt + blockOffset, localChunks);
CHECK(cudaMemcpyAsync(crypt + blockOffset, ctx->dCrypt + blockOffset,
localChunks * CHUNK_SIZE * sizeof(signed char),
cudaMemcpyDeviceToHost, (ctx->streams)[b]));
}
}
static void cleanup_context(device_context *ctx)
{
int b;
for (b = 0; b < ctx->nBlocks; b++)
{
CHECK(cudaStreamDestroy(ctx->streams[b]));
}
free(ctx->streams);
CHECK(cudaFree(ctx->dPlain));
CHECK(cudaFree(ctx->dCrypt));
}
/*
* Get the length of a file on disk.
*/
static size_t getFileLength(FILE *fp)
{
fseek(fp, 0L, SEEK_END);
size_t fileLen = ftell(fp);
fseek(fp, 0L, SEEK_SET);
return (fileLen);
}
/*
 * inv computes the multiplicative inverse of x modulo 0x10001 (2^16 + 1) using the
 * extended Euclidean algorithm; it is used when deriving the decryption key from the secret key.
*/
static int inv(int x)
{
int t0, t1;
int q, y;
if (x <= 1) // Assumes positive x.
return (x); // 0 and 1 are self-inverse.
t1 = 0x10001 / x; // (2**16+1)/x; x is >= 2, so fits 16 bits.
y = 0x10001 % x;
if (y == 1)
return ((1 - t1) & 0xffff);
t0 = 1;
do
{
q = x / y;
x = x % y;
t0 += q * t1;
if (x == 1) return (t0);
q = y / x;
y = y % x;
t1 += q * t0;
}
while (y != 1);
return ((1 - t1) & 0xffff);
}
/*
* Generate the key to be used for encryption, based on the user key read from
* disk.
*/
static int *generateEncryptKey(int16_t *userkey)
{
int i, j;
int *key;
CHECK(cudaMallocHost(&key, KEY_LENGTH * sizeof(int)));
memset(key, 0x00, sizeof(int) * KEY_LENGTH);
for (i = 0; i < CHUNK_SIZE; i++)
{
key[i] = (userkey[i] & 0xffff);
}
for (i = CHUNK_SIZE; i < KEY_LENGTH; i++)
{
j = i % CHUNK_SIZE;
if (j < 6)
{
key[i] = ((key[i - 7] >> 9) | (key[i - 6] << 7))
& 0xffff;
continue;
}
if (j == 6)
{
key[i] = ((key[i - 7] >> 9) | (key[i - 14] << 7))
& 0xffff;
continue;
}
key[i] = ((key[i - 15] >> 9) | (key[i - 14] << 7))
& 0xffff;
}
return (key);
}
/*
* Generate the key to be used for decryption, based on the user key read from
* disk.
*/
static int *generateDecryptKey(int16_t *userkey)
{
int *key;
int i, j, k;
int t1, t2, t3;
CHECK(cudaMallocHost(&key, KEY_LENGTH * sizeof(int)));
int *Z = generateEncryptKey(userkey);
t1 = inv(Z[0]);
t2 = - Z[1] & 0xffff;
t3 = - Z[2] & 0xffff;
key[51] = inv(Z[3]);
key[50] = t3;
key[49] = t2;
key[48] = t1;
j = 47;
k = 4;
for (i = 0; i < 7; i++)
{
t1 = Z[k++];
key[j--] = Z[k++];
key[j--] = t1;
t1 = inv(Z[k++]);
t2 = -Z[k++] & 0xffff;
t3 = -Z[k++] & 0xffff;
key[j--] = inv(Z[k++]);
key[j--] = t2;
key[j--] = t3;
key[j--] = t1;
}
t1 = Z[k++];
key[j--] = Z[k++];
key[j--] = t1;
t1 = inv(Z[k++]);
t2 = -Z[k++] & 0xffff;
t3 = -Z[k++] & 0xffff;
key[j--] = inv(Z[k++]);
key[j--] = t3;
key[j--] = t2;
key[j--] = t1;
CHECK(cudaFreeHost(Z));
return (key);
}
void readInputData(FILE *in, size_t textLen, signed char **text,
signed char **crypt)
{
CHECK(cudaMallocHost(text, textLen * sizeof(signed char)));
CHECK(cudaMallocHost(crypt, textLen * sizeof(signed char)));
if (fread(*text, sizeof(signed char), textLen, in) != textLen)
{
fprintf(stderr, "Failed reading text from input file\n");
exit(1);
}
}
void cleanup(signed char *text, signed char *crypt, int *key,
int16_t *userkey)
{
free(userkey);
CHECK(cudaFreeHost(key));
CHECK(cudaFreeHost(text));
CHECK(cudaFreeHost(crypt));
}
/*
* Initialize application state by reading inputs from the disk and
 * pre-allocating memory. Hand off to encrypt_decrypt to perform the actual
* encryption or decryption. Then, write the encrypted/decrypted results to
* disk.
*/
int main(int argc, char **argv)
{
FILE *in, *out, *keyfile;
signed char *text, *crypt;
size_t textLen, keyFileLength;
int16_t *userkey;
int *key;
action a;
cudaEvent_t startEvent, finishEvent;
if (argc != 6)
{
printf("usage: %s <encrypt|decrypt> <file.in> <file.out> <key.file> "
"<threads-per-block>\n", argv[0]);
return (1);
}
// Are we encrypting or decrypting?
if (strncmp(argv[1], "encrypt", 7) == 0)
{
a = ENCRYPT;
}
else if (strncmp(argv[1], "decrypt", 7) == 0)
{
a = DECRYPT;
}
else
{
fprintf(stderr, "The action specified ('%s') is not valid. Must be "
"either 'encrypt' or 'decrypt'\n", argv[1]);
return (1);
}
// Input file
in = fopen(argv[2], "r");
if (in == NULL)
{
fprintf(stderr, "Unable to open %s for reading\n", argv[2]);
return (1);
}
// Output file
out = fopen(argv[3], "w");
if (out == NULL)
{
fprintf(stderr, "Unable to open %s for writing\n", argv[3]);
return (1);
}
// Key file
keyfile = fopen(argv[4], "r");
if (keyfile == NULL)
{
fprintf(stderr, "Unable to open key file %s for reading\n", argv[4]);
return (1);
}
int nThreadsPerBlock = atoi(argv[5]);
keyFileLength = getFileLength(keyfile);
if (keyFileLength != sizeof(*userkey) * USERKEY_LENGTH)
{
fprintf(stderr, "Invalid user key file length %lu, must be %lu\n",
keyFileLength, sizeof(*userkey) * USERKEY_LENGTH);
return (1);
}
userkey = (int16_t *)malloc(sizeof(int16_t) * USERKEY_LENGTH);
if (userkey == NULL)
{
fprintf(stderr, "Error allocating user key\n");
return (1);
}
if (fread(userkey, sizeof(*userkey), USERKEY_LENGTH, keyfile) !=
USERKEY_LENGTH)
{
fprintf(stderr, "Error reading user key\n");
return (1);
}
if (a == ENCRYPT)
{
key = generateEncryptKey(userkey);
}
else
{
key = generateDecryptKey(userkey);
}
textLen = getFileLength(in);
if (textLen % CHUNK_SIZE != 0)
{
fprintf(stderr, "Invalid input file length %lu, must be evenly "
"divisible by %d\n", textLen, CHUNK_SIZE);
return (1);
}
readInputData(in, textLen, &text, &crypt);
fclose(in);
int nDevices;
if (cudaGetDeviceCount(&nDevices) == cudaErrorNoDevice) {
// If no devices are found, run all computation on the CPU.
double overall_start = seconds();
h_encrypt_decrypt(text, crypt, key, textLen);
double overall_finish = seconds();
double overall_ms = 1000.0 * (overall_finish - overall_start);
printf("Processed %d bytes in %.3f s on CPU ( %.4f KB/ms )\n",
textLen, overall_ms,
((float)textLen / overall_ms) / 1024.0f);
}
else
{
int d;
int nTotalChunks = textLen / CHUNK_SIZE;
int chunksPerDevice = (nTotalChunks + nDevices - 1) / nDevices;
device_context *ctxs = (device_context *)malloc(nDevices *
sizeof(device_context));
for (d = 0; d < nDevices; d++)
{
CHECK(cudaSetDevice(d));
int start = d * chunksPerDevice * CHUNK_SIZE;
int len = chunksPerDevice * CHUNK_SIZE;
if (start + len > textLen)
{
len = textLen - start;
}
init_context(ctxs + d, len);
}
CHECK(cudaEventCreate(&startEvent));
CHECK(cudaEventCreate(&finishEvent));
/*
* Iterate over each device, launching a subset of the total chunks at
* a time.
*/
double overall_start = seconds();
CHECK(cudaEventRecord(startEvent));
for (d = 0; d < nDevices; d++)
{
CHECK(cudaSetDevice(d));
int start = d * chunksPerDevice * CHUNK_SIZE;
int len = chunksPerDevice * CHUNK_SIZE;
if (start + len > textLen)
{
len = textLen - start;
}
encrypt_decrypt_driver(text + start, crypt + start, key, len,
nThreadsPerBlock, ctxs + d);
}
CHECK(cudaEventRecord(finishEvent));
// Wait for each device to finish its work.
for (d = 0; d < nDevices; d++)
{
CHECK(cudaSetDevice(d));
CHECK(cudaDeviceSynchronize());
}
double overall_finish = seconds();
for (d = 0; d < nDevices; d++)
{
// Clean up any CUDA resource allocated for this device.
CHECK(cudaSetDevice(d));
cleanup_context(ctxs + d);
}
float gpuElapsed;
CHECK(cudaEventElapsedTime(&gpuElapsed, startEvent, finishEvent));
printf("Processed %d bytes in %.3f ms on GPUs ( %.4f KB/ms )\n",
textLen, gpuElapsed, ((float)textLen / gpuElapsed) / 1024.0f);
// Display the aggregate performance of all devices.
double overall_elapsed_ms = 1000.0 * (overall_finish - overall_start);
printf("In total, processed %d bytes in %.3f ms on %d devices\n",
textLen, overall_elapsed_ms, nDevices);
printf("Aggregate bandwith = %.4f KB/ms\n",
(float)(textLen / 1024) / overall_elapsed_ms);
free(ctxs);
}
if (fwrite(crypt, sizeof(signed char), textLen, out) != textLen)
{
fprintf(stderr, "Failed writing crypt to %s\n", argv[3]);
return (1);
}
fclose(out);
cleanup(text, crypt, key, userkey);
return (0);
}
|
85e7d75862b8e95489b512c3cd999acda3642e55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel_mass_flux_y [2][2];
static int dims_advec_mom_kernel_mass_flux_y_h [2][2] = {0};
//user function
__device__
inline void advec_mom_kernel_mass_flux_y_gpu(ACC<double> &node_flux,
const ACC<double> &mass_flux_y) {
node_flux(0,0,0) = 0.125 * ( mass_flux_y(-1,0,0) + mass_flux_y(0,0,0) +
mass_flux_y(-1,1,0) + mass_flux_y(0,1,0) +
mass_flux_y(-1,0,-1) + mass_flux_y(0,0,-1) +
mass_flux_y(-1,1,-1) + mass_flux_y(0,1,-1) );
}
__global__ void ops_advec_mom_kernel_mass_flux_y(
double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_mass_flux_y[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_mass_flux_y[0][0] * dims_advec_mom_kernel_mass_flux_y[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_mass_flux_y[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_mass_flux_y[1][0] * dims_advec_mom_kernel_mass_flux_y[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel_mass_flux_y[0][0], dims_advec_mom_kernel_mass_flux_y[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel_mass_flux_y[1][0], dims_advec_mom_kernel_mass_flux_y[1][1], arg1);
advec_mom_kernel_mass_flux_y_gpu(argp0, argp1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_y(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_advec_mom_kernel_mass_flux_y_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,130)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(130,"advec_mom_kernel_mass_flux_y");
OPS_kernels[130].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_advec_mom_kernel_mass_flux_y_h[0][0] || ydim0 != dims_advec_mom_kernel_mass_flux_y_h[0][1] || xdim1 != dims_advec_mom_kernel_mass_flux_y_h[1][0] || ydim1 != dims_advec_mom_kernel_mass_flux_y_h[1][1]) {
dims_advec_mom_kernel_mass_flux_y_h[0][0] = xdim0;
dims_advec_mom_kernel_mass_flux_y_h[0][1] = ydim0;
dims_advec_mom_kernel_mass_flux_y_h[1][0] = xdim1;
dims_advec_mom_kernel_mass_flux_y_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel_mass_flux_y, dims_advec_mom_kernel_mass_flux_y_h, sizeof(dims_advec_mom_kernel_mass_flux_y)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[130].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_mom_kernel_mass_flux_y), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[130].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[130].mpi_time += t2-t1;
OPS_kernels[130].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[130].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_y(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 130;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 130;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_mass_flux_y_execute;
if (OPS_diags > 1) {
ops_timing_realloc(130,"advec_mom_kernel_mass_flux_y");
}
ops_enqueue_kernel(desc);
}
#endif
| 85e7d75862b8e95489b512c3cd999acda3642e55.cu | //
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel_mass_flux_y [2][2];
static int dims_advec_mom_kernel_mass_flux_y_h [2][2] = {0};
//user function
__device__
inline void advec_mom_kernel_mass_flux_y_gpu(ACC<double> &node_flux,
const ACC<double> &mass_flux_y) {
node_flux(0,0,0) = 0.125 * ( mass_flux_y(-1,0,0) + mass_flux_y(0,0,0) +
mass_flux_y(-1,1,0) + mass_flux_y(0,1,0) +
mass_flux_y(-1,0,-1) + mass_flux_y(0,0,-1) +
mass_flux_y(-1,1,-1) + mass_flux_y(0,1,-1) );
}
__global__ void ops_advec_mom_kernel_mass_flux_y(
double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_mass_flux_y[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_mass_flux_y[0][0] * dims_advec_mom_kernel_mass_flux_y[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_mass_flux_y[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_mass_flux_y[1][0] * dims_advec_mom_kernel_mass_flux_y[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel_mass_flux_y[0][0], dims_advec_mom_kernel_mass_flux_y[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel_mass_flux_y[1][0], dims_advec_mom_kernel_mass_flux_y[1][1], arg1);
advec_mom_kernel_mass_flux_y_gpu(argp0, argp1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_y(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_advec_mom_kernel_mass_flux_y_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,130)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(130,"advec_mom_kernel_mass_flux_y");
OPS_kernels[130].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_advec_mom_kernel_mass_flux_y_h[0][0] || ydim0 != dims_advec_mom_kernel_mass_flux_y_h[0][1] || xdim1 != dims_advec_mom_kernel_mass_flux_y_h[1][0] || ydim1 != dims_advec_mom_kernel_mass_flux_y_h[1][1]) {
dims_advec_mom_kernel_mass_flux_y_h[0][0] = xdim0;
dims_advec_mom_kernel_mass_flux_y_h[0][1] = ydim0;
dims_advec_mom_kernel_mass_flux_y_h[1][0] = xdim1;
dims_advec_mom_kernel_mass_flux_y_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel_mass_flux_y, dims_advec_mom_kernel_mass_flux_y_h, sizeof(dims_advec_mom_kernel_mass_flux_y)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
//set up initial pointers
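//base0/base1 below accumulate the byte offset of the first point of the iteration range inside each dat's device buffer (x, then y, then z contributions)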
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[130].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_mom_kernel_mass_flux_y<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[130].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[130].mpi_time += t2-t1;
OPS_kernels[130].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[130].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel_mass_flux_y(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 130;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 130;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_mass_flux_y_execute;
if (OPS_diags > 1) {
ops_timing_realloc(130,"advec_mom_kernel_mass_flux_y");
}
ops_enqueue_kernel(desc);
}
#endif
|
530275e686927faff203bbfb4bfbe367593856cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "RecoLocalCalo/EcalRecAlgos/interface/EcalUncalibRecHitMultiFitAlgo_gpu_new.h"
#include "DataFormats/EcalDigi/interface/EcalDigiCollections.h"
#include "CondFormats/EcalObjects/interface/EcalPedestals.h"
#include "CondFormats/EcalObjects/interface/EcalMGPAGainRatio.h"
#include "CondFormats/EcalObjects/interface/EcalXtalGroupId.h"
#include "CondFormats/EcalObjects/interface/EcalPulseShapes.h"
#include "CondFormats/EcalObjects/interface/EcalPulseCovariances.h"
#include "CondFormats/EcalObjects/interface/EcalSampleMask.h"
#include "CondFormats/EcalObjects/interface/EcalSamplesCorrelation.h"
#include <iostream>
#include <limits>
#include "DataFormats/EcalDigi/interface/EcalDataFrame.h"
#include "RecoLocalCalo/EcalRecAlgos/interface/Common.h"
#include "hip/hip_runtime.h"
#include "AmplitudeComputationCommonKernels.h"
#include "AmplitudeComputationKernelsV1.h"
#include "TimeComputationKernels.h"
//#define DEBUG
//#define ECAL_RECO_CUDA_DEBUG
namespace ecal {
namespace multifit {
void entryPoint(EventInputDataCPU const& eventInputCPU,
EventInputDataGPU& eventInputGPU,
EventOutputDataGPU& eventOutputGPU,
EventDataForScratchGPU& scratch,
ConditionsProducts const& conditions,
ConfigurationParameters const& configParameters,
cuda::stream_t<>& cudaStream) {
using digis_type = std::vector<uint16_t>;
using dids_type = std::vector<uint32_t>;
// according to the cpu setup //----> hardcoded
bool const gainSwitchUseMaxSampleEB = true;
// according to the cpu setup //----> hardcoded
bool const gainSwitchUseMaxSampleEE = false;
uint32_t const offsetForHashes = conditions.offsetForHashes;
unsigned int totalChannels = eventInputCPU.ebDigis.size() + eventInputCPU.eeDigis.size();
// temporary for recording
/*hipEvent_t start_event;
hipEvent_t end_event;
cudaCheck( hipEventCreate(&start_event) );
cudaCheck( hipEventCreate(&end_event) );
cudaCheck (hipEventRecord(start_event, 0) );
*/
//
// in what follows we copy eb then ee.
// offset by size
//
//
// copy event data: digis + ids, not really async as vectors have default
// allocators
//
cudaCheck(hipMemcpyAsync(eventInputGPU.digis,
eventInputCPU.ebDigis.data().data(),
eventInputCPU.ebDigis.data().size() * sizeof(digis_type::value_type),
hipMemcpyHostToDevice,
cudaStream.id()));
cudaCheck(hipMemcpyAsync(eventInputGPU.digis + eventInputCPU.ebDigis.data().size(),
eventInputCPU.eeDigis.data().data(),
eventInputCPU.eeDigis.data().size() * sizeof(digis_type::value_type),
hipMemcpyHostToDevice,
cudaStream.id()));
cudaCheck(hipMemcpyAsync(eventInputGPU.ids,
eventInputCPU.ebDigis.ids().data(),
eventInputCPU.ebDigis.ids().size() * sizeof(dids_type::value_type),
hipMemcpyHostToDevice,
cudaStream.id()));
cudaCheck(hipMemcpyAsync(eventInputGPU.ids + eventInputCPU.ebDigis.ids().size(),
eventInputCPU.eeDigis.ids().data(),
eventInputCPU.eeDigis.ids().size() * sizeof(dids_type::value_type),
hipMemcpyHostToDevice,
cudaStream.id()));
//
// 1d preparation kernel
//
unsigned int nchannels_per_block = 32;
unsigned int threads_1d = 10 * nchannels_per_block;
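// one thread per digi sample: every channel carries 10 samples (EcalDataFrame::MAXSAMPLES)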
unsigned int blocks_1d = threads_1d > 10 * totalChannels ? 1 : (totalChannels * 10 + threads_1d - 1) / threads_1d;
int shared_bytes = nchannels_per_block * EcalDataFrame::MAXSAMPLES *
(sizeof(bool) + sizeof(bool) + sizeof(bool) + sizeof(bool) + sizeof(char) + sizeof(bool));
hipLaunchKernelGGL(( kernel_prep_1d_and_initialize), dim3(blocks_1d), dim3(threads_1d), shared_bytes, cudaStream.id(),
conditions.pulseShapes.values,
eventInputGPU.digis,
eventInputGPU.ids,
scratch.samples,
(SampleVector*)eventOutputGPU.amplitudesAll,
scratch.gainsNoise,
conditions.pedestals.mean_x1,
conditions.pedestals.mean_x12,
conditions.pedestals.rms_x12,
conditions.pedestals.mean_x6,
conditions.gainRatios.gain6Over1,
conditions.gainRatios.gain12Over6,
scratch.hasSwitchToGain6,
scratch.hasSwitchToGain1,
scratch.isSaturated,
eventOutputGPU.amplitude,
eventOutputGPU.chi2,
eventOutputGPU.pedestal,
eventOutputGPU.flags,
scratch.acState,
scratch.activeBXs,
offsetForHashes,
gainSwitchUseMaxSampleEB,
gainSwitchUseMaxSampleEE,
totalChannels);
cudaCheck(hipGetLastError());
//
// 2d preparation kernel
//
int blocks_2d = totalChannels;
dim3 threads_2d{10, 10};
hipLaunchKernelGGL(( kernel_prep_2d), dim3(blocks_2d), dim3(threads_2d), 0, cudaStream.id(),
conditions.pulseCovariances.values,
scratch.pulse_covariances,
scratch.gainsNoise,
eventInputGPU.ids,
conditions.pedestals.rms_x12,
conditions.pedestals.rms_x6,
conditions.pedestals.rms_x1,
conditions.gainRatios.gain12Over6,
conditions.gainRatios.gain6Over1,
conditions.samplesCorrelation.EBG12SamplesCorrelation,
conditions.samplesCorrelation.EBG6SamplesCorrelation,
conditions.samplesCorrelation.EBG1SamplesCorrelation,
conditions.samplesCorrelation.EEG12SamplesCorrelation,
conditions.samplesCorrelation.EEG6SamplesCorrelation,
conditions.samplesCorrelation.EEG1SamplesCorrelation,
scratch.noisecov,
scratch.pulse_matrix,
conditions.pulseShapes.values,
scratch.hasSwitchToGain6,
scratch.hasSwitchToGain1,
scratch.isSaturated,
offsetForHashes);
cudaCheck(hipGetLastError());
// run minimization kernels
v1::minimization_procedure(
eventInputCPU, eventInputGPU, eventOutputGPU, scratch, conditions, configParameters, cudaStream);
if (configParameters.shouldRunTimingComputation) {
//
// TODO: this guy can run concurrently with other kernels,
// there is no dependence on the order of execution
//
unsigned int threads_time_init = threads_1d;
unsigned int blocks_time_init = blocks_1d;
int sharedBytesInit = 2 * threads_time_init * sizeof(SampleVector::Scalar);
hipLaunchKernelGGL(( kernel_time_computation_init), dim3(blocks_time_init), dim3(threads_time_init), sharedBytesInit, cudaStream.id(),
eventInputGPU.digis,
eventInputGPU.ids,
conditions.pedestals.rms_x12,
conditions.pedestals.rms_x6,
conditions.pedestals.rms_x1,
conditions.pedestals.mean_x12,
conditions.pedestals.mean_x6,
conditions.pedestals.mean_x1,
conditions.gainRatios.gain12Over6,
conditions.gainRatios.gain6Over1,
scratch.sample_values,
scratch.sample_value_errors,
scratch.ampMaxError,
scratch.useless_sample_values,
scratch.pedestal_nums,
offsetForHashes,
conditions.sampleMask.getEcalSampleMaskRecordEB(),
conditions.sampleMask.getEcalSampleMaskRecordEE(),
totalChannels);
cudaCheck(hipGetLastError());
//
// TODO: small kernel only for EB. It needs to be checked if
// fusing such small kernels is beneficial here
//
// we are running only over EB digis
// therefore we need to create threads/blocks only for that
unsigned int const threadsFixMGPA = threads_1d;
unsigned int const blocksFixMGPA =
threadsFixMGPA > 10 * eventInputCPU.ebDigis.size()
? 1
: (10 * eventInputCPU.ebDigis.size() + threadsFixMGPA - 1) / threadsFixMGPA;
hipLaunchKernelGGL(( kernel_time_compute_fixMGPAslew), dim3(blocksFixMGPA), dim3(threadsFixMGPA), 0, cudaStream.id(),
eventInputGPU.digis,
scratch.sample_values,
scratch.sample_value_errors,
scratch.useless_sample_values,
conditions.sampleMask.getEcalSampleMaskRecordEB(),
totalChannels);
cudaCheck(hipGetLastError());
//
//
//
int sharedBytes = EcalDataFrame::MAXSAMPLES * nchannels_per_block * 4 * sizeof(SampleVector::Scalar);
auto const threads_nullhypot = threads_1d;
auto const blocks_nullhypot = blocks_1d;
hipLaunchKernelGGL(( kernel_time_compute_nullhypot), dim3(blocks_nullhypot), dim3(threads_nullhypot), sharedBytes, cudaStream.id(),
scratch.sample_values,
scratch.sample_value_errors,
scratch.useless_sample_values,
scratch.chi2sNullHypot,
scratch.sum0sNullHypot,
scratch.sumAAsNullHypot,
totalChannels);
cudaCheck(hipGetLastError());
unsigned int nchannels_per_block_makeratio = 10;
unsigned int threads_makeratio = 45 * nchannels_per_block_makeratio;
unsigned int blocks_makeratio = threads_makeratio > 45 * totalChannels
? 1
: (totalChannels * 45 + threads_makeratio - 1) / threads_makeratio;
int sharedBytesMakeRatio = 5 * threads_makeratio * sizeof(SampleVector::Scalar);
hipLaunchKernelGGL(( kernel_time_compute_makeratio), dim3(blocks_makeratio), dim3(threads_makeratio), sharedBytesMakeRatio, cudaStream.id(),
scratch.sample_values,
scratch.sample_value_errors,
eventInputGPU.ids,
scratch.useless_sample_values,
scratch.pedestal_nums,
configParameters.amplitudeFitParametersEB,
configParameters.amplitudeFitParametersEE,
configParameters.timeFitParametersEB,
configParameters.timeFitParametersEE,
scratch.sumAAsNullHypot,
scratch.sum0sNullHypot,
scratch.tMaxAlphaBetas,
scratch.tMaxErrorAlphaBetas,
scratch.accTimeMax,
scratch.accTimeWgt,
scratch.tcState,
configParameters.timeFitParametersSizeEB,
configParameters.timeFitParametersSizeEE,
configParameters.timeFitLimitsFirstEB,
configParameters.timeFitLimitsFirstEE,
configParameters.timeFitLimitsSecondEB,
configParameters.timeFitLimitsSecondEE,
totalChannels);
cudaCheck(hipGetLastError());
//
//
//
auto const threads_findamplchi2 = threads_1d;
auto const blocks_findamplchi2 = blocks_1d;
int const sharedBytesFindAmplChi2 = 2 * threads_findamplchi2 * sizeof(SampleVector::Scalar);
hipLaunchKernelGGL(( kernel_time_compute_findamplchi2_and_finish), dim3(blocks_findamplchi2),
dim3(threads_findamplchi2),
sharedBytesFindAmplChi2,
cudaStream.id(), scratch.sample_values,
scratch.sample_value_errors,
eventInputGPU.ids,
scratch.useless_sample_values,
scratch.tMaxAlphaBetas,
scratch.tMaxErrorAlphaBetas,
scratch.accTimeMax,
scratch.accTimeWgt,
configParameters.amplitudeFitParametersEB,
configParameters.amplitudeFitParametersEE,
scratch.sumAAsNullHypot,
scratch.sum0sNullHypot,
scratch.chi2sNullHypot,
scratch.tcState,
scratch.ampMaxAlphaBeta,
scratch.ampMaxError,
scratch.timeMax,
scratch.timeError,
totalChannels);
cudaCheck(hipGetLastError());
//
//
//
auto const threads_timecorr = 32;
auto const blocks_timecorr =
threads_timecorr > totalChannels ? 1 : (totalChannels + threads_timecorr - 1) / threads_timecorr;
hipLaunchKernelGGL(( kernel_time_correction_and_finalize), dim3(blocks_timecorr), dim3(threads_timecorr), 0, cudaStream.id(),
eventOutputGPU.amplitude,
eventInputGPU.digis,
eventInputGPU.ids,
conditions.timeBiasCorrections.EBTimeCorrAmplitudeBins,
conditions.timeBiasCorrections.EETimeCorrAmplitudeBins,
conditions.timeBiasCorrections.EBTimeCorrShiftBins,
conditions.timeBiasCorrections.EETimeCorrShiftBins,
scratch.timeMax,
scratch.timeError,
conditions.pedestals.rms_x12,
conditions.timeCalibConstants.values,
eventOutputGPU.jitter,
eventOutputGPU.jitterError,
eventOutputGPU.flags,
conditions.timeBiasCorrections.EBTimeCorrAmplitudeBinsSize,
conditions.timeBiasCorrections.EETimeCorrAmplitudeBinsSize,
configParameters.timeConstantTermEB,
configParameters.timeConstantTermEE,
conditions.timeOffsetConstant.getEBValue(),
conditions.timeOffsetConstant.getEEValue(),
configParameters.timeNconstEB,
configParameters.timeNconstEE,
configParameters.amplitudeThreshEB,
configParameters.amplitudeThreshEE,
configParameters.outOfTimeThreshG12pEB,
configParameters.outOfTimeThreshG12pEE,
configParameters.outOfTimeThreshG12mEB,
configParameters.outOfTimeThreshG12mEE,
configParameters.outOfTimeThreshG61pEB,
configParameters.outOfTimeThreshG61pEE,
configParameters.outOfTimeThreshG61mEB,
configParameters.outOfTimeThreshG61mEE,
offsetForHashes,
totalChannels);
cudaCheck(hipGetLastError());
}
/*
hipEventRecord(end_event, 0);
hipEventSynchronize(end_event);
float ms;
hipEventElapsedTime(&ms, start_event, end_event);
std::cout << "elapsed time = " << ms << std::endl;
*/
}
} // namespace multifit
} // namespace ecal
| 530275e686927faff203bbfb4bfbe367593856cd.cu | #include "RecoLocalCalo/EcalRecAlgos/interface/EcalUncalibRecHitMultiFitAlgo_gpu_new.h"
#include "DataFormats/EcalDigi/interface/EcalDigiCollections.h"
#include "CondFormats/EcalObjects/interface/EcalPedestals.h"
#include "CondFormats/EcalObjects/interface/EcalMGPAGainRatio.h"
#include "CondFormats/EcalObjects/interface/EcalXtalGroupId.h"
#include "CondFormats/EcalObjects/interface/EcalPulseShapes.h"
#include "CondFormats/EcalObjects/interface/EcalPulseCovariances.h"
#include "CondFormats/EcalObjects/interface/EcalSampleMask.h"
#include "CondFormats/EcalObjects/interface/EcalSamplesCorrelation.h"
#include <iostream>
#include <limits>
#include "DataFormats/EcalDigi/interface/EcalDataFrame.h"
#include "RecoLocalCalo/EcalRecAlgos/interface/Common.h"
#include "cuda.h"
#include "AmplitudeComputationCommonKernels.h"
#include "AmplitudeComputationKernelsV1.h"
#include "TimeComputationKernels.h"
//#define DEBUG
//#define ECAL_RECO_CUDA_DEBUG
namespace ecal {
namespace multifit {
void entryPoint(EventInputDataCPU const& eventInputCPU,
EventInputDataGPU& eventInputGPU,
EventOutputDataGPU& eventOutputGPU,
EventDataForScratchGPU& scratch,
ConditionsProducts const& conditions,
ConfigurationParameters const& configParameters,
cuda::stream_t<>& cudaStream) {
using digis_type = std::vector<uint16_t>;
using dids_type = std::vector<uint32_t>;
// according to the cpu setup //----> hardcoded
bool const gainSwitchUseMaxSampleEB = true;
// according to the cpu setup //----> hardcoded
bool const gainSwitchUseMaxSampleEE = false;
uint32_t const offsetForHashes = conditions.offsetForHashes;
unsigned int totalChannels = eventInputCPU.ebDigis.size() + eventInputCPU.eeDigis.size();
// temporary for recording
/*cudaEvent_t start_event;
cudaEvent_t end_event;
cudaCheck( cudaEventCreate(&start_event) );
cudaCheck( cudaEventCreate(&end_event) );
cudaCheck (cudaEventRecord(start_event, 0) );
*/
//
// in what follows we copy eb then ee.
// offset by size
//
//
// copy event data: digis + ids, not really async as vectors have default
// allocators
//
cudaCheck(cudaMemcpyAsync(eventInputGPU.digis,
eventInputCPU.ebDigis.data().data(),
eventInputCPU.ebDigis.data().size() * sizeof(digis_type::value_type),
cudaMemcpyHostToDevice,
cudaStream.id()));
cudaCheck(cudaMemcpyAsync(eventInputGPU.digis + eventInputCPU.ebDigis.data().size(),
eventInputCPU.eeDigis.data().data(),
eventInputCPU.eeDigis.data().size() * sizeof(digis_type::value_type),
cudaMemcpyHostToDevice,
cudaStream.id()));
cudaCheck(cudaMemcpyAsync(eventInputGPU.ids,
eventInputCPU.ebDigis.ids().data(),
eventInputCPU.ebDigis.ids().size() * sizeof(dids_type::value_type),
cudaMemcpyHostToDevice,
cudaStream.id()));
cudaCheck(cudaMemcpyAsync(eventInputGPU.ids + eventInputCPU.ebDigis.ids().size(),
eventInputCPU.eeDigis.ids().data(),
eventInputCPU.eeDigis.ids().size() * sizeof(dids_type::value_type),
cudaMemcpyHostToDevice,
cudaStream.id()));
//
// 1d preparation kernel
//
unsigned int nchannels_per_block = 32;
unsigned int threads_1d = 10 * nchannels_per_block;
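// one thread per digi sample: every channel carries 10 samples (EcalDataFrame::MAXSAMPLES)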
unsigned int blocks_1d = threads_1d > 10 * totalChannels ? 1 : (totalChannels * 10 + threads_1d - 1) / threads_1d;
int shared_bytes = nchannels_per_block * EcalDataFrame::MAXSAMPLES *
(sizeof(bool) + sizeof(bool) + sizeof(bool) + sizeof(bool) + sizeof(char) + sizeof(bool));
kernel_prep_1d_and_initialize<<<blocks_1d, threads_1d, shared_bytes, cudaStream.id()>>>(
conditions.pulseShapes.values,
eventInputGPU.digis,
eventInputGPU.ids,
scratch.samples,
(SampleVector*)eventOutputGPU.amplitudesAll,
scratch.gainsNoise,
conditions.pedestals.mean_x1,
conditions.pedestals.mean_x12,
conditions.pedestals.rms_x12,
conditions.pedestals.mean_x6,
conditions.gainRatios.gain6Over1,
conditions.gainRatios.gain12Over6,
scratch.hasSwitchToGain6,
scratch.hasSwitchToGain1,
scratch.isSaturated,
eventOutputGPU.amplitude,
eventOutputGPU.chi2,
eventOutputGPU.pedestal,
eventOutputGPU.flags,
scratch.acState,
scratch.activeBXs,
offsetForHashes,
gainSwitchUseMaxSampleEB,
gainSwitchUseMaxSampleEE,
totalChannels);
cudaCheck(cudaGetLastError());
//
// 2d preparation kernel
//
int blocks_2d = totalChannels;
dim3 threads_2d{10, 10};
kernel_prep_2d<<<blocks_2d, threads_2d, 0, cudaStream.id()>>>(
conditions.pulseCovariances.values,
scratch.pulse_covariances,
scratch.gainsNoise,
eventInputGPU.ids,
conditions.pedestals.rms_x12,
conditions.pedestals.rms_x6,
conditions.pedestals.rms_x1,
conditions.gainRatios.gain12Over6,
conditions.gainRatios.gain6Over1,
conditions.samplesCorrelation.EBG12SamplesCorrelation,
conditions.samplesCorrelation.EBG6SamplesCorrelation,
conditions.samplesCorrelation.EBG1SamplesCorrelation,
conditions.samplesCorrelation.EEG12SamplesCorrelation,
conditions.samplesCorrelation.EEG6SamplesCorrelation,
conditions.samplesCorrelation.EEG1SamplesCorrelation,
scratch.noisecov,
scratch.pulse_matrix,
conditions.pulseShapes.values,
scratch.hasSwitchToGain6,
scratch.hasSwitchToGain1,
scratch.isSaturated,
offsetForHashes);
cudaCheck(cudaGetLastError());
// run minimization kernels
v1::minimization_procedure(
eventInputCPU, eventInputGPU, eventOutputGPU, scratch, conditions, configParameters, cudaStream);
if (configParameters.shouldRunTimingComputation) {
//
// TODO: this guy can run concurrently with other kernels,
// there is no dependence on the order of execution
//
unsigned int threads_time_init = threads_1d;
unsigned int blocks_time_init = blocks_1d;
int sharedBytesInit = 2 * threads_time_init * sizeof(SampleVector::Scalar);
kernel_time_computation_init<<<blocks_time_init, threads_time_init, sharedBytesInit, cudaStream.id()>>>(
eventInputGPU.digis,
eventInputGPU.ids,
conditions.pedestals.rms_x12,
conditions.pedestals.rms_x6,
conditions.pedestals.rms_x1,
conditions.pedestals.mean_x12,
conditions.pedestals.mean_x6,
conditions.pedestals.mean_x1,
conditions.gainRatios.gain12Over6,
conditions.gainRatios.gain6Over1,
scratch.sample_values,
scratch.sample_value_errors,
scratch.ampMaxError,
scratch.useless_sample_values,
scratch.pedestal_nums,
offsetForHashes,
conditions.sampleMask.getEcalSampleMaskRecordEB(),
conditions.sampleMask.getEcalSampleMaskRecordEE(),
totalChannels);
cudaCheck(cudaGetLastError());
//
// TODO: small kernel only for EB. It needs to be checked if
// fusing such small kernels is beneficial here
//
// we are running only over EB digis
// therefore we need to create threads/blocks only for that
unsigned int const threadsFixMGPA = threads_1d;
unsigned int const blocksFixMGPA =
threadsFixMGPA > 10 * eventInputCPU.ebDigis.size()
? 1
: (10 * eventInputCPU.ebDigis.size() + threadsFixMGPA - 1) / threadsFixMGPA;
kernel_time_compute_fixMGPAslew<<<blocksFixMGPA, threadsFixMGPA, 0, cudaStream.id()>>>(
eventInputGPU.digis,
scratch.sample_values,
scratch.sample_value_errors,
scratch.useless_sample_values,
conditions.sampleMask.getEcalSampleMaskRecordEB(),
totalChannels);
cudaCheck(cudaGetLastError());
//
//
//
int sharedBytes = EcalDataFrame::MAXSAMPLES * nchannels_per_block * 4 * sizeof(SampleVector::Scalar);
auto const threads_nullhypot = threads_1d;
auto const blocks_nullhypot = blocks_1d;
kernel_time_compute_nullhypot<<<blocks_nullhypot, threads_nullhypot, sharedBytes, cudaStream.id()>>>(
scratch.sample_values,
scratch.sample_value_errors,
scratch.useless_sample_values,
scratch.chi2sNullHypot,
scratch.sum0sNullHypot,
scratch.sumAAsNullHypot,
totalChannels);
cudaCheck(cudaGetLastError());
unsigned int nchannels_per_block_makeratio = 10;
unsigned int threads_makeratio = 45 * nchannels_per_block_makeratio;
unsigned int blocks_makeratio = threads_makeratio > 45 * totalChannels
? 1
: (totalChannels * 45 + threads_makeratio - 1) / threads_makeratio;
int sharedBytesMakeRatio = 5 * threads_makeratio * sizeof(SampleVector::Scalar);
kernel_time_compute_makeratio<<<blocks_makeratio, threads_makeratio, sharedBytesMakeRatio, cudaStream.id()>>>(
scratch.sample_values,
scratch.sample_value_errors,
eventInputGPU.ids,
scratch.useless_sample_values,
scratch.pedestal_nums,
configParameters.amplitudeFitParametersEB,
configParameters.amplitudeFitParametersEE,
configParameters.timeFitParametersEB,
configParameters.timeFitParametersEE,
scratch.sumAAsNullHypot,
scratch.sum0sNullHypot,
scratch.tMaxAlphaBetas,
scratch.tMaxErrorAlphaBetas,
scratch.accTimeMax,
scratch.accTimeWgt,
scratch.tcState,
configParameters.timeFitParametersSizeEB,
configParameters.timeFitParametersSizeEE,
configParameters.timeFitLimitsFirstEB,
configParameters.timeFitLimitsFirstEE,
configParameters.timeFitLimitsSecondEB,
configParameters.timeFitLimitsSecondEE,
totalChannels);
cudaCheck(cudaGetLastError());
//
//
//
auto const threads_findamplchi2 = threads_1d;
auto const blocks_findamplchi2 = blocks_1d;
int const sharedBytesFindAmplChi2 = 2 * threads_findamplchi2 * sizeof(SampleVector::Scalar);
kernel_time_compute_findamplchi2_and_finish<<<blocks_findamplchi2,
threads_findamplchi2,
sharedBytesFindAmplChi2,
cudaStream.id()>>>(scratch.sample_values,
scratch.sample_value_errors,
eventInputGPU.ids,
scratch.useless_sample_values,
scratch.tMaxAlphaBetas,
scratch.tMaxErrorAlphaBetas,
scratch.accTimeMax,
scratch.accTimeWgt,
configParameters.amplitudeFitParametersEB,
configParameters.amplitudeFitParametersEE,
scratch.sumAAsNullHypot,
scratch.sum0sNullHypot,
scratch.chi2sNullHypot,
scratch.tcState,
scratch.ampMaxAlphaBeta,
scratch.ampMaxError,
scratch.timeMax,
scratch.timeError,
totalChannels);
cudaCheck(cudaGetLastError());
//
//
//
auto const threads_timecorr = 32;
auto const blocks_timecorr =
threads_timecorr > totalChannels ? 1 : (totalChannels + threads_timecorr - 1) / threads_timecorr;
kernel_time_correction_and_finalize<<<blocks_timecorr, threads_timecorr, 0, cudaStream.id()>>>(
eventOutputGPU.amplitude,
eventInputGPU.digis,
eventInputGPU.ids,
conditions.timeBiasCorrections.EBTimeCorrAmplitudeBins,
conditions.timeBiasCorrections.EETimeCorrAmplitudeBins,
conditions.timeBiasCorrections.EBTimeCorrShiftBins,
conditions.timeBiasCorrections.EETimeCorrShiftBins,
scratch.timeMax,
scratch.timeError,
conditions.pedestals.rms_x12,
conditions.timeCalibConstants.values,
eventOutputGPU.jitter,
eventOutputGPU.jitterError,
eventOutputGPU.flags,
conditions.timeBiasCorrections.EBTimeCorrAmplitudeBinsSize,
conditions.timeBiasCorrections.EETimeCorrAmplitudeBinsSize,
configParameters.timeConstantTermEB,
configParameters.timeConstantTermEE,
conditions.timeOffsetConstant.getEBValue(),
conditions.timeOffsetConstant.getEEValue(),
configParameters.timeNconstEB,
configParameters.timeNconstEE,
configParameters.amplitudeThreshEB,
configParameters.amplitudeThreshEE,
configParameters.outOfTimeThreshG12pEB,
configParameters.outOfTimeThreshG12pEE,
configParameters.outOfTimeThreshG12mEB,
configParameters.outOfTimeThreshG12mEE,
configParameters.outOfTimeThreshG61pEB,
configParameters.outOfTimeThreshG61pEE,
configParameters.outOfTimeThreshG61mEB,
configParameters.outOfTimeThreshG61mEE,
offsetForHashes,
totalChannels);
cudaCheck(cudaGetLastError());
}
/*
cudaEventRecord(end_event, 0);
cudaEventSynchronize(end_event);
float ms;
cudaEventElapsedTime(&ms, start_event, end_event);
std::cout << "elapsed time = " << ms << std::endl;
*/
}
} // namespace multifit
} // namespace ecal
|
1dd17dda18497848b8f1d36e5bf19430d0c437f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cmath>
#include<cstdio>
#define M 2
#define N 2
#define K 2
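// A is M x K, B is K x N and C = A*B is M x N; all three are stored row-major as flat int arrays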
__global__
void matrix_multiply(int* A,int*B,int*C)
{
//printf("%d %d\n", A[0],A[1]);
//printf("%d %d\n", C[0],C[1]);
for(int i=0;i<M*K;i++){ printf("%d\n",A[i]);}
for(int i=0;i<K*N;i++){ printf("%d\n",B[i]);}
int I=blockIdx.x*blockDim.x+threadIdx.x;
int J=blockIdx.y*blockDim.y+threadIdx.y;
if( I < M || J < N)
{
for( i=0;i<M;i++){
for( j=0;j<N;j++){
for( k=0;k<K;k++){
C[I*N+J]=C[I*N+J]+ A[I*K+k]*B[k*N+J];
}
}
}
}
/********************************************************
row major
what is Cij =C[I*n+J];
What is Aik=A[i*K+k];
what is Bkj=B[k*N+J];
*/**************************************************************************
}
for (i = 0; i < M * N; i++){
printf("[%d] =%d\n",i, C[i]);
}
}
int main(){
int A[M*K]={1,2,3,1};
int B[K*N]={2,4,5,2};
int C[M*N]={0,0,0,0};
int* d_A;int* d_B;int* d_C;
//allocating space on the device for the full matrices
hipMalloc(&d_A, M*K*sizeof(int));
hipMalloc(&d_B, K*N*sizeof(int));
hipMalloc(&d_C, M*N*sizeof(int));
//copy A, B and the zero-initialised C FROM HOST TO DEVICE
hipMemcpy(d_A, &A[0], M*K*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_B, &B[0], K*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_C, &C[0], M*N*sizeof(int), hipMemcpyHostToDevice);
//launch one thread per element of the M x N result
dim3 block(M,N);
hipLaunchKernelGGL(matrix_multiply, dim3(1), dim3(block), 0, 0, d_A, d_B, d_C);
//COPY RESULT BACK TO HOST and print it
hipMemcpy(&C[0], d_C, M*N*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < M*N; i++){
printf("C[%d] = %d\n", i, C[i]);
}
hipFree(d_A);//TO FREE DEVICE MEMORY
hipFree(d_B);
hipFree(d_C);
}
| 1dd17dda18497848b8f1d36e5bf19430d0c437f7.cu |
#include<cmath>
#include<cstdio>
#define M 2
#define N 2
#define K 2
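// A is M x K, B is K x N and C = A*B is M x N; all three are stored row-major as flat int arrays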
__global__
void matrix_multiply(int* A,int*B,int*C)
{
//printf("%d %d\n", A[0],A[1]);
//printf("%d %d\n", C[0],C[1]);
for(int i=0;i<M*K;i++){ printf("%d\n",A[i]);}
for(int i=0;i<K*N;i++){ printf("%d\n",B[i]);}
int I=blockIdx.x*blockDim.x+threadIdx.x;
int J=blockIdx.y*blockDim.y+threadIdx.y;
if( I < M || J < N)
{
for( i=0;i<M;i++){
for( j=0;j<N;j++){
for( k=0;k<K;k++){
C[I*N+J]=C[I*N+J]+ A[I*K+k]*B[k*N+J];
}
}
}
}
/********************************************************
row major
what is Cij =C[I*n+J];
What is Aik=A[i*K+k];
what is Bkj=B[k*N+J];
*/**************************************************************************
}
for (i = 0; i < M * N; i++){
printf("[%d] =%d\n",i, C[i]);
}
}
int main(){
int A[M*K]={1,2,3,1};
int B[K*N]={2,4,5,2};
int C[M*N]={0,0,0,0};
int* d_A;int* d_B;int* d_C;
//allocating space on the device for the full matrices
cudaMalloc(&d_A, M*K*sizeof(int));
cudaMalloc(&d_B, K*N*sizeof(int));
cudaMalloc(&d_C, M*N*sizeof(int));
//copy A, B and the zero-initialised C FROM HOST TO DEVICE
cudaMemcpy(d_A, &A[0], M*K*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, &B[0], K*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, &C[0], M*N*sizeof(int), cudaMemcpyHostToDevice);
//launch one thread per element of the M x N result
dim3 block(M,N);
matrix_multiply<<<1,block>>>(d_A,d_B,d_C);
//COPY RESULT BACK TO HOST and print it
cudaMemcpy(&C[0], d_C, M*N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < M*N; i++){
printf("C[%d] = %d\n", i, C[i]);
}
cudaFree(d_A);//TO FREE DEVICE MEMORY
cudaFree(d_B);
cudaFree(d_C);
}
|
2477c534f9b7b42c9c19bd901533d3aed641aa06.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection for parallel beam
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_parallel.hpp"
#include <stdio.h>
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("%s \n",msg);\
printf("CBCT:CUDA:Atb",hipGetErrorString(__err));\
hipDeviceReset();\
exit(__err);\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, hipTextureType3D , hipReadModeElementType> tex;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8;  // Number of voxels to be computed by a single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArrayDevParallel[6*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
Point3D projParamsArrayHostParallel[6*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store three geometry values per projection (DSD, DSO and COR in this parallel-beam version)
__constant__ float projSinCosArrayDevParallel[3*PROJ_PER_KERNEL];
float projSinCosArrayHostParallel[3*PROJ_PER_KERNEL];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojection_parallel
//
// Description: Main parallel-beam backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection_parallel(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections)
{
// Old kernel call signature:
// kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
// We just read in most of the params from the constant memory instead of getting them from the param list.
// This is because we now have MANY params, since single kernel processes more than one projection!
/* __global__ void kernelPixelBackprojectionFDK(const Geometry geo,
* float* image,
* const int indAlpha,
* const Point3D deltaX ,
* const Point3D deltaY,
* const Point3D deltaZ,
* const Point3D xyzOrigin,
* const Point3D xyzOffset,
* const Point3D uv0Offset,
* const float sinalpha,
* const float cosalpha){
*/
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we dont go out of bounds
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the curent 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our voxelColumn values will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArrayDevParallel[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaY = projParamsArrayDevParallel[6*projNumber+1];
Point3D deltaZ = projParamsArrayDevParallel[6*projNumber+2];
Point3D xyzOrigin = projParamsArrayDevParallel[6*projNumber+3];
Point3D xyzOffset = projParamsArrayDevParallel[6*projNumber+4];
Point3D S = projParamsArrayDevParallel[6*projNumber+5];
float DSD = projSinCosArrayDevParallel[3*projNumber];      // 3*projNumber because we have 3 float values (DSD, DSO, COR) per projection
float DSO = projSinCosArrayDevParallel[3*projNumber+1];
float COR = projSinCosArrayDevParallel[3*projNumber+2];
// Geometric transformations:
//Source, scaled XYZ coordinates
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
S.x=DSO;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
S.y=P.y;S.z=P.z;
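// Parallel beam: the ray through this voxel is parallel to the rotated X axis, so the "source" keeps the voxel's Y and Z coordinates and only differs in X (placed at DSO).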
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(DSO-DSD /*-DOD*/ - S.x)/vectX;
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
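// (u,v): detector pixel indices, shifted so that the detector centre maps to (nDetecU/2, nDetecV/2)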
u=y+geo.nDetecU/2-0.5;
v=z+geo.nDetecV/2-0.5;
// Get the value at the computed (U,V) detector position and accumulate it into the voxel.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=tex3D(tex, v +0.5 ,
u +0.5 ,
indAlpha+0.5);
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
}  // END kernelPixelBackprojection_parallel
//______________________________________________________________________________
//
// Function: voxel_backprojection_parallel
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection_parallel(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
hipArray *d_projectiondata = 0;
const hipExtent extent = make_hipExtent(geo.nDetecV,geo.nDetecU,nalpha);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(&copyParams);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
hipMalloc((void**)&dimage, num_bytes);
hipMemset(dimage,0,num_bytes);
cudaCheckErrors("hipMalloc fail");
// If we are going to time
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++)
{
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
int j;
for(j=0; j<PROJ_PER_KERNEL; j++)
{
int currProjNumber=i*PROJ_PER_KERNEL+j;
if(currProjNumber>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source;
float sinalpha,cosalpha;
geo.alpha=-alphas[currProjNumber*3];
// sinalpha=sin(geo.alpha);
// cosalpha=cos(geo.alpha);
projSinCosArrayHostParallel[3*j]=geo.DSD[currProjNumber];  // 3*j because we have 3 float values (DSD, DSO, COR) per projection
projSinCosArrayHostParallel[3*j+1]=geo.DSO[currProjNumber];
projSinCosArrayHostParallel[3*j+2]=geo.COR[currProjNumber];
computeDeltasCubeParallel(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ);
offOrig.x=geo.offOrigX[currProjNumber];
offOrig.y=geo.offOrigY[currProjNumber];
projParamsArrayHostParallel[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection
projParamsArrayHostParallel[6*j+1]=deltaY;
projParamsArrayHostParallel[6*j+2]=deltaZ;
projParamsArrayHostParallel[6*j+3]=xyzOrigin;
projParamsArrayHostParallel[6*j+4]=offOrig;
projParamsArrayHostParallel[6*j+5]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
hipMemcpyToSymbol(projSinCosArrayDevParallel, projSinCosArrayHostParallel, sizeof(float)*3*PROJ_PER_KERNEL);
hipMemcpyToSymbol(projParamsArrayDevParallel, projParamsArrayHostParallel, sizeof(Point3D)*6*PROJ_PER_KERNEL);
hipLaunchKernelGGL(( kernelPixelBackprojection_parallel), dim3(grid),dim3(block), 0, 0, geo,dimage,i,nalpha);
cudaCheckErrors("Kernel fail");
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
if (timekernel)
{
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy result fail");
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dimage);
hipFreeArray(d_projectiondata);
cudaCheckErrors("hipFree d_imagedata fail");
hipDeviceReset();
return 0;
} // END voxel_backprojection
void computeDeltasCubeParallel(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ)
{
Point3D P0, Px0,Py0,Pz0, source;
// Get coords of Img(0,0,0)
P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x;
Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y;
Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ;
// Rotate image (this is equivalent of rotating the source and detector)
Point3D P, Px,Py,Pz; // We need other auxiliary variables to be able to perform the rotation, or we would overwrite values!
P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z;
Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z;
Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z;
Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z;
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
// printf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
} // END computeDeltasCubeParallel
| 2477c534f9b7b42c9c19bd901533d3aed641aa06.cu | /*-------------------------------------------------------------------------
*
 * CUDA function for backprojection for parallel beam
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection_parallel.hpp"
#include <stdio.h>
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("%s \n",msg);\
printf("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
cudaDeviceReset();\
exit(__err);\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArrayDevParallel[6*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
Point3D projParamsArrayHostParallel[6*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection)
__constant__ float projSinCosArrayDevParallel[3*PROJ_PER_KERNEL];
float projSinCosArrayHostParallel[3*PROJ_PER_KERNEL];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: rollPitchYaw
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection_parallel(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections)
{
// Old kernel call signature:
// kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
// We just read in most of the params from the constant memory instead of getting them from the param list.
// This is because we now have MANY params, since single kernel processes more than one projection!
/* __global__ void kernelPixelBackprojectionFDK(const Geometry geo,
* float* image,
* const int indAlpha,
* const Point3D deltaX ,
* const Point3D deltaY,
* const Point3D deltaZ,
* const Point3D xyzOrigin,
* const Point3D xyzOffset,
* const Point3D uv0Offset,
* const float sinalpha,
* const float cosalpha){
*/
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we dont go out of bounds
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
    // First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
        // Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArrayDevParallel[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaY = projParamsArrayDevParallel[6*projNumber+1];
Point3D deltaZ = projParamsArrayDevParallel[6*projNumber+2];
Point3D xyzOrigin = projParamsArrayDevParallel[6*projNumber+3];
Point3D xyzOffset = projParamsArrayDevParallel[6*projNumber+4];
Point3D S = projParamsArrayDevParallel[6*projNumber+5];
float DSD = projSinCosArrayDevParallel[3*projNumber]; // 2*projNumber because we have 2 float (sin or cos angle) values per projection
float DSO = projSinCosArrayDevParallel[3*projNumber+1];
float COR = projSinCosArrayDevParallel[3*projNumber+2];
        // Geometric transformations:
//Source, scaled XYZ coordinates
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
S.x=DSO;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
S.y=P.y;S.z=P.z;
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(DSO-DSD /*-DOD*/ - S.x)/vectX;
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+geo.nDetecU/2-0.5;
v=z+geo.nDetecV/2-0.5;
            // Get Value in the computed (U,V) and multiply by the corresponding weight.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=tex3D(tex, v +0.5 ,
u +0.5 ,
indAlpha+0.5);
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
        image[idx] = voxelColumn[colIdx]; // Write the updated voxel value from the local array back to the 3D volume in main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
//______________________________________________________________________________
//
// Function: voxel_backprojection_parallel
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection_parallel(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
cudaArray *d_projectiondata = 0;
const cudaExtent extent = make_cudaExtent(geo.nDetecV,geo.nDetecU,nalpha);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
cudaMalloc((void**)&dimage, num_bytes);
cudaMemset(dimage,0,num_bytes);
cudaCheckErrors("cudaMalloc fail");
// If we are going to time
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
    // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++)
{
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
int j;
for(j=0; j<PROJ_PER_KERNEL; j++)
{
int currProjNumber=i*PROJ_PER_KERNEL+j;
if(currProjNumber>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source;
float sinalpha,cosalpha;
geo.alpha=-alphas[currProjNumber*3];
// sinalpha=sin(geo.alpha);
// cosalpha=cos(geo.alpha);
projSinCosArrayHostParallel[3*j]=geo.DSD[currProjNumber]; // 3*j because we have 3 float (sin or cos angle) values per projection
projSinCosArrayHostParallel[3*j+1]=geo.DSO[currProjNumber];
projSinCosArrayHostParallel[3*j+2]=geo.COR[currProjNumber];
computeDeltasCubeParallel(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ);
offOrig.x=geo.offOrigX[currProjNumber];
offOrig.y=geo.offOrigY[currProjNumber];
projParamsArrayHostParallel[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection
projParamsArrayHostParallel[6*j+1]=deltaY;
projParamsArrayHostParallel[6*j+2]=deltaZ;
projParamsArrayHostParallel[6*j+3]=xyzOrigin;
projParamsArrayHostParallel[6*j+4]=offOrig;
projParamsArrayHostParallel[6*j+5]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
cudaMemcpyToSymbol(projSinCosArrayDevParallel, projSinCosArrayHostParallel, sizeof(float)*3*PROJ_PER_KERNEL);
cudaMemcpyToSymbol(projParamsArrayDevParallel, projParamsArrayHostParallel, sizeof(Point3D)*6*PROJ_PER_KERNEL);
kernelPixelBackprojection_parallel<<<grid,block>>>(geo,dimage,i,nalpha);
cudaCheckErrors("Kernel fail");
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
if (timekernel)
{
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("%f\n" ,elapsedTime);
cudaCheckErrors("cuda Timing fail");
}
cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy result fail");
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dimage);
cudaFreeArray(d_projectiondata);
cudaCheckErrors("cudaFree d_imagedata fail");
cudaDeviceReset();
return 0;
} // END voxel_backprojection
void computeDeltasCubeParallel(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ)
{
Point3D P0, Px0,Py0,Pz0, source;
// Get coords of Img(0,0,0)
P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
    // Get coords from next voxel in each direction
Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x;
Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y;
Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ;
// Rotate image (this is equivalent of rotating the source and detector)
    Point3D P, Px,Py,Pz; // We need other auxiliary variables to be able to perform the rotation, or we would overwrite values!
P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z;
Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z;
Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z;
Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z;
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
// printf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
} // END computeDeltasCube
|
768cfc6e84d187321df8d0a77ce3ba2ad08de5cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolution_kernel_naive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
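// For each matrix size and launch configuration: launch once and synchronize, warm up for 10 iterations, then time 1000 kernel launches.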
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *filter = NULL;
hipMalloc(&filter, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((convolution_kernel_naive), dim3(gridBlock), dim3(threadBlock), 0, 0, output,input,filter);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((convolution_kernel_naive), dim3(gridBlock), dim3(threadBlock), 0, 0, output,input,filter);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((convolution_kernel_naive), dim3(gridBlock), dim3(threadBlock), 0, 0, output,input,filter);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 768cfc6e84d187321df8d0a77ce3ba2ad08de5cd.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolution_kernel_naive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *filter = NULL;
cudaMalloc(&filter, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
convolution_kernel_naive<<<gridBlock,threadBlock>>>(output,input,filter);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convolution_kernel_naive<<<gridBlock,threadBlock>>>(output,input,filter);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convolution_kernel_naive<<<gridBlock,threadBlock>>>(output,input,filter);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9713aacbb1c64d86920757eb048096e3a1cd04b9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <metrics/v_measure.cuh>
#include <raft/cudart_utils.h>
#include <random>
namespace MLCommon {
namespace Metrics {
// parameter structure definition
struct vMeasureParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
double beta;
bool sameArrays;
double tolerance;
};
// test fixture class
template <typename T>
class vMeasureTest : public ::testing::TestWithParam<vMeasureParam> {
protected:
// the constructor
void SetUp() override
{
// getting the parameters
params = ::testing::TestWithParam<vMeasureParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
// generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
}
// allocating and initializing memory to the GPU
RAFT_CUDA_TRY(hipStreamCreate(&stream));
rmm::device_uvector<T> truthClusterArray(nElements, stream);
rmm::device_uvector<T> predClusterArray(nElements, stream);
raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream);
raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream);
// calculating the golden output
double truthHomogeity, truthCompleteness;
truthHomogeity = MLCommon::Metrics::homogeneity_score(truthClusterArray.data(),
predClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream);
truthCompleteness = MLCommon::Metrics::homogeneity_score(predClusterArray.data(),
truthClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream);
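    // V-measure is the weighted harmonic mean of homogeneity (h) and completeness (c):
    //   v = (1 + beta) * h * c / (beta * h + c), defined as 0 when h + c == 0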
if (truthCompleteness + truthHomogeity == 0.0)
truthVMeasure = 0.0;
else
truthVMeasure = ((1 + params.beta) * truthHomogeity * truthCompleteness /
(params.beta * truthHomogeity + truthCompleteness));
// calling the v_measure CUDA implementation
computedVMeasure = MLCommon::Metrics::v_measure(truthClusterArray.data(),
predClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream,
params.beta);
}
// the destructor
void TearDown() override { RAFT_CUDA_TRY(hipStreamDestroy(stream)); }
// declaring the data values
vMeasureParam params;
T lowerLabelRange, upperLabelRange;
int nElements = 0;
double truthVMeasure = 0;
double computedVMeasure = 0;
hipStream_t stream = 0;
};
// setting test parameter values
const std::vector<vMeasureParam> inputs = {{199, 1, 10, 1.0, false, 0.000001},
{200, 15, 100, 1.0, false, 0.000001},
{100, 1, 20, 1.0, false, 0.000001},
{10, 1, 10, 1.0, false, 0.000001},
{198, 1, 100, 1.0, false, 0.000001},
{300, 3, 99, 1.0, false, 0.000001},
{199, 1, 10, 1.0, true, 0.000001},
{200, 15, 100, 1.0, true, 0.000001},
{100, 1, 20, 1.0, true, 0.000001},
{10, 1, 10, 1.0, true, 0.000001},
{198, 1, 100, 1.0, true, 0.000001},
{300, 3, 99, 1.0, true, 0.000001}};
// writing the test suite
typedef vMeasureTest<int> vMeasureTestClass;
TEST_P(vMeasureTestClass, Result)
{
ASSERT_NEAR(computedVMeasure, truthVMeasure, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(vMeasure, vMeasureTestClass, ::testing::ValuesIn(inputs));
} // end namespace Metrics
} // end namespace MLCommon
| 9713aacbb1c64d86920757eb048096e3a1cd04b9.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <metrics/v_measure.cuh>
#include <raft/cudart_utils.h>
#include <random>
namespace MLCommon {
namespace Metrics {
// parameter structure definition
struct vMeasureParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
double beta;
bool sameArrays;
double tolerance;
};
// test fixture class
template <typename T>
class vMeasureTest : public ::testing::TestWithParam<vMeasureParam> {
protected:
// the constructor
void SetUp() override
{
// getting the parameters
params = ::testing::TestWithParam<vMeasureParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
// generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
}
// allocating and initializing memory to the GPU
RAFT_CUDA_TRY(cudaStreamCreate(&stream));
rmm::device_uvector<T> truthClusterArray(nElements, stream);
rmm::device_uvector<T> predClusterArray(nElements, stream);
raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream);
raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream);
// calculating the golden output
double truthHomogeity, truthCompleteness;
truthHomogeity = MLCommon::Metrics::homogeneity_score(truthClusterArray.data(),
predClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream);
truthCompleteness = MLCommon::Metrics::homogeneity_score(predClusterArray.data(),
truthClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream);
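    // V-measure is the weighted harmonic mean of homogeneity (h) and completeness (c):
    //   v = (1 + beta) * h * c / (beta * h + c), defined as 0 when h + c == 0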
if (truthCompleteness + truthHomogeity == 0.0)
truthVMeasure = 0.0;
else
truthVMeasure = ((1 + params.beta) * truthHomogeity * truthCompleteness /
(params.beta * truthHomogeity + truthCompleteness));
// calling the v_measure CUDA implementation
computedVMeasure = MLCommon::Metrics::v_measure(truthClusterArray.data(),
predClusterArray.data(),
nElements,
lowerLabelRange,
upperLabelRange,
stream,
params.beta);
}
// the destructor
void TearDown() override { RAFT_CUDA_TRY(cudaStreamDestroy(stream)); }
// declaring the data values
vMeasureParam params;
T lowerLabelRange, upperLabelRange;
int nElements = 0;
double truthVMeasure = 0;
double computedVMeasure = 0;
cudaStream_t stream = 0;
};
// setting test parameter values
const std::vector<vMeasureParam> inputs = {{199, 1, 10, 1.0, false, 0.000001},
{200, 15, 100, 1.0, false, 0.000001},
{100, 1, 20, 1.0, false, 0.000001},
{10, 1, 10, 1.0, false, 0.000001},
{198, 1, 100, 1.0, false, 0.000001},
{300, 3, 99, 1.0, false, 0.000001},
{199, 1, 10, 1.0, true, 0.000001},
{200, 15, 100, 1.0, true, 0.000001},
{100, 1, 20, 1.0, true, 0.000001},
{10, 1, 10, 1.0, true, 0.000001},
{198, 1, 100, 1.0, true, 0.000001},
{300, 3, 99, 1.0, true, 0.000001}};
// writing the test suite
typedef vMeasureTest<int> vMeasureTestClass;
TEST_P(vMeasureTestClass, Result)
{
ASSERT_NEAR(computedVMeasure, truthVMeasure, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(vMeasure, vMeasureTestClass, ::testing::ValuesIn(inputs));
} // end namespace Metrics
} // end namespace MLCommon
|
0c478e2c9e2de51f24d19fca898b8601b643f02f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Paulius Micikevicius ([email protected])
* Max Grossman ([email protected])
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include "common.h"
#include "common2d.h"
#define BDIM 256
__global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, TYPE *c_coeff,
int nx, int ny, int dimx, int radius) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int y = tid / nx;
const int x = tid % nx;
const int this_offset = POINT_OFFSET(x, y, dimx, radius);
TYPE div = c_coeff[0] * curr[this_offset];
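    // Accumulate the symmetric stencil: for each distance d, one coefficient weights the four taps at +/-d in x and y.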
for (int d = 1; d <= radius; d++) {
const int y_pos_offset = POINT_OFFSET(x, y + d, dimx, radius);
const int y_neg_offset = POINT_OFFSET(x, y - d, dimx, radius);
const int x_pos_offset = POINT_OFFSET(x + d, y, dimx, radius);
const int x_neg_offset = POINT_OFFSET(x - d, y, dimx, radius);
div += c_coeff[d] * (curr[y_pos_offset] +
curr[y_neg_offset] + curr[x_pos_offset] +
curr[x_neg_offset]);
}
const TYPE temp = 2.0f * curr[this_offset] - next[this_offset];
next[this_offset] = temp + div * vsq[this_offset];
}
int main( int argc, char *argv[] ) {
config conf;
setup_config(&conf, argc, argv);
init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled);
if ((conf.ny * conf.nx) % BDIM != 0) {
fprintf(stderr, "Invalid problem configuration, ny x nx must be an "
"even multiple of %d\n", BDIM);
return 1;
}
TYPE dx = 20.f;
TYPE dt = 0.002f;
// compute the pitch for perfect coalescing
size_t dimx = conf.nx + 2*conf.radius;
size_t dimy = conf.ny + 2*conf.radius;
size_t nbytes = dimx * dimy * sizeof(TYPE);
if (conf.verbose) {
printf("x = %zu, y = %zu\n", dimx, dimy);
printf("nsteps = %d\n", conf.nsteps);
printf("radius = %d\n", conf.radius);
}
TYPE c_coeff[NUM_COEFF];
TYPE *curr = (TYPE *)malloc(nbytes);
TYPE *next = (TYPE *)malloc(nbytes);
TYPE *vsq = (TYPE *)malloc(nbytes);
if (curr == NULL || next == NULL || vsq == NULL) {
fprintf(stderr, "Allocations failed\n");
return 1;
}
config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps);
TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt);
init_data(curr, next, vsq, c_coeff, dimx, dimy, dimx * sizeof(TYPE), dx, dt);
TYPE *d_curr, *d_next, *d_vsq, *d_c_coeff;
CHECK(hipMalloc((void **)&d_curr, nbytes));
CHECK(hipMalloc((void **)&d_next, nbytes));
CHECK(hipMalloc((void **)&d_vsq, nbytes));
CHECK(hipMalloc((void **)&d_c_coeff, NUM_COEFF * sizeof(TYPE)));
dim3 block(BDIM);
dim3 grid((conf.nx * conf.ny) / BDIM);
double mem_start = seconds();
CHECK(hipMemcpy(d_curr, curr, nbytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_next, next, nbytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_vsq, vsq, nbytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE),
hipMemcpyHostToDevice));
double start = seconds();
for (int step = 0; step < conf.nsteps; step++) {
for (int src = 0; src < conf.nsrcs; src++) {
if (conf.srcs[src].t > step) continue;
int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y,
dimx, conf.radius);
CHECK(hipMemcpy(d_curr + src_offset, srcs[src] + step,
sizeof(TYPE), hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( fwd_kernel), dim3(grid), dim3(block), 0, 0, d_next, d_curr, d_vsq, d_c_coeff,
conf.nx, conf.ny, dimx, conf.radius);
TYPE *tmp = d_next;
d_next = d_curr;
d_curr = tmp;
update_progress(step + 1);
}
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
double compute_s = seconds() - start;
CHECK(hipMemcpy(curr, d_curr, nbytes, hipMemcpyDeviceToHost));
double total_s = seconds() - mem_start;
float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps);
printf("iso_r4_2x: %8.10f s total, %8.10f s/step, %8.2f Mcells/s/step\n",
total_s, compute_s / conf.nsteps, point_rate / 1000000.f);
if (conf.save_text) {
save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius);
}
free(curr);
free(next);
free(vsq);
for (int i = 0; i < conf.nsrcs; i++) {
free(srcs[i]);
}
free(srcs);
CHECK(hipFree(d_curr));
CHECK(hipFree(d_next));
CHECK(hipFree(d_vsq));
CHECK(hipFree(d_c_coeff));
return 0;
}
| 0c478e2c9e2de51f24d19fca898b8601b643f02f.cu | /*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Paulius Micikevicius ([email protected])
* Max Grossman ([email protected])
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include "common.h"
#include "common2d.h"
#define BDIM 256
__global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, TYPE *c_coeff,
int nx, int ny, int dimx, int radius) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int y = tid / nx;
const int x = tid % nx;
const int this_offset = POINT_OFFSET(x, y, dimx, radius);
TYPE div = c_coeff[0] * curr[this_offset];
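    // Accumulate the symmetric stencil: for each distance d, one coefficient weights the four taps at +/-d in x and y.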
for (int d = 1; d <= radius; d++) {
const int y_pos_offset = POINT_OFFSET(x, y + d, dimx, radius);
const int y_neg_offset = POINT_OFFSET(x, y - d, dimx, radius);
const int x_pos_offset = POINT_OFFSET(x + d, y, dimx, radius);
const int x_neg_offset = POINT_OFFSET(x - d, y, dimx, radius);
div += c_coeff[d] * (curr[y_pos_offset] +
curr[y_neg_offset] + curr[x_pos_offset] +
curr[x_neg_offset]);
}
const TYPE temp = 2.0f * curr[this_offset] - next[this_offset];
next[this_offset] = temp + div * vsq[this_offset];
}
int main( int argc, char *argv[] ) {
config conf;
setup_config(&conf, argc, argv);
init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled);
if ((conf.ny * conf.nx) % BDIM != 0) {
fprintf(stderr, "Invalid problem configuration, ny x nx must be an "
"even multiple of %d\n", BDIM);
return 1;
}
TYPE dx = 20.f;
TYPE dt = 0.002f;
// compute the pitch for perfect coalescing
size_t dimx = conf.nx + 2*conf.radius;
size_t dimy = conf.ny + 2*conf.radius;
size_t nbytes = dimx * dimy * sizeof(TYPE);
if (conf.verbose) {
printf("x = %zu, y = %zu\n", dimx, dimy);
printf("nsteps = %d\n", conf.nsteps);
printf("radius = %d\n", conf.radius);
}
TYPE c_coeff[NUM_COEFF];
TYPE *curr = (TYPE *)malloc(nbytes);
TYPE *next = (TYPE *)malloc(nbytes);
TYPE *vsq = (TYPE *)malloc(nbytes);
if (curr == NULL || next == NULL || vsq == NULL) {
fprintf(stderr, "Allocations failed\n");
return 1;
}
config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps);
TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt);
init_data(curr, next, vsq, c_coeff, dimx, dimy, dimx * sizeof(TYPE), dx, dt);
TYPE *d_curr, *d_next, *d_vsq, *d_c_coeff;
CHECK(cudaMalloc((void **)&d_curr, nbytes));
CHECK(cudaMalloc((void **)&d_next, nbytes));
CHECK(cudaMalloc((void **)&d_vsq, nbytes));
CHECK(cudaMalloc((void **)&d_c_coeff, NUM_COEFF * sizeof(TYPE)));
dim3 block(BDIM);
dim3 grid((conf.nx * conf.ny) / BDIM);
double mem_start = seconds();
CHECK(cudaMemcpy(d_curr, curr, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_next, next, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_vsq, vsq, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE),
cudaMemcpyHostToDevice));
double start = seconds();
for (int step = 0; step < conf.nsteps; step++) {
for (int src = 0; src < conf.nsrcs; src++) {
if (conf.srcs[src].t > step) continue;
int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y,
dimx, conf.radius);
CHECK(cudaMemcpy(d_curr + src_offset, srcs[src] + step,
sizeof(TYPE), cudaMemcpyHostToDevice));
}
fwd_kernel<<<grid, block>>>(d_next, d_curr, d_vsq, d_c_coeff,
conf.nx, conf.ny, dimx, conf.radius);
TYPE *tmp = d_next;
d_next = d_curr;
d_curr = tmp;
update_progress(step + 1);
}
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
double compute_s = seconds() - start;
CHECK(cudaMemcpy(curr, d_curr, nbytes, cudaMemcpyDeviceToHost));
double total_s = seconds() - mem_start;
float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps);
printf("iso_r4_2x: %8.10f s total, %8.10f s/step, %8.2f Mcells/s/step\n",
total_s, compute_s / conf.nsteps, point_rate / 1000000.f);
if (conf.save_text) {
save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius);
}
free(curr);
free(next);
free(vsq);
for (int i = 0; i < conf.nsrcs; i++) {
free(srcs[i]);
}
free(srcs);
CHECK(cudaFree(d_curr));
CHECK(cudaFree(d_next));
CHECK(cudaFree(d_vsq));
CHECK(cudaFree(d_c_coeff));
return 0;
}
|
9e6c812fa1f3a617d80f299bac723e01c8908cf0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* File: gpu.cpp
* Author: aliendo
*
 * Created on December 26, 2013, 11:23 AM
*/
#include "gpu.h"
/*
gpu::gpu() {
setPresent();
int deviceCount;
if (present){
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
cout << "There is no device supporting CUDA" << endl;
gpu(false);
} else {
cout << "Nro de dispostivos:" << deviceCount << ":" << endl;
setDeviceProperties();
}
} else {
deviceCount = 0;
name = new string[1];
name[0]=" ";
major = new int[1];
major[0]=0;
minor = new int[1];
minor[0]=0;
totalGlobalMem = new unsigned int[1];
totalGlobalMem[0]=0;
multiProcessorCount = new int[1];
multiProcessorCount[0]=0;
numCores = new int[1];
numCores[0]=0;
totalConstMem = new unsigned int[1];
totalConstMem[0]=0;
sharedMemPerBlock = new unsigned int[1];
sharedMemPerBlock[0]=0;
regsPerBlock = new int[1];
regsPerBlock[0]=0;
warpSize = new int[1];
warpSize[0]=0;
maxThreadsPerBlock = new int[1];
maxThreadsPerBlock[0]=0;
maxThreadsDim0 = new int[1];
maxThreadsDim0[0]=0;
maxThreadsDim1 = new int[1];
maxThreadsDim1[0]=0;
maxThreadsDim2 = new int[1];
maxThreadsDim2[0]=0;
maxGridSize0 = new int[1];
maxGridSize0[0]=0;
maxGridSize1 = new int[1];
maxGridSize1[0]=0;
maxGridSize2 = new int[1];
maxGridSize2[0]=0;
memPitch = new unsigned int[1];
memPitch[0]=0;
textureAlignment = new unsigned int[1];
textureAlignment[0]=0;
clockRate = new float[1];
clockRate[0]=0;
deviceOverlap = new bool[1];
deviceOverlap[0]=0;
}
setNatr();
setValueatr();
setNameatr();
}
*/
void gpu::setDeviceProperties(){
int dev;
hipDeviceProp_t deviceProp;
name = new string[deviceCount];
major = new int[deviceCount];
minor = new int[deviceCount];
totalGlobalMem = new unsigned int[deviceCount];
multiProcessorCount = new int[deviceCount];
numCores = new int[deviceCount];
totalConstMem = new unsigned int[deviceCount];
sharedMemPerBlock = new unsigned int[deviceCount];
regsPerBlock = new int[deviceCount];
warpSize = new int[deviceCount];
maxThreadsPerBlock = new int[deviceCount];
maxThreadsDim0 = new int[deviceCount];
maxThreadsDim1 = new int[deviceCount];
maxThreadsDim2 = new int[deviceCount];
maxGridSize0 = new int[deviceCount];
maxGridSize1 = new int[deviceCount];
maxGridSize2 = new int[deviceCount];
memPitch = new unsigned int[deviceCount];
textureAlignment = new unsigned int[deviceCount];
clockRate = new float[deviceCount];
deviceOverlap = new bool[deviceCount];
for (dev = 0; dev < deviceCount; ++dev) {
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999){
//cout << "There is no device supporting CUDA." << endl;
gpu(false);
}
}
name[dev]=deviceProp.name;
major[dev]=deviceProp.major;
minor[dev]=deviceProp.minor;
totalGlobalMem[dev]=(unsigned int)deviceProp.totalGlobalMem;
#if CUDART_VERSION >= 2000
multiProcessorCount[dev]=deviceProp.multiProcessorCount;
numCores[dev]=8 * deviceProp.multiProcessorCount;
#else
multiProcessorCount[dev]=0;
numCores[dev]=0;
#endif
totalConstMem[dev]=(unsigned int)deviceProp.totalConstMem;
sharedMemPerBlock[dev]=(unsigned int)deviceProp.sharedMemPerBlock;
regsPerBlock[dev]=deviceProp.regsPerBlock;
warpSize[dev]=deviceProp.warpSize;
maxThreadsPerBlock[dev]=deviceProp.maxThreadsPerBlock;
maxThreadsDim0[dev]=deviceProp.maxThreadsDim[0];
maxThreadsDim1[dev]=deviceProp.maxThreadsDim[1];
maxThreadsDim2[dev]=deviceProp.maxThreadsDim[2];
maxGridSize0[dev]=deviceProp.maxGridSize[0];
maxGridSize1[dev]=deviceProp.maxGridSize[1];
maxGridSize2[dev]=deviceProp.maxGridSize[2];
memPitch[dev]=(unsigned int)deviceProp.memPitch;
textureAlignment[dev]=(unsigned int)deviceProp.textureAlignment;
clockRate[dev]=deviceProp.clockRate * 1e-6f;
#if CUDART_VERSION >= 2000
deviceOverlap[dev]=deviceProp.deviceOverlap;
#else
deviceOverlap[dev]=false;
#endif
}
}
void gpu::setDeviceCount(){
if (present){
hipGetDeviceCount(&deviceCount);
} else {
deviceCount=0;
}
}
| 9e6c812fa1f3a617d80f299bac723e01c8908cf0.cu | /*
* File: gpu.cpp
* Author: aliendo
*
* Created on 26 de diciembre de 2013, 11:23 AM
*/
#include "gpu.h"
/*
gpu::gpu() {
setPresent();
int deviceCount;
if (present){
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
cout << "There is no device supporting CUDA" << endl;
gpu(false);
} else {
cout << "Nro de dispostivos:" << deviceCount << ":" << endl;
setDeviceProperties();
}
} else {
deviceCount = 0;
name = new string[1];
name[0]=" ";
major = new int[1];
major[0]=0;
minor = new int[1];
minor[0]=0;
totalGlobalMem = new unsigned int[1];
totalGlobalMem[0]=0;
multiProcessorCount = new int[1];
multiProcessorCount[0]=0;
numCores = new int[1];
numCores[0]=0;
totalConstMem = new unsigned int[1];
totalConstMem[0]=0;
sharedMemPerBlock = new unsigned int[1];
sharedMemPerBlock[0]=0;
regsPerBlock = new int[1];
regsPerBlock[0]=0;
warpSize = new int[1];
warpSize[0]=0;
maxThreadsPerBlock = new int[1];
maxThreadsPerBlock[0]=0;
maxThreadsDim0 = new int[1];
maxThreadsDim0[0]=0;
maxThreadsDim1 = new int[1];
maxThreadsDim1[0]=0;
maxThreadsDim2 = new int[1];
maxThreadsDim2[0]=0;
maxGridSize0 = new int[1];
maxGridSize0[0]=0;
maxGridSize1 = new int[1];
maxGridSize1[0]=0;
maxGridSize2 = new int[1];
maxGridSize2[0]=0;
memPitch = new unsigned int[1];
memPitch[0]=0;
textureAlignment = new unsigned int[1];
textureAlignment[0]=0;
clockRate = new float[1];
clockRate[0]=0;
deviceOverlap = new bool[1];
deviceOverlap[0]=0;
}
setNatr();
setValueatr();
setNameatr();
}
*/
void gpu::setDeviceProperties(){
int dev;
cudaDeviceProp deviceProp;
name = new string[deviceCount];
major = new int[deviceCount];
minor = new int[deviceCount];
totalGlobalMem = new unsigned int[deviceCount];
multiProcessorCount = new int[deviceCount];
numCores = new int[deviceCount];
totalConstMem = new unsigned int[deviceCount];
sharedMemPerBlock = new unsigned int[deviceCount];
regsPerBlock = new int[deviceCount];
warpSize = new int[deviceCount];
maxThreadsPerBlock = new int[deviceCount];
maxThreadsDim0 = new int[deviceCount];
maxThreadsDim1 = new int[deviceCount];
maxThreadsDim2 = new int[deviceCount];
maxGridSize0 = new int[deviceCount];
maxGridSize1 = new int[deviceCount];
maxGridSize2 = new int[deviceCount];
memPitch = new unsigned int[deviceCount];
textureAlignment = new unsigned int[deviceCount];
clockRate = new float[deviceCount];
deviceOverlap = new bool[deviceCount];
for (dev = 0; dev < deviceCount; ++dev) {
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999){
//cout << "There is no device supporting CUDA." << endl;
gpu(false);
}
}
name[dev]=deviceProp.name;
major[dev]=deviceProp.major;
minor[dev]=deviceProp.minor;
totalGlobalMem[dev]=(unsigned int)deviceProp.totalGlobalMem;
#if CUDART_VERSION >= 2000
multiProcessorCount[dev]=deviceProp.multiProcessorCount;
numCores[dev]=8 * deviceProp.multiProcessorCount;
#else
multiProcessorCount[dev]=0;
numCores[dev]=0;
#endif
totalConstMem[dev]=(unsigned int)deviceProp.totalConstMem;
sharedMemPerBlock[dev]=(unsigned int)deviceProp.sharedMemPerBlock;
regsPerBlock[dev]=deviceProp.regsPerBlock;
warpSize[dev]=deviceProp.warpSize;
maxThreadsPerBlock[dev]=deviceProp.maxThreadsPerBlock;
maxThreadsDim0[dev]=deviceProp.maxThreadsDim[0];
maxThreadsDim1[dev]=deviceProp.maxThreadsDim[1];
maxThreadsDim2[dev]=deviceProp.maxThreadsDim[2];
maxGridSize0[dev]=deviceProp.maxGridSize[0];
maxGridSize1[dev]=deviceProp.maxGridSize[1];
maxGridSize2[dev]=deviceProp.maxGridSize[2];
memPitch[dev]=(unsigned int)deviceProp.memPitch;
textureAlignment[dev]=(unsigned int)deviceProp.textureAlignment;
clockRate[dev]=deviceProp.clockRate * 1e-6f;
#if CUDART_VERSION >= 2000
deviceOverlap[dev]=deviceProp.deviceOverlap;
#else
deviceOverlap[dev]=false;
#endif
}
}
void gpu::setDeviceCount(){
if (present){
cudaGetDeviceCount(&deviceCount);
} else {
deviceCount=0;
}
}
|
58d7a323fd316ee3363065a514a18806c43ec5bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include <vector>
using std::accumulate;
using std::cout;
using std::generate;
using std::vector;
#define SHMEM_SIZE 256
__global__ void sumReduction(int *v, int *v_r) {
// Allocate shared memory
__shared__ int partial_sum[SHMEM_SIZE];
// Calculate thread ID
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Load elements into shared memory
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// Increase the stride of the access until we exceed the CTA dimensions
for (int s = 1; s < blockDim.x; s *= 2) {
// Change the indexing to be sequential threads
int index = 2 * s * threadIdx.x;
// Each thread does work unless the index goes off the block
if (index < blockDim.x) {
partial_sum[index] += partial_sum[index + s];
}
__syncthreads();
}
  // Let thread 0 of this block write its result to main memory
  // Result is indexed by this block
if (threadIdx.x == 0) {
v_r[blockIdx.x] = partial_sum[0];
}
}
int main() {
// Vector size
int N = 1 << 16;
size_t bytes = N * sizeof(int);
// Host data
vector<int> h_v(N);
vector<int> h_v_r(N);
// Initialize the input data
generate(begin(h_v), end(h_v), []() { return rand() % 10; });
// Allocate device memory
int *d_v, *d_v_r;
hipMalloc(&d_v, bytes);
hipMalloc(&d_v_r, bytes);
// Copy to device
hipMemcpy(d_v, h_v.data(), bytes, hipMemcpyHostToDevice);
// TB Size
const int TB_SIZE = 256;
// Grid Size (No padding)
int GRID_SIZE = N / TB_SIZE;
// Call kernels
hipLaunchKernelGGL(( sumReduction), dim3(GRID_SIZE), dim3(TB_SIZE), 0, 0, d_v, d_v_r);
hipLaunchKernelGGL(( sumReduction), dim3(1), dim3(TB_SIZE), 0, 0, d_v_r, d_v_r);
// Copy to host;
hipMemcpy(h_v_r.data(), d_v_r, bytes, hipMemcpyDeviceToHost);
// Print the result
assert(h_v_r[0] == std::accumulate(begin(h_v), end(h_v), 0));
cout << "COMPLETED SUCCESSFULLY\n";
return 0;
}
| 58d7a323fd316ee3363065a514a18806c43ec5bd.cu | #include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include <vector>
using std::accumulate;
using std::cout;
using std::generate;
using std::vector;
#define SHMEM_SIZE 256
__global__ void sumReduction(int *v, int *v_r) {
// Allocate shared memory
__shared__ int partial_sum[SHMEM_SIZE];
// Calculate thread ID
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Load elements into shared memory
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// Increase the stride of the access until we exceed the CTA dimensions
for (int s = 1; s < blockDim.x; s *= 2) {
// Change the indexing to be sequential threads
int index = 2 * s * threadIdx.x;
// Each thread does work unless the index goes off the block
if (index < blockDim.x) {
partial_sum[index] += partial_sum[index + s];
}
__syncthreads();
}
  // Let thread 0 of this block write its result to main memory
  // Result is indexed by this block
if (threadIdx.x == 0) {
v_r[blockIdx.x] = partial_sum[0];
}
}
int main() {
// Vector size
int N = 1 << 16;
size_t bytes = N * sizeof(int);
// Host data
vector<int> h_v(N);
vector<int> h_v_r(N);
// Initialize the input data
generate(begin(h_v), end(h_v), []() { return rand() % 10; });
// Allocate device memory
int *d_v, *d_v_r;
cudaMalloc(&d_v, bytes);
cudaMalloc(&d_v_r, bytes);
// Copy to device
cudaMemcpy(d_v, h_v.data(), bytes, cudaMemcpyHostToDevice);
// TB Size
const int TB_SIZE = 256;
// Grid Size (No padding)
int GRID_SIZE = N / TB_SIZE;
// Call kernels
sumReduction<<<GRID_SIZE, TB_SIZE>>>(d_v, d_v_r);
sumReduction<<<1, TB_SIZE>>>(d_v_r, d_v_r);
// Copy to host;
cudaMemcpy(h_v_r.data(), d_v_r, bytes, cudaMemcpyDeviceToHost);
// Print the result
assert(h_v_r[0] == std::accumulate(begin(h_v), end(h_v), 0));
cout << "COMPLETED SUCCESSFULLY\n";
return 0;
}
|
9564f7614fafeb1187116f50cf5ac89f1aa794b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ConvSpiking.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../common/util.h"
//#define DEBUG
#define ROW 4
#define COL 2
#define IN_CH 0
#define OUT_CH 4
/*
* blocks : dim3(batch, div, endTime);
* threads : dim3(min(outputDim * outputDim, 1024), remain));
*/
__global__ void g_ConvSpiking_fast_input_response(
bool* inputs,
float** ws,
float** bs,
float* inputs_resp,
int inputDim,
int kernelSize,
int padding,
int outputDim,
int endTime,
int inputAmount,
int outputAmount,
int inputArea,
int responseArea);
/*
* blocks : dim3(batch, outputAmount),
* threads: dim3(min(outputDim * outputDim, 1024));
*/
__global__ void g_ConvSpiking_feedforward(
float* inputs_resp,
bool* outputs,
int* fireCount,
int outputDim,
int endTime,
int outputAmount,
int outputArea,
float vth,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(batch, inputAmount);
* dim3 thread= dim3(min(inputDim * inputDim, 1024), 1);
*/
__global__ void g_ConvSpiking_backpropagation(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* _curDelta,
float** ws,
float* _preDelta,
float* batchSideEffect,
int curDim,
int preDim,
int endTime,
int curAmount,
int kernelSize,
int padding,
int outputArea,
int inputArea,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(batch, outputDim2, outputAmount);
* dim3 thread= min(kernelSize2*inputAmount, 512);
*/
__global__ void g_ConvSpiking_sideEffect(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float** ws,
float vth,
float* batchSideEffect,
float* effectPoly,
int out_size,
int degree,
int inputDim,
int outputDim,
int endTime,
int kernelSize,
int padding,
int inputAmount,
int inputArea,
int outputArea,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(batch, outputAmount*inputAmount, kernelSize * kernelSize);
* dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_ConvSpiking_wgrad(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* batchSideEffect,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int endTime,
int kernelSize,
int padding,
int inputAmount,
int inputArea,
int outputArea,
int curDeltaArea,
int wgradTmpArea,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
*blocks : dim3(kernelAmount2)
*threads : dim3(256)
*shared : sizeof(float) * 256
*/
__global__ void g_ConvSpiking_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea);
void ConvSpiking::calCost()
{
cost->gpuClear();
hipLaunchKernelGGL(( g_getCost_3), dim3(dim3(w.size())), dim3(dim3(32)), sizeof(float) * 32, 0, cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
hipStreamSynchronize(0);
getLastCudaError("ConvSpiking:getCost");
}
void ConvSpiking::feedforward()
{
if((inputs == NULL))
{
printf("ConvSpiking init error\n");
assert(0);
}
int outputDim2 = outputDim * outputDim;
int remain = min(1024 / outputDim2, outputAmount); //1
int div = (outputAmount + remain - 1) / remain;//32
// fast input response: compute the convolved spikes for each time step
hipLaunchKernelGGL(( g_ConvSpiking_fast_input_response), dim3(dim3(batch, div, endTime)), dim3(dim3(min(outputDim2, 1024), remain)), 0, 0,
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
inputs_resp->getDev(),
inputDim,
kernelSize,
padding,
outputDim,
endTime,
inputAmount,
outputAmount,
inputs->getArea(),
inputs_resp->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("ConvSpiking::g_ConvSpiking_fast_input_response");
dim3 thread= dim3(min(outputDim2, 1024), remain);
dim3 block = dim3(batch, div);
hipLaunchKernelGGL(( g_ConvSpiking_feedforward), dim3(block), dim3(thread), 0, 0,
inputs_resp->getDev(),
outputs->getDev(),
fireCount->getDev(),
outputDim,
endTime,
outputAmount,
outputs->getArea(),
threshold,
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("ConvSpiking::g_ConvSpiking_feedforward");
block = dim3(batch, outputAmount);
thread = dim3(min(outputDim2, 1024));
// transform the binary response matrix to the spike times
hipLaunchKernelGGL(( g_response_2_spiketime), dim3(block), dim3(thread), 0, 0,
outputs->getDev(),
outputs_time->getDev(),
outputs->getArea(),
outputDim2,
endTime);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("ConvSpiking:g_response_2_spiketime");
}
void ConvSpiking::backpropagation()
{
hipLaunchKernelGGL(( g_divide_by_threshold), dim3(dim3(batch, outputAmount)), dim3(dim3(min(1024, outputDim*outputDim))), 0, 0, curDelta->getDev(), curDelta->getArea(), curDelta->cols, threshold);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_divide_by_threshold");
if(Config::instance()->getLayerByName(m_name)->m_input == std::string("data"))
return;
int outputDim2 = outputDim * outputDim;
int kernelSize2 = kernelSize * kernelSize;
int threadx = min(512, kernelSize2 * inputAmount);
dim3 block = dim3(batch, outputDim2, outputAmount);
dim3 thread = dim3(threadx);
hipLaunchKernelGGL(( g_ConvSpiking_sideEffect), dim3(block), dim3(thread), sizeof(float)*threadx, 0,
inputs_time->getDev(),
outputs_time->getDev(),
preFireCount->getDev(),
fireCount->getDev(),
w.m_devPoint,
threshold,
sideEffect->getDev(),
effectPoly->getDev(),
        100,  // out_size: number of rows in effectPoly
        5,    // degree: number of polynomial coefficients per row
inputDim,
outputDim,
endTime,
kernelSize,
padding,
inputAmount,
inputs->getArea(),
outputs->getArea(),
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_ConvSpiking_sideEffect");
block = dim3(batch, inputAmount, inputDim * inputDim);
threadx = min(kernelSize * kernelSize * outputAmount, 1024);
thread = dim3(threadx);
hipLaunchKernelGGL(( g_ConvSpiking_backpropagation), dim3(block), dim3(thread), sizeof(float)*threadx, 0,
inputs_time->getDev(),
outputs_time->getDev(),
preFireCount->getDev(),
fireCount->getDev(),
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
sideEffect->getDev(),
outputDim,
inputDim,
endTime,
outputAmount,
kernelSize,
padding,
outputs->getArea(),
inputs->getArea(),
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("ConvSpiking::g_ConvSpiking_backpropagation");
}
/*
* block = dim3(outputAmount, kernelSize * kernelSize * inputAmount);
* thread= dim3(batch);
*/
__global__ void g_ConvSpiking_wgradAdd(
float** _WgradTmp,
float** Wgrad,
float** w,
float* w_sq_sum,
int kernelSize,
int batch,
float lambda,
float beta,
float wlimit,
int wgradTmpArea,
int wArea)
{
extern __shared__ float _sum[];
int ok = blockIdx.x;
int kernelSize2 = kernelSize * kernelSize;
int wgradArea = wArea;
int kid= blockIdx.y % kernelSize2;
int c = blockIdx.y / kernelSize2;
int tid = threadIdx.x;
float* wgradTmp = _WgradTmp[ok];
int skip = c * wgradTmpArea + kid;
_sum[tid] = 0;
__syncthreads();
for(int i = 0; i < batch; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < batch)
{
_sum[threadIdx.x] += wgradTmp[b * kernelSize2 + skip];
}
}
__syncthreads();
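    // parallel tree reduction in shared memory: sum the per-sample partial gradients into _sum[0]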
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip < len))
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
{
float sq_sum = w_sq_sum[ok];
Wgrad[ok][kid + c * wgradArea] = _sum[0] / batch + lambda*beta*(w[ok][kid + c * wArea]/wlimit)*__expf(beta*(sq_sum - 1));
#ifdef DEBUG
// i j ik ok
if(kid == ROW*kernelSize + COL && c == IN_CH && ok == OUT_CH)
printf("Wgrad: %f\n", Wgrad[ok][kid + c * wgradArea]);
#endif
}
}
/*
* block = dim3(outputAmount);
* thread= dim3(inputAmount);
*/
__global__ void g_ConvSpiking_calSquareSum(
float** ws,
float* w_sq_sum,
int weightArea,
int inputAmount,
int kernelSize,
float weight_limit)
{
extern __shared__ float _sum[];
int kernelSize2 = kernelSize * kernelSize;
int ok = blockIdx.x;
int ik = threadIdx.x;
int tid = threadIdx.x;
float* w = ws[ok] + ik * weightArea;
_sum[tid] = 0;
__syncthreads();
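    // one thread per input channel: accumulate that channel's squared, weight-limit-normalized kernel weights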
for(int i = 0; i < kernelSize; ++i){
for(int j = 0; j < kernelSize; ++j){
float weight = w[i * kernelSize + j];
_sum[tid] += (weight/weight_limit) * (weight/weight_limit);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
w_sq_sum[ok] = _sum[0] / (inputAmount * kernelSize2);
}
void ConvSpiking::getGrad()
{
int outputDim2 = outputDim * outputDim;
int kernelSize2 = kernelSize * kernelSize;
dim3 block = dim3(batch, outputAmount * inputAmount, kernelSize2);
int n_threads = min(outputDim2, 512);
dim3 thread = n_threads;
hipFuncSetCacheConfig(g_ConvSpiking_wgrad,hipFuncCachePreferL1);
hipLaunchKernelGGL(( g_ConvSpiking_wgrad), dim3(block), dim3(thread), sizeof(float)*n_threads, 0,
inputs_time->getDev(),
outputs_time->getDev(),
preFireCount->getDev(),
fireCount->getDev(),
sideEffect->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
endTime,
kernelSize,
padding,
inputAmount,
inputs->getArea(),
outputs->getArea(),
curDelta->getArea(),
wgradTmp[0]->getArea(),
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_ConvSpiking_wgrad");
block = dim3(outputAmount);
thread = dim3(inputAmount);
hipLaunchKernelGGL(( g_ConvSpiking_calSquareSum), dim3(block), dim3(thread), sizeof(float) * inputAmount, 0,
w.m_devPoint,
weightSqSum->getDev(),
w[0]->getArea(),
inputAmount,
kernelSize,
weightLimit);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_ConvSpiking_calSquareSum");
block = dim3(outputAmount, kernelSize * kernelSize * inputAmount);
thread = dim3(batch);
hipLaunchKernelGGL(( g_ConvSpiking_wgradAdd), dim3(block), dim3(thread), sizeof(float) * batch, 0,
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
weightSqSum->getDev(),
kernelSize,
batch,
lambda,
beta,
weightLimit,
wgradTmp[0]->getArea(),
w[0]->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_ConvSpiking_wgradAdd");
}
void ConvSpiking::updateWeight(int epoch)
{
dim3 block = outputAmount;
dim3 thread = min(512, w[0]->getLen());
if(Config::instance()->getOptimizerType() == std::string("adam")){
hipLaunchKernelGGL(( g_adam_vecAdd), dim3(block), dim3(thread), 0, Layers::instance()->get_stream(),
g1_w.m_devPoint,
g2_w.m_devPoint,
b1_t->getDev(),
b2_t->getDev(),
wgrad.m_devPoint,
w.m_devPoint,
w[0]->getLen(),
lRate/sqrt((float)epoch+1));
//Config::instance()->getLrate());
}
else{
hipLaunchKernelGGL(( g_sgd_vecAdd), dim3(block), dim3(thread), 0, Layers::instance()->get_stream(),
momentum_w.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
w[0]->getLen(),
Config::instance()->getMomentum(),
lRate/sqrt((float)epoch+1));
//Config::instance()->getLrate());
}
}
ConvSpiking::ConvSpiking(std::string name)
{
m_name = name;
ConfigConvSpiking* config = (ConfigConvSpiking*)Config::instance()->getLayerByName(m_name);
SpikingLayerBase * preLayer = (SpikingLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getSpikingOutputs();
inputs_time = preLayer->getSpikingTimeOutputs();
preDelta = preLayer->getCurDelta();
preFireCount = preLayer->getFireCount();
inputAmount = preLayer->outputAmount;
outputAmount = config->m_amount;
kernelSize = config->m_kernelSize;
padding = config->m_padding;
inputDim = preLayer->outputDim;
outputDim = (inputDim + 1 - kernelSize) + padding * 2;
batch = Config::instance()->getBatchSize();
endTime = Config::instance()->getEndTime();
lambda = Config::instance()->getLambda();
beta = Config::instance()->getBeta();
T_REFRAC = config->m_t_ref;
TAU_M = config->m_tau_m;
TAU_S = config->m_tau_s;
threshold = config->m_vth;
lRate = config->m_lrate;
weightLimit = Config::instance()->getWeightLimit();
outputs = new cuMatrix<bool>(batch, endTime * outputDim * outputDim, outputAmount);
outputs_time = new cuMatrix<int>(batch, outputDim * outputDim * endTime, outputAmount);
inputs_resp = new cuMatrix<float>(batch, endTime * outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
fireCount= new cuMatrix<int>(batch, outputDim * outputDim, outputAmount);
weightSqSum = new cuMatrix<float>(outputAmount, 1, 1);
sideEffect = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
for(int i = 0; i < outputAmount; i++){
w.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
b.push_back(new cuMatrix<float>(1, 1, 1));
wgrad.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
bgrad.push_back(new cuMatrix<float>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<float>(batch, kernelSize * kernelSize, inputAmount));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount; i++){
momentum_w.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
momentum_b.push_back(new cuMatrix<float>(1, 1, 1));
g1_w.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount)); // for adam
g1_b.push_back(new cuMatrix<float>(1, 1, 1));
g2_w.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
g2_b.push_back(new cuMatrix<float>(1, 1, 1));
}
effectPoly = new cuMatrix<float>(100, 5, 1);
std::string filename=std::string("./Effect_Ratio_file/p_Tau_")+std::to_string(int(TAU_M))+std::string("_")+std::to_string(endTime)+std::string("-100.txt");
loadPoly(filename, 100, 5, effectPoly);
momentum_w.toGpu();
momentum_b.toGpu();
g1_w.toGpu();
g1_b.toGpu();
g2_w.toGpu();
g2_b.toGpu();
b1_t = new cuMatrix<float>(outputAmount, 1, 1);
b2_t = new cuMatrix<float>(outputAmount, 1, 1);
for(int i = 0; i < outputAmount; i++){
b1_t->getHost()[i] = 0.9;
b2_t->getHost()[i] = 0.999;
}
b1_t->toGpu();
b2_t->toGpu();
this->initRandom();
output_train_ref = NULL;
output_test_ref = NULL;
if(Config::instance()->getIsGradientChecking())
this->loadRef(); // for verification purpose
Layers::instance()->set(m_name, this);
}
void ConvSpiking::save(FILE* file)
{
for(int a = 0; a < (int)w.size(); a++){
w[a]->toCpu();
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fprintf(file, "%f ", w[a]->get(i, j, c));
}
}
}
}
for(int a = 0; a < (int)w.size(); a++){
b[a]->toCpu();
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fprintf(file, "%f ", b[a]->get(i, j, c));
}
}
}
}
}
void ConvSpiking::clearMomentum()
{
for(int i = 0; i < (int)momentum_b.size(); i++){
momentum_b[i]->gpuClear();
}
for(int i = 0; i < (int)momentum_w.size(); i++){
momentum_w[i]->gpuClear();
}
}
//* load the reference weights and output spikes for verification
void ConvSpiking::loadRef()
{
if(batch != 1){
printf("Only do the verification for one batch and one sample!\n");
exit(0);
}
ConfigConvSpiking * config = (ConfigConvSpiking*)Config::instance()->getLayerByName(m_name);
if(config->m_ref_weight_path != std::string("NULL")){
for(int i = 0; i < outputAmount; ++i)
w_ref.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
initFromDumpfile(config->m_ref_weight_path, w_ref);
}
if(config->m_ref_output_train_path != std::string("NULL")){
output_train_ref = new cuMatrix<bool>(1, endTime * outputDim * outputDim, outputAmount);
readSpikesFromDumpfile(config->m_ref_output_train_path, output_train_ref);
}
if(config->m_ref_output_test_path != std::string("NULL")){
output_test_ref = new cuMatrix<bool>(1, endTime * outputDim * outputDim, outputAmount);
readSpikesFromDumpfile(config->m_ref_output_test_path, output_test_ref);
}
}
void ConvSpiking::initRandom()
{
ConfigConvSpiking * config = (ConfigConvSpiking*)Config::instance()->getLayerByName(m_name);
float initW = config->m_initW;
if(config->isGaussian()){
for(int i = 0; i < (int)w.size(); i++){
float epsilon = initW;
for(int c = 0; c < w[i]->channels; c++)
{
createGaussian(w[i]->getHost() + c * w[i]->getArea(),
kernelSize, kernelSize, w[i]->channels, epsilon);
}
w[i]->toGpu();
}
}
else if(config->isExternal()){
initFromDumpfile(config->m_weightPath, w);
}
else{
for(int i = 0; i < (int)w.size(); i++){
for(int j = 0; j < w[i]->getLen(); j++){
w[i]->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
}
w[i]->toGpu();
}
}
}
void ConvSpiking::initFromCheckpoint(FILE* file)
{
float val = 0;
for(size_t a = 0; a < w.size(); a++){
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
LOG("scanf fail", "result/log.txt");
}
w[a]->set(i, j, c, val);
}
}
}
w[a]->toGpu();
}
for(size_t a = 0; a < b.size(); a++){
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
LOG("scanf fail", "result/log.txt");
}
b[a]->set(i, j, c, val);
}
}
}
b[a]->toGpu();
}
}
//* initialize the weights from the dump file by the Matlab sim
void ConvSpiking::initFromDumpfile(const std::string& filename, cuMatrixVector<float>& cuW)
{
assert(filename != std::string("NULL"));
FILE *file = fopen(filename.c_str(), "r");
char logStr[256];
if(file == NULL){
sprintf(logStr, "Cannot open file: %s", filename.c_str()); LOG(logStr, "Result/log.txt");
assert(0);
}
float val = 0;
for(size_t a = 0; a < cuW.size(); a++){
for(int c = 0; c < cuW[a]->channels; c++){
for(int i = 0; i < cuW[a]->rows; i++){
for(int j = 0; j < cuW[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
sprintf(logStr, "Reading weight failed for %s @row: %d\t@col: %d\t@channel: %d\t@outputAmount: %d", filename.c_str(), i, j, c, int(a));
LOG(logStr, "Result/log.txt");
assert(0);
}
cuW[a]->set(i, j, c, val);
}
}
}
cuW[a]->toGpu();
}
}
void ConvSpiking::verify(const std::string& phrase)
{
    printf("Verify for the layer: %s at %s phase.\n", m_name.c_str(), phrase.c_str());
if(phrase == std::string("train"))
{
if(output_train_ref != NULL){
outputs->toCpu();
checkMatrixIsSame(output_train_ref, outputs, outputDim*outputDim);
}
}
else if(phrase == std::string("test"))
{
if(!w_ref.empty()){
for(int i = 0; i < outputAmount; ++i){
w[i]->toCpu();
checkMatrixIsSame(w_ref[i], w[i], i);
}
}
if(output_test_ref != NULL){
outputs->toCpu();
checkMatrixIsSame(output_test_ref, outputs, outputDim*outputDim);
}
}
    printf("Verification for the layer: %s at %s phase. Passed!!\n", m_name.c_str(), phrase.c_str());
}
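/*
 * Sum the weighted input spikes covered by the kernel window of output map `ok`
 * at output position (x, y) for time step t, accumulated over all input channels.
 */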
__device__ float d_ConvSpiking_accumulate_spikes(
int inputDim,
int inputArea,
int inputAmount,
int kernelSize,
bool* inputs,
int x,
int y,
int padding,
int ok,
int batchId,
float ** ws,
int t,
int endTime)
{
int inputSize2 = inputDim * inputDim;
int kernelSize2 = kernelSize * kernelSize;
float response = 0.0f;
// for all inputAmount (channels)
for(int c = 0; c < inputAmount; c++){
bool* curInput = inputs + c * inputArea + batchId * inputSize2 * endTime;
float* w = ws[ok] + c * kernelSize2;
for(int i = 0; i < kernelSize; i++){
int xx = x + i - padding;
for(int j = 0; j < kernelSize; j++){
int yy = y + j - padding;
if(xx >= 0 && xx < inputDim && yy >= 0 && yy < inputDim){
int i_idx = xx * inputDim + yy;
response += curInput[i_idx + t * inputSize2] * w[i * kernelSize + j];
}
}
}
}
return response;
}
/*
* dim3 block = dim3(batch, div, endTime);
* dim3 thread= dim3(min(outputDim * outputDim, 1024), remain));
*/
__global__ void g_ConvSpiking_fast_input_response(
bool* inputs,
float** ws,
float** bs,
float* inputs_resp,
int inputDim,
int kernelSize,
int padding,
int outputDim,
int endTime,
int inputAmount,
int outputAmount,
int inputArea,
int responseArea)
{
int batchId = blockIdx.x;
int t = blockIdx.z;
int ok = blockIdx.y * blockDim.y + threadIdx.y;
if(ok >= outputAmount)return;
int outputSize2 = outputDim * outputDim;
float* curResp = inputs_resp + ok * responseArea + batchId * outputSize2 * endTime;
for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
{
int o_idx = tidx + threadIdx.x;
if(o_idx < outputSize2)
{
int x = o_idx / outputDim;
int y = o_idx % outputDim;
curResp[o_idx + t * outputSize2] = d_ConvSpiking_accumulate_spikes(inputDim, inputArea, inputAmount, kernelSize, inputs, x, y, padding, ok, batchId, ws, t, endTime);
}
}
}
/*
* dim3 block = dim3(batch, div);
* dim3 thread= dim3(min(outputDim * outputDim, 1024), remain));
*/
__global__ void g_ConvSpiking_feedforward(
float* inputs_resp,
bool* outputs,
int* fireCount,
int outputDim,
int endTime,
int outputAmount,
int outputArea,
float vth,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int ok = blockIdx.y * blockDim.y + threadIdx.y;
if(ok >= outputAmount)return;
int outputSize2 = outputDim * outputDim;
bool* curOutput = outputs + ok * outputArea + batchId * outputSize2 * endTime;
int* curFireCount = fireCount + ok * outputArea / endTime + batchId * outputSize2;
float* curResponse = inputs_resp + ok * outputArea + batchId * outputSize2 * endTime;
for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
{
int o_idx = tidx + threadIdx.x;
if(o_idx < outputSize2)
{
float v = 0.0f;
float ep = 0.0f;
float threshold = vth;
int t_ref= 0;
float response = 0.0f;
int fire_count = 0;
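            // leaky integrate-and-fire dynamics over time: leak v and ep, integrate the precomputed
            // synaptic response, fire when v crosses the threshold, then reset v and hold it at zero
            // for T_REFRAC steps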
for(int t = 0; t < endTime; t++){
v -= v / TAU_M;
ep -= ep / TAU_S;
if(t == 0)
{
curOutput[o_idx + t * outputSize2] = false;
continue;
}
                // get the convolved input response from the previous time step
response = curResponse[o_idx + (t - 1)*outputSize2];
ep += response;
v += ep/TAU_S;
if(t_ref > 0){
v = 0;
t_ref--;
}
// Fire or not
curOutput[o_idx + t * outputSize2] = v > threshold ? true : false;
t_ref = v > threshold ? T_REFRAC : t_ref;
fire_count += v > threshold ? 1 : 0;
v = v > threshold ? 0 : v;
}
curFireCount[o_idx] = fire_count;
}
}
}
/*
* dim3 block = dim3(batch, inputAmount, inputDim * inputDim);
* dim3 thread= dim3(min(kernelSize * kernelSize * outputAmount, 1024));
*/
__global__ void g_ConvSpiking_backpropagation(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* _curDelta,
float** ws,
float* _preDelta,
float* batchSideEffect,
int curDim,
int preDim,
int endTime,
int curAmount,
int kernelSize,
int padding,
int outputArea,
int inputArea,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
extern __shared__ float _sum[];
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
int batchId = blockIdx.x;
int ik = blockIdx.y;
int i_idx = blockIdx.z;
int curSize2 = curDim * curDim;
int preSize2 = preDim * preDim;
int kernelSize2 = kernelSize * kernelSize;
int curArea = outputArea / endTime;
int preArea = inputArea / endTime;
int* input_time = _inputs_time + inputArea * ik + batchId * preSize2 * endTime;
int* input_fireCount = batchPreFireCount + ik * inputArea / endTime + batchId * preSize2;
float *preDelta = _preDelta + ik * preArea + batchId * preSize2;
int i = i_idx / preDim;
int j = i_idx % preDim;
int totalBackAmount = curAmount * kernelSize2;
for (int tidx = 0; tidx < totalBackAmount; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < totalBackAmount) {
int ok = idx / kernelSize2;
float *curDelta = _curDelta + ok * curArea + batchId * curSize2;
float *w = ws[ok] + ik * kernelSize2;
int* output_time = _outputs_time + outputArea * ok + batchId * curSize2 * endTime;
int* output_fireCount = batchFireCount + ok * outputArea / endTime + batchId * curSize2;
float* side_effect = batchSideEffect + ok * curSize2 + batchId * curSize2;
int x = (idx % kernelSize2) / kernelSize;
int y = (idx % kernelSize2) % kernelSize;
int cx = i - x + padding;
int cy = j - y + padding;
if(cx >= 0 && cx < curDim && cy >= 0 && cy < curDim) {
int o_idx = cx * curDim + cy;
float e = d_Spiking_accumulate_effect(output_time, input_time, output_fireCount[o_idx], input_fireCount[i_idx], o_idx, i_idx, curSize2, preSize2, endTime, T_REFRAC, TAU_M, TAU_S);
int o_cnt = output_fireCount[o_idx];
int i_cnt = input_fireCount[i_idx];
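                // weight the propagated delta by the accumulated effect per input spike
                // (falls back to 0.5 when either side produced no spikes)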
float ratio = i_cnt == 0 || o_cnt == 0 ? 0.5 : e / float(i_cnt);
float s_effect = side_effect[o_idx];
_sum[threadIdx.x] += curDelta[cx * curDim + cy] * w[x * kernelSize + y] * ratio / (1-s_effect);
}
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip < len))
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
preDelta[i_idx] = _sum[0];
}
/*
* dim3 block = dim3(batch, outputDim2, outputAmount);
* dim3 thread= min(kernelSize2*inputAmount, 512);
*/
__global__ void g_ConvSpiking_sideEffect(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float** ws,
float vth,
float* batchSideEffect,
float* effectPoly,
int out_size,
int degree,
int inputDim,
int outputDim,
int endTime,
int kernelSize,
int padding,
int inputAmount,
int inputArea,
int outputArea,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
extern __shared__ float _sum[];
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
int batchId = blockIdx.x;
int o_idx = blockIdx.y;
int ok = blockIdx.z;
int inputSize2 = inputDim * inputDim;
int outputSize2 = outputDim * outputDim;
int kernelSize2 = kernelSize * kernelSize;
int* output_time = _outputs_time + outputArea * ok + batchId * outputSize2 * endTime;
int* output_fireCount = batchFireCount + ok * outputArea / endTime + batchId * outputSize2;
float* side_effect = batchSideEffect + ok * outputSize2 + batchId * outputSize2;
int o_cnt = output_fireCount[o_idx];
int len = kernelSize2*inputAmount;
for(int tidx = 0; tidx < len; tidx += blockDim.x)
{
int idx = tidx + threadIdx.x;
if(idx < len){
int ik = idx / kernelSize2;
float* w = ws[ok] + ik * kernelSize2;
int* input_time = _inputs_time + inputArea * ik + batchId * inputSize2 * endTime;
int* input_fireCount= batchPreFireCount + ik * inputArea / endTime + batchId * inputSize2;
int k_id = idx % kernelSize2;
int i = k_id / kernelSize;
int j = k_id % kernelSize;
int x = o_idx / outputDim;
int y = o_idx % outputDim;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputDim && yy >= 0 && yy < inputDim){
int i_idx = xx * inputDim + yy;
float e = d_Spiking_accumulate_effect(output_time, input_time, o_cnt, input_fireCount[i_idx], o_idx, i_idx, outputSize2, inputSize2, endTime, T_REFRAC, TAU_M, TAU_S);
float ratio;
if(o_cnt == 0)
ratio=0;
else{
int o_cnt_tmp = o_cnt;
o_cnt_tmp = o_cnt_tmp > 0 ? o_cnt_tmp : 1;
o_cnt_tmp = o_cnt_tmp <= out_size ? o_cnt_tmp : out_size;
int i_cnt = input_fireCount[i_idx];
i_cnt = i_cnt <= out_size ? i_cnt : out_size;
i_cnt = i_cnt > 0 ? i_cnt : 1;
ratio=0;
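                    // accumulate the term-by-term derivative of the fitted polynomial
                    // (coefficients in the effectPoly row selected by the clamped input fire count),
                    // evaluated at the clamped output fire count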
for(int i=0; i< degree; i++){
float base = (float)o_cnt_tmp;
float exponent = (float)(degree-2-i);
float coef=(float)(degree-i-1);
float v=coef*powf(base, exponent)*effectPoly[(i_cnt-1)*degree+i];
ratio+=v;
}
}
if(w[i*kernelSize+j]==0){
printf("side effect: weight is zero\n");
}
_sum[tid] += w[i * kernelSize + j] * ratio;
}
}
}
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0){
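        // the reduced sum is divided by vth and truncated to an int, so sums smaller than vth add nothing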
int s= _sum[0] / vth;
side_effect[o_idx] += s;
}
}
/*
* dim3 block = dim3(batch, outputAmount*inputAmount, kernelSize * kernelSize);
* dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_ConvSpiking_wgrad(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* batchSideEffect,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int endTime,
int kernelSize,
int padding,
int inputAmount,
int inputArea,
int outputArea,
int curDeltaArea,
int wgradTmpArea,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
extern __shared__ float _sum[];
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
int ok = blockIdx.y / inputAmount;
int ik = blockIdx.y % inputAmount;
int k_id = blockIdx.z;
int batchId = blockIdx.x;
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
int outputSize2 = curDeltaSize2;
int kernelSize2 = kernelSize * kernelSize;
float* wgrad = wgradTmp[ok] + ik * wgradTmpArea + batchId * kernelSize2;
int* input_time = _inputs_time + inputArea * ik + batchId * inputSize2 * endTime;
int* output_time = _outputs_time + outputArea * ok + batchId * outputSize2 * endTime;
int* input_fireCount = batchPreFireCount + ik * inputArea / endTime + batchId * inputSize2;
int* output_fireCount = batchFireCount + ok * outputArea / endTime + batchId * outputSize2;
float* side_effect = batchSideEffect + ok * outputSize2 + batchId * outputSize2;
float* curDelta = _curDelta + ok * curDeltaArea + batchId * curDeltaSize2;
for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
{
int o_idx = tidx + threadIdx.x;
if(o_idx < outputSize2)
{
int i = k_id / kernelSize;
int j = k_id % kernelSize;
int x = o_idx / curDeltaDim;
int y = o_idx % curDeltaDim;
int cx = i + x - padding;
int cy = j + y - padding;
if(cx >= 0 && cy >= 0 && cx < inputDim && cy < inputDim){
int i_idx = cx * inputDim + cy;
float e = d_Spiking_accumulate_effect(output_time, input_time, output_fireCount[o_idx], input_fireCount[i_idx], o_idx, i_idx, outputSize2, inputSize2, endTime, T_REFRAC, TAU_M, TAU_S);
float s_effect = side_effect[o_idx];
float val= e * curDelta[x * curDeltaDim + y]/(1-s_effect) ;
_sum[tid] += val ;
#ifdef DEBUG
if(i == ROW && j == COL && ik == IN_CH && ok == OUT_CH)
printf("Collect x= %d; y = %d; Acc effect: %f\tdelta= %f\n", x,y,e,curDelta[x*curDeltaDim + y]);
#endif
}
}
}
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len){
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0){
wgrad[k_id] = _sum[0];
}
}
/*
* blocks : dim3(kernelAmount2)
* threads : dim3(256)
* shared : sizeof(float) * 256
*/
__global__ void g_ConvSpiking_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea)
{
extern __shared__ float _sum[];
int k2 = blockIdx.x;
_sum[threadIdx.x] = 0.0;
__syncthreads();
int deltaSize2 = deltaSize * deltaSize;
int tlen = deltaSize2 * batch;
int skip = deltaArea * k2;
for(int i = 0; i < tlen; i += blockDim.x)
{
int idx = i + threadIdx.x;
if(idx < tlen)
{
_sum[threadIdx.x] += delta[idx + skip];
}
}
__syncthreads();
int len = blockDim.x;
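    // unlike the other reductions in this file, threads past the active half return early below,
    // so the remaining __syncthreads() calls are reached by only part of the block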
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < skip && (threadIdx.x + skip < len))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
else{
return;
}
len = skip;
}
if(threadIdx.x == 0)
{
bgrad[k2][0] = _sum[0] / batch;
}
}
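//* load the polynomial coefficients used by g_ConvSpiking_sideEffect:
//* the file holds out_size lines with degree whitespace-separated values each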
void ConvSpiking::loadPoly(std::string& filename, int out_size, int degree, cuMatrix<float>* poly){
ifstream f_in(filename.c_str());
if(!f_in.is_open()){
printf("Cannot open the file: %s\n", filename.c_str());
exit(EXIT_FAILURE);
}
float p;
std::string data;
for(int i=0;i<out_size;i++){
getline(f_in, data);
std::istringstream iss(data);
for(int j=0;j<degree;j++){
iss>>p;
//std::cout<<ER<<std::endl;
poly->getHost()[i*degree+j] = p;
}
}
f_in.close();
poly->toGpu();
}
| 9564f7614fafeb1187116f50cf5ac89f1aa794b9.cu | #include "ConvSpiking.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../common/util.h"
//#define DEBUG
#define ROW 4
#define COL 2
#define IN_CH 0
#define OUT_CH 4
/*
* blocks : dim3(batch, div, endTime);
* threads : dim3(min(outputDim * outputDim, 1024), remain));
*/
__global__ void g_ConvSpiking_fast_input_response(
bool* inputs,
float** ws,
float** bs,
float* inputs_resp,
int inputDim,
int kernelSize,
int padding,
int outputDim,
int endTime,
int inputAmount,
int outputAmount,
int inputArea,
int responseArea);
/*
* blocks : dim3(batch, outputAmount),
* threads: dim3(min(outputDim * outputDim, 1024));
*/
__global__ void g_ConvSpiking_feedforward(
float* inputs_resp,
bool* outputs,
int* fireCount,
int outputDim,
int endTime,
int outputAmount,
int outputArea,
float vth,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
 * dim3 block = dim3(batch, inputAmount, inputDim * inputDim);
 * dim3 thread= dim3(min(kernelSize * kernelSize * outputAmount, 1024));
*/
__global__ void g_ConvSpiking_backpropagation(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* _curDelta,
float** ws,
float* _preDelta,
float* batchSideEffect,
int curDim,
int preDim,
int endTime,
int curAmount,
int kernelSize,
int padding,
int outputArea,
int inputArea,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(batch, outputDim2, outputAmount);
* dim3 thread= min(kernelSize2*inputAmount, 512);
*/
__global__ void g_ConvSpiking_sideEffect(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float** ws,
float vth,
float* batchSideEffect,
float* effectPoly,
int out_size,
int degree,
int inputDim,
int outputDim,
int endTime,
int kernelSize,
int padding,
int inputAmount,
int inputArea,
int outputArea,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
* dim3 block = dim3(batch, outputAmount*inputAmount, kernelSize * kernelSize);
* dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_ConvSpiking_wgrad(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* batchSideEffect,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int endTime,
int kernelSize,
int padding,
int inputAmount,
int inputArea,
int outputArea,
int curDeltaArea,
int wgradTmpArea,
int T_REFRAC,
float TAU_M,
float TAU_S);
/*
*blocks : dim3(kernelAmount2)
*threads : dim3(256)
*shared : sizeof(float) * 256
*/
__global__ void g_ConvSpiking_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea);
void ConvSpiking::calCost()
{
cost->gpuClear();
g_getCost_3<<<dim3(w.size()), dim3(32), sizeof(float) * 32>>>(cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
cudaStreamSynchronize(0);
getLastCudaError("ConvSpiking:getCost");
}
void ConvSpiking::feedforward()
{
if((inputs == NULL))
{
printf("ConvSpiking init error\n");
assert(0);
}
int outputDim2 = outputDim * outputDim;
    int remain = min(1024 / outputDim2, outputAmount); // output maps packed along a block's y-dimension (assumes outputDim2 <= 1024)
    int div = (outputAmount + remain - 1) / remain;    // blocks along y needed to cover all output maps
// fast input response: compute the convolved spikes for each time step
g_ConvSpiking_fast_input_response<<<dim3(batch, div, endTime), dim3(min(outputDim2, 1024), remain)>>>(
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
inputs_resp->getDev(),
inputDim,
kernelSize,
padding,
outputDim,
endTime,
inputAmount,
outputAmount,
inputs->getArea(),
inputs_resp->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("ConvSpiking::g_ConvSpiking_fast_input_response");
dim3 thread= dim3(min(outputDim2, 1024), remain);
dim3 block = dim3(batch, div);
g_ConvSpiking_feedforward<<<block, thread>>>(
inputs_resp->getDev(),
outputs->getDev(),
fireCount->getDev(),
outputDim,
endTime,
outputAmount,
outputs->getArea(),
threshold,
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("ConvSpiking::g_ConvSpiking_feedforward");
block = dim3(batch, outputAmount);
thread = dim3(min(outputDim2, 1024));
// transform the binary response matrix to the spike times
g_response_2_spiketime<<<block, thread>>>(
outputs->getDev(),
outputs_time->getDev(),
outputs->getArea(),
outputDim2,
endTime);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("ConvSpiking:g_response_2_spiketime");
}
void ConvSpiking::backpropagation()
{
g_divide_by_threshold<<<dim3(batch, outputAmount), dim3(min(1024, outputDim*outputDim))>>>(curDelta->getDev(), curDelta->getArea(), curDelta->cols, threshold);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_divide_by_threshold");
if(Config::instance()->getLayerByName(m_name)->m_input == std::string("data"))
return;
int outputDim2 = outputDim * outputDim;
int kernelSize2 = kernelSize * kernelSize;
int threadx = min(512, kernelSize2 * inputAmount);
dim3 block = dim3(batch, outputDim2, outputAmount);
dim3 thread = dim3(threadx);
g_ConvSpiking_sideEffect<<<block, thread, sizeof(float)*threadx>>>(
inputs_time->getDev(),
outputs_time->getDev(),
preFireCount->getDev(),
fireCount->getDev(),
w.m_devPoint,
threshold,
sideEffect->getDev(),
effectPoly->getDev(),
        100,  // out_size: number of rows in effectPoly
        5,    // degree: number of polynomial coefficients per row
inputDim,
outputDim,
endTime,
kernelSize,
padding,
inputAmount,
inputs->getArea(),
outputs->getArea(),
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_ConvSpiking_sideEffect");
block = dim3(batch, inputAmount, inputDim * inputDim);
threadx = min(kernelSize * kernelSize * outputAmount, 1024);
thread = dim3(threadx);
g_ConvSpiking_backpropagation<<<block, thread, sizeof(float)*threadx>>>(
inputs_time->getDev(),
outputs_time->getDev(),
preFireCount->getDev(),
fireCount->getDev(),
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
sideEffect->getDev(),
outputDim,
inputDim,
endTime,
outputAmount,
kernelSize,
padding,
outputs->getArea(),
inputs->getArea(),
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("ConvSpiking::g_ConvSpiking_backpropagation");
}
/*
* block = dim3(outputAmount, kernelSize * kernelSize * inputAmount);
* thread= dim3(batch);
*/
__global__ void g_ConvSpiking_wgradAdd(
float** _WgradTmp,
float** Wgrad,
float** w,
float* w_sq_sum,
int kernelSize,
int batch,
float lambda,
float beta,
float wlimit,
int wgradTmpArea,
int wArea)
{
extern __shared__ float _sum[];
int ok = blockIdx.x;
int kernelSize2 = kernelSize * kernelSize;
int wgradArea = wArea;
int kid= blockIdx.y % kernelSize2;
int c = blockIdx.y / kernelSize2;
int tid = threadIdx.x;
float* wgradTmp = _WgradTmp[ok];
int skip = c * wgradTmpArea + kid;
_sum[tid] = 0;
__syncthreads();
for(int i = 0; i < batch; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < batch)
{
_sum[threadIdx.x] += wgradTmp[b * kernelSize2 + skip];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip < len))
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
{
float sq_sum = w_sq_sum[ok];
Wgrad[ok][kid + c * wgradArea] = _sum[0] / batch + lambda*beta*(w[ok][kid + c * wArea]/wlimit)*__expf(beta*(sq_sum - 1));
#ifdef DEBUG
// i j ik ok
if(kid == ROW*kernelSize + COL && c == IN_CH && ok == OUT_CH)
printf("Wgrad: %f\n", Wgrad[ok][kid + c * wgradArea]);
#endif
}
}
/*
* block = dim3(outputAmount);
* thread= dim3(inputAmount);
*/
__global__ void g_ConvSpiking_calSquareSum(
float** ws,
float* w_sq_sum,
int weightArea,
int inputAmount,
int kernelSize,
float weight_limit)
{
extern __shared__ float _sum[];
int kernelSize2 = kernelSize * kernelSize;
int ok = blockIdx.x;
int ik = threadIdx.x;
int tid = threadIdx.x;
float* w = ws[ok] + ik * weightArea;
_sum[tid] = 0;
__syncthreads();
for(int i = 0; i < kernelSize; ++i){
for(int j = 0; j < kernelSize; ++j){
float weight = w[i * kernelSize + j];
_sum[tid] += (weight/weight_limit) * (weight/weight_limit);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
w_sq_sum[ok] = _sum[0] / (inputAmount * kernelSize2);
}
void ConvSpiking::getGrad()
{
int outputDim2 = outputDim * outputDim;
int kernelSize2 = kernelSize * kernelSize;
dim3 block = dim3(batch, outputAmount * inputAmount, kernelSize2);
int n_threads = min(outputDim2, 512);
dim3 thread = n_threads;
cudaFuncSetCacheConfig(g_ConvSpiking_wgrad,cudaFuncCachePreferL1);
g_ConvSpiking_wgrad<<<block, thread, sizeof(float)*n_threads>>>(
inputs_time->getDev(),
outputs_time->getDev(),
preFireCount->getDev(),
fireCount->getDev(),
sideEffect->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
endTime,
kernelSize,
padding,
inputAmount,
inputs->getArea(),
outputs->getArea(),
curDelta->getArea(),
wgradTmp[0]->getArea(),
T_REFRAC,
TAU_M,
TAU_S);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_ConvSpiking_wgrad");
block = dim3(outputAmount);
thread = dim3(inputAmount);
g_ConvSpiking_calSquareSum<<<block, thread, sizeof(float) * inputAmount>>>(
w.m_devPoint,
weightSqSum->getDev(),
w[0]->getArea(),
inputAmount,
kernelSize,
weightLimit);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_ConvSpiking_calSquareSum");
block = dim3(outputAmount, kernelSize * kernelSize * inputAmount);
thread = dim3(batch);
g_ConvSpiking_wgradAdd<<<block, thread, sizeof(float) * batch>>>(
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
weightSqSum->getDev(),
kernelSize,
batch,
lambda,
beta,
weightLimit,
wgradTmp[0]->getArea(),
w[0]->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_ConvSpiking_wgradAdd");
}
void ConvSpiking::updateWeight(int epoch)
{
dim3 block = outputAmount;
dim3 thread = min(512, w[0]->getLen());
if(Config::instance()->getOptimizerType() == std::string("adam")){
g_adam_vecAdd<<<block, thread, 0, Layers::instance()->get_stream()>>>(
g1_w.m_devPoint,
g2_w.m_devPoint,
b1_t->getDev(),
b2_t->getDev(),
wgrad.m_devPoint,
w.m_devPoint,
w[0]->getLen(),
lRate/sqrt((float)epoch+1));
//Config::instance()->getLrate());
}
else{
g_sgd_vecAdd<<<block, thread, 0, Layers::instance()->get_stream()>>>(
momentum_w.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
w[0]->getLen(),
Config::instance()->getMomentum(),
lRate/sqrt((float)epoch+1));
//Config::instance()->getLrate());
}
}
ConvSpiking::ConvSpiking(std::string name)
{
m_name = name;
ConfigConvSpiking* config = (ConfigConvSpiking*)Config::instance()->getLayerByName(m_name);
SpikingLayerBase * preLayer = (SpikingLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getSpikingOutputs();
inputs_time = preLayer->getSpikingTimeOutputs();
preDelta = preLayer->getCurDelta();
preFireCount = preLayer->getFireCount();
inputAmount = preLayer->outputAmount;
outputAmount = config->m_amount;
kernelSize = config->m_kernelSize;
padding = config->m_padding;
inputDim = preLayer->outputDim;
outputDim = (inputDim + 1 - kernelSize) + padding * 2;
batch = Config::instance()->getBatchSize();
endTime = Config::instance()->getEndTime();
lambda = Config::instance()->getLambda();
beta = Config::instance()->getBeta();
T_REFRAC = config->m_t_ref;
TAU_M = config->m_tau_m;
TAU_S = config->m_tau_s;
threshold = config->m_vth;
lRate = config->m_lrate;
weightLimit = Config::instance()->getWeightLimit();
outputs = new cuMatrix<bool>(batch, endTime * outputDim * outputDim, outputAmount);
outputs_time = new cuMatrix<int>(batch, outputDim * outputDim * endTime, outputAmount);
inputs_resp = new cuMatrix<float>(batch, endTime * outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
fireCount= new cuMatrix<int>(batch, outputDim * outputDim, outputAmount);
weightSqSum = new cuMatrix<float>(outputAmount, 1, 1);
sideEffect = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
for(int i = 0; i < outputAmount; i++){
w.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
b.push_back(new cuMatrix<float>(1, 1, 1));
wgrad.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
bgrad.push_back(new cuMatrix<float>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<float>(batch, kernelSize * kernelSize, inputAmount));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount; i++){
momentum_w.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
momentum_b.push_back(new cuMatrix<float>(1, 1, 1));
g1_w.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount)); // for adam
g1_b.push_back(new cuMatrix<float>(1, 1, 1));
g2_w.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
g2_b.push_back(new cuMatrix<float>(1, 1, 1));
}
effectPoly = new cuMatrix<float>(100, 5, 1);
std::string filename=std::string("./Effect_Ratio_file/p_Tau_")+std::to_string(int(TAU_M))+std::string("_")+std::to_string(endTime)+std::string("-100.txt");
loadPoly(filename, 100, 5, effectPoly);
momentum_w.toGpu();
momentum_b.toGpu();
g1_w.toGpu();
g1_b.toGpu();
g2_w.toGpu();
g2_b.toGpu();
b1_t = new cuMatrix<float>(outputAmount, 1, 1);
b2_t = new cuMatrix<float>(outputAmount, 1, 1);
for(int i = 0; i < outputAmount; i++){
b1_t->getHost()[i] = 0.9;
b2_t->getHost()[i] = 0.999;
}
b1_t->toGpu();
b2_t->toGpu();
this->initRandom();
output_train_ref = NULL;
output_test_ref = NULL;
if(Config::instance()->getIsGradientChecking())
this->loadRef(); // for verification purpose
Layers::instance()->set(m_name, this);
}
void ConvSpiking::save(FILE* file)
{
for(int a = 0; a < (int)w.size(); a++){
w[a]->toCpu();
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fprintf(file, "%f ", w[a]->get(i, j, c));
}
}
}
}
for(int a = 0; a < (int)w.size(); a++){
b[a]->toCpu();
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fprintf(file, "%f ", b[a]->get(i, j, c));
}
}
}
}
}
void ConvSpiking::clearMomentum()
{
for(int i = 0; i < (int)momentum_b.size(); i++){
momentum_b[i]->gpuClear();
}
for(int i = 0; i < (int)momentum_w.size(); i++){
momentum_w[i]->gpuClear();
}
}
//* load the reference weights and output spikes for verification
void ConvSpiking::loadRef()
{
if(batch != 1){
printf("Only do the verification for one batch and one sample!\n");
exit(0);
}
ConfigConvSpiking * config = (ConfigConvSpiking*)Config::instance()->getLayerByName(m_name);
if(config->m_ref_weight_path != std::string("NULL")){
for(int i = 0; i < outputAmount; ++i)
w_ref.push_back(new cuMatrix<float>(kernelSize, kernelSize, inputAmount));
initFromDumpfile(config->m_ref_weight_path, w_ref);
}
if(config->m_ref_output_train_path != std::string("NULL")){
output_train_ref = new cuMatrix<bool>(1, endTime * outputDim * outputDim, outputAmount);
readSpikesFromDumpfile(config->m_ref_output_train_path, output_train_ref);
}
if(config->m_ref_output_test_path != std::string("NULL")){
output_test_ref = new cuMatrix<bool>(1, endTime * outputDim * outputDim, outputAmount);
readSpikesFromDumpfile(config->m_ref_output_test_path, output_test_ref);
}
}
void ConvSpiking::initRandom()
{
ConfigConvSpiking * config = (ConfigConvSpiking*)Config::instance()->getLayerByName(m_name);
float initW = config->m_initW;
if(config->isGaussian()){
for(int i = 0; i < (int)w.size(); i++){
float epsilon = initW;
for(int c = 0; c < w[i]->channels; c++)
{
createGaussian(w[i]->getHost() + c * w[i]->getArea(),
kernelSize, kernelSize, w[i]->channels, epsilon);
}
w[i]->toGpu();
}
}
else if(config->isExternal()){
initFromDumpfile(config->m_weightPath, w);
}
else{
for(int i = 0; i < (int)w.size(); i++){
for(int j = 0; j < w[i]->getLen(); j++){
w[i]->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
}
w[i]->toGpu();
}
}
}
void ConvSpiking::initFromCheckpoint(FILE* file)
{
float val = 0;
for(size_t a = 0; a < w.size(); a++){
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
LOG("scanf fail", "result/log.txt");
}
w[a]->set(i, j, c, val);
}
}
}
w[a]->toGpu();
}
for(size_t a = 0; a < b.size(); a++){
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
LOG("scanf fail", "result/log.txt");
}
b[a]->set(i, j, c, val);
}
}
}
b[a]->toGpu();
}
}
//* initialize the weights from the dump file by the Matlab sim
void ConvSpiking::initFromDumpfile(const std::string& filename, cuMatrixVector<float>& cuW)
{
assert(filename != std::string("NULL"));
FILE *file = fopen(filename.c_str(), "r");
char logStr[256];
if(file == NULL){
sprintf(logStr, "Cannot open file: %s", filename.c_str()); LOG(logStr, "Result/log.txt");
assert(0);
}
float val = 0;
for(size_t a = 0; a < cuW.size(); a++){
for(int c = 0; c < cuW[a]->channels; c++){
for(int i = 0; i < cuW[a]->rows; i++){
for(int j = 0; j < cuW[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
sprintf(logStr, "Reading weight failed for %s @row: %d\t@col: %d\t@channel: %d\t@outputAmount: %d", filename.c_str(), i, j, c, int(a));
LOG(logStr, "Result/log.txt");
assert(0);
}
cuW[a]->set(i, j, c, val);
}
}
}
cuW[a]->toGpu();
}
}
void ConvSpiking::verify(const std::string& phrase)
{
    printf("Verify for the layer: %s at %s phase.\n", m_name.c_str(), phrase.c_str());
if(phrase == std::string("train"))
{
if(output_train_ref != NULL){
outputs->toCpu();
checkMatrixIsSame(output_train_ref, outputs, outputDim*outputDim);
}
}
else if(phrase == std::string("test"))
{
if(!w_ref.empty()){
for(int i = 0; i < outputAmount; ++i){
w[i]->toCpu();
checkMatrixIsSame(w_ref[i], w[i], i);
}
}
if(output_test_ref != NULL){
outputs->toCpu();
checkMatrixIsSame(output_test_ref, outputs, outputDim*outputDim);
}
}
    printf("Verification for the layer: %s at %s phase. Passed!!\n", m_name.c_str(), phrase.c_str());
}
__device__ float d_ConvSpiking_accumulate_spikes(
int inputDim,
int inputArea,
int inputAmount,
int kernelSize,
bool* inputs,
int x,
int y,
int padding,
int ok,
int batchId,
float ** ws,
int t,
int endTime)
{
int inputSize2 = inputDim * inputDim;
int kernelSize2 = kernelSize * kernelSize;
float response = 0.0f;
// for all inputAmount (channels)
for(int c = 0; c < inputAmount; c++){
bool* curInput = inputs + c * inputArea + batchId * inputSize2 * endTime;
float* w = ws[ok] + c * kernelSize2;
for(int i = 0; i < kernelSize; i++){
int xx = x + i - padding;
for(int j = 0; j < kernelSize; j++){
int yy = y + j - padding;
if(xx >= 0 && xx < inputDim && yy >= 0 && yy < inputDim){
int i_idx = xx * inputDim + yy;
response += curInput[i_idx + t * inputSize2] * w[i * kernelSize + j];
}
}
}
}
return response;
}
/*
* dim3 block = dim3(batch, div, endTime);
* dim3 thread= dim3(min(outputDim * outputDim, 1024), remain));
*/
__global__ void g_ConvSpiking_fast_input_response(
bool* inputs,
float** ws,
float** bs,
float* inputs_resp,
int inputDim,
int kernelSize,
int padding,
int outputDim,
int endTime,
int inputAmount,
int outputAmount,
int inputArea,
int responseArea)
{
int batchId = blockIdx.x;
int t = blockIdx.z;
int ok = blockIdx.y * blockDim.y + threadIdx.y;
if(ok >= outputAmount)return;
int outputSize2 = outputDim * outputDim;
float* curResp = inputs_resp + ok * responseArea + batchId * outputSize2 * endTime;
for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
{
int o_idx = tidx + threadIdx.x;
if(o_idx < outputSize2)
{
int x = o_idx / outputDim;
int y = o_idx % outputDim;
curResp[o_idx + t * outputSize2] = d_ConvSpiking_accumulate_spikes(inputDim, inputArea, inputAmount, kernelSize, inputs, x, y, padding, ok, batchId, ws, t, endTime);
}
}
}
/*
* dim3 block = dim3(batch, div);
* dim3 thread= dim3(min(outputDim * outputDim, 1024), remain));
*/
__global__ void g_ConvSpiking_feedforward(
float* inputs_resp,
bool* outputs,
int* fireCount,
int outputDim,
int endTime,
int outputAmount,
int outputArea,
float vth,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
int batchId = blockIdx.x;
int ok = blockIdx.y * blockDim.y + threadIdx.y;
if(ok >= outputAmount)return;
int outputSize2 = outputDim * outputDim;
bool* curOutput = outputs + ok * outputArea + batchId * outputSize2 * endTime;
int* curFireCount = fireCount + ok * outputArea / endTime + batchId * outputSize2;
float* curResponse = inputs_resp + ok * outputArea + batchId * outputSize2 * endTime;
for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
{
int o_idx = tidx + threadIdx.x;
if(o_idx < outputSize2)
{
float v = 0.0f;
float ep = 0.0f;
float threshold = vth;
int t_ref= 0;
float response = 0.0f;
int fire_count = 0;
for(int t = 0; t < endTime; t++){
v -= v / TAU_M;
ep -= ep / TAU_S;
if(t == 0)
{
curOutput[o_idx + t * outputSize2] = false;
continue;
}
                // get the convolved input response from the previous time step
response = curResponse[o_idx + (t - 1)*outputSize2];
ep += response;
v += ep/TAU_S;
if(t_ref > 0){
v = 0;
t_ref--;
}
// Fire or not
curOutput[o_idx + t * outputSize2] = v > threshold ? true : false;
t_ref = v > threshold ? T_REFRAC : t_ref;
fire_count += v > threshold ? 1 : 0;
v = v > threshold ? 0 : v;
}
curFireCount[o_idx] = fire_count;
}
}
}
/*
* dim3 block = dim3(batch, inputAmount, inputDim * inputDim);
* dim3 thread= dim3(min(kernelSize * kernelSize * outputAmount, 1024));
*/
__global__ void g_ConvSpiking_backpropagation(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* _curDelta,
float** ws,
float* _preDelta,
float* batchSideEffect,
int curDim,
int preDim,
int endTime,
int curAmount,
int kernelSize,
int padding,
int outputArea,
int inputArea,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
extern __shared__ float _sum[];
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
int batchId = blockIdx.x;
int ik = blockIdx.y;
int i_idx = blockIdx.z;
int curSize2 = curDim * curDim;
int preSize2 = preDim * preDim;
int kernelSize2 = kernelSize * kernelSize;
int curArea = outputArea / endTime;
int preArea = inputArea / endTime;
int* input_time = _inputs_time + inputArea * ik + batchId * preSize2 * endTime;
int* input_fireCount = batchPreFireCount + ik * inputArea / endTime + batchId * preSize2;
float *preDelta = _preDelta + ik * preArea + batchId * preSize2;
int i = i_idx / preDim;
int j = i_idx % preDim;
int totalBackAmount = curAmount * kernelSize2;
for (int tidx = 0; tidx < totalBackAmount; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < totalBackAmount) {
int ok = idx / kernelSize2;
float *curDelta = _curDelta + ok * curArea + batchId * curSize2;
float *w = ws[ok] + ik * kernelSize2;
int* output_time = _outputs_time + outputArea * ok + batchId * curSize2 * endTime;
int* output_fireCount = batchFireCount + ok * outputArea / endTime + batchId * curSize2;
float* side_effect = batchSideEffect + ok * curSize2 + batchId * curSize2;
int x = (idx % kernelSize2) / kernelSize;
int y = (idx % kernelSize2) % kernelSize;
int cx = i - x + padding;
int cy = j - y + padding;
if(cx >= 0 && cx < curDim && cy >= 0 && cy < curDim) {
int o_idx = cx * curDim + cy;
float e = d_Spiking_accumulate_effect(output_time, input_time, output_fireCount[o_idx], input_fireCount[i_idx], o_idx, i_idx, curSize2, preSize2, endTime, T_REFRAC, TAU_M, TAU_S);
int o_cnt = output_fireCount[o_idx];
int i_cnt = input_fireCount[i_idx];
float ratio = i_cnt == 0 || o_cnt == 0 ? 0.5 : e / float(i_cnt);
float s_effect = side_effect[o_idx];
_sum[threadIdx.x] += curDelta[cx * curDim + cy] * w[x * kernelSize + y] * ratio / (1-s_effect);
}
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip < len))
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0)
preDelta[i_idx] = _sum[0];
}
/*
* dim3 block = dim3(batch, outputDim2, outputAmount);
* dim3 thread= min(kernelSize2*inputAmount, 512);
*/
__global__ void g_ConvSpiking_sideEffect(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float** ws,
float vth,
float* batchSideEffect,
float* effectPoly,
int out_size,
int degree,
int inputDim,
int outputDim,
int endTime,
int kernelSize,
int padding,
int inputAmount,
int inputArea,
int outputArea,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
extern __shared__ float _sum[];
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
int batchId = blockIdx.x;
int o_idx = blockIdx.y;
int ok = blockIdx.z;
int inputSize2 = inputDim * inputDim;
int outputSize2 = outputDim * outputDim;
int kernelSize2 = kernelSize * kernelSize;
int* output_time = _outputs_time + outputArea * ok + batchId * outputSize2 * endTime;
int* output_fireCount = batchFireCount + ok * outputArea / endTime + batchId * outputSize2;
float* side_effect = batchSideEffect + ok * outputSize2 + batchId * outputSize2;
int o_cnt = output_fireCount[o_idx];
int len = kernelSize2*inputAmount;
for(int tidx = 0; tidx < len; tidx += blockDim.x)
{
int idx = tidx + threadIdx.x;
if(idx < len){
int ik = idx / kernelSize2;
float* w = ws[ok] + ik * kernelSize2;
int* input_time = _inputs_time + inputArea * ik + batchId * inputSize2 * endTime;
int* input_fireCount= batchPreFireCount + ik * inputArea / endTime + batchId * inputSize2;
int k_id = idx % kernelSize2;
int i = k_id / kernelSize;
int j = k_id % kernelSize;
int x = o_idx / outputDim;
int y = o_idx % outputDim;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputDim && yy >= 0 && yy < inputDim){
int i_idx = xx * inputDim + yy;
float e = d_Spiking_accumulate_effect(output_time, input_time, o_cnt, input_fireCount[i_idx], o_idx, i_idx, outputSize2, inputSize2, endTime, T_REFRAC, TAU_M, TAU_S);
float ratio;
if(o_cnt == 0)
ratio=0;
else{
int o_cnt_tmp = o_cnt;
o_cnt_tmp = o_cnt_tmp > 0 ? o_cnt_tmp : 1;
o_cnt_tmp = o_cnt_tmp <= out_size ? o_cnt_tmp : out_size;
int i_cnt = input_fireCount[i_idx];
i_cnt = i_cnt <= out_size ? i_cnt : out_size;
i_cnt = i_cnt > 0 ? i_cnt : 1;
ratio=0;
for(int i=0; i< degree; i++){
float base = (float)o_cnt_tmp;
float exponent = (float)(degree-2-i);
float coef=(float)(degree-i-1);
float v=coef*powf(base, exponent)*effectPoly[(i_cnt-1)*degree+i];
ratio+=v;
}
}
if(w[i*kernelSize+j]==0){
printf("side effect: weight is zero\n");
}
_sum[tid] += w[i * kernelSize + j] * ratio;
}
}
}
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len)
{
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0){
int s= _sum[0] / vth;
side_effect[o_idx] += s;
}
}
/*
* dim3 block = dim3(batch, outputAmount*inputAmount, kernelSize * kernelSize);
* dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_ConvSpiking_wgrad(
int* _inputs_time,
int* _outputs_time,
int* batchPreFireCount,
int* batchFireCount,
float* batchSideEffect,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int endTime,
int kernelSize,
int padding,
int inputAmount,
int inputArea,
int outputArea,
int curDeltaArea,
int wgradTmpArea,
int T_REFRAC,
float TAU_M,
float TAU_S)
{
extern __shared__ float _sum[];
int tid = threadIdx.x;
_sum[tid] = 0;
__syncthreads();
int ok = blockIdx.y / inputAmount;
int ik = blockIdx.y % inputAmount;
int k_id = blockIdx.z;
int batchId = blockIdx.x;
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
int outputSize2 = curDeltaSize2;
int kernelSize2 = kernelSize * kernelSize;
float* wgrad = wgradTmp[ok] + ik * wgradTmpArea + batchId * kernelSize2;
int* input_time = _inputs_time + inputArea * ik + batchId * inputSize2 * endTime;
int* output_time = _outputs_time + outputArea * ok + batchId * outputSize2 * endTime;
int* input_fireCount = batchPreFireCount + ik * inputArea / endTime + batchId * inputSize2;
int* output_fireCount = batchFireCount + ok * outputArea / endTime + batchId * outputSize2;
float* side_effect = batchSideEffect + ok * outputSize2 + batchId * outputSize2;
float* curDelta = _curDelta + ok * curDeltaArea + batchId * curDeltaSize2;
for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
{
int o_idx = tidx + threadIdx.x;
if(o_idx < outputSize2)
{
int i = k_id / kernelSize;
int j = k_id % kernelSize;
int x = o_idx / curDeltaDim;
int y = o_idx % curDeltaDim;
int cx = i + x - padding;
int cy = j + y - padding;
if(cx >= 0 && cy >= 0 && cx < inputDim && cy < inputDim){
int i_idx = cx * inputDim + cy;
float e = d_Spiking_accumulate_effect(output_time, input_time, output_fireCount[o_idx], input_fireCount[i_idx], o_idx, i_idx, outputSize2, inputSize2, endTime, T_REFRAC, TAU_M, TAU_S);
float s_effect = side_effect[o_idx];
float val= e * curDelta[x * curDeltaDim + y]/(1-s_effect) ;
_sum[tid] += val ;
#ifdef DEBUG
if(i == ROW && j == COL && ik == IN_CH && ok == OUT_CH)
printf("Collect x= %d; y = %d; Acc effect: %f\tdelta= %f\n", x,y,e,curDelta[x*curDeltaDim + y]);
#endif
}
}
}
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < skip && (tid + skip) < len){
_sum[tid] += _sum[tid + skip];
}
len = skip;
}
if(tid == 0){
wgrad[k_id] = _sum[0];
}
}
/*
* blocks : dim3(kernelAmount2)
* threads : dim3(256)
* shared : sizeof(float) * 256
*/
__global__ void g_ConvSpiking_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea)
{
extern __shared__ float _sum[];
int k2 = blockIdx.x;
_sum[threadIdx.x] = 0.0;
__syncthreads();
int deltaSize2 = deltaSize * deltaSize;
int tlen = deltaSize2 * batch;
int skip = deltaArea * k2;
for(int i = 0; i < tlen; i += blockDim.x)
{
int idx = i + threadIdx.x;
if(idx < tlen)
{
_sum[threadIdx.x] += delta[idx + skip];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < skip && (threadIdx.x + skip < len))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
else{
return;
}
len = skip;
}
if(threadIdx.x == 0)
{
bgrad[k2][0] = _sum[0] / batch;
}
}
void ConvSpiking::loadPoly(std::string& filename, int out_size, int degree, cuMatrix<float>* poly){
ifstream f_in(filename.c_str());
if(!f_in.is_open()){
printf("Cannot open the file: %s\n", filename.c_str());
exit(EXIT_FAILURE);
}
float p;
std::string data;
for(int i=0;i<out_size;i++){
getline(f_in, data);
std::istringstream iss(data);
for(int j=0;j<degree;j++){
iss>>p;
//std::cout<<ER<<std::endl;
poly->getHost()[i*degree+j] = p;
}
}
f_in.close();
poly->toGpu();
}
|
ffa0bf81bc622a09699a26214795c5831e82c3d2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/convert/convert_booleans.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <strings/utilities.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace strings {
namespace detail {
// Convert strings column to boolean column
std::unique_ptr<column> to_booleans(strings_column_view const& strings,
string_scalar const& true_string,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_numeric_column(data_type{type_id::BOOL8}, 0);
CUDF_EXPECTS(true_string.is_valid() && true_string.size() > 0,
"Parameter true_string must not be empty.");
auto d_true = string_view(true_string.data(), true_string.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create output column copying the strings' null-mask
auto results = make_numeric_column(data_type{type_id::BOOL8},
strings_count,
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
auto d_results = results_view.data<bool>();
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
[d_strings, d_true] __device__(size_type idx) {
bool result = false;
if (!d_strings.is_null(idx))
result = d_strings.element<string_view>(idx).compare(d_true) == 0;
return result;
});
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external API
std::unique_ptr<column> to_booleans(strings_column_view const& strings,
string_scalar const& true_string,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_booleans(strings, true_string, hipStream_t{}, mr);
}
namespace detail {
// Convert boolean column to strings column
std::unique_ptr<column> from_booleans(column_view const& booleans,
string_scalar const& true_string,
string_scalar const& false_string,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = booleans.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(booleans.type().id() == type_id::BOOL8, "Input column must be boolean type");
CUDF_EXPECTS(true_string.is_valid() && true_string.size() > 0,
"Parameter true_string must not be empty.");
auto d_true = string_view(true_string.data(), true_string.size());
CUDF_EXPECTS(false_string.is_valid() && false_string.size() > 0,
"Parameter false_string must not be empty.");
auto d_false = string_view(false_string.data(), false_string.size());
auto column = column_device_view::create(booleans, stream);
auto d_column = *column;
// copy null mask
rmm::device_buffer null_mask = copy_bitmask(booleans, stream, mr);
// build offsets column
auto offsets_transformer_itr =
thrust::make_transform_iterator(thrust::make_counting_iterator<int32_t>(0),
[d_column, d_true, d_false] __device__(size_type idx) {
if (d_column.is_null(idx)) return 0;
size_type bytes = 0;
if (d_column.element<bool>(idx))
bytes = d_true.size_bytes();
else
bytes = d_false.size_bytes();
return bytes;
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, booleans.null_count(), bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_column, d_true, d_false, d_offsets, d_chars] __device__(size_type idx) {
if (d_column.is_null(idx)) return;
string_view result = (d_column.element<bool>(idx) ? d_true : d_false);
memcpy(d_chars + d_offsets[idx], result.data(), result.size_bytes());
});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
booleans.null_count(),
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> from_booleans(column_view const& booleans,
string_scalar const& true_string,
string_scalar const& false_string,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_booleans(booleans, true_string, false_string, hipStream_t{}, mr);
}
} // namespace strings
} // namespace cudf
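/*
 * Illustrative sketch only (not part of libcudf): a minimal caller for the
 * string-to-boolean conversion above. The "Y" literal is an assumption made for
 * this example, and the two-argument call relies on the memory-resource
 * parameter defaulting in the public header, as in contemporaneous libcudf
 * releases.
 */
std::unique_ptr<cudf::column> example_booleans_from_yn(cudf::strings_column_view const& input)
{
  return cudf::strings::to_booleans(input, cudf::string_scalar("Y"));
}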
| ffa0bf81bc622a09699a26214795c5831e82c3d2.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/convert/convert_booleans.hpp>
#include <cudf/strings/detail/converters.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <strings/utilities.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
namespace cudf {
namespace strings {
namespace detail {
// Convert strings column to boolean column
std::unique_ptr<column> to_booleans(strings_column_view const& strings,
string_scalar const& true_string,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = strings.size();
if (strings_count == 0) return make_numeric_column(data_type{type_id::BOOL8}, 0);
CUDF_EXPECTS(true_string.is_valid() && true_string.size() > 0,
"Parameter true_string must not be empty.");
auto d_true = string_view(true_string.data(), true_string.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create output column copying the strings' null-mask
auto results = make_numeric_column(data_type{type_id::BOOL8},
strings_count,
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
auto d_results = results_view.data<bool>();
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
[d_strings, d_true] __device__(size_type idx) {
bool result = false;
if (!d_strings.is_null(idx))
result = d_strings.element<string_view>(idx).compare(d_true) == 0;
return result;
});
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external API
std::unique_ptr<column> to_booleans(strings_column_view const& strings,
string_scalar const& true_string,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_booleans(strings, true_string, cudaStream_t{}, mr);
}
namespace detail {
// Convert boolean column to strings column
std::unique_ptr<column> from_booleans(column_view const& booleans,
string_scalar const& true_string,
string_scalar const& false_string,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
size_type strings_count = booleans.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
CUDF_EXPECTS(booleans.type().id() == type_id::BOOL8, "Input column must be boolean type");
CUDF_EXPECTS(true_string.is_valid() && true_string.size() > 0,
"Parameter true_string must not be empty.");
auto d_true = string_view(true_string.data(), true_string.size());
CUDF_EXPECTS(false_string.is_valid() && false_string.size() > 0,
"Parameter false_string must not be empty.");
auto d_false = string_view(false_string.data(), false_string.size());
auto column = column_device_view::create(booleans, stream);
auto d_column = *column;
// copy null mask
rmm::device_buffer null_mask = copy_bitmask(booleans, stream, mr);
// build offsets column
auto offsets_transformer_itr =
thrust::make_transform_iterator(thrust::make_counting_iterator<int32_t>(0),
[d_column, d_true, d_false] __device__(size_type idx) {
if (d_column.is_null(idx)) return 0;
size_type bytes = 0;
if (d_column.element<bool>(idx))
bytes = d_true.size_bytes();
else
bytes = d_false.size_bytes();
return bytes;
});
auto offsets_column = make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column =
create_chars_child_column(strings_count, booleans.null_count(), bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_column, d_true, d_false, d_offsets, d_chars] __device__(size_type idx) {
if (d_column.is_null(idx)) return;
string_view result = (d_column.element<bool>(idx) ? d_true : d_false);
memcpy(d_chars + d_offsets[idx], result.data(), result.size_bytes());
});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
booleans.null_count(),
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> from_booleans(column_view const& booleans,
string_scalar const& true_string,
string_scalar const& false_string,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_booleans(booleans, true_string, false_string, cudaStream_t{}, mr);
}
} // namespace strings
} // namespace cudf
|
3c6ce5fe17741aae20ce3d1dd79c811cba7fb1b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <assert.h>
#include <algorithm>
#include <cmath>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <THH/THH.h>
#include "integral-strided-cuda.hpp"
#define NUM_THREADS 256
#define BLOCK_SIZE 4
#define BLOCK_CHANNELS (NUM_THREADS / (BLOCK_SIZE * BLOCK_SIZE))
using std::max;
using std::min;
using std::floor;
using std::ceil;
// TODO remove this code
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
/************************ Integral image computation ************************/
__global__ void accumulateRowsKernel(
const float * __restrict__ input, float * __restrict__ output,
const int channels, const int h, const int w);
__global__ void accumulateColsKernel(
const float * __restrict__ input, float * __restrict__ output,
const int channels, const int h, const int w);
__global__ void accumulateColsInplaceKernel(
float * __restrict__ input, const int channels, const int h, const int w);
__global__ void accumulateColsInplaceTransposedKernel(
float * __restrict__ input, const int channels, const int h, const int w);
extern "C"
void integralImageCuda(THCState *state,
float *input, float *output, int channels, int h, int w, float *tmp) {
// input : (channels) x (h) x (w), contiguous
// output: (channels) x (h+1) x (w+1), contiguous
// tmp : at least (channels) * (h+1) * (w+1)
int blockSize1D, gridSize1D;
const float ONE = 1.0, ZERO = 0.0;
hipblasSetStream(THCState_getCurrentBlasHandle(state), THCState_getCurrentStream(state));
// Compute prefix sums of columns, `input` -> `output`
// (channels) x (h) x (w) ==> (channels) x (h+1) x (w+1)
// Note: output[:,:,0] remains uninitialized
int totalCols = channels * w;
blockSize1D = NUM_THREADS;
gridSize1D = (totalCols + blockSize1D - 1) / blockSize1D;
hipLaunchKernelGGL(( accumulateColsKernel) , dim3(gridSize1D), dim3(blockSize1D), 0, THCState_getCurrentStream(state),
input, output, channels, h, w);
THCudaCheck(hipGetLastError());
// transpose, `output` -> `tmp`
// (channels) x (h+1) x (w+1) ==> (w+1) x (channels) x (h+1)
THCublasCheck(hipblasSgeam(
THCState_getCurrentBlasHandle(state),
HIPBLAS_OP_T, HIPBLAS_OP_N, channels * (h+1), w+1,
&ONE, output, w+1,
&ZERO, tmp, channels * (h+1),
tmp, channels * (h+1)));
// Compute prefix sums of columns (former rows), `tmp` -> `tmp`
// (w+1) x (channels) x (h+1) ==> (w+1) x (channels) x (h+1)
int totalRows = channels * h; // actually, number of cols in (w+1) x (channels * (h+1)) image
blockSize1D = NUM_THREADS;
gridSize1D = (totalRows + blockSize1D - 1) / blockSize1D;
hipLaunchKernelGGL(( accumulateColsInplaceTransposedKernel)
, dim3(gridSize1D), dim3(blockSize1D), 0, THCState_getCurrentStream(state), tmp, channels, h, w);
THCudaCheck(hipGetLastError());
// transpose, `tmp` -> `output`
// (w+1) x (channels) x (h+1) ==> (channels) x (h+1) x (w+1)
THCublasCheck(hipblasSgeam(
THCState_getCurrentBlasHandle(state),
HIPBLAS_OP_T, HIPBLAS_OP_N, w+1, channels * (h+1),
&ONE, tmp, channels * (h+1),
&ZERO, output, w+1,
output, w+1));
}
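/*
 * Illustrative sketch only (not part of the original module): one way to call
 * integralImageCuda for a single (channels) x (h) x (w) device image. It mainly
 * documents the buffer sizes: `output` and `tmp` must each hold at least
 * channels * (h+1) * (w+1) floats. The function name and the raw
 * hipMalloc/hipFree management are assumptions made for this example; in the
 * real module the buffers come from Torch tensors.
 */
static void integralImageCudaExample(THCState *state, float *d_input, int channels, int h, int w)
{
    float *d_output = NULL;
    float *d_tmp = NULL;
    const size_t outElems = (size_t)channels * (h + 1) * (w + 1);
    THCudaCheck(hipMalloc((void**)&d_output, outElems * sizeof(float)));
    THCudaCheck(hipMalloc((void**)&d_tmp, outElems * sizeof(float)));
    integralImageCuda(state, d_input, d_output, channels, h, w, d_tmp);
    THCudaCheck(hipFree(d_output));
    THCudaCheck(hipFree(d_tmp));
}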
__global__ void accumulateRowsKernel(
const float * __restrict__ input, float * __restrict__ output,
const int channels, const int h, const int w) {
// view multichannel image as a multiline single-channel image
int globalRowIdx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (globalRowIdx < channels * h) {
float *outputRow = output + (globalRowIdx + globalRowIdx / h + 1) * (w+1) + 1;
outputRow[-1] = 0;
double sum = 0;
for (int i = 0; i < w; ++i) {
sum += input[globalRowIdx * w + i];
outputRow[i] = static_cast<float>(sum);
}
// need to zero the (0,0) corner of the output separately >:(
output[(globalRowIdx / h) * (w+1) * (h+1)] = 0;
}
}
__global__ void accumulateColsKernel(
const float * __restrict__ input, float * __restrict__ output,
const int channels, const int h, const int w) {
// input : (channels * h) x (w)
// output: (channels * (h+1)) x (w+1) -- first column remains untouched
// global column index (of total `channels * w` columns in this image):
const int globalColIdx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (globalColIdx < channels * w) {
const int channelIdx = globalColIdx / w;
const int colIdx = globalColIdx - channelIdx * w;
// jump to the channel of interest:
int inputPos = channelIdx * h * w + colIdx;
// (let local columns be 1-indexed: 0-th output column is always zero)
int outputPos = channelIdx * (h+1) * (w+1) + colIdx + 1;
output[outputPos] = 0; // 0-th element of every column is always zero
double sum = 0;
for (int i = 1; i <= h; ++i) {
sum += static_cast<double>(input[inputPos + (i-1) * w]);
output[outputPos + i * (w+1)] = static_cast<float>(sum);
}
}
}
__global__ void accumulateColsInplaceTransposedKernel(
float * __restrict__ input, const int channels, const int h, const int w) {
// in-place.
// input: (w+1) x (channels * (h+1))
// global column index (of total `channels * w` columns in this image):
const int globalColIdx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (globalColIdx < channels * h) {
const int channelIdx = globalColIdx / h;
// add `channelIdx + 1` to account for one extra column in each horizontally stacked image
const int colIdx = globalColIdx + channelIdx + 1;
// need to zero the (0,0) corner of the output separately >:(
input[channelIdx * (h+1)] = 0;
input[colIdx] = 0; // first element of every column is always zero
double sum = 0;
for (int i = 1; i <= w; ++i) {
float *currentElement = &input[i * channels * (h+1) + colIdx];
sum += static_cast<double>(*currentElement);
*currentElement = static_cast<float>(sum);
}
}
}
__global__ void accumulateColsInplaceKernel(
float * __restrict__ input, const int channels, const int h, const int w) {
// in-place.
// input is already a `channels * (h+1) x (w+1)` array
// global column index (of all `channels * w` columns in this image)
int colIdx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (colIdx < channels * w) {
input += (colIdx / w) * (h+1) * (w+1); // jump to current channel
colIdx %= w; // switch to local column index,
++colIdx; // it's 1-indexed because first output column is always zero
input[colIdx] = 0; // first element of every column is always zero
double sum = 0;
for (int i = 1; i <= h; ++i) {
float *currentElement = &input[i * (w+1) + colIdx];
sum += static_cast<double>(*currentElement);
*currentElement = static_cast<float>(sum);
}
}
}
/************************ updateOutput ************************/
__global__ void forwardNoNormReplicateKernel(
const float *intData, const int intDataStrideChannel, float *outData,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
outData += id; // outData now points to our output pixel
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's
// `integral()` behavior. Namely, I(x,0) and I(0,y) are
// always 0 (so it's a C-style array sum).
// However, when computing sums, we subtract values at points
// like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin
// and yMin, and thus finally they are not affected.
const int xMinCurr = (int)ceil(xMin[globalWindowIdx]);
const int yMinCurr = (int)ceil(yMin[globalWindowIdx]);
const int xMaxCurr = (int)floor(xMax[globalWindowIdx]) + 1;
const int yMaxCurr = (int)floor(yMax[globalWindowIdx]) + 1;
const int t = max(0, min(x+xMinCurr, h-1) );
const int b = max(1, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w-1) );
const int r = max(1, min(y+yMaxCurr, w) );
float outValue = 0;
outValue += intData[b*(w+1) + r];
outValue -= intData[t*(w+1) + r];
outValue -= intData[b*(w+1) + l];
outValue += intData[t*(w+1) + l];
*outData = outValue;
}
}
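/*
 * Illustrative sketch only, not called by the kernels: the four-corner lookup
 * used above, written out as a standalone helper. For the exclusive integral
 * image intData of size (h+1) x (w+1), the sum of input pixels over rows [t, b)
 * and columns [l, r) is I[b][r] - I[t][r] - I[b][l] + I[t][l]; the helper's name
 * is an assumption made for this example.
 */
static __device__ float boxSumFromIntegralSketch(
    const float *intData, const int w, const int t, const int b, const int l, const int r)
{
    return intData[b*(w+1) + r] - intData[t*(w+1) + r]
         - intData[b*(w+1) + l] + intData[t*(w+1) + l];
}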
__global__ void forwardNoNormReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *outData,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
outData += id; // outData now points to our output pixel
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's
// `integral()` behavior. Namely, I(x,0) and I(0,y) are
// always 0 (so it's a C-style array sum).
// However, when computing sums, we subtract values at points
// like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin
// and yMin, and thus finally they are not affected.
const int xMinCurr = (int)ceil(xMin[globalWindowIdx]);
const float xMinCurrFrac = (float)xMinCurr - xMin[globalWindowIdx];
const int yMinCurr = (int)ceil(yMin[globalWindowIdx]);
const float yMinCurrFrac = (float)yMinCurr - yMin[globalWindowIdx];
const float xMaxCurrFrac = xMax[globalWindowIdx] - floor(xMax[globalWindowIdx]);
const int xMaxCurr = (int)floor(xMax[globalWindowIdx]) + 1;
const float yMaxCurrFrac = yMax[globalWindowIdx] - floor(yMax[globalWindowIdx]);
const int yMaxCurr = (int)floor(yMax[globalWindowIdx]) + 1;
const int t = max(0, min(x+xMinCurr, h-1) );
const int b = max(1, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w-1) );
const int r = max(1, min(y+yMaxCurr, w) );
const int bAdv = max(1, min(x+xMaxCurr+1, h ));
const int rAdv = max(1, min(y+yMaxCurr+1, w ));
const int tAdv = max(0, min(x+xMinCurr-1, h-1));
const int lAdv = max(0, min(y+yMinCurr-1, w-1));
float outValue = 0;
outValue += intData[b*(w+1) + r];
outValue -= intData[t*(w+1) + r];
outValue -= intData[b*(w+1) + l];
outValue += intData[t*(w+1) + l];
// -- xMax border
outValue +=
( intData[bAdv*(w+1) + r]
- intData[b *(w+1) + r]
- intData[bAdv*(w+1) + l]
+ intData[b *(w+1) + l]) * xMaxCurrFrac;
// -- yMax border
outValue +=
( intData[b*(w+1) + rAdv]
- intData[b*(w+1) + r ]
- intData[t*(w+1) + rAdv]
+ intData[t*(w+1) + r ]) * yMaxCurrFrac;
// -- xMin border
outValue +=
( intData[t *(w+1) + r]
- intData[tAdv*(w+1) + r]
- intData[t *(w+1) + l]
+ intData[tAdv*(w+1) + l]) * xMinCurrFrac;
// -- yMin border
outValue +=
( intData[b*(w+1) + l ]
- intData[b*(w+1) + lAdv]
- intData[t*(w+1) + l ]
+ intData[t*(w+1) + lAdv]) * yMinCurrFrac;
// -- corner pixels
bool cornerIsValid;
cornerIsValid = not (
(x+xMaxCurr > h-1) |
(y+yMaxCurr > w-1) |
(x+xMaxCurr <= 0) |
(y+yMaxCurr <= 0));
outValue +=
xMaxCurrFrac * yMaxCurrFrac *
cornerIsValid *
inData[((x+xMaxCurr) * inDataStrideRow + (y+yMaxCurr)) * cornerIsValid];
cornerIsValid = not (
(x+xMinCurr-1 >= h-1) |
(y+yMaxCurr > w-1) |
(x+xMinCurr-1 < 0) |
(y+yMaxCurr <= 0));
outValue +=
xMinCurrFrac * yMaxCurrFrac *
cornerIsValid *
inData[((x+xMinCurr-1) * inDataStrideRow + (y+yMaxCurr)) * cornerIsValid];
cornerIsValid = not (
(x+xMaxCurr > h-1) |
(y+yMinCurr-1 >= w-1) |
(x+xMaxCurr <= 0) |
(y+yMinCurr-1 < 0));
outValue +=
xMaxCurrFrac * yMinCurrFrac *
cornerIsValid *
inData[((x+xMaxCurr) * inDataStrideRow + (y+yMinCurr-1)) * cornerIsValid];
cornerIsValid = not (
(x+xMinCurr-1 >= h-1) |
(y+yMinCurr-1 >= w-1) |
(x+xMinCurr-1 < 0) |
(y+yMinCurr-1 < 0));
outValue +=
xMinCurrFrac * yMinCurrFrac *
cornerIsValid *
inData[((x+xMinCurr-1) * inDataStrideRow + (y+yMinCurr-1)) * cornerIsValid];
*outData = outValue;
}
}
extern "C"
void forwardNoNormReplicateCuda(THCState *state,
float *intData, int intDataStrideChannel, float *outData,
int batchSize, int nInputPlane, int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::forwardNoNormReplicateCuda(state,
intData, intDataStrideChannel, outData,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin, yMax,
strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
hipLaunchKernelGGL(( forwardNoNormReplicateKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, outData,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin, yMax);
THCudaCheck(hipGetLastError());
}
extern "C"
void forwardNoNormReplicateFracCuda(THCState *state,
float *intData, int intDataStrideChannel, float *outData,
int batchSize, int nInputPlane, int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
float *inData, int inDataStrideRow, int inDataStrideChannel,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::forwardNoNormReplicateFracCuda(state,
intData, intDataStrideChannel, outData,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel,
strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
hipLaunchKernelGGL(( forwardNoNormReplicateFracKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, outData,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel);
THCudaCheck(hipGetLastError());
}
/************************ updateGradInput ************************/
/************** Planewise *************/
__global__ void updateGradInputReplicatePlanewiseKernel(
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
xMinCurr = (int)ceil(-xMax[windowIdx]);
yMinCurr = (int)ceil(-yMax[windowIdx]);
xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
}
gradInputData[x*w + y] = outValue;
}
}
__global__ void updateGradInputReplicatePlanewiseFracKernel(
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
xMinCurr = (int)ceil(-xMax[windowIdx]);
yMinCurr = (int)ceil(-yMax[windowIdx]);
const float xMinCurrFrac = (float)xMinCurr + xMax[windowIdx];
const float yMinCurrFrac = (float)yMinCurr + yMax[windowIdx];
xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
const float xMaxCurrFrac = -xMin[windowIdx] + 1 - xMaxCurr;
const float yMaxCurrFrac = -yMin[windowIdx] + 1 - yMaxCurr;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t;
const int bAdv = x+xMaxCurr >= 0 ? max(0, min(b+1, h)) : b;
const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l;
const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r;
// TODO: 1D grid
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// -- xMax border
outValue +=
( gradOutputIntData[bAdv*(w+1) + r]
- gradOutputIntData[b *(w+1) + r]
- gradOutputIntData[bAdv*(w+1) + l]
+ gradOutputIntData[b *(w+1) + l]
) * xMaxCurrFrac;
// -- yMax border
outValue +=
( gradOutputIntData[b*(w+1) + rAdv]
- gradOutputIntData[b*(w+1) + r ]
- gradOutputIntData[t*(w+1) + rAdv]
+ gradOutputIntData[t*(w+1) + r ]
) * yMaxCurrFrac;
// -- xMin border
outValue +=
( gradOutputIntData[t *(w+1) + r]
- gradOutputIntData[tAdv*(w+1) + r]
- gradOutputIntData[t *(w+1) + l]
+ gradOutputIntData[tAdv*(w+1) + l]
) * xMinCurrFrac;
// -- yMin border
outValue +=
( gradOutputIntData[b*(w+1) + l ]
- gradOutputIntData[b*(w+1) + lAdv]
- gradOutputIntData[t*(w+1) + l ]
+ gradOutputIntData[t*(w+1) + lAdv]
) * yMinCurrFrac;
// -- corner pixels
outValue +=
xMaxCurrFrac*yMaxCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMaxCurr > w-1 or
x+xMaxCurr < 0 or
y+yMaxCurr < 0 or
b == bAdv or
r == rAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + r]);
outValue +=
xMinCurrFrac*yMaxCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMaxCurr > w-1 or
x+xMinCurr-1 < 0 or
y+yMaxCurr < 0 or
t == tAdv or
r == rAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + r]);
outValue +=
xMaxCurrFrac*yMinCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMinCurr-1 > w-1 or
x+xMaxCurr < 0 or
y+yMinCurr-1 < 0 or
b == bAdv or
l == lAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + lAdv]);
outValue +=
xMinCurrFrac*yMinCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMinCurr-1 > w-1 or
x+xMinCurr-1 < 0 or
y+yMinCurr-1 < 0 or
t == tAdv or
l == lAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + lAdv]);
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
gradOutputData += gradOutputStrideChannel;
}
gradInputData[x*w + y] = outValue;
}
}
extern "C"
void updateGradInputReplicatePlanewiseCuda(THCState *state,
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::updateGradInputReplicatePlanewiseCuda(
gradOutputIntData, gradInputData, h, w, nWindows,
xMin, xMax, yMin, yMax, strideH, strideW);
return;
}
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
dim3 dimGrid(
(h + dimBlock.x - 1) / dimBlock.x,
(w + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( updateGradInputReplicatePlanewiseKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
gradOutputIntData, gradInputData,
h, w, nWindows,
xMin, xMax, yMin, yMax);
THCudaCheck(hipGetLastError());
}
extern "C"
void updateGradInputReplicatePlanewiseFracCuda(THCState *state,
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::updateGradInputReplicatePlanewiseFracCuda(
gradOutputIntData, gradInputData, h, w, nWindows,
xMin, xMax, yMin, yMax,
gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
strideH, strideW);
return;
}
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
dim3 dimGrid(
(h + dimBlock.x - 1) / dimBlock.x,
(w + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( updateGradInputReplicatePlanewiseFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
gradOutputIntData, gradInputData,
h, w, nWindows,
xMin, xMax, yMin, yMax,
gradOutputData, gradOutputStrideRow, gradOutputStrideChannel);
THCudaCheck(hipGetLastError());
}
/****************** Single-kernel updateGradInput (faster) **************/
__global__ void updateGradInputReplicateKernel(
const float *gradOutputIntData, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows,
const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int globalWindowIdx = id % (nInputPlane * nWindows);
// `id` is now the current plane number
gradOutputIntData += id * (w+1) * (h+1);
if (id < batchSize * nInputPlane * nWindows) {
float outValue = 0;
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
xMinCurr = (int)ceil(-xMax[globalWindowIdx]);
yMinCurr = (int)ceil(-yMax[globalWindowIdx]);
xMaxCurr = (int)floor(-xMin[globalWindowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[globalWindowIdx]) + 1;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
*tmpArray = outValue;
}
}
__global__ void updateGradInputReplicateFracKernel(
const float *gradOutputIntData, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows,
const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax,
const float *gradOutputData,
const int gradOutputStrideRow, const int gradOutputStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int globalWindowIdx = id % (nInputPlane * nWindows);
// `id` is now the current plane number
gradOutputIntData += id * (w+1) * (h+1);
gradOutputData += id * gradOutputStrideChannel;
if (id < batchSize * nInputPlane * nWindows) {
float outValue = 0;
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
xMinCurr = (int)ceil(-xMax[globalWindowIdx]);
yMinCurr = (int)ceil(-yMax[globalWindowIdx]);
const float xMinCurrFrac = (float)xMinCurr + xMax[globalWindowIdx];
const float yMinCurrFrac = (float)yMinCurr + yMax[globalWindowIdx];
xMaxCurr = (int)floor(-xMin[globalWindowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[globalWindowIdx]) + 1;
const float xMaxCurrFrac = -xMin[globalWindowIdx] + 1 - xMaxCurr;
const float yMaxCurrFrac = -yMin[globalWindowIdx] + 1 - yMaxCurr;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t;
const int bAdv = x+xMaxCurr >= 0 ? max(0, min(b+1, h)) : b;
const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l;
const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r;
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// -- xMax border
outValue +=
( gradOutputIntData[bAdv*(w+1) + r]
- gradOutputIntData[b *(w+1) + r]
- gradOutputIntData[bAdv*(w+1) + l]
+ gradOutputIntData[b *(w+1) + l]
) * xMaxCurrFrac;
// -- yMax border
outValue +=
( gradOutputIntData[b*(w+1) + rAdv]
- gradOutputIntData[b*(w+1) + r ]
- gradOutputIntData[t*(w+1) + rAdv]
+ gradOutputIntData[t*(w+1) + r ]
) * yMaxCurrFrac;
// -- xMin border
outValue +=
( gradOutputIntData[t *(w+1) + r]
- gradOutputIntData[tAdv*(w+1) + r]
- gradOutputIntData[t *(w+1) + l]
+ gradOutputIntData[tAdv*(w+1) + l]
) * xMinCurrFrac;
// -- yMin border
outValue +=
( gradOutputIntData[b*(w+1) + l ]
- gradOutputIntData[b*(w+1) + lAdv]
- gradOutputIntData[t*(w+1) + l ]
+ gradOutputIntData[t*(w+1) + lAdv]
) * yMinCurrFrac;
// -- corner pixels
outValue +=
xMaxCurrFrac*yMaxCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMaxCurr > w-1 or
x+xMaxCurr < 0 or
y+yMaxCurr < 0 or
b == bAdv or
r == rAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + r]);
outValue +=
xMinCurrFrac*yMaxCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMaxCurr > w-1 or
x+xMinCurr-1 < 0 or
y+yMaxCurr < 0 or
t == tAdv or
r == rAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + r]);
outValue +=
xMaxCurrFrac*yMinCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMinCurr-1 > w-1 or
x+xMaxCurr < 0 or
y+yMinCurr-1 < 0 or
b == bAdv or
l == lAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + lAdv]);
outValue +=
xMinCurrFrac*yMinCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMinCurr-1 > w-1 or
x+xMinCurr-1 < 0 or
y+yMinCurr-1 < 0 or
t == tAdv or
l == lAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + lAdv]);
*tmpArray = outValue;
}
}
extern "C"
void updateGradInputReplicateCuda(THCState *state,
const float *gradOutputIntData, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
THError("NYI");
// strided::updateGradInputPlanewiseFracCuda(
// gradOutputIntData, gradInputData, h, w, nWindows,
// xMin, xMax, yMin, yMax,
// gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
// strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
hipLaunchKernelGGL(( updateGradInputReplicateKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
gradOutputIntData, tmpArray,
batchSize, nInputPlane, nWindows,
h, w, xMin, xMax, yMin, yMax);
THCudaCheck(hipGetLastError());
}
extern "C"
void updateGradInputReplicateFracCuda(THCState *state,
const float *gradOutputIntData, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax,
const float *gradOutputData,
const int gradOutputStrideRow, const int gradOutputStrideChannel,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
THError("NYI");
// strided::updateGradInputPlanewiseFracCuda(
// gradOutputIntData, gradInputData, h, w, nWindows,
// xMin, xMax, yMin, yMax,
// gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
// strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
hipLaunchKernelGGL(( updateGradInputReplicateFracKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
gradOutputIntData, tmpArray,
batchSize, nInputPlane, nWindows,
h, w, xMin, xMax, yMin, yMax,
gradOutputData, gradOutputStrideRow, gradOutputStrideChannel);
THCudaCheck(hipGetLastError());
}
/************************ accGradParameters planewise ************************/
__global__ void xMaxDeltaIntegralReplicatePlanewiseFracKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
// const int xMinInt = (int)ceil(xMin[windowIdx]-1);
// const float xMinFrac = xMinInt-xMin[windowIdx]+1;
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const float yMinFrac = yMinInt-yMin[windowIdx]+1;
const int xMaxInt = (int)floor(xMax[windowIdx]);
// const float xMaxFrac = xMax[windowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[windowIdx]);
const float yMaxFrac = yMax[windowIdx]-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += blCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
__global__ void xMinDeltaIntegralReplicatePlanewiseFracKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
// const float xMinFrac = xMinInt-xMin[windowIdx]+1;
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const float yMinFrac = yMinInt-yMin[windowIdx]+1;
// const int xMaxInt = (int)floor(xMax[windowIdx]);
// const float xMaxFrac = xMax[windowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[windowIdx]);
const float yMaxFrac = yMax[windowIdx]-yMaxInt;
const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
// const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += tlCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
__global__ void yMaxDeltaIntegralReplicatePlanewiseFracKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax,
const float *inData, const int inDataStrideRow) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const float xMinFrac = xMinInt-xMin[windowIdx]+1;
// const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const float yMinFrac = yMinInt-yMin[windowIdx]+1;
const int xMaxInt = (int)floor(xMax[windowIdx]);
const float xMaxFrac = xMax[windowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[windowIdx]);
// const float yMaxFrac = yMax[windowIdx]-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
// const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
__global__ void yMinDeltaIntegralReplicatePlanewiseFracKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin,
const float *inData, const int inDataStrideRow) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const float xMinFrac = xMinInt-xMin[windowIdx]+1;
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const float yMinFrac = yMinInt-yMin[windowIdx]+1;
const int xMaxInt = (int)floor(xMax[windowIdx]);
const float xMaxFrac = xMax[windowIdx]-xMaxInt;
// const int yMaxInt = (int)floor(yMax[windowIdx]);
// const float yMaxFrac = yMax[windowIdx]-yMaxInt;
const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
// const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
extern "C"
void backwardReplicatePlanewiseFracCuda(THCState *state,
float *intData, float *tmpArray,
int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
float *inData, int inDataStrideRow,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::backwardReplicateFracCuda(
intData, tmpArray, nWindows, h, w,
xMin, xMax, yMin, yMax, inData, inDataStrideRow,
strideH, strideW);
return;
}
dim3 dimBlock(NUM_THREADS);
dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x);
hipLaunchKernelGGL(( xMaxDeltaIntegralReplicatePlanewiseFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
intData, tmpArray + 0*nWindows*h*w, nWindows, h, w,
xMax, yMin, yMax, inData, inDataStrideRow);
hipLaunchKernelGGL(( xMinDeltaIntegralReplicatePlanewiseFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
intData, tmpArray + 1*nWindows*h*w, nWindows, h, w,
xMin, yMin, yMax, inData, inDataStrideRow);
hipLaunchKernelGGL(( yMaxDeltaIntegralReplicatePlanewiseFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
intData, tmpArray + 2*nWindows*h*w, nWindows, h, w,
xMin, xMax, yMax, inData, inDataStrideRow);
hipLaunchKernelGGL(( yMinDeltaIntegralReplicatePlanewiseFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
intData, tmpArray + 3*nWindows*h*w, nWindows, h, w,
xMin, xMax, yMin, inData, inDataStrideRow);
THCudaCheck(hipGetLastError());
}
__global__ void xMaxDeltaIntegralReplicatePlanewiseKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
// const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(1,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(1,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
__global__ void xMinDeltaIntegralReplicatePlanewiseKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
__global__ void yMaxDeltaIntegralReplicatePlanewiseKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
// const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(1,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(1,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
__global__ void yMinDeltaIntegralReplicatePlanewiseKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
// const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
extern "C"
void backwardReplicatePlanewiseCuda(THCState *state,
float *intData, float *tmpArray,
int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::backwardReplicateCuda(
intData, tmpArray, nWindows, h, w,
xMin, xMax, yMin, yMax, strideH, strideW);
return;
}
dim3 dimBlock(NUM_THREADS);
dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x);
hipLaunchKernelGGL(( xMaxDeltaIntegralReplicatePlanewiseKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
intData, tmpArray + 0*nWindows*h*w,
nWindows, h, w, xMax, yMin, yMax);
hipLaunchKernelGGL(( xMinDeltaIntegralReplicatePlanewiseKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
intData, tmpArray + 1*nWindows*h*w,
nWindows, h, w, xMin, yMin, yMax);
hipLaunchKernelGGL(( yMaxDeltaIntegralReplicatePlanewiseKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
intData, tmpArray + 2*nWindows*h*w,
nWindows, h, w, xMin, xMax, yMax);
hipLaunchKernelGGL(( yMinDeltaIntegralReplicatePlanewiseKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
intData, tmpArray + 3*nWindows*h*w,
nWindows, h, w, xMin, xMax, yMin);
THCudaCheck(hipGetLastError());
}
/************************ accGradParameters fastest *********************/
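// Each *DeltaIntegralReplicateFrac kernel computes, for one output pixel, the
// derivative of the box sum with respect to a single window parameter
// (xMin, xMax, yMin or yMax): four integral-image lookups along the moving
// border plus fractional corner terms taken from inData.
// backwardReplicateFracCuda below dispatches on paramId
// (0: xMin, 1: xMax, 2: yMin, 3: yMax).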
__global__ void xMaxDeltaIntegralReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
// const float xMinFrac = xMinInt-xMin[globalWindowIdx]+1;
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const float yMinFrac = yMinInt-yMin[globalWindowIdx]+1;
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
// const float xMaxFrac = xMax[globalWindowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
const float yMaxFrac = yMax[globalWindowIdx]-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += blCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
*tmpArray = delta;
}
}
template <bool inputIsOnes>
__global__ void xMinDeltaIntegralReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
if (not inputIsOnes) inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
// const float xMinFrac = xMinInt-xMin[globalWindowIdx]+1;
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const float yMinFrac = yMinInt-yMin[globalWindowIdx]+1;
// const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
// const float xMaxFrac = xMax[globalWindowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
const float yMaxFrac = yMax[globalWindowIdx]-yMaxInt;
int valid;
valid = not (y+yMinInt < 1) & not (x+xMinInt < 1);
const float tlCorner = valid * (inputIsOnes ? 1 :
inData[(max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))) * valid]);
// valid = not (y+yMinInt < 1) & not (x+xMaxInt >= h);
// const float blCorner = valid * (inputIsOnes ? 1 :
// inData[(max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))) * valid]);
valid = not (y+yMaxInt >= w) & not (x+xMinInt < 1);
const float trCorner = valid * (inputIsOnes ? 1 :
inData[(max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))) * valid]);
// valid = not (y+yMaxInt >= w) & not (x+xMaxInt >= h);
// const float brCorner = valid * (inputIsOnes ? 1 :
// inData[(max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))) * valid]);
float delta = 0;
delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += tlCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1) & (x+xMinInt < h);
*tmpArray = -delta;
}
}
template __global__ void xMinDeltaIntegralReplicateFracKernel<false>(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel);
template __global__ void xMinDeltaIntegralReplicateFracKernel<true>(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel);
__global__ void yMaxDeltaIntegralReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const float xMinFrac = xMinInt-xMin[globalWindowIdx]+1;
// const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
// const float yMinFrac = yMinInt-yMin[globalWindowIdx]+1;
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const float xMaxFrac = xMax[globalWindowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
// const float yMaxFrac = yMax[globalWindowIdx]-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
// const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
*tmpArray = delta;
}
}
__global__ void yMinDeltaIntegralReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const float xMinFrac = xMinInt-xMin[globalWindowIdx]+1;
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
// const float yMinFrac = yMinInt-yMin[globalWindowIdx]+1;
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const float xMaxFrac = xMax[globalWindowIdx]-xMaxInt;
// const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
// const float yMaxFrac = yMax[globalWindowIdx]-yMaxInt;
const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
// const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
*tmpArray = -delta;
}
}
extern "C"
void backwardReplicateFracCuda(THCState *state, const int paramId, const bool inputIsOnes,
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin, const float *yMax,
const float *inData, int inDataStrideRow, const int inDataStrideChannel,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
THError("NYI");
// strided::backwardFracCuda(
// intData, tmpArray, nWindows, h, w,
// xMin, xMax, yMin, yMax, inData, inDataStrideRow,
// strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
switch (paramId) {
case 0:
if (inputIsOnes)
hipLaunchKernelGGL(( xMinDeltaIntegralReplicateFracKernel <true>) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel);
else
hipLaunchKernelGGL(( xMinDeltaIntegralReplicateFracKernel <false>) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel);
break;
case 1:
hipLaunchKernelGGL(( xMaxDeltaIntegralReplicateFracKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMax, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel); break;
case 2:
hipLaunchKernelGGL(( yMinDeltaIntegralReplicateFracKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin,
inData, inDataStrideRow, inDataStrideChannel); break;
case 3:
hipLaunchKernelGGL(( yMaxDeltaIntegralReplicateFracKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMax,
inData, inDataStrideRow, inDataStrideChannel); break;
}
THCudaCheck(hipGetLastError());
}
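// The non-Frac kernels below compute the same border derivatives without the
// fractional strip/corner corrections; presumably they serve the
// integer-coordinate ("exact") code path.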
__global__ void xMaxDeltaIntegralReplicateKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
*tmpArray = delta;
}
}
__global__ void xMinDeltaIntegralReplicateKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
// const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
*tmpArray = -delta;
}
}
__global__ void yMaxDeltaIntegralReplicateKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
// const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
*tmpArray = delta;
}
}
__global__ void yMinDeltaIntegralReplicateKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
// const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
*tmpArray = -delta;
}
}
extern "C"
void backwardReplicateCuda(THCState *state, const int paramId,
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin, const float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
THError("NYI");
// strided::backwardFracCuda(
// intData, tmpArray, nWindows, h, w,
// xMin, xMax, yMin, yMax, inData, inDataStrideRow,
// strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
switch (paramId) {
case 0:
hipLaunchKernelGGL(( xMinDeltaIntegralReplicateKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, yMin, yMax); break;
case 1:
hipLaunchKernelGGL(( xMaxDeltaIntegralReplicateKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMax, yMin, yMax); break;
case 2:
hipLaunchKernelGGL(( yMinDeltaIntegralReplicateKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin); break;
case 3:
hipLaunchKernelGGL(( yMaxDeltaIntegralReplicateKernel) , dim3(numBlocks), dim3(NUM_THREADS), 0, THCState_getCurrentStream(state),
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMax); break;
}
THCudaCheck(hipGetLastError());
}
/************************ Other stuff ************************/
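// dirtyFixWindows clamps window parameters into the valid range
// ([-h+1, h-1] for x, [-w+1, w-1] for y) and, when a window collapses below
// minWidth, re-expands it symmetrically around its center. The 0.99/0.9
// slack constants appear to be empirical.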
__global__ void dirtyFixWindowsKernel(
float *xMin, float *xMax, float *yMin, float *yMax,
const int size, const float h, const float w, const float minWidth) {
int idx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (idx < 2*size) {
float paramMin, paramMax;
if (idx < size) {
paramMin = max(-h+1, min(h-1, xMin[idx]));
paramMax = max(-h+1, min(h-1, xMax[idx]));
if (paramMin + minWidth - 0.99 > paramMax) {
const float mean = 0.5 * (paramMin + paramMax);
paramMin = mean - 0.5 * (minWidth - 0.9);
paramMax = mean + 0.5 * (minWidth - 0.9);
}
xMin[idx] = paramMin;
xMax[idx] = paramMax;
} else {
idx -= size;
paramMin = max(-w+1, min(w-1, yMin[idx]));
paramMax = max(-w+1, min(w-1, yMax[idx]));
if (paramMin + minWidth - 0.99 > paramMax) {
const float mean = 0.5 * (paramMin + paramMax);
paramMin = mean - 0.5 * (minWidth - 0.9);
paramMax = mean + 0.5 * (minWidth - 0.9);
}
yMin[idx] = paramMin;
yMax[idx] = paramMax;
}
}
}
extern "C"
void dirtyFixWindows(THCState *state,
float *xMin, float *xMax, float *yMin, float *yMax,
int size, int h, int w, float minWidth) {
dim3 dimBlock(NUM_THREADS);
dim3 dimGrid((2*size + dimBlock.x - 1) / dimBlock.x);
hipLaunchKernelGGL(( dirtyFixWindowsKernel) , dim3(dimGrid), dim3(dimBlock), 0, THCState_getCurrentStream(state),
xMin, xMax, yMin, yMax, size, (float)h, (float)w, minWidth);
THCudaCheck(hipGetLastError());
}
| 3c6ce5fe17741aae20ce3d1dd79c811cba7fb1b4.cu | #include <iostream>
#include <stdio.h>
#include <assert.h>
#include <algorithm>
#include <cmath>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <THC/THC.h>
#include "integral-strided-cuda.hpp"
#define NUM_THREADS 256
#define BLOCK_SIZE 4
#define BLOCK_CHANNELS (NUM_THREADS / (BLOCK_SIZE * BLOCK_SIZE))
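// NUM_THREADS is the 1D block size used by most kernels in this file;
// BLOCK_SIZE x BLOCK_SIZE x BLOCK_CHANNELS is the 3D block shape used by the
// planewise updateGradInput kernels.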
using std::max;
using std::min;
using std::floor;
using std::ceil;
// TODO remove this code
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
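// With CUDA_ERROR_CHECK defined, CudaSafeCall/CudaCheckError abort on the
// first CUDA error; CudaCheckError additionally synchronizes the device,
// which is expensive (hence the TODO above).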
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
/************************ Integral image computation ************************/
__global__ void accumulateRowsKernel(
const float * __restrict__ input, float * __restrict__ output,
const int channels, const int h, const int w);
__global__ void accumulateColsKernel(
const float * __restrict__ input, float * __restrict__ output,
const int channels, const int h, const int w);
__global__ void accumulateColsInplaceKernel(
float * __restrict__ input, const int channels, const int h, const int w);
__global__ void accumulateColsInplaceTransposedKernel(
float * __restrict__ input, const int channels, const int h, const int w);
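// integralImageCuda builds an (h+1) x (w+1) integral image per channel in
// four steps: (1) accumulateColsKernel computes per-column prefix sums of
// `input` into `output`, (2) cublasSgeam transposes `output` into `tmp`,
// (3) accumulateColsInplaceTransposedKernel accumulates the former rows in
// place, (4) a second cublasSgeam transposes the result back into `output`.
// Row 0 and column 0 of every channel plane end up all zeros.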
extern "C"
void integralImageCuda(THCState *state,
float *input, float *output, int channels, int h, int w, float *tmp) {
// input : (channels) x (h) x (w), contiguous
// output: (channels) x (h+1) x (w+1), contiguous
// tmp : at least (channels) * (h+1) * (w+1)
int blockSize1D, gridSize1D;
const float ONE = 1.0, ZERO = 0.0;
cublasSetStream(THCState_getCurrentBlasHandle(state), THCState_getCurrentStream(state));
// Compute prefix sums of columns, `input` -> `output`
// (channels) x (h) x (w) ==> (channels) x (h+1) x (w+1)
// Note: output[:,:,0] remains uninitialized
int totalCols = channels * w;
blockSize1D = NUM_THREADS;
gridSize1D = (totalCols + blockSize1D - 1) / blockSize1D;
accumulateColsKernel <<<gridSize1D, blockSize1D, 0, THCState_getCurrentStream(state)>>>
(input, output, channels, h, w);
THCudaCheck(cudaGetLastError());
// transpose, `output` -> `tmp`
// (channels) x (h+1) x (w+1) ==> (w+1) x (channels) x (h+1)
THCublasCheck(cublasSgeam(
THCState_getCurrentBlasHandle(state),
CUBLAS_OP_T, CUBLAS_OP_N, channels * (h+1), w+1,
&ONE, output, w+1,
&ZERO, tmp, channels * (h+1),
tmp, channels * (h+1)));
// Compute prefix sums of columns (former rows), `tmp` -> `tmp`
// (w+1) x (channels) x (h+1) ==> (w+1) x (channels) x (h+1)
int totalRows = channels * h; // actually, number of cols in (w+1) x (channels * (h+1)) image
blockSize1D = NUM_THREADS;
gridSize1D = (totalRows + blockSize1D - 1) / blockSize1D;
accumulateColsInplaceTransposedKernel
<<<gridSize1D, blockSize1D, 0, THCState_getCurrentStream(state)>>> (tmp, channels, h, w);
THCudaCheck(cudaGetLastError());
// transpose, `tmp` -> `output`
// (w+1) x (channels) x (h+1) ==> (channels) x (h+1) x (w+1)
THCublasCheck(cublasSgeam(
THCState_getCurrentBlasHandle(state),
CUBLAS_OP_T, CUBLAS_OP_N, w+1, channels * (h+1),
&ONE, tmp, channels * (h+1),
&ZERO, output, w+1,
output, w+1));
}
__global__ void accumulateRowsKernel(
const float * __restrict__ input, float * __restrict__ output,
const int channels, const int h, const int w) {
// view multichannel image as a multiline single-channel image
int globalRowIdx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (globalRowIdx < channels * h) {
float *outputRow = output + (globalRowIdx + globalRowIdx / h + 1) * (w+1) + 1;
outputRow[-1] = 0;
double sum = 0;
for (int i = 0; i < w; ++i) {
sum += input[globalRowIdx * w + i];
outputRow[i] = static_cast<float>(sum);
}
// need to zero the (0,0) corner of the output separately >:(
output[(globalRowIdx / h) * (w+1) * (h+1)] = 0;
}
}
__global__ void accumulateColsKernel(
const float * __restrict__ input, float * __restrict__ output,
const int channels, const int h, const int w) {
// input : (channels * h) x (w)
// output: (channels * (h+1)) x (w+1) -- first column remains untouched
// global column index (of total `channels * w` columns in this image):
const int globalColIdx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (globalColIdx < channels * w) {
const int channelIdx = globalColIdx / w;
const int colIdx = globalColIdx - channelIdx * w;
// jump to the channel of interest:
int inputPos = channelIdx * h * w + colIdx;
// (let local columns be 1-indexed: 0-th output column is always zero)
int outputPos = channelIdx * (h+1) * (w+1) + colIdx + 1;
output[outputPos] = 0; // 0-th element of every column is always zero
double sum = 0;
for (int i = 1; i <= h; ++i) {
sum += static_cast<double>(input[inputPos + (i-1) * w]);
output[outputPos + i * (w+1)] = static_cast<float>(sum);
}
}
}
__global__ void accumulateColsInplaceTransposedKernel(
float * __restrict__ input, const int channels, const int h, const int w) {
// in-place.
// input: (w+1) x (channels * (h+1))
// global column index (of total `channels * w` columns in this image):
const int globalColIdx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (globalColIdx < channels * h) {
const int channelIdx = globalColIdx / h;
// add `channelIdx + 1` to account for one extra column in each horizontally stacked image
const int colIdx = globalColIdx + channelIdx + 1;
// need to zero the (0,0) corner of the output separately >:(
input[channelIdx * (h+1)] = 0;
input[colIdx] = 0; // first element of every column is always zero
double sum = 0;
for (int i = 1; i <= w; ++i) {
float *currentElement = &input[i * channels * (h+1) + colIdx];
sum += static_cast<double>(*currentElement);
*currentElement = static_cast<float>(sum);
}
}
}
__global__ void accumulateColsInplaceKernel(
float * __restrict__ input, const int channels, const int h, const int w) {
// in-place.
// input is already a `channels * (h+1) x (w+1)` array
// global column index (of all `channels * w` columns in this image)
int colIdx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (colIdx < channels * w) {
input += (colIdx / w) * (h+1) * (w+1); // jump to current channel
colIdx %= w; // switch to local column index,
++colIdx; // it's 1-indexed because first output column is always zero
input[colIdx] = 0; // first element of every column is always zero
double sum = 0;
for (int i = 1; i <= h; ++i) {
float *currentElement = &input[i * (w+1) + colIdx];
sum += static_cast<double>(*currentElement);
*currentElement = static_cast<float>(sum);
}
}
}
/************************ updateOutput ************************/
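// Forward pass: each thread produces one output pixel as a box sum read from
// the integral image (four lookups). The Frac variant additionally weights
// one-pixel-wide border strips and the four corner pixels of the window by
// the fractional parts of xMin/xMax/yMin/yMax, so non-integer box
// coordinates are handled smoothly.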
__global__ void forwardNoNormReplicateKernel(
const float *intData, const int intDataStrideChannel, float *outData,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
outData += id; // outData now points to our output pixel
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
        // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV-style
        // `integral()` indexing: I(x,0) and I(0,y) are always 0
        // (i.e. the integral image is a C-style prefix sum).
        // When computing sums we also subtract values at points like
        // y+yMin-1 and x+xMin-1, so for xMin and yMin the +1 and -1
        // cancel; only xMax/yMax get the explicit +1 below.
const int xMinCurr = (int)ceil(xMin[globalWindowIdx]);
const int yMinCurr = (int)ceil(yMin[globalWindowIdx]);
const int xMaxCurr = (int)floor(xMax[globalWindowIdx]) + 1;
const int yMaxCurr = (int)floor(yMax[globalWindowIdx]) + 1;
const int t = max(0, min(x+xMinCurr, h-1) );
const int b = max(1, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w-1) );
const int r = max(1, min(y+yMaxCurr, w) );
float outValue = 0;
outValue += intData[b*(w+1) + r];
outValue -= intData[t*(w+1) + r];
outValue -= intData[b*(w+1) + l];
outValue += intData[t*(w+1) + l];
*outData = outValue;
}
}
__global__ void forwardNoNormReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *outData,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
outData += id; // outData now points to our output pixel
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int windowIdx = id % nWindows; id /= nWindows;
    // `id` is now the current global input plane number
intData += id * intDataStrideChannel;
inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
        // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV-style
        // `integral()` indexing: I(x,0) and I(0,y) are always 0
        // (i.e. the integral image is a C-style prefix sum).
        // When computing sums we also subtract values at points like
        // y+yMin-1 and x+xMin-1, so for xMin and yMin the +1 and -1
        // cancel; only xMax/yMax get the explicit +1 below.
const int xMinCurr = (int)ceil(xMin[globalWindowIdx]);
const float xMinCurrFrac = (float)xMinCurr - xMin[globalWindowIdx];
const int yMinCurr = (int)ceil(yMin[globalWindowIdx]);
const float yMinCurrFrac = (float)yMinCurr - yMin[globalWindowIdx];
const float xMaxCurrFrac = xMax[globalWindowIdx] - floor(xMax[globalWindowIdx]);
const int xMaxCurr = (int)floor(xMax[globalWindowIdx]) + 1;
const float yMaxCurrFrac = yMax[globalWindowIdx] - floor(yMax[globalWindowIdx]);
const int yMaxCurr = (int)floor(yMax[globalWindowIdx]) + 1;
const int t = max(0, min(x+xMinCurr, h-1) );
const int b = max(1, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w-1) );
const int r = max(1, min(y+yMaxCurr, w) );
const int bAdv = max(1, min(x+xMaxCurr+1, h ));
const int rAdv = max(1, min(y+yMaxCurr+1, w ));
const int tAdv = max(0, min(x+xMinCurr-1, h-1));
const int lAdv = max(0, min(y+yMinCurr-1, w-1));
float outValue = 0;
outValue += intData[b*(w+1) + r];
outValue -= intData[t*(w+1) + r];
outValue -= intData[b*(w+1) + l];
outValue += intData[t*(w+1) + l];
// -- xMax border
outValue +=
( intData[bAdv*(w+1) + r]
- intData[b *(w+1) + r]
- intData[bAdv*(w+1) + l]
+ intData[b *(w+1) + l]) * xMaxCurrFrac;
// -- yMax border
outValue +=
( intData[b*(w+1) + rAdv]
- intData[b*(w+1) + r ]
- intData[t*(w+1) + rAdv]
+ intData[t*(w+1) + r ]) * yMaxCurrFrac;
// -- xMin border
outValue +=
( intData[t *(w+1) + r]
- intData[tAdv*(w+1) + r]
- intData[t *(w+1) + l]
+ intData[tAdv*(w+1) + l]) * xMinCurrFrac;
// -- yMin border
outValue +=
( intData[b*(w+1) + l ]
- intData[b*(w+1) + lAdv]
- intData[t*(w+1) + l ]
+ intData[t*(w+1) + lAdv]) * yMinCurrFrac;
// -- corner pixels
bool cornerIsValid;
cornerIsValid = not (
(x+xMaxCurr > h-1) |
(y+yMaxCurr > w-1) |
(x+xMaxCurr <= 0) |
(y+yMaxCurr <= 0));
outValue +=
xMaxCurrFrac * yMaxCurrFrac *
cornerIsValid *
inData[((x+xMaxCurr) * inDataStrideRow + (y+yMaxCurr)) * cornerIsValid];
cornerIsValid = not (
(x+xMinCurr-1 >= h-1) |
(y+yMaxCurr > w-1) |
(x+xMinCurr-1 < 0) |
(y+yMaxCurr <= 0));
outValue +=
xMinCurrFrac * yMaxCurrFrac *
cornerIsValid *
inData[((x+xMinCurr-1) * inDataStrideRow + (y+yMaxCurr)) * cornerIsValid];
cornerIsValid = not (
(x+xMaxCurr > h-1) |
(y+yMinCurr-1 >= w-1) |
(x+xMaxCurr <= 0) |
(y+yMinCurr-1 < 0));
outValue +=
xMaxCurrFrac * yMinCurrFrac *
cornerIsValid *
inData[((x+xMaxCurr) * inDataStrideRow + (y+yMinCurr-1)) * cornerIsValid];
cornerIsValid = not (
(x+xMinCurr-1 >= h-1) |
(y+yMinCurr-1 >= w-1) |
(x+xMinCurr-1 < 0) |
(y+yMinCurr-1 < 0));
outValue +=
xMinCurrFrac * yMinCurrFrac *
cornerIsValid *
inData[((x+xMinCurr-1) * inDataStrideRow + (y+yMinCurr-1)) * cornerIsValid];
*outData = outValue;
}
}
extern "C"
void forwardNoNormReplicateCuda(THCState *state,
float *intData, int intDataStrideChannel, float *outData,
int batchSize, int nInputPlane, int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::forwardNoNormReplicateCuda(state,
intData, intDataStrideChannel, outData,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin, yMax,
strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
forwardNoNormReplicateKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, outData,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin, yMax);
THCudaCheck(cudaGetLastError());
}
extern "C"
void forwardNoNormReplicateFracCuda(THCState *state,
float *intData, int intDataStrideChannel, float *outData,
int batchSize, int nInputPlane, int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
float *inData, int inDataStrideRow, int inDataStrideChannel,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::forwardNoNormReplicateFracCuda(state,
intData, intDataStrideChannel, outData,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel,
strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
forwardNoNormReplicateFracKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, outData,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel);
THCudaCheck(cudaGetLastError());
}
/************************ updateGradInput ************************/
/************** Planewise *************/
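// Planewise variant: a 2D thread grid covers one (h x w) plane and each
// thread loops over all nWindows gradient channels, accumulating the
// backprojected box sums from the gradOutput integral image.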
__global__ void updateGradInputReplicatePlanewiseKernel(
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
xMinCurr = (int)ceil(-xMax[windowIdx]);
yMinCurr = (int)ceil(-yMax[windowIdx]);
xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
}
gradInputData[x*w + y] = outValue;
}
}
__global__ void updateGradInputReplicatePlanewiseFracKernel(
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
xMinCurr = (int)ceil(-xMax[windowIdx]);
yMinCurr = (int)ceil(-yMax[windowIdx]);
const float xMinCurrFrac = (float)xMinCurr + xMax[windowIdx];
const float yMinCurrFrac = (float)yMinCurr + yMax[windowIdx];
xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
const float xMaxCurrFrac = -xMin[windowIdx] + 1 - xMaxCurr;
const float yMaxCurrFrac = -yMin[windowIdx] + 1 - yMaxCurr;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t;
const int bAdv = x+xMaxCurr >= 0 ? max(0, min(b+1, h)) : b;
const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l;
const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r;
// TODO: 1D grid
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// -- xMax border
outValue +=
( gradOutputIntData[bAdv*(w+1) + r]
- gradOutputIntData[b *(w+1) + r]
- gradOutputIntData[bAdv*(w+1) + l]
+ gradOutputIntData[b *(w+1) + l]
) * xMaxCurrFrac;
// -- yMax border
outValue +=
( gradOutputIntData[b*(w+1) + rAdv]
- gradOutputIntData[b*(w+1) + r ]
- gradOutputIntData[t*(w+1) + rAdv]
+ gradOutputIntData[t*(w+1) + r ]
) * yMaxCurrFrac;
// -- xMin border
outValue +=
( gradOutputIntData[t *(w+1) + r]
- gradOutputIntData[tAdv*(w+1) + r]
- gradOutputIntData[t *(w+1) + l]
+ gradOutputIntData[tAdv*(w+1) + l]
) * xMinCurrFrac;
// -- yMin border
outValue +=
( gradOutputIntData[b*(w+1) + l ]
- gradOutputIntData[b*(w+1) + lAdv]
- gradOutputIntData[t*(w+1) + l ]
+ gradOutputIntData[t*(w+1) + lAdv]
) * yMinCurrFrac;
// -- corner pixels
outValue +=
xMaxCurrFrac*yMaxCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMaxCurr > w-1 or
x+xMaxCurr < 0 or
y+yMaxCurr < 0 or
b == bAdv or
r == rAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + r]);
outValue +=
xMinCurrFrac*yMaxCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMaxCurr > w-1 or
x+xMinCurr-1 < 0 or
y+yMaxCurr < 0 or
t == tAdv or
r == rAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + r]);
outValue +=
xMaxCurrFrac*yMinCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMinCurr-1 > w-1 or
x+xMaxCurr < 0 or
y+yMinCurr-1 < 0 or
b == bAdv or
l == lAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + lAdv]);
outValue +=
xMinCurrFrac*yMinCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMinCurr-1 > w-1 or
x+xMinCurr-1 < 0 or
y+yMinCurr-1 < 0 or
t == tAdv or
l == lAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + lAdv]);
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
gradOutputData += gradOutputStrideChannel;
}
gradInputData[x*w + y] = outValue;
}
}
extern "C"
void updateGradInputReplicatePlanewiseCuda(THCState *state,
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::updateGradInputReplicatePlanewiseCuda(
gradOutputIntData, gradInputData, h, w, nWindows,
xMin, xMax, yMin, yMax, strideH, strideW);
return;
}
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
dim3 dimGrid(
(h + dimBlock.x - 1) / dimBlock.x,
(w + dimBlock.y - 1) / dimBlock.y);
updateGradInputReplicatePlanewiseKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
gradOutputIntData, gradInputData,
h, w, nWindows,
xMin, xMax, yMin, yMax);
THCudaCheck(cudaGetLastError());
}
extern "C"
void updateGradInputReplicatePlanewiseFracCuda(THCState *state,
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::updateGradInputReplicatePlanewiseFracCuda(
gradOutputIntData, gradInputData, h, w, nWindows,
xMin, xMax, yMin, yMax,
gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
strideH, strideW);
return;
}
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
dim3 dimGrid(
(h + dimBlock.x - 1) / dimBlock.x,
(w + dimBlock.y - 1) / dimBlock.y);
updateGradInputReplicatePlanewiseFracKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
gradOutputIntData, gradInputData,
h, w, nWindows,
xMin, xMax, yMin, yMax,
gradOutputData, gradOutputStrideRow, gradOutputStrideChannel);
THCudaCheck(cudaGetLastError());
}
/****************** Single-kernel updateGradInput (faster) **************/
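// Faster variant: instead of looping over windows inside the kernel, one
// thread is launched per (batch, plane, window, x, y) element and writes its
// partial result into tmpArray; the reduction over the window dimension is
// presumably done elsewhere on the host side.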
__global__ void updateGradInputReplicateKernel(
const float *gradOutputIntData, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows,
const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int globalWindowIdx = id % (nInputPlane * nWindows);
// `id` is now the current plane number
gradOutputIntData += id * (w+1) * (h+1);
if (id < batchSize * nInputPlane * nWindows) {
float outValue = 0;
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
xMinCurr = (int)ceil(-xMax[globalWindowIdx]);
yMinCurr = (int)ceil(-yMax[globalWindowIdx]);
xMaxCurr = (int)floor(-xMin[globalWindowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[globalWindowIdx]) + 1;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
*tmpArray = outValue;
}
}
__global__ void updateGradInputReplicateFracKernel(
const float *gradOutputIntData, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows,
const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax,
const float *gradOutputData,
const int gradOutputStrideRow, const int gradOutputStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int globalWindowIdx = id % (nInputPlane * nWindows);
// `id` is now the current plane number
gradOutputIntData += id * (w+1) * (h+1);
gradOutputData += id * gradOutputStrideChannel;
if (id < batchSize * nInputPlane * nWindows) {
float outValue = 0;
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
xMinCurr = (int)ceil(-xMax[globalWindowIdx]);
yMinCurr = (int)ceil(-yMax[globalWindowIdx]);
const float xMinCurrFrac = (float)xMinCurr + xMax[globalWindowIdx];
const float yMinCurrFrac = (float)yMinCurr + yMax[globalWindowIdx];
xMaxCurr = (int)floor(-xMin[globalWindowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[globalWindowIdx]) + 1;
const float xMaxCurrFrac = -xMin[globalWindowIdx] + 1 - xMaxCurr;
const float yMaxCurrFrac = -yMin[globalWindowIdx] + 1 - yMaxCurr;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t;
const int bAdv = x+xMaxCurr >= 0 ? max(0, min(b+1, h)) : b;
const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l;
const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r;
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// -- xMax border
outValue +=
( gradOutputIntData[bAdv*(w+1) + r]
- gradOutputIntData[b *(w+1) + r]
- gradOutputIntData[bAdv*(w+1) + l]
+ gradOutputIntData[b *(w+1) + l]
) * xMaxCurrFrac;
// -- yMax border
outValue +=
( gradOutputIntData[b*(w+1) + rAdv]
- gradOutputIntData[b*(w+1) + r ]
- gradOutputIntData[t*(w+1) + rAdv]
+ gradOutputIntData[t*(w+1) + r ]
) * yMaxCurrFrac;
// -- xMin border
outValue +=
( gradOutputIntData[t *(w+1) + r]
- gradOutputIntData[tAdv*(w+1) + r]
- gradOutputIntData[t *(w+1) + l]
+ gradOutputIntData[tAdv*(w+1) + l]
) * xMinCurrFrac;
// -- yMin border
outValue +=
( gradOutputIntData[b*(w+1) + l ]
- gradOutputIntData[b*(w+1) + lAdv]
- gradOutputIntData[t*(w+1) + l ]
+ gradOutputIntData[t*(w+1) + lAdv]
) * yMinCurrFrac;
// -- corner pixels
outValue +=
xMaxCurrFrac*yMaxCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMaxCurr > w-1 or
x+xMaxCurr < 0 or
y+yMaxCurr < 0 or
b == bAdv or
r == rAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + r]);
outValue +=
xMinCurrFrac*yMaxCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMaxCurr > w-1 or
x+xMinCurr-1 < 0 or
y+yMaxCurr < 0 or
t == tAdv or
r == rAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + r]);
outValue +=
xMaxCurrFrac*yMinCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMinCurr-1 > w-1 or
x+xMaxCurr < 0 or
y+yMinCurr-1 < 0 or
b == bAdv or
l == lAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + lAdv]);
outValue +=
xMinCurrFrac*yMinCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMinCurr-1 > w-1 or
x+xMinCurr-1 < 0 or
y+yMinCurr-1 < 0 or
t == tAdv or
l == lAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + lAdv]);
*tmpArray = outValue;
}
}
extern "C"
void updateGradInputReplicateCuda(THCState *state,
const float *gradOutputIntData, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
THError("NYI");
// strided::updateGradInputPlanewiseFracCuda(
// gradOutputIntData, gradInputData, h, w, nWindows,
// xMin, xMax, yMin, yMax,
// gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
// strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
updateGradInputReplicateKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
gradOutputIntData, tmpArray,
batchSize, nInputPlane, nWindows,
h, w, xMin, xMax, yMin, yMax);
THCudaCheck(cudaGetLastError());
}
extern "C"
void updateGradInputReplicateFracCuda(THCState *state,
const float *gradOutputIntData, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *const xMin, const float *const xMax,
const float *const yMin, const float *const yMax,
const float *gradOutputData,
const int gradOutputStrideRow, const int gradOutputStrideChannel,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
THError("NYI");
// strided::updateGradInputPlanewiseFracCuda(
// gradOutputIntData, gradInputData, h, w, nWindows,
// xMin, xMax, yMin, yMax,
// gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
// strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
updateGradInputReplicateFracKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
gradOutputIntData, tmpArray,
batchSize, nInputPlane, nWindows,
h, w, xMin, xMax, yMin, yMax,
gradOutputData, gradOutputStrideRow, gradOutputStrideChannel);
THCudaCheck(cudaGetLastError());
}
/************************ accGradParameters planewise ************************/
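// Planewise accGradParameters: the same border derivatives as the "fastest"
// kernels above, but computed per window plane; the result is multiplied
// into the existing tmpArray value rather than overwriting it.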
__global__ void xMaxDeltaIntegralReplicatePlanewiseFracKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
// const int xMinInt = (int)ceil(xMin[windowIdx]-1);
// const float xMinFrac = xMinInt-xMin[windowIdx]+1;
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const float yMinFrac = yMinInt-yMin[windowIdx]+1;
const int xMaxInt = (int)floor(xMax[windowIdx]);
// const float xMaxFrac = xMax[windowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[windowIdx]);
const float yMaxFrac = yMax[windowIdx]-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += blCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
__global__ void xMinDeltaIntegralReplicatePlanewiseFracKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
// const float xMinFrac = xMinInt-xMin[windowIdx]+1;
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const float yMinFrac = yMinInt-yMin[windowIdx]+1;
// const int xMaxInt = (int)floor(xMax[windowIdx]);
// const float xMaxFrac = xMax[windowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[windowIdx]);
const float yMaxFrac = yMax[windowIdx]-yMaxInt;
const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
// const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += tlCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
__global__ void yMaxDeltaIntegralReplicatePlanewiseFracKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax,
const float *inData, const int inDataStrideRow) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const float xMinFrac = xMinInt-xMin[windowIdx]+1;
// const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const float yMinFrac = yMinInt-yMin[windowIdx]+1;
const int xMaxInt = (int)floor(xMax[windowIdx]);
const float xMaxFrac = xMax[windowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[windowIdx]);
// const float yMaxFrac = yMax[windowIdx]-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
// const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
__global__ void yMinDeltaIntegralReplicatePlanewiseFracKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin,
const float *inData, const int inDataStrideRow) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const float xMinFrac = xMinInt-xMin[windowIdx]+1;
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const float yMinFrac = yMinInt-yMin[windowIdx]+1;
const int xMaxInt = (int)floor(xMax[windowIdx]);
const float xMaxFrac = xMax[windowIdx]-xMaxInt;
// const int yMaxInt = (int)floor(yMax[windowIdx]);
// const float yMaxFrac = yMax[windowIdx]-yMaxInt;
const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
// const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
extern "C"
void backwardReplicatePlanewiseFracCuda(THCState *state,
float *intData, float *tmpArray,
int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
float *inData, int inDataStrideRow,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::backwardReplicateFracCuda(
intData, tmpArray, nWindows, h, w,
xMin, xMax, yMin, yMax, inData, inDataStrideRow,
strideH, strideW);
return;
}
dim3 dimBlock(NUM_THREADS);
dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x);
xMaxDeltaIntegralReplicatePlanewiseFracKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
intData, tmpArray + 0*nWindows*h*w, nWindows, h, w,
xMax, yMin, yMax, inData, inDataStrideRow);
xMinDeltaIntegralReplicatePlanewiseFracKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
intData, tmpArray + 1*nWindows*h*w, nWindows, h, w,
xMin, yMin, yMax, inData, inDataStrideRow);
yMaxDeltaIntegralReplicatePlanewiseFracKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
intData, tmpArray + 2*nWindows*h*w, nWindows, h, w,
xMin, xMax, yMax, inData, inDataStrideRow);
yMinDeltaIntegralReplicatePlanewiseFracKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
intData, tmpArray + 3*nWindows*h*w, nWindows, h, w,
xMin, xMax, yMin, inData, inDataStrideRow);
THCudaCheck(cudaGetLastError());
}
__global__ void xMaxDeltaIntegralReplicatePlanewiseKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
// const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(1,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(1,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
__global__ void xMinDeltaIntegralReplicatePlanewiseKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
__global__ void yMaxDeltaIntegralReplicatePlanewiseKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
// const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(1,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(1,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
__global__ void yMinDeltaIntegralReplicatePlanewiseKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
// const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
extern "C"
void backwardReplicatePlanewiseCuda(THCState *state,
float *intData, float *tmpArray,
int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
strided::backwardReplicateCuda(
intData, tmpArray, nWindows, h, w,
xMin, xMax, yMin, yMax, strideH, strideW);
return;
}
dim3 dimBlock(NUM_THREADS);
dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x);
xMaxDeltaIntegralReplicatePlanewiseKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
intData, tmpArray + 0*nWindows*h*w,
nWindows, h, w, xMax, yMin, yMax);
xMinDeltaIntegralReplicatePlanewiseKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
intData, tmpArray + 1*nWindows*h*w,
nWindows, h, w, xMin, yMin, yMax);
yMaxDeltaIntegralReplicatePlanewiseKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
intData, tmpArray + 2*nWindows*h*w,
nWindows, h, w, xMin, xMax, yMax);
yMinDeltaIntegralReplicatePlanewiseKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
intData, tmpArray + 3*nWindows*h*w,
nWindows, h, w, xMin, xMax, yMin);
THCudaCheck(cudaGetLastError());
}
/************************ accGradParameters fastest *********************/
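// "Fastest" variants: unlike the planewise kernels above, these launch one thread per
// (batch, plane, window, pixel) element, address the window parameters through
// globalWindowIdx, and write the computed delta straight into tmpArray (*tmpArray = delta)
// instead of multiplying an existing value in place.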
__global__ void xMaxDeltaIntegralReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
// const float xMinFrac = xMinInt-xMin[globalWindowIdx]+1;
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const float yMinFrac = yMinInt-yMin[globalWindowIdx]+1;
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
// const float xMaxFrac = xMax[globalWindowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
const float yMaxFrac = yMax[globalWindowIdx]-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += blCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
*tmpArray = delta;
}
}
template <bool inputIsOnes>
__global__ void xMinDeltaIntegralReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
if (not inputIsOnes) inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
// const float xMinFrac = xMinInt-xMin[globalWindowIdx]+1;
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const float yMinFrac = yMinInt-yMin[globalWindowIdx]+1;
// const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
// const float xMaxFrac = xMax[globalWindowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
const float yMaxFrac = yMax[globalWindowIdx]-yMaxInt;
int valid;
valid = not (y+yMinInt < 1) & not (x+xMinInt < 1);
const float tlCorner = valid * (inputIsOnes ? 1 :
inData[(max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))) * valid]);
// valid = not (y+yMinInt < 1) & not (x+xMaxInt >= h);
// const float blCorner = valid * (inputIsOnes ? 1 :
// inData[(max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))) * valid]);
valid = not (y+yMaxInt >= w) & not (x+xMinInt < 1);
const float trCorner = valid * (inputIsOnes ? 1 :
inData[(max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))) * valid]);
// valid = not (y+yMaxInt >= w) & not (x+xMaxInt >= h);
// const float brCorner = valid * (inputIsOnes ? 1 :
// inData[(max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))) * valid]);
float delta = 0;
delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += tlCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1) & (x+xMinInt < h);
*tmpArray = -delta;
}
}
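// Explicit instantiations: the <true> specialization treats inData as all ones (the corner
// terms reduce to the validity mask), while <false> reads the actual input values.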
template __global__ void xMinDeltaIntegralReplicateFracKernel<false>(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel);
template __global__ void xMinDeltaIntegralReplicateFracKernel<true>(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel);
__global__ void yMaxDeltaIntegralReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const float xMinFrac = xMinInt-xMin[globalWindowIdx]+1;
// const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
// const float yMinFrac = yMinInt-yMin[globalWindowIdx]+1;
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const float xMaxFrac = xMax[globalWindowIdx]-xMaxInt;
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
// const float yMaxFrac = yMax[globalWindowIdx]-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
// const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
*tmpArray = delta;
}
}
__global__ void yMinDeltaIntegralReplicateFracKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin,
const float *inData, const int inDataStrideRow, const int inDataStrideChannel) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
inData += id * inDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const float xMinFrac = xMinInt-xMin[globalWindowIdx]+1;
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
// const float yMinFrac = yMinInt-yMin[globalWindowIdx]+1;
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const float xMaxFrac = xMax[globalWindowIdx]-xMaxInt;
// const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
// const float yMaxFrac = yMax[globalWindowIdx]-yMaxInt;
const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
// const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
*tmpArray = -delta;
}
}
extern "C"
void backwardReplicateFracCuda(THCState *state, const int paramId, const bool inputIsOnes,
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin, const float *yMax,
const float *inData, int inDataStrideRow, const int inDataStrideChannel,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
THError("NYI");
// strided::backwardFracCuda(
// intData, tmpArray, nWindows, h, w,
// xMin, xMax, yMin, yMax, inData, inDataStrideRow,
// strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
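// paramId selects which window border's delta kernel to launch:
// 0 -> xMin, 1 -> xMax, 2 -> yMin, 3 -> yMax.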
switch (paramId) {
case 0:
if (inputIsOnes)
xMinDeltaIntegralReplicateFracKernel <true> <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel);
else
xMinDeltaIntegralReplicateFracKernel <false> <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel);
break;
case 1:
xMaxDeltaIntegralReplicateFracKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMax, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel); break;
case 2:
yMinDeltaIntegralReplicateFracKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin,
inData, inDataStrideRow, inDataStrideChannel); break;
case 3:
yMaxDeltaIntegralReplicateFracKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMax,
inData, inDataStrideRow, inDataStrideChannel); break;
}
THCudaCheck(cudaGetLastError());
}
__global__ void xMaxDeltaIntegralReplicateKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
*tmpArray = delta;
}
}
__global__ void xMinDeltaIntegralReplicateKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
// const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
*tmpArray = -delta;
}
}
__global__ void yMaxDeltaIntegralReplicateKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
// const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
*tmpArray = delta;
}
}
__global__ void yMinDeltaIntegralReplicateKernel(
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
// const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
*tmpArray = -delta;
}
}
extern "C"
void backwardReplicateCuda(THCState *state, const int paramId,
const float *intData, const int intDataStrideChannel, float *tmpArray,
const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin, const float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
THError("NYI");
// strided::backwardFracCuda(
// intData, tmpArray, nWindows, h, w,
// xMin, xMax, yMin, yMax, inData, inDataStrideRow,
// strideH, strideW);
return;
}
const int threadsNeeded = batchSize * nInputPlane * nWindows * h * w;
const int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
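// Same paramId mapping as the fractional variant above:
// 0 -> xMin, 1 -> xMax, 2 -> yMin, 3 -> yMax.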
switch (paramId) {
case 0:
xMinDeltaIntegralReplicateKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, yMin, yMax); break;
case 1:
xMaxDeltaIntegralReplicateKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMax, yMin, yMax); break;
case 2:
yMinDeltaIntegralReplicateKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMin); break;
case 3:
yMaxDeltaIntegralReplicateKernel <<<numBlocks, NUM_THREADS, 0, THCState_getCurrentStream(state)>>> (
intData, intDataStrideChannel, tmpArray,
batchSize, nInputPlane, nWindows, h, w,
xMin, xMax, yMax); break;
}
THCudaCheck(cudaGetLastError());
}
/************************ Other stuff ************************/
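// Clamps each window's borders to the valid coordinate range and, when a window collapses
// below minWidth, re-expands it symmetrically around its center so that the resulting
// width (paramMax - paramMin) is at least (minWidth - 0.9).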
__global__ void dirtyFixWindowsKernel(
float *xMin, float *xMax, float *yMin, float *yMax,
const int size, const float h, const float w, const float minWidth) {
int idx = NUM_THREADS * blockIdx.x + threadIdx.x;
if (idx < 2*size) {
float paramMin, paramMax;
if (idx < size) {
paramMin = max(-h+1, min(h-1, xMin[idx]));
paramMax = max(-h+1, min(h-1, xMax[idx]));
if (paramMin + minWidth - 0.99 > paramMax) {
const float mean = 0.5 * (paramMin + paramMax);
paramMin = mean - 0.5 * (minWidth - 0.9);
paramMax = mean + 0.5 * (minWidth - 0.9);
}
xMin[idx] = paramMin;
xMax[idx] = paramMax;
} else {
idx -= size;
paramMin = max(-w+1, min(w-1, yMin[idx]));
paramMax = max(-w+1, min(w-1, yMax[idx]));
if (paramMin + minWidth - 0.99 > paramMax) {
const float mean = 0.5 * (paramMin + paramMax);
paramMin = mean - 0.5 * (minWidth - 0.9);
paramMax = mean + 0.5 * (minWidth - 0.9);
}
yMin[idx] = paramMin;
yMax[idx] = paramMax;
}
}
}
extern "C"
void dirtyFixWindows(THCState *state,
float *xMin, float *xMax, float *yMin, float *yMax,
int size, int h, int w, float minWidth) {
dim3 dimBlock(NUM_THREADS);
dim3 dimGrid((2*size + dimBlock.x - 1) / dimBlock.x);
dirtyFixWindowsKernel <<<dimGrid, dimBlock, 0, THCState_getCurrentStream(state)>>> (
xMin, xMax, yMin, yMax, size, (float)h, (float)w, minWidth);
THCudaCheck(cudaGetLastError());
}
|
cf1b02b98d56e6cd2b9d151ded5115bcd67ed685.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headers/myHeaders.h"
#include "headers/myUtilityFunctions.h"
using namespace std;
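// Tiled DP update: each block stages TILE_COLS columns of the previous row, plus a left
// halo of dependencyWidthLeft columns, in shared memory and then applies a
// 0/1-knapsack-style recurrence row[i][c] = max(row[i-1][c], v[i] + row[i-1][c - w[i]]);
// the halo only covers weights up to dependencyWidthLeft.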
__global__ void update_array_gpu_tiled(int i, cellType *d_array, int *d_v, int *d_w, int dependencyWidthLeft, int dependencyWidthRight)
{
int myBlockId = (blockIdx.x) + 1;
int r,c;
r = i;
c = myBlockId * TILE_COLS;
//generate my location and process the block
// myCol is the column assigned to thread x of a given block
int myRow = r;
int myCol = c + threadIdx.x - (dependencyWidthLeft);
__shared__ cellType sharedArray [TILE_COLS + 10 + 10];
//copy
if (myCol >= 0)
sharedArray[threadIdx.x] = d_array(myRow-1, myCol);
else
sharedArray[threadIdx.x] = d_array(myRow-1, 0);
__syncthreads();
if ((threadIdx.x >= dependencyWidthLeft) && (threadIdx.x <=TILE_COLS+dependencyWidthLeft))
{
int a = sharedArray[threadIdx.x];
int b = d_v[myRow] + sharedArray[threadIdx.x - d_w[myRow]];
(( (d_w[myRow] > myCol) || (a >= b)) ? d_array(myRow,myCol) = a : d_array(myRow,myCol) = b );
//d_array(myRow, myCol) = (sharedArray[threadIdx.x] + sharedArray[threadIdx.x -1 ] + sharedArray[threadIdx.x -2] + 1) % 10;
}
}
int main(int argc, char const *argv[])
{
//create array at host : initialize accordingly
cellType *h_array;
h_array = create_array_host();
//initialize base row; arguments: (cellType *h_array, int rowNumber, int mode, int value)
// mode = 1 selects random initialization; the value argument is ignored in that case
initialize_this_row(h_array, 0, 0, 0);
initialize_this_col(h_array, 0, 0, 0);
//Create array at device
cellType *d_array;
hipMalloc((void**) &d_array, sizeof(cellType)*(nRows*TOTAL_COLS));
//copy host array to device array, if needed
copy_host_to_device(h_array, d_array);
// create/initialize and transfer other resources and pass to the function
//int W = nRows;
/*int h_v[5] = {0, 10, 40, 30, 50};
int h_w[5] = {0, 5, 4, 6, 3};*/
int *h_v = create_array_host_1D(nRows);
initialize_this_1D_array(h_v, nRows);
int *d_v;
hipMalloc((void**) &d_v, sizeof(int)*(nRows));
copy_host_to_device_1D(h_v, d_v, nRows);
int *h_w = create_array_host_1D(nRows);
initialize_this_1D_array(h_w, nRows);
int *d_w;
hipMalloc((void**) &d_w, sizeof(int)*(nRows));
copy_host_to_device_1D(h_w, d_w, nRows);
GpuTimer phase1;
phase1.Start();
int dependencyWidthLeft = 10;
int dependencyWidthRight = 0;
//create a wrapper to design tiling iterations
int ThreadsPerBlock = dependencyWidthLeft + TILE_COLS + dependencyWidthRight;
for (int i = 1; i <= nRows; i++)
{
hipLaunchKernelGGL(( update_array_gpu_tiled), dim3(dim3(nRows/TILE_COLS,1,1)), dim3(dim3(ThreadsPerBlock,1,1)), 0, 0, i, d_array, d_v, d_w, dependencyWidthLeft, dependencyWidthRight);
}
phase1.Stop();
cout <<"Time (Tiled GPU): " <<phase1.Elapsed()<< " Milli Seconds\n";
//copy back to cpu
copy_device_to_host(h_array, d_array);
//Access the resultant matrix : dump into output file
//write_array_console(h_array);
ofstream myfile ("files_output/o_gpu_tiled_shmem.txt");
write_array_file(h_array, myfile);
return 0;
}
| cf1b02b98d56e6cd2b9d151ded5115bcd67ed685.cu |
#include "headers/myHeaders.h"
#include "headers/myUtilityFunctions.h"
using namespace std;
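// Tiled DP update: each block stages TILE_COLS columns of the previous row, plus a left
// halo of dependencyWidthLeft columns, in shared memory and then applies a
// 0/1-knapsack-style recurrence row[i][c] = max(row[i-1][c], v[i] + row[i-1][c - w[i]]);
// the halo only covers weights up to dependencyWidthLeft.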
__global__ void update_array_gpu_tiled(int i, cellType *d_array, int *d_v, int *d_w, int dependencyWidthLeft, int dependencyWidthRight)
{
int myBlockId = (blockIdx.x) + 1;
int r,c;
r = i;
c = myBlockId * TILE_COLS;
//generate my location and process the block
// myCol is the column assigned to thread x of a given block
int myRow = r;
int myCol = c + threadIdx.x - (dependencyWidthLeft);
__shared__ cellType sharedArray [TILE_COLS + 10 + 10];
//copy
if (myCol >= 0)
sharedArray[threadIdx.x] = d_array(myRow-1, myCol);
else
sharedArray[threadIdx.x] = d_array(myRow-1, 0);
__syncthreads();
if ((threadIdx.x >= dependencyWidthLeft) && (threadIdx.x <=TILE_COLS+dependencyWidthLeft))
{
int a = sharedArray[threadIdx.x];
int b = d_v[myRow] + sharedArray[threadIdx.x - d_w[myRow]];
(( (d_w[myRow] > myCol) || (a >= b)) ? d_array(myRow,myCol) = a : d_array(myRow,myCol) = b );
//d_array(myRow, myCol) = (sharedArray[threadIdx.x] + sharedArray[threadIdx.x -1 ] + sharedArray[threadIdx.x -2] + 1) % 10;
}
}
int main(int argc, char const *argv[])
{
//create array at host : initialize accordingly
cellType *h_array;
h_array = create_array_host();
//initialize base row; arguments: (cellType *h_array, int rowNumber, int mode, int value)
// mode = 1 selects random initialization; the value argument is ignored in that case
initialize_this_row(h_array, 0, 0, 0);
initialize_this_col(h_array, 0, 0, 0);
//Create array at device
cellType *d_array;
cudaMalloc((void**) &d_array, sizeof(cellType)*(nRows*TOTAL_COLS));
//copy host array to device array, if needed
copy_host_to_device(h_array, d_array);
// create/initialize and transfer other resources and pass to the function
//int W = nRows;
/*int h_v[5] = {0, 10, 40, 30, 50};
int h_w[5] = {0, 5, 4, 6, 3};*/
int *h_v = create_array_host_1D(nRows);
initialize_this_1D_array(h_v, nRows);
int *d_v;
cudaMalloc((void**) &d_v, sizeof(int)*(nRows));
copy_host_to_device_1D(h_v, d_v, nRows);
int *h_w = create_array_host_1D(nRows);
initialize_this_1D_array(h_w, nRows);
int *d_w;
cudaMalloc((void**) &d_w, sizeof(int)*(nRows));
copy_host_to_device_1D(h_w, d_w, nRows);
GpuTimer phase1;
phase1.Start();
int dependencyWidthLeft = 10;
int dependencyWidthRight = 0;
//create a wrapper to design tiling iterations
int ThreadsPerBlock = dependencyWidthLeft + TILE_COLS + dependencyWidthRight;
for (int i = 1; i <= nRows; i++)
{
update_array_gpu_tiled<<<dim3(nRows/TILE_COLS,1,1), dim3(ThreadsPerBlock,1,1)>>>(i, d_array, d_v, d_w, dependencyWidthLeft, dependencyWidthRight);
}
phase1.Stop();
cout <<"Time (Tiled GPU): " <<phase1.Elapsed()<< " Milli Seconds\n";
//copy back to cpu
copy_device_to_host(h_array, d_array);
//Access the resultant matrix : dump into output file
//write_array_console(h_array);
ofstream myfile ("files_output/o_gpu_tiled_shmem.txt");
write_array_file(h_array, myfile);
return 0;
}
|
de64f724520c6e79e382ae3b1a8fea7f983f6e72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by ss on 19-1-20.
//
#include "thundergbm/builder/hist_tree_builder.h"
#include "thundergbm/util/cub_wrapper.h"
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/iterator/counting_iterator.h"
#include "thrust/iterator/transform_iterator.h"
#include "thrust/iterator/discard_iterator.h"
#include "thrust/sequence.h"
#include "thrust/binary_search.h"
#include "thundergbm/util/multi_device.h"
inline unsigned int __host__ __device__ compose(unsigned int offset, unsigned char bid) {
unsigned int id;
unsigned int temp_id = (unsigned int)bid;
id = ((temp_id << 24) | offset);
return id;
}
inline void __host__ __device__ decompose(unsigned int id, unsigned int &offset, unsigned char &bid) {
unsigned char *tmp = (unsigned char*)(&id);
bid = (unsigned char)tmp[3];
offset = ((id << 8) >> 8);
}
void HistTreeBuilder::get_bin_ids() {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
SparseColumns &columns = shards[device_id].columns;
HistCut &cut = this->cut[device_id];
auto &dense_bin_id = this->dense_bin_id[device_id];
// auto &char_dense_bin_id = this->char_dense_bin_id[device_id];
using namespace thrust;
int n_column = columns.n_column;
int nnz = columns.nnz;
auto cut_row_ptr = cut.cut_row_ptr.device_data();
auto cut_points_ptr = cut.cut_points_val.device_data();
auto csc_val_data = columns.csc_val.device_data();
SyncArray<unsigned char> bin_id;
bin_id.resize(columns.nnz);
auto bin_id_data = bin_id.device_data();
int n_block = fminf((nnz / n_column - 1) / 256 + 1, 4 * 56);
{
auto lowerBound = [=]__device__(const float_type *search_begin, const float_type *search_end, float_type val) {
const float_type *left = search_begin;
const float_type *right = search_end - 1;
while (left != right) {
const float_type *mid = left + (right - left) / 2;
if (*mid <= val)
right = mid;
else left = mid + 1;
}
return left;
};
TIMED_SCOPE(timerObj, "binning");
device_loop_2d(n_column, columns.csc_col_ptr.device_data(), [=]__device__(int cid, int i) {
auto search_begin = cut_points_ptr + cut_row_ptr[cid];
auto search_end = cut_points_ptr + cut_row_ptr[cid + 1];
auto val = csc_val_data[i];
bin_id_data[i] = lowerBound(search_begin, search_end, val) - search_begin;
}, n_block);
}
auto max_num_bin = param.max_num_bin;
dense_bin_id.resize(n_instances * n_column);
// char_dense_bin_id.resize(n_instances * n_column);
// auto char_dense_bin_id_data = char_dense_bin_id.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto csc_row_idx_data = columns.csc_row_idx.device_data();
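// Mark every (instance, feature) slot as "missing" first (bin id max_num_bin packed into
// the top byte); entries present in the CSC matrix are overwritten with their real packed
// id in the loop below.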
device_loop(n_instances * n_column, [=]__device__(int i) {
dense_bin_id_data[i] = max_num_bin << 24;
// char_dense_bin_id_data[i] = max_num_bin;
});
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
device_loop_2d(n_column, columns.csc_col_ptr.device_data(), [=]__device__(int fid, int i) {
int row = csc_row_idx_data[i];
unsigned char bid = bin_id_data[i];
unsigned int feature_offset = cut_row_ptr_data[fid];
unsigned int id = compose(feature_offset, bid);
dense_bin_id_data[row * n_column + fid] = id;
// char_dense_bin_id_data[row * n_column + fid] = bid;
}, n_block);
// auto h_char_dense_bin_id_data = char_dense_bin_id.host_data();
auto h_dense_bin_id_data = dense_bin_id.host_data();
});
}
void HistTreeBuilder::find_split(int level, int device_id) {
std::chrono::high_resolution_clock timer;
const SparseColumns &columns = shards[device_id].columns;
SyncArray<int> &nid = ins2node_id[device_id];
SyncArray<GHPair> &gh_pair = gradients[device_id];
Tree &tree = trees[device_id];
SyncArray<SplitPoint> &sp = this->sp[device_id];
SyncArray<bool> &ignored_set = shards[device_id].ignored_set;
HistCut &cut = this->cut[device_id];
auto &dense_bin_id = this->dense_bin_id[device_id];
auto &last_hist = this->last_hist[device_id];
TIMED_FUNC(timerObj);
int n_nodes_in_level = static_cast<int>(pow(2, level));
int nid_offset = static_cast<int>(pow(2, level) - 1);
int n_column = columns.n_column;
int n_partition = n_column * n_nodes_in_level;
int n_bins = cut.cut_points_val.size();
int n_max_nodes = 2 << param.depth;
int n_max_splits = n_max_nodes * n_bins;
int n_split = n_nodes_in_level * n_bins;
// auto char_dense_bin_id_data = char_dense_bin_id[device_id].device_data();
LOG(TRACE) << "start finding split";
//find the best split locally
{
using namespace thrust;
auto t_build_start = timer.now();
//calculate split information for each split
SyncArray<GHPair> hist(n_max_splits);
SyncArray<GHPair> missing_gh(n_partition);
auto cut_fid_data = cut.cut_fid.device_data();
auto i2fid = [=] __device__(int i) { return cut_fid_data[i % n_bins]; };
auto hist_fid = make_transform_iterator(counting_iterator<int>(0), i2fid);
{
{
TIMED_SCOPE(timerObj, "build hist");
{
size_t
smem_size = n_bins * sizeof(GHPair);
LOG(DEBUG) << "shared memory size = " << smem_size / 1024.0 << " KB";
if (n_nodes_in_level == 1) {
//root
auto hist_data = hist.device_data();
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
auto gh_data = gh_pair.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto max_num_bin = param.max_num_bin;
auto n_instances = this->n_instances;
if (smem_size > 48 * 1024) {
device_loop(n_instances * n_column, [=]__device__(int i) {
int iid = i / n_column;
int fid = i % n_column;
unsigned int id = dense_bin_id_data[iid * n_column + fid];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
// bid = char_dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
if(src.h != 0) {
// int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = hist_data[feature_offset + bid];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
});
} else {
int num_fv = n_instances * n_column;
anonymous_kernel([=]__device__() {
extern __shared__ GHPair local_hist[];
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
local_hist[i] = 0;
}
__syncthreads();
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < num_fv; i += blockDim.x * gridDim.x) {
int iid = i / n_column;
int fid = i % n_column;
unsigned int id = dense_bin_id_data[iid * n_column + fid];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
// bid = char_dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
if(src.h != 0) {
// int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = hist_data[feature_offset + bid];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
}
__syncthreads();
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
GHPair src = local_hist[i];
if(src.h != 0) {
GHPair &dest = hist_data[i];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
}, num_fv, smem_size);
}
} else {
//otherwise
auto t_dp_begin = timer.now();
SyncArray<int> node_idx(n_instances);
SyncArray<int> node_ptr(n_nodes_in_level + 1);
{
TIMED_SCOPE(timerObj, "data partitioning");
SyncArray<int> nid4sort(n_instances);
nid4sort.copy_from(ins2node_id[device_id]);
sequence(cuda::par, node_idx.device_data(), node_idx.device_end(), 0);
cub_sort_by_key(nid4sort, node_idx);
auto counting_iter = make_counting_iterator < int > (nid_offset);
node_ptr.host_data()[0] =
lower_bound(cuda::par, nid4sort.device_data(), nid4sort.device_end(), nid_offset) -
nid4sort.device_data();
upper_bound(cuda::par, nid4sort.device_data(), nid4sort.device_end(), counting_iter,
counting_iter + n_nodes_in_level, node_ptr.device_data() + 1);
LOG(DEBUG) << "node ptr = " << node_ptr;
hipDeviceSynchronize();
}
auto t_dp_end = timer.now();
std::chrono::duration<double> dp_used_time = t_dp_end - t_dp_begin;
this->total_dp_time += dp_used_time.count();
auto node_ptr_data = node_ptr.host_data();
auto node_idx_data = node_idx.device_data();
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
auto gh_data = gh_pair.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto max_num_bin = param.max_num_bin;
for (int i = 0; i < n_nodes_in_level / 2; ++i) {
int nid0_to_compute = i * 2;
int nid0_to_substract = i * 2 + 1;
int n_ins_left = node_ptr_data[nid0_to_compute + 1] - node_ptr_data[nid0_to_compute];
int n_ins_right = node_ptr_data[nid0_to_substract + 1] - node_ptr_data[nid0_to_substract];
if (max(n_ins_left, n_ins_right) == 0) continue;
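// Histogram subtraction trick: build the histogram only for the child with fewer
// instances (nid0_to_compute); the sibling's histogram is recovered in the "subtract"
// step below as the parent's histogram (last_hist) minus the computed one.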
if (n_ins_left > n_ins_right)
swap(nid0_to_compute, nid0_to_substract);
//compute
{
int nid0 = nid0_to_compute;
auto idx_begin = node_ptr.host_data()[nid0];
auto idx_end = node_ptr.host_data()[nid0 + 1];
auto hist_data = hist.device_data() + nid0 * n_bins;
this->total_hist_num++;
if (smem_size > 48 * 1024) {
CHECK_EQ(1, 2) << "Unexpected cases";
device_loop((idx_end - idx_begin) * n_column, [=]__device__(int i) {
int iid = node_idx_data[i / n_column + idx_begin];
int fid = i % n_column;
unsigned int id = dense_bin_id_data[iid * n_column + fid];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
// bid = char_dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
if(src.h != 0) {
// int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = hist_data[feature_offset + bid];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
});
} else {
int num_fv = (idx_end - idx_begin) * n_column;
anonymous_kernel([=] __device__() {
extern __shared__ char smem[];
GHPair *local_hist = reinterpret_cast<GHPair*>(smem);
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
local_hist[i] = GHPair();
}
__syncthreads();
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < num_fv; i += blockDim.x * gridDim.x) {
int iid = node_idx_data[i / n_column + idx_begin];
//int fid = i - n_column *( i / n_column);
int fid = i % n_column;
unsigned int id = dense_bin_id_data[iid * n_column + fid];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
// bid = char_dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
// int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = local_hist[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
__syncthreads();
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
GHPair src = local_hist[i];
if(src.h != 0) {
GHPair &dest = hist_data[i];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
}, num_fv, smem_size);
}
}
//subtract
auto t_copy_start = timer.now();
{
auto hist_data_computed = hist.device_data() + nid0_to_compute * n_bins;
auto hist_data_to_compute = hist.device_data() + nid0_to_substract * n_bins;
auto father_hist_data = last_hist.device_data() + (nid0_to_substract / 2) * n_bins;
device_loop(n_bins, [=]__device__(int i) {
hist_data_to_compute[i] = father_hist_data[i] - hist_data_computed[i];
});
}
auto t_copy_end = timer.now();
std::chrono::duration<double> cp_used_time = t_copy_end - t_copy_start;
this->total_copy_time += cp_used_time.count();
// PERFORMANCE_CHECKPOINT(timerObj);
} // end for each node
}//end # node > 1
last_hist.copy_from(hist);
hipDeviceSynchronize();
}
LOG(DEBUG) << "level: " << level;
LOG(DEBUG) << "hist new = " << hist;
auto t_build_hist_end = timer.now();
std::chrono::duration<double> bh_used_time = t_build_hist_end - t_build_start;
this->build_hist_used_time += bh_used_time.count();
this->build_n_hist++;
LOG(DEBUG) << "-------------->>> build_hist_used_time: " << bh_used_time.count();
LOG(DEBUG) << "-------------->>> build_num_hist: " << this->build_n_hist;
LOG(DEBUG) << "-------------->>> total_build_hist_used_time: " << this->build_hist_used_time - this->total_dp_time;
LOG(DEBUG) << "-------------->>> n_hist::::: " << this->total_hist_num;
LOG(DEBUG) << "-------------->>> dp_time::::: " << this->total_dp_time;
LOG(DEBUG) << "-------------->>> cp_time::::: " << this->total_copy_time;
//LOG(DEBUG) << "cutfid = " << cut.cut_fid;
inclusive_scan_by_key(cuda::par, hist_fid, hist_fid + n_split,
hist.device_data(), hist.device_data());
LOG(DEBUG) << hist;
auto nodes_data = tree.nodes.device_data();
auto missing_gh_data = missing_gh.device_data();
auto cut_row_ptr = cut.cut_row_ptr.device_data();
auto hist_data = hist.device_data();
device_loop(n_partition, [=]__device__(int pid) {
int nid0 = pid / n_column;
int nid = nid0 + nid_offset;
if (!nodes_data[nid].splittable()) return;
int fid = pid % n_column;
if (cut_row_ptr[fid + 1] != cut_row_ptr[fid]) {
GHPair node_gh = hist_data[nid0 * n_bins + cut_row_ptr[fid + 1] - 1];
missing_gh_data[pid] = nodes_data[nid].sum_gh_pair - node_gh;
}
});
LOG(DEBUG) << missing_gh;
}
}
//calculate gain of each split
SyncArray<float_type> gain(n_max_splits);
{
// TIMED_SCOPE(timerObj, "calculate gain");
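// Split gain: G_l^2/(H_l + lambda) + G_r^2/(H_r + lambda) - G^2/(H + lambda),
// forced to zero when either child falls below min_child_weight.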
auto compute_gain = []__device__(GHPair father, GHPair lch, GHPair rch, float_type min_child_weight,
float_type lambda) -> float_type {
if (lch.h >= min_child_weight && rch.h >= min_child_weight)
return (lch.g * lch.g) / (lch.h + lambda) + (rch.g * rch.g) / (rch.h + lambda) -
(father.g * father.g) / (father.h + lambda);
else
return 0;
};
const Tree::TreeNode *nodes_data = tree.nodes.device_data();
GHPair *gh_prefix_sum_data = hist.device_data();
float_type *gain_data = gain.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto ignored_set_data = ignored_set.device_data();
//for lambda expression
float_type mcw = param.min_child_weight;
float_type l = param.lambda;
device_loop(n_split, [=]__device__(int i) {
int nid0 = i / n_bins;
int nid = nid0 + nid_offset;
int fid = hist_fid[i % n_bins];
if (nodes_data[nid].is_valid && !ignored_set_data[fid]) {
int pid = nid0 * n_column + hist_fid[i];
GHPair father_gh = nodes_data[nid].sum_gh_pair;
GHPair p_missing_gh = missing_gh_data[pid];
GHPair rch_gh = gh_prefix_sum_data[i];
float_type default_to_left_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
rch_gh = rch_gh + p_missing_gh;
float_type default_to_right_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
if (default_to_left_gain > default_to_right_gain)
gain_data[i] = default_to_left_gain;
else
gain_data[i] = -default_to_right_gain;//negative means default split to right
} else gain_data[i] = 0;
});
LOG(DEBUG) << "gain = " << gain;
}
SyncArray<int_float> best_idx_gain(n_nodes_in_level);
{
// TIMED_SCOPE(timerObj, "get best gain");
auto arg_abs_max = []__device__(const int_float &a, const int_float &b) {
if (fabsf(get<1>(a)) == fabsf(get<1>(b)))
return get<0>(a) < get<0>(b) ? a : b;
else
return fabsf(get<1>(a)) > fabsf(get<1>(b)) ? a : b;
};
auto nid_iterator = make_transform_iterator(counting_iterator<int>(0), placeholders::_1 / n_bins);
reduce_by_key(
cuda::par,
nid_iterator, nid_iterator + n_split,
make_zip_iterator(make_tuple(counting_iterator<int>(0), gain.device_data())),
make_discard_iterator(),
best_idx_gain.device_data(),
thrust::equal_to<int>(),
arg_abs_max
);
LOG(DEBUG) << n_split;
LOG(DEBUG) << "best rank & gain = " << best_idx_gain;
}
//get split points
{
const int_float *best_idx_gain_data = best_idx_gain.device_data();
auto hist_data = hist.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto cut_val_data = cut.cut_points_val.device_data();
sp.resize(n_nodes_in_level);
auto sp_data = sp.device_data();
auto nodes_data = tree.nodes.device_data();
int column_offset = columns.column_offset;
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
device_loop(n_nodes_in_level, [=]__device__(int i) {
int_float bst = best_idx_gain_data[i];
float_type best_split_gain = get<1>(bst);
int split_index = get<0>(bst);
if (!nodes_data[i + nid_offset].is_valid) {
sp_data[i].split_fea_id = -1;
sp_data[i].nid = -1;
return;
}
int fid = hist_fid[split_index];
sp_data[i].split_fea_id = fid + column_offset;
sp_data[i].nid = i + nid_offset;
sp_data[i].gain = fabsf(best_split_gain);
sp_data[i].fval = cut_val_data[split_index % n_bins];
sp_data[i].split_bid = (unsigned char) (split_index % n_bins - cut_row_ptr_data[fid]);
sp_data[i].fea_missing_gh = missing_gh_data[i * n_column + hist_fid[split_index]];
sp_data[i].default_right = best_split_gain < 0;
sp_data[i].rch_sum_gh = hist_data[split_index];
});
}
}
LOG(DEBUG) << "split points (gain/fea_id/nid): " << sp;
}
void HistTreeBuilder::update_ins2node_id() {
TIMED_FUNC(timerObj);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
SyncArray<bool> has_splittable(1);
auto &columns = shards[device_id].columns;
//set new node id for each instance
{
// TIMED_SCOPE(timerObj, "get new node id");
auto nid_data = ins2node_id[device_id].device_data();
const Tree::TreeNode *nodes_data = trees[device_id].nodes.device_data();
has_splittable.host_data()[0] = false;
bool *h_s_data = has_splittable.device_data();
int column_offset = columns.column_offset;
int n_column = columns.n_column;
auto dense_bin_id_data = dense_bin_id[device_id].device_data();
// auto char_dense_bin_id_data = char_dense_bin_id[device_id].device_data();
int max_num_bin = param.max_num_bin;
device_loop(n_instances, [=]__device__(int iid) {
int nid = nid_data[iid];
const Tree::TreeNode &node = nodes_data[nid];
int split_fid = node.split_feature_id;
if (node.splittable() && ((split_fid - column_offset < n_column) && (split_fid >= column_offset))) {
h_s_data[0] = true;
unsigned char split_bid = node.split_bid;
unsigned int id = dense_bin_id_data[iid * n_column + split_fid - column_offset];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
bool to_left = true;
if ((bid == max_num_bin && node.default_right) || (bid <= split_bid))
to_left = false;
if (to_left) {
//goes to left child
nid_data[iid] = node.lch_index;
} else {
//right child
nid_data[iid] = node.rch_index;
}
}
});
}
LOG(DEBUG) << "new tree_id = " << ins2node_id[device_id];
has_split[device_id] = has_splittable.host_data()[0];
});
}
void HistTreeBuilder::init(const DataSet &dataset, const GBMParam ¶m) {
TreeBuilder::init(dataset, param);
//TODO refactor
//init shards
int n_device = param.n_device;
shards = vector<Shard>(n_device);
vector<std::unique_ptr<SparseColumns>> v_columns(param.n_device);
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].reset(&shards[i].columns);
shards[i].ignored_set = SyncArray<bool>(dataset.n_features());
}
SparseColumns columns;
if(dataset.use_cpu)
columns.csr2csc_cpu(dataset, v_columns);
else
columns.csr2csc_gpu(dataset, v_columns);
// columns.csc_by_default(dataset, v_columns);
cut = vector<HistCut>(param.n_device);
dense_bin_id = MSyncArray<unsigned int>(param.n_device);
// char_dense_bin_id = MSyncArray<unsigned char>(param.n_device);
last_hist = MSyncArray<GHPair>(param.n_device);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
if(dataset.use_cpu)
cut[device_id].get_cut_points2(shards[device_id].columns, param.max_num_bin, n_instances);
else
cut[device_id].get_cut_points3(shards[device_id].columns, param.max_num_bin, n_instances);
last_hist[device_id].resize((2 << param.depth) * cut[device_id].cut_points_val.size());
});
get_bin_ids();
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].release();
}
SyncMem::clear_cache();
}
| de64f724520c6e79e382ae3b1a8fea7f983f6e72.cu | //
// Created by ss on 19-1-20.
//
#include "thundergbm/builder/hist_tree_builder.h"
#include "thundergbm/util/cub_wrapper.h"
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/iterator/counting_iterator.h"
#include "thrust/iterator/transform_iterator.h"
#include "thrust/iterator/discard_iterator.h"
#include "thrust/sequence.h"
#include "thrust/binary_search.h"
#include "thundergbm/util/multi_device.h"
inline unsigned int __host__ __device__ compose(unsigned int offset, unsigned char bid) {
unsigned int id;
unsigned int temp_id = (unsigned int)bid;
id = ((temp_id << 24) | offset);
return id;
}
inline void __host__ __device__ decompose(unsigned int id, unsigned int &offset, unsigned char &bid) {
unsigned char *tmp = (unsigned char*)(&id);
bid = (unsigned char)tmp[3];
offset = ((id << 8) >> 8);
}
void HistTreeBuilder::get_bin_ids() {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
SparseColumns &columns = shards[device_id].columns;
HistCut &cut = this->cut[device_id];
auto &dense_bin_id = this->dense_bin_id[device_id];
// auto &char_dense_bin_id = this->char_dense_bin_id[device_id];
using namespace thrust;
int n_column = columns.n_column;
int nnz = columns.nnz;
auto cut_row_ptr = cut.cut_row_ptr.device_data();
auto cut_points_ptr = cut.cut_points_val.device_data();
auto csc_val_data = columns.csc_val.device_data();
SyncArray<unsigned char> bin_id;
bin_id.resize(columns.nnz);
auto bin_id_data = bin_id.device_data();
int n_block = fminf((nnz / n_column - 1) / 256 + 1, 4 * 56);
{
auto lowerBound = [=]__device__(const float_type *search_begin, const float_type *search_end, float_type val) {
const float_type *left = search_begin;
const float_type *right = search_end - 1;
while (left != right) {
const float_type *mid = left + (right - left) / 2;
if (*mid <= val)
right = mid;
else left = mid + 1;
}
return left;
};
TIMED_SCOPE(timerObj, "binning");
device_loop_2d(n_column, columns.csc_col_ptr.device_data(), [=]__device__(int cid, int i) {
auto search_begin = cut_points_ptr + cut_row_ptr[cid];
auto search_end = cut_points_ptr + cut_row_ptr[cid + 1];
auto val = csc_val_data[i];
bin_id_data[i] = lowerBound(search_begin, search_end, val) - search_begin;
}, n_block);
}
auto max_num_bin = param.max_num_bin;
dense_bin_id.resize(n_instances * n_column);
// char_dense_bin_id.resize(n_instances * n_column);
// auto char_dense_bin_id_data = char_dense_bin_id.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto csc_row_idx_data = columns.csc_row_idx.device_data();
device_loop(n_instances * n_column, [=]__device__(int i) {
dense_bin_id_data[i] = max_num_bin << 24;
// char_dense_bin_id_data[i] = max_num_bin;
});
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
device_loop_2d(n_column, columns.csc_col_ptr.device_data(), [=]__device__(int fid, int i) {
int row = csc_row_idx_data[i];
unsigned char bid = bin_id_data[i];
unsigned int feature_offset = cut_row_ptr_data[fid];
unsigned int id = compose(feature_offset, bid);
dense_bin_id_data[row * n_column + fid] = id;
// char_dense_bin_id_data[row * n_column + fid] = bid;
}, n_block);
// auto h_char_dense_bin_id_data = char_dense_bin_id.host_data();
auto h_dense_bin_id_data = dense_bin_id.host_data();
});
}
void HistTreeBuilder::find_split(int level, int device_id) {
std::chrono::high_resolution_clock timer;
const SparseColumns &columns = shards[device_id].columns;
SyncArray<int> &nid = ins2node_id[device_id];
SyncArray<GHPair> &gh_pair = gradients[device_id];
Tree &tree = trees[device_id];
SyncArray<SplitPoint> &sp = this->sp[device_id];
SyncArray<bool> &ignored_set = shards[device_id].ignored_set;
HistCut &cut = this->cut[device_id];
auto &dense_bin_id = this->dense_bin_id[device_id];
auto &last_hist = this->last_hist[device_id];
TIMED_FUNC(timerObj);
int n_nodes_in_level = static_cast<int>(pow(2, level));
int nid_offset = static_cast<int>(pow(2, level) - 1);
int n_column = columns.n_column;
int n_partition = n_column * n_nodes_in_level;
int n_bins = cut.cut_points_val.size();
int n_max_nodes = 2 << param.depth;
int n_max_splits = n_max_nodes * n_bins;
int n_split = n_nodes_in_level * n_bins;
// auto char_dense_bin_id_data = char_dense_bin_id[device_id].device_data();
LOG(TRACE) << "start finding split";
//find the best split locally
{
using namespace thrust;
auto t_build_start = timer.now();
//calculate split information for each split
SyncArray<GHPair> hist(n_max_splits);
SyncArray<GHPair> missing_gh(n_partition);
auto cut_fid_data = cut.cut_fid.device_data();
auto i2fid = [=] __device__(int i) { return cut_fid_data[i % n_bins]; };
auto hist_fid = make_transform_iterator(counting_iterator<int>(0), i2fid);
{
{
TIMED_SCOPE(timerObj, "build hist");
{
size_t
smem_size = n_bins * sizeof(GHPair);
LOG(DEBUG) << "shared memory size = " << smem_size / 1024.0 << " KB";
if (n_nodes_in_level == 1) {
//root
auto hist_data = hist.device_data();
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
auto gh_data = gh_pair.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto max_num_bin = param.max_num_bin;
auto n_instances = this->n_instances;
if (smem_size > 48 * 1024) {
device_loop(n_instances * n_column, [=]__device__(int i) {
int iid = i / n_column;
int fid = i % n_column;
unsigned int id = dense_bin_id_data[iid * n_column + fid];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
// bid = char_dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
if(src.h != 0) {
// int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = hist_data[feature_offset + bid];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
});
} else {
int num_fv = n_instances * n_column;
anonymous_kernel([=]__device__() {
extern __shared__ GHPair local_hist[];
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
local_hist[i] = 0;
}
__syncthreads();
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < num_fv; i += blockDim.x * gridDim.x) {
int iid = i / n_column;
int fid = i % n_column;
unsigned int id = dense_bin_id_data[iid * n_column + fid];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
// bid = char_dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
if(src.h != 0) {
// int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = hist_data[feature_offset + bid];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
}
__syncthreads();
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
GHPair src = local_hist[i];
if(src.h != 0) {
GHPair &dest = hist_data[i];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
}, num_fv, smem_size);
}
} else {
//otherwise
auto t_dp_begin = timer.now();
SyncArray<int> node_idx(n_instances);
SyncArray<int> node_ptr(n_nodes_in_level + 1);
{
TIMED_SCOPE(timerObj, "data partitioning");
SyncArray<int> nid4sort(n_instances);
nid4sort.copy_from(ins2node_id[device_id]);
sequence(cuda::par, node_idx.device_data(), node_idx.device_end(), 0);
cub_sort_by_key(nid4sort, node_idx);
auto counting_iter = make_counting_iterator < int > (nid_offset);
node_ptr.host_data()[0] =
lower_bound(cuda::par, nid4sort.device_data(), nid4sort.device_end(), nid_offset) -
nid4sort.device_data();
upper_bound(cuda::par, nid4sort.device_data(), nid4sort.device_end(), counting_iter,
counting_iter + n_nodes_in_level, node_ptr.device_data() + 1);
LOG(DEBUG) << "node ptr = " << node_ptr;
cudaDeviceSynchronize();
}
auto t_dp_end = timer.now();
std::chrono::duration<double> dp_used_time = t_dp_end - t_dp_begin;
this->total_dp_time += dp_used_time.count();
auto node_ptr_data = node_ptr.host_data();
auto node_idx_data = node_idx.device_data();
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
auto gh_data = gh_pair.device_data();
auto dense_bin_id_data = dense_bin_id.device_data();
auto max_num_bin = param.max_num_bin;
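// sibling trick: for each pair of child nodes, build the histogram only for the child with fewer instances and derive the other by subtracting it from the parent's histogram kept in last_hist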
for (int i = 0; i < n_nodes_in_level / 2; ++i) {
int nid0_to_compute = i * 2;
int nid0_to_substract = i * 2 + 1;
int n_ins_left = node_ptr_data[nid0_to_compute + 1] - node_ptr_data[nid0_to_compute];
int n_ins_right = node_ptr_data[nid0_to_substract + 1] - node_ptr_data[nid0_to_substract];
if (max(n_ins_left, n_ins_right) == 0) continue;
if (n_ins_left > n_ins_right)
swap(nid0_to_compute, nid0_to_substract);
//compute
{
int nid0 = nid0_to_compute;
auto idx_begin = node_ptr.host_data()[nid0];
auto idx_end = node_ptr.host_data()[nid0 + 1];
auto hist_data = hist.device_data() + nid0 * n_bins;
this->total_hist_num++;
if (smem_size > 48 * 1024) {
CHECK_EQ(1, 2) << "Unexpected cases";
device_loop((idx_end - idx_begin) * n_column, [=]__device__(int i) {
int iid = node_idx_data[i / n_column + idx_begin];
int fid = i % n_column;
unsigned int id = dense_bin_id_data[iid * n_column + fid];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
// bid = char_dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
if(src.h != 0) {
// int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = hist_data[feature_offset + bid];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
});
} else {
int num_fv = (idx_end - idx_begin) * n_column;
anonymous_kernel([=] __device__() {
extern __shared__ char smem[];
GHPair *local_hist = reinterpret_cast<GHPair*>(smem);
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
local_hist[i] = GHPair();
}
__syncthreads();
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < num_fv; i += blockDim.x * gridDim.x) {
int iid = node_idx_data[i / n_column + idx_begin];
//int fid = i - n_column *( i / n_column);
int fid = i % n_column;
unsigned int id = dense_bin_id_data[iid * n_column + fid];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
// bid = char_dense_bin_id_data[iid * n_column + fid];
if (bid != max_num_bin) {
const GHPair src = gh_data[iid];
// int feature_offset = cut_row_ptr_data[fid];
GHPair &dest = local_hist[feature_offset + bid];
if(src.h != 0)
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
__syncthreads();
for (int i = threadIdx.x; i < n_bins; i += blockDim.x) {
GHPair src = local_hist[i];
if(src.h != 0) {
GHPair &dest = hist_data[i];
atomicAdd(&dest.h, src.h);
if(src.g != 0)
atomicAdd(&dest.g, src.g);
}
}
}, num_fv, smem_size);
}
}
//subtract
auto t_copy_start = timer.now();
{
auto hist_data_computed = hist.device_data() + nid0_to_compute * n_bins;
auto hist_data_to_compute = hist.device_data() + nid0_to_substract * n_bins;
auto father_hist_data = last_hist.device_data() + (nid0_to_substract / 2) * n_bins;
device_loop(n_bins, [=]__device__(int i) {
hist_data_to_compute[i] = father_hist_data[i] - hist_data_computed[i];
});
}
auto t_copy_end = timer.now();
std::chrono::duration<double> cp_used_time = t_copy_end - t_copy_start;
this->total_copy_time += cp_used_time.count();
// PERFORMANCE_CHECKPOINT(timerObj);
} // end for each node
}//end # node > 1
last_hist.copy_from(hist);
cudaDeviceSynchronize();
}
LOG(DEBUG) << "level: " << level;
LOG(DEBUG) << "hist new = " << hist;
auto t_build_hist_end = timer.now();
std::chrono::duration<double> bh_used_time = t_build_hist_end - t_build_start;
this->build_hist_used_time += bh_used_time.count();
this->build_n_hist++;
LOG(DEBUG) << "-------------->>> build_hist_used_time: " << bh_used_time.count();
LOG(DEBUG) << "-------------->>> build_num_hist: " << this->build_n_hist;
LOG(DEBUG) << "-------------->>> total_build_hist_used_time: " << this->build_hist_used_time - this->total_dp_time;
LOG(DEBUG) << "-------------->>> n_hist::::: " << this->total_hist_num;
LOG(DEBUG) << "-------------->>> dp_time::::: " << this->total_dp_time;
LOG(DEBUG) << "-------------->>> cp_time::::: " << this->total_copy_time;
//LOG(DEBUG) << "cutfid = " << cut.cut_fid;
inclusive_scan_by_key(cuda::par, hist_fid, hist_fid + n_split,
hist.device_data(), hist.device_data());
LOG(DEBUG) << hist;
auto nodes_data = tree.nodes.device_data();
auto missing_gh_data = missing_gh.device_data();
auto cut_row_ptr = cut.cut_row_ptr.device_data();
auto hist_data = hist.device_data();
device_loop(n_partition, [=]__device__(int pid) {
int nid0 = pid / n_column;
int nid = nid0 + nid_offset;
if (!nodes_data[nid].splittable()) return;
int fid = pid % n_column;
if (cut_row_ptr[fid + 1] != cut_row_ptr[fid]) {
GHPair node_gh = hist_data[nid0 * n_bins + cut_row_ptr[fid + 1] - 1];
missing_gh_data[pid] = nodes_data[nid].sum_gh_pair - node_gh;
}
});
LOG(DEBUG) << missing_gh;
}
}
//calculate gain of each split
SyncArray<float_type> gain(n_max_splits);
{
// TIMED_SCOPE(timerObj, "calculate gain");
auto compute_gain = []__device__(GHPair father, GHPair lch, GHPair rch, float_type min_child_weight,
float_type lambda) -> float_type {
if (lch.h >= min_child_weight && rch.h >= min_child_weight)
return (lch.g * lch.g) / (lch.h + lambda) + (rch.g * rch.g) / (rch.h + lambda) -
(father.g * father.g) / (father.h + lambda);
else
return 0;
};
const Tree::TreeNode *nodes_data = tree.nodes.device_data();
GHPair *gh_prefix_sum_data = hist.device_data();
float_type *gain_data = gain.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto ignored_set_data = ignored_set.device_data();
//for lambda expression
float_type mcw = param.min_child_weight;
float_type l = param.lambda;
device_loop(n_split, [=]__device__(int i) {
int nid0 = i / n_bins;
int nid = nid0 + nid_offset;
int fid = hist_fid[i % n_bins];
if (nodes_data[nid].is_valid && !ignored_set_data[fid]) {
int pid = nid0 * n_column + hist_fid[i];
GHPair father_gh = nodes_data[nid].sum_gh_pair;
GHPair p_missing_gh = missing_gh_data[pid];
GHPair rch_gh = gh_prefix_sum_data[i];
float_type default_to_left_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
rch_gh = rch_gh + p_missing_gh;
float_type default_to_right_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
if (default_to_left_gain > default_to_right_gain)
gain_data[i] = default_to_left_gain;
else
gain_data[i] = -default_to_right_gain;//negative means default split to right
} else gain_data[i] = 0;
});
LOG(DEBUG) << "gain = " << gain;
}
SyncArray<int_float> best_idx_gain(n_nodes_in_level);
{
// TIMED_SCOPE(timerObj, "get best gain");
auto arg_abs_max = []__device__(const int_float &a, const int_float &b) {
if (fabsf(get<1>(a)) == fabsf(get<1>(b)))
return get<0>(a) < get<0>(b) ? a : b;
else
return fabsf(get<1>(a)) > fabsf(get<1>(b)) ? a : b;
};
auto nid_iterator = make_transform_iterator(counting_iterator<int>(0), placeholders::_1 / n_bins);
reduce_by_key(
cuda::par,
nid_iterator, nid_iterator + n_split,
make_zip_iterator(make_tuple(counting_iterator<int>(0), gain.device_data())),
make_discard_iterator(),
best_idx_gain.device_data(),
thrust::equal_to<int>(),
arg_abs_max
);
LOG(DEBUG) << n_split;
LOG(DEBUG) << "best rank & gain = " << best_idx_gain;
}
//get split points
{
const int_float *best_idx_gain_data = best_idx_gain.device_data();
auto hist_data = hist.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto cut_val_data = cut.cut_points_val.device_data();
sp.resize(n_nodes_in_level);
auto sp_data = sp.device_data();
auto nodes_data = tree.nodes.device_data();
int column_offset = columns.column_offset;
auto cut_row_ptr_data = cut.cut_row_ptr.device_data();
device_loop(n_nodes_in_level, [=]__device__(int i) {
int_float bst = best_idx_gain_data[i];
float_type best_split_gain = get<1>(bst);
int split_index = get<0>(bst);
if (!nodes_data[i + nid_offset].is_valid) {
sp_data[i].split_fea_id = -1;
sp_data[i].nid = -1;
return;
}
int fid = hist_fid[split_index];
sp_data[i].split_fea_id = fid + column_offset;
sp_data[i].nid = i + nid_offset;
sp_data[i].gain = fabsf(best_split_gain);
sp_data[i].fval = cut_val_data[split_index % n_bins];
sp_data[i].split_bid = (unsigned char) (split_index % n_bins - cut_row_ptr_data[fid]);
sp_data[i].fea_missing_gh = missing_gh_data[i * n_column + hist_fid[split_index]];
sp_data[i].default_right = best_split_gain < 0;
sp_data[i].rch_sum_gh = hist_data[split_index];
});
}
}
LOG(DEBUG) << "split points (gain/fea_id/nid): " << sp;
}
void HistTreeBuilder::update_ins2node_id() {
TIMED_FUNC(timerObj);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
SyncArray<bool> has_splittable(1);
auto &columns = shards[device_id].columns;
//set new node id for each instance
{
// TIMED_SCOPE(timerObj, "get new node id");
auto nid_data = ins2node_id[device_id].device_data();
const Tree::TreeNode *nodes_data = trees[device_id].nodes.device_data();
has_splittable.host_data()[0] = false;
bool *h_s_data = has_splittable.device_data();
int column_offset = columns.column_offset;
int n_column = columns.n_column;
auto dense_bin_id_data = dense_bin_id[device_id].device_data();
// auto char_dense_bin_id_data = char_dense_bin_id[device_id].device_data();
int max_num_bin = param.max_num_bin;
device_loop(n_instances, [=]__device__(int iid) {
int nid = nid_data[iid];
const Tree::TreeNode &node = nodes_data[nid];
int split_fid = node.split_feature_id;
if (node.splittable() && ((split_fid - column_offset < n_column) && (split_fid >= column_offset))) {
h_s_data[0] = true;
unsigned char split_bid = node.split_bid;
unsigned int id = dense_bin_id_data[iid * n_column + split_fid - column_offset];
unsigned char bid;
unsigned int feature_offset;
decompose(id, feature_offset, bid);
bool to_left = true;
if ((bid == max_num_bin && node.default_right) || (bid <= split_bid))
to_left = false;
if (to_left) {
//goes to left child
nid_data[iid] = node.lch_index;
} else {
//right child
nid_data[iid] = node.rch_index;
}
}
});
}
LOG(DEBUG) << "new tree_id = " << ins2node_id[device_id];
has_split[device_id] = has_splittable.host_data()[0];
});
}
void HistTreeBuilder::init(const DataSet &dataset, const GBMParam ¶m) {
TreeBuilder::init(dataset, param);
//TODO refactor
//init shards
int n_device = param.n_device;
shards = vector<Shard>(n_device);
vector<std::unique_ptr<SparseColumns>> v_columns(param.n_device);
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].reset(&shards[i].columns);
shards[i].ignored_set = SyncArray<bool>(dataset.n_features());
}
SparseColumns columns;
if(dataset.use_cpu)
columns.csr2csc_cpu(dataset, v_columns);
else
columns.csr2csc_gpu(dataset, v_columns);
// columns.csc_by_default(dataset, v_columns);
cut = vector<HistCut>(param.n_device);
dense_bin_id = MSyncArray<unsigned int>(param.n_device);
// char_dense_bin_id = MSyncArray<unsigned char>(param.n_device);
last_hist = MSyncArray<GHPair>(param.n_device);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
if(dataset.use_cpu)
cut[device_id].get_cut_points2(shards[device_id].columns, param.max_num_bin, n_instances);
else
cut[device_id].get_cut_points3(shards[device_id].columns, param.max_num_bin, n_instances);
last_hist[device_id].resize((2 << param.depth) * cut[device_id].cut_points_val.size());
});
get_bin_ids();
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].release();
}
SyncMem::clear_cache();
}
|
96608a55be81d83ce9474951a47c70f990b9ed24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BC_GPU.cuh"
// changeable for performance
#define VIRTUAL_WARP 16
__global__ void betweenness_node_pred_kernel(float *BC, int * r, int * edge_begin, int *edge_end, int * dist, float * sigma, float * delta, int * P, int * P_end, int numVertices, int numEdges, int offset_source)
{
int offset_vertices = blockIdx.x * numVertices;
int offset_edge = blockIdx.x * numEdges;
for (int i = threadIdx.x; i < numVertices; i += blockDim.x)
{
dist[offset_vertices + i] = -1;
sigma[offset_vertices + i] = 0;
delta[offset_vertices + i] = 0;
P_end[offset_vertices + i] = r[i];
}
for (int i = threadIdx.x; i < numEdges; i += blockDim.x)
{
P[offset_edge + i] = 0;
}
int source = blockIdx.x + offset_source;
if (source >= numVertices)
return;
__shared__ bool done;
done = false;
int level = 0;
dist[offset_vertices + source] = level++;
sigma[offset_vertices + source ] = 1;
while (!done)
{
__syncthreads(); // attention: this sync is necessary
done = true;
for (int edge = threadIdx.x; edge < numEdges; edge += blockDim.x)
{
int current = edge_begin[edge];
if (dist[offset_vertices + current] != level - 1)
continue;
int next = edge_end[edge];
int read_dist = dist[offset_vertices + next];
if (read_dist == -1)
{
dist[offset_vertices + next] = level;
done = false;
}
if (read_dist < level && read_dist >= 0)
continue;
atomicAdd(sigma + offset_vertices + next, sigma[offset_vertices + current]); //atomic!
int p = atomicAdd(P_end + offset_vertices + next, 1);
P[offset_edge + p] = current;
}
level ++;
__syncthreads();
}
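// dependency accumulation: walk the BFS levels from deepest to shallowest and push each vertex's delta back to its predecessors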
for (int i = level - 1; i > 0; i--)
{
for (int next = threadIdx.x; next < numVertices; next += blockDim.x)
{
if (dist[offset_vertices + next] != i)
continue;
for (int j = r[next]; j < P_end[offset_vertices + next]; j += 1)
{
int current = P[offset_edge + j];
atomicAdd(delta + offset_vertices + current, (double) sigma[offset_vertices + current] / sigma[offset_vertices + next]*(1 + delta[offset_vertices + next]));
}
}
__syncthreads();
}
for (int current = threadIdx.x; current < numVertices; current += blockDim.x)
{
if(current != source)
atomicAdd(BC + current, delta[offset_vertices + current]);
}
}
void Betweenness_GPU_edge_pred(int *r, int *r_full, int *c, int numVertices, int numEdges, float *BC, int grid, int thread)
{
int devID;
hipDeviceProp_t deviceProps;
devID = findCudaDevice();
// get number of SMs on this GPU
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
//int thread = 256;
//int grid = 100;
// allocate device memory
int* d_r;
int* d_c;
int* d_r_full;
int* dist;
float* sigma;
float* delta;
int* P;
int* P_end;
checkCudaErrors( hipMalloc( (void**) &d_r, sizeof(int) * (numVertices + 1)));
checkCudaErrors( hipMalloc( (void**) &d_r_full, sizeof(int) * numEdges));
checkCudaErrors( hipMalloc( (void**) &d_c, sizeof(int) * numEdges));
checkCudaErrors( hipMalloc( (void**) &dist, sizeof(int) * numVertices * grid));
checkCudaErrors( hipMalloc( (void**) &sigma, sizeof(int) * numVertices * grid));
checkCudaErrors( hipMalloc( (void**) &delta, sizeof(int) * numVertices * grid));
checkCudaErrors( hipMalloc( (void**) &P, sizeof(int) * numEdges * grid));
checkCudaErrors( hipMalloc( (void**) &P_end, sizeof(int) * numVertices * grid));
// copy host memory to device
checkCudaErrors( hipMemcpy( d_r, r, sizeof(int) * (numVertices + 1), hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy( d_c, c, sizeof(int) * numEdges, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy( d_r_full, r_full, sizeof(int) * numEdges, hipMemcpyHostToDevice) );
// allocate device memory for result
float* d_BC;
checkCudaErrors( hipMalloc( (void**) &d_BC, sizeof(float) * numVertices));
checkCudaErrors( hipMemset( d_BC, 0, sizeof(float) * numVertices));
// execute the kernel
clock_t kernel_time = 0;
for (int offset_source = 0; offset_source < numVertices; offset_source += grid)
{
clock_t time = clock();
hipLaunchKernelGGL(( betweenness_node_pred_kernel), dim3(grid), dim3(thread), 0, 0, d_BC, d_r, d_r_full, d_c, dist, sigma, delta, P, P_end, numVertices, numEdges, offset_source);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
hipDeviceSynchronize();
time = clock() - time;
kernel_time += time;
cout<<offset_source<<" done. Time = "<<time<<"ms."<<endl;
}
cout<<"total kernel time: "<<kernel_time<<"ms."<<endl;
// copy result from device to host
checkCudaErrors(hipMemcpy(BC, d_BC, sizeof(float) * numVertices, hipMemcpyDeviceToHost));
// cleanup memory
checkCudaErrors(hipFree(d_r));
checkCudaErrors(hipFree(d_r_full));
checkCudaErrors(hipFree(d_c));
checkCudaErrors(hipFree(d_BC));
checkCudaErrors(hipFree(dist));
checkCudaErrors(hipFree(sigma));
checkCudaErrors(hipFree(delta));
checkCudaErrors(hipFree(P));
checkCudaErrors(hipFree(P_end));
hipDeviceReset();
}
| 96608a55be81d83ce9474951a47c70f990b9ed24.cu | #include "BC_GPU.cuh"
// changeable for performance
#define VIRTUAL_WARP 16
__global__ void betweenness_node_pred_kernel(float *BC, int * r, int * edge_begin, int *edge_end, int * dist, float * sigma, float * delta, int * P, int * P_end, int numVertices, int numEdges, int offset_source)
{
int offset_vertices = blockIdx.x * numVertices;
int offset_edge = blockIdx.x * numEdges;
for (int i = threadIdx.x; i < numVertices; i += blockDim.x)
{
dist[offset_vertices + i] = -1;
sigma[offset_vertices + i] = 0;
delta[offset_vertices + i] = 0;
P_end[offset_vertices + i] = r[i];
}
for (int i = threadIdx.x; i < numEdges; i += blockDim.x)
{
P[offset_edge + i] = 0;
}
int source = blockIdx.x + offset_source;
if (source >= numVertices)
return;
__shared__ bool done;
done = false;
int level = 0;
dist[offset_vertices + source] = level++;
sigma[offset_vertices + source ] = 1;
while (!done)
{
__syncthreads(); // attention: this sync is necessary
done = true;
for (int edge = threadIdx.x; edge < numEdges; edge += blockDim.x)
{
int current = edge_begin[edge];
if (dist[offset_vertices + current] != level - 1)
continue;
int next = edge_end[edge];
int read_dist = dist[offset_vertices + next];
if (read_dist == -1)
{
dist[offset_vertices + next] = level;
done = false;
}
if (read_dist < level && read_dist >= 0)
continue;
atomicAdd(sigma + offset_vertices + next, sigma[offset_vertices + current]); //atomic!
int p = atomicAdd(P_end + offset_vertices + next, 1);
P[offset_edge + p] = current;
}
level ++;
__syncthreads();
}
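// dependency accumulation: walk the BFS levels from deepest to shallowest and push each vertex's delta back to its predecessors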
for (int i = level - 1; i > 0; i--)
{
for (int next = threadIdx.x; next < numVertices; next += blockDim.x)
{
if (dist[offset_vertices + next] != i)
continue;
for (int j = r[next]; j < P_end[offset_vertices + next]; j += 1)
{
int current = P[offset_edge + j];
atomicAdd(delta + offset_vertices + current, (double) sigma[offset_vertices + current] / sigma[offset_vertices + next]*(1 + delta[offset_vertices + next]));
}
}
__syncthreads();
}
for (int current = threadIdx.x; current < numVertices; current += blockDim.x)
{
if(current != source)
atomicAdd(BC + current, delta[offset_vertices + current]);
}
}
void Betweenness_GPU_edge_pred(int *r, int *r_full, int *c, int numVertices, int numEdges, float *BC, int grid, int thread)
{
int devID;
cudaDeviceProp deviceProps;
devID = findCudaDevice();
// get number of SMs on this GPU
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
//int thread = 256;
//int grid = 100;
// allocate device memory
int* d_r;
int* d_c;
int* d_r_full;
int* dist;
float* sigma;
float* delta;
int* P;
int* P_end;
checkCudaErrors( cudaMalloc( (void**) &d_r, sizeof(int) * (numVertices + 1)));
checkCudaErrors( cudaMalloc( (void**) &d_r_full, sizeof(int) * numEdges));
checkCudaErrors( cudaMalloc( (void**) &d_c, sizeof(int) * numEdges));
checkCudaErrors( cudaMalloc( (void**) &dist, sizeof(int) * numVertices * grid));
checkCudaErrors( cudaMalloc( (void**) &sigma, sizeof(int) * numVertices * grid));
checkCudaErrors( cudaMalloc( (void**) &delta, sizeof(int) * numVertices * grid));
checkCudaErrors( cudaMalloc( (void**) &P, sizeof(int) * numEdges * grid));
checkCudaErrors( cudaMalloc( (void**) &P_end, sizeof(int) * numVertices * grid));
// copy host memory to device
checkCudaErrors( cudaMemcpy( d_r, r, sizeof(int) * (numVertices + 1), cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy( d_c, c, sizeof(int) * numEdges, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy( d_r_full, r_full, sizeof(int) * numEdges, cudaMemcpyHostToDevice) );
// allocate device memory for result
float* d_BC;
checkCudaErrors( cudaMalloc( (void**) &d_BC, sizeof(float) * numVertices));
checkCudaErrors( cudaMemset( d_BC, 0, sizeof(float) * numVertices));
// execute the kernel
clock_t kernel_time = 0;
for (int offset_source = 0; offset_source < numVertices; offset_source += grid)
{
clock_t time = clock();
betweenness_node_pred_kernel<<<grid, thread>>>(d_BC, d_r, d_r_full, d_c, dist, sigma, delta, P, P_end, numVertices, numEdges, offset_source);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
cudaThreadSynchronize();
time = clock() - time;
kernel_time += time;
cout<<offset_source<<" done. Time = "<<time<<"ms."<<endl;
}
cout<<"total kernel time: "<<kernel_time<<"ms."<<endl;
// copy result from device to host
checkCudaErrors(cudaMemcpy(BC, d_BC, sizeof(float) * numVertices, cudaMemcpyDeviceToHost));
// cleanup memory
checkCudaErrors(cudaFree(d_r));
checkCudaErrors(cudaFree(d_r_full));
checkCudaErrors(cudaFree(d_c));
checkCudaErrors(cudaFree(d_BC));
checkCudaErrors(cudaFree(dist));
checkCudaErrors(cudaFree(sigma));
checkCudaErrors(cudaFree(delta));
checkCudaErrors(cudaFree(P));
checkCudaErrors(cudaFree(P_end));
cudaDeviceReset();
}
|
a67877dd6fb2c7755f209221a2d09abf93b9cb89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "internal.h"
#include "vector_math.hpp"
#include "containers/safe_call.hpp"
#if __CUDA_ARCH__ < 300
__inline__ __device__
float __shfl_down(float val, int offset, int width = 32)
{
static __shared__ float shared[MAX_THREADS];
int lane = threadIdx.x % 32;
shared[threadIdx.x] = val;
__syncthreads();
val = (lane + offset < width) ? shared[threadIdx.x + offset] : 0;
__syncthreads();
return val;
}
#endif
#if __CUDA_ARCH__ < 350
template<typename T>
__device__ __forceinline__ T __ldg(const T* ptr)
{
return *ptr;
}
#endif
__inline__ __device__ jtjjtr warpReduceSum(jtjjtr val)
{
for(int offset = warpSize / 2; offset > 0; offset /= 2)
{
val.aa += __shfl_down(val.aa, offset);
val.ab += __shfl_down(val.ab, offset);
val.ac += __shfl_down(val.ac, offset);
val.ad += __shfl_down(val.ad, offset);
val.ae += __shfl_down(val.ae, offset);
val.af += __shfl_down(val.af, offset);
val.ag += __shfl_down(val.ag, offset);
val.bb += __shfl_down(val.bb, offset);
val.bc += __shfl_down(val.bc, offset);
val.bd += __shfl_down(val.bd, offset);
val.be += __shfl_down(val.be, offset);
val.bf += __shfl_down(val.bf, offset);
val.bg += __shfl_down(val.bg, offset);
val.cc += __shfl_down(val.cc, offset);
val.cd += __shfl_down(val.cd, offset);
val.ce += __shfl_down(val.ce, offset);
val.cf += __shfl_down(val.cf, offset);
val.cg += __shfl_down(val.cg, offset);
val.dd += __shfl_down(val.dd, offset);
val.de += __shfl_down(val.de, offset);
val.df += __shfl_down(val.df, offset);
val.dg += __shfl_down(val.dg, offset);
val.ee += __shfl_down(val.ee, offset);
val.ef += __shfl_down(val.ef, offset);
val.eg += __shfl_down(val.eg, offset);
val.ff += __shfl_down(val.ff, offset);
val.fg += __shfl_down(val.fg, offset);
val.residual += __shfl_down(val.residual, offset);
val.inliers += __shfl_down(val.inliers, offset);
}
return val;
}
__inline__ __device__ jtjjtr blockReduceSum(jtjjtr val)
{
static __shared__ jtjjtr shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val);
//write reduced value to shared memory
if(lane == 0)
{
shared[wid] = val;
}
__syncthreads();
const jtjjtr zero = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0};
//ensure we only grab a value from shared memory if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : zero;
if(wid == 0)
{
val = warpReduceSum(val);
}
return val;
}
__global__ void reduceSum(jtjjtr * in, jtjjtr * out, int N)
{
jtjjtr sum = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0};
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
sum.add(in[i]);
}
sum = blockReduceSum(sum);
if(threadIdx.x == 0)
{
out[blockIdx.x] = sum;
}
}
struct ICPReduction
{
Mat33 Rcurr;
float3 tcurr;
PtrStep<float> vmap_curr;
PtrStep<float> nmap_curr;
Mat33 Rprev_inv;
float3 tprev;
Intr intr;
PtrStep<float> vmap_g_prev;
PtrStep<float> nmap_g_prev;
float distThres;
float angleThres;
int cols;
int rows;
int N;
jtjjtr * out;
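// projective data association: warp the current-frame vertex into the previous camera frame, project it with the intrinsics, and accept the match only if the points are close enough and the normals nearly parallel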
__device__ __forceinline__ bool
search (int & x, int & y, float3& n, float3& d, float3& s) const
{
float3 vcurr;
vcurr.x = vmap_curr.ptr (y )[x];
vcurr.y = vmap_curr.ptr (y + rows)[x];
vcurr.z = vmap_curr.ptr (y + 2 * rows)[x];
float3 vcurr_g = Rcurr * vcurr + tcurr;
float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space
int2 ukr; //projection
ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4
ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4
if(ukr.x < 0 || ukr.y < 0 || ukr.x >= cols || ukr.y >= rows || vcurr_cp.z < 0)
return false;
float3 vprev_g;
vprev_g.x = __ldg(&vmap_g_prev.ptr (ukr.y )[ukr.x]);
vprev_g.y = __ldg(&vmap_g_prev.ptr (ukr.y + rows)[ukr.x]);
vprev_g.z = __ldg(&vmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]);
float3 ncurr;
ncurr.x = nmap_curr.ptr (y)[x];
ncurr.y = nmap_curr.ptr (y + rows)[x];
ncurr.z = nmap_curr.ptr (y + 2 * rows)[x];
float3 ncurr_g = Rcurr * ncurr;
float3 nprev_g;
nprev_g.x = __ldg(&nmap_g_prev.ptr (ukr.y)[ukr.x]);
nprev_g.y = __ldg(&nmap_g_prev.ptr (ukr.y + rows)[ukr.x]);
nprev_g.z = __ldg(&nmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]);
float dist = norm (vprev_g - vcurr_g);
float sine = norm (cross (ncurr_g, nprev_g));
n = nprev_g;
d = vprev_g;
s = vcurr_g;
return (sine < angleThres && dist <= distThres && !isnan (ncurr.x) && !isnan (nprev_g.x));
}
__device__ __forceinline__ jtjjtr
getProducts(int & i) const
{
int y = i / cols;
int x = i - (y * cols);
float3 n_cp, d_cp, s_cp;
bool found_coresp = search (x, y, n_cp, d_cp, s_cp);
float row[7] = {0, 0, 0, 0, 0, 0, 0};
if(found_coresp)
{
s_cp = Rprev_inv * (s_cp - tprev); // prev camera coo space
d_cp = Rprev_inv * (d_cp - tprev); // prev camera coo space
n_cp = Rprev_inv * (n_cp); // prev camera coo space
*(float3*)&row[0] = n_cp;
*(float3*)&row[3] = cross (s_cp, n_cp);
row[6] = dot (n_cp, s_cp - d_cp);
}
jtjjtr values = {row[0] * row[0],
row[0] * row[1],
row[0] * row[2],
row[0] * row[3],
row[0] * row[4],
row[0] * row[5],
row[0] * row[6],
row[1] * row[1],
row[1] * row[2],
row[1] * row[3],
row[1] * row[4],
row[1] * row[5],
row[1] * row[6],
row[2] * row[2],
row[2] * row[3],
row[2] * row[4],
row[2] * row[5],
row[2] * row[6],
row[3] * row[3],
row[3] * row[4],
row[3] * row[5],
row[3] * row[6],
row[4] * row[4],
row[4] * row[5],
row[4] * row[6],
row[5] * row[5],
row[5] * row[6],
row[6] * row[6],
found_coresp};
return values;
}
__device__ __forceinline__ void
operator () () const
{
jtjjtr sum = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0};
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
jtjjtr val = getProducts(i);
sum.add(val);
}
sum = blockReduceSum(sum);
if(threadIdx.x == 0)
{
out[blockIdx.x] = sum;
}
}
};
__global__ void icpKernel(const ICPReduction icp)
{
icp();
}
void icpStep(const Mat33& Rcurr,
const float3& tcurr,
const DeviceArray2D<float>& vmap_curr,
const DeviceArray2D<float>& nmap_curr,
const Mat33& Rprev_inv,
const float3& tprev,
const Intr& intr,
const DeviceArray2D<float>& vmap_g_prev,
const DeviceArray2D<float>& nmap_g_prev,
float distThres,
float angleThres,
DeviceArray<jtjjtr> & sum,
DeviceArray<jtjjtr> & out,
float * matrixA_host,
float * vectorB_host,
float * residual_host,
int threads, int blocks)
{
int cols = vmap_curr.cols ();
int rows = vmap_curr.rows () / 3;
ICPReduction icp;
icp.Rcurr = Rcurr;
icp.tcurr = tcurr;
icp.vmap_curr = vmap_curr;
icp.nmap_curr = nmap_curr;
icp.Rprev_inv = Rprev_inv;
icp.tprev = tprev;
icp.intr = intr;
icp.vmap_g_prev = vmap_g_prev;
icp.nmap_g_prev = nmap_g_prev;
icp.distThres = distThres;
icp.angleThres = angleThres;
icp.cols = cols;
icp.rows = rows;
icp.N = cols * rows;
icp.out = sum;
hipLaunchKernelGGL(( icpKernel), dim3(blocks), dim3(threads), 0, 0, icp);
hipLaunchKernelGGL(( reduceSum), dim3(1), dim3(MAX_THREADS), 0, 0, sum, out, blocks);
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipDeviceSynchronize());
float host_data[32];
out.download((jtjjtr *)&host_data[0]);
int shift = 0;
for (int i = 0; i < 6; ++i) //rows
{
for (int j = i; j < 7; ++j) // cols + b
{
float value = host_data[shift++];
if (j == 6) // vector b
vectorB_host[i] = value;
else
matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value;
}
}
residual_host[0] = host_data[27];
residual_host[1] = host_data[28];
}
| a67877dd6fb2c7755f209221a2d09abf93b9cb89.cu | #include "internal.h"
#include "vector_math.hpp"
#include "containers/safe_call.hpp"
#if __CUDA_ARCH__ < 300
__inline__ __device__
float __shfl_down(float val, int offset, int width = 32)
{
static __shared__ float shared[MAX_THREADS];
int lane = threadIdx.x % 32;
shared[threadIdx.x] = val;
__syncthreads();
val = (lane + offset < width) ? shared[threadIdx.x + offset] : 0;
__syncthreads();
return val;
}
#endif
#if __CUDA_ARCH__ < 350
template<typename T>
__device__ __forceinline__ T __ldg(const T* ptr)
{
return *ptr;
}
#endif
__inline__ __device__ jtjjtr warpReduceSum(jtjjtr val)
{
for(int offset = warpSize / 2; offset > 0; offset /= 2)
{
val.aa += __shfl_down(val.aa, offset);
val.ab += __shfl_down(val.ab, offset);
val.ac += __shfl_down(val.ac, offset);
val.ad += __shfl_down(val.ad, offset);
val.ae += __shfl_down(val.ae, offset);
val.af += __shfl_down(val.af, offset);
val.ag += __shfl_down(val.ag, offset);
val.bb += __shfl_down(val.bb, offset);
val.bc += __shfl_down(val.bc, offset);
val.bd += __shfl_down(val.bd, offset);
val.be += __shfl_down(val.be, offset);
val.bf += __shfl_down(val.bf, offset);
val.bg += __shfl_down(val.bg, offset);
val.cc += __shfl_down(val.cc, offset);
val.cd += __shfl_down(val.cd, offset);
val.ce += __shfl_down(val.ce, offset);
val.cf += __shfl_down(val.cf, offset);
val.cg += __shfl_down(val.cg, offset);
val.dd += __shfl_down(val.dd, offset);
val.de += __shfl_down(val.de, offset);
val.df += __shfl_down(val.df, offset);
val.dg += __shfl_down(val.dg, offset);
val.ee += __shfl_down(val.ee, offset);
val.ef += __shfl_down(val.ef, offset);
val.eg += __shfl_down(val.eg, offset);
val.ff += __shfl_down(val.ff, offset);
val.fg += __shfl_down(val.fg, offset);
val.residual += __shfl_down(val.residual, offset);
val.inliers += __shfl_down(val.inliers, offset);
}
return val;
}
__inline__ __device__ jtjjtr blockReduceSum(jtjjtr val)
{
static __shared__ jtjjtr shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val);
//write reduced value to shared memory
if(lane == 0)
{
shared[wid] = val;
}
__syncthreads();
const jtjjtr zero = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0};
//ensure we only grab a value from shared memory if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : zero;
if(wid == 0)
{
val = warpReduceSum(val);
}
return val;
}
__global__ void reduceSum(jtjjtr * in, jtjjtr * out, int N)
{
jtjjtr sum = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0};
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
sum.add(in[i]);
}
sum = blockReduceSum(sum);
if(threadIdx.x == 0)
{
out[blockIdx.x] = sum;
}
}
struct ICPReduction
{
Mat33 Rcurr;
float3 tcurr;
PtrStep<float> vmap_curr;
PtrStep<float> nmap_curr;
Mat33 Rprev_inv;
float3 tprev;
Intr intr;
PtrStep<float> vmap_g_prev;
PtrStep<float> nmap_g_prev;
float distThres;
float angleThres;
int cols;
int rows;
int N;
jtjjtr * out;
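// projective data association: warp the current-frame vertex into the previous camera frame, project it with the intrinsics, and accept the match only if the points are close enough and the normals nearly parallel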
__device__ __forceinline__ bool
search (int & x, int & y, float3& n, float3& d, float3& s) const
{
float3 vcurr;
vcurr.x = vmap_curr.ptr (y )[x];
vcurr.y = vmap_curr.ptr (y + rows)[x];
vcurr.z = vmap_curr.ptr (y + 2 * rows)[x];
float3 vcurr_g = Rcurr * vcurr + tcurr;
float3 vcurr_cp = Rprev_inv * (vcurr_g - tprev); // prev camera coo space
int2 ukr; //projection
ukr.x = __float2int_rn (vcurr_cp.x * intr.fx / vcurr_cp.z + intr.cx); //4
ukr.y = __float2int_rn (vcurr_cp.y * intr.fy / vcurr_cp.z + intr.cy); //4
if(ukr.x < 0 || ukr.y < 0 || ukr.x >= cols || ukr.y >= rows || vcurr_cp.z < 0)
return false;
float3 vprev_g;
vprev_g.x = __ldg(&vmap_g_prev.ptr (ukr.y )[ukr.x]);
vprev_g.y = __ldg(&vmap_g_prev.ptr (ukr.y + rows)[ukr.x]);
vprev_g.z = __ldg(&vmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]);
float3 ncurr;
ncurr.x = nmap_curr.ptr (y)[x];
ncurr.y = nmap_curr.ptr (y + rows)[x];
ncurr.z = nmap_curr.ptr (y + 2 * rows)[x];
float3 ncurr_g = Rcurr * ncurr;
float3 nprev_g;
nprev_g.x = __ldg(&nmap_g_prev.ptr (ukr.y)[ukr.x]);
nprev_g.y = __ldg(&nmap_g_prev.ptr (ukr.y + rows)[ukr.x]);
nprev_g.z = __ldg(&nmap_g_prev.ptr (ukr.y + 2 * rows)[ukr.x]);
float dist = norm (vprev_g - vcurr_g);
float sine = norm (cross (ncurr_g, nprev_g));
n = nprev_g;
d = vprev_g;
s = vcurr_g;
return (sine < angleThres && dist <= distThres && !isnan (ncurr.x) && !isnan (nprev_g.x));
}
__device__ __forceinline__ jtjjtr
getProducts(int & i) const
{
int y = i / cols;
int x = i - (y * cols);
float3 n_cp, d_cp, s_cp;
bool found_coresp = search (x, y, n_cp, d_cp, s_cp);
float row[7] = {0, 0, 0, 0, 0, 0, 0};
if(found_coresp)
{
s_cp = Rprev_inv * (s_cp - tprev); // prev camera coo space
d_cp = Rprev_inv * (d_cp - tprev); // prev camera coo space
n_cp = Rprev_inv * (n_cp); // prev camera coo space
*(float3*)&row[0] = n_cp;
*(float3*)&row[3] = cross (s_cp, n_cp);
row[6] = dot (n_cp, s_cp - d_cp);
}
jtjjtr values = {row[0] * row[0],
row[0] * row[1],
row[0] * row[2],
row[0] * row[3],
row[0] * row[4],
row[0] * row[5],
row[0] * row[6],
row[1] * row[1],
row[1] * row[2],
row[1] * row[3],
row[1] * row[4],
row[1] * row[5],
row[1] * row[6],
row[2] * row[2],
row[2] * row[3],
row[2] * row[4],
row[2] * row[5],
row[2] * row[6],
row[3] * row[3],
row[3] * row[4],
row[3] * row[5],
row[3] * row[6],
row[4] * row[4],
row[4] * row[5],
row[4] * row[6],
row[5] * row[5],
row[5] * row[6],
row[6] * row[6],
found_coresp};
return values;
}
__device__ __forceinline__ void
operator () () const
{
jtjjtr sum = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0};
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
jtjjtr val = getProducts(i);
sum.add(val);
}
sum = blockReduceSum(sum);
if(threadIdx.x == 0)
{
out[blockIdx.x] = sum;
}
}
};
__global__ void icpKernel(const ICPReduction icp)
{
icp();
}
void icpStep(const Mat33& Rcurr,
const float3& tcurr,
const DeviceArray2D<float>& vmap_curr,
const DeviceArray2D<float>& nmap_curr,
const Mat33& Rprev_inv,
const float3& tprev,
const Intr& intr,
const DeviceArray2D<float>& vmap_g_prev,
const DeviceArray2D<float>& nmap_g_prev,
float distThres,
float angleThres,
DeviceArray<jtjjtr> & sum,
DeviceArray<jtjjtr> & out,
float * matrixA_host,
float * vectorB_host,
float * residual_host,
int threads, int blocks)
{
int cols = vmap_curr.cols ();
int rows = vmap_curr.rows () / 3;
ICPReduction icp;
icp.Rcurr = Rcurr;
icp.tcurr = tcurr;
icp.vmap_curr = vmap_curr;
icp.nmap_curr = nmap_curr;
icp.Rprev_inv = Rprev_inv;
icp.tprev = tprev;
icp.intr = intr;
icp.vmap_g_prev = vmap_g_prev;
icp.nmap_g_prev = nmap_g_prev;
icp.distThres = distThres;
icp.angleThres = angleThres;
icp.cols = cols;
icp.rows = rows;
icp.N = cols * rows;
icp.out = sum;
icpKernel<<<blocks, threads>>>(icp);
reduceSum<<<1, MAX_THREADS>>>(sum, out, blocks);
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaDeviceSynchronize());
float host_data[32];
out.download((jtjjtr *)&host_data[0]);
int shift = 0;
for (int i = 0; i < 6; ++i) //rows
{
for (int j = i; j < 7; ++j) // cols + b
{
float value = host_data[shift++];
if (j == 6) // vector b
vectorB_host[i] = value;
else
matrixA_host[j * 6 + i] = matrixA_host[i * 6 + j] = value;
}
}
residual_host[0] = host_data[27];
residual_host[1] = host_data[28];
}
|
8d49a5e718ff41076ee2ec1a2644f448fe0be4ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
__global__ void MatrixMulKernel(float* M, float* N, float* P, int Width)
{
// Calculate the row index of the P element and M
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Calculate the column index of P and N
int Col = blockIdx.x*blockDim.x+threadIdx.x;
if ((Row < Width) && (Col < Width)) {
float Pvalue = 0;
// each thread computes one element of the block sub-matrix
for (int k = 0; k < Width; ++k) {
Pvalue += M[Row*Width+k]*N[k*Width+Col];
}
P[Row*Width+Col] = Pvalue;
}
}
int main()
{
const int w = 2;
float M[w][w] = {{0, 1}, {2, 3}};
float N[w][w] = {{0, 1}, {2, 3}};
float P[w][w];
int sizeMat = w * w * sizeof(float);
float *d_M, *d_N, *d_P;
hipMalloc((void **) &d_M, w * w * sizeof(float));
hipMalloc((void **) &d_N, w * w * sizeof(float));
hipMalloc((void **) &d_P, w * w * sizeof(float));
hipMemcpy(d_M, M, sizeMat, hipMemcpyHostToDevice);
hipMemcpy(d_N, N, sizeMat, hipMemcpyHostToDevice);
hipMemcpy(d_P, P, sizeMat, hipMemcpyHostToDevice);
dim3 dimBlock(w, w, 1);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(1), dim3(dimBlock), 0, 0, d_M, d_N, d_P, w);
hipMemcpy(P, d_P, sizeMat, hipMemcpyDeviceToHost);
for (int i = 0; i < w; ++i)
{
for (int j = 0; j < w; ++j)
{
std::cout << P[i][j] << " ";
}
std::cout << "\n";
}
return 0;
}
| 8d49a5e718ff41076ee2ec1a2644f448fe0be4ee.cu | #include <iostream>
__global__ void MatrixMulKernel(float* M, float* N, float* P, int Width)
{
// Calculate the row index of the P element and M
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Calculate the column index of P and N
int Col = blockIdx.x*blockDim.x+threadIdx.x;
if ((Row < Width) && (Col < Width)) {
float Pvalue = 0;
// each thread computes one element of the block sub-matrix
for (int k = 0; k < Width; ++k) {
Pvalue += M[Row*Width+k]*N[k*Width+Col];
}
P[Row*Width+Col] = Pvalue;
}
}
int main()
{
const int w = 2;
float M[w][w] = {{0, 1}, {2, 3}};
float N[w][w] = {{0, 1}, {2, 3}};
float P[w][w];
int sizeMat = w * w * sizeof(float);
float *d_M, *d_N, *d_P;
cudaMalloc((void **) &d_M, w * w * sizeof(float));
cudaMalloc((void **) &d_N, w * w * sizeof(float));
cudaMalloc((void **) &d_P, w * w * sizeof(float));
cudaMemcpy(d_M, M, sizeMat, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, N, sizeMat, cudaMemcpyHostToDevice);
cudaMemcpy(d_P, P, sizeMat, cudaMemcpyHostToDevice);
dim3 dimBlock(w, w, 1);
MatrixMulKernel<<<1, dimBlock>>>(d_M, d_N, d_P, w);
cudaMemcpy(P, d_P, sizeMat, cudaMemcpyDeviceToHost);
for (int i = 0; i < w; ++i)
{
for (int j = 0; j < w; ++j)
{
std::cout << P[i][j] << " ";
}
std::cout << "\n";
}
return 0;
}
|
e944146d74feb91b30a62cc96d495999a2871e65.hip | // !!! This is a file automatically generated by hipify!!!
#include "../header/beta_hat.h"
typedef thrust::tuple<strideIter, strideIter> nrml_tuple;
typedef thrust::tuple<double &, double &> nrml_eltup;
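// solves (L * L^T) x = b with two triangular solves against the Cholesky factor L (forward solve, then the transposed solve)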
struct solve_normal_eq {
int dim;
__host__ __device__ solve_normal_eq(int _dim): dim(_dim){}
template <typename T>
__host__ __device__ void operator()(T Tup){
hipblasHandle_t handle;
hipblasCreate(&handle);
int n=dim, lda=dim, incx=1;
double *L = thrust::raw_pointer_cast(&(thrust::get<0>(Tup)));
double *x = thrust::raw_pointer_cast(&(thrust::get<1>(Tup)));
hipblasDtrsv(handle, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, n, L, lda, x, incx);
hipblasDtrsv(handle, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_T, HIPBLAS_DIAG_NON_UNIT, n, L, lda, x, incx);
hipblasDestroy(handle);
}
};
struct left_mult_chol_inv{
int dim;
__host__ __device__ left_mult_chol_inv(int _dim): dim(_dim) {}
template <typename T>
__host__ __device__ void operator()(T tup){
hipblasHandle_t handle;
hipblasCreate(&handle);
int n=dim, lda=dim, incx=1;
double *z = thrust::raw_pointer_cast(&(thrust::get<0>(tup)));
double *L = thrust::raw_pointer_cast(&(thrust::get<1>(tup)));
// t(L)^{-1} %*% t(t(L)^{-1}) = (L%*%t(L))^{-1}
hipblasDtrsv(handle, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_T, HIPBLAS_DIAG_NON_UNIT, n, L, lda, z, incx);
hipblasDestroy(handle);
}
};
void beta_hat(fvec_d &chol_prec, fvec_d &beta_hat, int K, int V){
rowIter L_first = getRowIter(V*V, 0);
rowIter xty_first = getRowIter(V, 0);
strideIter L = thrust::permutation_iterator<realIter, rowIter>(chol_prec.begin(), L_first);
strideIter x = thrust::permutation_iterator<realIter, rowIter>(beta_hat.begin(), xty_first);
nrml_tuple my_tuple = thrust::make_tuple(L, x);
thrust::zip_iterator<nrml_tuple> zipped = thrust::zip_iterator<nrml_tuple>(my_tuple);
solve_normal_eq f(V);
thrust::for_each(zipped, zipped + K, f);
//std::cout << "betahat:\n";
//printVec(beta_hat, V, K);
}
void scale_chol_inv(fvec_d &chol_prec, fvec_d &z, int n, int dim){
typedef thrust::tuple<strideIter, strideIter> scl_z_tup;
typedef thrust::zip_iterator<scl_z_tup> scl_z_zip;
//need access to first elems of chols and occ. betas
rowIter strides_z = getRowIter(dim, 0);
rowIter strides_L = getRowIter(dim*dim, 0);
strideIter z_first = thrust::permutation_iterator<realIter, rowIter>(z.begin(), strides_z);
strideIter L_first = thrust::permutation_iterator<realIter, rowIter>(chol_prec.begin(), strides_L);
scl_z_zip scale_zip = thrust::zip_iterator<scl_z_tup>(thrust::make_tuple(z_first, L_first));
left_mult_chol_inv f(dim);
thrust::for_each(scale_zip, scale_zip + n, f);
}
| e944146d74feb91b30a62cc96d495999a2871e65.cu | #include "../header/beta_hat.h"
typedef thrust::tuple<strideIter, strideIter> nrml_tuple;
typedef thrust::tuple<double &, double &> nrml_eltup;
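// solves (L * L^T) x = b with two triangular solves against the Cholesky factor L (forward solve, then the transposed solve)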
struct solve_normal_eq {
int dim;
__host__ __device__ solve_normal_eq(int _dim): dim(_dim){}
template <typename T>
__host__ __device__ void operator()(T Tup){
cublasHandle_t handle;
cublasCreate_v2(&handle);
int n=dim, lda=dim, incx=1;
double *L = thrust::raw_pointer_cast(&(thrust::get<0>(Tup)));
double *x = thrust::raw_pointer_cast(&(thrust::get<1>(Tup)));
cublasDtrsv(handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, n, L, lda, x, incx);
cublasDtrsv(handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, n, L, lda, x, incx);
cublasDestroy_v2(handle);
}
};
struct left_mult_chol_inv{
int dim;
__host__ __device__ left_mult_chol_inv(int _dim): dim(_dim) {}
template <typename T>
__host__ __device__ void operator()(T tup){
cublasHandle_t handle;
cublasCreate_v2(&handle);
int n=dim, lda=dim, incx=1;
double *z = thrust::raw_pointer_cast(&(thrust::get<0>(tup)));
double *L = thrust::raw_pointer_cast(&(thrust::get<1>(tup)));
// t(L)^{-1} %*% t(t(L)^{-1}) = (L%*%t(L))^{-1}
cublasDtrsv(handle, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, n, L, lda, z, incx);
cublasDestroy(handle);
}
};
void beta_hat(fvec_d &chol_prec, fvec_d &beta_hat, int K, int V){
rowIter L_first = getRowIter(V*V, 0);
rowIter xty_first = getRowIter(V, 0);
strideIter L = thrust::permutation_iterator<realIter, rowIter>(chol_prec.begin(), L_first);
strideIter x = thrust::permutation_iterator<realIter, rowIter>(beta_hat.begin(), xty_first);
nrml_tuple my_tuple = thrust::make_tuple(L, x);
thrust::zip_iterator<nrml_tuple> zipped = thrust::zip_iterator<nrml_tuple>(my_tuple);
solve_normal_eq f(V);
thrust::for_each(zipped, zipped + K, f);
//std::cout << "betahat:\n";
//printVec(beta_hat, V, K);
}
void scale_chol_inv(fvec_d &chol_prec, fvec_d &z, int n, int dim){
typedef thrust::tuple<strideIter, strideIter> scl_z_tup;
typedef thrust::zip_iterator<scl_z_tup> scl_z_zip;
//need access to first elems of chols and occ. betas
rowIter strides_z = getRowIter(dim, 0);
rowIter strides_L = getRowIter(dim*dim, 0);
strideIter z_first = thrust::permutation_iterator<realIter, rowIter>(z.begin(), strides_z);
strideIter L_first = thrust::permutation_iterator<realIter, rowIter>(chol_prec.begin(), strides_L);
scl_z_zip scale_zip = thrust::zip_iterator<scl_z_tup>(thrust::make_tuple(z_first, L_first));
left_mult_chol_inv f(dim);
thrust::for_each(scale_zip, scale_zip + n, f);
}
|
403ce1cf985dad0eb407c4d6db68597390b7f6c4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "Header.h"
hipError_t groupPointsToClustersCuda(POINT_STR* &points, int points_size, CLUSTER_STR* &clusters, int clusters_size, int nMaxIterations);
void freeDevBuffers(POINT_STR *dev_points, CLUSTER_STR *dev_clusters);
__device__ void assignPointToClusterDevice(POINT_STR *p, CLUSTER_STR* clusters, int clusters_size);
__device__ double getDistanceBetweenPoints(POINT_STR p1, POINT_STR p2);
// calculate the distance from a point to each cluster centroid and assign the point to the closest cluster
__device__ void assignPointToClusterDevice(POINT_STR *p, CLUSTER_STR* clusters, int clusters_size)
{
int clusterID = clusters[0].id;
double minDistance = getDistanceBetweenPoints(*p, clusters[0].centroid);
for (int i = 1; i<clusters_size; i++) {
double dist = getDistanceBetweenPoints(*p, clusters[i].centroid);
if (dist<minDistance) {
minDistance = dist;
clusterID = clusters[i].id;
}
}
//assign point p to the cluster.
p->clusterID = clusterID;
}
__device__ double getDistanceBetweenPoints(POINT_STR p1, POINT_STR p2)
{
double dx = p2.x - p1.x;
double dy = p2.y - p1.y;
return sqrt(dx*dx + dy*dy);
}
// each thread takes a single point from the points array, checks its distance to the clusters and moves it to the closest one
__global__ void AssignPointsToClosestClusters(POINT_STR *dev_points, CLUSTER_STR *dev_clusters, int clusters_size, int nThreadsInBlock, int startIndexOffset)
{
int tID = threadIdx.x;
int bID = blockIdx.x;
int pointIndex = startIndexOffset + ((bID * nThreadsInBlock) + tID);
assignPointToClusterDevice(&dev_points[pointIndex], dev_clusters, clusters_size);
}
// kernel function where each thread takes a range of points from the points array; for each point it checks the distance to every cluster and assigns it to the closest one
__global__ void AssignRangeOfPointsToClosestClusters(POINT_STR *dev_points, int points_size, CLUSTER_STR *dev_clusters, int clusters_size, int pointsRangeForThread, int pointsRangeForBlock)
{
int tID = threadIdx.x;
int bID = blockIdx.x;
int startIndexOffset = bID*pointsRangeForBlock + tID*pointsRangeForThread;
// check if hipOccupancyMaxPotentialBlockSize allotted more threads than there are points
if (startIndexOffset>points_size - 1) {
return;
}
//move each point to closest cluster
for (int i = startIndexOffset; i<(startIndexOffset + pointsRangeForThread); i++) {
assignPointToClusterDevice(&dev_points[i], dev_clusters, clusters_size);
}
}
// For given array of points and clusters, calculates the distance for each point to each cluster
hipError_t groupPointsToClustersCuda(POINT_STR* &points, int points_size, CLUSTER_STR* &clusters, int clusters_size, int nMaxIterations)
{
POINT_STR *dev_points = 0;
CLUSTER_STR *dev_clusters = 0;
hipError_t cudaStatus;
int numBlocks, nThreadsForBlock, minGridSize;
// calculates the number of threads per block that achieves the maximum multiprocessor-level occupancy. Device specs are retrieved automatically
hipOccupancyMaxPotentialBlockSize(&minGridSize, &nThreadsForBlock, AssignRangeOfPointsToClosestClusters, 0, points_size);
// Round up numBlocks so the kernel launch covers all points
numBlocks = (points_size + nThreadsForBlock - 1) / nThreadsForBlock;
// each thread performs the computation on a contiguous range of points from the points array
// calculate the length of the range each thread should work on
int pointsRangeForThread;
if (numBlocks*nThreadsForBlock>points_size) {
pointsRangeForThread = 1;
}
else {
pointsRangeForThread = points_size / (numBlocks*nThreadsForBlock);
}
// calculate the total range size which each block will work on
int pointsRangeForBlock = pointsRangeForThread*nThreadsForBlock;
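	// Illustrative arithmetic (assumed values, not from the original code): with points_size = 10000
	// and nThreadsForBlock = 256, numBlocks = (10000 + 255) / 256 = 40, i.e. 10240 threads in total.
	// Since 10240 > 10000, pointsRangeForThread stays 1 and pointsRangeForBlock = 256; threads whose
	// start index falls past the last point simply return inside the kernel.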
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
printf("hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n%s", hipGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
// Allocate GPU buffers for points and clusters array
cudaStatus = hipMalloc((void**)&dev_points, points_size * sizeof(POINT_STR));
if (cudaStatus != hipSuccess) {
printf("hipMalloc failed! Allocate GPU buffer for points array \n%s", hipGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
cudaStatus = hipMalloc((void**)&dev_clusters, clusters_size * sizeof(CLUSTER_STR));
if (cudaStatus != hipSuccess) {
printf("hipMalloc failed! Allocate GPU buffer for clusters array \n%s", hipGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
	// Copy points and clusters arrays to allocated GPU buffers
cudaStatus = hipMemcpy(dev_points, points, points_size * sizeof(POINT_STR), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy failed! Copy points alocated GPU buffers\n%s", hipGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
for (int i = 0; i<nMaxIterations; i++) {
cudaStatus = hipMemcpy(dev_clusters, clusters, clusters_size * sizeof(CLUSTER_STR), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
printf("hipMemcpy failed! Copy points alocated GPU buffers\n%s", hipGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
		//run kernel function which will assign each point to the closest cluster
AssignRangeOfPointsToClosestClusters << <numBlocks, nThreadsForBlock >> >(dev_points, points_size, dev_clusters, clusters_size, pointsRangeForThread, pointsRangeForBlock);
// wait for the kernel to finish.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
printf("hipDeviceSynchronize failed! AssignRangeOfPointsToClosestClusters\n%s", hipGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
		// special case where not all points got assigned to clusters due to the remainder
if (points_size % pointsRangeForThread != 0) {
printf("reminder case\n");
int nRemindPoints = points_size % pointsRangeForThread;
int startIndexOffset = points_size - nRemindPoints;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &nThreadsForBlock, AssignPointsToClosestClusters, 0, nRemindPoints);
numBlocks = (nRemindPoints + nThreadsForBlock - 1) / nThreadsForBlock;
AssignPointsToClosestClusters << <numBlocks, nThreadsForBlock >> >(dev_points, dev_clusters, clusters_size, nThreadsForBlock, startIndexOffset);
// wait for the kernel to finish.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
printf("hipDeviceSynchronize failed! AssignRangeOfPointsToClosestClusters\n%s", hipGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
}
// Copy results of sorted points per clusters
cudaStatus = hipMemcpy(points, dev_points, points_size * sizeof(POINT_STR), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
printf("Copy results of found clusters from device to host failed!\n%s", hipGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
recalculateClusterCentroids(clusters, clusters_size, points, points_size);
		// stop K-Means when all cluster centroids stay the same
if (!isClustersCentroidsHasChanged(clusters, clusters_size)) {
break;
}
}
freeDevBuffers(dev_points, dev_clusters);
return cudaStatus;
}
void freeDevBuffers(POINT_STR *dev_points, CLUSTER_STR *dev_clusters) {
hipFree(dev_points);
hipFree(dev_clusters);
}
| 403ce1cf985dad0eb407c4d6db68597390b7f6c4.cu | #include <stdlib.h>
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "Header.h"
cudaError_t groupPointsToClustersCuda(POINT_STR* &points, int points_size, CLUSTER_STR* &clusters, int clusters_size, int nMaxIterations);
void freeDevBuffers(POINT_STR *dev_points, CLUSTER_STR *dev_clusters);
__device__ void assignPointToClusterDevice(POINT_STR *p, CLUSTER_STR* clusters, int clusters_size);
__device__ double getDistanceBetweenPoints(POINT_STR p1, POINT_STR p2);
// calculate point distance to each one of the clusters centroid and join it to the closest cluster
__device__ void assignPointToClusterDevice(POINT_STR *p, CLUSTER_STR* clusters, int clusters_size)
{
int clusterID = clusters[0].id;
double minDistance = getDistanceBetweenPoints(*p, clusters[0].centroid);
for (int i = 1; i<clusters_size; i++) {
double dist = getDistanceBetweenPoints(*p, clusters[i].centroid);
if (dist<minDistance) {
minDistance = dist;
clusterID = clusters[i].id;
}
}
//assign point p to the cluster.
p->clusterID = clusterID;
}
__device__ double getDistanceBetweenPoints(POINT_STR p1, POINT_STR p2)
{
double dx = p2.x - p1.x;
double dy = p2.y - p1.y;
return sqrt(dx*dx + dy*dy);
}
// take single point from points array, checks its distance to clusters and move to closest
__global__ void AssignPointsToClosestClusters(POINT_STR *dev_points, CLUSTER_STR *dev_clusters, int clusters_size, int nThreadsInBlock, int startIndexOffset)
{
int tID = threadIdx.x;
int bID = blockIdx.x;
int pointIndex = startIndexOffset + ((bID * nThreadsInBlock) + tID);
assignPointToClusterDevice(&dev_points[pointIndex], dev_clusters, clusters_size);
}
// kernel function where each thread takes range of points from points array , for each point checks the distance to each cluster and assigns it to closest
__global__ void AssignRangeOfPointsToClosestClusters(POINT_STR *dev_points, int points_size, CLUSTER_STR *dev_clusters, int clusters_size, int pointsRangeForThread, int pointsRangeForBlock)
{
int tID = threadIdx.x;
int bID = blockIdx.x;
int startIndexOffset = bID*pointsRangeForBlock + tID*pointsRangeForThread;
	// check if cudaOccupancyMaxPotentialBlockSize allocated more threads than needed
if (startIndexOffset>points_size - 1) {
return;
}
//move each point to closest cluster
for (int i = startIndexOffset; i<(startIndexOffset + pointsRangeForThread); i++) {
assignPointToClusterDevice(&dev_points[i], dev_clusters, clusters_size);
}
}
// For given array of points and clusters, calculates the distance for each point to each cluster
cudaError_t groupPointsToClustersCuda(POINT_STR* &points, int points_size, CLUSTER_STR* &clusters, int clusters_size, int nMaxIterations)
{
POINT_STR *dev_points = 0;
CLUSTER_STR *dev_clusters = 0;
cudaError_t cudaStatus;
int numBlocks, nThreadsForBlock, minGridSize;
	// calculates the number of threads per block that achieves the maximum multiprocessor-level occupancy. Device specs are received automatically
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &nThreadsForBlock, AssignRangeOfPointsToClosestClusters, 0, points_size);
	// Round up numBlocks so the kernel launch covers all points
numBlocks = (points_size + nThreadsForBlock - 1) / nThreadsForBlock;
	// each thread works on a contiguous range of points from the points array
	// calculate the length of the range each thread should process
int pointsRangeForThread;
if (numBlocks*nThreadsForBlock>points_size) {
pointsRangeForThread = 1;
}
else {
pointsRangeForThread = points_size / (numBlocks*nThreadsForBlock);
}
// calculate the total range size which each block will work on
int pointsRangeForBlock = pointsRangeForThread*nThreadsForBlock;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
printf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n%s", cudaGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
// Allocate GPU buffers for points and clusters array
cudaStatus = cudaMalloc((void**)&dev_points, points_size * sizeof(POINT_STR));
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc failed! Allocate GPU buffer for points array \n%s", cudaGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
cudaStatus = cudaMalloc((void**)&dev_clusters, clusters_size * sizeof(CLUSTER_STR));
if (cudaStatus != cudaSuccess) {
printf("cudaMalloc failed! Allocate GPU buffer for clusters array \n%s", cudaGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
	// Copy points and clusters arrays to allocated GPU buffers
cudaStatus = cudaMemcpy(dev_points, points, points_size * sizeof(POINT_STR), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy failed! Copy points alocated GPU buffers\n%s", cudaGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
for (int i = 0; i<nMaxIterations; i++) {
cudaStatus = cudaMemcpy(dev_clusters, clusters, clusters_size * sizeof(CLUSTER_STR), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
printf("cudaMemcpy failed! Copy points alocated GPU buffers\n%s", cudaGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
		//run kernel function which will assign each point to the closest cluster
AssignRangeOfPointsToClosestClusters << <numBlocks, nThreadsForBlock >> >(dev_points, points_size, dev_clusters, clusters_size, pointsRangeForThread, pointsRangeForBlock);
// wait for the kernel to finish.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
printf("cudaDeviceSynchronize failed! AssignRangeOfPointsToClosestClusters\n%s", cudaGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
		// special case where not all points got assigned to clusters due to the remainder
if (points_size % pointsRangeForThread != 0) {
printf("reminder case\n");
int nRemindPoints = points_size % pointsRangeForThread;
int startIndexOffset = points_size - nRemindPoints;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &nThreadsForBlock, AssignPointsToClosestClusters, 0, nRemindPoints);
numBlocks = (nRemindPoints + nThreadsForBlock - 1) / nThreadsForBlock;
AssignPointsToClosestClusters << <numBlocks, nThreadsForBlock >> >(dev_points, dev_clusters, clusters_size, nThreadsForBlock, startIndexOffset);
// wait for the kernel to finish.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
printf("cudaDeviceSynchronize failed! AssignRangeOfPointsToClosestClusters\n%s", cudaGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
}
// Copy results of sorted points per clusters
cudaStatus = cudaMemcpy(points, dev_points, points_size * sizeof(POINT_STR), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
printf("Copy results of found clusters from device to host failed!\n%s", cudaGetErrorString(cudaStatus));
freeDevBuffers(dev_points, dev_clusters);
}
recalculateClusterCentroids(clusters, clusters_size, points, points_size);
		// stop K-Means when all cluster centroids stay the same
if (!isClustersCentroidsHasChanged(clusters, clusters_size)) {
break;
}
}
freeDevBuffers(dev_points, dev_clusters);
return cudaStatus;
}
void freeDevBuffers(POINT_STR *dev_points, CLUSTER_STR *dev_clusters) {
cudaFree(dev_points);
cudaFree(dev_clusters);
}
|
f01dea1c1a01a71b179e485c75ebb25ce3bbf64a.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _BOUNDARIES_THERMO_CUDA_
#define _BOUNDARIES_THERMO_CUDA_
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "math.h"
#include "../Source/CUDA/cuda_header.h"
//output velocity derivative texture //input velocity texture
__global__ void cuda_kernel_boundaries_thermo(float*input, Size size, float left, float right){
int x_iter = blockIdx.x*blockDim.x + threadIdx.x;
int y_iter = blockIdx.y*blockDim.y + threadIdx.y;
int z_iter = 0;
for(z_iter = 0; z_iter < size.depth_; z_iter++){
float*cell = input + (z_iter*size.pitch_slice_) + (y_iter*size.pitch_) + (PIXEL_FMT_SIZE_RG * x_iter);
if(y_iter == 0){
if(x_iter < 3*(size.width_/4.f) && x_iter > (size.width_/4.f)){
cell[0] = left;
}else{
cell[0] = right;
}
}
}
}
extern "C"
void cuda_fluid_boundaries_thermo(void *input, Size size, float left, float right){
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((size.width_+Db.x-1)/Db.x, (size.height_+Db.y-1)/Db.y);
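	// Illustrative sizing (assumed field dimensions): for a 256 x 128 field, Dg = ((256+15)/16, (128+15)/16) = (16, 8),
	// i.e. 128 blocks of 16 x 16 = 256 threads; the kernel itself loops over the depth dimension.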
hipLaunchKernelGGL(( cuda_kernel_boundaries_thermo), dim3(Dg),dim3(Db), 0, 0, (float *)input, size, left, right);
error = hipGetLastError();
if (error != hipSuccess){
printf("cuda_fluid_project() failed to launch error = %d\n", error);
}
}
#endif | f01dea1c1a01a71b179e485c75ebb25ce3bbf64a.cu | #ifndef _BOUNDARIES_THERMO_CUDA_
#define _BOUNDARIES_THERMO_CUDA_
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "math.h"
#include "../Source/CUDA/cuda_header.h"
//output velocity derivative texture //input velocity texture
__global__ void cuda_kernel_boundaries_thermo(float*input, Size size, float left, float right){
int x_iter = blockIdx.x*blockDim.x + threadIdx.x;
int y_iter = blockIdx.y*blockDim.y + threadIdx.y;
int z_iter = 0;
for(z_iter = 0; z_iter < size.depth_; z_iter++){
float*cell = input + (z_iter*size.pitch_slice_) + (y_iter*size.pitch_) + (PIXEL_FMT_SIZE_RG * x_iter);
if(y_iter == 0){
if(x_iter < 3*(size.width_/4.f) && x_iter > (size.width_/4.f)){
cell[0] = left;
}else{
cell[0] = right;
}
}
}
}
extern "C"
void cuda_fluid_boundaries_thermo(void *input, Size size, float left, float right){
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((size.width_+Db.x-1)/Db.x, (size.height_+Db.y-1)/Db.y);
cuda_kernel_boundaries_thermo<<<Dg,Db>>>((float *)input, size, left, right);
error = cudaGetLastError();
if (error != cudaSuccess){
printf("cuda_fluid_project() failed to launch error = %d\n", error);
}
}
#endif |
595fd8d8111dee47d628731df2b77d5af7d2d1bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "Kernel.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ void grayScaleConvertGPU(unsigned char* image, int dimx, int dimy, int d)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x < dimx && y < dimy)
{
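		// Weighted luminance with the Rec. 709 luma coefficients (0.2126 R + 0.7152 G + 0.0722 B),
		// assuming an RGB channel order; the result is written back to all three channels.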
unsigned int lum = image[d*(x + y*dimx) + 0]*.2126 + image[d*(x + y*dimx) + 1]*.7152 + image[d*(x + y*dimx) + 2]*.0722;
image[d*(x + y*dimx) + 0] = (unsigned char) lum;
image[d*(x + y*dimx) + 1] = (unsigned char) lum;
image[d*(x + y*dimx) + 2] = (unsigned char) lum;
}
}
void grayScaleConvert(unsigned char* image, int x, int y, int d)
{
unsigned char* pixelBuffer;
hipMalloc((void**)&pixelBuffer, sizeof(unsigned char)*x*y*d);
hipMemcpy(pixelBuffer, image, sizeof(unsigned char)*x*y*d, hipMemcpyHostToDevice);
	dim3 blocks((x + 15) / 16, (y + 15) / 16); // round up so edge pixels in partial tiles are still covered
dim3 threads(16, 16);
hipLaunchKernelGGL(( grayScaleConvertGPU), dim3(blocks), dim3(threads), 0, 0, pixelBuffer, x, y, d);
hipMemcpy(image, pixelBuffer, sizeof(unsigned char)*x*y*d, hipMemcpyDeviceToHost);
hipFree(pixelBuffer);
} | 595fd8d8111dee47d628731df2b77d5af7d2d1bd.cu |
#include "Kernel.h"
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void grayScaleConvertGPU(unsigned char* image, int dimx, int dimy, int d)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x < dimx && y < dimy)
{
unsigned int lum = image[d*(x + y*dimx) + 0]*.2126 + image[d*(x + y*dimx) + 1]*.7152 + image[d*(x + y*dimx) + 2]*.0722;
image[d*(x + y*dimx) + 0] = (unsigned char) lum;
image[d*(x + y*dimx) + 1] = (unsigned char) lum;
image[d*(x + y*dimx) + 2] = (unsigned char) lum;
}
}
void grayScaleConvert(unsigned char* image, int x, int y, int d)
{
unsigned char* pixelBuffer;
cudaMalloc((void**)&pixelBuffer, sizeof(unsigned char)*x*y*d);
cudaMemcpy(pixelBuffer, image, sizeof(unsigned char)*x*y*d, cudaMemcpyHostToDevice);
	dim3 blocks((x + 15) / 16, (y + 15) / 16); // round up so edge pixels in partial tiles are still covered
dim3 threads(16, 16);
grayScaleConvertGPU<<<blocks, threads>>>(pixelBuffer, x, y, d);
cudaMemcpy(image, pixelBuffer, sizeof(unsigned char)*x*y*d, cudaMemcpyDeviceToHost);
cudaFree(pixelBuffer);
} |
02c961ee7c6da1dcc50b8a5d87582b12257b6438.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
__global__ void kernel(int *array,int goal,bool *flag,int size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int first = index * size ;
	int last = first + size - 1; // last valid index of this thread's slice (avoids overlapping the next slice and reading past the end)
int middle = (first+last)/2;
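	// Each thread binary-searches its own contiguous slice of the array.
	// Illustrative example (assumed launch parameters): with N = 1024 and 4 blocks of 8 threads,
	// size = 1024 / 32 = 32, so thread index 5 searches the 32-element slice starting at index 160.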
while (first <= last) {
if (array[middle] < goal)
first = middle + 1;
else if (array[middle] == goal) {
// printf("number is found in bolackid=%d threadid=%d\n",blockIdx.x,threadIdx.x);
*flag = true;
// assert(0);
break;
}
else
last = middle - 1;
middle = (first + last)/2;
}
if(array[threadIdx.x] == goal){
*flag = true;
}
}
int main()
{
int BlockNumber;
int ThreadNumber;
int Goal;
int N ;
int *array;
bool *flag ;
printf("Enter The array size: ");
scanf("%d", &N);
printf("Enter Block number: ");
scanf("%d", &BlockNumber);
printf("Enter Thread number: ");
scanf("%d", &ThreadNumber);
printf("Enter the number to find: ");
scanf("%d", &Goal);
hipMallocManaged(&array, N*sizeof(int));
hipMallocManaged(&flag, sizeof(bool));
for(int i = 0; i < N; i++){
array[i] = i ;
}
hipLaunchKernelGGL(( kernel), dim3(BlockNumber), dim3(ThreadNumber), 0, 0, array, Goal, flag,N/(BlockNumber*ThreadNumber));
hipDeviceSynchronize();
if(*flag == true){
printf("goal is found \n");
}else printf("goal not found\n");
}
| 02c961ee7c6da1dcc50b8a5d87582b12257b6438.cu | #include<stdio.h>
__global__ void kernel(int *array,int goal,bool *flag,int size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int first = index * size ;
	int last = first + size - 1; // last valid index of this thread's slice (avoids overlapping the next slice and reading past the end)
int middle = (first+last)/2;
while (first <= last) {
if (array[middle] < goal)
first = middle + 1;
else if (array[middle] == goal) {
// printf("number is found in bolackid=%d threadid=%d\n",blockIdx.x,threadIdx.x);
*flag = true;
// assert(0);
break;
}
else
last = middle - 1;
middle = (first + last)/2;
}
if(array[threadIdx.x] == goal){
*flag = true;
}
}
int main()
{
int BlockNumber;
int ThreadNumber;
int Goal;
int N ;
int *array;
bool *flag ;
printf("Enter The array size: ");
scanf("%d", &N);
printf("Enter Block number: ");
scanf("%d", &BlockNumber);
printf("Enter Thread number: ");
scanf("%d", &ThreadNumber);
printf("Enter the number to find: ");
scanf("%d", &Goal);
cudaMallocManaged(&array, N*sizeof(int));
cudaMallocManaged(&flag, sizeof(bool));
for(int i = 0; i < N; i++){
array[i] = i ;
}
kernel<<<BlockNumber, ThreadNumber>>>(array, Goal, flag,N/(BlockNumber*ThreadNumber));
cudaDeviceSynchronize();
if(*flag == true){
printf("goal is found \n");
}else printf("goal not found\n");
}
|
82877d1325e7bb2ccda9ea19192598d0c5d5ae74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <windows.h>
#include <d2d1.h>
#include <d2d1helper.h>
#pragma comment(lib, "d2d1")
#include <mmsystem.h>
#pragma comment(lib, "winmm.lib")
//*****double buffering*****
#define SCREEN_WIDTH 1920
#define SCREEN_HEIGHT 1000
D2D1_RECT_U display_area;
ID2D1Bitmap *image_container = NULL;
unsigned int *dev_image_data, image_data[SCREEN_WIDTH * SCREEN_HEIGHT];
float *dev_zbuffer;//this used to be int / could also be int
typedef struct Vec3f {
float x, y, z;
};
//**************************************
//**************PEGAZUS 3D************
#define MAX_OBJ_NUM 20000000
int drawing_in_progress = 0;
int viewpoint = -500;
float rot_degree_x;
float rot_degree_y;
float rot_degree_z;
float rot_degree_x2 = 0;
float rot_degree_y2 = 90.0f;
float rot_degree_z2 = 0;
float Math_PI = 3.14159265358979323846;
float raw_verticesX[MAX_OBJ_NUM], raw_verticesY[MAX_OBJ_NUM], raw_verticesZ[MAX_OBJ_NUM];
int raw_vertices_length;
struct VEKTOR {
float x;
float y;
float z;
};
VEKTOR Vector1, Vector2, vNormal;
//*******CUDA*************
float *dev_raw_verticesX, *dev_raw_verticesY, *dev_raw_verticesZ;
float *dev_rotated_verticesX, *dev_rotated_verticesY, *dev_rotated_verticesZ;
//************************
void init_3D(void);
void data_transfer_to_GPU(void);
void D2D_drawing(void);
__global__ void CUDA_rotation(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ, float *rotarrayX, float *rotarrayY, float *rotarrayZ, float degree_cosx, float degree_sinx, float degree_cosy, float degree_siny, float degree_cosz, float degree_sinz);
void drawing(void);
__global__ void render_objects(int maxitemcount, float *rotarrayX, float *rotarrayY, float *rotarrayZ, unsigned int *puffer, float *zpuffer);
__global__ void zoom_in(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ);
__global__ void zoom_out(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ);
//************************************
//***********STANDARD WIN32API WINDOWING************
ID2D1Factory* pD2DFactory = NULL;
ID2D1HwndRenderTarget* pRT = NULL;
#define HIBA_00 TEXT("Error:Program initialisation process.")
HINSTANCE hInstGlob;
int SajatiCmdShow;
char szClassName[] = "WindowsApp";
HWND Form1; //window handle
LRESULT CALLBACK WndProc0(HWND, UINT, WPARAM, LPARAM);
//******************************************************
//*******for measurements********
long int vertex_counter, poly_counter;
float fps_stat;
int starttime;
int endtime;
//*****double buffering*****
void create_main_buffer(void);
void CUDA_cleanup_main_buffer(void);
__global__ void CUDA_CleanUp_Zbuffer(float *zpuffer);
void swap_main_buffer(void);
//**************************************
//*****drawing algorithms*****
__device__ void CUDA_SetPixel(int x1, int y1, int color, unsigned int *puffer);
__device__ void CUDA_SetPixel_Zbuffer(int x1, int y1, int z1, int color, unsigned int *puffer, float *zpuffer);
__device__ void CUDA_DrawLine(int x1, int y1, int x2, int y2, int color, unsigned int *puffer);
__device__ void CUDA_DrawLine_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int color, unsigned int *puffer, float *zpuffer);
__device__ void CUDA_FillTriangle(int x1, int y1, int x2, int y2, int x3, int y3, int color, unsigned int *puffer);
__device__ void CUDA_FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color, unsigned int *puffer, float *zpuffer);
//**************************************
//********************************
//OBJ format handling
//********************************
float tomb_vertices[MAX_OBJ_NUM][3];
int tomb_faces[MAX_OBJ_NUM][5];
int tomb_vertices_length = 0, tomb_faces_length = 0;
int getelementcount(unsigned char csv_content[]);
void getelement(unsigned char csv_content[], unsigned int data_index, unsigned char csv_content2[]);
void obj_loader(void);
//*********************************
//The main entry point of our program
//*********************************
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow)
{
static TCHAR szAppName[] = TEXT("StdWinClassName");
HWND hwnd;
MSG msg;
WNDCLASS wndclass0;
SajatiCmdShow = iCmdShow;
hInstGlob = hInstance;
//*********************************
//Preparing Windows class
//*********************************
wndclass0.style = CS_HREDRAW | CS_VREDRAW;
wndclass0.lpfnWndProc = WndProc0;
wndclass0.cbClsExtra = 0;
wndclass0.cbWndExtra = 0;
wndclass0.hInstance = hInstance;
wndclass0.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass0.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass0.hbrBackground = (HBRUSH)GetStockObject(LTGRAY_BRUSH);
wndclass0.lpszMenuName = NULL;
wndclass0.lpszClassName = TEXT("WIN0");
//*********************************
//Registering our windows class
//*********************************
if (!RegisterClass(&wndclass0))
{
MessageBox(NULL, HIBA_00, TEXT("Program Start"), MB_ICONERROR);
return 0;
}
//*********************************
//Creating the window
//*********************************
Form1 = CreateWindow(TEXT("WIN0"),
TEXT("CUDA - DIRECT2D"),
(WS_OVERLAPPED | WS_SYSMENU | WS_THICKFRAME | WS_MAXIMIZEBOX | WS_MINIMIZEBOX),
50,
50,
SCREEN_WIDTH,
SCREEN_HEIGHT,
NULL,
NULL,
hInstance,
NULL);
//*********************************
//Displaying the window
//*********************************
ShowWindow(Form1, SajatiCmdShow);
UpdateWindow(Form1);
//*********************************
//Activating the message processing for our window
//*********************************
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}
//*********************************
//The window's callback funtcion: handling events
//*********************************
LRESULT CALLBACK WndProc0(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;
unsigned int xPos, yPos, xPos2, yPos2, fwButtons;
switch (message)
{
//*********************************
//When creating the window
//*********************************
case WM_CREATE:
D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory);
pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hwnd, D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT)),
&pRT);
create_main_buffer();
hipMalloc((void**)&dev_raw_verticesX, MAX_OBJ_NUM * sizeof(float));
hipMalloc((void**)&dev_raw_verticesY, MAX_OBJ_NUM * sizeof(float));
hipMalloc((void**)&dev_raw_verticesZ, MAX_OBJ_NUM * sizeof(float));
hipMalloc((void**)&dev_rotated_verticesX, MAX_OBJ_NUM * sizeof(float));
hipMalloc((void**)&dev_rotated_verticesY, MAX_OBJ_NUM * sizeof(float));
hipMalloc((void**)&dev_rotated_verticesZ, MAX_OBJ_NUM * sizeof(float));
hipMalloc((void**)&dev_image_data, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(unsigned int));
hipMalloc((void**)&dev_zbuffer, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(float));
init_3D();
obj_loader();
data_transfer_to_GPU();
if ((joyGetNumDevs()) > 0) joySetCapture(hwnd, JOYSTICKID1, NULL, FALSE);
return 0;
//*********************************
//to eliminate color flickering
//*********************************
case WM_ERASEBKGND:
return (LRESULT)1;
case MM_JOY1MOVE:
fwButtons = wParam;
xPos = LOWORD(lParam);
yPos = HIWORD(lParam);
if (xPos == 65535) {
rot_degree_y2 += 2.0; D2D_drawing();
}
else if (xPos == 0) {
rot_degree_y2 -= 2.0; D2D_drawing();
}
if (yPos == 65535) {
rot_degree_x2 += 2.0; D2D_drawing();
}
else if (yPos == 0) {
rot_degree_x2 -= 2.0; D2D_drawing();
}
if (fwButtons == 128) {
rot_degree_z2 += 2.0; D2D_drawing();
}
else if (fwButtons == 64) {
rot_degree_z2 -= 2.0; D2D_drawing();
}
if (rot_degree_y2 > 360) {
rot_degree_y2 = 0; D2D_drawing();
}
else if (rot_degree_y2 < 0) {
rot_degree_y2 = 358; D2D_drawing();
}
if (rot_degree_x2 > 359) {
rot_degree_x2 = 0; D2D_drawing();
}
else if (rot_degree_x2 < 0) {
rot_degree_x2 = 358; D2D_drawing();
}
if (rot_degree_z2 > 359) {
rot_degree_z2 = 0; D2D_drawing();
}
else if (rot_degree_z2 < 0) {
rot_degree_z2 = 358; D2D_drawing();
}
if (fwButtons == 2)
{
int blockSize = 384;
int numBlocks = (raw_vertices_length + blockSize - 1) / blockSize;
zoom_in << <numBlocks, blockSize >> > (raw_vertices_length, dev_raw_verticesX, dev_raw_verticesY, dev_raw_verticesZ);
hipDeviceSynchronize();
D2D_drawing();
}
else if (fwButtons == 4)
{
int blockSize = 384;
int numBlocks = (raw_vertices_length + blockSize - 1) / blockSize;
zoom_out << <numBlocks, blockSize >> > (raw_vertices_length, dev_raw_verticesX, dev_raw_verticesY, dev_raw_verticesZ);
hipDeviceSynchronize();
D2D_drawing();
}
break;
//*********************************
//Repainting the client area of the window
//*********************************
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
EndPaint(hwnd, &ps);
D2D_drawing();
return 0;
//*********************************
//Closing the window, freeing resources
//*********************************
case WM_CLOSE:
pRT->Release();
pD2DFactory->Release();
hipFree(dev_raw_verticesX);
hipFree(dev_raw_verticesY);
hipFree(dev_raw_verticesZ);
hipFree(dev_rotated_verticesX);
hipFree(dev_rotated_verticesY);
hipFree(dev_rotated_verticesZ);
hipFree(dev_image_data);
hipFree(dev_zbuffer);
DestroyWindow(hwnd);
return 0;
//*********************************
//Destroying the window
//*********************************
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}
return DefWindowProc(hwnd, message, wParam, lParam);
}
//********************************
//PEGAZUS 3D
//********************************
void create_main_buffer(void)
{
pRT->CreateBitmap(D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT),
D2D1::BitmapProperties(D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM,
D2D1_ALPHA_MODE_IGNORE)), &image_container);
}
void CUDA_cleanup_main_buffer(void)
{
hipMemset(dev_image_data, 255, SCREEN_HEIGHT*SCREEN_WIDTH * sizeof(unsigned int));
}
__global__ void CUDA_CleanUp_Zbuffer(float *zpuffer)
{
int i;
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (i = index; i < SCREEN_HEIGHT*SCREEN_WIDTH; i += stride)
{
zpuffer[i] = 999999;
}
}
void swap_main_buffer(void)
{
display_area.left = 0;
display_area.top = 0;
display_area.right = SCREEN_WIDTH;
display_area.bottom = SCREEN_HEIGHT;
image_container->CopyFromMemory(&display_area, image_data, SCREEN_WIDTH * sizeof(unsigned int));
pRT->BeginDraw();
pRT->DrawBitmap(image_container, D2D1::RectF(0.0f, 0.0f, SCREEN_WIDTH, SCREEN_HEIGHT), 1.0f, D2D1_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR, NULL);
pRT->EndDraw();
}
__device__ void CUDA_SetPixel(int x1, int y1, int color, unsigned int *puffer)
{
puffer[(y1 * SCREEN_WIDTH) + x1] = color;
}
__device__ void CUDA_SetPixel_Zbuffer(int x1, int y1, int z1, int color, unsigned int *puffer, float *zpuffer)
{
int offset = (y1 * SCREEN_WIDTH) + x1;
if (zpuffer[offset] > z1)
{
zpuffer[offset] = z1;
puffer[offset] = color;
}
}
__device__ void CUDA_DrawLine(int x1, int y1, int x2, int y2, int color, unsigned int *puffer)
{
bool flip = false;
int swap, offset;
if (abs(x2 - x1) < 2 && abs(y2 - y1) < 2)
{
puffer[(y2*SCREEN_WIDTH) + x2] = color; return;
}
if (abs(x1 - x2) < abs(y1 - y2))
{
swap = x1;
x1 = y1;
y1 = swap;
swap = x2;
x2 = y2;
y2 = swap;
flip = true;
}
if (x1 > x2)
{
swap = x1;
x1 = x2;
x2 = swap;
swap = y1;
y1 = y2;
y2 = swap;
}
int dx = x2 - x1;
int dy = y2 - y1;
int marker1 = abs(dy) * 2;
int marker2 = 0;
int y = y1, x;
if (flip)
{
for (x = x1; x <= x2; ++x)
{
offset = (x * SCREEN_WIDTH);
puffer[offset + y] = color;
marker2 += marker1;
if (marker2 > dx)
{
y += (y2 > y1 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
else
{
for (x = x1; x <= x2; ++x)
{
offset = (y * SCREEN_WIDTH);
puffer[offset + x] = color;
marker2 += marker1;
if (marker2 > dx)
{
y += (y2 > y1 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
}
__device__ void CUDA_DrawLine_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int color, unsigned int *puffer, float *zpuffer)
{
float Pz;
bool flip = false;
int swap, offset;
if (abs(x2 - x1) < 2 && abs(y2 - y1) < 2) {
puffer[(y2*SCREEN_WIDTH) + x2] = color; return;
}
if (abs(x1 - x2) < abs(y1 - y2))
{
swap = x1;
x1 = y1;
y1 = swap;
swap = x2;
x2 = y2;
y2 = swap;
flip = true;
}
if (x1 > x2)
{
swap = x1;
x1 = x2;
x2 = swap;
swap = y1;
y1 = y2;
y2 = swap;
}
int dx = x2 - x1;
int dy = y2 - y1;
int marker1 = abs(dy) * 2;
int marker2 = 0;
int y = y1, x;
for (x = x1; x <= x2; ++x)
{
if (z1 == z2) Pz = z1;
else
{
int s1 = abs(x2 - x1);
int s2 = abs(z1 - z2);
Pz = (float)z2 + (float)((((float)x - (float)x1) / (float)s1) * (float)s2);
}
if (flip)
{
offset = (x * SCREEN_WIDTH);
if (zpuffer[offset + y] > Pz)
{
zpuffer[offset + y] = Pz;
puffer[offset + y] = color;
}
}
else
{
offset = (y * SCREEN_WIDTH);
if (zpuffer[offset + x] > Pz)
{
zpuffer[offset + x] = Pz;
puffer[offset + x] = color;
}
}
marker2 += marker1;
if (marker2 > dx)
{
y += (y2 > y1 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
__device__ void CUDA_FillTriangle(int x1, int y1, int x2, int y2, int x3, int y3, int color, unsigned int *puffer)
{
int Ax, Ay, Bx, By, i, j;
int swapx, swapy, offset, maxoffset = SCREEN_HEIGHT * SCREEN_WIDTH;
if (y1 == y2 && y1 == y3) return;
if (y1 > y2)
{
swapx = x1;
swapy = y1;
x1 = x2;
y1 = y2;
x2 = swapx;
y2 = swapy;
}
if (y1 > y3)
{
swapx = x1;
swapy = y1;
x1 = x3;
y1 = y3;
x3 = swapx;
y3 = swapy;
}
if (y2 > y3)
{
swapx = x3;
swapy = y3;
x3 = x2;
y3 = y2;
x2 = swapx;
y2 = swapy;
}
int t_height = y3 - y1;
for (i = 0; i < t_height; ++i)
{
bool lower_part = i > y2 - y1 || y2 == y1;
int part_height = lower_part ? y3 - y2 : y2 - y1;
float alpha = (float)i / t_height;
float beta = (float)(i - (lower_part ? y2 - y1 : 0)) / part_height;
Ax = x1 + (x3 - x1)*alpha;
Ay = y1 + (y3 - y1)*alpha;
Bx = lower_part ? x2 + (x3 - x2)*beta : x1 + (x2 - x1)*beta;
By = lower_part ? y2 + (y3 - y2)*beta : y1 + (y2 - y1)*beta;
if (Ax > Bx)
{
swapx = Ax;
swapy = Ay;
Ax = Bx;
Ay = By;
Bx = swapx;
By = swapy;
}
offset = (y1 + i)*SCREEN_WIDTH;
for (j = Ax; j < Bx; ++j)
{
if (offset + j > maxoffset) continue;
puffer[offset + j] = color;
}
}
}
__device__ void CUDA_FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color, unsigned int *puffer, float *zpuffer)
{
int Ax, Ay, Bx, By, i, j, depth_value;
int swapx, swapy, offset;
Vec3f interpolate, helper_vector;
if (y1 == y2 && y1 == y3) return;
if (y1 > y2)
{
swapx = x1;
swapy = y1;
x1 = x2;
y1 = y2;
x2 = swapx;
y2 = swapy;
}
if (y1 > y3)
{
swapx = x1;
swapy = y1;
x1 = x3;
y1 = y3;
x3 = swapx;
y3 = swapy;
}
if (y2 > y3)
{
swapx = x3;
swapy = y3;
x3 = x2;
y3 = y2;
x2 = swapx;
y2 = swapy;
}
int t_height = y3 - y1;
for (i = 0; i < t_height; ++i)
{
bool second_half = i > y2 - y1 || y2 == y1;
int segment_height = second_half ? y3 - y2 : y2 - y1;
float alpha = (float)i / t_height;
float beta = (float)(i - (second_half ? y2 - y1 : 0)) / segment_height;
Ax = x1 + (x3 - x1)*alpha;
Ay = y1 + (y3 - y1)*alpha;
Bx = second_half ? x2 + (x3 - x2)*beta : x1 + (x2 - x1)*beta;
By = second_half ? y2 + (y3 - y2)*beta : y1 + (y2 - y1)*beta;
if (Ax > Bx)
{
swapx = Ax;
swapy = Ay;
Ax = Bx;
Ay = By;
Bx = swapx;
By = swapy;
}
offset = (y1 + i)*SCREEN_WIDTH;
for (j = Ax; j <= Bx; ++j)
{
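			// The cross product below yields (unnormalized) barycentric coordinates of pixel (j, y1 + i)
			// with respect to the triangle; they are then used to interpolate the depth for the z-buffer test.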
helper_vector.x = (x2 - x1) * (y1 - (y1 + i)) - (x1 - j) * (y2 - y1);
helper_vector.y = (x1 - j) * (y3 - y1) - (x3 - x1) * (y1 - (y1 + i));
helper_vector.z = (x3 - x1) * (y2 - y1) - (x2 - x1) * (y3 - y1);
if (abs((int)helper_vector.z) < 1) { interpolate.x = -1; interpolate.y = 0; interpolate.z = 0; }
else
{
interpolate.x = 1.f - (helper_vector.x + helper_vector.y) / helper_vector.z;
interpolate.y = helper_vector.y / helper_vector.z;
interpolate.z = helper_vector.x / helper_vector.z;
}
if (interpolate.x < 0 || interpolate.y < 0 || interpolate.z < 0) continue;
depth_value = (z1*interpolate.x) + (z2*interpolate.y) + (z3*interpolate.z);
if (zpuffer[offset + j] > depth_value)
{
zpuffer[offset + j] = depth_value;
puffer[offset + j] = color;
}
}
}
}
void init_3D(void)
{
rot_degree_x = 0 * Math_PI / 180; rot_degree_x2 = 0;
rot_degree_y = 0 * Math_PI / 180; rot_degree_y2 = 0;
rot_degree_z = 0 * Math_PI / 180; rot_degree_z2 = 0;
raw_vertices_length = 0;
}
void data_transfer_to_GPU(void)
{
hipMemcpy(dev_raw_verticesX, raw_verticesX, raw_vertices_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_raw_verticesY, raw_verticesY, raw_vertices_length * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_raw_verticesZ, raw_verticesZ, raw_vertices_length * sizeof(float), hipMemcpyHostToDevice);
}
//********************************
//OBJ format handling
//********************************
int getelementcount(unsigned char csv_content[])
{
int s1, s2;
for (s1 = s2 = 0; s1 < strlen((const char *)csv_content); ++s1)
{
if (csv_content[s1] == 10) break;
else if (csv_content[s1] == 32) ++s2;
}
return s2;
}
void getelement(unsigned char csv_content[], unsigned int data_index, unsigned char csv_content2[])
{
int s1, s2, s3, s4 = 0;
for (s1 = 0, s2 = 0; s1 < strlen((const char *)csv_content); ++s1)
{
if (csv_content[s1] == 32)
{
++s2;
if (s2 == data_index)
{
for (s3 = s1 + 1; s3 < strlen((const char *)csv_content); ++s3)
{
if (csv_content[s3] == 32 || csv_content[s3] == 10)
{
csv_content2[s4] = 0;
return;
}
else csv_content2[s4++] = csv_content[s3];
}
}
}
}
}
void obj_loader(void)
{
FILE *objfile;
int i, j;
float data1, data2, data3;
unsigned char row1[1024], row2[1024];
int data_count, max_row_length = 250;
char tempstr[200];
objfile = fopen("model.obj", "rt");
if (objfile == NULL) return;
vertex_counter = poly_counter = 0;
	tomb_vertices_length = tomb_faces_length = 0;
while (!feof(objfile))
{
fgets((char *)row1, max_row_length, objfile);
if (row1[0] == 118 && row1[1] == 32) //*** 'v '
{
getelement(row1, 1, row2); data1 = atof((const char *)row2);
getelement(row1, 2, row2); data2 = atof((const char *)row2);
getelement(row1, 3, row2); data3 = atof((const char *)row2);
tomb_vertices[tomb_vertices_length][0] = data1 * 4;
tomb_vertices[tomb_vertices_length][1] = data2 * 4;
tomb_vertices[tomb_vertices_length++][2] = data3 * 4;
}
else if (row1[0] == 102 && row1[1] == 32) //*** 'f '
{
data_count = getelementcount(row1);
tomb_faces[tomb_faces_length][0] = data_count;
for (i = 1; i < data_count + 1; ++i)
{
getelement(row1, i, row2);
data1 = atof((const char *)row2);
tomb_faces[tomb_faces_length][i] = data1 - 1;
}
++tomb_faces_length;
}
}
fclose(objfile);
int base_index;
for (i = 0; i < tomb_faces_length; ++i)
{
base_index = tomb_faces[i][1];
if (tomb_faces[i][0] == 3)
{
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][1]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][2]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][2]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][2]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][3]][2];
++poly_counter;
vertex_counter += 3;
}
else if (tomb_faces[i][0] == 4)
{
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][1]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][2]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][2]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][2]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][3]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][1]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][3]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][4]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][4]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][4]][2];
poly_counter += 2;
vertex_counter += 6;
}
}
}
void D2D_drawing(void)
{
if (drawing_in_progress == 1) return;
drawing_in_progress = 1;
char tempstr[255], tempstr2[255], hibauzenet[256];
int blockSize = 384;
int numBlocks = (raw_vertices_length + blockSize - 1) / blockSize;
strcpy(tempstr2, "Vertices: ");
_itoa(vertex_counter, tempstr, 10); strcat(tempstr2, tempstr); strcat(tempstr2, " Polygons: ");
_itoa(poly_counter, tempstr, 10); strcat(tempstr2, tempstr); strcat(tempstr2, " Z ordered: ");
starttime = GetTickCount();
rot_degree_x = rot_degree_x2 * Math_PI / 180;
rot_degree_y = rot_degree_y2 * Math_PI / 180;
rot_degree_z = rot_degree_z2 * Math_PI / 180;
float degree_sinx = sin(rot_degree_x);
float degree_cosx = cos(rot_degree_x);
float degree_siny = sin(rot_degree_y);
float degree_cosy = cos(rot_degree_y);
float degree_sinz = sin(rot_degree_z);
float degree_cosz = cos(rot_degree_z);
CUDA_rotation << <numBlocks, blockSize >> > (raw_vertices_length, dev_raw_verticesX, dev_raw_verticesY, dev_raw_verticesZ, dev_rotated_verticesX, dev_rotated_verticesY, dev_rotated_verticesZ, degree_cosx, degree_sinx, degree_cosy, degree_siny, degree_cosz, degree_sinz);
hipDeviceSynchronize();
strcpy_s(hibauzenet, hipGetErrorString(hipGetLastError()));
drawing();
endtime = GetTickCount();
if ((endtime - starttime) == 0) ++endtime;
fps_stat = 1000 / (endtime - starttime); strcat(tempstr2, " FPS: "); _itoa(fps_stat, tempstr, 10); strcat(tempstr2, tempstr);
strcat(tempstr2, ", X: "); _itoa(rot_degree_x2, tempstr, 10); strcat(tempstr2, tempstr);
strcat(tempstr2, ", Y: "); _itoa(rot_degree_y2, tempstr, 10); strcat(tempstr2, tempstr);
strcat(tempstr2, ", Z: "); _itoa(rot_degree_z2, tempstr, 10); strcat(tempstr2, tempstr);
strcat(tempstr2, ", CUDA: "); strcat(tempstr2, hibauzenet);
SetWindowTextA(Form1, tempstr2);
drawing_in_progress = 0;
}
__global__ void CUDA_rotation(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ, float *rotarrayX, float *rotarrayY, float *rotarrayZ, float degree_cosx, float degree_sinx, float degree_cosy, float degree_siny, float degree_cosz, float degree_sinz)
{
int i;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float t0;
	//rotation
for (i = index; i < maxitemcount; i += stride)
{
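		// Rotate each vertex around the X axis, then Y, then Z; the screen-centering offsets
		// (SCREEN_WIDTH / 4, SCREEN_HEIGHT / 4) are applied together with the Z rotation.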
rotarrayY[i] = (rawarrayY[i] * degree_cosx) - (rawarrayZ[i] * degree_sinx);
rotarrayZ[i] = rawarrayY[i] * degree_sinx + rawarrayZ[i] * degree_cosx;
rotarrayX[i] = rawarrayX[i] * degree_cosy + rotarrayZ[i] * degree_siny;
rotarrayZ[i] = -rawarrayX[i] * degree_siny + rotarrayZ[i] * degree_cosy;// +
t0 = rotarrayX[i];
//some tweaking for OBJ models: "+ (SCREEN_WIDTH / 4)" and "+ (SCREEN_HEIGHT / 4)"
rotarrayX[i] = t0 * degree_cosz - rotarrayY[i] * degree_sinz + (SCREEN_WIDTH / 4);
rotarrayY[i] = t0 * degree_sinz + rotarrayY[i] * degree_cosz + (SCREEN_HEIGHT / 4);
}
//perspective projection
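	// Each vertex is scaled by viewpoint / (viewpoint - z) and shifted into screen space;
	// vertices that fall behind the viewpoint plane or outside the screen edges get z = -9999999 so the rasterizer skips them.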
int s1;
int viewpoint = -1100;
float sx = SCREEN_WIDTH / 2;
float sultra = SCREEN_HEIGHT / 2, sultra2 = SCREEN_HEIGHT / 3;
int x_minusz_edge = 0, y_minusz_edge = 0, x_max_edge = SCREEN_WIDTH - 1, y_max_edge = SCREEN_HEIGHT - 1;
float distance;
for (i = index; i < maxitemcount; i += stride)
{
distance = 999999;
if (rotarrayZ[i] < distance) distance = rotarrayZ[i];
if (distance < viewpoint) { rotarrayZ[i] = -9999999; continue; }
sultra = viewpoint / (viewpoint - rotarrayZ[i]);
rotarrayX[i] = rotarrayX[i] * sultra + 400;
rotarrayY[i] = (rotarrayY[i] * sultra) + sultra2;
if (rotarrayX[i] < x_minusz_edge || rotarrayX[i] > x_max_edge) { rotarrayZ[i] = -9999999; continue; }
if (rotarrayY[i] < y_minusz_edge || rotarrayY[i] > y_max_edge) { rotarrayZ[i] = -9999999; continue; }
}
}
void drawing(void)
{
CUDA_cleanup_main_buffer();
CUDA_CleanUp_Zbuffer << < ((SCREEN_WIDTH*SCREEN_HEIGHT) + 384 - 1) / 384, 384 >> > (dev_zbuffer);
hipDeviceSynchronize();
render_objects << <12, 384 >> > (raw_vertices_length, dev_rotated_verticesX, dev_rotated_verticesY, dev_rotated_verticesZ, dev_image_data, dev_zbuffer);
hipDeviceSynchronize();
hipMemcpy(image_data, dev_image_data, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(unsigned int), hipMemcpyDeviceToHost);
swap_main_buffer();
}
__global__ void render_objects(int maxitemcount, float *rotarrayX, float *rotarrayY, float *rotarrayZ, unsigned int *puffer, float *zpuffer)
{
int i, px, py, drawcolor;
int index = (blockIdx.x * blockDim.x) + (threadIdx.x * 3);
int stride = blockDim.x * gridDim.x;
//VEKTOR Vector1, Vector2, vNormal;//for visibility check
for (i = index; i < maxitemcount - 3; i += stride)
{
if ((rotarrayZ[i] < -9000000) || (rotarrayZ[i + 1] < -9000000) || (rotarrayZ[i + 2] < -9000000)) continue;
/* for visibility check
Vector1.x = rotarrayX[i + 1] - rotarrayX[i];
Vector1.y = rotarrayY[i + 1] - rotarrayY[i];
Vector1.z = rotarrayZ[i + 1] - rotarrayZ[i];
Vector2.x = rotarrayX[i + 2] - rotarrayX[i];
Vector2.y = rotarrayY[i + 2] - rotarrayY[i];
Vector2.z = rotarrayZ[i + 2] - rotarrayZ[i];
vNormal.x = ((Vector1.y * Vector2.z) - (Vector1.z * Vector2.y));
vNormal.y = ((Vector1.z * Vector2.x) - (Vector1.x * Vector2.z));
vNormal.z = ((Vector1.x * Vector2.y) - (Vector1.y * Vector2.x));
if (vNormal.z > 0) continue;
*/
drawcolor = RGB(180 * ((float)i / (float)maxitemcount * 100), 180 * ((float)i / (float)maxitemcount * 100), 180 * ((float)i / (float)maxitemcount * 100));
//CUDA_SetPixel(rotarrayX[i], rotarrayY[i], RGB(0, 0, 0),puffer);
//CUDA_SetPixel_Zbuffer(rotarrayX[i], rotarrayY[i], rotarrayZ[i], drawcolor, puffer, zpuffer);
/*CUDA_DrawLine(rotarrayX[i], rotarrayY[i], rotarrayX[i + 1], rotarrayY[i + 1], RGB(0, 0, 0), puffer);
CUDA_DrawLine(rotarrayX[i+2], rotarrayY[i+2], rotarrayX[i + 1], rotarrayY[i + 1], RGB(0, 0, 0), puffer);
CUDA_DrawLine(rotarrayX[i], rotarrayY[i], rotarrayX[i + 2], rotarrayY[i + 2], RGB(0, 0, 0), puffer);//*/
/*CUDA_DrawLine_Zbuffer(rotarrayX[i], rotarrayY[i], rotarrayZ[i], rotarrayX[i + 1], rotarrayY[i + 1], rotarrayZ[i+1], RGB(0, 0, 0), puffer,zpuffer);
CUDA_DrawLine_Zbuffer(rotarrayX[i + 2], rotarrayY[i + 2], rotarrayZ[i+2], rotarrayX[i + 1], rotarrayY[i + 1], rotarrayZ[i+1], RGB(0, 0, 0), puffer, zpuffer);
CUDA_DrawLine_Zbuffer(rotarrayX[i], rotarrayY[i], rotarrayZ[i], rotarrayX[i + 2], rotarrayY[i + 2], rotarrayZ[i+2], RGB(0, 0, 0), puffer, zpuffer);//*/
//CUDA_FillTriangle(rotarrayX[i], rotarrayY[i], rotarrayX[i + 1], rotarrayY[i + 1], rotarrayX[i + 2], rotarrayY[i + 2], RGB(i*0.05, i*0.05, i*0.05), puffer);
CUDA_FillTriangle_Zbuffer(rotarrayX[i], rotarrayY[i], rotarrayZ[i], rotarrayX[i + 1], rotarrayY[i + 1], rotarrayZ[i + 1], rotarrayX[i + 2], rotarrayY[i + 2], rotarrayZ[i + 2], drawcolor, puffer, zpuffer);
}
}
__global__ void zoom_in(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ)
{
int i;
int index = (blockIdx.x * blockDim.x) + (threadIdx.x * 1);
int stride = blockDim.x * gridDim.x;
for (i = index; i < maxitemcount; i += stride)
{
rawarrayX[i] *= 1.2;
rawarrayY[i] *= 1.2;
rawarrayZ[i] *= 1.2;
}
}
__global__ void zoom_out(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ)
{
int i;
int index = (blockIdx.x * blockDim.x) + (threadIdx.x * 1);
int stride = blockDim.x * gridDim.x;
for (i = index; i < maxitemcount; i += stride)
{
rawarrayX[i] /= 1.2;
rawarrayY[i] /= 1.2;
rawarrayZ[i] /= 1.2;
}
} | 82877d1325e7bb2ccda9ea19192598d0c5d5ae74.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <windows.h>
#include <d2d1.h>
#include <d2d1helper.h>
#pragma comment(lib, "d2d1")
#include <mmsystem.h>
#pragma comment(lib, "winmm.lib")
//*****double buffering*****
#define SCREEN_WIDTH 1920
#define SCREEN_HEIGHT 1000
D2D1_RECT_U display_area;
ID2D1Bitmap *image_container = NULL;
unsigned int *dev_image_data, image_data[SCREEN_WIDTH * SCREEN_HEIGHT];
float *dev_zbuffer;//this used to be int / could also be int
typedef struct Vec3f {
float x, y, z;
};
//**************************************
//**************PEGAZUS 3D************
#define MAX_OBJ_NUM 20000000
int drawing_in_progress = 0;
int viewpoint = -500;
float rot_degree_x;
float rot_degree_y;
float rot_degree_z;
float rot_degree_x2 = 0;
float rot_degree_y2 = 90.0f;
float rot_degree_z2 = 0;
float Math_PI = 3.14159265358979323846;
float raw_verticesX[MAX_OBJ_NUM], raw_verticesY[MAX_OBJ_NUM], raw_verticesZ[MAX_OBJ_NUM];
int raw_vertices_length;
struct VEKTOR {
float x;
float y;
float z;
};
VEKTOR Vector1, Vector2, vNormal;
//*******CUDA*************
float *dev_raw_verticesX, *dev_raw_verticesY, *dev_raw_verticesZ;
float *dev_rotated_verticesX, *dev_rotated_verticesY, *dev_rotated_verticesZ;
//************************
void init_3D(void);
void data_transfer_to_GPU(void);
void D2D_drawing(void);
__global__ void CUDA_rotation(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ, float *rotarrayX, float *rotarrayY, float *rotarrayZ, float degree_cosx, float degree_sinx, float degree_cosy, float degree_siny, float degree_cosz, float degree_sinz);
void drawing(void);
__global__ void render_objects(int maxitemcount, float *rotarrayX, float *rotarrayY, float *rotarrayZ, unsigned int *puffer, float *zpuffer);
__global__ void zoom_in(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ);
__global__ void zoom_out(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ);
//************************************
//***********STANDARD WIN32API WINDOWING************
ID2D1Factory* pD2DFactory = NULL;
ID2D1HwndRenderTarget* pRT = NULL;
#define HIBA_00 TEXT("Error:Program initialisation process.")
HINSTANCE hInstGlob;
int SajatiCmdShow;
char szClassName[] = "WindowsApp";
HWND Form1; //window handle
LRESULT CALLBACK WndProc0(HWND, UINT, WPARAM, LPARAM);
//******************************************************
//*******for measurements********
long int vertex_counter, poly_counter;
float fps_stat;
int starttime;
int endtime;
//*****double buffering*****
void create_main_buffer(void);
void CUDA_cleanup_main_buffer(void);
__global__ void CUDA_CleanUp_Zbuffer(float *zpuffer);
void swap_main_buffer(void);
//**************************************
//*****drawing algorithms*****
__device__ void CUDA_SetPixel(int x1, int y1, int color, unsigned int *puffer);
__device__ void CUDA_SetPixel_Zbuffer(int x1, int y1, int z1, int color, unsigned int *puffer, float *zpuffer);
__device__ void CUDA_DrawLine(int x1, int y1, int x2, int y2, int color, unsigned int *puffer);
__device__ void CUDA_DrawLine_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int color, unsigned int *puffer, float *zpuffer);
__device__ void CUDA_FillTriangle(int x1, int y1, int x2, int y2, int x3, int y3, int color, unsigned int *puffer);
__device__ void CUDA_FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color, unsigned int *puffer, float *zpuffer);
//**************************************
//********************************
//OBJ format handling
//********************************
float tomb_vertices[MAX_OBJ_NUM][3];
int tomb_faces[MAX_OBJ_NUM][5];
int tomb_vertices_length = 0, tomb_faces_length = 0;
int getelementcount(unsigned char csv_content[]);
void getelement(unsigned char csv_content[], unsigned int data_index, unsigned char csv_content2[]);
void obj_loader(void);
//*********************************
//The main entry point of our program
//*********************************
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow)
{
static TCHAR szAppName[] = TEXT("StdWinClassName");
HWND hwnd;
MSG msg;
WNDCLASS wndclass0;
SajatiCmdShow = iCmdShow;
hInstGlob = hInstance;
//*********************************
//Preparing Windows class
//*********************************
wndclass0.style = CS_HREDRAW | CS_VREDRAW;
wndclass0.lpfnWndProc = WndProc0;
wndclass0.cbClsExtra = 0;
wndclass0.cbWndExtra = 0;
wndclass0.hInstance = hInstance;
wndclass0.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass0.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass0.hbrBackground = (HBRUSH)GetStockObject(LTGRAY_BRUSH);
wndclass0.lpszMenuName = NULL;
wndclass0.lpszClassName = TEXT("WIN0");
//*********************************
//Registering our windows class
//*********************************
if (!RegisterClass(&wndclass0))
{
MessageBox(NULL, HIBA_00, TEXT("Program Start"), MB_ICONERROR);
return 0;
}
//*********************************
//Creating the window
//*********************************
Form1 = CreateWindow(TEXT("WIN0"),
TEXT("CUDA - DIRECT2D"),
(WS_OVERLAPPED | WS_SYSMENU | WS_THICKFRAME | WS_MAXIMIZEBOX | WS_MINIMIZEBOX),
50,
50,
SCREEN_WIDTH,
SCREEN_HEIGHT,
NULL,
NULL,
hInstance,
NULL);
//*********************************
//Displaying the window
//*********************************
ShowWindow(Form1, SajatiCmdShow);
UpdateWindow(Form1);
//*********************************
//Activating the message processing for our window
//*********************************
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}
//*********************************
//The window's callback funtcion: handling events
//*********************************
LRESULT CALLBACK WndProc0(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;
unsigned int xPos, yPos, xPos2, yPos2, fwButtons;
switch (message)
{
//*********************************
//When creating the window
//*********************************
case WM_CREATE:
D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory);
pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hwnd, D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT)),
&pRT);
create_main_buffer();
cudaMalloc((void**)&dev_raw_verticesX, MAX_OBJ_NUM * sizeof(float));
cudaMalloc((void**)&dev_raw_verticesY, MAX_OBJ_NUM * sizeof(float));
cudaMalloc((void**)&dev_raw_verticesZ, MAX_OBJ_NUM * sizeof(float));
cudaMalloc((void**)&dev_rotated_verticesX, MAX_OBJ_NUM * sizeof(float));
cudaMalloc((void**)&dev_rotated_verticesY, MAX_OBJ_NUM * sizeof(float));
cudaMalloc((void**)&dev_rotated_verticesZ, MAX_OBJ_NUM * sizeof(float));
cudaMalloc((void**)&dev_image_data, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(unsigned int));
cudaMalloc((void**)&dev_zbuffer, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(float));
init_3D();
obj_loader();
data_transfer_to_GPU();
if ((joyGetNumDevs()) > 0) joySetCapture(hwnd, JOYSTICKID1, NULL, FALSE);
return 0;
//*********************************
//to eliminate color flickering
//*********************************
case WM_ERASEBKGND:
return (LRESULT)1;
case MM_JOY1MOVE:
fwButtons = wParam;
xPos = LOWORD(lParam);
yPos = HIWORD(lParam);
if (xPos == 65535) {
rot_degree_y2 += 2.0; D2D_drawing();
}
else if (xPos == 0) {
rot_degree_y2 -= 2.0; D2D_drawing();
}
if (yPos == 65535) {
rot_degree_x2 += 2.0; D2D_drawing();
}
else if (yPos == 0) {
rot_degree_x2 -= 2.0; D2D_drawing();
}
if (fwButtons == 128) {
rot_degree_z2 += 2.0; D2D_drawing();
}
else if (fwButtons == 64) {
rot_degree_z2 -= 2.0; D2D_drawing();
}
if (rot_degree_y2 > 360) {
rot_degree_y2 = 0; D2D_drawing();
}
else if (rot_degree_y2 < 0) {
rot_degree_y2 = 358; D2D_drawing();
}
if (rot_degree_x2 > 359) {
rot_degree_x2 = 0; D2D_drawing();
}
else if (rot_degree_x2 < 0) {
rot_degree_x2 = 358; D2D_drawing();
}
if (rot_degree_z2 > 359) {
rot_degree_z2 = 0; D2D_drawing();
}
else if (rot_degree_z2 < 0) {
rot_degree_z2 = 358; D2D_drawing();
}
if (fwButtons == 2)
{
int blockSize = 384;
int numBlocks = (raw_vertices_length + blockSize - 1) / blockSize;
zoom_in << <numBlocks, blockSize >> > (raw_vertices_length, dev_raw_verticesX, dev_raw_verticesY, dev_raw_verticesZ);
cudaDeviceSynchronize();
D2D_drawing();
}
else if (fwButtons == 4)
{
int blockSize = 384;
int numBlocks = (raw_vertices_length + blockSize - 1) / blockSize;
zoom_out << <numBlocks, blockSize >> > (raw_vertices_length, dev_raw_verticesX, dev_raw_verticesY, dev_raw_verticesZ);
cudaDeviceSynchronize();
D2D_drawing();
}
break;
//*********************************
//Repainting the client area of the window
//*********************************
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
EndPaint(hwnd, &ps);
D2D_drawing();
return 0;
//*********************************
//Closing the window, freeing resources
//*********************************
case WM_CLOSE:
pRT->Release();
pD2DFactory->Release();
cudaFree(dev_raw_verticesX);
cudaFree(dev_raw_verticesY);
cudaFree(dev_raw_verticesZ);
cudaFree(dev_rotated_verticesX);
cudaFree(dev_rotated_verticesY);
cudaFree(dev_rotated_verticesZ);
cudaFree(dev_image_data);
cudaFree(dev_zbuffer);
DestroyWindow(hwnd);
return 0;
//*********************************
//Destroying the window
//*********************************
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}
return DefWindowProc(hwnd, message, wParam, lParam);
}
//********************************
//PEGAZUS 3D
//********************************
void create_main_buffer(void)
{
pRT->CreateBitmap(D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT),
D2D1::BitmapProperties(D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM,
D2D1_ALPHA_MODE_IGNORE)), &image_container);
}
void CUDA_cleanup_main_buffer(void)
{
cudaMemset(dev_image_data, 255, SCREEN_HEIGHT*SCREEN_WIDTH * sizeof(unsigned int));
}
__global__ void CUDA_CleanUp_Zbuffer(float *zpuffer)
{
int i;
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (i = index; i < SCREEN_HEIGHT*SCREEN_WIDTH; i += stride)
{
zpuffer[i] = 999999;
}
}
void swap_main_buffer(void)
{
display_area.left = 0;
display_area.top = 0;
display_area.right = SCREEN_WIDTH;
display_area.bottom = SCREEN_HEIGHT;
image_container->CopyFromMemory(&display_area, image_data, SCREEN_WIDTH * sizeof(unsigned int));
pRT->BeginDraw();
pRT->DrawBitmap(image_container, D2D1::RectF(0.0f, 0.0f, SCREEN_WIDTH, SCREEN_HEIGHT), 1.0f, D2D1_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR, NULL);
pRT->EndDraw();
}
__device__ void CUDA_SetPixel(int x1, int y1, int color, unsigned int *puffer)
{
puffer[(y1 * SCREEN_WIDTH) + x1] = color;
}
__device__ void CUDA_SetPixel_Zbuffer(int x1, int y1, int z1, int color, unsigned int *puffer, float *zpuffer)
{
int offset = (y1 * SCREEN_WIDTH) + x1;
if (zpuffer[offset] > z1)
{
zpuffer[offset] = z1;
puffer[offset] = color;
}
}
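// Depth-test convention for the Z-buffer helpers above: a smaller z value means the fragment
// is closer to the viewer. CUDA_CleanUp_Zbuffer fills the buffer with 999999, so the first
// in-range fragment written to a pixel passes the zpuffer[offset] > z1 test and later
// fragments only replace it when they are nearer.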
__device__ void CUDA_DrawLine(int x1, int y1, int x2, int y2, int color, unsigned int *puffer)
{
bool flip = false;
int swap, offset;
if (abs(x2 - x1) < 2 && abs(y2 - y1) < 2)
{
puffer[(y2*SCREEN_WIDTH) + x2] = color; return;
}
if (abs(x1 - x2) < abs(y1 - y2))
{
swap = x1;
x1 = y1;
y1 = swap;
swap = x2;
x2 = y2;
y2 = swap;
flip = true;
}
if (x1 > x2)
{
swap = x1;
x1 = x2;
x2 = swap;
swap = y1;
y1 = y2;
y2 = swap;
}
int dx = x2 - x1;
int dy = y2 - y1;
int marker1 = abs(dy) * 2;
int marker2 = 0;
int y = y1, x;
if (flip)
{
for (x = x1; x <= x2; ++x)
{
offset = (x * SCREEN_WIDTH);
puffer[offset + y] = color;
marker2 += marker1;
if (marker2 > dx)
{
y += (y2 > y1 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
else
{
for (x = x1; x <= x2; ++x)
{
offset = (y * SCREEN_WIDTH);
puffer[offset + x] = color;
marker2 += marker1;
if (marker2 > dx)
{
y += (y2 > y1 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
}
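// CUDA_DrawLine above is the integer Bresenham scheme: marker1 = 2*|dy| is added to the error
// term marker2 at every step along the major axis, and whenever the error exceeds dx the minor
// coordinate y moves one pixel towards y2 and the error is reduced by 2*dx.
// Worked example for a line from (0,0) to (5,2): dx = 5, marker1 = 4, and the plotted pixels
// come out as (0,0) (1,0) (2,1) (3,1) (4,2) (5,2).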
__device__ void CUDA_DrawLine_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int color, unsigned int *puffer, float *zpuffer)
{
float Pz;
bool flip = false;
int swap, offset;
if (abs(x2 - x1) < 2 && abs(y2 - y1) < 2) {
puffer[(y2*SCREEN_WIDTH) + x2] = color; return;
}
if (abs(x1 - x2) < abs(y1 - y2))
{
swap = x1;
x1 = y1;
y1 = swap;
swap = x2;
x2 = y2;
y2 = swap;
flip = true;
}
if (x1 > x2)
{
swap = x1;
x1 = x2;
x2 = swap;
swap = y1;
y1 = y2;
y2 = swap;
}
int dx = x2 - x1;
int dy = y2 - y1;
int marker1 = abs(dy) * 2;
int marker2 = 0;
int y = y1, x;
for (x = x1; x <= x2; ++x)
{
if (z1 == z2) Pz = z1;
else
{
int s1 = abs(x2 - x1);
int s2 = abs(z1 - z2);
Pz = (float)z2 + (float)((((float)x - (float)x1) / (float)s1) * (float)s2);
}
if (flip)
{
offset = (x * SCREEN_WIDTH);
if (zpuffer[offset + y] > Pz)
{
zpuffer[offset + y] = Pz;
puffer[offset + y] = color;
}
}
else
{
offset = (y * SCREEN_WIDTH);
if (zpuffer[offset + x] > Pz)
{
zpuffer[offset + x] = Pz;
puffer[offset + x] = color;
}
}
marker2 += marker1;
if (marker2 > dx)
{
y += (y2 > y1 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
__device__ void CUDA_FillTriangle(int x1, int y1, int x2, int y2, int x3, int y3, int color, unsigned int *puffer)
{
int Ax, Ay, Bx, By, i, j;
int swapx, swapy, offset, maxoffset = SCREEN_HEIGHT * SCREEN_WIDTH;
if (y1 == y2 && y1 == y3) return;
if (y1 > y2)
{
swapx = x1;
swapy = y1;
x1 = x2;
y1 = y2;
x2 = swapx;
y2 = swapy;
}
if (y1 > y3)
{
swapx = x1;
swapy = y1;
x1 = x3;
y1 = y3;
x3 = swapx;
y3 = swapy;
}
if (y2 > y3)
{
swapx = x3;
swapy = y3;
x3 = x2;
y3 = y2;
x2 = swapx;
y2 = swapy;
}
int t_height = y3 - y1;
for (i = 0; i < t_height; ++i)
{
bool lower_part = i > y2 - y1 || y2 == y1;
int part_height = lower_part ? y3 - y2 : y2 - y1;
float alpha = (float)i / t_height;
float beta = (float)(i - (lower_part ? y2 - y1 : 0)) / part_height;
Ax = x1 + (x3 - x1)*alpha;
Ay = y1 + (y3 - y1)*alpha;
Bx = lower_part ? x2 + (x3 - x2)*beta : x1 + (x2 - x1)*beta;
By = lower_part ? y2 + (y3 - y2)*beta : y1 + (y2 - y1)*beta;
if (Ax > Bx)
{
swapx = Ax;
swapy = Ay;
Ax = Bx;
Ay = By;
Bx = swapx;
By = swapy;
}
offset = (y1 + i)*SCREEN_WIDTH;
for (j = Ax; j < Bx; ++j)
{
			if (offset + j >= maxoffset) continue;
puffer[offset + j] = color;
}
}
}
__device__ void CUDA_FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color, unsigned int *puffer, float *zpuffer)
{
int Ax, Ay, Bx, By, i, j, depth_value;
int swapx, swapy, offset;
Vec3f interpolate, helper_vector;
if (y1 == y2 && y1 == y3) return;
if (y1 > y2)
{
swapx = x1;
swapy = y1;
x1 = x2;
y1 = y2;
x2 = swapx;
y2 = swapy;
}
if (y1 > y3)
{
swapx = x1;
swapy = y1;
x1 = x3;
y1 = y3;
x3 = swapx;
y3 = swapy;
}
if (y2 > y3)
{
swapx = x3;
swapy = y3;
x3 = x2;
y3 = y2;
x2 = swapx;
y2 = swapy;
}
int t_height = y3 - y1;
for (i = 0; i < t_height; ++i)
{
bool second_half = i > y2 - y1 || y2 == y1;
int segment_height = second_half ? y3 - y2 : y2 - y1;
float alpha = (float)i / t_height;
float beta = (float)(i - (second_half ? y2 - y1 : 0)) / segment_height;
Ax = x1 + (x3 - x1)*alpha;
Ay = y1 + (y3 - y1)*alpha;
Bx = second_half ? x2 + (x3 - x2)*beta : x1 + (x2 - x1)*beta;
By = second_half ? y2 + (y3 - y2)*beta : y1 + (y2 - y1)*beta;
if (Ax > Bx)
{
swapx = Ax;
swapy = Ay;
Ax = Bx;
Ay = By;
Bx = swapx;
By = swapy;
}
offset = (y1 + i)*SCREEN_WIDTH;
for (j = Ax; j <= Bx; ++j)
{
helper_vector.x = (x2 - x1) * (y1 - (y1 + i)) - (x1 - j) * (y2 - y1);
helper_vector.y = (x1 - j) * (y3 - y1) - (x3 - x1) * (y1 - (y1 + i));
helper_vector.z = (x3 - x1) * (y2 - y1) - (x2 - x1) * (y3 - y1);
if (abs((int)helper_vector.z) < 1) { interpolate.x = -1; interpolate.y = 0; interpolate.z = 0; }
else
{
interpolate.x = 1.f - (helper_vector.x + helper_vector.y) / helper_vector.z;
interpolate.y = helper_vector.y / helper_vector.z;
interpolate.z = helper_vector.x / helper_vector.z;
}
if (interpolate.x < 0 || interpolate.y < 0 || interpolate.z < 0) continue;
depth_value = (z1*interpolate.x) + (z2*interpolate.y) + (z3*interpolate.z);
if (zpuffer[offset + j] > depth_value)
{
zpuffer[offset + j] = depth_value;
puffer[offset + j] = color;
}
}
}
}
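// The interpolate vector computed above holds the barycentric coordinates of pixel (j, y1+i)
// with respect to the (y-sorted) triangle vertices: the three components weight z1, z2 and z3,
// they sum to 1 inside the triangle, and any negative component means the pixel lies outside
// the triangle, which is why such pixels are skipped before the depth test.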
void init_3D(void)
{
rot_degree_x = 0 * Math_PI / 180; rot_degree_x2 = 0;
rot_degree_y = 0 * Math_PI / 180; rot_degree_y2 = 0;
rot_degree_z = 0 * Math_PI / 180; rot_degree_z2 = 0;
raw_vertices_length = 0;
}
void data_transfer_to_GPU(void)
{
cudaMemcpy(dev_raw_verticesX, raw_verticesX, raw_vertices_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_raw_verticesY, raw_verticesY, raw_vertices_length * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_raw_verticesZ, raw_verticesZ, raw_vertices_length * sizeof(float), cudaMemcpyHostToDevice);
}
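// Illustrative sketch only (not part of the original pipeline): each copy above could be
// checked before the kernels consume the buffers, e.g.
//   cudaError_t err = cudaMemcpy(dev_raw_verticesX, raw_verticesX,
//                                raw_vertices_length * sizeof(float), cudaMemcpyHostToDevice);
//   if (err != cudaSuccess) printf("vertex upload failed: %s\n", cudaGetErrorString(err));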
//********************************
//OBJ format handling
//********************************
int getelementcount(unsigned char csv_content[])
{
int s1, s2;
for (s1 = s2 = 0; s1 < strlen((const char *)csv_content); ++s1)
{
if (csv_content[s1] == 10) break;
else if (csv_content[s1] == 32) ++s2;
}
return s2;
}
void getelement(unsigned char csv_content[], unsigned int data_index, unsigned char csv_content2[])
{
int s1, s2, s3, s4 = 0;
for (s1 = 0, s2 = 0; s1 < strlen((const char *)csv_content); ++s1)
{
if (csv_content[s1] == 32)
{
++s2;
if (s2 == data_index)
{
for (s3 = s1 + 1; s3 < strlen((const char *)csv_content); ++s3)
{
if (csv_content[s3] == 32 || csv_content[s3] == 10)
{
csv_content2[s4] = 0;
return;
}
else csv_content2[s4++] = csv_content[s3];
}
}
}
}
}
void obj_loader(void)
{
FILE *objfile;
int i, j;
float data1, data2, data3;
unsigned char row1[1024], row2[1024];
int data_count, max_row_length = 250;
char tempstr[200];
objfile = fopen("model.obj", "rt");
if (objfile == NULL) return;
vertex_counter = poly_counter = 0;
	tomb_vertices_length = tomb_faces_length = 0;
while (!feof(objfile))
{
fgets((char *)row1, max_row_length, objfile);
if (row1[0] == 118 && row1[1] == 32) //*** 'v '
{
getelement(row1, 1, row2); data1 = atof((const char *)row2);
getelement(row1, 2, row2); data2 = atof((const char *)row2);
getelement(row1, 3, row2); data3 = atof((const char *)row2);
tomb_vertices[tomb_vertices_length][0] = data1 * 4;
tomb_vertices[tomb_vertices_length][1] = data2 * 4;
tomb_vertices[tomb_vertices_length++][2] = data3 * 4;
}
else if (row1[0] == 102 && row1[1] == 32) //*** 'f '
{
data_count = getelementcount(row1);
tomb_faces[tomb_faces_length][0] = data_count;
for (i = 1; i < data_count + 1; ++i)
{
getelement(row1, i, row2);
data1 = atof((const char *)row2);
tomb_faces[tomb_faces_length][i] = data1 - 1;
}
++tomb_faces_length;
}
}
fclose(objfile);
int base_index;
for (i = 0; i < tomb_faces_length; ++i)
{
base_index = tomb_faces[i][1];
if (tomb_faces[i][0] == 3)
{
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][1]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][2]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][2]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][2]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][3]][2];
++poly_counter;
vertex_counter += 3;
}
else if (tomb_faces[i][0] == 4)
{
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][1]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][2]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][2]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][2]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][3]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][1]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][1]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][3]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][3]][2];
raw_verticesX[raw_vertices_length] = tomb_vertices[tomb_faces[i][4]][0];
raw_verticesY[raw_vertices_length] = tomb_vertices[tomb_faces[i][4]][1];
raw_verticesZ[raw_vertices_length++] = tomb_vertices[tomb_faces[i][4]][2];
poly_counter += 2;
vertex_counter += 6;
}
}
}
void D2D_drawing(void)
{
if (drawing_in_progress == 1) return;
drawing_in_progress = 1;
char tempstr[255], tempstr2[255], hibauzenet[256];
int blockSize = 384;
int numBlocks = (raw_vertices_length + blockSize - 1) / blockSize;
strcpy(tempstr2, "Vertices: ");
_itoa(vertex_counter, tempstr, 10); strcat(tempstr2, tempstr); strcat(tempstr2, " Polygons: ");
_itoa(poly_counter, tempstr, 10); strcat(tempstr2, tempstr); strcat(tempstr2, " Z ordered: ");
starttime = GetTickCount();
rot_degree_x = rot_degree_x2 * Math_PI / 180;
rot_degree_y = rot_degree_y2 * Math_PI / 180;
rot_degree_z = rot_degree_z2 * Math_PI / 180;
float degree_sinx = sin(rot_degree_x);
float degree_cosx = cos(rot_degree_x);
float degree_siny = sin(rot_degree_y);
float degree_cosy = cos(rot_degree_y);
float degree_sinz = sin(rot_degree_z);
float degree_cosz = cos(rot_degree_z);
CUDA_rotation << <numBlocks, blockSize >> > (raw_vertices_length, dev_raw_verticesX, dev_raw_verticesY, dev_raw_verticesZ, dev_rotated_verticesX, dev_rotated_verticesY, dev_rotated_verticesZ, degree_cosx, degree_sinx, degree_cosy, degree_siny, degree_cosz, degree_sinz);
cudaDeviceSynchronize();
strcpy_s(hibauzenet, cudaGetErrorString(cudaGetLastError()));
drawing();
endtime = GetTickCount();
if ((endtime - starttime) == 0) ++endtime;
fps_stat = 1000 / (endtime - starttime); strcat(tempstr2, " FPS: "); _itoa(fps_stat, tempstr, 10); strcat(tempstr2, tempstr);
strcat(tempstr2, ", X: "); _itoa(rot_degree_x2, tempstr, 10); strcat(tempstr2, tempstr);
strcat(tempstr2, ", Y: "); _itoa(rot_degree_y2, tempstr, 10); strcat(tempstr2, tempstr);
strcat(tempstr2, ", Z: "); _itoa(rot_degree_z2, tempstr, 10); strcat(tempstr2, tempstr);
strcat(tempstr2, ", CUDA: "); strcat(tempstr2, hibauzenet);
SetWindowTextA(Form1, tempstr2);
drawing_in_progress = 0;
}
__global__ void CUDA_rotation(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ, float *rotarrayX, float *rotarrayY, float *rotarrayZ, float degree_cosx, float degree_sinx, float degree_cosy, float degree_siny, float degree_cosz, float degree_sinz)
{
int i;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float t0;
	//rotation
for (i = index; i < maxitemcount; i += stride)
{
rotarrayY[i] = (rawarrayY[i] * degree_cosx) - (rawarrayZ[i] * degree_sinx);
rotarrayZ[i] = rawarrayY[i] * degree_sinx + rawarrayZ[i] * degree_cosx;
rotarrayX[i] = rawarrayX[i] * degree_cosy + rotarrayZ[i] * degree_siny;
rotarrayZ[i] = -rawarrayX[i] * degree_siny + rotarrayZ[i] * degree_cosy;// +
t0 = rotarrayX[i];
//some tweaking for OBJ models: "+ (SCREEN_WIDTH / 4)" and "+ (SCREEN_HEIGHT / 4)"
rotarrayX[i] = t0 * degree_cosz - rotarrayY[i] * degree_sinz + (SCREEN_WIDTH / 4);
rotarrayY[i] = t0 * degree_sinz + rotarrayY[i] * degree_cosz + (SCREEN_HEIGHT / 4);
}
//perspective projection
int s1;
int viewpoint = -1100;
float sx = SCREEN_WIDTH / 2;
float sultra = SCREEN_HEIGHT / 2, sultra2 = SCREEN_HEIGHT / 3;
int x_minusz_edge = 0, y_minusz_edge = 0, x_max_edge = SCREEN_WIDTH - 1, y_max_edge = SCREEN_HEIGHT - 1;
float distance;
for (i = index; i < maxitemcount; i += stride)
{
distance = 999999;
if (rotarrayZ[i] < distance) distance = rotarrayZ[i];
if (distance < viewpoint) { rotarrayZ[i] = -9999999; continue; }
sultra = viewpoint / (viewpoint - rotarrayZ[i]);
rotarrayX[i] = rotarrayX[i] * sultra + 400;
rotarrayY[i] = (rotarrayY[i] * sultra) + sultra2;
if (rotarrayX[i] < x_minusz_edge || rotarrayX[i] > x_max_edge) { rotarrayZ[i] = -9999999; continue; }
if (rotarrayY[i] < y_minusz_edge || rotarrayY[i] > y_max_edge) { rotarrayZ[i] = -9999999; continue; }
}
}
void drawing(void)
{
CUDA_cleanup_main_buffer();
CUDA_CleanUp_Zbuffer << < ((SCREEN_WIDTH*SCREEN_HEIGHT) + 384 - 1) / 384, 384 >> > (dev_zbuffer);
cudaDeviceSynchronize();
render_objects << <12, 384 >> > (raw_vertices_length, dev_rotated_verticesX, dev_rotated_verticesY, dev_rotated_verticesZ, dev_image_data, dev_zbuffer);
cudaDeviceSynchronize();
cudaMemcpy(image_data, dev_image_data, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(unsigned int), cudaMemcpyDeviceToHost);
swap_main_buffer();
}
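// Per-frame order in drawing(): clear the colour buffer (cudaMemset), clear the z-buffer,
// rasterise the rotated vertices into device memory, then copy the finished frame back to the
// host for Direct2D presentation. The final cudaMemcpy runs on the default stream, so it is
// also a synchronisation point before swap_main_buffer() reads image_data on the CPU.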
__global__ void render_objects(int maxitemcount, float *rotarrayX, float *rotarrayY, float *rotarrayZ, unsigned int *puffer, float *zpuffer)
{
int i, px, py, drawcolor;
int index = (blockIdx.x * blockDim.x) + (threadIdx.x * 3);
int stride = blockDim.x * gridDim.x;
//VEKTOR Vector1, Vector2, vNormal;//for visibility check
for (i = index; i < maxitemcount - 3; i += stride)
{
if ((rotarrayZ[i] < -9000000) || (rotarrayZ[i + 1] < -9000000) || (rotarrayZ[i + 2] < -9000000)) continue;
/* for visibility check
Vector1.x = rotarrayX[i + 1] - rotarrayX[i];
Vector1.y = rotarrayY[i + 1] - rotarrayY[i];
Vector1.z = rotarrayZ[i + 1] - rotarrayZ[i];
Vector2.x = rotarrayX[i + 2] - rotarrayX[i];
Vector2.y = rotarrayY[i + 2] - rotarrayY[i];
Vector2.z = rotarrayZ[i + 2] - rotarrayZ[i];
vNormal.x = ((Vector1.y * Vector2.z) - (Vector1.z * Vector2.y));
vNormal.y = ((Vector1.z * Vector2.x) - (Vector1.x * Vector2.z));
vNormal.z = ((Vector1.x * Vector2.y) - (Vector1.y * Vector2.x));
if (vNormal.z > 0) continue;
*/
drawcolor = RGB(180 * ((float)i / (float)maxitemcount * 100), 180 * ((float)i / (float)maxitemcount * 100), 180 * ((float)i / (float)maxitemcount * 100));
//CUDA_SetPixel(rotarrayX[i], rotarrayY[i], RGB(0, 0, 0),puffer);
//CUDA_SetPixel_Zbuffer(rotarrayX[i], rotarrayY[i], rotarrayZ[i], drawcolor, puffer, zpuffer);
/*CUDA_DrawLine(rotarrayX[i], rotarrayY[i], rotarrayX[i + 1], rotarrayY[i + 1], RGB(0, 0, 0), puffer);
CUDA_DrawLine(rotarrayX[i+2], rotarrayY[i+2], rotarrayX[i + 1], rotarrayY[i + 1], RGB(0, 0, 0), puffer);
CUDA_DrawLine(rotarrayX[i], rotarrayY[i], rotarrayX[i + 2], rotarrayY[i + 2], RGB(0, 0, 0), puffer);//*/
/*CUDA_DrawLine_Zbuffer(rotarrayX[i], rotarrayY[i], rotarrayZ[i], rotarrayX[i + 1], rotarrayY[i + 1], rotarrayZ[i+1], RGB(0, 0, 0), puffer,zpuffer);
CUDA_DrawLine_Zbuffer(rotarrayX[i + 2], rotarrayY[i + 2], rotarrayZ[i+2], rotarrayX[i + 1], rotarrayY[i + 1], rotarrayZ[i+1], RGB(0, 0, 0), puffer, zpuffer);
CUDA_DrawLine_Zbuffer(rotarrayX[i], rotarrayY[i], rotarrayZ[i], rotarrayX[i + 2], rotarrayY[i + 2], rotarrayZ[i+2], RGB(0, 0, 0), puffer, zpuffer);//*/
//CUDA_FillTriangle(rotarrayX[i], rotarrayY[i], rotarrayX[i + 1], rotarrayY[i + 1], rotarrayX[i + 2], rotarrayY[i + 2], RGB(i*0.05, i*0.05, i*0.05), puffer);
CUDA_FillTriangle_Zbuffer(rotarrayX[i], rotarrayY[i], rotarrayZ[i], rotarrayX[i + 1], rotarrayY[i + 1], rotarrayZ[i + 1], rotarrayX[i + 2], rotarrayY[i + 2], rotarrayZ[i + 2], drawcolor, puffer, zpuffer);
}
}
__global__ void zoom_in(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ)
{
int i;
int index = (blockIdx.x * blockDim.x) + (threadIdx.x * 1);
int stride = blockDim.x * gridDim.x;
for (i = index; i < maxitemcount; i += stride)
{
rawarrayX[i] *= 1.2;
rawarrayY[i] *= 1.2;
rawarrayZ[i] *= 1.2;
}
}
__global__ void zoom_out(int maxitemcount, float *rawarrayX, float *rawarrayY, float *rawarrayZ)
{
int i;
int index = (blockIdx.x * blockDim.x) + (threadIdx.x * 1);
int stride = blockDim.x * gridDim.x;
for (i = index; i < maxitemcount; i += stride)
{
rawarrayX[i] /= 1.2;
rawarrayY[i] /= 1.2;
rawarrayZ[i] /= 1.2;
}
} |
1d41825c491f64d5c999aee89a4ddc64ea2d3be8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include<time.h>
#include<math.h>
#include "hip/hip_runtime.h"
//device function
__global__ void kernalAddVectors(int N, double *a, double *b, double *c) {
int threadid = threadIdx.x; // thread number
int blockid = blockIdx.x; // block number
int Nblock = blockDim.x; //number of threads in a block
int id = threadid + blockid*Nblock;
if (id<N) {
c[id] = a[id] + b[id];
}
}
int main(int argc, char **argv) {
// get vector size from command line argument
int N = atoi(argv[1]);
double seed = clock();
srand48(seed);
double *h_a, *h_b, *h_c; //host vectors
//allocate storage
h_a = (double *) malloc(N*sizeof(double));
h_b = (double *) malloc(N*sizeof(double));
h_c = (double *) malloc(N*sizeof(double));
//populate a and b
for(int n=0; n<N; n++) {
h_a[n] = drand48();
h_b[n] = drand48();
}
double hostStart = clock();
// c=a+b
for(int n=0;n<N;n++) {
h_c[n] = h_a[n] + h_b[n];
}
double hostEnd = clock();
double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC;
printf("The host took %g seconds to add a and b \n", hostTime);
//Device arrays
double *d_a, *d_b, *d_c;
//allocate memory on the Device with hipMalloc
hipMalloc(&d_a, N*sizeof(double));
hipMalloc(&d_b, N*sizeof(double));
hipMalloc(&d_c, N*sizeof(double));
//copy data from the host to the device
hipMemcpy(d_a, h_a, N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N*sizeof(double), hipMemcpyHostToDevice);
//at this point the data is allocated and populated on the device
int Nthreads = atoi(argv[2]); //get the num of threads per block
int Nblocks = (N+Nthreads-1)/Nthreads;
double deviceStart = clock();
hipLaunchKernelGGL(kernalAddVectors, dim3(Nblocks), dim3(Nthreads), 0, 0, N, d_a, d_b, d_c);
hipDeviceSynchronize(); //kernel launches are asynchronous, so wait for completion before stopping the timer
double deviceEnd = clock();
double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The device took %f seconds to add a and b \n", deviceTime);
printf("The device was %d times faster \n", hostTime/deviceTime);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
}
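// Example usage (file and binary names are assumptions, not part of the original source):
//   hipcc vector_add.hip.cpp -o vector_add
//   ./vector_add 1000000 256   # add two random vectors of 10^6 doubles, 256 threads per block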
| 1d41825c491f64d5c999aee89a4ddc64ea2d3be8.cu | #include <stdio.h>
#include <stdlib.h>
#include<time.h>
#include<math.h>
#include "cuda.h"
//device function
__global__ void kernalAddVectors(int N, double *a, double *b, double *c) {
int threadid = threadIdx.x; // thread number
int blockid = blockIdx.x; // block number
int Nblock = blockDim.x; //number of threads in a block
int id = threadid + blockid*Nblock;
if (id<N) {
c[id] = a[id] + b[id];
}
}
int main(int argc, char **argv) {
// get vector size from command line argument
int N = atoi(argv[1]);
double seed = clock();
srand48(seed);
double *h_a, *h_b, *h_c; //host vectors
//allocate storage
h_a = (double *) malloc(N*sizeof(double));
h_b = (double *) malloc(N*sizeof(double));
h_c = (double *) malloc(N*sizeof(double));
//populate a and b
for(int n=0; n<N; n++) {
h_a[n] = drand48();
h_b[n] = drand48();
}
double hostStart = clock();
// c=a+b
for(int n=0;n<N;n++) {
h_c[n] = h_a[n] + h_b[n];
}
double hostEnd = clock();
double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC;
printf("The host took %g seconds to add a and b \n", hostTime);
//Device arrays
double *d_a, *d_b, *d_c;
//allocate memory on the Device with cudaMalloc
cudaMalloc(&d_a, N*sizeof(double));
cudaMalloc(&d_b, N*sizeof(double));
cudaMalloc(&d_c, N*sizeof(double));
//copy data from the host to the device
cudaMemcpy(d_a, h_a, N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N*sizeof(double), cudaMemcpyHostToDevice);
//at this point the data is allocated and populated on the device
int Nthreads = atoi(argv[2]); //get the num of threads per block
int Nblocks = (N+Nthreads-1)/Nthreads;
double deviceStart = clock();
kernalAddVectors <<< Nblocks, Nthreads >>>(N, d_a, d_b, d_c);
cudaDeviceSynchronize(); //kernel launches are asynchronous, so wait for completion before stopping the timer
double deviceEnd = clock();
double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The device took %f seconds to add a and b \n", deviceTime);
printf("The device was %d times faster \n", hostTime/deviceTime);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
}
|
915bc9ac1018cbbbaf8492df4adcbf4ddd04963c.hip | // !!! This is a file automatically generated by hipify!!!
#include "LocalIntensityScaleCuda.h"
#include "LocalIntensityScale.hpp"
#include <iostream>
#include <memory>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <math_functions.h>
#include "misc/CudaTools.hpp"
/**
*
* How it works along y-dir (let's suppose offset = 2 and number of workers = 8 for simplicity):
*
* image idx: 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2
*
* loop #1
* workersIdx 0 1 2 3 4 5 6 7
* loop #2
* workersIdx 6 7 0 1 2 3 4 5
* loop #3
* workersIdx 4 5 6 7 0 1 2 3
* ..............
*
* so #offset workers must wait in each loop to have next elements to sum
*
* @tparam T
* @param image
* @param offset
* @param x_num
* @param y_num
* @param z_num
*/
template <typename T>
__global__ void meanYdir(T *image, int offset, size_t x_num, size_t y_num, size_t z_num) {
// NOTE: Block size in x/z direction must be 1
const size_t workersOffset = (blockIdx.z * x_num + blockIdx.x) * y_num;
const int numOfWorkers = blockDim.y;
const unsigned int active = __activemask();
const int workerIdx = threadIdx.y;
int workerOffset = workerIdx;
int offsetInTheLoop = 0;
T sum = 0;
T v = 0;
bool waitForNextLoop = false;
int countNumOfSumElements = 1;
while(workerOffset < y_num) {
if (!waitForNextLoop) v = image[workersOffset + workerOffset];
bool waitForNextValues = (workerIdx + offsetInTheLoop) % numOfWorkers >= (numOfWorkers - offset);
for (int off = 1; off <= offset; ++off) {
T prevElement = __shfl_sync(active, v, workerIdx + blockDim.y - off, blockDim.y);
T nextElement = __shfl_sync(active, v, workerIdx + off, blockDim.y);
// LHS boundary check + don't add previous values if they were added in a previous loop execution
if (workerOffset >= off && !waitForNextLoop) {sum += prevElement; ++countNumOfSumElements;}
// RHS boundary check + don't read next values since they are not read yet
if (!waitForNextValues && workerOffset + off < y_num) {sum += nextElement; ++countNumOfSumElements;}
}
waitForNextLoop = waitForNextValues;
if (!waitForNextLoop) {
sum += v;
image[workersOffset + workerOffset] = sum / countNumOfSumElements;
// worker is done with current element - move to next one
sum = 0;
countNumOfSumElements = 1;
workerOffset += numOfWorkers;
}
offsetInTheLoop += offset;
}
}
constexpr int NumberOfWorkers = 32; // Cannot be greater than 32 since there is no inter-warp communication implemented.
/**
 * The X-direction filter slides a circular buffer along each row, adding each newly read element to the running sum and removing the element that drops out of the filter window.
* For instance (filter len = 5)
*
* idx: 0 1 2 3 4 5 6 7 8 9
* image elements: 1 2 2 4 5 3 2 1 3 4
*
* buffer: 2 3 4 5 2 current sum = 16 element @idx=4 will be updated to 16/5
*
* next step
* buffer: 3 4 5 2 1 sum = sum - 2 + 1 = 15 element @idx=5 = 15 / 5
*
* In general circular buffer is kept to speedup operations and to not reach to global memory more than once for
* read/write operations for given element.
*/
template <typename T>
__global__ void meanXdir(T *image, int offset, size_t x_num, size_t y_num, size_t z_num) {
const size_t workerOffset = blockIdx.y * blockDim.y + threadIdx.y + (blockIdx.z * blockDim.z + threadIdx.z) * y_num * x_num;
const int workerYoffset = blockIdx.y * blockDim.y + threadIdx.y ;
const int workerIdx = threadIdx.y;
const int nextElementOffset = y_num;
extern __shared__ float sharedMem[];
float (*data)[NumberOfWorkers] = (float (*)[NumberOfWorkers])sharedMem;
const int divisor = 2 * offset + 1;
int currElementOffset = 0;
int saveElementOffset = 0;
if (workerYoffset < y_num) {
// clear shared mem
for (int i = offset; i < divisor; ++i) data[i][workerIdx] = 0;
// saturate cache with #offset elements since it will allow to calculate first element value on LHS
float sum = 0;
int count = 0;
while (count < offset) {
T v = image[workerOffset + currElementOffset];
sum += v;
data[count][workerIdx] = v;
currElementOffset += nextElementOffset;
++count;
}
// Pointer in circular buffer
int beginPtr = offset;
// main loop going through all elements in range [0, x_num-offset)
for (int x = 0; x < x_num - offset; ++x) {
// Read new element
T v = image[workerOffset + currElementOffset];
// Update sum to cover [-offset, offset] of currently processed element
sum += v;
sum -= data[beginPtr][workerIdx];
// Save and move pointer
data[beginPtr][workerIdx] = v;
beginPtr = (beginPtr + 1) % divisor;
// Update count and save currently processed element
count = min(count + 1, divisor);
image[workerOffset + saveElementOffset] = sum / count;
// Move to next elements
currElementOffset += nextElementOffset;
saveElementOffset += nextElementOffset;
}
// Handle last #offset elements on RHS
while (saveElementOffset < currElementOffset) {
count = count - 1;
sum -= data[beginPtr][workerIdx];
image[workerOffset + saveElementOffset] = sum / count;
beginPtr = (beginPtr + 1) % divisor;
saveElementOffset += nextElementOffset;
}
}
}
/**
 * The Z-direction filter slides a circular buffer along the z axis, adding each newly read element to the running sum and removing the element that drops out of the filter window.
* For instance (filter len = 5)
*
* idx: 0 1 2 3 4 5 6 7 8 9
* image elements: 1 2 2 4 5 3 2 1 3 4
*
* buffer: 2 3 4 5 2 current sum = 16 element @idx=4 will be updated to 16/5
*
* next step
* buffer: 3 4 5 2 1 sum = sum - 2 + 1 = 15 element @idx=5 = 15 / 5
*
* In general circular buffer is kept to speedup operations and to not reach to global memory more than once for
* read/write operations for given element.
*/
template <typename T>
__global__ void meanZdir(T *image, int offset, size_t x_num, size_t y_num, size_t z_num) {
const size_t workerOffset = blockIdx.y * blockDim.y + threadIdx.y + (blockIdx.z * blockDim.z + threadIdx.z) * y_num; // *.z is 'x'
const int workerYoffset = blockIdx.y * blockDim.y + threadIdx.y ;
const int workerIdx = threadIdx.y;
const int nextElementOffset = x_num * y_num;
extern __shared__ float sharedMem[];
float (*data)[NumberOfWorkers] = (float (*)[NumberOfWorkers])sharedMem;
const int divisor = 2 * offset + 1;
int currElementOffset = 0;
int saveElementOffset = 0;
if (workerYoffset < y_num) {
// clear shared mem
for (int i = offset; i < divisor; ++i) data[i][workerIdx] = 0;
// saturate cache with #offset elements since it will allow to calculate first element value on LHS
float sum = 0;
int count = 0;
while (count < offset) {
T v = image[workerOffset + currElementOffset];
sum += v;
data[count][workerIdx] = v;
currElementOffset += nextElementOffset;
++count;
}
// Pointer in circular buffer
int beginPtr = offset;
// main loop going through all elements in range [0, x_num-offset)
for (int z = 0; z < z_num - offset; ++z) {
// Read new element
T v = image[workerOffset + currElementOffset];
// Update sum to cover [-offset, offset] of currently processed element
sum += v;
sum -= data[beginPtr][workerIdx];
// Save and move pointer
data[beginPtr][workerIdx] = v;
beginPtr = (beginPtr + 1) % divisor;
// Update count and save currently processed element
count = min(count + 1, divisor);
image[workerOffset + saveElementOffset] = sum / count;
// Move to next elements
currElementOffset += nextElementOffset;
saveElementOffset += nextElementOffset;
}
// Handle last #offset elements on RHS
while (saveElementOffset < currElementOffset) {
count = count - 1;
sum -= data[beginPtr][workerIdx];
image[workerOffset + saveElementOffset] = sum / count;
beginPtr = (beginPtr + 1) % divisor;
saveElementOffset += nextElementOffset;
}
}
}
template <typename T, typename S>
void localIntensityScaleCUDA(T *cudaImage, const PixelData<S> &image, int offsetX, int offsetY, int offsetZ, TypeOfMeanFlags flags) {
APRTimer timer(true);
if (flags & MEAN_Y_DIR) {
timer.start_timer("GpuDeviceTimeYdirLIS");
dim3 threadsPerBlock(1, NumberOfWorkers, 1);
dim3 numBlocks((image.x_num + threadsPerBlock.x - 1)/threadsPerBlock.x,
1,
(image.z_num + threadsPerBlock.z - 1)/threadsPerBlock.z);
printCudaDims(threadsPerBlock, numBlocks);
hipLaunchKernelGGL(( meanYdir), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, cudaImage, offsetY, image.x_num, image.y_num, image.z_num);
// waitForCuda();
timer.stop_timer();
}
if (flags & MEAN_X_DIR) {
// Shared memory size - it is able to keep filter len elements for each worker.
const int sharedMemorySize = (offsetX * 2 + 1) * sizeof(float) * NumberOfWorkers;
timer.start_timer("GpuDeviceTimeXdirLIS");
dim3 threadsPerBlock(1, NumberOfWorkers, 1);
dim3 numBlocks(1,
(image.y_num + threadsPerBlock.y - 1) / threadsPerBlock.y,
(image.z_num + threadsPerBlock.z - 1) / threadsPerBlock.z);
printCudaDims(threadsPerBlock, numBlocks);
hipLaunchKernelGGL(( meanXdir) , dim3(numBlocks), dim3(threadsPerBlock), sharedMemorySize , 0, cudaImage, offsetX, image.x_num, image.y_num, image.z_num);
// waitForCuda();
timer.stop_timer();
}
if (flags & MEAN_Z_DIR) {
// Shared memory size - it is able to keep filter len elements for each worker.
const int sharedMemorySize = (offsetZ * 2 + 1) * sizeof(float) * NumberOfWorkers;
timer.start_timer("GpuDeviceTimeZdirLIS");
dim3 threadsPerBlock(1, NumberOfWorkers, 1);
dim3 numBlocks(1,
(image.y_num + threadsPerBlock.y - 1) / threadsPerBlock.y,
(image.x_num + threadsPerBlock.x - 1) / threadsPerBlock.x); // intentionally here for better memory readings
printCudaDims(threadsPerBlock, numBlocks);
hipLaunchKernelGGL(( meanZdir) , dim3(numBlocks), dim3(threadsPerBlock), sharedMemorySize , 0, cudaImage, offsetZ, image.x_num, image.y_num, image.z_num);
// waitForCuda();
timer.stop_timer();
}
}
template <typename T>
void calcMean(PixelData<T> &image, int offset, TypeOfMeanFlags flags) {
APRTimer timer(true);
timer.start_timer("GpuMemTransferHostToDevice");
size_t imageSize = image.mesh.size() * sizeof(T);
T *cudaImage;
hipMalloc(&cudaImage, imageSize);
hipMemcpy(cudaImage, image.mesh.get(), imageSize, hipMemcpyHostToDevice);
timer.stop_timer();
// --------- CUDA ----------------
timer.start_timer("GpuDeviceTimeFull");
localIntensityScaleCUDA(cudaImage, image, offset, offset, offset, flags);
timer.stop_timer();
timer.start_timer("cuda: transfer data from device and freeing memory");
hipMemcpy((void*)image.mesh.get(), cudaImage, imageSize, hipMemcpyDeviceToHost);
hipFree(cudaImage);
timer.stop_timer();
}
template <typename T>
__global__ void copy1dKernel(const T *input, T *output, size_t len) {
size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
output[idx] = input[idx];
}
}
template <typename T>
void copy1d(const T *input, T *output, size_t len) {
dim3 threadsPerBlock(64);
dim3 numBlocks((len + threadsPerBlock.x - 1) / threadsPerBlock.x);
hipLaunchKernelGGL(( copy1dKernel) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, input, output, len);
}
template<typename T>
__global__ void absDiff1dKernel(T *data, const T *reference, size_t len) {
size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
data[idx] = abs(data[idx] - reference[idx]);
}
}
template <typename T>
void absDiff1d(T *data, const T *reference, size_t len) {
dim3 threadsPerBlock(64);
dim3 numBlocks((len + threadsPerBlock.x - 1) / threadsPerBlock.x);
hipLaunchKernelGGL(( absDiff1dKernel) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, data, reference, len);
}
template<typename T>
__global__ void rescaleKernel(T *data, size_t len, float varRescale, float sigmaThreshold, float sigmaThresholdMax) {
const float max_th = 60000.0;
size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
float rescaled = varRescale * data[idx];
if (rescaled < sigmaThreshold) {
rescaled = (rescaled < sigmaThresholdMax) ? max_th : sigmaThreshold;
}
data[idx] = rescaled;
}
}
template <typename T>
void rescale(T *data, size_t len, float varRescale, float sigma, float sigmaMax) {
dim3 threadsPerBlock(64);
dim3 numBlocks((len + threadsPerBlock.x - 1) / threadsPerBlock.x);
hipLaunchKernelGGL(( rescaleKernel) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, data, len, varRescale, sigma, sigmaMax);
}
template <typename T, typename S>
void localIntensityScaleCuda(const PixelData<T> &image, const APRParameters &par, S *cudaImage, S *cudaTemp) {
CudaTimer timer(true, "localIntensityScaleCuda");
float var_rescale;
std::vector<int> var_win;
LocalIntensityScale().get_window(var_rescale,var_win,par);
size_t win_y = var_win[0];
size_t win_x = var_win[1];
size_t win_z = var_win[2];
size_t win_y2 = var_win[3];
size_t win_x2 = var_win[4];
size_t win_z2 = var_win[5];
std::cout << "GPU WINDOWS: " << win_y << " " << win_x << " " << win_z << " " << win_y2 << " " << win_x2 << " " << win_z2 << std::endl;
// --------- CUDA ----------------
timer.start_timer("copy_intensities_from_bsplines");
copy1d(cudaImage, cudaTemp, image.mesh.size());
timer.stop_timer();
localIntensityScaleCUDA(cudaImage, image, win_x, win_y, win_z, MEAN_ALL_DIR);
timer.start_timer("second_pass_and_rescale");
absDiff1d(cudaImage, cudaTemp, image.mesh.size());
localIntensityScaleCUDA(cudaImage, image, win_x2, win_y2, win_z2, MEAN_ALL_DIR);
rescale(cudaImage, image.mesh.size(), var_rescale, par.sigma_th, par.sigma_th_max);
timer.stop_timer();
}
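// Pipeline recap for localIntensityScaleCuda above: (1) keep a copy of the input in cudaTemp,
// (2) box-mean the image along y/x/z with the first window, (3) take the absolute difference
// against the copy, (4) box-mean that difference with the second window, and (5) rescale and
// clamp it using var_rescale, sigma_th and sigma_th_max. The steps map one-to-one onto copy1d,
// localIntensityScaleCUDA, absDiff1d, localIntensityScaleCUDA and rescale.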
template <typename T>
void getLocalIntensityScale(PixelData<T> &image, PixelData<T> &temp, const APRParameters &par) {
APRTimer timer(true), timerFullPipelilne(true);
timer.start_timer("GpuMemTransferHostToDevice");
size_t imageSize = image.mesh.size() * sizeof(T);
T *cudaImage;
hipMalloc(&cudaImage, imageSize);
hipMemcpy(cudaImage, image.mesh.get(), imageSize, hipMemcpyHostToDevice);
T *cudaTemp;
hipMalloc(&cudaTemp, imageSize);
timer.stop_timer();
timerFullPipelilne.start_timer("GpuDeviceTimeFull");
localIntensityScaleCuda(image, par, cudaImage, cudaTemp);
timerFullPipelilne.stop_timer();
timer.start_timer("GpuMemTransferDeviceToHost");
getDataFromKernel(image, imageSize, cudaImage);
getDataFromKernel(temp, imageSize, cudaTemp);
timer.stop_timer();
}
// explicit instantiation of handled types
template void calcMean(PixelData<float>&, int, TypeOfMeanFlags);
template void calcMean(PixelData<uint16_t>&, int, TypeOfMeanFlags);
template void calcMean(PixelData<uint8_t>&, int, TypeOfMeanFlags);
template void getLocalIntensityScale(PixelData<float>&, PixelData<float>&, const APRParameters&);
| 915bc9ac1018cbbbaf8492df4adcbf4ddd04963c.cu | #include "LocalIntensityScaleCuda.h"
#include "LocalIntensityScale.hpp"
#include <iostream>
#include <memory>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math_functions.h>
#include "misc/CudaTools.hpp"
/**
*
* How it works along y-dir (let's suppose offset = 2 and number of workers = 8 for simplicity):
*
* image idx: 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2
*
* loop #1
* workersIdx 0 1 2 3 4 5 6 7
* loop #2
* workersIdx 6 7 0 1 2 3 4 5
* loop #3
* workersIdx 4 5 6 7 0 1 2 3
* ..............
*
* so #offset workers must wait in each loop to have next elements to sum
*
* @tparam T
* @param image
* @param offset
* @param x_num
* @param y_num
* @param z_num
*/
template <typename T>
__global__ void meanYdir(T *image, int offset, size_t x_num, size_t y_num, size_t z_num) {
// NOTE: Block size in x/z direction must be 1
const size_t workersOffset = (blockIdx.z * x_num + blockIdx.x) * y_num;
const int numOfWorkers = blockDim.y;
const unsigned int active = __activemask();
const int workerIdx = threadIdx.y;
int workerOffset = workerIdx;
int offsetInTheLoop = 0;
T sum = 0;
T v = 0;
bool waitForNextLoop = false;
int countNumOfSumElements = 1;
while(workerOffset < y_num) {
if (!waitForNextLoop) v = image[workersOffset + workerOffset];
bool waitForNextValues = (workerIdx + offsetInTheLoop) % numOfWorkers >= (numOfWorkers - offset);
for (int off = 1; off <= offset; ++off) {
T prevElement = __shfl_sync(active, v, workerIdx + blockDim.y - off, blockDim.y);
T nextElement = __shfl_sync(active, v, workerIdx + off, blockDim.y);
// LHS boundary check + don't add previous values if they were added in a previous loop execution
if (workerOffset >= off && !waitForNextLoop) {sum += prevElement; ++countNumOfSumElements;}
// RHS boundary check + don't read next values since they are not read yet
if (!waitForNextValues && workerOffset + off < y_num) {sum += nextElement; ++countNumOfSumElements;}
}
waitForNextLoop = waitForNextValues;
if (!waitForNextLoop) {
sum += v;
image[workersOffset + workerOffset] = sum / countNumOfSumElements;
// worker is done with current element - move to next one
sum = 0;
countNumOfSumElements = 1;
workerOffset += numOfWorkers;
}
offsetInTheLoop += offset;
}
}
constexpr int NumberOfWorkers = 32; // Cannot be greater than 32 since there is no inter-warp communication implemented.
/**
 * The X-direction filter slides a circular buffer along each row, adding each newly read element to the running sum and removing the element that drops out of the filter window.
* For instance (filter len = 5)
*
* idx: 0 1 2 3 4 5 6 7 8 9
* image elements: 1 2 2 4 5 3 2 1 3 4
*
* buffer: 2 3 4 5 2 current sum = 16 element @idx=4 will be updated to 16/5
*
* next step
* buffer: 3 4 5 2 1 sum = sum - 2 + 1 = 15 element @idx=5 = 15 / 5
*
* In general circular buffer is kept to speedup operations and to not reach to global memory more than once for
* read/write operations for given element.
*/
template <typename T>
__global__ void meanXdir(T *image, int offset, size_t x_num, size_t y_num, size_t z_num) {
const size_t workerOffset = blockIdx.y * blockDim.y + threadIdx.y + (blockIdx.z * blockDim.z + threadIdx.z) * y_num * x_num;
const int workerYoffset = blockIdx.y * blockDim.y + threadIdx.y ;
const int workerIdx = threadIdx.y;
const int nextElementOffset = y_num;
extern __shared__ float sharedMem[];
float (*data)[NumberOfWorkers] = (float (*)[NumberOfWorkers])sharedMem;
const int divisor = 2 * offset + 1;
int currElementOffset = 0;
int saveElementOffset = 0;
if (workerYoffset < y_num) {
// clear shared mem
for (int i = offset; i < divisor; ++i) data[i][workerIdx] = 0;
// saturate cache with #offset elements since it will allow to calculate first element value on LHS
float sum = 0;
int count = 0;
while (count < offset) {
T v = image[workerOffset + currElementOffset];
sum += v;
data[count][workerIdx] = v;
currElementOffset += nextElementOffset;
++count;
}
// Pointer in circular buffer
int beginPtr = offset;
// main loop going through all elements in range [0, x_num-offset)
for (int x = 0; x < x_num - offset; ++x) {
// Read new element
T v = image[workerOffset + currElementOffset];
// Update sum to cover [-offset, offset] of currently processed element
sum += v;
sum -= data[beginPtr][workerIdx];
// Save and move pointer
data[beginPtr][workerIdx] = v;
beginPtr = (beginPtr + 1) % divisor;
// Update count and save currently processed element
count = min(count + 1, divisor);
image[workerOffset + saveElementOffset] = sum / count;
// Move to next elements
currElementOffset += nextElementOffset;
saveElementOffset += nextElementOffset;
}
// Handle last #offset elements on RHS
while (saveElementOffset < currElementOffset) {
count = count - 1;
sum -= data[beginPtr][workerIdx];
image[workerOffset + saveElementOffset] = sum / count;
beginPtr = (beginPtr + 1) % divisor;
saveElementOffset += nextElementOffset;
}
}
}
/**
 * The Z-direction filter slides a circular buffer along the z axis, adding each newly read element to the running sum and removing the element that drops out of the filter window.
* For instance (filter len = 5)
*
* idx: 0 1 2 3 4 5 6 7 8 9
* image elements: 1 2 2 4 5 3 2 1 3 4
*
* buffer: 2 3 4 5 2 current sum = 16 element @idx=4 will be updated to 16/5
*
* next step
* buffer: 3 4 5 2 1 sum = sum - 2 + 1 = 15 element @idx=5 = 15 / 5
*
* In general circular buffer is kept to speedup operations and to not reach to global memory more than once for
* read/write operations for given element.
*/
template <typename T>
__global__ void meanZdir(T *image, int offset, size_t x_num, size_t y_num, size_t z_num) {
const size_t workerOffset = blockIdx.y * blockDim.y + threadIdx.y + (blockIdx.z * blockDim.z + threadIdx.z) * y_num; // *.z is 'x'
const int workerYoffset = blockIdx.y * blockDim.y + threadIdx.y ;
const int workerIdx = threadIdx.y;
const int nextElementOffset = x_num * y_num;
extern __shared__ float sharedMem[];
float (*data)[NumberOfWorkers] = (float (*)[NumberOfWorkers])sharedMem;
const int divisor = 2 * offset + 1;
int currElementOffset = 0;
int saveElementOffset = 0;
if (workerYoffset < y_num) {
// clear shared mem
for (int i = offset; i < divisor; ++i) data[i][workerIdx] = 0;
// saturate cache with #offset elements since it will allow to calculate first element value on LHS
float sum = 0;
int count = 0;
while (count < offset) {
T v = image[workerOffset + currElementOffset];
sum += v;
data[count][workerIdx] = v;
currElementOffset += nextElementOffset;
++count;
}
// Pointer in circular buffer
int beginPtr = offset;
// main loop going through all elements in range [0, x_num-offset)
for (int z = 0; z < z_num - offset; ++z) {
// Read new element
T v = image[workerOffset + currElementOffset];
// Update sum to cover [-offset, offset] of currently processed element
sum += v;
sum -= data[beginPtr][workerIdx];
// Save and move pointer
data[beginPtr][workerIdx] = v;
beginPtr = (beginPtr + 1) % divisor;
// Update count and save currently processed element
count = min(count + 1, divisor);
image[workerOffset + saveElementOffset] = sum / count;
// Move to next elements
currElementOffset += nextElementOffset;
saveElementOffset += nextElementOffset;
}
// Handle last #offset elements on RHS
while (saveElementOffset < currElementOffset) {
count = count - 1;
sum -= data[beginPtr][workerIdx];
image[workerOffset + saveElementOffset] = sum / count;
beginPtr = (beginPtr + 1) % divisor;
saveElementOffset += nextElementOffset;
}
}
}
template <typename T, typename S>
void localIntensityScaleCUDA(T *cudaImage, const PixelData<S> &image, int offsetX, int offsetY, int offsetZ, TypeOfMeanFlags flags) {
APRTimer timer(true);
if (flags & MEAN_Y_DIR) {
timer.start_timer("GpuDeviceTimeYdirLIS");
dim3 threadsPerBlock(1, NumberOfWorkers, 1);
dim3 numBlocks((image.x_num + threadsPerBlock.x - 1)/threadsPerBlock.x,
1,
(image.z_num + threadsPerBlock.z - 1)/threadsPerBlock.z);
printCudaDims(threadsPerBlock, numBlocks);
meanYdir<<<numBlocks,threadsPerBlock>>>(cudaImage, offsetY, image.x_num, image.y_num, image.z_num);
// waitForCuda();
timer.stop_timer();
}
if (flags & MEAN_X_DIR) {
// Shared memory size - it is able to keep filter len elements for each worker.
const int sharedMemorySize = (offsetX * 2 + 1) * sizeof(float) * NumberOfWorkers;
timer.start_timer("GpuDeviceTimeXdirLIS");
dim3 threadsPerBlock(1, NumberOfWorkers, 1);
dim3 numBlocks(1,
(image.y_num + threadsPerBlock.y - 1) / threadsPerBlock.y,
(image.z_num + threadsPerBlock.z - 1) / threadsPerBlock.z);
printCudaDims(threadsPerBlock, numBlocks);
meanXdir <<< numBlocks, threadsPerBlock, sharedMemorySize >>> (cudaImage, offsetX, image.x_num, image.y_num, image.z_num);
// waitForCuda();
timer.stop_timer();
}
if (flags & MEAN_Z_DIR) {
// Shared memory size - it is able to keep filter len elements for each worker.
const int sharedMemorySize = (offsetZ * 2 + 1) * sizeof(float) * NumberOfWorkers;
timer.start_timer("GpuDeviceTimeZdirLIS");
dim3 threadsPerBlock(1, NumberOfWorkers, 1);
dim3 numBlocks(1,
(image.y_num + threadsPerBlock.y - 1) / threadsPerBlock.y,
(image.x_num + threadsPerBlock.x - 1) / threadsPerBlock.x); // intentionally here for better memory readings
printCudaDims(threadsPerBlock, numBlocks);
meanZdir <<< numBlocks, threadsPerBlock, sharedMemorySize >>> (cudaImage, offsetZ, image.x_num, image.y_num, image.z_num);
// waitForCuda();
timer.stop_timer();
}
}
template <typename T>
void calcMean(PixelData<T> &image, int offset, TypeOfMeanFlags flags) {
APRTimer timer(true);
timer.start_timer("GpuMemTransferHostToDevice");
size_t imageSize = image.mesh.size() * sizeof(T);
T *cudaImage;
cudaMalloc(&cudaImage, imageSize);
cudaMemcpy(cudaImage, image.mesh.get(), imageSize, cudaMemcpyHostToDevice);
timer.stop_timer();
// --------- CUDA ----------------
timer.start_timer("GpuDeviceTimeFull");
localIntensityScaleCUDA(cudaImage, image, offset, offset, offset, flags);
timer.stop_timer();
timer.start_timer("cuda: transfer data from device and freeing memory");
cudaMemcpy((void*)image.mesh.get(), cudaImage, imageSize, cudaMemcpyDeviceToHost);
cudaFree(cudaImage);
timer.stop_timer();
}
template <typename T>
__global__ void copy1dKernel(const T *input, T *output, size_t len) {
size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
output[idx] = input[idx];
}
}
template <typename T>
void copy1d(const T *input, T *output, size_t len) {
dim3 threadsPerBlock(64);
dim3 numBlocks((len + threadsPerBlock.x - 1) / threadsPerBlock.x);
copy1dKernel <<< numBlocks, threadsPerBlock >>> (input, output, len);
}
template<typename T>
__global__ void absDiff1dKernel(T *data, const T *reference, size_t len) {
size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
data[idx] = abs(data[idx] - reference[idx]);
}
}
template <typename T>
void absDiff1d(T *data, const T *reference, size_t len) {
dim3 threadsPerBlock(64);
dim3 numBlocks((len + threadsPerBlock.x - 1) / threadsPerBlock.x);
absDiff1dKernel <<< numBlocks, threadsPerBlock >>> (data, reference, len);
}
template<typename T>
__global__ void rescaleKernel(T *data, size_t len, float varRescale, float sigmaThreshold, float sigmaThresholdMax) {
const float max_th = 60000.0;
size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
float rescaled = varRescale * data[idx];
if (rescaled < sigmaThreshold) {
rescaled = (rescaled < sigmaThresholdMax) ? max_th : sigmaThreshold;
}
data[idx] = rescaled;
}
}
template <typename T>
void rescale(T *data, size_t len, float varRescale, float sigma, float sigmaMax) {
dim3 threadsPerBlock(64);
dim3 numBlocks((len + threadsPerBlock.x - 1) / threadsPerBlock.x);
rescaleKernel <<< numBlocks, threadsPerBlock >>> (data, len, varRescale, sigma, sigmaMax);
}
template <typename T, typename S>
void localIntensityScaleCuda(const PixelData<T> &image, const APRParameters &par, S *cudaImage, S *cudaTemp) {
CudaTimer timer(true, "localIntensityScaleCuda");
float var_rescale;
std::vector<int> var_win;
LocalIntensityScale().get_window(var_rescale,var_win,par);
size_t win_y = var_win[0];
size_t win_x = var_win[1];
size_t win_z = var_win[2];
size_t win_y2 = var_win[3];
size_t win_x2 = var_win[4];
size_t win_z2 = var_win[5];
std::cout << "GPU WINDOWS: " << win_y << " " << win_x << " " << win_z << " " << win_y2 << " " << win_x2 << " " << win_z2 << std::endl;
// --------- CUDA ----------------
timer.start_timer("copy_intensities_from_bsplines");
copy1d(cudaImage, cudaTemp, image.mesh.size());
timer.stop_timer();
localIntensityScaleCUDA(cudaImage, image, win_x, win_y, win_z, MEAN_ALL_DIR);
timer.start_timer("second_pass_and_rescale");
absDiff1d(cudaImage, cudaTemp, image.mesh.size());
localIntensityScaleCUDA(cudaImage, image, win_x2, win_y2, win_z2, MEAN_ALL_DIR);
rescale(cudaImage, image.mesh.size(), var_rescale, par.sigma_th, par.sigma_th_max);
timer.stop_timer();
}
template <typename T>
void getLocalIntensityScale(PixelData<T> &image, PixelData<T> &temp, const APRParameters &par) {
APRTimer timer(true), timerFullPipelilne(true);
timer.start_timer("GpuMemTransferHostToDevice");
size_t imageSize = image.mesh.size() * sizeof(T);
T *cudaImage;
cudaMalloc(&cudaImage, imageSize);
cudaMemcpy(cudaImage, image.mesh.get(), imageSize, cudaMemcpyHostToDevice);
T *cudaTemp;
cudaMalloc(&cudaTemp, imageSize);
timer.stop_timer();
timerFullPipelilne.start_timer("GpuDeviceTimeFull");
localIntensityScaleCuda(image, par, cudaImage, cudaTemp);
timerFullPipelilne.stop_timer();
timer.start_timer("GpuMemTransferDeviceToHost");
getDataFromKernel(image, imageSize, cudaImage);
getDataFromKernel(temp, imageSize, cudaTemp);
timer.stop_timer();
}
// explicit instantiation of handled types
template void calcMean(PixelData<float>&, int, TypeOfMeanFlags);
template void calcMean(PixelData<uint16_t>&, int, TypeOfMeanFlags);
template void calcMean(PixelData<uint8_t>&, int, TypeOfMeanFlags);
template void getLocalIntensityScale(PixelData<float>&, PixelData<float>&, const APRParameters&);
|
824c016f67fc44b24686b8e0a8d2c18cb212dce8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const size_t DSIZE = 16384; // matrix side dimension
const int block_size = 1024; // CUDA maximum is 1024
// matrix row-sum kernel
__global__ void row_sums(const float *A, float *sums, size_t ds){
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create typical 1D thread index from built-in variables
if (idx < ds){
float sum = 0.0f;
for (size_t i = 0; i < ds; i++)
sum += A[idx*ds+i]; // write a for loop that will cause the thread to iterate across a row, keeping a running sum, and write the result to sums
sums[idx] = sum;
}}
// matrix row-sum kernel parallel
__global__ void row_sums_parallel(const float *A, float *sums, size_t ds){
__shared__ float shared_row[block_size];
size_t tid = threadIdx.x;
size_t bid = blockIdx.x;
size_t tidx = threadIdx.x;
shared_row[tid] = 0.0f;
while (tidx < ds) { // block stride loop to load row
shared_row[tid] += A[bid * ds + tidx];
tidx += blockDim.x;
}
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
__syncthreads();
if (tid < s)
shared_row[tid] += shared_row[tid + s];
}
if (tid == 0)
sums[bid] = shared_row[0];
}
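// The second loop above is a standard shared-memory tree reduction: with block_size = 1024 the
// active half shrinks 512 -> 256 -> ... -> 1, and on each pass thread tid adds
// shared_row[tid + s] into shared_row[tid] after the __syncthreads() barrier, so thread 0 ends
// up holding the full row sum that is written to sums[bid].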
// matrix column-sum kernel
__global__ void column_sums(const float *A, float *sums, size_t ds){
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create typical 1D thread index from built-in variables
if (idx < ds){
float sum = 0.0f;
for (size_t i = 0; i < ds; i++)
sum += A[idx+ds*i]; // write a for loop that will cause the thread to iterate down a column, keeping a running sum, and write the result to sums
sums[idx] = sum;
}}
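// Access-pattern note: in column_sums consecutive threads read consecutive addresses
// (A[idx + ds*i]), so the global loads coalesce, whereas in row_sums each thread walks its own
// row (A[idx*ds + i]) and neighbouring threads touch addresses ds elements apart.
// row_sums_parallel avoids that by letting a whole block cooperate on a single row.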
bool validate(float *data, size_t sz){
for (size_t i = 0; i < sz; i++)
if (data[i] != (float)sz) {printf("results mismatch at %lu, was: %f, should be: %f\n", i, data[i], (float)sz); return false;}
return true;
}
int main(){
float *h_A, *h_sums, *d_A, *d_sums;
h_A = new float[DSIZE*DSIZE]; // allocate space for data in host memory
h_sums = new float[DSIZE]();
for (int i = 0; i < DSIZE*DSIZE; i++) // initialize matrix in host memory
h_A[i] = 1.0f;
hipMalloc(&d_A, DSIZE*DSIZE*sizeof(float)); // allocate device space for A
hipMalloc(&d_sums, DSIZE*sizeof(float)); // allocate device space for vector d_sums
cudaCheckErrors("hipMalloc failure"); // error checking
// copy matrix A to device:
hipMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy H2D failure");
//cuda processing sequence step 1 is complete
hipLaunchKernelGGL(( row_sums), dim3((DSIZE+block_size-1)/block_size), dim3(block_size), 0, 0, d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
hipMemcpy(h_sums, d_sums, DSIZE*sizeof(float), hipMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("row sums correct!\n");
hipMemset(d_sums, 0, DSIZE*sizeof(float));
hipLaunchKernelGGL(( row_sums_parallel), dim3(DSIZE), dim3(block_size), 0, 0, d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
hipMemcpy(h_sums, d_sums, DSIZE*sizeof(float), hipMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("row sums parallel correct!\n");
hipMemset(d_sums, 0, DSIZE*sizeof(float));
hipLaunchKernelGGL(( column_sums), dim3((DSIZE+block_size-1)/block_size), dim3(block_size), 0, 0, d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
hipMemcpy(h_sums, d_sums, DSIZE*sizeof(float), hipMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("column sums correct!\n");
return 0;
}
| 824c016f67fc44b24686b8e0a8d2c18cb212dce8.cu | #include <stdio.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const size_t DSIZE = 16384; // matrix side dimension
const int block_size = 1024; // CUDA maximum is 1024
// matrix row-sum kernel
__global__ void row_sums(const float *A, float *sums, size_t ds){
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create typical 1D thread index from built-in variables
if (idx < ds){
float sum = 0.0f;
for (size_t i = 0; i < ds; i++)
sum += A[idx*ds+i]; // write a for loop that will cause the thread to iterate across a row, keeping a running sum, and write the result to sums
sums[idx] = sum;
}}
// matrix row-sum kernel parallel
__global__ void row_sums_parallel(const float *A, float *sums, size_t ds){
__shared__ float shared_row[block_size];
size_t tid = threadIdx.x;
size_t bid = blockIdx.x;
size_t tidx = threadIdx.x;
shared_row[tid] = 0.0f;
while (tidx < ds) { // block stride loop to load row
shared_row[tid] += A[bid * ds + tidx];
tidx += blockDim.x;
}
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
__syncthreads();
if (tid < s)
shared_row[tid] += shared_row[tid + s];
}
if (tid == 0)
sums[bid] = shared_row[0];
}
// matrix column-sum kernel
__global__ void column_sums(const float *A, float *sums, size_t ds){
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create typical 1D thread index from built-in variables
if (idx < ds){
float sum = 0.0f;
for (size_t i = 0; i < ds; i++)
sum += A[idx+ds*i]; // write a for loop that will cause the thread to iterate down a column, keeping a running sum, and write the result to sums
sums[idx] = sum;
}}
bool validate(float *data, size_t sz){
for (size_t i = 0; i < sz; i++)
if (data[i] != (float)sz) {printf("results mismatch at %lu, was: %f, should be: %f\n", i, data[i], (float)sz); return false;}
return true;
}
int main(){
float *h_A, *h_sums, *d_A, *d_sums;
h_A = new float[DSIZE*DSIZE]; // allocate space for data in host memory
h_sums = new float[DSIZE]();
for (int i = 0; i < DSIZE*DSIZE; i++) // initialize matrix in host memory
h_A[i] = 1.0f;
cudaMalloc(&d_A, DSIZE*DSIZE*sizeof(float)); // allocate device space for A
cudaMalloc(&d_sums, DSIZE*sizeof(float)); // allocate device space for vector d_sums
cudaCheckErrors("cudaMalloc failure"); // error checking
// copy matrix A to device:
cudaMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy H2D failure");
//cuda processing sequence step 1 is complete
row_sums<<<(DSIZE+block_size-1)/block_size, block_size>>>(d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
cudaMemcpy(h_sums, d_sums, DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("row sums correct!\n");
cudaMemset(d_sums, 0, DSIZE*sizeof(float));
row_sums_parallel<<<DSIZE, block_size>>>(d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
cudaMemcpy(h_sums, d_sums, DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("row sums parallel correct!\n");
cudaMemset(d_sums, 0, DSIZE*sizeof(float));
column_sums<<<(DSIZE+block_size-1)/block_size, block_size>>>(d_A, d_sums, DSIZE);
cudaCheckErrors("kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
cudaMemcpy(h_sums, d_sums, DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure");
if (!validate(h_sums, DSIZE)) return -1;
printf("column sums correct!\n");
return 0;
}
|
434f3c3365ab070f9e4fcd3d64a8d02d8489f797.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <iostream>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include "THH/THH.h"
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include <math.h>
#include "strided_batched_gemm.h"
#include "softmax.h"
#include "dropout.h"
#include "layer_norm.h"
// symbol to be automatically resolved by PyTorch libs
extern THCState *state;
namespace multihead_attn {
namespace encdec {
namespace cublas_gemmex {
std::vector<torch::Tensor> fwd_cuda(
bool use_time_mask,
bool is_training,
int heads,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
const uint8_t* pad_mask,
float dropout_prob
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
auto act_options = inputs_q.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
torch::Tensor outputs = torch::empty_like(inputs_q, act_options);
// Input Linear Results Pointers to Q, K, and V of interleaved activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
char a_layout_t{'t'};
char a_layout_n{'n'};
char b_layout_n{'n'};
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Input Linear Q Fwd
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
output_lin_q_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_q.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(inputs_q.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(&beta),
q_lin_results_ptr,
HIP_R_16F,
output_lin_q_dim,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Fwd
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
output_lin_kv_dim,
batches_kv,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_kv.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(inputs_kv.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(&beta),
k_lin_results_ptr,
HIP_R_16F,
output_lin_kv_dim,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
gemm_switch_fp32accum( state,
a_layout_t,
b_layout_n,
k_seq_len,
q_seq_len,
head_dim,
scale,
static_cast<const half*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const half*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
beta,
static_cast<half*>(softmax_results_ptr),
k_seq_len,
k_seq_len*q_seq_len,
attn_batches);
// Padded Softmax
bool softmax_success = false;
if (pad_mask == nullptr) {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len);
} else {
if (use_time_mask) {
softmax_success = dispatch_time_masked_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
pad_mask,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
q_seq_len);
} else {
softmax_success = dispatch_masked_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
pad_mask,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
attn_batches*q_seq_len/sequences);
}
}
assert(softmax_success);
if (is_training) {
apex_fused_dropout_cuda<at::Half,float,uint32_t>(
static_cast<at::Half const*>(softmax_results.data_ptr()),
static_cast<at::Half*>(dropout_results.data_ptr()),
static_cast<uint8_t*>(dropout_mask.data_ptr()),
dropout_elems,
(1.0f - dropout_prob));
}
// Matmul2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_n,
head_dim,
q_seq_len,
k_seq_len,
alpha,
static_cast<const half*>(v_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
(is_training) ? static_cast<const half*>(dropout_results.data_ptr()) : static_cast<const half*>(softmax_results.data_ptr()) ,
k_seq_len,
k_seq_len*q_seq_len,
beta,
static_cast<half*>(matmul2_results.data_ptr()),
head_dim*attn_batches,
head_dim,
attn_batches);
// Output Linear
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(output_weights.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(matmul2_results.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(outputs.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
//CUBLAS_GEMM_ALGO1_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_lin_q_results,
input_lin_kv_results,
softmax_results,
dropout_results,
dropout_mask,
matmul2_results,
outputs
};
}
std::vector<torch::Tensor> bwd_cuda(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& matmul2_results,
torch::Tensor const& dropout_results,
torch::Tensor const& softmax_results,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
float dropout_prob
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
char a_layout_n{'n'};
char a_layout_t{'t'};
char b_layout_n{'n'};
char b_layout_t{'t'};
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(output_weights.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(output_lin_grads.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Output Linear Wgrad
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
embed_dim,
batches_q,
static_cast<const void*>(&alpha),
static_cast<const void*>(matmul2_results.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(output_weight_grads.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul2 Dgrad1
gemm_switch_fp32accum( state,
a_layout_t,
b_layout_n,
k_seq_len,
q_seq_len,
head_dim,
alpha,
static_cast<const half*>(v_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const half*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
beta,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
attn_batches);
// Matmul2 Dgrad2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_t,
head_dim,
k_seq_len,
q_seq_len,
alpha,
static_cast<const half*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
static_cast<const half*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
v_lin_grads_ptr,
lead_dim_kv,
batch_stride_kv,
attn_batches);
// Apply Dropout Mask and Scale by Dropout Probability
apex_masked_scale_cuda<at::Half,float,uint32_t>(
static_cast<at::Half const*>(matmul2_grads.data_ptr()),
static_cast<at::Half*>(matmul2_grads.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
dropout_elems,
(1.0 / (1.0 - dropout_prob)));
// Softmax Grad
bool softmax_success = false;
softmax_success = dispatch_softmax_backward<half, half, float>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half*>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(softmax_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len);
assert(softmax_success);
// Matmul1 Dgrad1
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_n,
head_dim,
q_seq_len,
k_seq_len,
scale,
k_lin_results_ptr,
lead_dim_kv,
batch_stride_kv,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
q_lin_grads_ptr,
lead_dim_q,
batch_stride_q,
attn_batches);
// Matmul1 Dgrad2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_t,
head_dim,
k_seq_len,
q_seq_len,
scale,
q_lin_results_ptr,
lead_dim_q,
batch_stride_q,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
k_lin_grads_ptr,
lead_dim_kv,
batch_stride_kv,
attn_batches);
// Input Linear Q Dgrad
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_q.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
HIP_R_16F,
output_lin_q_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_q_grads.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Q Wgrad
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
output_lin_q_dim,
batches_q,
static_cast<const void*>(&alpha),
static_cast<const void*>(inputs_q.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
HIP_R_16F,
output_lin_q_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_weight_q_grads.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Dgrad
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_kv.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
HIP_R_16F,
output_lin_kv_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_kv_grads.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Wgrad
THCublasCheck(hipblasGemmEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
output_lin_kv_dim,
batches_kv,
static_cast<const void*>(&alpha),
static_cast<const void*>(inputs_kv.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
HIP_R_16F,
output_lin_kv_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_weight_kv_grads.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads
};
}
} // end namespace cublas_gemmex
} // end namespace encdec
} // end namespace multihead_attn
| 434f3c3365ab070f9e4fcd3d64a8d02d8489f797.cu | #include <vector>
#include <iostream>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include "THC/THC.h"
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include <math.h>
#include "strided_batched_gemm.h"
#include "softmax.h"
#include "dropout.h"
#include "layer_norm.h"
// symbol to be automatically resolved by PyTorch libs
extern THCState *state;
namespace multihead_attn {
namespace encdec {
namespace cublas_gemmex {
std::vector<torch::Tensor> fwd_cuda(
bool use_time_mask,
bool is_training,
int heads,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
const uint8_t* pad_mask,
float dropout_prob
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
auto act_options = inputs_q.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
torch::Tensor outputs = torch::empty_like(inputs_q, act_options);
// Input Linear Results Pointers to Q, K, and V of interleaved activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
char a_layout_t{'t'};
char a_layout_n{'n'};
char b_layout_n{'n'};
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Input Linear Q Fwd
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
output_lin_q_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(inputs_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
q_lin_results_ptr,
CUDA_R_16F,
output_lin_q_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Fwd
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
output_lin_kv_dim,
batches_kv,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(inputs_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
k_lin_results_ptr,
CUDA_R_16F,
output_lin_kv_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
gemm_switch_fp32accum( state,
a_layout_t,
b_layout_n,
k_seq_len,
q_seq_len,
head_dim,
scale,
static_cast<const half*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const half*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
beta,
static_cast<half*>(softmax_results_ptr),
k_seq_len,
k_seq_len*q_seq_len,
attn_batches);
// Padded Softmax
bool softmax_success = false;
if (pad_mask == nullptr) {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len);
} else {
if (use_time_mask) {
softmax_success = dispatch_time_masked_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
pad_mask,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
q_seq_len);
} else {
softmax_success = dispatch_masked_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
pad_mask,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
attn_batches*q_seq_len/sequences);
}
}
assert(softmax_success);
if (is_training) {
apex_fused_dropout_cuda<at::Half,float,uint32_t>(
static_cast<at::Half const*>(softmax_results.data_ptr()),
static_cast<at::Half*>(dropout_results.data_ptr()),
static_cast<uint8_t*>(dropout_mask.data_ptr()),
dropout_elems,
(1.0f - dropout_prob));
}
// Matmul2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_n,
head_dim,
q_seq_len,
k_seq_len,
alpha,
static_cast<const half*>(v_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
(is_training) ? static_cast<const half*>(dropout_results.data_ptr()) : static_cast<const half*>(softmax_results.data_ptr()) ,
k_seq_len,
k_seq_len*q_seq_len,
beta,
static_cast<half*>(matmul2_results.data_ptr()),
head_dim*attn_batches,
head_dim,
attn_batches);
// Output Linear
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(output_weights.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(matmul2_results.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(outputs.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
//CUBLAS_GEMM_ALGO1_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_lin_q_results,
input_lin_kv_results,
softmax_results,
dropout_results,
dropout_mask,
matmul2_results,
outputs
};
}
std::vector<torch::Tensor> bwd_cuda(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& matmul2_results,
torch::Tensor const& dropout_results,
torch::Tensor const& softmax_results,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
float dropout_prob
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
char a_layout_n{'n'};
char a_layout_t{'t'};
char b_layout_n{'n'};
char b_layout_t{'t'};
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(output_weights.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(output_lin_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Output Linear Wgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
embed_dim,
batches_q,
static_cast<const void*>(&alpha),
static_cast<const void*>(matmul2_results.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(output_weight_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul2 Dgrad1
gemm_switch_fp32accum( state,
a_layout_t,
b_layout_n,
k_seq_len,
q_seq_len,
head_dim,
alpha,
static_cast<const half*>(v_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const half*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
beta,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
attn_batches);
// Matmul2 Dgrad2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_t,
head_dim,
k_seq_len,
q_seq_len,
alpha,
static_cast<const half*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
static_cast<const half*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
v_lin_grads_ptr,
lead_dim_kv,
batch_stride_kv,
attn_batches);
// Apply Dropout Mask and Scale by Dropout Probability
apex_masked_scale_cuda<at::Half,float,uint32_t>(
static_cast<at::Half const*>(matmul2_grads.data_ptr()),
static_cast<at::Half*>(matmul2_grads.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
dropout_elems,
(1.0 / (1.0 - dropout_prob)));
// Softmax Grad
bool softmax_success = false;
softmax_success = dispatch_softmax_backward<half, half, float>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half*>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(softmax_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len);
assert(softmax_success);
// Matmul1 Dgrad1
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_n,
head_dim,
q_seq_len,
k_seq_len,
scale,
k_lin_results_ptr,
lead_dim_kv,
batch_stride_kv,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
q_lin_grads_ptr,
lead_dim_q,
batch_stride_q,
attn_batches);
// Matmul1 Dgrad2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_t,
head_dim,
k_seq_len,
q_seq_len,
scale,
q_lin_results_ptr,
lead_dim_q,
batch_stride_q,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
k_lin_grads_ptr,
lead_dim_kv,
batch_stride_kv,
attn_batches);
// Input Linear Q Dgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
CUDA_R_16F,
output_lin_q_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_q_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Q Wgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
output_lin_q_dim,
batches_q,
static_cast<const void*>(&alpha),
static_cast<const void*>(inputs_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
CUDA_R_16F,
output_lin_q_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_weight_q_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Dgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
CUDA_R_16F,
output_lin_kv_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_kv_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Wgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
output_lin_kv_dim,
batches_kv,
static_cast<const void*>(&alpha),
static_cast<const void*>(inputs_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
CUDA_R_16F,
output_lin_kv_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_weight_kv_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads
};
}
} // end namespace cublas_gemmex
} // end namespace encdec
} // end namespace multihead_attn
|
49fc4ef4c44265c13fbf191664c9f051d943ad82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
template <typename Dtype>
__global__ void
#if __CUDA_ARCH__ >= 320
__launch_bounds__(CUDA_NUM_THREADS)
#endif
LRNFillScale(const int nthreads, const Dtype* const in,
const int num, const int channels, const int height,
const int width, const int size, const Dtype alpha_over_size,
const Dtype k, Dtype* const scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const in_off = in + offset;
Dtype* const scale_off = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
Dtype accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
__global__ void LRNComputeOutput(const int nthreads, const float* in,
const float* scale, const float negative_beta, float* out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename Dtype>
__global__ void LRNComputeDiff(const int nthreads,
const Dtype* const bottom_data, const Dtype* const top_data,
const Dtype* const scale, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int size, const Dtype negative_beta,
const Dtype cache_ratio, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const bottom_off = bottom_data + offset;
const Dtype* const top_off = top_data + offset;
const Dtype* const scale_off = scale + offset;
const Dtype* const top_diff_off = top_diff + offset;
Dtype* const bottom_diff_off = bottom_diff + offset;
int head = 0;
const int pre_pad = size - (size + 1) / 2;
const int post_pad = size - pre_pad - 1;
Dtype accum_ratio = 0;
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
extern "C"
void LRNforward(THCState* state, THCudaTensor* input, THCudaTensor* output,
THCudaTensor* scale, int local_size, float alpha, float beta, float k)
{
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_resizeAs(state, scale, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->nDimension == 3) {
batchSize = 1;
nInputPlane = input->size[0];
imsize_h = input->size[1];
imsize_w = input->size[2];
}
else
{
batchSize = input->size[0];
nInputPlane = input->size[1];
imsize_h = input->size[2];
imsize_w = input->size[3];
}
input = THCudaTensor_newContiguous(state, input);
int n_threads = batchSize * imsize_h * imsize_w;
hipLaunchKernelGGL(( LRNFillScale), dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCudaTensor_data(state, input), batchSize, nInputPlane, imsize_h, imsize_w, local_size,
alpha / local_size, k, THCudaTensor_data(state, scale));
n_threads *= nInputPlane;
hipLaunchKernelGGL(( LRNComputeOutput), dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCudaTensor_data(state, input), THCudaTensor_data(state, scale), -beta, THCudaTensor_data(state, output));
THCudaTensor_free(state, input);
}
extern "C"
void LRNbackward(THCState* state, THCudaTensor* input, THCudaTensor* output,
THCudaTensor* gradOutput, THCudaTensor* gradInput, THCudaTensor* scale,
int local_size, float alpha, float beta, float k)
{
THCudaTensor_resizeAs(state, gradInput, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->nDimension == 3) {
batchSize = 1;
nInputPlane = input->size[0];
imsize_h = input->size[1];
imsize_w = input->size[2];
}
else
{
batchSize = input->size[0];
nInputPlane = input->size[1];
imsize_h = input->size[2];
imsize_w = input->size[3];
}
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
int n_threads = batchSize * imsize_h * imsize_w;
hipLaunchKernelGGL(( LRNComputeDiff), dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCudaTensor_data(state, input), THCudaTensor_data(state, output),
THCudaTensor_data(state, scale), THCudaTensor_data(state, gradOutput), batchSize, nInputPlane, imsize_h, imsize_w,
local_size, -beta, float(2. * alpha * beta / local_size),
THCudaTensor_data(state, gradInput));
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
void THNN_CudaSpatialCrossMapLRN_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *output,
THCudaTensor *scale,
int size,
float alpha,
float beta,
float k)
{
LRNforward(state, input, output, scale, size, alpha, beta, k);
}
void THNN_CudaSpatialCrossMapLRN_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
THCudaTensor *scale,
THCudaTensor *output,
int size,
float alpha,
float beta,
float k)
{
LRNbackward(state, input, output, gradOutput, gradInput, scale, size, alpha, beta, k);
}
| 49fc4ef4c44265c13fbf191664c9f051d943ad82.cu | #include "THCUNN.h"
#include "common.h"
template <typename Dtype>
__global__ void
#if __CUDA_ARCH__ >= 320
__launch_bounds__(CUDA_NUM_THREADS)
#endif
LRNFillScale(const int nthreads, const Dtype* const in,
const int num, const int channels, const int height,
const int width, const int size, const Dtype alpha_over_size,
const Dtype k, Dtype* const scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const in_off = in + offset;
Dtype* const scale_off = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
Dtype accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
__global__ void LRNComputeOutput(const int nthreads, const float* in,
const float* scale, const float negative_beta, float* out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename Dtype>
__global__ void LRNComputeDiff(const int nthreads,
const Dtype* const bottom_data, const Dtype* const top_data,
const Dtype* const scale, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int size, const Dtype negative_beta,
const Dtype cache_ratio, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const bottom_off = bottom_data + offset;
const Dtype* const top_off = top_data + offset;
const Dtype* const scale_off = scale + offset;
const Dtype* const top_diff_off = top_diff + offset;
Dtype* const bottom_diff_off = bottom_diff + offset;
int head = 0;
const int pre_pad = size - (size + 1) / 2;
const int post_pad = size - pre_pad - 1;
Dtype accum_ratio = 0;
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
extern "C"
void LRNforward(THCState* state, THCudaTensor* input, THCudaTensor* output,
THCudaTensor* scale, int local_size, float alpha, float beta, float k)
{
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_resizeAs(state, scale, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->nDimension == 3) {
batchSize = 1;
nInputPlane = input->size[0];
imsize_h = input->size[1];
imsize_w = input->size[2];
}
else
{
batchSize = input->size[0];
nInputPlane = input->size[1];
imsize_h = input->size[2];
imsize_w = input->size[3];
}
input = THCudaTensor_newContiguous(state, input);
int n_threads = batchSize * imsize_h * imsize_w;
LRNFillScale<<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCudaTensor_data(state, input), batchSize, nInputPlane, imsize_h, imsize_w, local_size,
alpha / local_size, k, THCudaTensor_data(state, scale));
n_threads *= nInputPlane;
LRNComputeOutput<<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCudaTensor_data(state, input), THCudaTensor_data(state, scale), -beta, THCudaTensor_data(state, output));
THCudaTensor_free(state, input);
}
extern "C"
void LRNbackward(THCState* state, THCudaTensor* input, THCudaTensor* output,
THCudaTensor* gradOutput, THCudaTensor* gradInput, THCudaTensor* scale,
int local_size, float alpha, float beta, float k)
{
THCudaTensor_resizeAs(state, gradInput, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->nDimension == 3) {
batchSize = 1;
nInputPlane = input->size[0];
imsize_h = input->size[1];
imsize_w = input->size[2];
}
else
{
batchSize = input->size[0];
nInputPlane = input->size[1];
imsize_h = input->size[2];
imsize_w = input->size[3];
}
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
int n_threads = batchSize * imsize_h * imsize_w;
LRNComputeDiff<<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCudaTensor_data(state, input), THCudaTensor_data(state, output),
THCudaTensor_data(state, scale), THCudaTensor_data(state, gradOutput), batchSize, nInputPlane, imsize_h, imsize_w,
local_size, -beta, float(2. * alpha * beta / local_size),
THCudaTensor_data(state, gradInput));
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
void THNN_CudaSpatialCrossMapLRN_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *output,
THCudaTensor *scale,
int size,
float alpha,
float beta,
float k)
{
LRNforward(state, input, output, scale, size, alpha, beta, k);
}
void THNN_CudaSpatialCrossMapLRN_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
THCudaTensor *scale,
THCudaTensor *output,
int size,
float alpha,
float beta,
float k)
{
LRNbackward(state, input, output, gradOutput, gradInput, scale, size, alpha, beta, k);
}
|
57bdae010e2183706010af5eec53800f79d192e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_add_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h"
#include "paddle/fluid/platform/float16.h"
namespace ops = paddle::operators;
namespace plat = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
struct SameDimsElemwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
framework::Tensor* z) {
AddRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>());
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx,
x->numel());
for_range(functor);
}
};
template <>
struct SameDimsElemwiseAdd<platform::CUDADeviceContext, platform::float16> {
void operator()(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
framework::Tensor* z) {
auto size = x->numel();
dim3 grid_size = dim3(((size + 1) / 2 + PADDLE_CUDA_THREAD_SIZE - 1) /
PADDLE_CUDA_THREAD_SIZE,
1);
dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1);
const half* x2 =
reinterpret_cast<const half*>(x->data<platform::float16>());
const half* y2 =
reinterpret_cast<const half*>(y->data<platform::float16>());
half* z2 = reinterpret_cast<half*>(z->data<platform::float16>());
hipLaunchKernelGGL(( SameDimsElemwiseAddCUDAKernel),
dim3(grid_size), dim3(block_size), 0,
ctx.template device_context<platform::CUDADeviceContext>().stream(),
x2, y2, z2, size);
}
};
template <typename T>
static __global__ void SimpleElemwiseAddGradCUDAKernel(const T* dout,
int64_t size, T* dx,
T* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
dx[col] = dout[col];
dy[col] = dout[col];
col += blockDim.x * gridDim.x;
}
}
template <typename DeviceContext, typename T>
typename std::enable_if<
std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type
elementwise_add_grad(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
const framework::Tensor* out,
const framework::Tensor* dout, framework::Tensor* dx,
framework::Tensor* dy) {
dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1);
auto size = x->numel();
dim3 grid_size =
dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1);
hipLaunchKernelGGL(( SimpleElemwiseAddGradCUDAKernel<
T>), dim3(grid_size), dim3(block_size), 0,
ctx.template device_context<plat::CUDADeviceContext>().stream(),
dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()),
dy->mutable_data<T>(ctx.GetPlace()));
}
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
elementwise_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad_grad,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext,
plat::float16>);
REGISTER_OP_CUDA_KERNEL(
grad_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>);
| 57bdae010e2183706010af5eec53800f79d192e2.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_add_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h"
#include "paddle/fluid/platform/float16.h"
namespace ops = paddle::operators;
namespace plat = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
struct SameDimsElemwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
framework::Tensor* z) {
AddRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>());
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx,
x->numel());
for_range(functor);
}
};
template <>
struct SameDimsElemwiseAdd<platform::CUDADeviceContext, platform::float16> {
void operator()(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
framework::Tensor* z) {
auto size = x->numel();
dim3 grid_size = dim3(((size + 1) / 2 + PADDLE_CUDA_THREAD_SIZE - 1) /
PADDLE_CUDA_THREAD_SIZE,
1);
dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1);
const half* x2 =
reinterpret_cast<const half*>(x->data<platform::float16>());
const half* y2 =
reinterpret_cast<const half*>(y->data<platform::float16>());
half* z2 = reinterpret_cast<half*>(z->data<platform::float16>());
SameDimsElemwiseAddCUDAKernel<<<
grid_size, block_size, 0,
ctx.template device_context<platform::CUDADeviceContext>().stream()>>>(
x2, y2, z2, size);
}
};
template <typename T>
static __global__ void SimpleElemwiseAddGradCUDAKernel(const T* dout,
int64_t size, T* dx,
T* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
dx[col] = dout[col];
dy[col] = dout[col];
col += blockDim.x * gridDim.x;
}
}
template <typename DeviceContext, typename T>
typename std::enable_if<
std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type
elementwise_add_grad(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
const framework::Tensor* out,
const framework::Tensor* dout, framework::Tensor* dx,
framework::Tensor* dy) {
dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1);
auto size = x->numel();
dim3 grid_size =
dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1);
SimpleElemwiseAddGradCUDAKernel<
T><<<grid_size, block_size, 0,
ctx.template device_context<plat::CUDADeviceContext>().stream()>>>(
dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()),
dy->mutable_data<T>(ctx.GetPlace()));
}
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
elementwise_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad_grad,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext,
plat::float16>);
REGISTER_OP_CUDA_KERNEL(
grad_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>);
|
2357539660fa8b439a0027f1ea8692a92b83ca34.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
int main()
{
int count = 0;
if (hipSuccess != hipGetDeviceCount(&count)) return -1;
if (count == 0) return -1;
for (int device = 0; device < count; ++device)
{
hipDeviceProp_t prop;
if (hipSuccess == hipGetDeviceProperties(&prop, device))
std::printf("%d.%d;", prop.major, prop.minor);
}
return 0;
}
| 2357539660fa8b439a0027f1ea8692a92b83ca34.cu | #include <cstdio>
int main()
{
int count = 0;
if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;
if (count == 0) return -1;
for (int device = 0; device < count; ++device)
{
cudaDeviceProp prop;
if (cudaSuccess == cudaGetDeviceProperties(&prop, device))
std::printf("%d.%d;", prop.major, prop.minor);
}
return 0;
}
|
dcaaed9c361d0452646ba2ba954195edf8bb667a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
@author Mark Gates
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset.
*/
__global__
void zlacpy_full(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to zlacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset.
*/
__global__
void zlacpy_lower(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to zlacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset.
*/
__global__
void zlacpy_upper(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/**
Purpose
-------
ZLACPY_STREAM copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as ZLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by n matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX_16 array, dimension (LDDB,N)
The m by n matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const magmaDoubleComplex *dA, magma_int_t ldda,
magmaDoubleComplex *dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X );
dim3 grid( (m + BLK_X - 1)/BLK_X, (n + BLK_Y - 1)/BLK_Y );
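    /* Added note (not in the original MAGMA source): the grid is ceil(m/BLK_X) x
       ceil(n/BLK_Y) blocks of BLK_X threads, e.g. m=100, n=70 launches a 2 x 3 grid
       of 64-thread blocks, and each thread updates up to BLK_Y=32 entries of one row. */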
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( zlacpy_lower), dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( zlacpy_upper), dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dB, lddb );
}
else {
hipLaunchKernelGGL(( zlacpy_full) , dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dB, lddb );
}
}
/**
@see magmablas_zlacpy_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const magmaDoubleComplex *dA, magma_int_t ldda,
magmaDoubleComplex *dB, magma_int_t lddb )
{
magmablas_zlacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream );
}
| dcaaed9c361d0452646ba2ba954195edf8bb667a.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
@author Mark Gates
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset.
*/
__global__
void zlacpy_full(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to zlacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset.
*/
__global__
void zlacpy_lower(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to zlacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset.
*/
__global__
void zlacpy_upper(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/**
Purpose
-------
ZLACPY_STREAM copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as ZLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by n matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX_16 array, dimension (LDDB,N)
The m by n matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const magmaDoubleComplex *dA, magma_int_t ldda,
magmaDoubleComplex *dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X );
dim3 grid( (m + BLK_X - 1)/BLK_X, (n + BLK_Y - 1)/BLK_Y );
if ( uplo == MagmaLower ) {
zlacpy_lower<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
zlacpy_upper<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dB, lddb );
}
else {
zlacpy_full <<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dB, lddb );
}
}
/**
@see magmablas_zlacpy_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const magmaDoubleComplex *dA, magma_int_t ldda,
magmaDoubleComplex *dB, magma_int_t lddb )
{
magmablas_zlacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream );
}
|
c52cc9acb48e712ff77fce4ae4bc185a4a434966.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void rgbUtoLab3F_kernel(int width, int height, float gamma, unsigned int* rgbU, float* devL, float* devA, float* devB) {
int x0 = blockDim.x * blockIdx.x + threadIdx.x;
int y0 = blockDim.y * blockIdx.y + threadIdx.y;
if ((x0 < width) && (y0 < height)) {
int index = y0 * width + x0;
unsigned int rgb = rgbU[index];
float r = (float)(rgb & 0xff)/255.0;
float g = (float)((rgb & 0xff00) >> 8)/255.0;
float b = (float)((rgb & 0xff0000) >> 16)/255.0;
r = powf(r, gamma);
g = powf(g, gamma);
b = powf(b, gamma);
float x = (0.412453 * r) + (0.357580 * g) + (0.180423 * b);
float y = (0.212671 * r) + (0.715160 * g) + (0.072169 * b);
float z = (0.019334 * r) + (0.119193 * g) + (0.950227 * b);
/*D65 white point reference */
const float x_ref = 0.950456;
const float y_ref = 1.000000;
const float z_ref = 1.088754;
/* threshold value */
const float threshold = 0.008856;
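    /* Added note: 0.008856 is the CIELAB linearity threshold (6/29)^3 = 216/24389;
       below it the cube root is replaced by the linear branch 7.787*t + 16/116. */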
x = x / x_ref;
y = y / y_ref;
z = z / z_ref;
float fx =
(x > threshold) ? powf(x,(1.0/3.0)) : (7.787*x + (16.0/116.0));
float fy =
(y > threshold) ? powf(y,(1.0/3.0)) : (7.787*y + (16.0/116.0));
float fz =
(z > threshold) ? powf(z,(1.0/3.0)) : (7.787*z + (16.0/116.0));
/* compute Lab color value */
devL[index] =
(y > threshold) ? (116*powf(y,(1.0/3.0)) - 16) : (903.3*y);
devA[index] = 500.0f * (fx - fy);
devB[index] = 200.0f * (fy - fz);
}
} | c52cc9acb48e712ff77fce4ae4bc185a4a434966.cu | #include "includes.h"
__global__ void rgbUtoLab3F_kernel(int width, int height, float gamma, unsigned int* rgbU, float* devL, float* devA, float* devB) {
int x0 = blockDim.x * blockIdx.x + threadIdx.x;
int y0 = blockDim.y * blockIdx.y + threadIdx.y;
if ((x0 < width) && (y0 < height)) {
int index = y0 * width + x0;
unsigned int rgb = rgbU[index];
float r = (float)(rgb & 0xff)/255.0;
float g = (float)((rgb & 0xff00) >> 8)/255.0;
float b = (float)((rgb & 0xff0000) >> 16)/255.0;
r = powf(r, gamma);
g = powf(g, gamma);
b = powf(b, gamma);
float x = (0.412453 * r) + (0.357580 * g) + (0.180423 * b);
float y = (0.212671 * r) + (0.715160 * g) + (0.072169 * b);
float z = (0.019334 * r) + (0.119193 * g) + (0.950227 * b);
/*D65 white point reference */
const float x_ref = 0.950456;
const float y_ref = 1.000000;
const float z_ref = 1.088754;
/* threshold value */
const float threshold = 0.008856;
x = x / x_ref;
y = y / y_ref;
z = z / z_ref;
float fx =
(x > threshold) ? powf(x,(1.0/3.0)) : (7.787*x + (16.0/116.0));
float fy =
(y > threshold) ? powf(y,(1.0/3.0)) : (7.787*y + (16.0/116.0));
float fz =
(z > threshold) ? powf(z,(1.0/3.0)) : (7.787*z + (16.0/116.0));
/* compute Lab color value */
devL[index] =
(y > threshold) ? (116*powf(y,(1.0/3.0)) - 16) : (903.3*y);
devA[index] = 500.0f * (fx - fy);
devB[index] = 200.0f * (fy - fz);
}
} |
17f8be9350dd8c54b16c5f19cd219d37dd2a3365.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Gauge>
struct GaugePlaqArg : public ReduceArg<double2> {
int threads; // number of active threads required
int E[4]; // extended grid dimensions
int X[4]; // true grid dimensions
int border[4];
Gauge dataOr;
GaugePlaqArg(const Gauge &dataOr, const GaugeField &data)
: ReduceArg<double2>(), dataOr(dataOr)
{
int R = 0;
for (int dir=0; dir<4; ++dir){
border[dir] = data.R()[dir];
E[dir] = data.X()[dir];
X[dir] = data.X()[dir] - border[dir]*2;
R += border[dir];
}
threads = X[0]*X[1]*X[2]*X[3]/2;
}
};
template<int blockSize, typename Float, typename Gauge>
__global__ void computePlaq(GaugePlaqArg<Gauge> arg){
typedef Matrix<complex<Float>,3> Link;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y;
double2 plaq = make_double2(0.0,0.0);
if(idx < arg.threads) {
int x[4];
getCoords(x, idx, arg.X, parity);
for (int dr=0; dr<4; ++dr) x[dr] += arg.border[dr]; // extended grid coordinates
int dx[4] = {0, 0, 0, 0};
for (int mu = 0; mu < 3; mu++) {
for (int nu = (mu+1); nu < 3; nu++) {
Link U1 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), parity);
dx[mu]++;
Link U2 = arg.dataOr(nu, linkIndexShift(x,dx,arg.E), 1-parity);
dx[mu]--;
dx[nu]++;
Link U3 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), 1-parity);
dx[nu]--;
Link U4 = arg.dataOr(nu, linkIndexShift(x,dx,arg.E), parity);
plaq.x += getTrace( U1 * U2 * conj(U3) * conj(U4) ).x;
}
Link U1 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), parity);
dx[mu]++;
Link U2 = arg.dataOr(3, linkIndexShift(x,dx,arg.E), 1-parity);
dx[mu]--;
dx[3]++;
Link U3 = arg.dataOr(mu,linkIndexShift(x,dx,arg.E), 1-parity);
dx[3]--;
Link U4 = arg.dataOr(3, linkIndexShift(x,dx,arg.E), parity);
plaq.y += getTrace( U1 * U2 * conj(U3) * conj(U4) ).x;
}
}
// perform final inter-block reduction and write out result
reduce2d<blockSize,2>(arg, plaq);
}
template<typename Float, typename Gauge>
class GaugePlaq : TunableLocalParity {
GaugePlaqArg<Gauge> arg;
const QudaFieldLocation location;
private:
unsigned int minThreads() const { return arg.threads; }
public:
GaugePlaq(GaugePlaqArg<Gauge> &arg, QudaFieldLocation location)
: arg(arg), location(location) {}
~GaugePlaq () { }
void apply(const hipStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
arg.result_h[0] = make_double2(0.,0.);
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_LOCAL_PARITY(computePlaq, tp, stream, arg, Float, Gauge);
hipDeviceSynchronize();
} else {
errorQuda("CPU not supported yet\n");
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
long long flops() const { return 6ll*2*arg.threads*(3*198+3); }
long long bytes() const { return 6ll*4*2*arg.threads*arg.dataOr.Bytes(); }
};
template<typename Float, typename Gauge>
void plaquette(const Gauge dataOr, const GaugeField& data, double2 &plq, QudaFieldLocation location) {
GaugePlaqArg<Gauge> arg(dataOr, data);
GaugePlaq<Float,Gauge> gaugePlaq(arg, location);
gaugePlaq.apply(0);
comm_allreduce_array((double*) arg.result_h, 2);
arg.result_h[0].x /= 9.*(2*arg.threads*comm_size());
arg.result_h[0].y /= 9.*(2*arg.threads*comm_size());
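    // Added note (assumption based on the loop structure above): 2*arg.threads*comm_size()
    // is the global lattice volume and 9 = 3 (colour trace) * 3 (plaquette orientations),
    // so plq.x / plq.y end up as the average spatial / temporal plaquette.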
plq.x = arg.result_h[0].x;
plq.y = arg.result_h[0].y;
}
template<typename Float>
void plaquette(const GaugeField& data, double2 &plq, QudaFieldLocation location) {
INSTANTIATE_RECONSTRUCT(plaquette<Float>, data, plq, location);
}
#endif
double3 plaquette(const GaugeField& data, QudaFieldLocation location) {
#ifdef GPU_GAUGE_TOOLS
double2 plq;
INSTANTIATE_PRECISION(plaquette, data, plq, location);
double3 plaq = make_double3(0.5*(plq.x + plq.y), plq.x, plq.y);
#else
errorQuda("Gauge tools are not build");
double3 plaq = make_double3(0., 0., 0.);
#endif
return plaq;
}
} // namespace quda
| 17f8be9350dd8c54b16c5f19cd219d37dd2a3365.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Gauge>
struct GaugePlaqArg : public ReduceArg<double2> {
int threads; // number of active threads required
int E[4]; // extended grid dimensions
int X[4]; // true grid dimensions
int border[4];
Gauge dataOr;
GaugePlaqArg(const Gauge &dataOr, const GaugeField &data)
: ReduceArg<double2>(), dataOr(dataOr)
{
int R = 0;
for (int dir=0; dir<4; ++dir){
border[dir] = data.R()[dir];
E[dir] = data.X()[dir];
X[dir] = data.X()[dir] - border[dir]*2;
R += border[dir];
}
threads = X[0]*X[1]*X[2]*X[3]/2;
}
};
template<int blockSize, typename Float, typename Gauge>
__global__ void computePlaq(GaugePlaqArg<Gauge> arg){
typedef Matrix<complex<Float>,3> Link;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y;
double2 plaq = make_double2(0.0,0.0);
if(idx < arg.threads) {
int x[4];
getCoords(x, idx, arg.X, parity);
for (int dr=0; dr<4; ++dr) x[dr] += arg.border[dr]; // extended grid coordinates
int dx[4] = {0, 0, 0, 0};
for (int mu = 0; mu < 3; mu++) {
for (int nu = (mu+1); nu < 3; nu++) {
Link U1 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), parity);
dx[mu]++;
Link U2 = arg.dataOr(nu, linkIndexShift(x,dx,arg.E), 1-parity);
dx[mu]--;
dx[nu]++;
Link U3 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), 1-parity);
dx[nu]--;
Link U4 = arg.dataOr(nu, linkIndexShift(x,dx,arg.E), parity);
plaq.x += getTrace( U1 * U2 * conj(U3) * conj(U4) ).x;
}
Link U1 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), parity);
dx[mu]++;
Link U2 = arg.dataOr(3, linkIndexShift(x,dx,arg.E), 1-parity);
dx[mu]--;
dx[3]++;
Link U3 = arg.dataOr(mu,linkIndexShift(x,dx,arg.E), 1-parity);
dx[3]--;
Link U4 = arg.dataOr(3, linkIndexShift(x,dx,arg.E), parity);
plaq.y += getTrace( U1 * U2 * conj(U3) * conj(U4) ).x;
}
}
// perform final inter-block reduction and write out result
reduce2d<blockSize,2>(arg, plaq);
}
template<typename Float, typename Gauge>
class GaugePlaq : TunableLocalParity {
GaugePlaqArg<Gauge> arg;
const QudaFieldLocation location;
private:
unsigned int minThreads() const { return arg.threads; }
public:
GaugePlaq(GaugePlaqArg<Gauge> &arg, QudaFieldLocation location)
: arg(arg), location(location) {}
~GaugePlaq () { }
void apply(const cudaStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
arg.result_h[0] = make_double2(0.,0.);
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_LOCAL_PARITY(computePlaq, tp, stream, arg, Float, Gauge);
cudaDeviceSynchronize();
} else {
errorQuda("CPU not supported yet\n");
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
long long flops() const { return 6ll*2*arg.threads*(3*198+3); }
long long bytes() const { return 6ll*4*2*arg.threads*arg.dataOr.Bytes(); }
};
template<typename Float, typename Gauge>
void plaquette(const Gauge dataOr, const GaugeField& data, double2 &plq, QudaFieldLocation location) {
GaugePlaqArg<Gauge> arg(dataOr, data);
GaugePlaq<Float,Gauge> gaugePlaq(arg, location);
gaugePlaq.apply(0);
comm_allreduce_array((double*) arg.result_h, 2);
arg.result_h[0].x /= 9.*(2*arg.threads*comm_size());
arg.result_h[0].y /= 9.*(2*arg.threads*comm_size());
plq.x = arg.result_h[0].x;
plq.y = arg.result_h[0].y;
}
template<typename Float>
void plaquette(const GaugeField& data, double2 &plq, QudaFieldLocation location) {
INSTANTIATE_RECONSTRUCT(plaquette<Float>, data, plq, location);
}
#endif
double3 plaquette(const GaugeField& data, QudaFieldLocation location) {
#ifdef GPU_GAUGE_TOOLS
double2 plq;
INSTANTIATE_PRECISION(plaquette, data, plq, location);
double3 plaq = make_double3(0.5*(plq.x + plq.y), plq.x, plq.y);
#else
errorQuda("Gauge tools are not build");
double3 plaq = make_double3(0., 0., 0.);
#endif
return plaq;
}
} // namespace quda
|
00973cf0d79ee9616129ffb150b630d7e2d87e3b.hip | // !!! This is a file automatically generated by hipify!!!
/* -----------------------------------------------------------------
* Programmer(s): Cody J.Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the SUNLinSol cuSolverSp
* module implementation.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <sundials/sundials_types.h>
#include <sunlinsol/sunlinsol_cusolversp_batchqr.h>
#include <sunmatrix/sunmatrix_dense.h>
#include <sunmatrix/sunmatrix_sparse.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include <nvector/nvector_cuda.h>
#include <nvector/nvector_serial.h>
#include <sundials/sundials_math.h>
#include "test_sunlinsol.h"
/* ----------------------------------------------------------------------
 * SUNLinSol_cuSolverSp_batchQR Linear Solver Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
int fails = 0; /* counter for test failures */
sunindextype N; /* matrix columns, rows */
int block_size; /* matrix block columns, rows */
int block_nnz; /* number of nonzeros in a block */
int block_nnz_max; /* max nonzeros per block */
int nblocks; /* number of blocks */
SUNLinearSolver LS; /* linear solver object */
SUNMatrix A, B, dA; /* test matrices */
N_Vector x, b, d_x, d_xref, d_b;/* test vectors */
realtype *matdata, *xdata, *xrefdata;
int print_timing;
sunindextype i, j;
hipsparseStatus_t cusp_status;
cusolverStatus_t cusol_status;
hipsparseHandle_t cusp_handle;
cusolverSpHandle_t cusol_handle;
/* check input and set matrix dimensions */
if (argc < 4){
printf("ERROR: THREE (3) Inputs required: matrix block size, number of blocks, print timing \n");
return(-1);
}
block_size = atol(argv[1]);
if (block_size <= 0) {
printf("ERROR: matrix size must be a positive integer \n");
return(-1);
}
block_nnz_max = block_size*block_size / 4;
nblocks = atol(argv[2]);
if (nblocks <= 0) {
printf("ERROR: number of blocks must be a positive integer \n");
return(-1);
}
  /* calculate the size of the overall matrix */
N = block_size * nblocks;
print_timing = atoi(argv[3]);
SetTiming(print_timing);
printf("\ncuSolverSp linear solver test: size %ld, block size %ld, number of blocks %d\n\n",
(long int) N, (long int) block_size, (long int) nblocks);
/* Initialize cuSPARSE */
cusp_status = hipsparseCreate(&cusp_handle);
if (cusp_status != HIPSPARSE_STATUS_SUCCESS) {
printf("ERROR: could not create cuSPARSE handle\n");
return(-1);
}
/* Initialize cuSOLVER */
cusol_status = cusolverSpCreate(&cusol_handle);
if (cusol_status != CUSOLVER_STATUS_SUCCESS) {
printf("ERROR: could not create cuSOLVER handle\n");
return(-1);
}
/* Create matrices and vectors */
B = SUNDenseMatrix(N, N);
d_x = N_VNew_Cuda(N);
d_xref = N_VNew_Cuda(N);
d_b = N_VNew_Cuda(N);
x = N_VMake_Serial(N, N_VGetHostArrayPointer_Cuda(d_x));
b = N_VMake_Serial(N, N_VGetHostArrayPointer_Cuda(d_b));
/* Zero the matrix */
fails = SUNMatZero(B);
/* Create sparsity pattern for a block. */
sunindextype *cols = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
sunindextype *rows = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
for (i=0; i<block_nnz_max; i++) {
cols[i] = rand() % block_size;
rows[i] = rand() % block_size;
}
/* Fill matrix with uniform random data in [0,1/N] */
for (i=0; i<nblocks; i++) {
for (j=0; j<block_nnz_max; j++) {
sunindextype col = cols[j] + block_size*i;
sunindextype row = rows[j] + block_size*i;
matdata = SUNDenseMatrix_Column(B,col);
matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
}
}
/* Free temporary rows and cols variables */
free(cols); free(rows);
/* Add identity to B */
fails = SUNMatScaleAddI(ONE, B);
if (fails) {
printf("FAIL: SUNMatScaleAddI failure\n");
return(1);
}
/* Create sparse matrix from dense, and destroy B */
A = SUNSparseFromDenseMatrix(B, ZERO, CSR_MAT);
SUNMatDestroy(B);
/* Calculate actual number of nonzeros per block */
block_nnz = SUNSparseMatrix_NNZ(A) / nblocks;
/* Create the device matrix */
dA = SUNMatrix_cuSparse_NewBlockCSR(nblocks, block_size, block_size, block_nnz, cusp_handle);
if (dA == NULL) {
printf("ERROR: could not create dA\n");
}
/* Copy data to device */
fails = SUNMatrix_cuSparse_CopyToDevice(dA, SUNSparseMatrix_Data(A),
SUNSparseMatrix_IndexPointers(A),
SUNSparseMatrix_IndexValues(A));
if (fails != 0) {
printf("ERROR: could not copy A to the device\n");
return(-1);
}
/* Fill x vector with uniform random data in [0,1] */
xdata = N_VGetHostArrayPointer_Cuda(d_x);
xrefdata = N_VGetHostArrayPointer_Cuda(d_xref);
for (i=0; i<N; i++) {
realtype tmp = (realtype) rand() / (realtype) RAND_MAX;
xdata[i] = tmp;
xrefdata[i] = tmp;
}
N_VCopyToDevice_Cuda(d_x);
N_VCopyToDevice_Cuda(d_xref);
  /* Synchronize before performing dense operation on CPU */
hipDeviceSynchronize();
/* create right-hand side vector for linear solve */
fails = SUNMatMatvec(A, x, b);
if (fails) {
printf("FAIL: SUNLinSol SUNMatMatvec failure\n");
return(1);
}
N_VCopyToDevice_Cuda(d_b);
/* Create cuSolverSp linear solver
* The BatchedQR method allows you to solve many small subsystems in parallel.
*/
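  /* Added note: dA was built above as nblocks independent block_size x block_size CSR
     blocks that share one sparsity pattern, which is the layout the batched-QR solver
     expects -- it performs one sparse QR factorization per block, in parallel. */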
LS = SUNLinSol_cuSolverSp_batchQR(d_x, dA, cusol_handle);
if (LS == NULL) {
printf("FAIL: SUNLinSol_cuSolverSp_batchQR returned NULL\n");
return(1);
}
/* Run Tests */
fails += Test_SUNLinSolInitialize(LS, 0);
fails += Test_SUNLinSolSetup(LS, dA, 0);
fails += Test_SUNLinSolSolve(LS, dA, d_x, d_b, 1000*UNIT_ROUNDOFF, 0);
fails += Test_SUNLinSolGetType(LS, SUNLINEARSOLVER_DIRECT, 0);
fails += Test_SUNLinSolGetID(LS, SUNLINEARSOLVER_CUSOLVERSP_BATCHQR, 0);
fails += Test_SUNLinSolLastFlag(LS, 0);
fails += Test_SUNLinSolSpace(LS, 0);
/* Print result */
if (fails) {
printf("FAIL: SUNLinSol module failed %i tests \n \n", fails);
SUNMatrix_cuSparse_CopyFromDevice(dA, SUNSparseMatrix_Data(A), NULL, NULL);
printf("\nA =\n");
SUNSparseMatrix_Print(A,stdout);
N_VCopyFromDevice_Cuda(d_xref);
printf("x (reference)\n");
N_VPrint_Cuda(d_xref);
N_VCopyFromDevice_Cuda(d_x); /* copy solution from device */
printf("x (computed)\n");
N_VPrint_Cuda(d_x);
N_VCopyFromDevice_Cuda(d_b);
printf("\nb = Ax (reference)\n");
N_VPrint_Cuda(d_b);
} else {
printf("SUCCESS: SUNLinSol module passed all tests \n \n");
}
/* Free solver, matrix and vectors */
SUNLinSolFree(LS);
SUNMatDestroy(A); SUNMatDestroy(dA);
N_VDestroy(x); N_VDestroy(d_x); N_VDestroy(d_xref);
N_VDestroy(b); N_VDestroy(d_b);
/* Destroy the cuSOLVER and cuSPARSE handles */
hipsparseDestroy(cusp_handle);
cusolverSpDestroy(cusol_handle);
return(fails);
}
/* ----------------------------------------------------------------------
* Implementation-specific 'check' routines
* --------------------------------------------------------------------*/
int check_vector(N_Vector X, N_Vector Y, realtype tol)
{
int failure = 0;
sunindextype i, local_length, maxloc;
realtype *Xdata, *Ydata, maxerr;
hipDeviceSynchronize();
N_VCopyFromDevice_Cuda(X);
N_VCopyFromDevice_Cuda(Y);
Xdata = N_VGetHostArrayPointer_Cuda(X);
Ydata = N_VGetHostArrayPointer_Cuda(Y);
local_length = N_VGetLength(X);
/* check vector data */
for(i=0; i < local_length; i++)
failure += FNEQ(Xdata[i], Ydata[i], tol);
if (failure > ZERO) {
maxerr = ZERO;
maxloc = -1;
for(i=0; i < local_length; i++) {
if (SUNRabs(Xdata[i]-Ydata[i]) > maxerr) {
maxerr = SUNRabs(Xdata[i]-Ydata[i]);
maxloc = i;
}
}
printf("check err failure: maxerr = %g at loc %li (tol = %g)\n",
maxerr, (long int) maxloc, tol);
return(1);
}
else
return(0);
}
void sync_device()
{
hipDeviceSynchronize();
} | 00973cf0d79ee9616129ffb150b630d7e2d87e3b.cu | /* -----------------------------------------------------------------
* Programmer(s): Cody J.Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the SUNLinSol cuSolverSp
* module implementation.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <sundials/sundials_types.h>
#include <sunlinsol/sunlinsol_cusolversp_batchqr.h>
#include <sunmatrix/sunmatrix_dense.h>
#include <sunmatrix/sunmatrix_sparse.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include <nvector/nvector_cuda.h>
#include <nvector/nvector_serial.h>
#include <sundials/sundials_math.h>
#include "test_sunlinsol.h"
/* ----------------------------------------------------------------------
 * SUNLinSol_cuSolverSp_batchQR Linear Solver Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
int fails = 0; /* counter for test failures */
sunindextype N; /* matrix columns, rows */
int block_size; /* matrix block columns, rows */
int block_nnz; /* number of nonzeros in a block */
int block_nnz_max; /* max nonzeros per block */
int nblocks; /* number of blocks */
SUNLinearSolver LS; /* linear solver object */
SUNMatrix A, B, dA; /* test matrices */
N_Vector x, b, d_x, d_xref, d_b;/* test vectors */
realtype *matdata, *xdata, *xrefdata;
int print_timing;
sunindextype i, j;
cusparseStatus_t cusp_status;
cusolverStatus_t cusol_status;
cusparseHandle_t cusp_handle;
cusolverSpHandle_t cusol_handle;
/* check input and set matrix dimensions */
if (argc < 4){
printf("ERROR: THREE (3) Inputs required: matrix block size, number of blocks, print timing \n");
return(-1);
}
block_size = atol(argv[1]);
if (block_size <= 0) {
printf("ERROR: matrix size must be a positive integer \n");
return(-1);
}
block_nnz_max = block_size*block_size / 4;
nblocks = atol(argv[2]);
if (nblocks <= 0) {
printf("ERROR: number of blocks must be a positive integer \n");
return(-1);
}
  /* calculate the size of the overall matrix */
N = block_size * nblocks;
print_timing = atoi(argv[3]);
SetTiming(print_timing);
printf("\ncuSolverSp linear solver test: size %ld, block size %ld, number of blocks %d\n\n",
(long int) N, (long int) block_size, (long int) nblocks);
/* Initialize cuSPARSE */
cusp_status = cusparseCreate(&cusp_handle);
if (cusp_status != CUSPARSE_STATUS_SUCCESS) {
printf("ERROR: could not create cuSPARSE handle\n");
return(-1);
}
/* Initialize cuSOLVER */
cusol_status = cusolverSpCreate(&cusol_handle);
if (cusol_status != CUSOLVER_STATUS_SUCCESS) {
printf("ERROR: could not create cuSOLVER handle\n");
return(-1);
}
/* Create matrices and vectors */
B = SUNDenseMatrix(N, N);
d_x = N_VNew_Cuda(N);
d_xref = N_VNew_Cuda(N);
d_b = N_VNew_Cuda(N);
x = N_VMake_Serial(N, N_VGetHostArrayPointer_Cuda(d_x));
b = N_VMake_Serial(N, N_VGetHostArrayPointer_Cuda(d_b));
/* Zero the matrix */
fails = SUNMatZero(B);
/* Create sparsity pattern for a block. */
sunindextype *cols = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
sunindextype *rows = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
for (i=0; i<block_nnz_max; i++) {
cols[i] = rand() % block_size;
rows[i] = rand() % block_size;
}
/* Fill matrix with uniform random data in [0,1/N] */
for (i=0; i<nblocks; i++) {
for (j=0; j<block_nnz_max; j++) {
sunindextype col = cols[j] + block_size*i;
sunindextype row = rows[j] + block_size*i;
matdata = SUNDenseMatrix_Column(B,col);
matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
}
}
/* Free temporary rows and cols variables */
free(cols); free(rows);
/* Add identity to B */
fails = SUNMatScaleAddI(ONE, B);
if (fails) {
printf("FAIL: SUNMatScaleAddI failure\n");
return(1);
}
/* Create sparse matrix from dense, and destroy B */
A = SUNSparseFromDenseMatrix(B, ZERO, CSR_MAT);
SUNMatDestroy(B);
/* Calculate actual number of nonzeros per block */
block_nnz = SUNSparseMatrix_NNZ(A) / nblocks;
/* Create the device matrix */
dA = SUNMatrix_cuSparse_NewBlockCSR(nblocks, block_size, block_size, block_nnz, cusp_handle);
if (dA == NULL) {
printf("ERROR: could not create dA\n");
}
/* Copy data to device */
fails = SUNMatrix_cuSparse_CopyToDevice(dA, SUNSparseMatrix_Data(A),
SUNSparseMatrix_IndexPointers(A),
SUNSparseMatrix_IndexValues(A));
if (fails != 0) {
printf("ERROR: could not copy A to the device\n");
return(-1);
}
/* Fill x vector with uniform random data in [0,1] */
xdata = N_VGetHostArrayPointer_Cuda(d_x);
xrefdata = N_VGetHostArrayPointer_Cuda(d_xref);
for (i=0; i<N; i++) {
realtype tmp = (realtype) rand() / (realtype) RAND_MAX;
xdata[i] = tmp;
xrefdata[i] = tmp;
}
N_VCopyToDevice_Cuda(d_x);
N_VCopyToDevice_Cuda(d_xref);
  /* Synchronize before performing dense operation on CPU */
cudaDeviceSynchronize();
/* create right-hand side vector for linear solve */
fails = SUNMatMatvec(A, x, b);
if (fails) {
printf("FAIL: SUNLinSol SUNMatMatvec failure\n");
return(1);
}
N_VCopyToDevice_Cuda(d_b);
/* Create cuSolverSp linear solver
* The BatchedQR method allows you to solve many small subsystems in parallel.
*/
LS = SUNLinSol_cuSolverSp_batchQR(d_x, dA, cusol_handle);
if (LS == NULL) {
printf("FAIL: SUNLinSol_cuSolverSp_batchQR returned NULL\n");
return(1);
}
/* Run Tests */
fails += Test_SUNLinSolInitialize(LS, 0);
fails += Test_SUNLinSolSetup(LS, dA, 0);
fails += Test_SUNLinSolSolve(LS, dA, d_x, d_b, 1000*UNIT_ROUNDOFF, 0);
fails += Test_SUNLinSolGetType(LS, SUNLINEARSOLVER_DIRECT, 0);
fails += Test_SUNLinSolGetID(LS, SUNLINEARSOLVER_CUSOLVERSP_BATCHQR, 0);
fails += Test_SUNLinSolLastFlag(LS, 0);
fails += Test_SUNLinSolSpace(LS, 0);
/* Print result */
if (fails) {
printf("FAIL: SUNLinSol module failed %i tests \n \n", fails);
SUNMatrix_cuSparse_CopyFromDevice(dA, SUNSparseMatrix_Data(A), NULL, NULL);
printf("\nA =\n");
SUNSparseMatrix_Print(A,stdout);
N_VCopyFromDevice_Cuda(d_xref);
printf("x (reference)\n");
N_VPrint_Cuda(d_xref);
N_VCopyFromDevice_Cuda(d_x); /* copy solution from device */
printf("x (computed)\n");
N_VPrint_Cuda(d_x);
N_VCopyFromDevice_Cuda(d_b);
printf("\nb = Ax (reference)\n");
N_VPrint_Cuda(d_b);
} else {
printf("SUCCESS: SUNLinSol module passed all tests \n \n");
}
/* Free solver, matrix and vectors */
SUNLinSolFree(LS);
SUNMatDestroy(A); SUNMatDestroy(dA);
N_VDestroy(x); N_VDestroy(d_x); N_VDestroy(d_xref);
N_VDestroy(b); N_VDestroy(d_b);
/* Destroy the cuSOLVER and cuSPARSE handles */
cusparseDestroy(cusp_handle);
cusolverSpDestroy(cusol_handle);
return(fails);
}
/* ----------------------------------------------------------------------
* Implementation-specific 'check' routines
* --------------------------------------------------------------------*/
int check_vector(N_Vector X, N_Vector Y, realtype tol)
{
int failure = 0;
sunindextype i, local_length, maxloc;
realtype *Xdata, *Ydata, maxerr;
cudaDeviceSynchronize();
N_VCopyFromDevice_Cuda(X);
N_VCopyFromDevice_Cuda(Y);
Xdata = N_VGetHostArrayPointer_Cuda(X);
Ydata = N_VGetHostArrayPointer_Cuda(Y);
local_length = N_VGetLength(X);
/* check vector data */
for(i=0; i < local_length; i++)
failure += FNEQ(Xdata[i], Ydata[i], tol);
if (failure > ZERO) {
maxerr = ZERO;
maxloc = -1;
for(i=0; i < local_length; i++) {
if (SUNRabs(Xdata[i]-Ydata[i]) > maxerr) {
maxerr = SUNRabs(Xdata[i]-Ydata[i]);
maxloc = i;
}
}
printf("check err failure: maxerr = %g at loc %li (tol = %g)\n",
maxerr, (long int) maxloc, tol);
return(1);
}
else
return(0);
}
void sync_device()
{
cudaDeviceSynchronize();
} |
b9bfab32ffbe0a803f5b549be69eca5b732032c5.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by yanhao on 17-11-23.
//
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//#include <Windows.h>
#include <string.h>
#include <malloc.h>
#include "opencv2/opencv.hpp"
#include "hip/device_functions.h"
#include "mul_cublas.h"
#include <rocblas.h> // cuBLAS (rocBLAS) library used for the GEMM calls
using namespace cv;
#define W_B_LEN 20
#define W_B_Data_Dim 4
#define FC_W_H 21504
#define FC_W_W 512
#define SAFE_FREE(p) {if((p) != NULL) {free(p); (p) = NULL;}}
#define SAFE_CLOSE(fp) {if((fp) != NULL) {fclose((fp)); (fp) = NULL;}}
typedef struct _MODEL_LEN {
int k1;
int k2;
int in_len;
int out_len;
} MODEL_LEN;
typedef struct _CNN_Model {
MODEL_LEN *model_len;
float **CNN_W;
float **CNN_B;
float **CNN_Prelu;
float *CNN_fc_w;
float *CNN_fc_b;
} CNN_Model;
typedef struct _CNN_Data {
float *data;
float *data1;
float *dstdata;
float *data_cp;
} CNN_Data;
void checkCudaErrors(hipError_t code) {
if (code != hipSuccess) {
std::cout << "CUDA error: " << hipGetErrorString(code) << std::endl;
exit(-1);
// if( abort )
// exit( code );
}
}
int init(CNN_Model &cnn_model) {
FILE *fp_cnn_len = fopen("/home/yanhao/tmpCNN/model_300.bin", "rb");
FILE *fp_cnn_w = fopen("/home/yanhao/tmpCNN/model_301.bin", "rb");
FILE *fp_cnn_b = fopen("/home/yanhao/tmpCNN/model_302.bin", "rb");
FILE *fp_cnn_prelu = fopen("/home/yanhao/tmpCNN/model_303.bin", "rb");
FILE *fp_cnn_fc_w = fopen("/home/yanhao/tmpCNN/model_304.bin", "rb");
FILE *fp_cnn_fc_b = fopen("/home/yanhao/tmpCNN/model_305.bin", "rb");
if (!fp_cnn_len || !fp_cnn_w || !fp_cnn_b || !fp_cnn_prelu || !fp_cnn_fc_w || !fp_cnn_fc_b) {
printf("open model file error!\n");
return -1;
}
int len[W_B_LEN * W_B_Data_Dim];
MODEL_LEN model_len[W_B_LEN];
fread(len, sizeof(int), W_B_LEN * W_B_Data_Dim, fp_cnn_len);
for (int i = 0; i < W_B_LEN; ++i) {
model_len[i].k1 = len[W_B_Data_Dim * i];
model_len[i].k2 = len[W_B_Data_Dim * i + 1];
model_len[i].in_len = len[W_B_Data_Dim * i + 2];
model_len[i].out_len = len[W_B_Data_Dim * i + 3];
}
cnn_model.model_len = (MODEL_LEN *) malloc(W_B_LEN * sizeof(MODEL_LEN));
cnn_model.CNN_W = (float **) malloc(W_B_LEN * sizeof(float *));
cnn_model.CNN_B = (float **) malloc(W_B_LEN * sizeof(float *));
cnn_model.CNN_Prelu = (float **) malloc(W_B_LEN * sizeof(float *));
cnn_model.CNN_fc_w = (float *) malloc(FC_W_H * FC_W_W * sizeof(float));
cnn_model.CNN_fc_b = (float *) malloc(FC_W_W * sizeof(float));
if (!cnn_model.model_len || !cnn_model.CNN_W || !cnn_model.CNN_B
|| !cnn_model.CNN_Prelu || !cnn_model.CNN_fc_w || !cnn_model.CNN_fc_b) {
printf("molloc error!\n");
return -1;
}
fread(cnn_model.CNN_fc_w, sizeof(float), FC_W_H * FC_W_W, fp_cnn_fc_w);
fread(cnn_model.CNN_fc_b, sizeof(float), FC_W_W, fp_cnn_fc_b);
for (int k = 0; k < W_B_LEN; ++k) {
int k1 = model_len[k].k1;
int k2 = model_len[k].k2;
int in_len = model_len[k].in_len;
int out_len = model_len[k].out_len;
cnn_model.CNN_W[k] = (float *) malloc(sizeof(float) * k1 * k2 * in_len * out_len);
cnn_model.CNN_B[k] = (float *) malloc(sizeof(float) * 1 * out_len);
cnn_model.CNN_Prelu[k] = (float *) malloc(sizeof(float) * 1 * out_len);
if (!cnn_model.CNN_W[k] || !cnn_model.CNN_B[k] || !cnn_model.CNN_Prelu[k]) {
printf("molloc error!\n");
return -1;
}
fread(cnn_model.CNN_W[k], sizeof(float), k1 * k2 * in_len * out_len, fp_cnn_w);
fread(cnn_model.CNN_B[k], sizeof(float), 1 * out_len, fp_cnn_b);
fread(cnn_model.CNN_Prelu[k], sizeof(float), 1 * out_len, fp_cnn_prelu);
}
for (int j = 0; j < W_B_LEN; ++j) {
printf("%d,%d,%d,%d\n", model_len[j].k1, model_len[j].k2, model_len[j].in_len, model_len[j].out_len);
}
for (int l = 0; l < W_B_LEN; ++l) {
cnn_model.model_len[l].k1 = model_len[l].k1;
cnn_model.model_len[l].k2 = model_len[l].k2;
cnn_model.model_len[l].in_len = model_len[l].in_len;
cnn_model.model_len[l].out_len = model_len[l].out_len;
}
SAFE_CLOSE(fp_cnn_len);
SAFE_CLOSE(fp_cnn_w);
SAFE_CLOSE(fp_cnn_b);
SAFE_CLOSE(fp_cnn_prelu);
SAFE_CLOSE(fp_cnn_fc_w);
SAFE_CLOSE(fp_cnn_fc_b);
printf("init ok!\n");
return 0;
}
static void rgb2Mat(IplImage *img, unsigned char *mat) {
int i, j, offset;
for (i = 0; i < img->height; i++) {
for (j = 0; j < img->width; j++) {
for (int k = 0; k < 3; ++k) {
offset = (i * img->widthStep + j * 3 + k);
mat[(i * img->width + j) * 3 + k] = *(img->imageData + offset);
}
}
}
}
template<typename T>
void gpu_memory_alloc(size_t len, T *&ptr) {
hipMalloc(&ptr, sizeof(T) * len);
}
//#define CHECK_EQ(val1, val2) ((val1)==(val2))
#define CHECK_NE(val1, val2) CHECK_OP(_NE, !=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(_LE, <=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(_LT, < , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(_GE, >=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(_GT, > , val1, val2)
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
// do {\
// hipError_t error = condition; \
// //std::cout<< " log:" << hipGetErrorString(error)<<std::endl; \
// } while (0)
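// Added note: with its body commented out, CUDA_CHECK(condition) expands to nothing,
// so CUDA_POST_KERNEL_CHECK below is effectively a no-op in this build.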
#define CUBLAS_CHECK(condition) \
do { \
hipblasStatus_t status = condition; \
CHECK_EQ(status, HIPBLAS_STATUS_SUCCESS) << " " \
<< caffe::cublasGetErrorString(status); \
} while (0)
#define CURAND_CHECK(condition) \
do { \
hiprandStatus_t status = condition; \
CHECK_EQ(status, HIPRAND_STATUS_SUCCESS) << " " \
<< caffe::curandGetErrorString(status); \
} while (0)
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// CUDA: check for error after kernel execution and exit loudly if there is one.
#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(hipPeekAtLastError())
// CUDA: use 512 threads per block
const int CAFFE_CUDA_NUM_THREADS = 64;
// CUDA: number of blocks for threads.
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
__global__ void im2col_gpu_kernel(const int n, const float *data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
float *data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
float *data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const float *data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(const float *data_im, const int channels,
const int height, const int width, const int kernel,
const int pad,
const int stride,
float *data_col, int *h_out, int *w_out) {
const int kernel_h = kernel;
const int kernel_w = kernel;
const int dilation_h = 1;
const int dilation_w = 1;
const int pad_h = pad;
const int pad_w = pad;
const int stride_h = stride;
const int stride_w = stride;
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
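    // Worked example (added, assuming a 112x112 input plane): kernel=3, pad=1, stride=2,
    // dilation=1 gives (112 + 2*1 - 3)/2 + 1 = 56, i.e. floor((H + 2p - k_eff)/s) + 1.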
*h_out = height_col;
*w_out = width_col;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel << < CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS >> > (
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
__global__ void PReLUForward(const int n, const int channels, const int dim,
const float *in, float *out, const float *b_data, const float *slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = (in[index] + b_data[c]) > 0 ? (in[index] + b_data[c]) : (in[index] + b_data[c]) * slope_data[c];
}
}
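// Added note: PReLUForward fuses the per-channel bias add with the PReLU activation:
// out = x + b[c] when that sum is positive, otherwise slope[c] * (x + b[c]).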
__global__ void ADD_GPU(const float *src1, const float *src2,const int n, float *dst) {
CUDA_KERNEL_LOOP(index, n) {
//int c = (index / dim) % channels / div_factor;
dst[index] =src1[index] + src2[index];
}
}
void Bi_D_gpu(float *src, float *dst, const int dim, const int channels, const float *b_data, const float *slope_data) {
//const Dtype* bottom_data = bottom[0]->gpu_data();
//Dtype* top_data = top[0]->mutable_gpu_data();
//const int count = bottom[0]->count();
//const int dim = bottom[0]->count(2);
//const int channels = bottom[0]->channels();
//const Dtype* slope_data = this->blobs_[0]->gpu_data();
//const int div_factor = channel_shared_ ? channels : 1;
//
//// For in-place computation
//if (top[0] == bottom[0]) {
//caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
//}
const int count = dim * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
count, channels, dim, src, dst, b_data, slope_data, 1);
CUDA_POST_KERNEL_CHECK;
}
void ADD_G(const float *src1,const float*src2,const int count, float*dst){
ADD_GPU<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(src1,src2,count,dst);
CUDA_POST_KERNEL_CHECK;
}
//hipblasStatus_t
//addWithCuda6(const hipblasHandle_t &handle, const float *dev_a, const float *dev_b, const int WA, const int HA,
// const int WB,
// const int HB, float *dev_c) {
//// if(WA!=HB || WA<=0 || WB <=0 ||HA <=0 || HB <=0 || !a || !b || !c){
//// return HIPBLAS_STATUS_INTERNAL_ERROR;
//// }
////
////
//// float *dev_a = 0;
//// float *dev_b = 0;
//// float *dev_c = 0;
// hipError_t cudaStatus;
// hipblasStatus_t cublasStatus;
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = hipMalloc((void **) &dev_c, HA * WB * sizeof(float));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// return HIPBLAS_STATUS_INTERNAL_ERROR;
// }
//
// cudaStatus = hipMalloc((void **) &dev_a, HA * WA * sizeof(float));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// return HIPBLAS_STATUS_INTERNAL_ERROR;
// }
//
// cudaStatus = hipMalloc((void **) &dev_b, HB * WB * sizeof(float));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// return HIPBLAS_STATUS_INTERNAL_ERROR;
// }
//
// hipblasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
// hipblasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
//
// //
// hipDeviceSynchronize();
// float alpha = 1.0;
// float beta = 0.0;
// clock_t start = clock();
//
// cublasStatus = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, WA, HA, WB, &alpha, dev_b, HA, dev_a, HA, &beta, dev_c,
// HA);
//
// hipDeviceSynchronize();
//
// clock_t time_used = clock() - start;
// printf("(GPU31) time:%ld\n", time_used);
// hipDeviceSynchronize();
// hipblasGetVector(HA * HB, sizeof(float), dev_c, 1, c, 1);
// //Error:
// hipFree(dev_c);
// hipFree(dev_a);
// hipFree(dev_b);
// return cublasStatus;
//}
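// Added note: change_zhizheng ("swap pointers") ping-pongs the input and output buffers
// between layers, so each layer reads the previous layer's output without an extra copy.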
void change_zhizheng(float **data, float **dstdata, int &data_h, int &data_w, int &h_out, int &w_out) {
float *tmp;
tmp = *data;
*data = *dstdata;
*dstdata = tmp;
data_h = h_out;
data_w = w_out;
}
hipblasStatus_t
run_cublasgemm(const hipblasHandle_t &handle, const float *dev_a, const float *dev_b, float *dev_c, const int HA,
const int WB,
const int WA, int Mode = 0) {
// float *dev_a = 0;
// float *dev_b = 0;
// float *dev_c = 0;
//
//
// hipError_t cudaStatus;
hipblasStatus_t cublasStatus;
//
//hipDeviceSynchronize();
float alpha = 1.0;
float beta = 0.0;
//printf("aaaaaaaaaaa!\n");
int m = WB;
int n = HA;
int k = WA;
int lda = WA;
int ldb = WB;
int ldc = WB;
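    // Added note: the buffers are row-major while hipBLAS/cuBLAS is column-major, so the
    // call below computes B^T * A^T = (A*B)^T in column-major terms, which is exactly the
    // row-major product A(HA x WA) * B(WA x WB) landing in dev_c with leading dimension WB.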
cublasStatus = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &alpha, dev_b, ldb, dev_a, lda, &beta, dev_c,
ldc);
//hipDeviceSynchronize();
//hipDeviceSynchronize();
return cublasStatus;
}
void
run(const hipblasHandle_t &handle, float *dev_data, int *dev_data_h, int *dev_data_w, float *dev_data1,
float *dev_dstdata,
const CNN_Model &dev_cnn_model, int ID, int pad,
int stride, int *h_out, int *w_out) {
const int Mode = 0;
int kern = dev_cnn_model.model_len[ID].k1;
int in_len = dev_cnn_model.model_len[ID].in_len;
int out_len = dev_cnn_model.model_len[ID].out_len;
//printf("aa\n");
im2col_gpu(dev_data, in_len, *dev_data_h, *dev_data_w, kern, pad, stride, dev_data1, h_out, w_out);
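    // Added note: the convolution is lowered to one GEMM -- im2col expands the input into a
    // (in_len*kern*kern) x (h_out*w_out) matrix in dev_data1, which is then multiplied by the
    // (out_len x in_len*kern*kern) weight matrix in the run_cublasgemm call below.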
// float *b = (float *) malloc(sizeof(float) * 1000);
// hipMemcpy(b,dev_cnn_model.CNN_W[ID],sizeof(float) * 1000,hipMemcpyDeviceToHost);
//
// for (int i = 0; i < 1000; ++i) {
//
// printf("%f,%f,%f\n",b[i],b[i],b[i]);
//
// }
//printf("aaa");
float alpha = 1.0;
float beta = 0.0;
clock_t start = clock();
hipblasStatus_t cublasStatus;
cublasStatus = run_cublasgemm(handle, dev_cnn_model.CNN_W[ID], dev_data1, dev_dstdata, out_len, (*h_out) * (*w_out),
in_len * kern * kern, Mode);
if (cublasStatus != HIPBLAS_STATUS_SUCCESS) {
if (cublasStatus == HIPBLAS_STATUS_NOT_INITIALIZED) {
//cout << "CUBLAS " << endl;
}
printf("CUBLAS \n");
//getchar();
printf("hello,is r\n");
exit(-1);
}
Bi_D_gpu(dev_dstdata,dev_dstdata,(*h_out) * (*w_out),dev_cnn_model.model_len[ID].out_len,
dev_cnn_model.CNN_B[ID],dev_cnn_model.CNN_Prelu[ID]);
//printf("%f,%f,%f,%d\n",c[0],c[1],c[2],ID);
if(ID==190) {
float *c= (float *) malloc((*h_out) * (*w_out)*dev_cnn_model.model_len[ID].out_len*sizeof(float));
// float b = dev_cnn_model.CNN_B[ID][0];
hipError_t cudaStatus = hipMemcpy(c,dev_dstdata,(*h_out) * (*w_out)*dev_cnn_model.model_len[ID].out_len*sizeof(float),hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
exit(-1);
}
float *p_im_data = c;//dev_dstdata;
for (int l = 0; l < dev_cnn_model.model_len[ID].out_len; ++l) {
//float b = dev_cnn_model.CNN_B[ID][l];
//float ai = dev_cnn_model.CNN_Prelu[ID][l];
for (int k = 0; k < (*h_out) * (*w_out); ++k) {
//float tmp = *p_im_data + b;
//*p_im_data++ = (tmp > 0 ? tmp : (tmp * ai));
printf("%d:%d:%f\n",ID,l*(*h_out) * (*w_out)+k, *p_im_data++);
}
}
}
}
//__global__ void add_gpu(float *a, float *b, float *c, int n) {
// int i = (blockIdx.x * gridDim.x + blockIdx.y) * blockDim.x * blockDim.y + threadIdx.x * blockDim.x + threadIdx.y;
// if (i < n) {
// c[i] = a[i] + b[i];
// }
//}
void runFace(const hipblasHandle_t &handle, float *dataIn, int w, int h, int c, const CNN_Model &dev_cnn_model,
CNN_Data &dev_cnn_data, float *dataOut,
int FeaLen) {
int data_h = h;
int data_w = w;
int h_out;
int w_out;
float *data = dev_cnn_data.data;
float *data1 = dev_cnn_data.data1;
float *dstdata = dev_cnn_data.dstdata;
float *data_cp = dev_cnn_data.data_cp;
double Time = (double) cvGetTickCount();
hipMemcpy(data, dataIn, sizeof(float) * w * h * c, hipMemcpyHostToDevice);
//hipDeviceSynchronize();
//printf("ccc\n");
//c_1_1
run(handle, data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 0, 1, 2, &h_out, &w_out);
//hipMemcpy(data_cp, dstdata, dev_cnn_model.model_len[0].out_len * h_out * w_out * sizeof(float));
hipMemcpy(data_cp,dstdata,dev_cnn_model.model_len[0].out_len * h_out * w_out * sizeof(float),hipMemcpyDeviceToDevice);
//hipDeviceSynchronize();
//c_1_2
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 1, 1, 1, &h_out, &w_out);
//c_1_3
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 2, 1, 1, &h_out, &w_out);
//rest_1_3
// float *p = data_cp;
// float *q = dstdata;
// for (int m = 0; m < cnn_model.model_len[0].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[0].out_len * h_out * w_out,dstdata);
//c_2_1
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 3, 1, 2, &h_out, &w_out);
    hipMemcpy(data_cp, dstdata, dev_cnn_model.model_len[3].out_len * h_out * w_out * sizeof(float), hipMemcpyDeviceToDevice);
//c_2_2
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 4, 1, 1, &h_out, &w_out);
//c_2_3
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 5, 1, 1, &h_out, &w_out);
//res_2_3
//hipDeviceSynchronize();
/*{
//res_2_3
p = data_cp;
q = dstdata;
for (int m = 0; m < cnn_model.model_len[3].out_len * h_out * w_out; ++m) {
*q = *q + (*p);
q++;
p++;
}
memcpy(data_cp, dstdata, cnn_model.model_len[3].out_len * h_out * w_out * sizeof(float));
//c_2-4
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(data, &data_h, &data_w, data1, dstdata, cnn_model, 6, 1, 1, &h_out, &w_out);
//c_2_5
}*/
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[3].out_len * h_out * w_out,dstdata);
//ADD_G(dstdata,data_cp,dev_cnn_model.model_len[0].out_len * h_out * w_out,dstdata);
//hipDeviceSynchronize();
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[3].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
//memcpy(data_cp, dstdata, cnn_model.model_len[3].out_len * h_out * w_out * sizeof(float));
hipMemcpy(data_cp, dstdata, dev_cnn_model.model_len[3].out_len * h_out * w_out * sizeof(float),hipMemcpyDeviceToDevice);
//hipDeviceSynchronize();
//c_2-4
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 6, 1, 1, &h_out, &w_out);
//c_2_5
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 7, 1, 1, &h_out, &w_out);
//res_2_5
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[3].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[3].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
//c_3_1
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 8, 1, 2, &h_out, &w_out);
hipMemcpy(data_cp, dstdata, dev_cnn_model.model_len[8].out_len * h_out * w_out * sizeof(float),hipMemcpyDeviceToDevice);
//c_3_2
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 9, 1, 1, &h_out, &w_out);
//c_3_3
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 10, 1, 1, &h_out, &w_out);
//res_3_3
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[8].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[8].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
hipMemcpy(data_cp, dstdata, dev_cnn_model.model_len[10].out_len * h_out * w_out * sizeof(float),hipMemcpyDeviceToDevice);
//c_3_4
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 11, 1, 1, &h_out, &w_out);
//c_3_5
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 12, 1, 1, &h_out, &w_out);
//res_3_5
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[10].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[10].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
hipMemcpy(data_cp, dstdata, dev_cnn_model.model_len[12].out_len * h_out * w_out * sizeof(float),hipMemcpyDeviceToDevice);
//c_3_6
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 13, 1, 1, &h_out, &w_out);
//c_3_7
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 14, 1, 1, &h_out, &w_out);
//res_3_7
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[12].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[12].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
hipMemcpy(data_cp, dstdata, dev_cnn_model.model_len[14].out_len * h_out * w_out * sizeof(float),hipMemcpyDeviceToDevice);
//c_3_8
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 15, 1, 1, &h_out, &w_out);
//c_3_9
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 16, 1, 1, &h_out, &w_out);
//res_3_9
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[14].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[14].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
//c_4_1
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 17, 1, 2, &h_out, &w_out);
hipMemcpy(data_cp, dstdata, dev_cnn_model.model_len[17].out_len * h_out * w_out * sizeof(float),hipMemcpyDeviceToDevice);
//c_4_2
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 18, 1, 1, &h_out, &w_out);
//c_4_3
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 19, 1, 1, &h_out, &w_out);
//res_4_3
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[17].out_len * h_out * w_out,dstdata);
//fc
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
hipblasStatus_t cublasStatus;
//Time = (double) cvGetTickCount();
//run_gemm(cnn_model.CNN_fc_w, data, dstdata, FC_W_W, 1, FC_W_H, 1);
cublasStatus = run_cublasgemm(handle, dev_cnn_model.CNN_fc_w, data, dstdata,FC_W_W,1,
FC_W_H, 1);
//float *tmpdata;
//float *dev_tmpdata;
//hipHostGetDevicePointer((void**)&dev_tmpdata, (void*)dataOut, 0);
ADD_G(dstdata,dev_cnn_model.CNN_fc_b,FC_W_W,dstdata);
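    // The two calls above form the final fully-connected layer: the flattened feature map
    // (FC_W_H = 21504 values) is projected onto FC_W_W = 512 outputs as a matrix-vector product,
    // and the bias vector is added on the GPU, giving the 512-dimensional face embedding.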
//hipDeviceSynchronize();
//hipDeviceSynchronize
Time = (double) cvGetTickCount() - Time;
printf("run time11 = %gms\n", Time / (cvGetTickFrequency() * 1000));
Time = (double) cvGetTickCount();
//float *dd= (float *) malloc(FC_W_W * sizeof(float));
//hipMemcpy(dataOut,dstdata,FC_W_W * sizeof(float),hipMemcpyDeviceToHost);
//hipHostGetDevicePointer(&dataOut, dstdata, 0);
// hipMemcpyAsync( dataOut, dstdata,
// FC_W_W * sizeof(float),
// hipMemcpyDeviceToHost,
// stream );
// hipStreamSynchronize( stream ) ;
hipMemcpy(dataOut, dstdata, FC_W_W * sizeof(float), hipMemcpyDefault);
// // hipStreamSynchronize( stream ) ;
Time = (double) cvGetTickCount() - Time;
printf("run time21 = %gms\n", Time / (cvGetTickFrequency() * 1000));
printf("dd:%d:%f\n",511,dataOut[511]);
// for (int k = 0; k < FC_W_W; ++k) {
// printf("dd:%d:%f\n",k,dataOut[k]);
// }
}
int main() {
CNN_Model cnn_model;
init(cnn_model);
CNN_Model dev_cnn_model;
dev_cnn_model.CNN_W = (float **) malloc(W_B_LEN * sizeof(float *));
dev_cnn_model.CNN_B = (float **) malloc(W_B_LEN * sizeof(float *));
dev_cnn_model.CNN_Prelu = (float **) malloc(W_B_LEN * sizeof(float *));
checkCudaErrors(hipMalloc((void **) (&dev_cnn_model.CNN_fc_w), FC_W_H * FC_W_W * sizeof(float)));
checkCudaErrors(hipMalloc((void **) (&dev_cnn_model.CNN_fc_b), FC_W_W * sizeof(float)));
checkCudaErrors(hipMemcpy(dev_cnn_model.CNN_fc_w, cnn_model.CNN_fc_w, sizeof(float) * FC_W_H * FC_W_W,
hipMemcpyHostToDevice));
checkCudaErrors(
hipMemcpy(dev_cnn_model.CNN_fc_b, cnn_model.CNN_fc_b, sizeof(float) * FC_W_W, hipMemcpyHostToDevice));
//checkCudaErrors(hipMalloc((void **) (&dev_cnn_model.model_len), W_B_LEN * sizeof(MODEL_LEN)));
dev_cnn_model.model_len = (MODEL_LEN *) malloc(W_B_LEN * sizeof(MODEL_LEN));
for (int k = 0; k < W_B_LEN; ++k) {
int k1 = cnn_model.model_len[k].k1;
int k2 = cnn_model.model_len[k].k2;
int in_len = cnn_model.model_len[k].in_len;
int out_len = cnn_model.model_len[k].out_len;
dev_cnn_model.model_len[k].k1 = k1;
dev_cnn_model.model_len[k].k2 = k2;
dev_cnn_model.model_len[k].in_len = in_len;
dev_cnn_model.model_len[k].out_len = out_len;
// checkCudaErrors(hipMemcpy(&dev_cnn_model.model_len[k].k1, &k1, sizeof(int) * 1, hipMemcpyHostToDevice));
//
// checkCudaErrors(hipMemcpy(&dev_cnn_model.model_len[k].k2, &k2, sizeof(int) * 1, hipMemcpyHostToDevice));
//
// checkCudaErrors(
// hipMemcpy(&dev_cnn_model.model_len[k].in_len, &in_len, sizeof(int) * 1, hipMemcpyHostToDevice));
//
// checkCudaErrors(
// hipMemcpy(&dev_cnn_model.model_len[k].out_len, &out_len, sizeof(int) * 1, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **) &(dev_cnn_model.CNN_W[k]), sizeof(float) * k1 * k2 * in_len * out_len));
checkCudaErrors(hipMalloc((void **) (&dev_cnn_model.CNN_B[k]), sizeof(float) * 1 * out_len));
checkCudaErrors(hipMalloc((void **) (&dev_cnn_model.CNN_Prelu[k]), sizeof(float) * 1 * out_len));
checkCudaErrors(
hipMemcpy(dev_cnn_model.CNN_W[k], cnn_model.CNN_W[k], sizeof(float) * k1 * k2 * in_len * out_len,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dev_cnn_model.CNN_B[k], cnn_model.CNN_B[k], sizeof(float) * 1 * out_len,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dev_cnn_model.CNN_Prelu[k], cnn_model.CNN_Prelu[k], sizeof(float) * 1 * out_len,
hipMemcpyHostToDevice));
}
const int WIDTH = 96;
const int HEIGHT = 112;
const int Channels = 3;
const int SCALE = 512;
CNN_Data dev_cnn_data;
checkCudaErrors(hipMalloc((void **) &(dev_cnn_data.data), sizeof(float) * SCALE * WIDTH * HEIGHT * Channels));
checkCudaErrors(hipMalloc((void **) &(dev_cnn_data.data1), sizeof(float) * SCALE * WIDTH * HEIGHT * Channels));
checkCudaErrors(hipMalloc((void **) &(dev_cnn_data.dstdata), sizeof(float) * SCALE * WIDTH * HEIGHT * Channels));
checkCudaErrors(hipMalloc((void **) &(dev_cnn_data.data_cp), sizeof(float) * SCALE * WIDTH * HEIGHT * Channels));
IplImage *bgr = cvLoadImage("/home/yanhao/360.bmp", 1);
unsigned char *bgr_mat = (unsigned char *) malloc(bgr->width * bgr->height * 3);
rgb2Mat(bgr, bgr_mat);
int w = bgr->width;
int h = bgr->height;
int c = bgr->nChannels;
float *img = (float *) malloc(sizeof(float) * w * h * c);
float *dataIn = (float *) malloc(sizeof(float) * w * h * c);
unsigned char *p1 = (unsigned char *) bgr_mat;
float *p2 = img;
for (int i = 0; i < w * h * c; ++i) {
//float tmp = (unsigned char) (*p1++);
*p2++ = ((unsigned char) (*p1++) - 127.5) * 0.0078125;
}
float *p_b = img;
float *p_g = img + 1;
float *p_r = img + 2;
float *data_b = (float *) dataIn;
float *data_g = (float *) (dataIn + w * h);
float *data_r = (float *) (dataIn + 2 * w * h);
for (int j = 0; j < w * h; ++j) {
*data_b++ = *p_b;
*data_g++ = *p_g;
*data_r++ = *p_r;
p_b += 3;
p_g += 3;
p_r += 3;
}
//memcpy(data, img, w * h * c * sizeof(float));
hipblasStatus_t cublasStatus;
hipblasHandle_t handle;
cublasStatus = hipblasCreate(&handle);
if (cublasStatus != HIPBLAS_STATUS_SUCCESS) {
if (cublasStatus == HIPBLAS_STATUS_NOT_INITIALIZED) {
//cout << "CUBLAS " << endl;
}
printf("CUBLAS \n");
//getchar();
printf("hello,is r\n");
return -1;
}
const int FEA_LEN = 512;
//float *fea;
//hipHostMalloc((void**)&fea,FEA_LEN * sizeof(float));
//hipHostMalloc((void **)&fea, sizeof(float)*FEA_LEN, hipHostMallocMapped);
float *fea = (float *) malloc(FEA_LEN * sizeof(float));
// double Time = (double) cvGetTickCount();
//
hipStream_t stream;
hipStreamCreate( &stream ) ;
runFace(handle, dataIn, w, h, c, dev_cnn_model, dev_cnn_data, fea, FEA_LEN);
printf("%f\n",fea[511]);
//hipStreamSynchronize( stream ) ;
//hipHostFree(fea);
// hipStreamDestroy( stream );
// for (int l = 0; l < 512; ++l) {
// printf("%f\n",fea[l]);
// }
//getchar();
printf("ok!\n");
}
| b9bfab32ffbe0a803f5b549be69eca5b732032c5.cu | //
// Created by yanhao on 17-11-23.
//
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//#include <Windows.h>
#include <string.h>
#include <malloc.h>
#include "opencv2/opencv.hpp"
#include "device_functions.h"
#include "mul_cublas.h"
#include <cublas_v2.h> // cuBLAS, the BLAS library bundled with CUDA
using namespace cv;
#define W_B_LEN 20
#define W_B_Data_Dim 4
#define FC_W_H 21504
#define FC_W_W 512
#define SAFE_FREE(p) {if((p) != NULL) {free(p); (p) = NULL;}}
#define SAFE_CLOSE(fp) {if((fp) != NULL) {fclose((fp)); (fp) = NULL;}}
typedef struct _MODEL_LEN {
int k1;
int k2;
int in_len;
int out_len;
} MODEL_LEN;
typedef struct _CNN_Model {
MODEL_LEN *model_len;
float **CNN_W;
float **CNN_B;
float **CNN_Prelu;
float *CNN_fc_w;
float *CNN_fc_b;
} CNN_Model;
typedef struct _CNN_Data {
float *data;
float *data1;
float *dstdata;
float *data_cp;
} CNN_Data;
void checkCudaErrors(cudaError_t code) {
if (code != cudaSuccess) {
std::cout << "CUDA error: " << cudaGetErrorString(code) << std::endl;
exit(-1);
// if( abort )
// exit( code );
}
}
int init(CNN_Model &cnn_model) {
FILE *fp_cnn_len = fopen("/home/yanhao/tmpCNN/model_300.bin", "rb");
FILE *fp_cnn_w = fopen("/home/yanhao/tmpCNN/model_301.bin", "rb");
FILE *fp_cnn_b = fopen("/home/yanhao/tmpCNN/model_302.bin", "rb");
FILE *fp_cnn_prelu = fopen("/home/yanhao/tmpCNN/model_303.bin", "rb");
FILE *fp_cnn_fc_w = fopen("/home/yanhao/tmpCNN/model_304.bin", "rb");
FILE *fp_cnn_fc_b = fopen("/home/yanhao/tmpCNN/model_305.bin", "rb");
if (!fp_cnn_len || !fp_cnn_w || !fp_cnn_b || !fp_cnn_prelu || !fp_cnn_fc_w || !fp_cnn_fc_b) {
printf("open model file error!\n");
return -1;
}
int len[W_B_LEN * W_B_Data_Dim];
MODEL_LEN model_len[W_B_LEN];
fread(len, sizeof(int), W_B_LEN * W_B_Data_Dim, fp_cnn_len);
for (int i = 0; i < W_B_LEN; ++i) {
model_len[i].k1 = len[W_B_Data_Dim * i];
model_len[i].k2 = len[W_B_Data_Dim * i + 1];
model_len[i].in_len = len[W_B_Data_Dim * i + 2];
model_len[i].out_len = len[W_B_Data_Dim * i + 3];
}
cnn_model.model_len = (MODEL_LEN *) malloc(W_B_LEN * sizeof(MODEL_LEN));
cnn_model.CNN_W = (float **) malloc(W_B_LEN * sizeof(float *));
cnn_model.CNN_B = (float **) malloc(W_B_LEN * sizeof(float *));
cnn_model.CNN_Prelu = (float **) malloc(W_B_LEN * sizeof(float *));
cnn_model.CNN_fc_w = (float *) malloc(FC_W_H * FC_W_W * sizeof(float));
cnn_model.CNN_fc_b = (float *) malloc(FC_W_W * sizeof(float));
if (!cnn_model.model_len || !cnn_model.CNN_W || !cnn_model.CNN_B
|| !cnn_model.CNN_Prelu || !cnn_model.CNN_fc_w || !cnn_model.CNN_fc_b) {
printf("molloc error!\n");
return -1;
}
fread(cnn_model.CNN_fc_w, sizeof(float), FC_W_H * FC_W_W, fp_cnn_fc_w);
fread(cnn_model.CNN_fc_b, sizeof(float), FC_W_W, fp_cnn_fc_b);
for (int k = 0; k < W_B_LEN; ++k) {
int k1 = model_len[k].k1;
int k2 = model_len[k].k2;
int in_len = model_len[k].in_len;
int out_len = model_len[k].out_len;
cnn_model.CNN_W[k] = (float *) malloc(sizeof(float) * k1 * k2 * in_len * out_len);
cnn_model.CNN_B[k] = (float *) malloc(sizeof(float) * 1 * out_len);
cnn_model.CNN_Prelu[k] = (float *) malloc(sizeof(float) * 1 * out_len);
if (!cnn_model.CNN_W[k] || !cnn_model.CNN_B[k] || !cnn_model.CNN_Prelu[k]) {
printf("molloc error!\n");
return -1;
}
fread(cnn_model.CNN_W[k], sizeof(float), k1 * k2 * in_len * out_len, fp_cnn_w);
fread(cnn_model.CNN_B[k], sizeof(float), 1 * out_len, fp_cnn_b);
fread(cnn_model.CNN_Prelu[k], sizeof(float), 1 * out_len, fp_cnn_prelu);
}
for (int j = 0; j < W_B_LEN; ++j) {
printf("%d,%d,%d,%d\n", model_len[j].k1, model_len[j].k2, model_len[j].in_len, model_len[j].out_len);
}
for (int l = 0; l < W_B_LEN; ++l) {
cnn_model.model_len[l].k1 = model_len[l].k1;
cnn_model.model_len[l].k2 = model_len[l].k2;
cnn_model.model_len[l].in_len = model_len[l].in_len;
cnn_model.model_len[l].out_len = model_len[l].out_len;
}
SAFE_CLOSE(fp_cnn_len);
SAFE_CLOSE(fp_cnn_w);
SAFE_CLOSE(fp_cnn_b);
SAFE_CLOSE(fp_cnn_prelu);
SAFE_CLOSE(fp_cnn_fc_w);
SAFE_CLOSE(fp_cnn_fc_b);
printf("init ok!\n");
return 0;
}
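// Model file layout consumed by init():
//   model_300.bin : W_B_LEN * 4 ints, the (k1, k2, in_len, out_len) shape of each conv layer
//   model_301.bin : conv weights, k1*k2*in_len*out_len floats per layer, in layer order
//   model_302.bin / model_303.bin : per-layer bias and PReLU slope vectors, out_len floats each
//   model_304.bin / model_305.bin : fully-connected weights (FC_W_H x FC_W_W) and bias (FC_W_W)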
static void rgb2Mat(IplImage *img, unsigned char *mat) {
int i, j, offset;
for (i = 0; i < img->height; i++) {
for (j = 0; j < img->width; j++) {
for (int k = 0; k < 3; ++k) {
offset = (i * img->widthStep + j * 3 + k);
mat[(i * img->width + j) * 3 + k] = *(img->imageData + offset);
}
}
}
}
template<typename T>
void gpu_memory_alloc(size_t len, T *&ptr) {
cudaMalloc(&ptr, sizeof(T) * len);
}
//#define CHECK_EQ(val1, val2) ((val1)==(val2))
#define CHECK_NE(val1, val2) CHECK_OP(_NE, !=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(_LE, <=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(_LT, < , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(_GE, >=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(_GT, > , val1, val2)
/* Code block avoids redefinition of cudaError_t error */
// The check is currently disabled and expands to nothing; the intended body was:
// do {
//     cudaError_t error = condition;
//     std::cout << " log:" << cudaGetErrorString(error) << std::endl;
// } while (0)
#define CUDA_CHECK(condition)
#define CUBLAS_CHECK(condition) \
do { \
cublasStatus_t status = condition; \
CHECK_EQ(status, CUBLAS_STATUS_SUCCESS) << " " \
<< caffe::cublasGetErrorString(status); \
} while (0)
#define CURAND_CHECK(condition) \
do { \
curandStatus_t status = condition; \
CHECK_EQ(status, CURAND_STATUS_SUCCESS) << " " \
<< caffe::curandGetErrorString(status); \
} while (0)
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// CUDA: check for error after kernel execution and exit loudly if there is one.
#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError())
// CUDA: use 512 threads per block
const int CAFFE_CUDA_NUM_THREADS = 64;
// CUDA: number of blocks for threads.
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
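// Illustrative use of the two helpers above (sketch only; ExampleKernel and the d_in / d_out
// pointers are placeholders, not part of this file):
//
//   __global__ void ExampleKernel(const int n, const float *in, float *out) {
//       CUDA_KERNEL_LOOP(i, n) {   // grid-stride loop: each thread covers i, i + stride, ...
//           out[i] = 2.0f * in[i];
//       }
//   }
//   ExampleKernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, d_in, d_out);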
__global__ void im2col_gpu_kernel(const int n, const float *data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
float *data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
float *data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const float *data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(const float *data_im, const int channels,
const int height, const int width, const int kernel,
const int pad,
const int stride,
float *data_col, int *h_out, int *w_out) {
const int kernel_h = kernel;
const int kernel_w = kernel;
const int dilation_h = 1;
const int dilation_w = 1;
const int pad_h = pad;
const int pad_w = pad;
const int stride_h = stride;
const int stride_w = stride;
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
*h_out = height_col;
*w_out = width_col;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
    im2col_gpu_kernel<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
            num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
            pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
            width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
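// How the conv layers use im2col_gpu: every kernel-sized patch of the input is unrolled into a
// column of data_col (shape [in_len * k * k] x [h_out * w_out]), so the convolution becomes one
// matrix multiply with the filter matrix (shape [out_len] x [in_len * k * k]).
// Sketch of the call sequence, mirroring run() further below (names are placeholders):
//
//   im2col_gpu(src, in_len, h, w, k, pad, stride, data_col, &h_out, &w_out);
//   run_cublasgemm(handle, weights, data_col, dst, out_len, h_out * w_out, in_len * k * k);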
__global__ void PReLUForward(const int n, const int channels, const int dim,
const float *in, float *out, const float *b_data, const float *slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = (in[index] + b_data[c]) > 0 ? (in[index] + b_data[c]) : (in[index] + b_data[c]) * slope_data[c];
}
}
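// PReLUForward fuses the per-channel bias add with the PReLU activation:
//   y = in[index] + b_data[c];   out[index] = (y > 0) ? y : slope_data[c] * y
// where c is the channel of the flattened index (dim = H*W elements per channel) and
// div_factor == 1 here because every channel carries its own slope.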
__global__ void ADD_GPU(const float *src1, const float *src2,const int n, float *dst) {
CUDA_KERNEL_LOOP(index, n) {
//int c = (index / dim) % channels / div_factor;
dst[index] =src1[index] + src2[index];
}
}
void Bi_D_gpu(float *src, float *dst, const int dim, const int channels, const float *b_data, const float *slope_data) {
//const Dtype* bottom_data = bottom[0]->gpu_data();
//Dtype* top_data = top[0]->mutable_gpu_data();
//const int count = bottom[0]->count();
//const int dim = bottom[0]->count(2);
//const int channels = bottom[0]->channels();
//const Dtype* slope_data = this->blobs_[0]->gpu_data();
//const int div_factor = channel_shared_ ? channels : 1;
//
//// For in-place computation
//if (top[0] == bottom[0]) {
//caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
//}
const int count = dim * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
    PReLUForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
            count, channels, dim, src, dst, b_data, slope_data, 1);
CUDA_POST_KERNEL_CHECK;
}
void ADD_G(const float *src1,const float*src2,const int count, float*dst){
    ADD_GPU<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(src1, src2, count, dst);
CUDA_POST_KERNEL_CHECK;
}
//cublasStatus_t
//addWithCuda6(const cublasHandle_t &handle, const float *dev_a, const float *dev_b, const int WA, const int HA,
// const int WB,
// const int HB, float *dev_c) {
//// if(WA!=HB || WA<=0 || WB <=0 ||HA <=0 || HB <=0 || !a || !b || !c){
//// return CUBLAS_STATUS_INTERNAL_ERROR;
//// }
////
////
//// float *dev_a = 0;
//// float *dev_b = 0;
//// float *dev_c = 0;
// cudaError_t cudaStatus;
// cublasStatus_t cublasStatus;
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void **) &dev_c, HA * WB * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// return CUBLAS_STATUS_INTERNAL_ERROR;
// }
//
// cudaStatus = cudaMalloc((void **) &dev_a, HA * WA * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// return CUBLAS_STATUS_INTERNAL_ERROR;
// }
//
// cudaStatus = cudaMalloc((void **) &dev_b, HB * WB * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// return CUBLAS_STATUS_INTERNAL_ERROR;
// }
//
// cublasSetVector(HA * WA, sizeof(float), a, 1, dev_a, 1);
// cublasSetVector(HB * WB, sizeof(float), b, 1, dev_b, 1);
//
//    // synchronization function
// cudaThreadSynchronize();
// float alpha = 1.0;
// float beta = 0.0;
// clock_t start = clock();
//
// cublasStatus = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, WA, HA, WB, &alpha, dev_b, HA, dev_a, HA, &beta, dev_c,
// HA);
//
// cudaThreadSynchronize();
//
// clock_t time_used = clock() - start;
// printf("(GPU31) time:%ld\n", time_used);
// cudaThreadSynchronize();
// cublasGetVector(HA * HB, sizeof(float), dev_c, 1, c, 1);
// //Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
// return cublasStatus;
//}
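// change_zhizheng ping-pongs the layer buffers: the previous layer's output pointer becomes the
// next layer's input pointer (and vice versa) without copying any data, and the current feature
// map dimensions are updated to the dimensions just produced.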
void change_zhizheng(float **data, float **dstdata, int &data_h, int &data_w, int &h_out, int &w_out) {
float *tmp;
tmp = *data;
*data = *dstdata;
*dstdata = tmp;
data_h = h_out;
data_w = w_out;
}
cublasStatus_t
run_cublasgemm(const cublasHandle_t &handle, const float *dev_a, const float *dev_b, float *dev_c, const int HA,
const int WB,
const int WA, int Mode = 0) {
// float *dev_a = 0;
// float *dev_b = 0;
// float *dev_c = 0;
//
//
// cudaError_t cudaStatus;
cublasStatus_t cublasStatus;
    // synchronization function
//cudaThreadSynchronize();
float alpha = 1.0;
float beta = 0.0;
//printf("aaaaaaaaaaa!\n");
int m = WB;
int n = HA;
int k = WA;
int lda = WA;
int ldb = WB;
int ldc = WB;
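    // cuBLAS works in column-major order while these buffers are row-major. Passing the operands
    // swapped (B first, then A) with m = WB, n = HA, k = WA computes B^T * A^T = (A * B)^T in
    // column-major terms, which in memory is exactly the row-major product
    // C[HA x WB] = A[HA x WA] * B[WA x WB].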
cublasStatus = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, dev_b, ldb, dev_a, lda, &beta, dev_c,
ldc);
//cudaThreadSynchronize();
//cudaThreadSynchronize();
return cublasStatus;
}
void
run(const cublasHandle_t &handle, float *dev_data, int *dev_data_h, int *dev_data_w, float *dev_data1,
float *dev_dstdata,
const CNN_Model &dev_cnn_model, int ID, int pad,
int stride, int *h_out, int *w_out) {
const int Mode = 0;
int kern = dev_cnn_model.model_len[ID].k1;
int in_len = dev_cnn_model.model_len[ID].in_len;
int out_len = dev_cnn_model.model_len[ID].out_len;
//printf("aa\n");
im2col_gpu(dev_data, in_len, *dev_data_h, *dev_data_w, kern, pad, stride, dev_data1, h_out, w_out);
// float *b = (float *) malloc(sizeof(float) * 1000);
// cudaMemcpy(b,dev_cnn_model.CNN_W[ID],sizeof(float) * 1000,cudaMemcpyDeviceToHost);
//
// for (int i = 0; i < 1000; ++i) {
//
// printf("%f,%f,%f\n",b[i],b[i],b[i]);
//
// }
//printf("aaa");
float alpha = 1.0;
float beta = 0.0;
clock_t start = clock();
cublasStatus_t cublasStatus;
cublasStatus = run_cublasgemm(handle, dev_cnn_model.CNN_W[ID], dev_data1, dev_dstdata, out_len, (*h_out) * (*w_out),
in_len * kern * kern, Mode);
if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
if (cublasStatus == CUBLAS_STATUS_NOT_INITIALIZED) {
//cout << "CUBLAS 对象实例化出错" << endl;
}
printf("CUBLAS 对象实例化出错\n");
//getchar();
printf("hello,is r\n");
exit(-1);
}
Bi_D_gpu(dev_dstdata,dev_dstdata,(*h_out) * (*w_out),dev_cnn_model.model_len[ID].out_len,
dev_cnn_model.CNN_B[ID],dev_cnn_model.CNN_Prelu[ID]);
//printf("%f,%f,%f,%d\n",c[0],c[1],c[2],ID);
if(ID==190) {
float *c= (float *) malloc((*h_out) * (*w_out)*dev_cnn_model.model_len[ID].out_len*sizeof(float));
// float b = dev_cnn_model.CNN_B[ID][0];
cudaError_t cudaStatus = cudaMemcpy(c,dev_dstdata,(*h_out) * (*w_out)*dev_cnn_model.model_len[ID].out_len*sizeof(float),cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
exit(-1);
}
float *p_im_data = c;//dev_dstdata;
for (int l = 0; l < dev_cnn_model.model_len[ID].out_len; ++l) {
//float b = dev_cnn_model.CNN_B[ID][l];
//float ai = dev_cnn_model.CNN_Prelu[ID][l];
for (int k = 0; k < (*h_out) * (*w_out); ++k) {
//float tmp = *p_im_data + b;
//*p_im_data++ = (tmp > 0 ? tmp : (tmp * ai));
printf("%d:%d:%f\n",ID,l*(*h_out) * (*w_out)+k, *p_im_data++);
}
}
}
}
//__global__ void add_gpu(float *a, float *b, float *c, int n) {
// int i = (blockIdx.x * gridDim.x + blockIdx.y) * blockDim.x * blockDim.y + threadIdx.x * blockDim.x + threadIdx.y;
// if (i < n) {
// c[i] = a[i] + b[i];
// }
//}
void runFace(const cublasHandle_t &handle, float *dataIn, int w, int h, int c, const CNN_Model &dev_cnn_model,
CNN_Data &dev_cnn_data, float *dataOut,
int FeaLen) {
int data_h = h;
int data_w = w;
int h_out;
int w_out;
float *data = dev_cnn_data.data;
float *data1 = dev_cnn_data.data1;
float *dstdata = dev_cnn_data.dstdata;
float *data_cp = dev_cnn_data.data_cp;
double Time = (double) cvGetTickCount();
cudaMemcpy(data, dataIn, sizeof(float) * w * h * c, cudaMemcpyHostToDevice);
//cudaThreadSynchronize();
//printf("ccc\n");
//c_1_1
run(handle, data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 0, 1, 2, &h_out, &w_out);
//cudaMemcpy(data_cp, dstdata, dev_cnn_model.model_len[0].out_len * h_out * w_out * sizeof(float));
cudaMemcpy(data_cp,dstdata,dev_cnn_model.model_len[0].out_len * h_out * w_out * sizeof(float),cudaMemcpyDeviceToDevice);
//cudaThreadSynchronize();
//c_1_2
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 1, 1, 1, &h_out, &w_out);
//c_1_3
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 2, 1, 1, &h_out, &w_out);
//rest_1_3
// float *p = data_cp;
// float *q = dstdata;
// for (int m = 0; m < cnn_model.model_len[0].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[0].out_len * h_out * w_out,dstdata);
//c_2_1
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 3, 1, 2, &h_out, &w_out);
    cudaMemcpy(data_cp, dstdata, dev_cnn_model.model_len[3].out_len * h_out * w_out * sizeof(float), cudaMemcpyDeviceToDevice); // both buffers live on the device
//c_2_2
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 4, 1, 1, &h_out, &w_out);
//c_2_3
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 5, 1, 1, &h_out, &w_out);
//res_2_3
//cudaThreadSynchronize();
/*{
//res_2_3
p = data_cp;
q = dstdata;
for (int m = 0; m < cnn_model.model_len[3].out_len * h_out * w_out; ++m) {
*q = *q + (*p);
q++;
p++;
}
memcpy(data_cp, dstdata, cnn_model.model_len[3].out_len * h_out * w_out * sizeof(float));
//c_2-4
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(data, &data_h, &data_w, data1, dstdata, cnn_model, 6, 1, 1, &h_out, &w_out);
//c_2_5
}*/
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[3].out_len * h_out * w_out,dstdata);
//ADD_G(dstdata,data_cp,dev_cnn_model.model_len[0].out_len * h_out * w_out,dstdata);
//cudaThreadSynchronize();
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[3].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
//memcpy(data_cp, dstdata, cnn_model.model_len[3].out_len * h_out * w_out * sizeof(float));
cudaMemcpy(data_cp, dstdata, dev_cnn_model.model_len[3].out_len * h_out * w_out * sizeof(float),cudaMemcpyDeviceToDevice);
//cudaThreadSynchronize();
//c_2-4
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 6, 1, 1, &h_out, &w_out);
//c_2_5
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 7, 1, 1, &h_out, &w_out);
//res_2_5
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[3].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[3].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
//c_3_1
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 8, 1, 2, &h_out, &w_out);
cudaMemcpy(data_cp, dstdata, dev_cnn_model.model_len[8].out_len * h_out * w_out * sizeof(float),cudaMemcpyDeviceToDevice);
//c_3_2
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 9, 1, 1, &h_out, &w_out);
//c_3_3
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 10, 1, 1, &h_out, &w_out);
//res_3_3
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[8].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[8].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
cudaMemcpy(data_cp, dstdata, dev_cnn_model.model_len[10].out_len * h_out * w_out * sizeof(float),cudaMemcpyDeviceToDevice);
//c_3_4
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 11, 1, 1, &h_out, &w_out);
//c_3_5
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 12, 1, 1, &h_out, &w_out);
//res_3_5
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[10].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[10].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
cudaMemcpy(data_cp, dstdata, dev_cnn_model.model_len[12].out_len * h_out * w_out * sizeof(float),cudaMemcpyDeviceToDevice);
//c_3_6
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 13, 1, 1, &h_out, &w_out);
//c_3_7
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 14, 1, 1, &h_out, &w_out);
//res_3_7
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[12].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[12].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
cudaMemcpy(data_cp, dstdata, dev_cnn_model.model_len[14].out_len * h_out * w_out * sizeof(float),cudaMemcpyDeviceToDevice);
//c_3_8
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 15, 1, 1, &h_out, &w_out);
//c_3_9
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 16, 1, 1, &h_out, &w_out);
//res_3_9
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[14].out_len * h_out * w_out,dstdata);
// p = data_cp;
// q = dstdata;
// for (int m = 0; m < dev_cnn_model.model_len[14].out_len * h_out * w_out; ++m) {
// *q = *q + (*p);
// q++;
// p++;
// }
//c_4_1
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 17, 1, 2, &h_out, &w_out);
cudaMemcpy(data_cp, dstdata, dev_cnn_model.model_len[17].out_len * h_out * w_out * sizeof(float),cudaMemcpyDeviceToDevice);
//c_4_2
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 18, 1, 1, &h_out, &w_out);
//c_4_3
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
run(handle,data, &data_h, &data_w, data1, dstdata, dev_cnn_model, 19, 1, 1, &h_out, &w_out);
//res_4_3
ADD_G(dstdata,data_cp,dev_cnn_model.model_len[17].out_len * h_out * w_out,dstdata);
//fc
change_zhizheng(&data, &dstdata, data_h, data_w, h_out, w_out);
cublasStatus_t cublasStatus;
//Time = (double) cvGetTickCount();
//run_gemm(cnn_model.CNN_fc_w, data, dstdata, FC_W_W, 1, FC_W_H, 1);
cublasStatus = run_cublasgemm(handle, dev_cnn_model.CNN_fc_w, data, dstdata,FC_W_W,1,
FC_W_H, 1);
//float *tmpdata;
//float *dev_tmpdata;
//cudaHostGetDevicePointer((void**)&dev_tmpdata, (void*)dataOut, 0);
ADD_G(dstdata,dev_cnn_model.CNN_fc_b,FC_W_W,dstdata);
//cudaDeviceSynchronize();
//cudaDeviceSynchronize
Time = (double) cvGetTickCount() - Time;
printf("run time11 = %gms\n", Time / (cvGetTickFrequency() * 1000));
Time = (double) cvGetTickCount();
//float *dd= (float *) malloc(FC_W_W * sizeof(float));
//cudaMemcpy(dataOut,dstdata,FC_W_W * sizeof(float),cudaMemcpyDeviceToHost);
//cudaHostGetDevicePointer(&dataOut, dstdata, 0);
// cudaMemcpyAsync( dataOut, dstdata,
// FC_W_W * sizeof(float),
// cudaMemcpyDeviceToHost,
// stream );
// cudaStreamSynchronize( stream ) ;
cudaMemcpy(dataOut, dstdata, FC_W_W * sizeof(float), cudaMemcpyDefault);
// // cudaStreamSynchronize( stream ) ;
Time = (double) cvGetTickCount() - Time;
printf("run time21 = %gms\n", Time / (cvGetTickFrequency() * 1000));
printf("dd:%d:%f\n",511,dataOut[511]);
// for (int k = 0; k < FC_W_W; ++k) {
// printf("dd:%d:%f\n",k,dataOut[k]);
// }
}
int main() {
CNN_Model cnn_model;
init(cnn_model);
CNN_Model dev_cnn_model;
dev_cnn_model.CNN_W = (float **) malloc(W_B_LEN * sizeof(float *));
dev_cnn_model.CNN_B = (float **) malloc(W_B_LEN * sizeof(float *));
dev_cnn_model.CNN_Prelu = (float **) malloc(W_B_LEN * sizeof(float *));
checkCudaErrors(cudaMalloc((void **) (&dev_cnn_model.CNN_fc_w), FC_W_H * FC_W_W * sizeof(float)));
checkCudaErrors(cudaMalloc((void **) (&dev_cnn_model.CNN_fc_b), FC_W_W * sizeof(float)));
checkCudaErrors(cudaMemcpy(dev_cnn_model.CNN_fc_w, cnn_model.CNN_fc_w, sizeof(float) * FC_W_H * FC_W_W,
cudaMemcpyHostToDevice));
checkCudaErrors(
cudaMemcpy(dev_cnn_model.CNN_fc_b, cnn_model.CNN_fc_b, sizeof(float) * FC_W_W, cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMalloc((void **) (&dev_cnn_model.model_len), W_B_LEN * sizeof(MODEL_LEN)));
dev_cnn_model.model_len = (MODEL_LEN *) malloc(W_B_LEN * sizeof(MODEL_LEN));
for (int k = 0; k < W_B_LEN; ++k) {
int k1 = cnn_model.model_len[k].k1;
int k2 = cnn_model.model_len[k].k2;
int in_len = cnn_model.model_len[k].in_len;
int out_len = cnn_model.model_len[k].out_len;
dev_cnn_model.model_len[k].k1 = k1;
dev_cnn_model.model_len[k].k2 = k2;
dev_cnn_model.model_len[k].in_len = in_len;
dev_cnn_model.model_len[k].out_len = out_len;
// checkCudaErrors(cudaMemcpy(&dev_cnn_model.model_len[k].k1, &k1, sizeof(int) * 1, cudaMemcpyHostToDevice));
//
// checkCudaErrors(cudaMemcpy(&dev_cnn_model.model_len[k].k2, &k2, sizeof(int) * 1, cudaMemcpyHostToDevice));
//
// checkCudaErrors(
// cudaMemcpy(&dev_cnn_model.model_len[k].in_len, &in_len, sizeof(int) * 1, cudaMemcpyHostToDevice));
//
// checkCudaErrors(
// cudaMemcpy(&dev_cnn_model.model_len[k].out_len, &out_len, sizeof(int) * 1, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **) &(dev_cnn_model.CNN_W[k]), sizeof(float) * k1 * k2 * in_len * out_len));
checkCudaErrors(cudaMalloc((void **) (&dev_cnn_model.CNN_B[k]), sizeof(float) * 1 * out_len));
checkCudaErrors(cudaMalloc((void **) (&dev_cnn_model.CNN_Prelu[k]), sizeof(float) * 1 * out_len));
checkCudaErrors(
cudaMemcpy(dev_cnn_model.CNN_W[k], cnn_model.CNN_W[k], sizeof(float) * k1 * k2 * in_len * out_len,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dev_cnn_model.CNN_B[k], cnn_model.CNN_B[k], sizeof(float) * 1 * out_len,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dev_cnn_model.CNN_Prelu[k], cnn_model.CNN_Prelu[k], sizeof(float) * 1 * out_len,
cudaMemcpyHostToDevice));
}
const int WIDTH = 96;
const int HEIGHT = 112;
const int Channels = 3;
const int SCALE = 512;
CNN_Data dev_cnn_data;
checkCudaErrors(cudaMalloc((void **) &(dev_cnn_data.data), sizeof(float) * SCALE * WIDTH * HEIGHT * Channels));
checkCudaErrors(cudaMalloc((void **) &(dev_cnn_data.data1), sizeof(float) * SCALE * WIDTH * HEIGHT * Channels));
checkCudaErrors(cudaMalloc((void **) &(dev_cnn_data.dstdata), sizeof(float) * SCALE * WIDTH * HEIGHT * Channels));
checkCudaErrors(cudaMalloc((void **) &(dev_cnn_data.data_cp), sizeof(float) * SCALE * WIDTH * HEIGHT * Channels));
IplImage *bgr = cvLoadImage("/home/yanhao/360.bmp", 1);
unsigned char *bgr_mat = (unsigned char *) malloc(bgr->width * bgr->height * 3);
rgb2Mat(bgr, bgr_mat);
int w = bgr->width;
int h = bgr->height;
int c = bgr->nChannels;
float *img = (float *) malloc(sizeof(float) * w * h * c);
float *dataIn = (float *) malloc(sizeof(float) * w * h * c);
unsigned char *p1 = (unsigned char *) bgr_mat;
float *p2 = img;
for (int i = 0; i < w * h * c; ++i) {
//float tmp = (unsigned char) (*p1++);
*p2++ = ((unsigned char) (*p1++) - 127.5) * 0.0078125;
}
float *p_b = img;
float *p_g = img + 1;
float *p_r = img + 2;
float *data_b = (float *) dataIn;
float *data_g = (float *) (dataIn + w * h);
float *data_r = (float *) (dataIn + 2 * w * h);
for (int j = 0; j < w * h; ++j) {
*data_b++ = *p_b;
*data_g++ = *p_g;
*data_r++ = *p_r;
p_b += 3;
p_g += 3;
p_r += 3;
}
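    // Preprocessing summary: each byte is normalised to roughly [-1, 1] via (v - 127.5) * 0.0078125
    // (0.0078125 == 1/128), and the interleaved BGR image is split into planar B, G, R channels,
    // the channel-major layout the GPU convolution pipeline consumes.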
//memcpy(data, img, w * h * c * sizeof(float));
cublasStatus_t cublasStatus;
cublasHandle_t handle;
cublasStatus = cublasCreate(&handle);
if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
if (cublasStatus == CUBLAS_STATUS_NOT_INITIALIZED) {
//cout << "CUBLAS 对象实例化出错" << endl;
}
printf("CUBLAS 对象实例化出错\n");
//getchar();
printf("hello,is r\n");
return -1;
}
const int FEA_LEN = 512;
//float *fea;
//cudaHostAlloc((void**)&fea,FEA_LEN * sizeof(float));
//cudaMallocHost((void **)&fea, sizeof(float)*FEA_LEN, cudaHostAllocMapped);
float *fea = (float *) malloc(FEA_LEN * sizeof(float));
// double Time = (double) cvGetTickCount();
//
cudaStream_t stream;
cudaStreamCreate( &stream ) ;
runFace(handle, dataIn, w, h, c, dev_cnn_model, dev_cnn_data, fea, FEA_LEN);
printf("%f\n",fea[511]);
//cudaStreamSynchronize( stream ) ;
//cudaFreeHost(fea);
// cudaStreamDestroy( stream );
// for (int l = 0; l < 512; ++l) {
// printf("%f\n",fea[l]);
// }
//getchar();
printf("ok!\n");
}
|
f8497f21b359475a596f59502503d84d7c3aca44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -*- LSST-C++ -*- // fixed format comment for emacs
/*
* LSST Data Management System
* Copyright 2008 - 2012 LSST Corporation.
*
* This product includes software developed by the
* LSST Project (http://www.lsst.org/).
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the LSST License Statement and
* the GNU General Public License along with this program. If not,
* see <http://www.lsstcorp.org/LegalNotices/>.
*/
/**
* \file
*
* \ingroup afw
*
* \brief GPU image warping CUDA implementation
*
* \author Kresimir Cosic.
*/
#define NVCC_COMPILING
#include "lsst/afw/image/LsstImageTypes.h"
#include "lsst/afw/math/detail/CudaLanczos.h"
namespace lsst {
namespace afw {
namespace math {
namespace detail {
namespace gpu {
namespace
{
// CeilDivide: returns the smallest integer n such that n*divisor>=num
// preconditions: num>=0, divisor>0
__device__
int CeilDivide(int num, int divisor) {
return (num + divisor - 1) / divisor;
}
// Min function
template<typename T> __device__
T Min(T a, T b) {
return a < b ? a : b;
}
// Max function
template<typename T> __device__
T Max(T a, T b) {
return a > b ? a : b;
}
// Lanczos function
// precondition: -order <= x <= order
template <typename T>
__device__ T Lanczos(T x, T orderInv)
{
const T PI = 3.1415926535897932384626433832795028;
const T xArg1 = fabs(x) * PI;
if ( xArg1 > 1.0e-5) {
const T xArg2 = xArg1 * orderInv;
return sin(xArg1) * sin(xArg2) / (xArg1 * xArg2);
}
return T(1.0);
}
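// In closed form this is the Lanczos windowed sinc,
//   L(x) = sinc(x) * sinc(x * orderInv)  with  orderInv = 1 / order
// (call sites pass orderInv = 2.0 / mainKernelSize, i.e. order = mainKernelSize / 2);
// the small-argument branch returns L(0) = 1 and avoids dividing by a value near zero.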
// Returns true where the Lanczos or bilinear kernel is exactly zero, i.e. at nonzero integer offsets
__device__ bool IsEqualZeroLanczosOrBilinear(double x)
{
if (x != floor(x)) return false;
if (x == 0) return false;
return true;
}
// Calculates the value of a single output pixel (for MaskedImage)
template<typename SrcPixelT>
__device__ PixelIVM<double> ApplyLanczosFilterMI(
const ImageDataPtr<SrcPixelT> srcImage,
int const srcX, int const srcY,
int const mainKernelSize,
const KernelType maskKernelType,
int const maskKernelSize,
double const kernelFracX, double const kernelFracY
)
{
int const srcTLX = srcX + 1 - mainKernelSize / 2;
int const srcTLY = srcY + 1 - mainKernelSize / 2;
//calculate values of Lanczos function for rows
double kernelRowVal[SIZE_MAX_WARPING_KERNEL];
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
kernelRowVal[kernelX] = Lanczos(1 - mainKernelSize / 2 - kernelFracX + kernelX, 2.0 / mainKernelSize);
}
double colSumImg = 0;
double colSumVar = 0;
MskPixel colSumMsk = 0;
double kernelSum = 0;
if (maskKernelType == KERNEL_TYPE_LANCZOS && mainKernelSize == maskKernelSize) {
// mask kernel is identical to main kernel
for (int kernelY = 0; kernelY < mainKernelSize; kernelY++) {
double rowSumImg = 0;
double rowSumVar = 0;
MskPixel rowSumMsk = 0;
double rowKernelSum = 0;
int srcPosImg = srcTLX + srcImage.strideImg * (srcTLY + kernelY);
int srcPosVar = srcTLX + srcImage.strideVar * (srcTLY + kernelY);
int srcPosMsk = srcTLX + srcImage.strideMsk * (srcTLY + kernelY);
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
double srcImgPixel = srcImage.img[srcPosImg++];
double srcVarPixel = srcImage.var[srcPosVar++];
MskPixel srcMskPixel = srcImage.msk[srcPosMsk++];
double kernelVal = kernelRowVal[kernelX];
if (kernelVal != 0) {
rowSumImg += srcImgPixel * kernelVal;
rowSumVar += srcVarPixel * kernelVal * kernelVal;
rowSumMsk |= srcMskPixel;
rowKernelSum += kernelVal;
}
}
double kernelVal = Lanczos(1 - mainKernelSize / 2 - kernelFracY + kernelY, 2.0 / mainKernelSize);
if (kernelVal != 0) {
colSumImg += rowSumImg * kernelVal;
colSumVar += rowSumVar * kernelVal * kernelVal;
colSumMsk |= rowSumMsk;
kernelSum += rowKernelSum * kernelVal;
}
}
} else { // mask kernel not identical to main kernel
// variance and image kernel
for (int kernelY = 0; kernelY < mainKernelSize; kernelY++) {
double rowSumImg = 0;
double rowSumVar = 0;
double rowKernelSum = 0;
int srcPosImg = srcTLX + srcImage.strideImg * (srcTLY + kernelY);
int srcPosVar = srcTLX + srcImage.strideVar * (srcTLY + kernelY);
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
double srcImgPixel = srcImage.img[srcPosImg++];
double srcVarPixel = srcImage.var[srcPosVar++];
double kernelVal = kernelRowVal[kernelX];
if (kernelVal != 0) {
rowSumImg += srcImgPixel * kernelVal;
rowSumVar += srcVarPixel * kernelVal * kernelVal;
rowKernelSum += kernelVal;
}
}
double kernelVal = Lanczos(1 - mainKernelSize / 2 - kernelFracY + kernelY, 2.0 / mainKernelSize);
if (kernelVal != 0) {
colSumImg += rowSumImg * kernelVal;
colSumVar += rowSumVar * kernelVal * kernelVal;
kernelSum += rowKernelSum * kernelVal;
}
}
if (maskKernelType == KERNEL_TYPE_NEAREST_NEIGHBOR) {
int const srcTLXMask = srcX;
int const srcTLYMask = srcY;
int const kernelX = int(kernelFracX + 0.5);
int const kernelY = int(kernelFracY + 0.5);
int srcPosMsk = srcTLXMask + kernelX + srcImage.strideMsk * (srcTLYMask + kernelY);
MskPixel srcMskPixel = srcImage.msk[srcPosMsk];
colSumMsk = srcMskPixel;
} else { // lanczos or bilinear mask kernel
int const srcTLXMask = srcX + 1 - maskKernelSize / 2;
int const srcTLYMask = srcY + 1 - maskKernelSize / 2;
for (int kernelY = 0; kernelY < maskKernelSize; kernelY++) {
if (IsEqualZeroLanczosOrBilinear(1 - maskKernelSize / 2 - kernelFracY + kernelY) ) continue;
int srcPosMsk = srcTLXMask + srcImage.strideMsk * (srcTLYMask + kernelY);
for (int kernelX = 0; kernelX < maskKernelSize; kernelX++, srcPosMsk++) {
if (!IsEqualZeroLanczosOrBilinear(1 - maskKernelSize / 2 - kernelFracX + kernelX)) {
MskPixel srcMskPixel = srcImage.msk[srcPosMsk];
colSumMsk |= srcMskPixel;
}
}
}
}
}
PixelIVM<double> ret;
ret.img = colSumImg / kernelSum;
ret.var = colSumVar / (kernelSum * kernelSum);
ret.msk = colSumMsk;
return ret;
}
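// Accumulation notes for ApplyLanczosFilterMI: the 2-D Lanczos kernel is separable, so each row is
// filtered with kernelRowVal[] and the row sums are then weighted by the Y-direction kernel value.
// Image samples use weights k, variances use k*k (variance of a weighted sum), and mask bits are
// OR-ed together. Dividing by kernelSum (and kernelSum^2 for the variance) renormalises the
// kernel so a flat input stays flat.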
// Calculates the value of a single output pixel (for plain image)
template<typename SrcPixelT>
__device__ double ApplyLanczosFilter(const SrcPixelT* srcImgPtr, int const srcImgStride, int const srcWidth,
int const srcX, int const srcY,
int const mainKernelSize,
double const kernelFracX, double const kernelFracY
)
{
int const srcTLX = srcX + 1 - mainKernelSize / 2;
int const srcTLY = srcY + 1 - mainKernelSize / 2;
//calculate values of Lanczos function for rows
double kernelRowVal[SIZE_MAX_WARPING_KERNEL];
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
kernelRowVal[kernelX] = Lanczos(1 - mainKernelSize / 2 - kernelFracX + kernelX, 2.0 / mainKernelSize);
}
double colSumImg = 0;
double kernelSum = 0;
for (int kernelY = 0; kernelY < mainKernelSize; kernelY++) {
double rowSumImg = 0;
double rowKernelSum = 0;
int srcPosImg = srcTLX + srcImgStride * (srcTLY + kernelY);
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
double srcImgPixel = srcImgPtr[srcPosImg++];
double kernelVal = kernelRowVal[kernelX];
if (kernelVal != 0) {
rowSumImg += srcImgPixel * kernelVal;
rowKernelSum += kernelVal;
}
}
double kernelVal = Lanczos(1 - mainKernelSize / 2 - kernelFracY + kernelY, 2.0 / mainKernelSize);
if (kernelVal != 0) {
colSumImg += rowSumImg * kernelVal;
kernelSum += rowKernelSum * kernelVal;
}
}
return colSumImg / kernelSum;
}
// calculate the interpolated value given the data for bilinear interpolation
__device__ SPoint2 GetInterpolatedValue(BilinearInterp* const interpBuf, int interpBufPitch,
int interpLen, int x, int y
)
{
int blkX = x / interpLen;
int blkY = y / interpLen;
int subX = x - blkX * interpLen;
int subY = y - blkY * interpLen;
BilinearInterp interp = interpBuf[blkX + blkY* interpBufPitch];
return interp.Interpolate(subX, subY);
}
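// The exact destination->source mapping is evaluated only on a coarse grid (one BilinearInterp
// cell per interpLen x interpLen block of destination pixels, filled in by the host caller);
// GetInterpolatedValue reconstructs the per-pixel source coordinate by bilinear interpolation
// within the cell that contains (x, y).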
} //local namespace ends
/// GPU kernel for lanczos resampling
template<typename DestPixelT, typename SrcPixelT>
__global__ void WarpImageGpuKernel(
bool isMaskedImage,
ImageDataPtr<DestPixelT> destImage,
ImageDataPtr<SrcPixelT> srcImage,
int const mainKernelSize,
const KernelType maskKernelType,
int const maskKernelSize,
SBox2I srcGoodBox,
PixelIVM<DestPixelT> edgePixel,
BilinearInterp* srcPosInterp,
int interpLength
)
{
int const blockSizeX = SIZE_X_WARPING_BLOCK;
int const blockSizeY = SIZE_Y_WARPING_BLOCK;
//number of blocks in X and Y directions
int const blockNX = CeilDivide(destImage.width, blockSizeX);
int const blockNY = CeilDivide(destImage.height, blockSizeY);
int const totalBlocks = blockNX * blockNY;
// calculates pitch of srcPosInterp array
int const srcPosInterpPitch = CeilDivide(destImage.width, interpLength) + 1;
// for each block of destination image
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
        // calculate coordinates of the block that is being processed
int const blkIX = blkI % blockNX;
int const blkIY = blkI / blockNX;
// coordinate of upper left corner of the block
int const blkX = blkIX * blockSizeX;
int const blkY = blkIY * blockSizeY;
// Each thread gets its own pixel.
// The calling function ensures that the number of pixels in a block
// matches the number of threads in a block
        // (or fewer pixels than threads for blocks on the edge)
int const curBlkPixelX = threadIdx.x % blockSizeX;
int const curBlkPixelY = threadIdx.x / blockSizeX;
// calculate the position of a destination pixel for current thread
int const pixelX = blkX + curBlkPixelX;
int const pixelY = blkY + curBlkPixelY;
// On edges: skip calculation for threads that got pixels which are outside the destination image
if (pixelX >= destImage.width || pixelY >= destImage.height) continue;
// srcPos - position in source (of Lanczos kernel center)
// calculated as a linear interpolation of the transformation function
const SPoint2 srcPos = GetInterpolatedValue(srcPosInterp, srcPosInterpPitch,
interpLength, pixelX + 1, pixelY + 1);
double const roundedSrcPtX = floor(srcPos.x);
double const roundedSrcPtY = floor(srcPos.y);
//integer and frac parts of the kernel center
int const srcX = int(roundedSrcPtX);
int const srcY = int(roundedSrcPtY);
double const kernelFracX = srcPos.x - roundedSrcPtX;
double const kernelFracY = srcPos.y - roundedSrcPtY;
// check that destination pixel is mapped from within the source image
if ( srcGoodBox.begX <= srcX && srcX < srcGoodBox.endX
&& srcGoodBox.begY <= srcY && srcY < srcGoodBox.endY
) {
            // relative area: local scale factor of the destination->source mapping (approximate
            // Jacobian determinant), estimated from the source positions of the neighbouring
            // destination pixels; it rescales the resampled values so that flux is approximately
            // conserved where the warp stretches or shrinks the image.
const SPoint2 leftSrcPos = GetInterpolatedValue(srcPosInterp, srcPosInterpPitch,
interpLength, pixelX, pixelY + 1);
const SPoint2 upSrcPos = GetInterpolatedValue(srcPosInterp, srcPosInterpPitch,
interpLength, pixelX + 1, pixelY);
const SVec2 dSrcA = SVec2(leftSrcPos, srcPos);
const SVec2 dSrcB = SVec2(upSrcPos, srcPos);
double const relativeArea = fabs(dSrcA.x * dSrcB.y - dSrcA.y * dSrcB.x);
if (isMaskedImage) {
const PixelIVM<double> sample = ApplyLanczosFilterMI(srcImage,
srcX, srcY,
mainKernelSize, maskKernelType, maskKernelSize,
kernelFracX, kernelFracY
);
int const pixelIimg = pixelY * destImage.strideImg + pixelX;
int const pixelIvar = pixelY * destImage.strideVar + pixelX;
int const pixelImsk = pixelY * destImage.strideMsk + pixelX;
destImage.img[pixelIimg] = sample.img * relativeArea;
destImage.var[pixelIvar] = sample.var * relativeArea * relativeArea;
destImage.msk[pixelImsk] = sample.msk;
} else {
double sample = ApplyLanczosFilter(srcImage.img, srcImage.strideImg, srcImage.width,
srcX, srcY,
mainKernelSize, kernelFracX, kernelFracY
);
int const pixelIimg = pixelY * destImage.strideImg + pixelX; //pixel index in destination image
destImage.img[pixelIimg] = sample * relativeArea;
}
} else {
//set the output pixel to the value of edgePixel
int const pixelIimg = pixelY * destImage.strideImg + pixelX; //pixel index in destination image
destImage.img[pixelIimg] = edgePixel.img;
if (isMaskedImage) {
int const pixelIvar = pixelY * destImage.strideVar + pixelX;
int const pixelImsk = pixelY * destImage.strideMsk + pixelX;
destImage.var[pixelIvar] = edgePixel.var;
destImage.msk[pixelImsk] = edgePixel.msk;
}
}
}
}
// External interface, calls the GPU kernel for lanczos resampling
template<typename DestPixelT, typename SrcPixelT>
void WarpImageGpuCallKernel(bool isMaskedImage,
ImageDataPtr<DestPixelT> destImageGpu,
ImageDataPtr<SrcPixelT> srcImageGpu,
int mainKernelSize,
KernelType maskKernelType,
int maskKernelSize,
SBox2I srcGoodBox,
PixelIVM<DestPixelT> edgePixel,
BilinearInterp* srcPosInterp,
int interpLength
)
{
dim3 block(SIZE_X_WARPING_BLOCK * SIZE_Y_WARPING_BLOCK);
dim3 grid(7 * 16); //divisible by no. of SM's in most GPUs, performs well
hipLaunchKernelGGL(( WarpImageGpuKernel) , dim3(grid), dim3(block), 0, 0,
isMaskedImage,
destImageGpu,
srcImageGpu,
mainKernelSize,
maskKernelType,
maskKernelSize,
srcGoodBox,
edgePixel,
srcPosInterp,
interpLength
);
}
//
// Explicit instantiations
//
/// \cond
#define MASKEDIMAGE(PIXTYPE) afwImage::MaskedImage<PIXTYPE, afwImage::MaskPixel, afwImage::VariancePixel>
#define IMAGE(PIXTYPE) afwImage::Image<PIXTYPE>
#define NL /* */
#define INSTANTIATE(DESTIMAGEPIXELT, SRCIMAGEPIXELT) \
template void WarpImageGpuCallKernel( \
bool isMaskedImage, \
ImageDataPtr<DESTIMAGEPIXELT> destImageGpu, \
ImageDataPtr<SRCIMAGEPIXELT> srcImageGpu, \
int mainKernelSize, \
KernelType maskKernelType, \
int maskKernelSize, \
SBox2I srcGoodBox, \
PixelIVM<DESTIMAGEPIXELT> edgePixel, \
BilinearInterp* srcPosInterp, \
int interpLength \
);
INSTANTIATE(double, double)
INSTANTIATE(double, float)
INSTANTIATE(double, int)
INSTANTIATE(double, boost::uint16_t)
INSTANTIATE(float, float)
INSTANTIATE(float, int)
INSTANTIATE(float, boost::uint16_t)
INSTANTIATE(int, int)
INSTANTIATE(boost::uint16_t, boost::uint16_t)
/// \endcond
}
}
}
}
} //namespace lsst::afw::math::detail::gpu ends
| f8497f21b359475a596f59502503d84d7c3aca44.cu | // -*- LSST-C++ -*- // fixed format comment for emacs
/*
* LSST Data Management System
* Copyright 2008 - 2012 LSST Corporation.
*
* This product includes software developed by the
* LSST Project (http://www.lsst.org/).
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the LSST License Statement and
* the GNU General Public License along with this program. If not,
* see <http://www.lsstcorp.org/LegalNotices/>.
*/
/**
* \file
*
* \ingroup afw
*
* \brief GPU image warping CUDA implementation
*
* \author Kresimir Cosic.
*/
#define NVCC_COMPILING
#include "lsst/afw/image/LsstImageTypes.h"
#include "lsst/afw/math/detail/CudaLanczos.h"
namespace lsst {
namespace afw {
namespace math {
namespace detail {
namespace gpu {
namespace
{
// CeilDivide: returns the smallest integer n such that n*divisor>=num
// preconditions: num>=0, divisor>0
__device__
int CeilDivide(int num, int divisor) {
return (num + divisor - 1) / divisor;
}
// Min function
template<typename T> __device__
T Min(T a, T b) {
return a < b ? a : b;
}
// Max function
template<typename T> __device__
T Max(T a, T b) {
return a > b ? a : b;
}
// Lanczos function
// precondition: -order <= x <= order
template <typename T>
__device__ T Lanczos(T x, T orderInv)
{
const T PI = 3.1415926535897932384626433832795028;
const T xArg1 = fabs(x) * PI;
if ( xArg1 > 1.0e-5) {
const T xArg2 = xArg1 * orderInv;
return sin(xArg1) * sin(xArg2) / (xArg1 * xArg2);
}
return T(1.0);
}
// Returns true where the Lanczos or bilinear kernel is exactly zero, i.e. at nonzero integer offsets
__device__ bool IsEqualZeroLanczosOrBilinear(double x)
{
if (x != floor(x)) return false;
if (x == 0) return false;
return true;
}
// Calculates the value of a single output pixel (for MaskedImage)
template<typename SrcPixelT>
__device__ PixelIVM<double> ApplyLanczosFilterMI(
const ImageDataPtr<SrcPixelT> srcImage,
int const srcX, int const srcY,
int const mainKernelSize,
const KernelType maskKernelType,
int const maskKernelSize,
double const kernelFracX, double const kernelFracY
)
{
int const srcTLX = srcX + 1 - mainKernelSize / 2;
int const srcTLY = srcY + 1 - mainKernelSize / 2;
//calculate values of Lanczos function for rows
double kernelRowVal[SIZE_MAX_WARPING_KERNEL];
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
kernelRowVal[kernelX] = Lanczos(1 - mainKernelSize / 2 - kernelFracX + kernelX, 2.0 / mainKernelSize);
}
double colSumImg = 0;
double colSumVar = 0;
MskPixel colSumMsk = 0;
double kernelSum = 0;
if (maskKernelType == KERNEL_TYPE_LANCZOS && mainKernelSize == maskKernelSize) {
// mask kernel is identical to main kernel
for (int kernelY = 0; kernelY < mainKernelSize; kernelY++) {
double rowSumImg = 0;
double rowSumVar = 0;
MskPixel rowSumMsk = 0;
double rowKernelSum = 0;
int srcPosImg = srcTLX + srcImage.strideImg * (srcTLY + kernelY);
int srcPosVar = srcTLX + srcImage.strideVar * (srcTLY + kernelY);
int srcPosMsk = srcTLX + srcImage.strideMsk * (srcTLY + kernelY);
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
double srcImgPixel = srcImage.img[srcPosImg++];
double srcVarPixel = srcImage.var[srcPosVar++];
MskPixel srcMskPixel = srcImage.msk[srcPosMsk++];
double kernelVal = kernelRowVal[kernelX];
if (kernelVal != 0) {
rowSumImg += srcImgPixel * kernelVal;
rowSumVar += srcVarPixel * kernelVal * kernelVal;
rowSumMsk |= srcMskPixel;
rowKernelSum += kernelVal;
}
}
double kernelVal = Lanczos(1 - mainKernelSize / 2 - kernelFracY + kernelY, 2.0 / mainKernelSize);
if (kernelVal != 0) {
colSumImg += rowSumImg * kernelVal;
colSumVar += rowSumVar * kernelVal * kernelVal;
colSumMsk |= rowSumMsk;
kernelSum += rowKernelSum * kernelVal;
}
}
} else { // mask kernel not identical to main kernel
// variance and image kernel
for (int kernelY = 0; kernelY < mainKernelSize; kernelY++) {
double rowSumImg = 0;
double rowSumVar = 0;
double rowKernelSum = 0;
int srcPosImg = srcTLX + srcImage.strideImg * (srcTLY + kernelY);
int srcPosVar = srcTLX + srcImage.strideVar * (srcTLY + kernelY);
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
double srcImgPixel = srcImage.img[srcPosImg++];
double srcVarPixel = srcImage.var[srcPosVar++];
double kernelVal = kernelRowVal[kernelX];
if (kernelVal != 0) {
rowSumImg += srcImgPixel * kernelVal;
rowSumVar += srcVarPixel * kernelVal * kernelVal;
rowKernelSum += kernelVal;
}
}
double kernelVal = Lanczos(1 - mainKernelSize / 2 - kernelFracY + kernelY, 2.0 / mainKernelSize);
if (kernelVal != 0) {
colSumImg += rowSumImg * kernelVal;
colSumVar += rowSumVar * kernelVal * kernelVal;
kernelSum += rowKernelSum * kernelVal;
}
}
if (maskKernelType == KERNEL_TYPE_NEAREST_NEIGHBOR) {
int const srcTLXMask = srcX;
int const srcTLYMask = srcY;
int const kernelX = int(kernelFracX + 0.5);
int const kernelY = int(kernelFracY + 0.5);
int srcPosMsk = srcTLXMask + kernelX + srcImage.strideMsk * (srcTLYMask + kernelY);
MskPixel srcMskPixel = srcImage.msk[srcPosMsk];
colSumMsk = srcMskPixel;
} else { // lanczos or bilinear mask kernel
int const srcTLXMask = srcX + 1 - maskKernelSize / 2;
int const srcTLYMask = srcY + 1 - maskKernelSize / 2;
for (int kernelY = 0; kernelY < maskKernelSize; kernelY++) {
if (IsEqualZeroLanczosOrBilinear(1 - maskKernelSize / 2 - kernelFracY + kernelY) ) continue;
int srcPosMsk = srcTLXMask + srcImage.strideMsk * (srcTLYMask + kernelY);
for (int kernelX = 0; kernelX < maskKernelSize; kernelX++, srcPosMsk++) {
if (!IsEqualZeroLanczosOrBilinear(1 - maskKernelSize / 2 - kernelFracX + kernelX)) {
MskPixel srcMskPixel = srcImage.msk[srcPosMsk];
colSumMsk |= srcMskPixel;
}
}
}
}
}
PixelIVM<double> ret;
ret.img = colSumImg / kernelSum;
ret.var = colSumVar / (kernelSum * kernelSum);
ret.msk = colSumMsk;
return ret;
}
// Calculates the value of a single output pixel (for plain image)
template<typename SrcPixelT>
__device__ double ApplyLanczosFilter(const SrcPixelT* srcImgPtr, int const srcImgStride, int const srcWidth,
int const srcX, int const srcY,
int const mainKernelSize,
double const kernelFracX, double const kernelFracY
)
{
int const srcTLX = srcX + 1 - mainKernelSize / 2;
int const srcTLY = srcY + 1 - mainKernelSize / 2;
//calculate values of Lanczos function for rows
double kernelRowVal[SIZE_MAX_WARPING_KERNEL];
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
kernelRowVal[kernelX] = Lanczos(1 - mainKernelSize / 2 - kernelFracX + kernelX, 2.0 / mainKernelSize);
}
double colSumImg = 0;
double kernelSum = 0;
for (int kernelY = 0; kernelY < mainKernelSize; kernelY++) {
double rowSumImg = 0;
double rowKernelSum = 0;
int srcPosImg = srcTLX + srcImgStride * (srcTLY + kernelY);
for (int kernelX = 0; kernelX < mainKernelSize; kernelX++) {
double srcImgPixel = srcImgPtr[srcPosImg++];
double kernelVal = kernelRowVal[kernelX];
if (kernelVal != 0) {
rowSumImg += srcImgPixel * kernelVal;
rowKernelSum += kernelVal;
}
}
double kernelVal = Lanczos(1 - mainKernelSize / 2 - kernelFracY + kernelY, 2.0 / mainKernelSize);
if (kernelVal != 0) {
colSumImg += rowSumImg * kernelVal;
kernelSum += rowKernelSum * kernelVal;
}
}
return colSumImg / kernelSum;
}
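/*
Sketch of the quantity computed by ApplyLanczosFilterMI / ApplyLanczosFilter,
assuming the helper Lanczos(x, t) defined elsewhere evaluates the usual kernel
sinc(pi*x) * sinc(pi*x*t) (so t = 2.0/mainKernelSize corresponds to order
mainKernelSize/2):

    out = ( sum_ky L(dy_ky) * sum_kx L(dx_kx) * src[srcTLY+ky][srcTLX+kx] )
          / ( sum_ky L(dy_ky) * sum_kx L(dx_kx) )

with dx_kx = 1 - mainKernelSize/2 - kernelFracX + kx (and similarly for dy_ky).
The denominator is what rowKernelSum/kernelSum accumulate, so the sampled weights
are renormalized to sum to one for every sub-pixel offset.
*/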
// calculate the interpolated value given the data for bilinear interpolation
__device__ SPoint2 GetInterpolatedValue(BilinearInterp* const interpBuf, int interpBufPitch,
int interpLen, int x, int y
)
{
int blkX = x / interpLen;
int blkY = y / interpLen;
int subX = x - blkX * interpLen;
int subY = y - blkY * interpLen;
BilinearInterp interp = interpBuf[blkX + blkY* interpBufPitch];
return interp.Interpolate(subX, subY);
}
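// Illustrative example of the block indexing above: with interpLen = 20, the call
// GetInterpolatedValue(buf, pitch, 20, 45, 7) reads the interpolation record for
// block (blkX, blkY) = (2, 0) and evaluates it at sub-position (subX, subY) = (5, 7);
// each BilinearInterp entry covers one interpLen x interpLen tile of destination pixels.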
} //local namespace ends
/// GPU kernel for lanczos resampling
template<typename DestPixelT, typename SrcPixelT>
__global__ void WarpImageGpuKernel(
bool isMaskedImage,
ImageDataPtr<DestPixelT> destImage,
ImageDataPtr<SrcPixelT> srcImage,
int const mainKernelSize,
const KernelType maskKernelType,
int const maskKernelSize,
SBox2I srcGoodBox,
PixelIVM<DestPixelT> edgePixel,
BilinearInterp* srcPosInterp,
int interpLength
)
{
int const blockSizeX = SIZE_X_WARPING_BLOCK;
int const blockSizeY = SIZE_Y_WARPING_BLOCK;
//number of blocks in X and Y directions
int const blockNX = CeilDivide(destImage.width, blockSizeX);
int const blockNY = CeilDivide(destImage.height, blockSizeY);
int const totalBlocks = blockNX * blockNY;
// calculates pitch of srcPosInterp array
int const srcPosInterpPitch = CeilDivide(destImage.width, interpLength) + 1;
// for each block of destination image
for (int blkI = blockIdx.x; blkI < totalBlocks; blkI += gridDim.x)
{
// calculate coordinates of the block that is being processed
int const blkIX = blkI % blockNX;
int const blkIY = blkI / blockNX;
// coordinate of upper left corner of the block
int const blkX = blkIX * blockSizeX;
int const blkY = blkIY * blockSizeY;
// Each thread gets its own pixel.
// The calling function ensures that the number of pixels in a block
// matches the number of threads in a block
// (or fewer pixels than threads for blocks on the edge)
int const curBlkPixelX = threadIdx.x % blockSizeX;
int const curBlkPixelY = threadIdx.x / blockSizeX;
// calculate the position of a destination pixel for current thread
int const pixelX = blkX + curBlkPixelX;
int const pixelY = blkY + curBlkPixelY;
// On edges: skip calculation for threads that got pixels which are outside the destination image
if (pixelX >= destImage.width || pixelY >= destImage.height) continue;
// srcPos - position in source (of Lanczos kernel center)
// calculated as a linear interpolation of the transformation function
const SPoint2 srcPos = GetInterpolatedValue(srcPosInterp, srcPosInterpPitch,
interpLength, pixelX + 1, pixelY + 1);
double const roundedSrcPtX = floor(srcPos.x);
double const roundedSrcPtY = floor(srcPos.y);
//integer and frac parts of the kernel center
int const srcX = int(roundedSrcPtX);
int const srcY = int(roundedSrcPtY);
double const kernelFracX = srcPos.x - roundedSrcPtX;
double const kernelFracY = srcPos.y - roundedSrcPtY;
// check that destination pixel is mapped from within the source image
if ( srcGoodBox.begX <= srcX && srcX < srcGoodBox.endX
&& srcGoodBox.begY <= srcY && srcY < srcGoodBox.endY
) {
//relative area
const SPoint2 leftSrcPos = GetInterpolatedValue(srcPosInterp, srcPosInterpPitch,
interpLength, pixelX, pixelY + 1);
const SPoint2 upSrcPos = GetInterpolatedValue(srcPosInterp, srcPosInterpPitch,
interpLength, pixelX + 1, pixelY);
const SVec2 dSrcA = SVec2(leftSrcPos, srcPos);
const SVec2 dSrcB = SVec2(upSrcPos, srcPos);
double const relativeArea = fabs(dSrcA.x * dSrcB.y - dSrcA.y * dSrcB.x);
if (isMaskedImage) {
const PixelIVM<double> sample = ApplyLanczosFilterMI(srcImage,
srcX, srcY,
mainKernelSize, maskKernelType, maskKernelSize,
kernelFracX, kernelFracY
);
int const pixelIimg = pixelY * destImage.strideImg + pixelX;
int const pixelIvar = pixelY * destImage.strideVar + pixelX;
int const pixelImsk = pixelY * destImage.strideMsk + pixelX;
destImage.img[pixelIimg] = sample.img * relativeArea;
destImage.var[pixelIvar] = sample.var * relativeArea * relativeArea;
destImage.msk[pixelImsk] = sample.msk;
} else {
double sample = ApplyLanczosFilter(srcImage.img, srcImage.strideImg, srcImage.width,
srcX, srcY,
mainKernelSize, kernelFracX, kernelFracY
);
int const pixelIimg = pixelY * destImage.strideImg + pixelX; //pixel index in destination image
destImage.img[pixelIimg] = sample * relativeArea;
}
} else {
//set the output pixel to the value of edgePixel
int const pixelIimg = pixelY * destImage.strideImg + pixelX; //pixel index in destination image
destImage.img[pixelIimg] = edgePixel.img;
if (isMaskedImage) {
int const pixelIvar = pixelY * destImage.strideVar + pixelX;
int const pixelImsk = pixelY * destImage.strideMsk + pixelX;
destImage.var[pixelIvar] = edgePixel.var;
destImage.msk[pixelImsk] = edgePixel.msk;
}
}
}
}
// External interface, calls the GPU kernel for lanczos resampling
template<typename DestPixelT, typename SrcPixelT>
void WarpImageGpuCallKernel(bool isMaskedImage,
ImageDataPtr<DestPixelT> destImageGpu,
ImageDataPtr<SrcPixelT> srcImageGpu,
int mainKernelSize,
KernelType maskKernelType,
int maskKernelSize,
SBox2I srcGoodBox,
PixelIVM<DestPixelT> edgePixel,
BilinearInterp* srcPosInterp,
int interpLength
)
{
dim3 block(SIZE_X_WARPING_BLOCK * SIZE_Y_WARPING_BLOCK);
dim3 grid(7 * 16); //divisible by no. of SM's in most GPUs, performs well
WarpImageGpuKernel <<< grid, block, 0>>>(
isMaskedImage,
destImageGpu,
srcImageGpu,
mainKernelSize,
maskKernelType,
maskKernelSize,
srcGoodBox,
edgePixel,
srcPosInterp,
interpLength
);
}
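// Launch-configuration note: WarpImageGpuKernel walks the destination blocks with a
// grid-stride loop (blkI += gridDim.x), so the fixed grid of 7*16 = 112 thread blocks
// is correct for any image size; 112 is used because it divides evenly across common
// SM counts. Rough illustration (assuming SIZE_X_WARPING_BLOCK and SIZE_Y_WARPING_BLOCK
// are both 16, which is not guaranteed here): a 4096 x 4096 destination has
// 256 * 256 = 65536 pixel blocks, so each launched thread block processes about 585 of them.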
//
// Explicit instantiations
//
/// \cond
#define MASKEDIMAGE(PIXTYPE) afwImage::MaskedImage<PIXTYPE, afwImage::MaskPixel, afwImage::VariancePixel>
#define IMAGE(PIXTYPE) afwImage::Image<PIXTYPE>
#define NL /* */
#define INSTANTIATE(DESTIMAGEPIXELT, SRCIMAGEPIXELT) \
template void WarpImageGpuCallKernel( \
bool isMaskedImage, \
ImageDataPtr<DESTIMAGEPIXELT> destImageGpu, \
ImageDataPtr<SRCIMAGEPIXELT> srcImageGpu, \
int mainKernelSize, \
KernelType maskKernelType, \
int maskKernelSize, \
SBox2I srcGoodBox, \
PixelIVM<DESTIMAGEPIXELT> edgePixel, \
BilinearInterp* srcPosInterp, \
int interpLength \
);
INSTANTIATE(double, double)
INSTANTIATE(double, float)
INSTANTIATE(double, int)
INSTANTIATE(double, boost::uint16_t)
INSTANTIATE(float, float)
INSTANTIATE(float, int)
INSTANTIATE(float, boost::uint16_t)
INSTANTIATE(int, int)
INSTANTIATE(boost::uint16_t, boost::uint16_t)
/// \endcond
}
}
}
}
} //namespace lsst::afw::math::detail::gpu ends
|
90673acd1486b36e01a23cf30f98283cb6e238e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Tue Aug 13 16:45:10 2013
*/
#include "common_magma.h"
#define PRECISION_c
/* The version for Tesla can be found in chemv_tesla.cu */
#if (GPUSHMEM >= 200)
#define magmablas_chemv_200 magmablas_chemv
#define magmablas_chemv2_200 magmablas_chemv2
#define NB_64
/*
turning on NB_64 calls the blocksize = 64 routine,
otherwise it calls the blocksize = 32 routine, which is 10% faster in z,c precision
*/
#ifdef NB_64 // using block size 64
#define chemv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
#else // using block size 32
#define chemv_bs 32
#define thread_x 32
#define thread_y 8
#define bank_shift 33
#define SWITCH 1400
#endif
/*******************************************************************************
* Functions for each specific case - Lower case
*/
#ifdef NB_64
__global__ void
magmablas_chemv_200_L_special(
int n, magmaFloatComplex alpha,
const magmaFloatComplex *A, int lda,
const magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
__shared__ magmaFloatComplex la [quarter_thread_x][thread_x+2];
__shared__ magmaFloatComplex buff [thread_x];
__shared__ magmaFloatComplex buff2 [thread_x];
magmaFloatComplex tr[4];
magmaFloatComplex b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty ) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx ) * incx;
A += break_d * (lda+1);
A += ty_* lda + tx_;
if( ty == 0 ) {
buff[tx] = x[0];
} // obtain the vector x and store it in buff;
tx = tx_; ty = ty_;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_ * 4 + 4); i++) {
if ( i < tx_ ) {
la[0][bank_shift * tx_ + i] = cuConjf( la[0][ i * bank_shift + tx_] );
}
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift * tx_ + j + ty_ * 4] ) * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
if( ty_ == 0 )
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res1,0);
}
__syncthreads();
MAGMA_C_SET2REAL(res, 0);
A += half_thread_x + half_thread_x *lda;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf( la[0][bank_shift*i+tx_] );
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift*tx_+j+ty_*4] ) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
magmaFloatComplex res2;
MAGMA_C_SET2REAL(res2,0);
if( ty_ == 1 )
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res2,0);
}
__syncthreads();
MAGMA_C_SET2REAL(res,0);
A -= half_thread_x *lda;
MAGMA_C_SET2REAL(res_,0);
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
if( ty_ == 1 )
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res2,0);
}
__syncthreads();
la[0][bank_shift*tx_+ty_]= res_;
__syncthreads();
if( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else
{
MAGMA_C_SET2REAL(res1,0);
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if( ty_ == 0 && ty == 0 )
res = res1;
else if( ty_ == 1 && ty == 0 )
res = res2;
else
{
MAGMA_C_SET2REAL(res,0);
}
A -= ty_* lda;
A -= tx_;
A= A - lda * blkc * thread_x;
x= x - blkc * thread_x *incx;
A += 4 * ty* lda;
A += tx;
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
if( blkc * thread_x >= thread_x) {
#pragma unroll
for(int i=0; i < thread_x; i += thread_x )
{
MAGMA_C_SET2REAL(res_,0);
count++;
if( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++)
{
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++)
{
res += tr[j] * buff2[ quarter_thread_x * k + ty * 4 + j];
la[( j + ty * 4)][tx] = cuConjf(tr[j]) * buff[tx];
}
__syncthreads();
MAGMA_C_SET2REAL(res_,0);
#pragma unroll
for(int j=0; j < 4; j++)
{
res_ += la[tx_][ty_*4+j];
}
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k]= b[k];
}
__syncthreads();
if( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
}
for(int i=thread_x; i < (blkc * thread_x); i += thread_x )
{
MAGMA_C_SET2REAL(res_,0);
count++;
if( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++)
{
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++)
{
res += tr[j] * buff2[quarter_thread_x*k + ty*4+(j)];
la[( j + ty * 4)][tx] = cuConjf( tr[j] )* buff[tx];
}
__syncthreads();
MAGMA_C_SET2REAL(res_,0);
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k]= b[k];
}
__syncthreads();
if( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx]= res;
__syncthreads();
if( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc) ] = res;
}
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
magmablas_chemv_200_L_generic(
int n, magmaFloatComplex alpha,
const magmaFloatComplex *A, int lda,
const magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int m_mod_thread_x)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
__shared__ magmaFloatComplex la [quarter_thread_x][thread_x+2];
__shared__ magmaFloatComplex buff [thread_x];
__shared__ magmaFloatComplex buff2[thread_x];
magmaFloatComplex tr[4];
magmaFloatComplex b[8];
int break_d = thread_x * blkc;
const int td = (thread_x * ty ) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC+= break_d + tx;
x += (break_d + tx ) * incx;
A += break_d * (lda+1);
A += lda * ty_;
int trackA;
if( blkc == ( gridDim.x - 1 ) ) {
if( ty == 0 ) {
if( tx > m_mod_thread_x )
{
MAGMA_C_SET2REAL(buff[tx],0);
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
}
else {
if( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
// Somehow merging these two if-else blocks creates a problem
// It could be a potential bug -- from synchronization, CUDA, or the compiler
if( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j =0; j < half_thread_x; j += 8) {
if( ( ty_ + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 9999);
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j =0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_*4+4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf(la[0][i*bank_shift+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx_+j+ty_*4])* buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
if( ty_ == 0 )
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res1,0);
}
__syncthreads();
MAGMA_C_SET2REAL(res,0);
if( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8) {
if( ( ty_ + j+half_thread_x ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 99999);
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x *lda;
}
else {
A += half_thread_x + half_thread_x *lda;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf(la[0][bank_shift*i+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
magmaFloatComplex res2;
MAGMA_C_SET2REAL(res2,0);
if( ty_ == 1 )
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res2,0);
}
__syncthreads();
MAGMA_C_SET2REAL(res,0);
MAGMA_C_SET2REAL(res_,0);
A -= half_thread_x *lda;
if( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8)
if( ( ty_ + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(tr[j/8], 99999);
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j =0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
if( ty_ == 1 )
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res2,0);
}
__syncthreads();
la[0][bank_shift*tx_+ty_]= res_;
__syncthreads();
if( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else
{
MAGMA_C_SET2REAL(res1,0);
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if( ty_ == 0 && ty == 0 )
res = res1;
else if( ty_ == 1 && ty == 0 )
res = res2;
else
{
MAGMA_C_SET2REAL(res,0);
}
A -= ty_* lda;
A -= tx_;
A= A - lda*break_d;
x= x - break_d *incx;
A += 4 * ty* lda;
if( blkc == ( gridDim.x - 1 ) ) {
if(tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else{
A += tx;
}
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
#pragma unroll
for(int j=0; j < 4; j++)
b[j] = buff[ty_*4+j];
if( break_d > 0)
#pragma unroll
for(int i=0; i < thread_x; i += thread_x ) {
MAGMA_C_SET2REAL(res_,0);
count++;
if( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = cuConjf(tr[j]);
}
__syncthreads();
MAGMA_C_SET2REAL(res_, 0);
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j]* b[j];
b[4+k] = res_;
__syncthreads();
A += lda* quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k]= b[4+k];
}
__syncthreads();
if( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
for(int i=thread_x; i < break_d; i += thread_x ) {
MAGMA_C_SET2REAL(res_, 0);
count++;
if(ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = cuConjf(tr[j]);
}
__syncthreads();
MAGMA_C_SET2REAL(res_, 0);
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j]* b[j];
b[4+k] = res_;
__syncthreads();
A += lda* quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k]= b[4+k];
}
__syncthreads();
if( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx]= res;
__syncthreads();
if( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
}
__global__ void
magmablas_chemv_200_L_update(
int n, magmaFloatComplex alpha,
const magmaFloatComplex *A, int lda,
const magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC )
{
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * thread_x + tx;
magmaFloatComplex Ca;
MAGMA_C_SET2REAL(Ca, 0);
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*thread_x; i < n; i += thread_x) {
Ca += WC[0];
WC += thread_x;
}
if( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
}
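/* Restating what the update kernel computes: for output row ind = blockIdx.x*thread_x + tx
   in row block b = blockIdx.x, the loop above accumulates
       Ca = sum over k >= 0 with (b + k)*thread_x < n of WC[ ind + b*lda + k*thread_x ],
   i.e. it reduces the partial dot products that magmablas_chemv_200_L_special/_generic
   staged in the dC_work workspace, and then applies y[ind*incy] = beta*y[ind*incy] + alpha*Ca.
   The workspace is allocated as lda*(blocks+1) elements in magmablas_chemv_200 below
   (and must be at least n*blocks elements for magmablas_chemv2_200).
*/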
extern "C"
void magmablas_chemv_200_L(
magma_int_t m, magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *X, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *Y, magma_int_t incy,
magmaFloatComplex *dC_work)
{
magma_int_t blocks;
if (m % chemv_bs == 0)
blocks = m / chemv_bs;
else
blocks = m / chemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(chemv_bs, 1, 1);
/*
* If matrix size is multiple of chemv_bs, we use a specific code.
* otherwise, we call the generic case.
*/
if(m % chemv_bs == 0 ) {
hipLaunchKernelGGL(( magmablas_chemv_200_L_special) , dim3(grid), dim3(threads), 0, magma_stream ,
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work);
}
else{
magma_int_t m_mod_thread_x = m%chemv_bs - 1;
hipLaunchKernelGGL(( magmablas_chemv_200_L_generic) , dim3(grid), dim3(threads), 0, magma_stream ,
m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x);
}
hipLaunchKernelGGL(( magmablas_chemv_200_L_update), dim3(grid), dim3(threads_u), 0, magma_stream ,
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work);
}
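/* Dispatch example (illustrative): with chemv_bs = 64, m = 1000 gives m % chemv_bs = 40,
   so blocks = 1000/64 + 1 = 16 and the _generic kernel runs with m_mod_thread_x = 39
   (the last, partial block covers rows 960..999); m = 1024 is an exact multiple, so the
   _special kernel runs with blocks = 16.
*/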
#else // not defined NB_64
/*******************************************************************************
* Functions for each specific case - Lower case nb = 32
*/
__global__ void
magmablas_chemv_200_L_special_32_s(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int nb)
{
if(blockIdx.y > blockIdx.x) return;
int tx = threadIdx.x;
int ty = threadIdx.y;
magmaFloatComplex res = MAGMA_C_ZERO; // used to scan the row
magmaFloatComplex res_ = MAGMA_C_ZERO; // used to scan the column
__shared__ magmaFloatComplex la [1056];
__shared__ magmaFloatComplex buff [chemv_bs];
__shared__ magmaFloatComplex buff2 [chemv_bs];
int break_d = chemv_bs * blockIdx.x;
A += break_d;
A += lda * ty + tx;
A += lda * (blockIdx.y ) * chemv_bs; //
x += tx;
if ( blockIdx.x == blockIdx.y ) // diagonal
{
x += (blockIdx.y * chemv_bs) * incx;
if( ty == 0 )
{
buff[tx] = x[0];
} // obtain the vector x and store it in buff;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
la[ bank_shift * (ty+j) + tx] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty*4; i < (ty * 4 + 4); i++)
{
if ( i < tx )
{
la[bank_shift * tx + i] = cuConjf(la[ i * bank_shift + tx]);
}
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4];
__syncthreads();
}
else // non diagonal
{
x += (blockIdx.x * chemv_bs) * incx;
if( ty == 0 )
{
buff[tx] = x[0];
} // obtain the vector x and store it in buff; buff holds the x elements for the reflected (upper-side) contribution, as opposed to buff2;
x -= (blockIdx.x * chemv_bs ) * incx;
x += (blockIdx.y * chemv_bs ) * incx;
if( ty == 0 )
{
buff2[tx] = x[0];
} // obtain the vector x and store it in buff2;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
{
la[ bank_shift * (ty+j) + tx] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
{
res += (la[bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
res_ += cuConjf(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; //
}
__syncthreads();
la[bank_shift*tx+ty]= res_;
__syncthreads();
if( ty == 0 )
{
res_ = la[tx*bank_shift+0]+la[tx*bank_shift+1]
+ la[tx*bank_shift+2]+la[tx*bank_shift+3]
+ la[tx*bank_shift+4]+la[tx*bank_shift+5]
+ la[tx*bank_shift+6]+la[tx*bank_shift+7];
WC[ tx + blockIdx.y * chemv_bs + lda * blockIdx.x ] = res_; // write to its corresponding upper side position
}
__syncthreads();
} // end if else
la[bank_shift*tx+ty]= res;
__syncthreads();
if( ty == 0 )
{
res = la[tx*bank_shift+0]+la[tx*bank_shift+1]
+ la[tx*bank_shift+2]+la[tx*bank_shift+3]
+ la[tx*bank_shift+4]+la[tx*bank_shift+5]
+ la[tx*bank_shift+6]+la[tx*bank_shift+7];
WC[ tx + blockIdx.x * chemv_bs + lda * blockIdx.y] = res;
}
}
__global__ void
magmablas_chemv_200_L_special_32(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int nb)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO; // used to scan the row
magmaFloatComplex res_ = MAGMA_C_ZERO; // used to scan the column
magmaFloatComplex res1 = MAGMA_C_ZERO; // temp for res
magmaFloatComplex res2 = MAGMA_C_ZERO; // temp for res_
__shared__ magmaFloatComplex la [16][64+2];
__shared__ magmaFloatComplex buff [chemv_bs];
__shared__ magmaFloatComplex buff2 [chemv_bs];
int break_d = chemv_bs * blkc;
x += (break_d + tx ) * incx;
A += break_d;
A += ty * lda + tx;
if( ty == 0 )
{
buff[tx] = x[0];
} // obtain the vector x and store it in buff;
{
A += lda * (blkc) * chemv_bs; // change
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty*4; i < (ty * 4 + 4); i++) {
if ( i < tx ) {
la[0][bank_shift * tx + i] = cuConjf( la[0][ i * bank_shift + tx] );
}
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4];
__syncthreads();
A -= lda * (blkc) * chemv_bs;
}
x -= blkc * chemv_bs *incx;
x= x- tx*incx;
int wc_c = 0;
int count = 0;
WC += break_d + tx;
if( blkc > 0) {
for(int s=0; s < (blkc * chemv_bs); s += chemv_bs )
{
MAGMA_C_SET2REAL(res_,0);
count++;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
if( ty == 0 )
{
buff2[tx] = x[tx];
} // obtain the vector x and store it in buff2;
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
{
res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate column
}
__syncthreads();
la[0][bank_shift*tx+ty]= res_;
__syncthreads();
if( ty == 0 )
{
res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[wc_c*lda ] = res2;
}
__syncthreads();
wc_c += 1;
x += chemv_bs;
A += lda * chemv_bs;
}
}
la[0][bank_shift*tx+ty]= res;
__syncthreads();
if( ty == 0 )
{
res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[0+lda*(blkc)] = res1;
}
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
magmablas_chemv_200_L_generic_32_s(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int m_mod_thread_x,
int nb)
{
if(blockIdx.y > blockIdx.x) return;
int tx = threadIdx.x;
int ty = threadIdx.y;
magmaFloatComplex res = MAGMA_C_ZERO; // used to scan the row
magmaFloatComplex res_ = MAGMA_C_ZERO; // used to scan the column
__shared__ magmaFloatComplex la [1056];
__shared__ magmaFloatComplex buff [chemv_bs];
__shared__ magmaFloatComplex buff2 [chemv_bs];
int break_d = chemv_bs * blockIdx.x;
A += break_d;
A += lda * ty;
A += lda * (blockIdx.y ) * chemv_bs; //
x += tx;
x += (blockIdx.x * chemv_bs) * incx;
int trackA;
if( blockIdx.x == ( gridDim.x - 1 ) ) {
if( ty == 0 ) {
if( tx > m_mod_thread_x )
{
MAGMA_C_SET2REAL(buff[tx],0);
}
else
buff[tx] = x[0];
}
if ( tx > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx;
A += trackA;
}
else {
if( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx;
A += trackA;
}
__syncthreads();
if ( blockIdx.x == blockIdx.y) // diagonal
{
if( blockIdx.x == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j =0; j < chemv_bs; j += 8) {
if( ( ty + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[bank_shift*(ty+j)+tx], 9999);
}
else
la[bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
else {
#pragma unroll
for(int j =0; j < chemv_bs; j += 8) {
la[bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty*4; i < (ty * 4 + 4); i++)
{
if ( i < tx )
{
la[bank_shift * tx + i] = cuConjf(la[ i * bank_shift + tx]);
}
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4];
__syncthreads();
}
else // non diagonal
{
// obtain the vector x and store it in buff; buff holds the x elements for the reflected (upper-side) contribution, as opposed to buff2;
x -= (blockIdx.x * chemv_bs ) * incx;
x += (blockIdx.y * chemv_bs ) * incx;
if( ty == 0 )
{
buff2[tx] = x[0];
} // obtain the vector x and store it in buff2;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
{
la[ bank_shift * (ty+j) + tx] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
{
res += (la[bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
res_ += cuConjf(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; //
}
__syncthreads();
la[bank_shift*tx+ty]= res_;
__syncthreads();
if( ty == 0 )
{
res_ = la[tx*bank_shift+0]+la[tx*bank_shift+1]
+ la[tx*bank_shift+2]+la[tx*bank_shift+3]
+ la[tx*bank_shift+4]+la[tx*bank_shift+5]
+ la[tx*bank_shift+6]+la[tx*bank_shift+7];
WC[ tx + blockIdx.y * chemv_bs + lda * blockIdx.x ] = res_; // write to its corresponding upper side position
}
__syncthreads();
} // end if else
la[bank_shift*tx+ty]= res;
__syncthreads();
if( ty == 0 )
{
res = la[tx*bank_shift+0]+la[tx*bank_shift+1]
+ la[tx*bank_shift+2]+la[tx*bank_shift+3]
+ la[tx*bank_shift+4]+la[tx*bank_shift+5]
+ la[tx*bank_shift+6]+la[tx*bank_shift+7];
WC[ tx + blockIdx.x * chemv_bs + lda * blockIdx.y] = res;
}
}
__global__ void
magmablas_chemv_200_L_generic_32(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int m_mod_thread_x,
int nb)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
magmaFloatComplex res2 = MAGMA_C_ZERO;
__shared__ magmaFloatComplex la [16][64+2];
__shared__ magmaFloatComplex buff [chemv_bs];
__shared__ magmaFloatComplex buff2 [chemv_bs];
int break_d = chemv_bs * blkc;
x += (break_d + tx ) * incx;
A += break_d;
A += lda * ty;
int trackA;
if( blkc == ( gridDim.x - 1 ) ) {
if( ty == 0 ) {
if( tx > m_mod_thread_x )
{
MAGMA_C_SET2REAL(buff[tx],0);
}
else
buff[tx] = x[0];
}
if ( tx > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx;
A += trackA;
}
else {
if( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx;
A += trackA;
}
{
A += lda * (blkc) * chemv_bs; // change
// Somehow merging these two if-else blocks creates a problem
// It could be a potential bug -- from synchronization, CUDA, or the compiler
if( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j =0; j < chemv_bs; j += 8) {
if( ( ty + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999);
}
else
la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
else {
#pragma unroll
for(int j =0; j < chemv_bs; j += 8) {
la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty*4; i < (ty*4+4); i++) {
if ( i < tx ) {
la[0][bank_shift*tx+i] = cuConjf(la[0][i*bank_shift+tx]);
}
else
la[0][bank_shift*tx+i] = la[0][bank_shift*tx+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4];
__syncthreads();
A -= lda * (blkc) * chemv_bs;
}
__syncthreads();
x = x - break_d *incx;
x = x - tx * incx;
int wc_c = 0;
int count = 0;
WC += break_d + tx;
if( blkc > 0) {
for(int s=0; s < (blkc * chemv_bs); s += chemv_bs )
{
MAGMA_C_SET2REAL(res_,0);
count++;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
__syncthreads();
if( ty == 0 )
{
buff2[tx] = x[tx];
} // obtain the vector x and store it in buff2;
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
{
res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; // iterate column
}
__syncthreads();
la[0][bank_shift*tx+ty]= res_;
__syncthreads();
if( ty == 0 )
{
res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[wc_c*lda ] = res2;
}
__syncthreads();
wc_c += 1;
x += chemv_bs;
A += lda * chemv_bs;
}
}
la[0][bank_shift*tx+ty]= res;
__syncthreads();
if( ty == 0 )
{
res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[0+lda*(blkc)] = res1;
}
}
__global__ void
magmablas_chemv_200_L_update_32_s(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int nb )
{
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * chemv_bs + tx;
magmaFloatComplex Ca;
MAGMA_C_SET2REAL(Ca, 0);
WC += ind;
for(i =0; i < n; i += chemv_bs) {
Ca += WC[i/chemv_bs * lda];
}
if( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
}
__global__ void
magmablas_chemv_200_L_update_32(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int nb )
{
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * chemv_bs + tx;
magmaFloatComplex Ca;
MAGMA_C_SET2REAL(Ca, 0);
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*chemv_bs; i < n; i += chemv_bs) {
Ca += WC[0];
WC += chemv_bs;
}
if( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
}
extern "C"
void magmablas_chemv_200_L_32(
magma_int_t m, magmaFloatComplex alpha,
magmaFloatComplex *A, magma_int_t lda,
magmaFloatComplex *X, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *Y, magma_int_t incy,
magmaFloatComplex *dC_work,
magma_int_t nb)
{
magma_int_t blocks;
if (m % chemv_bs == 0)
blocks = m / chemv_bs;
else
blocks = m / chemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 grid_s(blocks, blocks, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(chemv_bs, 1, 1);
/*
* If matrix size is multiple of chemv_bs, we use a specific code.
* otherwise, we call the generic case.
*/
if(m % chemv_bs == 0 ) {
if(m < SWITCH)
hipLaunchKernelGGL(( magmablas_chemv_200_L_special_32_s) , dim3(grid_s), dim3(threads), 0, magma_stream ,
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb);
else
hipLaunchKernelGGL(( magmablas_chemv_200_L_special_32) , dim3(grid), dim3(threads), 0, magma_stream ,
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb);
}
else{
magma_int_t m_mod_thread_x = m%chemv_bs - 1;
if(m < SWITCH)
hipLaunchKernelGGL(( magmablas_chemv_200_L_generic_32_s) , dim3(grid_s), dim3(threads), 0, magma_stream ,
m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, nb);
else
hipLaunchKernelGGL(( magmablas_chemv_200_L_generic_32) , dim3(grid), dim3(threads), 0, magma_stream ,
m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, nb);
}
if(m < SWITCH)
hipLaunchKernelGGL(( magmablas_chemv_200_L_update_32_s), dim3(grid), dim3(threads_u), 0, magma_stream ,
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb);
else
hipLaunchKernelGGL(( magmablas_chemv_200_L_update_32), dim3(grid), dim3(threads_u), 0, magma_stream ,
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb);
}
#endif // not defined NB_64
/*************************************************************************
Purpose
=======
magmablas_chemv performs the matrix-vector operation on Fermi:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n hermitian matrix.
Arguments
==========
UPLO - CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA - COMPLEX .
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - COMPLEX array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that LDA be a multiple of 16. Otherwise
performance will deteriorate, as the memory accesses
will not be fully coalesced.
X - COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX - INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA - COMPLEX .
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y - COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY - INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
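/* Usage sketch (illustrative only; assumes device arrays dA, dX, dY are already
   allocated and filled, lda >= max(1,n), and the MAGMA_C_MAKE helper from the
   MAGMA headers):

       magmaFloatComplex alpha = MAGMA_C_MAKE( 1.f, 0.f );
       magmaFloatComplex beta  = MAGMA_C_MAKE( 0.f, 0.f );
       // y := alpha*A*x + beta*y, referencing only the lower triangle of A
       magmablas_chemv( 'L', n, alpha, dA, lda, dX, 1, beta, dY, 1 );
*/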
extern "C"
magma_int_t
magmablas_chemv_200(
char uplo, magma_int_t n,
magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *X, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *Y, magma_int_t incy)
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper )
hipblasChemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
else
{
magmaFloatComplex *dC_work;
magma_int_t blocks = n / chemv_bs + (n % chemv_bs != 0);
magma_int_t workspace = lda * (blocks + 1);
/* TODO: need to add a MAGMA context to handle workspaces */
hipblasAlloc( workspace, sizeof(magmaFloatComplex), (void**)&dC_work );
hipblasGetError( );
#ifdef NB_64
magmablas_chemv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work);
#else
magmablas_chemv_200_L_32(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work, chemv_bs);
#endif
hipblasFree(dC_work);
hipblasGetError( );
}
return MAGMA_SUCCESS;
}
extern "C"
magma_int_t
magmablas_chemv2_200(
char uplo, magma_int_t n,
magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *X, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *Y, magma_int_t incy,
magmaFloatComplex *work, magma_int_t lwork)
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper )
hipblasChemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
else
{
magma_int_t blocks = n / chemv_bs + (n % chemv_bs != 0);
magma_int_t workspace = n * (blocks );
if (lwork < workspace) {
printf("Not enough work space in magmablas_chemv: passed %d, required %d\n",
(int) lwork, (int) workspace);
exit(1);
}
//printf("You are using chemv_bs=%d\n", chemv_bs);
#ifdef NB_64
if( n < 1622)
hipblasChemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
else
magmablas_chemv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, work);
#else
magmablas_chemv_200_L_32(n, alpha, A, lda, X, incx, beta, Y, incy, work, chemv_bs);
#endif
}
return MAGMA_SUCCESS;
}
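/* Workspace sizing note (illustrative): magmablas_chemv2 requires lwork >= n * blocks
   with blocks = ceil(n / chemv_bs); e.g. n = 1000 and chemv_bs = 64 give blocks = 16,
   so lwork must be at least 16000 magmaFloatComplex elements, otherwise the
   "Not enough work space" branch above aborts.
*/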
#endif /* (GPUSHMEM >= 200) */
| 90673acd1486b36e01a23cf30f98283cb6e238e6.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Tue Aug 13 16:45:10 2013
*/
#include "common_magma.h"
#define PRECISION_c
/* The version for Tesla can be found in chemv_tesla.cu */
#if (GPUSHMEM >= 200)
#define magmablas_chemv_200 magmablas_chemv
#define magmablas_chemv2_200 magmablas_chemv2
#define NB_64
/*
turning on NB_64 calls the blocksize = 64 routine,
otherwise it calls the blocksize = 32 routine, which is 10% faster in z,c precision
*/
#ifdef NB_64 // using block size 64
#define chemv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
#else // using block size 32
#define chemv_bs 32
#define thread_x 32
#define thread_y 8
#define bank_shift 33
#define SWITCH 1400
#endif
/*******************************************************************************
* Functions for each specific case - Lower case
*/
#ifdef NB_64
__global__ void
magmablas_chemv_200_L_special(
int n, magmaFloatComplex alpha,
const magmaFloatComplex *A, int lda,
const magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
__shared__ magmaFloatComplex la [quarter_thread_x][thread_x+2];
__shared__ magmaFloatComplex buff [thread_x];
__shared__ magmaFloatComplex buff2 [thread_x];
magmaFloatComplex tr[4];
magmaFloatComplex b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty ) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx ) * incx;
A += break_d * (lda+1);
A += ty_* lda + tx_;
if( ty == 0 ) {
buff[tx] = x[0];
} // obtain the vector x and store it in buff;
tx = tx_; ty = ty_;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_ * 4 + 4); i++) {
if ( i < tx_ ) {
la[0][bank_shift * tx_ + i] = cuConjf( la[0][ i * bank_shift + tx_] );
}
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift * tx_ + j + ty_ * 4] ) * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
if( ty_ == 0 )
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res1,0);
}
__syncthreads();
MAGMA_C_SET2REAL(res, 0);
A += half_thread_x + half_thread_x *lda;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf( la[0][bank_shift*i+tx_] );
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift*tx_+j+ty_*4] ) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
magmaFloatComplex res2;
MAGMA_C_SET2REAL(res2,0);
if( ty_ == 1 )
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res2,0);
}
__syncthreads();
MAGMA_C_SET2REAL(res,0);
A -= half_thread_x *lda;
MAGMA_C_SET2REAL(res_,0);
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
if( ty_ == 1 )
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res2,0);
}
__syncthreads();
la[0][bank_shift*tx_+ty_]= res_;
__syncthreads();
if( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else
{
MAGMA_C_SET2REAL(res1,0);
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if( ty_ == 0 && ty == 0 )
res = res1;
else if( ty_ == 1 && ty == 0 )
res = res2;
else
{
MAGMA_C_SET2REAL(res,0);
}
A -= ty_* lda;
A -= tx_;
A= A - lda * blkc * thread_x;
x= x - blkc * thread_x *incx;
A += 4 * ty* lda;
A += tx;
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
if( blkc * thread_x >= thread_x) {
#pragma unroll
for(int i=0; i < thread_x; i += thread_x )
{
MAGMA_C_SET2REAL(res_,0);
count++;
if( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++)
{
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++)
{
res += tr[j] * buff2[ quarter_thread_x * k + ty * 4 + j];
la[( j + ty * 4)][tx] = cuConjf(tr[j]) * buff[tx];
}
__syncthreads();
MAGMA_C_SET2REAL(res_,0);
#pragma unroll
for(int j=0; j < 4; j++)
{
res_ += la[tx_][ty_*4+j];
}
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k]= b[k];
}
__syncthreads();
if( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
}
for(int i=thread_x; i < (blkc * thread_x); i += thread_x )
{
MAGMA_C_SET2REAL(res_,0);
count++;
if( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++)
{
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++)
{
res += tr[j] * buff2[quarter_thread_x*k + ty*4+(j)];
la[( j + ty * 4)][tx] = cuConjf( tr[j] )* buff[tx];
}
__syncthreads();
MAGMA_C_SET2REAL(res_,0);
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k]= b[k];
}
__syncthreads();
if( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx]= res;
__syncthreads();
if( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc) ] = res;
}
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
magmablas_chemv_200_L_generic(
int n, magmaFloatComplex alpha,
const magmaFloatComplex *A, int lda,
const magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int m_mod_thread_x)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
__shared__ magmaFloatComplex la [quarter_thread_x][thread_x+2];
__shared__ magmaFloatComplex buff [thread_x];
__shared__ magmaFloatComplex buff2[thread_x];
magmaFloatComplex tr[4];
magmaFloatComplex b[8];
int break_d = thread_x * blkc;
const int td = (thread_x * ty ) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC+= break_d + tx;
x += (break_d + tx ) * incx;
A += break_d * (lda+1);
A += lda * ty_;
int trackA;
if( blkc == ( gridDim.x - 1 ) ) {
if( ty == 0 ) {
if( tx > m_mod_thread_x )
{
MAGMA_C_SET2REAL(buff[tx],0);
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
}
else {
if( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
// Somehow merging these two if-else blocks creates a problem
// It could be a potential bug -- from synchronization, CUDA, or the compiler
if( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j =0; j < half_thread_x; j += 8) {
if( ( ty_ + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 9999);
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j =0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_*4+4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf(la[0][i*bank_shift+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx_+j+ty_*4])* buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
if( ty_ == 0 )
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res1,0);
}
__syncthreads();
MAGMA_C_SET2REAL(res,0);
if( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8) {
if( ( ty_ + j+half_thread_x ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 99999);
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x *lda;
}
else {
A += half_thread_x + half_thread_x *lda;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = cuConjf(la[0][bank_shift*i+tx_]);
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
magmaFloatComplex res2;
MAGMA_C_SET2REAL(res2,0);
if( ty_ == 1 )
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res2,0);
}
__syncthreads();
MAGMA_C_SET2REAL(res,0);
MAGMA_C_SET2REAL(res_,0);
A -= half_thread_x *lda;
if( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx_;
A += trackA;
#pragma unroll
for(int j =0; j < half_thread_x; j += 8)
if( ( ty_ + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(tr[j/8], 99999);
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j =0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += cuConjf(la[0][bank_shift*tx_+j+ty_*4]) * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_]= res;
__syncthreads();
if( ty_ == 1 )
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
else
{
MAGMA_C_SET2REAL(res2,0);
}
__syncthreads();
la[0][bank_shift*tx_+ty_]= res_;
__syncthreads();
if( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else
{
MAGMA_C_SET2REAL(res1,0);
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if( ty_ == 0 && ty == 0 )
res = res1;
else if( ty_ == 1 && ty == 0 )
res = res2;
else
{
MAGMA_C_SET2REAL(res,0);
}
A -= ty_* lda;
A -= tx_;
A= A - lda*break_d;
x= x - break_d *incx;
A += 4 * ty* lda;
if( blkc == ( gridDim.x - 1 ) ) {
if(tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else{
A += tx;
}
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
#pragma unroll
for(int j=0; j < 4; j++)
b[j] = buff[ty_*4+j];
if( break_d > 0)
#pragma unroll
for(int i=0; i < thread_x; i += thread_x ) {
MAGMA_C_SET2REAL(res_,0);
count++;
if( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = cuConjf(tr[j]);
}
__syncthreads();
MAGMA_C_SET2REAL(res_, 0);
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j]* b[j];
b[4+k] = res_;
__syncthreads();
A += lda* quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k]= b[4+k];
}
__syncthreads();
if( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
for(int i=thread_x; i < break_d; i += thread_x ) {
MAGMA_C_SET2REAL(res_, 0);
count++;
if(ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = cuConjf(tr[j]);
}
__syncthreads();
MAGMA_C_SET2REAL(res_, 0);
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j]* b[j];
b[4+k] = res_;
__syncthreads();
A += lda* quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k]= b[4+k];
}
__syncthreads();
if( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx]= res;
__syncthreads();
if( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
}
__global__ void
magmablas_chemv_200_L_update(
int n, magmaFloatComplex alpha,
const magmaFloatComplex *A, int lda,
const magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC )
{
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * thread_x + tx;
magmaFloatComplex Ca;
MAGMA_C_SET2REAL(Ca, 0);
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*thread_x; i < n; i += thread_x) {
Ca += WC[0];
WC += thread_x;
}
if( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
}
extern "C"
void magmablas_chemv_200_L(
magma_int_t m, magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *X, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *Y, magma_int_t incy,
magmaFloatComplex *dC_work)
{
magma_int_t blocks;
if (m % chemv_bs == 0)
blocks = m / chemv_bs;
else
blocks = m / chemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(chemv_bs, 1, 1);
/*
     * If the matrix size is a multiple of chemv_bs, we use the specialized code.
     * Otherwise, we call the generic case.
*/
if(m % chemv_bs == 0 ) {
magmablas_chemv_200_L_special <<< grid, threads, 0, magma_stream >>>(
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work);
}
else{
magma_int_t m_mod_thread_x = m%chemv_bs - 1;
magmablas_chemv_200_L_generic <<< grid, threads, 0, magma_stream >>> (
m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x);
}
magmablas_chemv_200_L_update<<< grid, threads_u, 0, magma_stream >>>(
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work);
}
#else // not defined NB_64
/*******************************************************************************
* Functions for each specific cases - Lower case nb = 32
*/
__global__ void
magmablas_chemv_200_L_special_32_s(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int nb)
{
if(blockIdx.y > blockIdx.x) return;
int tx = threadIdx.x;
int ty = threadIdx.y;
    magmaFloatComplex res  = MAGMA_C_ZERO; // used when scanning the row
    magmaFloatComplex res_ = MAGMA_C_ZERO; // used when scanning the column
__shared__ magmaFloatComplex la [1056];
__shared__ magmaFloatComplex buff [chemv_bs];
__shared__ magmaFloatComplex buff2 [chemv_bs];
int break_d = chemv_bs * blockIdx.x;
A += break_d;
A += lda * ty + tx;
A += lda * (blockIdx.y ) * chemv_bs; //
x += tx;
if ( blockIdx.x == blockIdx.y ) // diagonal
{
x += (blockIdx.y * chemv_bs) * incx;
if( ty == 0 )
{
buff[tx] = x[0];
} // obtain the vector x store in buff;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
la[ bank_shift * (ty+j) + tx] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty*4; i < (ty * 4 + 4); i++)
{
if ( i < tx )
{
la[bank_shift * tx + i] = cuConjf(la[ i * bank_shift + tx]);
}
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4];
__syncthreads();
}
else // non diagonal
{
x += (blockIdx.x * chemv_bs) * incx;
if( ty == 0 )
{
buff[tx] = x[0];
} // obtain the vector x and store in buff; buff store its corresponding upper elements instead of buff2;
x -= (blockIdx.x * chemv_bs ) * incx;
x += (blockIdx.y * chemv_bs ) * incx;
if( ty == 0 )
{
buff2[tx] = x[0];
} // obtain the vector x store in buff2;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
{
la[ bank_shift * (ty+j) + tx] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
{
res += (la[bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
res_ += cuConjf(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; //
}
__syncthreads();
la[bank_shift*tx+ty]= res_;
__syncthreads();
if( ty == 0 )
{
res_ = la[tx*bank_shift+0]+la[tx*bank_shift+1]
+ la[tx*bank_shift+2]+la[tx*bank_shift+3]
+ la[tx*bank_shift+4]+la[tx*bank_shift+5]
+ la[tx*bank_shift+6]+la[tx*bank_shift+7];
WC[ tx + blockIdx.y * chemv_bs + lda * blockIdx.x ] = res_; // write to its corresponding upper side position
}
__syncthreads();
} // end if else
la[bank_shift*tx+ty]= res;
__syncthreads();
if( ty == 0 )
{
res = la[tx*bank_shift+0]+la[tx*bank_shift+1]
+ la[tx*bank_shift+2]+la[tx*bank_shift+3]
+ la[tx*bank_shift+4]+la[tx*bank_shift+5]
+ la[tx*bank_shift+6]+la[tx*bank_shift+7];
WC[ tx + blockIdx.x * chemv_bs + lda * blockIdx.y] = res;
}
}
__global__ void
magmablas_chemv_200_L_special_32(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int nb)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
    magmaFloatComplex res  = MAGMA_C_ZERO; // used when scanning the row
    magmaFloatComplex res_ = MAGMA_C_ZERO; // used when scanning the column
    magmaFloatComplex res1 = MAGMA_C_ZERO; // temporary for res
    magmaFloatComplex res2 = MAGMA_C_ZERO; // temporary for res_
__shared__ magmaFloatComplex la [16][64+2];
__shared__ magmaFloatComplex buff [chemv_bs];
__shared__ magmaFloatComplex buff2 [chemv_bs];
int break_d = chemv_bs * blkc;
x += (break_d + tx ) * incx;
A += break_d;
A += ty * lda + tx;
if( ty == 0 )
{
buff[tx] = x[0];
} // obtain the vector x store in buff;
{
A += lda * (blkc) * chemv_bs; // change
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty*4; i < (ty * 4 + 4); i++) {
if ( i < tx ) {
la[0][bank_shift * tx + i] = cuConjf( la[0][ i * bank_shift + tx] );
}
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4];
__syncthreads();
A -= lda * (blkc) * chemv_bs;
}
x -= blkc * chemv_bs *incx;
x= x- tx*incx;
int wc_c = 0;
int count = 0;
WC += break_d + tx;
if( blkc > 0) {
for(int s=0; s < (blkc * chemv_bs); s += chemv_bs )
{
MAGMA_C_SET2REAL(res_,0);
count++;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
if( ty == 0 )
{
buff2[tx] = x[tx];
} // obtain the vector x store in buff;
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
{
res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
            res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate column
}
__syncthreads();
la[0][bank_shift*tx+ty]= res_;
__syncthreads();
if( ty == 0 )
{
res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[wc_c*lda ] = res2;
}
__syncthreads();
wc_c += 1;
x += chemv_bs;
A += lda * chemv_bs;
}
}
la[0][bank_shift*tx+ty]= res;
__syncthreads();
if( ty == 0 )
{
res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[0+lda*(blkc)] = res1;
}
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
magmablas_chemv_200_L_generic_32_s(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int m_mod_thread_x,
int nb)
{
if(blockIdx.y > blockIdx.x) return;
int tx = threadIdx.x;
int ty = threadIdx.y;
    magmaFloatComplex res  = MAGMA_C_ZERO; // used when scanning the row
    magmaFloatComplex res_ = MAGMA_C_ZERO; // used when scanning the column
__shared__ magmaFloatComplex la [1056];
__shared__ magmaFloatComplex buff [chemv_bs];
__shared__ magmaFloatComplex buff2 [chemv_bs];
int break_d = chemv_bs * blockIdx.x;
A += break_d;
A += lda * ty;
A += lda * (blockIdx.y ) * chemv_bs; //
x += tx;
x += (blockIdx.x * chemv_bs) * incx;
int trackA;
if( blockIdx.x == ( gridDim.x - 1 ) ) {
if( ty == 0 ) {
if( tx > m_mod_thread_x )
{
MAGMA_C_SET2REAL(buff[tx],0);
}
else
buff[tx] = x[0];
}
if ( tx > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx;
A += trackA;
}
else {
if( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx;
A += trackA;
}
__syncthreads();
if ( blockIdx.x == blockIdx.y) // diagonal
{
if( blockIdx.x == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j =0; j < chemv_bs; j += 8) {
if( ( ty + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[bank_shift*(ty+j)+tx], 9999);
}
else
la[bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
else {
#pragma unroll
for(int j =0; j < chemv_bs; j += 8) {
la[bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty*4; i < (ty * 4 + 4); i++)
{
if ( i < tx )
{
la[bank_shift * tx + i] = cuConjf(la[ i * bank_shift + tx]);
}
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4];
__syncthreads();
}
else // non diagonal
{
// obtain the vector x and store in buff; buff store its corresponding upper elements instead of buff2;
x -= (blockIdx.x * chemv_bs ) * incx;
x += (blockIdx.y * chemv_bs ) * incx;
if( ty == 0 )
{
buff2[tx] = x[0];
} // obtain the vector x store in buff2;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
{
la[ bank_shift * (ty+j) + tx] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
{
res += (la[bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
res_ += cuConjf(la[bank_shift * tx + j + ty * 4]) * buff[j + ty * 4]; //
}
__syncthreads();
la[bank_shift*tx+ty]= res_;
__syncthreads();
if( ty == 0 )
{
res_ = la[tx*bank_shift+0]+la[tx*bank_shift+1]
+ la[tx*bank_shift+2]+la[tx*bank_shift+3]
+ la[tx*bank_shift+4]+la[tx*bank_shift+5]
+ la[tx*bank_shift+6]+la[tx*bank_shift+7];
WC[ tx + blockIdx.y * chemv_bs + lda * blockIdx.x ] = res_; // write to its corresponding upper side position
}
__syncthreads();
} // end if else
la[bank_shift*tx+ty]= res;
__syncthreads();
if( ty == 0 )
{
res = la[tx*bank_shift+0]+la[tx*bank_shift+1]
+ la[tx*bank_shift+2]+la[tx*bank_shift+3]
+ la[tx*bank_shift+4]+la[tx*bank_shift+5]
+ la[tx*bank_shift+6]+la[tx*bank_shift+7];
WC[ tx + blockIdx.x * chemv_bs + lda * blockIdx.y] = res;
}
}
__global__ void
magmablas_chemv_200_L_generic_32(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int m_mod_thread_x,
int nb)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
magmaFloatComplex res = MAGMA_C_ZERO;
magmaFloatComplex res_ = MAGMA_C_ZERO;
magmaFloatComplex res1 = MAGMA_C_ZERO;
magmaFloatComplex res2 = MAGMA_C_ZERO;
__shared__ magmaFloatComplex la [16][64+2];
__shared__ magmaFloatComplex buff [chemv_bs];
__shared__ magmaFloatComplex buff2 [chemv_bs];
int break_d = chemv_bs * blkc;
x += (break_d + tx ) * incx;
A += break_d;
A += lda * ty;
int trackA;
if( blkc == ( gridDim.x - 1 ) ) {
if( ty == 0 ) {
if( tx > m_mod_thread_x )
{
MAGMA_C_SET2REAL(buff[tx],0);
}
else
buff[tx] = x[0];
}
if ( tx > m_mod_thread_x )
trackA=m_mod_thread_x;
else
trackA=tx;
A += trackA;
}
else {
if( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx;
A += trackA;
}
{
A += lda * (blkc) * chemv_bs; // change
        // Somehow merging these two if-else branches creates a problem.
        // It could be a potential bug -- from synchronization, CUDA, or the compiler.
if( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j =0; j < chemv_bs; j += 8) {
if( ( ty + j ) > m_mod_thread_x )
{
MAGMA_C_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999);
}
else
la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
else {
#pragma unroll
for(int j =0; j < chemv_bs; j += 8) {
la[0][bank_shift*(ty+j)+tx] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty*4; i < (ty*4+4); i++) {
if ( i < tx ) {
la[0][bank_shift*tx+i] = cuConjf(la[0][i*bank_shift+tx]);
}
else
la[0][bank_shift*tx+i] = la[0][bank_shift*tx+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += cuConjf(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4];
__syncthreads();
A -= lda * (blkc) * chemv_bs;
}
__syncthreads();
x = x - break_d *incx;
x = x - tx * incx;
int wc_c = 0;
int count = 0;
WC += break_d + tx;
if( blkc > 0) {
for(int s=0; s < (blkc * chemv_bs); s += chemv_bs )
{
MAGMA_C_SET2REAL(res_,0);
count++;
#pragma unroll
for(int j =0; j < chemv_bs; j += 8)
la[0][ bank_shift * (ty+j) + tx] = A[ j * lda];
__syncthreads();
if( ty == 0 )
{
buff2[tx] = x[tx];
} // obtain the vector x store in buff2;
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
{
res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8];
                res_ += cuConjf( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate column
}
__syncthreads();
la[0][bank_shift*tx+ty]= res_;
__syncthreads();
if( ty == 0 )
{
res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[wc_c*lda ] = res2;
}
__syncthreads();
wc_c += 1;
x += chemv_bs;
A += lda * chemv_bs;
}
}
la[0][bank_shift*tx+ty]= res;
__syncthreads();
if( ty == 0 )
{
res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1]
+ la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3]
+ la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5]
+ la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7];
WC[0+lda*(blkc)] = res1;
}
}
__global__ void
magmablas_chemv_200_L_update_32_s(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int nb )
{
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * chemv_bs + tx;
magmaFloatComplex Ca;
MAGMA_C_SET2REAL(Ca, 0);
WC += ind;
for(i =0; i < n; i += chemv_bs) {
Ca += WC[i/chemv_bs * lda];
}
if( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
}
__global__ void
magmablas_chemv_200_L_update_32(
int n, magmaFloatComplex alpha,
magmaFloatComplex *A, int lda,
magmaFloatComplex *x, int incx,
magmaFloatComplex beta,
magmaFloatComplex *y, int incy,
magmaFloatComplex *WC,
int nb )
{
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * chemv_bs + tx;
magmaFloatComplex Ca;
MAGMA_C_SET2REAL(Ca, 0);
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*chemv_bs; i < n; i += chemv_bs) {
Ca += WC[0];
WC += chemv_bs;
}
if( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
}
extern "C"
void magmablas_chemv_200_L_32(
magma_int_t m, magmaFloatComplex alpha,
magmaFloatComplex *A, magma_int_t lda,
magmaFloatComplex *X, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *Y, magma_int_t incy,
magmaFloatComplex *dC_work,
magma_int_t nb)
{
magma_int_t blocks;
if (m % chemv_bs == 0)
blocks = m / chemv_bs;
else
blocks = m / chemv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 grid_s(blocks, blocks, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(chemv_bs, 1, 1);
/*
     * If the matrix size is a multiple of chemv_bs, we use the specialized code.
     * Otherwise, we call the generic case.
*/
if(m % chemv_bs == 0 ) {
if(m < SWITCH)
magmablas_chemv_200_L_special_32_s <<< grid_s, threads, 0, magma_stream >>>(
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb);
else
magmablas_chemv_200_L_special_32 <<< grid, threads, 0, magma_stream >>>(
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb);
}
else{
magma_int_t m_mod_thread_x = m%chemv_bs - 1;
if(m < SWITCH)
magmablas_chemv_200_L_generic_32_s <<< grid_s, threads, 0, magma_stream >>> (
m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, nb);
else
magmablas_chemv_200_L_generic_32 <<< grid, threads, 0, magma_stream >>> (
m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, nb);
}
if(m < SWITCH)
magmablas_chemv_200_L_update_32_s<<< grid, threads_u, 0, magma_stream >>>(
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb);
else
magmablas_chemv_200_L_update_32<<< grid, threads_u, 0, magma_stream >>>(
m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, nb);
}
#endif // not defined NB_64
/*************************************************************************
Purpose
=======
magmablas_chemv performs the matrix-vector operation on fermi:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n hermitian matrix.
Arguments
==========
UPLO - CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA - COMPLEX*16 .
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - COMPLEX*16 array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
X - COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX - INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA - COMPLEX*16 .
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y - COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY - INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
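/*
   Illustrative usage sketch (added for exposition; not part of the original
   MAGMA source and excluded from compilation). The wrapper name and device
   buffer names are assumptions; the call matches the magmablas_chemv_200
   interface defined further down in this file, so if enabled it should be
   placed after that definition or given a prototype.
*/
#if 0
static void example_chemv_lower(magma_int_t n,
                                const magmaFloatComplex *dA, magma_int_t lda,
                                const magmaFloatComplex *dX,
                                magmaFloatComplex *dY)
{
    // y := 1*A*x + 0*y, referencing only the lower triangle of the
    // hermitian matrix A resident on the device
    magmaFloatComplex alpha = MAGMA_C_ONE;
    magmaFloatComplex beta  = MAGMA_C_ZERO;
    magmablas_chemv_200('L', n, alpha, dA, lda, dX, 1, beta, dY, 1);
}
#endif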
extern "C"
magma_int_t
magmablas_chemv_200(
char uplo, magma_int_t n,
magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *X, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *Y, magma_int_t incy)
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper )
cublasChemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
else
{
magmaFloatComplex *dC_work;
magma_int_t blocks = n / chemv_bs + (n % chemv_bs != 0);
magma_int_t workspace = lda * (blocks + 1);
/* TODO: need to add a MAGMA context to handle workspaces */
cublasAlloc( workspace, sizeof(magmaFloatComplex), (void**)&dC_work );
cublasGetError( );
#ifdef NB_64
magmablas_chemv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work);
#else
magmablas_chemv_200_L_32(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work, chemv_bs);
#endif
cublasFree(dC_work);
cublasGetError( );
}
return MAGMA_SUCCESS;
}
extern "C"
magma_int_t
magmablas_chemv2_200(
char uplo, magma_int_t n,
magmaFloatComplex alpha,
const magmaFloatComplex *A, magma_int_t lda,
const magmaFloatComplex *X, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex *Y, magma_int_t incy,
magmaFloatComplex *work, magma_int_t lwork)
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper )
cublasChemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
else
{
magma_int_t blocks = n / chemv_bs + (n % chemv_bs != 0);
magma_int_t workspace = n * (blocks );
if (lwork < workspace) {
printf("Not enough work space in magmablas_chemv: passed %d, required %d\n",
(int) lwork, (int) workspace);
exit(1);
}
//printf("You are using chemv_bs=%d\n", chemv_bs);
#ifdef NB_64
if( n < 1622)
cublasChemv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
else
magmablas_chemv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, work);
#else
magmablas_chemv_200_L_32(n, alpha, A, lda, X, incx, beta, Y, incy, work, chemv_bs);
#endif
}
return MAGMA_SUCCESS;
}
#endif /* (GPUSHMEM >= 200) */
|
3bfc4318a11b77a83b72e5214fadaff5a4c1fbaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "reference_calc.cpp"
#include "utils.h"
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
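/*
   Worked host-side sketch of one pass (added for exposition; not part of the
   original assignment code and excluded from compilation): steps 1-4 above
   for the lowest bit, written serially so the bookkeeping is easy to follow.
   The function name and the single-bit "digit" are illustrative assumptions.
*/
#if 0
static void host_radix_pass_bit0(const unsigned int *in, unsigned int *out, int n)
{
  // 1) histogram of the two digits (bit == 0 vs bit == 1)
  unsigned int hist[2] = {0, 0};
  for (int i = 0; i < n; ++i) hist[in[i] & 1u]++;
  // 2) exclusive prefix sum of the histogram -> base offset of each digit
  unsigned int base[2] = {0, hist[0]};
  // 3) relative offset of each element within its digit, and
  // 4) base + relative offset gives the final (stable) output position
  unsigned int seen[2] = {0, 0};
  for (int i = 0; i < n; ++i) {
    unsigned int d = in[i] & 1u;
    out[base[d] + seen[d]++] = in[i];
  }
}
#endif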
#include <stdio.h>
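// integer ceiling division: number of chunks of size d needed to cover a elements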
int get_max_size (int a, int d)
{
int temp = a/d;
if (a%d != 0)
{
temp = temp+1;
}
return temp;
}
__global__ void createHistogramSlow ( unsigned int* d_bins,
unsigned int* const d_inputVals,
const size_t numElems,
int compareAndValue)
{
int myId = blockDim.x*blockIdx.x + threadIdx.x;
//int tid = threadIdx.x;
if (myId < numElems)
{
if ((d_inputVals[myId] & compareAndValue) != 0)
{
atomicAdd(&d_bins[1], 1);
}
else
{
atomicAdd(&d_bins[0], 1);
}
}
}
__global__ void localHistograms (const unsigned int *input,
unsigned int *output,
const int size,
int numBins,
int perThreadReads,
unsigned int compareAndValue,
unsigned int *d_setOneIfOne)
{
int myX = blockDim.x*blockIdx.x + threadIdx.x;
for (int i=0;i<perThreadReads;i++)
{
if (myX*perThreadReads+i < size)
{
if ((input[myX*perThreadReads + i] & compareAndValue) != 0)
{
//Write to global Value
output[myX*numBins + 1] = output[myX*numBins + 1]+1;
d_setOneIfOne[myX] = 1;
}
else
{
//Write to global Value
output[myX*numBins] = output[myX*numBins]+1;
d_setOneIfOne[myX] = 0;
}
}
}
}
__global__ void histogramReduce (int numBins,
unsigned int *input,
unsigned int *output,
int size
)
{
extern __shared__ unsigned int sdata3[];
int myX = blockDim.x*blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
for (int i=0;i<numBins;i++)
{
int index = myX*numBins+i;
if (myX >= size)
{
sdata3[tid*numBins+i] = 0;
}
else
{
sdata3[tid*numBins+i] = input[index];
}
}
__syncthreads();
if (myX >= size)
{
if (tid == 0)
{
for (int i=0;i < numBins;i++)
{
output[blockIdx.x*numBins+i] = 0;
}
}
return;
}
for (unsigned int s = blockDim.x/2; s > 0; s/=2)
{
if(tid < s)
{
for (int i=0;i<numBins;i++)
{
sdata3[tid*numBins+i] = sdata3[tid*numBins+i] + sdata3[tid*numBins+i+s*numBins];
}
}
__syncthreads();
}
if (tid==0)
{
for (int i=0;i<numBins;i++)
{
//printf("Writing %d for bin value %d\n",sdata3[i],i);
output[blockIdx.x*numBins+i] = sdata3[i];
}
}
}
void fastHistogram(unsigned int *d_finalHist,
unsigned int* const d_inputVals,
const size_t numElems,
unsigned int compareAndValue,
unsigned int* d_setOneIfOne)
{
int numBins = 2;
unsigned int *d_histBins;
int numBlocksPerGrid = 1024;
int size = numElems;
dim3 blockDim(1);
dim3 gridDim(numBlocksPerGrid);
int sizeOfBins = numBins*sizeof(unsigned int)*numBlocksPerGrid;
int perThreadReads = get_max_size(size,numBlocksPerGrid);
  //Initialize temporary array variable
checkCudaErrors(hipMalloc(&d_histBins, sizeOfBins));
checkCudaErrors(hipMemset(d_histBins, 0, sizeOfBins));
//Call first kernel
hipLaunchKernelGGL(( localHistograms), dim3(gridDim),dim3(blockDim), 0, 0, d_inputVals,d_histBins,size,numBins,perThreadReads,compareAndValue,d_setOneIfOne);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
size = numBlocksPerGrid;
int localHistThreadsPerBlock = numBlocksPerGrid;
dim3 blockDimLocalHist(localHistThreadsPerBlock);
unsigned int* d_curr_in;
d_curr_in = d_histBins;
dim3 gridDimLocalHist(get_max_size(size,localHistThreadsPerBlock));
hipLaunchKernelGGL(( histogramReduce), dim3(gridDimLocalHist),dim3(blockDimLocalHist),numBins*sizeof(unsigned int)*localHistThreadsPerBlock, 0, numBins,d_curr_in,d_finalHist,size);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_curr_in));
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
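  /* NOTE: as written, this routine only builds and prints the per-bit
     histograms (step 1 of the plan described at the top of this file); the
     scan and scatter passes that would complete the radix sort -- and any
     writes to d_outputVals / d_outputPos -- are not implemented here. */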
unsigned int* d_bins;
unsigned int h_bins[2];
const size_t histo_size = 2*sizeof(unsigned int);
checkCudaErrors(hipMalloc(&d_bins, histo_size));
unsigned int* d_setOneIfOne;
  checkCudaErrors(hipMalloc(&d_setOneIfOne, numElems*sizeof(unsigned int)));
for (int i=0;i<32;i++)
{
checkCudaErrors(hipMemset(d_bins, 0, histo_size));
int compareAndValue = 1 << i;
fastHistogram(d_bins,d_inputVals,numElems,compareAndValue,d_setOneIfOne);
checkCudaErrors(hipMemcpy(&h_bins, d_bins, histo_size, hipMemcpyDeviceToHost));
printf("Histogram Values - %d %d %d %d %d \n", h_bins[0], h_bins[1], h_bins[0]+h_bins[1], numElems, compareAndValue);
}
checkCudaErrors(hipFree(d_bins));
}
| 3bfc4318a11b77a83b72e5214fadaff5a4c1fbaf.cu | //Udacity HW 4
//Radix Sorting
#include "reference_calc.cpp"
#include "utils.h"
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
#include <stdio.h>
int get_max_size (int a, int d)
{
int temp = a/d;
if (a%d != 0)
{
temp = temp+1;
}
return temp;
}
__global__ void createHistogramSlow ( unsigned int* d_bins,
unsigned int* const d_inputVals,
const size_t numElems,
int compareAndValue)
{
int myId = blockDim.x*blockIdx.x + threadIdx.x;
//int tid = threadIdx.x;
if (myId < numElems)
{
if ((d_inputVals[myId] & compareAndValue) != 0)
{
atomicAdd(&d_bins[1], 1);
}
else
{
atomicAdd(&d_bins[0], 1);
}
}
}
__global__ void localHistograms (const unsigned int *input,
unsigned int *output,
const int size,
int numBins,
int perThreadReads,
unsigned int compareAndValue,
unsigned int *d_setOneIfOne)
{
int myX = blockDim.x*blockIdx.x + threadIdx.x;
for (int i=0;i<perThreadReads;i++)
{
if (myX*perThreadReads+i < size)
{
if ((input[myX*perThreadReads + i] & compareAndValue) != 0)
{
//Write to global Value
output[myX*numBins + 1] = output[myX*numBins + 1]+1;
d_setOneIfOne[myX] = 1;
}
else
{
//Write to global Value
output[myX*numBins] = output[myX*numBins]+1;
d_setOneIfOne[myX] = 0;
}
}
}
}
__global__ void histogramReduce (int numBins,
unsigned int *input,
unsigned int *output,
int size
)
{
extern __shared__ unsigned int sdata3[];
int myX = blockDim.x*blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
for (int i=0;i<numBins;i++)
{
int index = myX*numBins+i;
if (myX >= size)
{
sdata3[tid*numBins+i] = 0;
}
else
{
sdata3[tid*numBins+i] = input[index];
}
}
__syncthreads();
if (myX >= size)
{
if (tid == 0)
{
for (int i=0;i < numBins;i++)
{
output[blockIdx.x*numBins+i] = 0;
}
}
return;
}
for (unsigned int s = blockDim.x/2; s > 0; s/=2)
{
if(tid < s)
{
for (int i=0;i<numBins;i++)
{
sdata3[tid*numBins+i] = sdata3[tid*numBins+i] + sdata3[tid*numBins+i+s*numBins];
}
}
__syncthreads();
}
if (tid==0)
{
for (int i=0;i<numBins;i++)
{
//printf("Writing %d for bin value %d\n",sdata3[i],i);
output[blockIdx.x*numBins+i] = sdata3[i];
}
}
}
void fastHistogram(unsigned int *d_finalHist,
unsigned int* const d_inputVals,
const size_t numElems,
unsigned int compareAndValue,
unsigned int* d_setOneIfOne)
{
int numBins = 2;
unsigned int *d_histBins;
int numBlocksPerGrid = 1024;
int size = numElems;
dim3 blockDim(1);
dim3 gridDim(numBlocksPerGrid);
int sizeOfBins = numBins*sizeof(unsigned int)*numBlocksPerGrid;
int perThreadReads = get_max_size(size,numBlocksPerGrid);
  //Initialize temporary array variable
checkCudaErrors(cudaMalloc(&d_histBins, sizeOfBins));
checkCudaErrors(cudaMemset(d_histBins, 0, sizeOfBins));
//Call first kernel
localHistograms<<<gridDim,blockDim>>>(d_inputVals,d_histBins,size,numBins,perThreadReads,compareAndValue,d_setOneIfOne);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
size = numBlocksPerGrid;
int localHistThreadsPerBlock = numBlocksPerGrid;
dim3 blockDimLocalHist(localHistThreadsPerBlock);
unsigned int* d_curr_in;
d_curr_in = d_histBins;
dim3 gridDimLocalHist(get_max_size(size,localHistThreadsPerBlock));
histogramReduce<<<gridDimLocalHist,blockDimLocalHist,numBins*sizeof(unsigned int)*localHistThreadsPerBlock>>>(numBins,d_curr_in,d_finalHist,size);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_curr_in));
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
unsigned int* d_bins;
unsigned int h_bins[2];
const size_t histo_size = 2*sizeof(unsigned int);
checkCudaErrors(cudaMalloc(&d_bins, histo_size));
unsigned int* d_setOneIfOne;
  checkCudaErrors(cudaMalloc(&d_setOneIfOne, numElems*sizeof(unsigned int)));
for (int i=0;i<32;i++)
{
checkCudaErrors(cudaMemset(d_bins, 0, histo_size));
int compareAndValue = 1 << i;
fastHistogram(d_bins,d_inputVals,numElems,compareAndValue,d_setOneIfOne);
checkCudaErrors(cudaMemcpy(&h_bins, d_bins, histo_size, cudaMemcpyDeviceToHost));
printf("Histogram Values - %d %d %d %d %d \n", h_bins[0], h_bins[1], h_bins[0]+h_bins[1], numElems, compareAndValue);
}
checkCudaErrors(cudaFree(d_bins));
}
|
a242c8bfdfd495e915542d0674971a1bc0261d81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include "solver.h"
#include "device.h"
#include "ode.h"
#ifdef SIE
#include "linear_algebra.h"
#endif
__device__ void checkError(
float y1, float y2,
int * shared_error_flag,
float ABSOLUTE, float RELATIVE){
// determine if any equation is above the absolute or relative tolerances
float abs_error = fabs(y2 - y1);
if(abs_error > ABSOLUTE){
*shared_error_flag = 1;
#ifdef LOUD
printf("%d absolute failed: %.2e\n",threadIdx.x,abs_error);
#endif
}
#if SIE
float rel_error = fabs((y2-y1)/(y2+1e-12));
#else
float rel_error = fabs((y2-y1)/(2*y2-y1+1e-12));
#endif
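    // note: the relative error is measured against the solution that will be
    // accepted by integrateSystem -- y2 for SIE, or the Richardson-extrapolated
    // value 2*y2 - y1 for RK2.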
if(rel_error > RELATIVE &&
fabs(y1) > ABSOLUTE &&
fabs(y2) > ABSOLUTE){
*shared_error_flag = 1;
#ifdef LOUD
printf("%d relative failed: %.2e\n",threadIdx.x,rel_error);
#endif
}
__syncthreads();
}
#ifdef SIE
__device__ void scaleAndInvertJacobians(
float timestep,
float * Jacobians,
float * inverses,
int Nequations_per_system,
float * shared_array){
int this_index;
// loop through each row and perform 1-hJ
for (int eqn_i=0; eqn_i<Nequations_per_system; eqn_i++){
this_index = eqn_i*Nequations_per_system + threadIdx.x;
Jacobians[this_index] = ((eqn_i)==threadIdx.x) - Jacobians[this_index]*timestep;
}
__syncthreads();
// invert 1-hJ into inverses
gjeInvertMatrix(
Jacobians,
inverses,
Nequations_per_system,
shared_array);
__syncthreads();
}
#endif
__device__ float innerstep(
float tnow, // the current time
float tstop, // the time we want to stop
    int n_integration_steps, // the number of integration substeps to take
float * constants, // the constants for each system
float * shared_equations, // place to store the current state
#ifdef SIE
float * shared_dydts,
float * Jacobians,float * inverses,
#endif
int Nequations_per_system){ // the number of equations in each system
float dydt = 0;
float timestep = (tstop-tnow)/n_integration_steps;
#ifdef SIE
int this_index;
#endif
for (int nsteps=0; nsteps<n_integration_steps; nsteps++){
// limit step size based on remaining time
timestep = fmin(tstop - tnow, timestep);
__syncthreads();
//calculate the derivative for this equation
dydt = calculate_dydt(
tnow,
constants,
shared_equations);
#ifdef SIE
// calculate the jacobian for the whole system
calculate_jacobian(
tnow,
constants,
shared_equations,
Jacobians,
Nequations_per_system);
// invert 1-hJ into inverses
scaleAndInvertJacobians(
timestep,
Jacobians,
inverses,
Nequations_per_system,
shared_dydts);
// fill the shared array with
shared_dydts[threadIdx.x] = dydt;
__syncthreads();
        /* -- calculate h x (1-hJ)^-1 f and add it into y(n), i.e. the
              semi-implicit Euler update y(n+1) = y(n) + h*(I-hJ)^(-1)*f(y(n)) -- */
// accumulate matrix rows into elements of f
for (int eqn_i=0; eqn_i < Nequations_per_system; eqn_i++){
this_index = eqn_i*Nequations_per_system + threadIdx.x;
            // accumulate values directly into shared_equations[threadIdx.x] -- J and inverses are actually transposed
shared_equations[threadIdx.x]+=inverses[this_index]*shared_dydts[eqn_i]*timestep;
}
#else
__syncthreads();
shared_equations[threadIdx.x] += timestep*dydt;
#endif
tnow+=timestep;
} // while(tnow < tstop)
// make sure the last loop finished accumulating
__syncthreads();
return shared_equations[threadIdx.x];
}// innerstep
__global__ void integrateSystem(
float tnow, // the current time
float tend, // the time we integrating the system to
float timestep,
float * constants, // the constants for each system
float * equations, // a flattened array containing the y value for each equation in each system
#ifdef SIE
float * Jacobians,
float * inverses,
#endif
int Nsystems, // the number of systems
    int Nequations_per_system, // the number of equations in each system
    int * nsteps, // accumulator for the total number of steps taken (via atomicAdd)
float ABSOLUTE, // the absolute tolerance
float RELATIVE){ // the relative tolerance
// unique thread ID , based on local ID in block and block ID
int tid = threadIdx.x + ( blockDim.x * blockIdx.x);
#ifdef SIE
// offset pointer to find flat jacobian and inverses in global memory
Jacobians+= Nequations_per_system*Nequations_per_system*blockIdx.x;
inverses+= Nequations_per_system*Nequations_per_system*blockIdx.x;
#endif
// offset pointer to find flat constants in global memory
constants+=NUM_CONST*blockIdx.x;
extern __shared__ float total_shared[];
// total_shared is a pointer to the beginning of this block's shared
// memory. If we want to use multiple shared memory arrays we must
// manually offset them within that block and allocate enough memory
// when initializing the kernel (<<dimGrid,dimBlock,sbytes>>)
int * shared_error_flag = (int *) &total_shared[0];
float * shared_equations = (float *) &total_shared[1];
#ifdef SIE
float * shared_dydts = (float *) &shared_equations[Nequations_per_system];
#endif
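    /*
       Sizing note (added for exposition; the host-side launcher is not in
       this file, so the expression below is an assumption about how the
       dynamic shared memory size would be chosen to cover the layout above):
         sbytes = (1 + Nequations_per_system) * sizeof(float)   // flag + shared_equations
                  + Nequations_per_system * sizeof(float)       // shared_dydts, SIE only
       passed as the shared-memory argument of the kernel launch
       (e.g. the fourth argument of hipLaunchKernelGGL).
    */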
float y1,y2,current_y;
int this_nsteps = 0;
// ensure thread within limit
int unsolved = 0;
if (tid < Nsystems*Nequations_per_system ) {
*shared_error_flag = 0;
// copy the y values to shared memory
shared_equations[threadIdx.x] = equations[tid];
__syncthreads();
//printf("%d thread %d block\n",threadIdx.x,blockIdx.x);
while (tnow < tend){
this_nsteps+=3;
// make sure we don't overintegrate
timestep = fmin(tend-tnow,timestep);
// save this to reset the value before calculating y2
current_y = shared_equations[threadIdx.x];
__syncthreads();
// shared_equations will have the y2 value
// saved in it from the previous loop
// take the full step
y1 = innerstep(
tnow, tnow+timestep,
1,
constants,
shared_equations,
#ifdef SIE
shared_dydts,
Jacobians,inverses,
#endif
Nequations_per_system );
#ifdef DEBUGBLOCK
if (threadIdx.x==0 && blockIdx.x==DEBUGBLOCK){
printf("%02d - cuda - y1 ",this_nsteps);
for (int i=0; i<Nequations_per_system; i++){
printf("%.6f\t",shared_equations[i]);
}
printf("\n");
}
#endif
// overwrite the y values in shared memory
shared_equations[threadIdx.x] = current_y;
__syncthreads();
// take the half step
y2 = innerstep(
tnow, tnow+timestep,
2,
constants,
shared_equations,
#ifdef SIE
shared_dydts,
Jacobians,inverses,
#endif
Nequations_per_system );
#ifdef DEBUGBLOCK
if (threadIdx.x==0 && blockIdx.x==DEBUGBLOCK){
printf("%02d - cuda - y2 ",this_nsteps);
for (int i=0; i<Nequations_per_system; i++){
printf("%.6f\t",shared_equations[i]);
}
printf("\n");
}
#endif
#ifdef ADAPTIVE_TIMESTEP
checkError(
y1,y2,
shared_error_flag,
ABSOLUTE,RELATIVE);
#endif
#ifdef RK2
if (*shared_error_flag && unsolved <20){
#else
if (*shared_error_flag && unsolved <10){
#endif
unsolved++;
// refine and start over
timestep/=2;
*shared_error_flag = 0;
shared_equations[threadIdx.x] = current_y;
__syncthreads();
} // if shared_error_flag
else{
unsolved=0;
// accept this step and update the shared array
#ifdef RK2
shared_equations[threadIdx.x] = 2*y2-y1;
__syncthreads();
#else
// shared_equations already has y2 in it from last
// call to innerstep if SIE
#endif
tnow+=timestep;
#ifdef ADAPTIVE_TIMESTEP
// let's get a little more optimistic
#ifdef RK2
// increase one refinement level
timestep*=2;
#else
// go for gold
timestep=(tend-tnow);
#endif
#endif
}// if shared_error_flag -> else
}// while tnow < tend
// copy the y values back to global memory
equations[tid]=shared_equations[threadIdx.x];
#ifdef LOUD
if (threadIdx.x == 1 && blockIdx.x == 0){
printf("nsteps taken: %d - tnow: %.2f\n",this_nsteps,tnow);
}
#endif
// accumulate the number of steps this block took
if (threadIdx.x == 0){
atomicAdd(nsteps,this_nsteps);
}
} // if tid < nequations
} //integrateSystem
| a242c8bfdfd495e915542d0674971a1bc0261d81.cu | #include <stdio.h>
#include <math.h>
#include "solver.h"
#include "device.h"
#include "ode.h"
#ifdef SIE
#include "linear_algebra.h"
#endif
__device__ void checkError(
float y1, float y2,
int * shared_error_flag,
float ABSOLUTE, float RELATIVE){
// determine if any equation is above the absolute or relative tolerances
float abs_error = fabs(y2 - y1);
if(abs_error > ABSOLUTE){
*shared_error_flag = 1;
#ifdef LOUD
printf("%d absolute failed: %.2e\n",threadIdx.x,abs_error);
#endif
}
#if SIE
float rel_error = fabs((y2-y1)/(y2+1e-12));
#else
float rel_error = fabs((y2-y1)/(2*y2-y1+1e-12));
#endif
if(rel_error > RELATIVE &&
fabs(y1) > ABSOLUTE &&
fabs(y2) > ABSOLUTE){
*shared_error_flag = 1;
#ifdef LOUD
printf("%d relative failed: %.2e\n",threadIdx.x,rel_error);
#endif
}
__syncthreads();
}
#ifdef SIE
__device__ void scaleAndInvertJacobians(
float timestep,
float * Jacobians,
float * inverses,
int Nequations_per_system,
float * shared_array){
int this_index;
// loop through each row and perform 1-hJ
for (int eqn_i=0; eqn_i<Nequations_per_system; eqn_i++){
this_index = eqn_i*Nequations_per_system + threadIdx.x;
Jacobians[this_index] = ((eqn_i)==threadIdx.x) - Jacobians[this_index]*timestep;
}
__syncthreads();
// invert 1-hJ into inverses
gjeInvertMatrix(
Jacobians,
inverses,
Nequations_per_system,
shared_array);
__syncthreads();
}
#endif
__device__ float innerstep(
float tnow, // the current time
float tstop, // the time we want to stop
    int n_integration_steps, // the number of integration substeps to take
float * constants, // the constants for each system
float * shared_equations, // place to store the current state
#ifdef SIE
float * shared_dydts,
float * Jacobians,float * inverses,
#endif
int Nequations_per_system){ // the number of equations in each system
float dydt = 0;
float timestep = (tstop-tnow)/n_integration_steps;
#ifdef SIE
int this_index;
#endif
for (int nsteps=0; nsteps<n_integration_steps; nsteps++){
// limit step size based on remaining time
timestep = fmin(tstop - tnow, timestep);
__syncthreads();
//calculate the derivative for this equation
dydt = calculate_dydt(
tnow,
constants,
shared_equations);
#ifdef SIE
// calculate the jacobian for the whole system
calculate_jacobian(
tnow,
constants,
shared_equations,
Jacobians,
Nequations_per_system);
// invert 1-hJ into inverses
scaleAndInvertJacobians(
timestep,
Jacobians,
inverses,
Nequations_per_system,
shared_dydts);
// fill the shared array with
shared_dydts[threadIdx.x] = dydt;
__syncthreads();
        /* -- calculate h x (1-hJ)^-1 f and add it into y(n), i.e. the
              semi-implicit Euler update y(n+1) = y(n) + h*(I-hJ)^(-1)*f(y(n)) -- */
// accumulate matrix rows into elements of f
for (int eqn_i=0; eqn_i < Nequations_per_system; eqn_i++){
this_index = eqn_i*Nequations_per_system + threadIdx.x;
            // accumulate values directly into shared_equations[threadIdx.x] -- J and inverses are actually transposed
shared_equations[threadIdx.x]+=inverses[this_index]*shared_dydts[eqn_i]*timestep;
}
#else
__syncthreads();
shared_equations[threadIdx.x] += timestep*dydt;
#endif
tnow+=timestep;
} // while(tnow < tstop)
// make sure the last loop finished accumulating
__syncthreads();
return shared_equations[threadIdx.x];
}// innerstep
__global__ void integrateSystem(
float tnow, // the current time
float tend, // the time we integrating the system to
float timestep,
float * constants, // the constants for each system
float * equations, // a flattened array containing the y value for each equation in each system
#ifdef SIE
float * Jacobians,
float * inverses,
#endif
int Nsystems, // the number of systems
    int Nequations_per_system, // the number of equations in each system
    int * nsteps, // accumulator for the total number of steps taken (via atomicAdd)
float ABSOLUTE, // the absolute tolerance
float RELATIVE){ // the relative tolerance
// unique thread ID , based on local ID in block and block ID
int tid = threadIdx.x + ( blockDim.x * blockIdx.x);
#ifdef SIE
// offset pointer to find flat jacobian and inverses in global memory
Jacobians+= Nequations_per_system*Nequations_per_system*blockIdx.x;
inverses+= Nequations_per_system*Nequations_per_system*blockIdx.x;
#endif
// offset pointer to find flat constants in global memory
constants+=NUM_CONST*blockIdx.x;
extern __shared__ float total_shared[];
// total_shared is a pointer to the beginning of this block's shared
// memory. If we want to use multiple shared memory arrays we must
// manually offset them within that block and allocate enough memory
// when initializing the kernel (<<dimGrid,dimBlock,sbytes>>)
int * shared_error_flag = (int *) &total_shared[0];
float * shared_equations = (float *) &total_shared[1];
#ifdef SIE
float * shared_dydts = (float *) &shared_equations[Nequations_per_system];
#endif
float y1,y2,current_y;
int this_nsteps = 0;
// ensure thread within limit
int unsolved = 0;
if (tid < Nsystems*Nequations_per_system ) {
*shared_error_flag = 0;
// copy the y values to shared memory
shared_equations[threadIdx.x] = equations[tid];
__syncthreads();
//printf("%d thread %d block\n",threadIdx.x,blockIdx.x);
while (tnow < tend){
this_nsteps+=3;
// make sure we don't overintegrate
timestep = fmin(tend-tnow,timestep);
// save this to reset the value before calculating y2
current_y = shared_equations[threadIdx.x];
__syncthreads();
// shared_equations will have the y2 value
// saved in it from the previous loop
// take the full step
y1 = innerstep(
tnow, tnow+timestep,
1,
constants,
shared_equations,
#ifdef SIE
shared_dydts,
Jacobians,inverses,
#endif
Nequations_per_system );
#ifdef DEBUGBLOCK
if (threadIdx.x==0 && blockIdx.x==DEBUGBLOCK){
printf("%02d - cuda - y1 ",this_nsteps);
for (int i=0; i<Nequations_per_system; i++){
printf("%.6f\t",shared_equations[i]);
}
printf("\n");
}
#endif
// overwrite the y values in shared memory
shared_equations[threadIdx.x] = current_y;
__syncthreads();
// take the half step
y2 = innerstep(
tnow, tnow+timestep,
2,
constants,
shared_equations,
#ifdef SIE
shared_dydts,
Jacobians,inverses,
#endif
Nequations_per_system );
#ifdef DEBUGBLOCK
if (threadIdx.x==0 && blockIdx.x==DEBUGBLOCK){
printf("%02d - cuda - y2 ",this_nsteps);
for (int i=0; i<Nequations_per_system; i++){
printf("%.6f\t",shared_equations[i]);
}
printf("\n");
}
#endif
#ifdef ADAPTIVE_TIMESTEP
checkError(
y1,y2,
shared_error_flag,
ABSOLUTE,RELATIVE);
#endif
#ifdef RK2
if (*shared_error_flag && unsolved <20){
#else
if (*shared_error_flag && unsolved <10){
#endif
unsolved++;
// refine and start over
timestep/=2;
*shared_error_flag = 0;
shared_equations[threadIdx.x] = current_y;
__syncthreads();
} // if shared_error_flag
else{
unsolved=0;
// accept this step and update the shared array
#ifdef RK2
shared_equations[threadIdx.x] = 2*y2-y1;
__syncthreads();
#else
// shared_equations already has y2 in it from last
// call to innerstep if SIE
#endif
tnow+=timestep;
#ifdef ADAPTIVE_TIMESTEP
// let's get a little more optimistic
#ifdef RK2
// increase one refinement level
timestep*=2;
#else
// go for gold
timestep=(tend-tnow);
#endif
#endif
}// if shared_error_flag -> else
}// while tnow < tend
// copy the y values back to global memory
equations[tid]=shared_equations[threadIdx.x];
#ifdef LOUD
if (threadIdx.x == 1 && blockIdx.x == 0){
printf("nsteps taken: %d - tnow: %.2f\n",this_nsteps,tnow);
}
#endif
// accumulate the number of steps this block took
if (threadIdx.x == 0){
atomicAdd(nsteps,this_nsteps);
}
} // if tid < nequations
} //integrateSystem
|
0c1e3192ebb51685967528dbc012e1939c388392.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function: sc_fdma_demodulator
% Description: Generates complex symbols from the sc-fdma symbols
% Inputs *pusch_bb_h sc-fdma symbols
                M_pusch_rb number of resource blocks assigned to the UE
% Outputs: *symbs_h output symbols
By: Mohammed Mostafa
*/
#include "sc_fdma_demodulator.cuh"
__global__ void construct_fft_vec(hipfftComplex* pusch_bb_d, hipfftComplex* fft_vec_d, int M_pusch_sc) {
int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
int y_idx = blockIdx.y;
if (y_idx == 0) //160 = N_cp_L_0
fft_vec_d[x_idx] = pusch_bb_d[x_idx + 160];
else if (y_idx == 7)// 14336 = y_idx * FFT_size // 15520 = y_idx * FFT_size + 2*N_cp_L_0 + 6*N_cp_L_else
fft_vec_d[14336 + x_idx] = pusch_bb_d[x_idx + 15520];
else if (y_idx < 7)
fft_vec_d[y_idx * FFT_size + x_idx] = pusch_bb_d[x_idx + y_idx*FFT_size + N_cp_L_0 + y_idx*N_cp_L_else];
else
fft_vec_d[y_idx * FFT_size + x_idx] = pusch_bb_d[x_idx + y_idx*FFT_size + 176 + y_idx*N_cp_L_else]; // 176 = 2*N_cp_L_0 - N_cp_L_else
}
__global__ void extract_symbs(hipfftComplex* fft_vec_d, hipfftComplex* symbs_d, int M_pusch_sc_div2) {
int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
int y_idx = blockIdx.y;
//if (x_idx >= M_pusch_sc)
//return;
symbs_d[y_idx*M_pusch_sc_div2*2 + x_idx] = fft_vec_d[y_idx*FFT_size + (x_idx + FFT_size - M_pusch_sc_div2)%FFT_size]; // 1448 = FFT_size - M_pusch_sc/2
}
void sc_fdma_demodulator(hipfftComplex* pusch_bb_h, const int M_pusch_rb, hipfftComplex** symbs_h)
{
int M_pusch_sc = N_sc_rb * M_pusch_rb;
//For timing purpose
float elapsed = 0; //For time calc.
hipEvent_t start, stop;
//Device data
hipfftComplex* pusch_bb_d;
hipfftComplex* fft_vec_d;
hipfftComplex* symbs_d;
//Host data allocation
*symbs_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
//Device data allocation
startTimer();
hipMalloc((void **)&pusch_bb_d, sizeof(hipfftComplex)*modulated_subframe_length);
hipMalloc((void **)&fft_vec_d, sizeof(hipfftComplex)*N_symbs_per_subframe*FFT_size);
hipMalloc((void **)&symbs_d, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc);
stopTimer("hipMalloc Time= %.6f ms\n", elapsed);
//Copying data to device
startTimer();
hipMemcpy(pusch_bb_d, pusch_bb_h, sizeof(hipfftComplex)*modulated_subframe_length, hipMemcpyHostToDevice);
stopTimer("hipMemcpy Host->Device Time= %.6f ms\n", elapsed);
//constructing fft_vec
dim3 grid(2, N_symbs_per_subframe,1);
dim3 block(1024,1,1);
hipLaunchKernelGGL(( construct_fft_vec) , dim3(grid), dim3(block) , 0, 0, pusch_bb_d, fft_vec_d, M_pusch_sc);
// CUFFT plan
int N_SIGS = N_symbs_per_subframe;
int n[1] = { FFT_size };
hipfftHandle plan;
hipfftPlanMany(&plan, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, HIPFFT_C2C, N_SIGS);
hipfftExecC2C(plan, fft_vec_d, fft_vec_d, HIPFFT_FORWARD);
dim3 grid1(2, N_symbs_per_subframe, 1);
dim3 block1(M_pusch_sc/2, 1, 1);
    hipLaunchKernelGGL(( extract_symbs), dim3(grid1), dim3(block1), 0, 0, fft_vec_d, symbs_d, M_pusch_sc/2);
//Retrieve data from device
startTimer();
hipMemcpy(*symbs_h, symbs_d, sizeof(hipfftComplex)*N_symbs_per_subframe*M_pusch_sc, hipMemcpyDeviceToHost);
stopTimer("hipMemcpy Device->Host Time= %.6f ms\n", elapsed);
    // Cleanup
    hipfftDestroy(plan);
    hipFree(pusch_bb_d);
    hipFree(fft_vec_d);
    hipFree(symbs_d);
//Destroy timers
destroyTimers();
}
| 0c1e3192ebb51685967528dbc012e1939c388392.cu | /*
% Function: sc_fdma_demodulator
% Description: Generates complex symbols from the sc-fdma symbols
% Inputs *pusch_bb_h sc-fdma symbols
                M_pusch_rb number of resource blocks assigned to the UE
% Outputs: *symbs_h output symbols
By: Mohammed Mostafa
*/
#include "sc_fdma_demodulator.cuh"
__global__ void construct_fft_vec(cufftComplex* pusch_bb_d, cufftComplex* fft_vec_d, int M_pusch_sc) {
int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
int y_idx = blockIdx.y;
if (y_idx == 0) //160 = N_cp_L_0
fft_vec_d[x_idx] = pusch_bb_d[x_idx + 160];
else if (y_idx == 7)// 14336 = y_idx * FFT_size // 15520 = y_idx * FFT_size + 2*N_cp_L_0 + 6*N_cp_L_else
fft_vec_d[14336 + x_idx] = pusch_bb_d[x_idx + 15520];
else if (y_idx < 7)
fft_vec_d[y_idx * FFT_size + x_idx] = pusch_bb_d[x_idx + y_idx*FFT_size + N_cp_L_0 + y_idx*N_cp_L_else];
else
fft_vec_d[y_idx * FFT_size + x_idx] = pusch_bb_d[x_idx + y_idx*FFT_size + 176 + y_idx*N_cp_L_else]; // 176 = 2*N_cp_L_0 - N_cp_L_else
}
__global__ void extract_symbs(cufftComplex* fft_vec_d, cufftComplex* symbs_d, int M_pusch_sc_div2) {
int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
int y_idx = blockIdx.y;
//if (x_idx >= M_pusch_sc)
//return;
symbs_d[y_idx*M_pusch_sc_div2*2 + x_idx] = fft_vec_d[y_idx*FFT_size + (x_idx + FFT_size - M_pusch_sc_div2)%FFT_size]; // 1448 = FFT_size - M_pusch_sc/2
}
void sc_fdma_demodulator(cufftComplex* pusch_bb_h, const int M_pusch_rb, cufftComplex** symbs_h)
{
int M_pusch_sc = N_sc_rb * M_pusch_rb;
//For timing purpose
float elapsed = 0; //For time calc.
cudaEvent_t start, stop;
//Device data
cufftComplex* pusch_bb_d;
cufftComplex* fft_vec_d;
cufftComplex* symbs_d;
//Host data allocation
*symbs_h = (cufftComplex *)malloc(sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
//Device data allocation
startTimer();
cudaMalloc((void **)&pusch_bb_d, sizeof(cufftComplex)*modulated_subframe_length);
cudaMalloc((void **)&fft_vec_d, sizeof(cufftComplex)*N_symbs_per_subframe*FFT_size);
cudaMalloc((void **)&symbs_d, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc);
stopTimer("cudaMalloc Time= %.6f ms\n", elapsed);
//Copying data to device
startTimer();
cudaMemcpy(pusch_bb_d, pusch_bb_h, sizeof(cufftComplex)*modulated_subframe_length, cudaMemcpyHostToDevice);
stopTimer("cudaMemcpy Host->Device Time= %.6f ms\n", elapsed);
//constructing fft_vec
dim3 grid(2, N_symbs_per_subframe,1);
dim3 block(1024,1,1);
construct_fft_vec <<< grid, block >>>(pusch_bb_d, fft_vec_d, M_pusch_sc);
// CUFFT plan
int N_SIGS = N_symbs_per_subframe;
int n[1] = { FFT_size };
cufftHandle plan;
cufftPlanMany(&plan, 1, n, NULL, 1, FFT_size, NULL, 1, FFT_size, CUFFT_C2C, N_SIGS);
cufftExecC2C(plan, fft_vec_d, fft_vec_d, CUFFT_FORWARD);
dim3 grid1(2, N_symbs_per_subframe, 1);
dim3 block1(M_pusch_sc/2, 1, 1);
extract_symbs << < grid1, block1 >> >(fft_vec_d, symbs_d, M_pusch_sc/2);
//Retrieve data from device
startTimer();
cudaMemcpy(*symbs_h, symbs_d, sizeof(cufftComplex)*N_symbs_per_subframe*M_pusch_sc, cudaMemcpyDeviceToHost);
stopTimer("cudaMemcpy Device->Host Time= %.6f ms\n", elapsed);
    // Cleanup
    cufftDestroy(plan);
    cudaFree(pusch_bb_d);
    cudaFree(fft_vec_d);
    cudaFree(symbs_d);
//Destroy timers
destroyTimers();
}
|
amdgpu-kernel-attrs.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -emit-llvm -o - -x hip %s \
// RUN: | FileCheck -check-prefixes=CHECK,DEFAULT %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa --gpu-max-threads-per-block=1024 \
// RUN: -fcuda-is-device -emit-llvm -o - -x hip %s \
// RUN: | FileCheck -check-prefixes=CHECK,MAX1024 %s
// RUN: %clang_cc1 -triple nvptx \
// RUN: -fcuda-is-device -emit-llvm -o - %s | FileCheck %s \
// RUN: -check-prefix=NAMD
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm \
// RUN: -verify -o - -x hip %s | FileCheck -check-prefix=NAMD %s
#include "Inputs/cuda.h"
__global__ void flat_work_group_size_default() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z28flat_work_group_size_defaultv() [[FLAT_WORK_GROUP_SIZE_DEFAULT:#[0-9]+]]
}
__attribute__((amdgpu_flat_work_group_size(32, 64))) // expected-no-diagnostics
__global__ void flat_work_group_size_32_64() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z26flat_work_group_size_32_64v() [[FLAT_WORK_GROUP_SIZE_32_64:#[0-9]+]]
}
__attribute__((amdgpu_waves_per_eu(2))) // expected-no-diagnostics
__global__ void waves_per_eu_2() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z14waves_per_eu_2v() [[WAVES_PER_EU_2:#[0-9]+]]
}
__attribute__((amdgpu_num_sgpr(32))) // expected-no-diagnostics
__global__ void num_sgpr_32() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z11num_sgpr_32v() [[NUM_SGPR_32:#[0-9]+]]
}
__attribute__((amdgpu_num_vgpr(64))) // expected-no-diagnostics
__global__ void num_vgpr_64() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z11num_vgpr_64v() [[NUM_VGPR_64:#[0-9]+]]
}
// Make sure this is silently accepted on other targets.
// NAMD-NOT: "amdgpu-flat-work-group-size"
// NAMD-NOT: "amdgpu-waves-per-eu"
// NAMD-NOT: "amdgpu-num-vgpr"
// NAMD-NOT: "amdgpu-num-sgpr"
// DEFAULT-DAG: attributes [[FLAT_WORK_GROUP_SIZE_DEFAULT]] = {{.*}}"amdgpu-flat-work-group-size"="1,256"{{.*}}"uniform-work-group-size"="true"
// MAX1024-DAG: attributes [[FLAT_WORK_GROUP_SIZE_DEFAULT]] = {{.*}}"amdgpu-flat-work-group-size"="1,1024"
// CHECK-DAG: attributes [[FLAT_WORK_GROUP_SIZE_32_64]] = {{.*}}"amdgpu-flat-work-group-size"="32,64"
// CHECK-DAG: attributes [[WAVES_PER_EU_2]] = {{.*}}"amdgpu-waves-per-eu"="2"
// CHECK-DAG: attributes [[NUM_SGPR_32]] = {{.*}}"amdgpu-num-sgpr"="32"
// CHECK-DAG: attributes [[NUM_VGPR_64]] = {{.*}}"amdgpu-num-vgpr"="64"
| amdgpu-kernel-attrs.cu | // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -emit-llvm -o - -x hip %s \
// RUN: | FileCheck -check-prefixes=CHECK,DEFAULT %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa --gpu-max-threads-per-block=1024 \
// RUN: -fcuda-is-device -emit-llvm -o - -x hip %s \
// RUN: | FileCheck -check-prefixes=CHECK,MAX1024 %s
// RUN: %clang_cc1 -triple nvptx \
// RUN: -fcuda-is-device -emit-llvm -o - %s | FileCheck %s \
// RUN: -check-prefix=NAMD
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm \
// RUN: -verify -o - -x hip %s | FileCheck -check-prefix=NAMD %s
#include "Inputs/cuda.h"
__global__ void flat_work_group_size_default() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z28flat_work_group_size_defaultv() [[FLAT_WORK_GROUP_SIZE_DEFAULT:#[0-9]+]]
}
__attribute__((amdgpu_flat_work_group_size(32, 64))) // expected-no-diagnostics
__global__ void flat_work_group_size_32_64() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z26flat_work_group_size_32_64v() [[FLAT_WORK_GROUP_SIZE_32_64:#[0-9]+]]
}
__attribute__((amdgpu_waves_per_eu(2))) // expected-no-diagnostics
__global__ void waves_per_eu_2() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z14waves_per_eu_2v() [[WAVES_PER_EU_2:#[0-9]+]]
}
__attribute__((amdgpu_num_sgpr(32))) // expected-no-diagnostics
__global__ void num_sgpr_32() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z11num_sgpr_32v() [[NUM_SGPR_32:#[0-9]+]]
}
__attribute__((amdgpu_num_vgpr(64))) // expected-no-diagnostics
__global__ void num_vgpr_64() {
// CHECK: define{{.*}} amdgpu_kernel void @_Z11num_vgpr_64v() [[NUM_VGPR_64:#[0-9]+]]
}
// Make sure this is silently accepted on other targets.
// NAMD-NOT: "amdgpu-flat-work-group-size"
// NAMD-NOT: "amdgpu-waves-per-eu"
// NAMD-NOT: "amdgpu-num-vgpr"
// NAMD-NOT: "amdgpu-num-sgpr"
// DEFAULT-DAG: attributes [[FLAT_WORK_GROUP_SIZE_DEFAULT]] = {{.*}}"amdgpu-flat-work-group-size"="1,256"{{.*}}"uniform-work-group-size"="true"
// MAX1024-DAG: attributes [[FLAT_WORK_GROUP_SIZE_DEFAULT]] = {{.*}}"amdgpu-flat-work-group-size"="1,1024"
// CHECK-DAG: attributes [[FLAT_WORK_GROUP_SIZE_32_64]] = {{.*}}"amdgpu-flat-work-group-size"="32,64"
// CHECK-DAG: attributes [[WAVES_PER_EU_2]] = {{.*}}"amdgpu-waves-per-eu"="2"
// CHECK-DAG: attributes [[NUM_SGPR_32]] = {{.*}}"amdgpu-num-sgpr"="32"
// CHECK-DAG: attributes [[NUM_VGPR_64]] = {{.*}}"amdgpu-num-vgpr"="64"
|
41faa736f754b9d61ee2a5d23c88f7de8b3e4f75.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <numeric>
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<table>, std::unique_ptr<column>> encode(
table_view const& input_table, rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
std::vector<size_type> drop_keys(input_table.num_columns());
std::iota(drop_keys.begin(), drop_keys.end(), 0);
// side effects of this function we are now dependent on:
// - resulting column elements are sorted ascending
// - nulls are sorted to the beginning
auto keys_table = cudf::detail::drop_duplicates(
input_table, drop_keys, duplicate_keep_option::KEEP_FIRST, null_equality::EQUAL, mr, stream);
if (cudf::has_nulls(keys_table->view())) {
// Rows with nulls appear at the top of `keys_table`, but we want them to appear at
// the bottom. Below, we rearrange the rows so that nulls appear at the bottom:
// TODO: we should be able to get rid of this logic once
// https://github.com/rapidsai/cudf/issues/6144 is resolved
auto num_rows = keys_table->num_rows();
auto mask =
cudf::detail::bitmask_and(keys_table->view(), rmm::mr::get_current_device_resource(), stream);
auto num_rows_with_nulls =
cudf::count_unset_bits(reinterpret_cast<bitmask_type*>(mask.data()), 0, num_rows);
rmm::device_vector<cudf::size_type> gather_map(num_rows);
auto execpol = rmm::exec_policy(stream);
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(num_rows),
gather_map.begin(),
[num_rows, num_rows_with_nulls] __device__(cudf::size_type i) {
if (i < (num_rows - num_rows_with_nulls)) {
return num_rows_with_nulls + i;
} else {
return num_rows - i - 1;
}
});
cudf::column_view gather_map_column(
cudf::data_type{type_id::INT32}, num_rows, thrust::raw_pointer_cast(gather_map.data()));
keys_table = cudf::detail::gather(keys_table->view(),
gather_map_column,
cudf::detail::out_of_bounds_policy::FAIL,
cudf::detail::negative_index_policy::NOT_ALLOWED,
mr,
stream);
}
auto indices_column =
cudf::detail::lower_bound(keys_table->view(),
input_table,
std::vector<order>(input_table.num_columns(), order::ASCENDING),
std::vector<null_order>(input_table.num_columns(), null_order::AFTER),
mr,
stream);
return std::make_pair(std::move(keys_table), std::move(indices_column));
}
} // namespace detail
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::column>> encode(
cudf::table_view const& input, rmm::mr::device_memory_resource* mr)
{
return detail::encode(input, mr, 0);
}
} // namespace cudf
| 41faa736f754b9d61ee2a5d23c88f7de8b3e4f75.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/search.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <numeric>
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<table>, std::unique_ptr<column>> encode(
table_view const& input_table, rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
std::vector<size_type> drop_keys(input_table.num_columns());
std::iota(drop_keys.begin(), drop_keys.end(), 0);
// side effects of this function we are now dependent on:
// - resulting column elements are sorted ascending
// - nulls are sorted to the beginning
auto keys_table = cudf::detail::drop_duplicates(
input_table, drop_keys, duplicate_keep_option::KEEP_FIRST, null_equality::EQUAL, mr, stream);
if (cudf::has_nulls(keys_table->view())) {
// Rows with nulls appear at the top of `keys_table`, but we want them to appear at
// the bottom. Below, we rearrange the rows so that nulls appear at the bottom:
// TODO: we should be able to get rid of this logic once
// https://github.com/rapidsai/cudf/issues/6144 is resolved
auto num_rows = keys_table->num_rows();
auto mask =
cudf::detail::bitmask_and(keys_table->view(), rmm::mr::get_current_device_resource(), stream);
auto num_rows_with_nulls =
cudf::count_unset_bits(reinterpret_cast<bitmask_type*>(mask.data()), 0, num_rows);
rmm::device_vector<cudf::size_type> gather_map(num_rows);
auto execpol = rmm::exec_policy(stream);
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(num_rows),
gather_map.begin(),
[num_rows, num_rows_with_nulls] __device__(cudf::size_type i) {
if (i < (num_rows - num_rows_with_nulls)) {
return num_rows_with_nulls + i;
} else {
return num_rows - i - 1;
}
});
cudf::column_view gather_map_column(
cudf::data_type{type_id::INT32}, num_rows, thrust::raw_pointer_cast(gather_map.data()));
keys_table = cudf::detail::gather(keys_table->view(),
gather_map_column,
cudf::detail::out_of_bounds_policy::FAIL,
cudf::detail::negative_index_policy::NOT_ALLOWED,
mr,
stream);
}
auto indices_column =
cudf::detail::lower_bound(keys_table->view(),
input_table,
std::vector<order>(input_table.num_columns(), order::ASCENDING),
std::vector<null_order>(input_table.num_columns(), null_order::AFTER),
mr,
stream);
return std::make_pair(std::move(keys_table), std::move(indices_column));
}
} // namespace detail
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::column>> encode(
cudf::table_view const& input, rmm::mr::device_memory_resource* mr)
{
return detail::encode(input, mr, 0);
}
} // namespace cudf
|
c9112f497d81aa4a86c714225c65e2ef9e142bed.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <hip/hip_runtime_api.h>
#define N 1000000
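// Intentionally naive kernel: it is launched with a single thread below, so one thread
// walks the whole array serially. It serves as a simple target for the profiler
// (hipProfilerStart/hipProfilerStop).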
__global__ void vector_add(float *out, float *a, float *b, int n) {
for(int i = 0; i < n; i++){
out[i] = a[i] + b[i];
}
}
int main(){
hipProfilerStart();
float *a, *b, *out;
// Allocate memory
hipMallocManaged(&a, sizeof(float) * N);
hipMallocManaged(&b, sizeof(float) * N);
hipMallocManaged(&out, sizeof(float) * N);
// Initialize array
for(int i = 0; i < N; i++){
a[i] = 1.0f; b[i] = 2.0f;
}
// Main function
hipLaunchKernelGGL(( vector_add), dim3(1),dim3(1), 0, 0, out, a, b, N);
hipDeviceSynchronize();
hipFree(a);
hipFree(b);
hipFree(out);
hipDeviceReset();
hipProfilerStop();
}
| c9112f497d81aa4a86c714225c65e2ef9e142bed.cu | #include <stdlib.h>
#include <cuda_profiler_api.h>
#define N 1000000
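// Intentionally naive kernel: it is launched with a single thread below, so one thread
// walks the whole array serially. It serves as a simple target for the profiler
// (cudaProfilerStart/cudaProfilerStop).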
__global__ void vector_add(float *out, float *a, float *b, int n) {
for(int i = 0; i < n; i++){
out[i] = a[i] + b[i];
}
}
int main(){
cudaProfilerStart();
float *a, *b, *out;
// Allocate memory
cudaMallocManaged(&a, sizeof(float) * N);
cudaMallocManaged(&b, sizeof(float) * N);
cudaMallocManaged(&out, sizeof(float) * N);
// Initialize array
for(int i = 0; i < N; i++){
a[i] = 1.0f; b[i] = 2.0f;
}
// Main function
vector_add<<<1,1>>>(out, a, b, N);
cudaDeviceSynchronize();
cudaFree(a);
cudaFree(b);
cudaFree(out);
cudaDeviceReset();
cudaProfilerStop();
}
|
1e338cac14921c615e3787f74cc019563cb9205e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "commonblas_z.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
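// Computes dwork[blockIdx.x] = V(:,blockIdx.x)**H * c: one thread block per column of V,
// with a block-wide tree reduction of the partial dot products in shared memory.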
extern "C"
__global__ void
magma_zgemv_kernel1(int m, const magmaDoubleComplex * __restrict__ V, int ldv,
const magmaDoubleComplex * __restrict__ c,
magmaDoubleComplex *dwork)
{
const int i = threadIdx.x;
const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
/* lsum := v**H * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_zgemv_kernel3<<< n, BLOCK_SIZE>>>(m, V, ldv, c, dwork, tau)
to compute
ZGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_zgemv_kernel3(int m, const magmaDoubleComplex * __restrict__ V, int ldv, magmaDoubleComplex *c,
magmaDoubleComplex *dwork, magmaDoubleComplex *tau)
{
const int i = threadIdx.x;
const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
if (i==0)
c[0] = MAGMA_Z_ONE;
/* lsum := v**H * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_zgemv_kernel2(int m, int n, const magmaDoubleComplex * __restrict__ V, int ldv,
const magmaDoubleComplex * __restrict__ x, magmaDoubleComplex *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
magmaDoubleComplex lsum;
V += j;
lsum = MAGMA_Z_ZERO;
if (j < m){
for(int k=0; k<n; k++)
lsum += MAGMA_Z_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a complex block reflector H to a complex vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the complex k-by-k upper triangular matrix in the
representation of the block reflector, and V is a complex block of
k elementary reflectors.
*/
extern "C" void
magma_zlarfbx_gpu(
magma_int_t m, magma_int_t k,
magmaDoubleComplex_ptr V, magma_int_t ldv,
magmaDoubleComplex_ptr dT, magma_int_t ldt,
magmaDoubleComplex_ptr c,
magmaDoubleComplex_ptr dwork)
{
/* dwork = V**H c */
hipLaunchKernelGGL(( magma_zgemv_kernel1), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m, V, ldv, c, dwork);
/* dwork = T**H dwork */
hipLaunchKernelGGL(( magma_ztrmv_tkernel), dim3(k), dim3(k), 0, magma_stream , dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_zgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , m, k, V, ldv, dwork+k, c);
}
//==============================================================================
| 1e338cac14921c615e3787f74cc019563cb9205e.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "commonblas_z.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
//==============================================================================
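// Computes dwork[blockIdx.x] = V(:,blockIdx.x)**H * c: one thread block per column of V,
// with a block-wide tree reduction of the partial dot products in shared memory.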
extern "C"
__global__ void
magma_zgemv_kernel1(int m, const magmaDoubleComplex * __restrict__ V, int ldv,
const magmaDoubleComplex * __restrict__ c,
magmaDoubleComplex *dwork)
{
const int i = threadIdx.x;
const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
/* lsum := v**H * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = sum[0];
}
//==============================================================================
/* -----------------------------------------------------------------------------
Call
magma_zgemv_kernel3<<< n, BLOCK_SIZE>>>(m, V, ldv, c, dwork, tau)
to compute
ZGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
----------------------------------------------------------------------------- */
extern "C"
__global__ void
magma_zgemv_kernel3(int m, const magmaDoubleComplex * __restrict__ V, int ldv, magmaDoubleComplex *c,
magmaDoubleComplex *dwork, magmaDoubleComplex *tau)
{
const int i = threadIdx.x;
const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
if (i==0)
c[0] = MAGMA_Z_ONE;
/* lsum := v**H * C */
lsum = MAGMA_Z_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE )
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i==0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
//==============================================================================
extern "C"
__global__ void
magma_zgemv_kernel2(int m, int n, const magmaDoubleComplex * __restrict__ V, int ldv,
const magmaDoubleComplex * __restrict__ x, magmaDoubleComplex *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
magmaDoubleComplex lsum;
V += j;
lsum = MAGMA_Z_ZERO;
if (j < m){
for(int k=0; k<n; k++)
lsum += MAGMA_Z_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
//==============================================================================
/*
Apply a complex block reflector H to a complex vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the complex k-by-k upper triangular matrix in the
representation of the block reflector, and V is a complex block of
k elementary reflectors.
*/
extern "C" void
magma_zlarfbx_gpu(
magma_int_t m, magma_int_t k,
magmaDoubleComplex_ptr V, magma_int_t ldv,
magmaDoubleComplex_ptr dT, magma_int_t ldt,
magmaDoubleComplex_ptr c,
magmaDoubleComplex_ptr dwork)
{
/* dwork = V**H c */
magma_zgemv_kernel1<<< k, BLOCK_SIZE, 0, magma_stream >>>(m, V, ldv, c, dwork);
/* dwork = T**H dwork */
magma_ztrmv_tkernel<<< k, k, 0, magma_stream >>>( dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( (m + BLOCK_SIZE-1) / BLOCK_SIZE );
dim3 threads3( BLOCK_SIZE );
magma_zgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>>( m, k, V, ldv, dwork+k, c);
}
//==============================================================================
|
4f79ae0ff90d66c1b515a11509d067738cdfd8f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/AccumulateType.h"
#include "ATen/TensorUtils.h"
#include "c10/util/Exception.h"
#include "ATen/hip/HIPContext.h"
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
namespace at {
namespace native {
namespace {
// Block size for weight_norm_*_first_dim_kernel.
// Currently, kernels are non-persistent.
// Dialing up the block size to, say 1024, can improve performance by
// increasing the amount of cache available per block, which can improve cache hit rate.
// However, this is less efficient for short rows. 256 is pretty versatile.
// May be worth implementing heuristics later.
#define BLOCK 256
// Block size for weight_norm_*_last_dim_kernel.
// This is trickier than the first_dim case because we must make blocks
// at least 16 fast elements wide to ensure fully-coalesced half-precision accesses.
// Since output-element parallelism is along the fast dimension, this reduces the number of
// blocks we can launch by 16X.
#define TILE_W 16
// Somewhat versatile strategy: max out intra-block parallelism by extending
// blocks across the slow dimension up to the hardware-max block size of 1024.
#define TILE_H 64
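// Reduces the per-thread values `val` into the first `lanes` entries of the shared
// array x: with lanes == 1, x[0] ends up holding the full block sum; as used below with
// lanes == blockDim.x (== TILE_W), x[threadIdx.x] holds the sum over the block's y-dimension.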
template<typename T, typename ReduceOp>
__device__ __forceinline__ void reduce_block_into_lanes
(T *x,
T val,
int lanes, // lanes is intended to be <= 32.
ReduceOp reduceOp)
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#pragma unroll
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = reduceOp(x[tid], x[tid+i]);
__syncthreads();
}
if(tid < 32)
{
T final;
if(blockSize >= 64)
final = reduceOp(x[tid], x[tid+32]);
else
final = val;
// __SYNCWARP();
#pragma unroll
for(int i = 16; i >= lanes; i >>= 1)
final = reduceOp(final, WARP_SHFL_DOWN(final, i));
if(tid < lanes)
x[tid] = final; // EpilogueOp
}
// Make sure the smem result is visible to all warps.
__syncthreads();
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes?
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_first_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int rowSize)
{
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
// Could choose to save reciprocal of norm instead I suppose, but norms is probably
// more handy to keep around.
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[row];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(tid == 0)
grad_g[row] = scalar_cast<scalar_t>(result*rnorm);
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]);
// Write v gradients. We are reusing values that were loaded earlier, so there
// is an optimization opportunity here (store values persistently).
for(int j = tid; j < rowSize; j += stride )
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]);
accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_last_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int fast_dim_size,
const int slower_dims_size)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
accscalar_t result = s[threadIdx.x];
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[fast_dim_location];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(threadIdx.y == 0)
grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm);
// Entire block pulls these values, could use shared memory instead.
accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]);
// Write v gradients.
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]);
accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
} // anonymous namespace
std::tuple<Tensor,Tensor> weight_norm_cuda
(const Tensor & v,
const Tensor & g,
int64_t dim)
{
auto w = at::empty_like(v);
// weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp
// sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here.
// norms is only needed to stash for backward.
// g.type().scalarType() may be at::ScalarType::Double, Float, or Half.
// If Half, stash norms as float.
at::ScalarType AccType = g.type().scalarType() == at::ScalarType::Half ?
at::ScalarType::Float : g.type().scalarType();
// Will this create norms on the same device as g, regardless of what the thread's default
// current device is? I believe so, because Type::* functions are DeviceGuard()ed.
auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType));
const int ndims = v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>)
, dim3(v.size(0)),
dim3(BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
w.data<scalar_t>(),
norms.data<accscalar_t>(),
v.data<scalar_t>(),
g.data<scalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
w.data<scalar_t>(),
norms.data<accscalar_t>(),
v.data<scalar_t>(),
g.data<scalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(hipGetLastError());
return std::tuple<Tensor, Tensor>{w, norms};
}
std::tuple<Tensor, Tensor> weight_norm_cuda_backward
(const Tensor & grad_w,
const Tensor & saved_v,
const Tensor & saved_g,
const Tensor & saved_norms,
int64_t dim)
{
// These checks should always succeed, because weight_norm_fused_backward should only
// ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g.
AT_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous");
AT_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous");
AT_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous");
AT_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim")
auto grad_v = at::empty_like(saved_v);
auto grad_g = at::empty_like(saved_g);
const int ndims = saved_v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= saved_v.size(i);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.type(),
"weight_norm_bwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>)
, dim3(grad_w.size(0)),
dim3( BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
grad_v.data<scalar_t>(),
grad_g.data<scalar_t>(),
grad_w.data<scalar_t>(),
saved_v.data<scalar_t>(),
saved_g.data<scalar_t>(),
saved_norms.data<accscalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array.
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= saved_v.size(i);
int fast_dim_size = saved_v.size(ndims-1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.type(),
"weight_norm_bwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
grad_v.data<scalar_t>(),
grad_g.data<scalar_t>(),
grad_w.data<scalar_t>(),
saved_v.data<scalar_t>(),
saved_g.data<scalar_t>(),
saved_norms.data<accscalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(hipGetLastError());
return std::tuple<Tensor, Tensor>{grad_v, grad_g};
}
#undef BLOCK
#undef TILE_W
#undef TILE_H
} // namespace native
} // namespace at
| 4f79ae0ff90d66c1b515a11509d067738cdfd8f0.cu | #include "ATen/ATen.h"
#include "ATen/AccumulateType.h"
#include "ATen/TensorUtils.h"
#include "c10/util/Exception.h"
#include "ATen/cuda/CUDAContext.h"
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
namespace at {
namespace native {
namespace {
// Block size for weight_norm_*_first_dim_kernel.
// Currently, kernels are non-persistent.
// Dialing up the block size to, say 1024, can improve performance by
// increasing the amount of cache available per block, which can improve cache hit rate.
// However, this is less efficient for short rows. 256 is pretty versatile.
// May be worth implementing heuristics later.
#define BLOCK 256
// Block size for weight_norm_*_last_dim_kernel.
// This is trickier than the first_dim case because we must make blocks
// at least 16 fast elements wide to ensure fully-coalesced half-precision accesses.
// Since output-element parallelism is along the fast dimension, this reduces the number of
// blocks we can launch by 16X.
#define TILE_W 16
// Somewhat versatile strategy: max out intra-block parallelism by extending
// blocks across the slow dimension up to the hardware-max block size of 1024.
#define TILE_H 64
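// Reduces the per-thread values `val` into the first `lanes` entries of the shared
// array x: with lanes == 1, x[0] ends up holding the full block sum; as used below with
// lanes == blockDim.x (== TILE_W), x[threadIdx.x] holds the sum over the block's y-dimension.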
template<typename T, typename ReduceOp>
__device__ __forceinline__ void reduce_block_into_lanes
(T *x,
T val,
int lanes, // lanes is intended to be <= 32.
ReduceOp reduceOp)
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#pragma unroll
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = reduceOp(x[tid], x[tid+i]);
__syncthreads();
}
if(tid < 32)
{
T final;
if(blockSize >= 64)
final = reduceOp(x[tid], x[tid+32]);
else
final = val;
// __SYNCWARP();
#pragma unroll
for(int i = 16; i >= lanes; i >>= 1)
final = reduceOp(final, WARP_SHFL_DOWN(final, i));
if(tid < lanes)
x[tid] = final; // EpilogueOp
}
// Make sure the smem result is visible to all warps.
__syncthreads();
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes?
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_first_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int rowSize)
{
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
// Could choose to save reciprocal of norm instead I suppose, but norms is probably
// more handy to keep around.
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[row];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(tid == 0)
grad_g[row] = scalar_cast<scalar_t>(result*rnorm);
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]);
// Write v gradients. We are reusing values that were loaded earlier, so there
// is an optimization opportunity here (store values persistently).
for(int j = tid; j < rowSize; j += stride )
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]);
accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_last_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int fast_dim_size,
const int slower_dims_size)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
accscalar_t result = s[threadIdx.x];
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[fast_dim_location];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(threadIdx.y == 0)
grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm);
// Entire block pulls these values, could use shared memory instead.
accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]);
// Write v gradients.
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]);
accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
} // anonymous namespace
std::tuple<Tensor,Tensor> weight_norm_cuda
(const Tensor & v,
const Tensor & g,
int64_t dim)
{
auto w = at::empty_like(v);
// weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp
// sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here.
// norms is only needed to stash for backward.
// g.type().scalarType() may be at::ScalarType::Double, Float, or Half.
// If Half, stash norms as float.
at::ScalarType AccType = g.type().scalarType() == at::ScalarType::Half ?
at::ScalarType::Float : g.type().scalarType();
// Will this create norms on the same device as g, regardless of what the thread's default
// current device is? I believe so, because Type::* functions are DeviceGuard()ed.
auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType));
const int ndims = v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>
<<<v.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(w.data<scalar_t>(),
norms.data<accscalar_t>(),
v.data<scalar_t>(),
g.data<scalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(w.data<scalar_t>(),
norms.data<accscalar_t>(),
v.data<scalar_t>(),
g.data<scalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(cudaGetLastError());
return std::tuple<Tensor, Tensor>{w, norms};
}
std::tuple<Tensor, Tensor> weight_norm_cuda_backward
(const Tensor & grad_w,
const Tensor & saved_v,
const Tensor & saved_g,
const Tensor & saved_norms,
int64_t dim)
{
// These checks should always succeed, because weight_norm_fused_backward should only
// ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g.
AT_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous");
AT_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous");
AT_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous");
AT_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim")
auto grad_v = at::empty_like(saved_v);
auto grad_g = at::empty_like(saved_g);
const int ndims = saved_v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= saved_v.size(i);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.type(),
"weight_norm_bwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>
<<<grad_w.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(grad_v.data<scalar_t>(),
grad_g.data<scalar_t>(),
grad_w.data<scalar_t>(),
saved_v.data<scalar_t>(),
saved_g.data<scalar_t>(),
saved_norms.data<accscalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array.
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= saved_v.size(i);
int fast_dim_size = saved_v.size(ndims-1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.type(),
"weight_norm_bwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(grad_v.data<scalar_t>(),
grad_g.data<scalar_t>(),
grad_w.data<scalar_t>(),
saved_v.data<scalar_t>(),
saved_g.data<scalar_t>(),
saved_norms.data<accscalar_t>(),
fast_dim_size,
slower_dims_size);
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, this is the best we can do.
THCudaCheck(cudaGetLastError());
return std::tuple<Tensor, Tensor>{grad_v, grad_g};
}
#undef BLOCK
#undef TILE_W
#undef TILE_H
} // namespace native
} // namespace at
|
bf5dd13aa20dcb06b778fe12b69604ba6749f977.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
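// 4-point stencil: each interior cell of an N x N grid gets the sum of its four
// neighbours; border cells are copied through. The indexing assumes the grid is
// launched as a single N x N thread block (row stride = blockDim.x).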
__global__ void add(float *A, float *C)
{
int columna = threadIdx.x;
// row index
int fila = threadIdx.y;
// linear index
int Id = columna + fila * blockDim.x;
int id1 = (columna - 1) + fila * blockDim.x;
int id2 = (columna + 1) + fila * blockDim.x;
int id3 = columna + (fila - 1) * blockDim.x;
int id4 = columna + (fila + 1) * blockDim.x;
if ((fila > 0 && fila < N - 1) && (columna > 0 && columna < N - 1)) {
C[Id] = A[id1] + A[id2] + A[id3] + A[id4];
}
else
{
C[Id] = A[Id];
}
} | bf5dd13aa20dcb06b778fe12b69604ba6749f977.cu | #include "includes.h"
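// 4-point stencil: each interior cell of an N x N grid gets the sum of its four
// neighbours; border cells are copied through. The indexing assumes the grid is
// launched as a single N x N thread block (row stride = blockDim.x).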
__global__ void add(float *A, float *C)
{
int columna = threadIdx.x;
// row index
int fila = threadIdx.y;
// linear index
int Id = columna + fila * blockDim.x;
int id1 = (columna - 1) + fila * blockDim.x;
int id2 = (columna + 1) + fila * blockDim.x;
int id3 = columna + (fila - 1) * blockDim.x;
int id4 = columna + (fila + 1) * blockDim.x;
if ((fila > 0 && fila < N - 1) && (columna > 0 && columna < N - 1)) {
C[Id] = A[id1] + A[id2] + A[id3] + A[id4];
}
else
{
C[Id] = A[Id];
}
} |
a7cc68d24b26e53854b445a323f7cf6711d5f89f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __GPUELEMFACEGEOM
#define __GPUELEMFACEGEOM
template <typename T>
__global__ void gpuTemplateElemGeom1D(T *jac, T *Xx, T *Jg, int nga)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < nga) {
jac[tid] = Jg[tid];
Xx[tid] = 1.0;
tid += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuElemGeom1D(T *jac, T *Xx, T *Jg, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateElemGeom1D), dim3(gridDim), dim3(blockDim), 0, 0, jac, Xx, Jg, nga);
}
template void gpuElemGeom1D(double*, double*, double*, int);
template void gpuElemGeom1D(float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateElemGeom2D(T *jac, T *Xx11, T *Xx12, T *Xx21, T *Xx22,
T *Jg11, T *Jg12, T *Jg21, T *Jg22, int nga)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < nga) {
jac[tid] = Jg11[tid]*Jg22[tid] - Jg12[tid]*Jg21[tid];
Xx11[tid] = Jg22[tid];
Xx21[tid] = -Jg21[tid];
Xx12[tid] = -Jg12[tid];
Xx22[tid] = Jg11[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuElemGeom2D(T *jac, T *Xx11, T *Xx12, T *Xx21, T *Xx22,
T *Jg11, T *Jg12, T *Jg21, T *Jg22, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateElemGeom2D), dim3(gridDim), dim3(blockDim), 0, 0, jac, Xx11, Xx12, Xx21, Xx22, Jg11, Jg12, Jg21, Jg22, nga);
}
template void gpuElemGeom2D(double*, double*, double*, double*, double*, double*, double*, double*, double*, int);
template void gpuElemGeom2D(float*, float*, float*, float*, float*, float*, float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateElemGeom3D(T *jac, T *Xx11, T *Xx12, T *Xx13, T *Xx21,
T *Xx22, T *Xx23, T *Xx31, T *Xx32, T *Xx33,
T *Jg11, T *Jg12, T *Jg13, T *Jg21, T *Jg22,
T *Jg23, T *Jg31, T *Jg32, T *Jg33, int nga)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < nga) {
jac[tid] = Jg11[tid]*Jg22[tid]*Jg33[tid] - Jg11[tid]*Jg32[tid]*Jg23[tid] +
Jg21[tid]*Jg32[tid]*Jg13[tid] - Jg21[tid]*Jg12[tid]*Jg33[tid] +
Jg31[tid]*Jg12[tid]*Jg23[tid] - Jg31[tid]*Jg22[tid]*Jg13[tid];
Xx11[tid] = Jg22[tid]*Jg33[tid] - Jg23[tid]*Jg32[tid];
Xx21[tid] = Jg23[tid]*Jg31[tid] - Jg21[tid]*Jg33[tid];
Xx31[tid] = Jg21[tid]*Jg32[tid] - Jg22[tid]*Jg31[tid];
Xx12[tid] = Jg13[tid]*Jg32[tid] - Jg12[tid]*Jg33[tid];
Xx22[tid] = Jg11[tid]*Jg33[tid] - Jg13[tid]*Jg31[tid];
Xx32[tid] = Jg12[tid]*Jg31[tid] - Jg11[tid]*Jg32[tid];
Xx13[tid] = Jg12[tid]*Jg23[tid] - Jg13[tid]*Jg22[tid];
Xx23[tid] = Jg13[tid]*Jg21[tid] - Jg11[tid]*Jg23[tid];
Xx33[tid] = Jg11[tid]*Jg22[tid] - Jg12[tid]*Jg21[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuElemGeom3D(T *jac, T *Xx11, T *Xx12, T *Xx13, T *Xx21,
T *Xx22, T *Xx23, T *Xx31, T *Xx32, T *Xx33,
T *Jg11, T *Jg12, T *Jg13, T *Jg21, T *Jg22,
T *Jg23, T *Jg31, T *Jg32, T *Jg33, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateElemGeom3D), dim3(gridDim), dim3(blockDim), 0, 0, jac, Xx11, Xx12, Xx13, Xx21, Xx22, Xx23, Xx31, Xx32, Xx33,
Jg11, Jg12, Jg13, Jg21, Jg22, Jg23, Jg31, Jg32, Jg33, nga);
}
template void gpuElemGeom3D(double*, double*, double*, double*, double*, double*, double*, double*, double*, double*,
double*, double*, double*, double*, double*, double*, double*, double*, double*, int);
template void gpuElemGeom3D(float*, float*, float*, float*, float*, float*, float*, float*, float*, float*,
float*, float*, float*, float*, float*, float*, float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateFaceGeom1D(T *jacg, T *nlg, T *Jg, int nga)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nga) {
jacg[i] = 1.0;
nlg[i] = -1.0;
i += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuFaceGeom1D(T *jacg, T *nlg, T *Jg, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateFaceGeom1D), dim3(gridDim), dim3(blockDim), 0, 0, jacg, nlg, Jg, nga);
}
template void gpuFaceGeom1D(double*, double*, double*, int);
template void gpuFaceGeom1D(float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateFaceGeom2D(T *jacg, T *nlg, T *Jg, int nga)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nga) {
int j = i+nga;
jacg[i] = sqrt(Jg[i]*Jg[i] + Jg[j]*Jg[j]);
nlg[i] = Jg[j]/jacg[i];
nlg[j] = -Jg[i]/jacg[i];
i += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuFaceGeom2D(T *jacg, T *nlg, T *Jg, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateFaceGeom2D), dim3(gridDim), dim3(blockDim), 0, 0, jacg, nlg, Jg, nga);
}
template void gpuFaceGeom2D(double*, double*, double*, int);
template void gpuFaceGeom2D(float*, float*, float*, int);
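// 3D face geometry: Jg stores the two tangent vectors of the face mapping (components
// interleaved with stride nga). The face normal is their cross product, jacg its length
// (the surface Jacobian) and nlg the normalized normal.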
template <typename T>
__global__ void gpuTemplateFaceGeom3D(T *jacg, T *nlg, T *Jg, int nga)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nga) {
int j = i+nga;
int k = i+2*nga;
int m = i+3*nga;
int n = i+4*nga;
int p = i+5*nga;
nlg[i] = Jg[j]*Jg[p] - Jg[k]*Jg[n];
nlg[j] = Jg[k]*Jg[m] - Jg[i]*Jg[p];
nlg[k] = Jg[i]*Jg[n] - Jg[j]*Jg[m];
jacg[i] = sqrt(nlg[i]*nlg[i] + nlg[j]*nlg[j] + nlg[k]*nlg[k]);
nlg[i] = nlg[i]/jacg[i];
nlg[j] = nlg[j]/jacg[i];
nlg[k] = nlg[k]/jacg[i];
i += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuFaceGeom3D(T *jacg, T *nlg, T *Jg, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateFaceGeom3D), dim3(gridDim), dim3(blockDim), 0, 0, jacg, nlg, Jg, nga);
}
template void gpuFaceGeom3D(double*, double*, double*, int);
template void gpuFaceGeom3D(float*, float*, float*, int);
#endif
| a7cc68d24b26e53854b445a323f7cf6711d5f89f.cu | #ifndef __GPUELEMFACEGEOM
#define __GPUELEMFACEGEOM
template <typename T>
__global__ void gpuTemplateElemGeom1D(T *jac, T *Xx, T *Jg, int nga)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < nga) {
jac[tid] = Jg[tid];
Xx[tid] = 1.0;
tid += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuElemGeom1D(T *jac, T *Xx, T *Jg, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateElemGeom1D<<<gridDim, blockDim>>>(jac, Xx, Jg, nga);
}
template void gpuElemGeom1D(double*, double*, double*, int);
template void gpuElemGeom1D(float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateElemGeom2D(T *jac, T *Xx11, T *Xx12, T *Xx21, T *Xx22,
T *Jg11, T *Jg12, T *Jg21, T *Jg22, int nga)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < nga) {
jac[tid] = Jg11[tid]*Jg22[tid] - Jg12[tid]*Jg21[tid];
Xx11[tid] = Jg22[tid];
Xx21[tid] = -Jg21[tid];
Xx12[tid] = -Jg12[tid];
Xx22[tid] = Jg11[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuElemGeom2D(T *jac, T *Xx11, T *Xx12, T *Xx21, T *Xx22,
T *Jg11, T *Jg12, T *Jg21, T *Jg22, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateElemGeom2D<<<gridDim, blockDim>>>(jac, Xx11, Xx12, Xx21, Xx22, Jg11, Jg12, Jg21, Jg22, nga);
}
template void gpuElemGeom2D(double*, double*, double*, double*, double*, double*, double*, double*, double*, int);
template void gpuElemGeom2D(float*, float*, float*, float*, float*, float*, float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateElemGeom3D(T *jac, T *Xx11, T *Xx12, T *Xx13, T *Xx21,
T *Xx22, T *Xx23, T *Xx31, T *Xx32, T *Xx33,
T *Jg11, T *Jg12, T *Jg13, T *Jg21, T *Jg22,
T *Jg23, T *Jg31, T *Jg32, T *Jg33, int nga)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < nga) {
jac[tid] = Jg11[tid]*Jg22[tid]*Jg33[tid] - Jg11[tid]*Jg32[tid]*Jg23[tid] +
Jg21[tid]*Jg32[tid]*Jg13[tid] - Jg21[tid]*Jg12[tid]*Jg33[tid] +
Jg31[tid]*Jg12[tid]*Jg23[tid] - Jg31[tid]*Jg22[tid]*Jg13[tid];
Xx11[tid] = Jg22[tid]*Jg33[tid] - Jg23[tid]*Jg32[tid];
Xx21[tid] = Jg23[tid]*Jg31[tid] - Jg21[tid]*Jg33[tid];
Xx31[tid] = Jg21[tid]*Jg32[tid] - Jg22[tid]*Jg31[tid];
Xx12[tid] = Jg13[tid]*Jg32[tid] - Jg12[tid]*Jg33[tid];
Xx22[tid] = Jg11[tid]*Jg33[tid] - Jg13[tid]*Jg31[tid];
Xx32[tid] = Jg12[tid]*Jg31[tid] - Jg11[tid]*Jg32[tid];
Xx13[tid] = Jg12[tid]*Jg23[tid] - Jg13[tid]*Jg22[tid];
Xx23[tid] = Jg13[tid]*Jg21[tid] - Jg11[tid]*Jg23[tid];
Xx33[tid] = Jg11[tid]*Jg22[tid] - Jg12[tid]*Jg21[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuElemGeom3D(T *jac, T *Xx11, T *Xx12, T *Xx13, T *Xx21,
T *Xx22, T *Xx23, T *Xx31, T *Xx32, T *Xx33,
T *Jg11, T *Jg12, T *Jg13, T *Jg21, T *Jg22,
T *Jg23, T *Jg31, T *Jg32, T *Jg33, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateElemGeom3D<<<gridDim, blockDim>>>(jac, Xx11, Xx12, Xx13, Xx21, Xx22, Xx23, Xx31, Xx32, Xx33,
Jg11, Jg12, Jg13, Jg21, Jg22, Jg23, Jg31, Jg32, Jg33, nga);
}
template void gpuElemGeom3D(double*, double*, double*, double*, double*, double*, double*, double*, double*, double*,
double*, double*, double*, double*, double*, double*, double*, double*, double*, int);
template void gpuElemGeom3D(float*, float*, float*, float*, float*, float*, float*, float*, float*, float*,
float*, float*, float*, float*, float*, float*, float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateFaceGeom1D(T *jacg, T *nlg, T *Jg, int nga)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nga) {
jacg[i] = 1.0;
nlg[i] = -1.0;
i += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuFaceGeom1D(T *jacg, T *nlg, T *Jg, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateFaceGeom1D<<<gridDim, blockDim>>>(jacg, nlg, Jg, nga);
}
template void gpuFaceGeom1D(double*, double*, double*, int);
template void gpuFaceGeom1D(float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateFaceGeom2D(T *jacg, T *nlg, T *Jg, int nga)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nga) {
int j = i+nga;
jacg[i] = sqrt(Jg[i]*Jg[i] + Jg[j]*Jg[j]);
nlg[i] = Jg[j]/jacg[i];
nlg[j] = -Jg[i]/jacg[i];
i += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuFaceGeom2D(T *jacg, T *nlg, T *Jg, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateFaceGeom2D<<<gridDim, blockDim>>>(jacg, nlg, Jg, nga);
}
template void gpuFaceGeom2D(double*, double*, double*, int);
template void gpuFaceGeom2D(float*, float*, float*, int);
template <typename T>
__global__ void gpuTemplateFaceGeom3D(T *jacg, T *nlg, T *Jg, int nga)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nga) {
int j = i+nga;
int k = i+2*nga;
int m = i+3*nga;
int n = i+4*nga;
int p = i+5*nga;
nlg[i] = Jg[j]*Jg[p] - Jg[k]*Jg[n];
nlg[j] = Jg[k]*Jg[m] - Jg[i]*Jg[p];
nlg[k] = Jg[i]*Jg[n] - Jg[j]*Jg[m];
jacg[i] = sqrt(nlg[i]*nlg[i] + nlg[j]*nlg[j] + nlg[k]*nlg[k]);
nlg[i] = nlg[i]/jacg[i];
nlg[j] = nlg[j]/jacg[i];
nlg[k] = nlg[k]/jacg[i];
i += blockDim.x * gridDim.x;
}
}
template <typename T>
void gpuFaceGeom3D(T *jacg, T *nlg, T *Jg, int nga)
{
int blockDim = 256;
int gridDim = (nga + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateFaceGeom3D<<<gridDim, blockDim>>>(jacg, nlg, Jg, nga);
}
template void gpuFaceGeom3D(double*, double*, double*, int);
template void gpuFaceGeom3D(float*, float*, float*, int);
#endif
|
8ab557de9f530c2d9f7b1e747743b4814cec1acf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
            int index = threadIdx.x + blockIdx.x * blockDim.x;
            if (index >= n) { // guard threads beyond the end of the input
                return;
            }
            if (idata[index] == 0)//If this is 0
{
bools[index] = 0;
}
else
{
bools[index] = 1;
}
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
            int index = threadIdx.x + blockIdx.x * blockDim.x;
            if (index >= n) { // guard threads beyond the end of the input
                return;
            }
            if (bools[index]!=0)
{
odata[indices[index]] = idata[index];
}
}
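        // Sketch of the compaction pipeline these kernels are written for,
        // assuming an exclusive-scan routine is provided elsewhere in the project:
        //   1. kernMapToBoolean : bools[i]   = (idata[i] != 0)
        //   2. exclusive scan   : indices    = scan(bools)
        //   3. kernScatter      : odata[indices[i]] = idata[i] wherever bools[i] == 1
        // The compacted length is indices[n-1] + bools[n-1].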
}
}
| 8ab557de9f530c2d9f7b1e747743b4814cec1acf.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
            int index = threadIdx.x + blockIdx.x * blockDim.x;
            if (index >= n) { // guard threads beyond the end of the input
                return;
            }
            if (idata[index] == 0)//If this is 0
{
bools[index] = 0;
}
else
{
bools[index] = 1;
}
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
            int index = threadIdx.x + blockIdx.x * blockDim.x;
            if (index >= n) { // guard threads beyond the end of the input
                return;
            }
            if (bools[index]!=0)
{
odata[indices[index]] = idata[index];
}
}
}
}
|
26e085eae64f4dd8c54ba10c5d753a63359f6d60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/time.h>
#include <assert.h>
#include "gdsync/device.cuh"
#include "gpu.h"
//---------------------------
// kernel stuff
__global__ void void_kernel()
{
}
int gpu_launch_void_kernel_on_stream(hipStream_t s)
{
const int nblocks = 1;
const int nthreads = 1;
hipLaunchKernelGGL(( void_kernel), dim3(nblocks), dim3(nthreads), 0, s, );
CUDACHECK(hipGetLastError());
return 0;
}
//----------
__global__ void dummy_kernel(int p0, float p1, float *p2)
{
//const uint tid = threadIdx.x;
//const uint bid = blockIdx.x;
//const uint block_size = blockDim.x;
//const uint grid_size = gridDim.x;
//const uint gid = tid + bid*block_size;
//const uint n_threads = block_size*grid_size;
__syncthreads();
}
int gpu_launch_dummy_kernel(void)
{
const int nblocks = over_sub_factor * gpu_num_sm;
const int nthreads = 32;
int p0 = 100;
float p1 = 1.1f;
float *p2 = NULL;
hipLaunchKernelGGL(( dummy_kernel), dim3(nblocks), dim3(nthreads), 0, gpu_stream, p0, p1, p2);
CUDACHECK(hipGetLastError());
return 0;
}
//----------
__global__ void calc_kernel(int n, float c, float *in, float *out)
{
const uint tid = threadIdx.x;
const uint bid = blockIdx.x;
const uint block_size = blockDim.x;
const uint grid_size = gridDim.x;
const uint gid = tid + bid*block_size;
const uint n_threads = block_size*grid_size;
for (int i=gid; i<n; i += n_threads)
out[i] = in[i] * c;
}
int gpu_launch_calc_kernel_on_stream(size_t size, hipStream_t s)
{
const int nblocks = over_sub_factor * gpu_num_sm;
const int nthreads = 32*2;
int n = size / sizeof(float);
static float *in = NULL;
static float *out = NULL;
if (!in) {
in = (float*)gpu_malloc(4096, size);
out = (float*)gpu_malloc(4096, size);
}
hipLaunchKernelGGL(( calc_kernel), dim3(nblocks), dim3(nthreads), 0, s, n, 1.0f, in, out);
CUDACHECK(hipGetLastError());
return 0;
}
//----------
/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: nil
* End:
*/
| 26e085eae64f4dd8c54ba10c5d753a63359f6d60.cu | #include <sys/time.h>
#include <assert.h>
#include "gdsync/device.cuh"
#include "gpu.h"
//---------------------------
// kernel stuff
__global__ void void_kernel()
{
}
int gpu_launch_void_kernel_on_stream(CUstream s)
{
const int nblocks = 1;
const int nthreads = 1;
void_kernel<<<nblocks, nthreads, 0, s>>>();
CUDACHECK(cudaGetLastError());
return 0;
}
//----------
__global__ void dummy_kernel(int p0, float p1, float *p2)
{
//const uint tid = threadIdx.x;
//const uint bid = blockIdx.x;
//const uint block_size = blockDim.x;
//const uint grid_size = gridDim.x;
//const uint gid = tid + bid*block_size;
//const uint n_threads = block_size*grid_size;
__syncthreads();
}
int gpu_launch_dummy_kernel(void)
{
const int nblocks = over_sub_factor * gpu_num_sm;
const int nthreads = 32;
int p0 = 100;
float p1 = 1.1f;
float *p2 = NULL;
dummy_kernel<<<nblocks, nthreads, 0, gpu_stream>>>(p0, p1, p2);
CUDACHECK(cudaGetLastError());
return 0;
}
//----------
__global__ void calc_kernel(int n, float c, float *in, float *out)
{
const uint tid = threadIdx.x;
const uint bid = blockIdx.x;
const uint block_size = blockDim.x;
const uint grid_size = gridDim.x;
const uint gid = tid + bid*block_size;
const uint n_threads = block_size*grid_size;
for (int i=gid; i<n; i += n_threads)
out[i] = in[i] * c;
}
int gpu_launch_calc_kernel_on_stream(size_t size, CUstream s)
{
const int nblocks = over_sub_factor * gpu_num_sm;
const int nthreads = 32*2;
int n = size / sizeof(float);
static float *in = NULL;
static float *out = NULL;
if (!in) {
in = (float*)gpu_malloc(4096, size);
out = (float*)gpu_malloc(4096, size);
}
calc_kernel<<<nblocks, nthreads, 0, s>>>(n, 1.0f, in, out);
CUDACHECK(cudaGetLastError());
return 0;
}
//----------
/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: nil
* End:
*/
|
875f122760d972f90f05cec236156834ed94978a.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
// Defining number of elements in array
#define N 512 * 512 //+1 for error
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c) {
// Getting index of current kernel
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
d_c[tid] = d_a[tid] + d_b[tid];
tid += blockDim.x * gridDim.x;
}
}
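// The launch below uses 512 blocks of 512 threads, i.e. exactly N threads, so each
// thread handles a single element; the grid-stride while loop above keeps the kernel
// correct even if it is launched with fewer threads than elements.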
int main(void) {
// Declare host and device arrays
int h_a[N], h_b[N], h_c[N];
int *d_a, *d_b, *d_c;
// Allocate Memory on Device
hipMalloc((void **)&d_a, N * sizeof(int));
hipMalloc((void **)&d_b, N * sizeof(int));
hipMalloc((void **)&d_c, N * sizeof(int));
// Initialize host array
for (int i = 0; i < N; i++) {
h_a[i] = 2 * i * i;
h_b[i] = i;
}
hipMemcpy(d_a, h_a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N * sizeof(int), hipMemcpyHostToDevice);
// Kernel Call
hipLaunchKernelGGL(( gpuAdd), dim3(512), dim3(512), 0, 0, d_a, d_b, d_c);
hipMemcpy(h_c, d_c, N * sizeof(int), hipMemcpyDeviceToHost);
  // This ensures that kernel execution finishes before going forward
hipDeviceSynchronize();
int Correct = 1;
printf("Vector addition on GPU \n");
for (int i = 0; i < N; i++) {
if ((h_a[i] + h_b[i] != h_c[i])) {
Correct = 0;
}
}
if (Correct == 1) {
printf("GPU has computed Sum Correctly\n");
} else {
printf("There is an Error in GPU Computation\n");
}
// Free up memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 875f122760d972f90f05cec236156834ed94978a.cu | #include "stdio.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
// Defining number of elements in array
#define N 512 * 512 //+1 for error
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c) {
// Getting index of current kernel
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
d_c[tid] = d_a[tid] + d_b[tid];
tid += blockDim.x * gridDim.x;
}
}
int main(void) {
// Declare host and device arrays
int h_a[N], h_b[N], h_c[N];
int *d_a, *d_b, *d_c;
// Allocate Memory on Device
cudaMalloc((void **)&d_a, N * sizeof(int));
cudaMalloc((void **)&d_b, N * sizeof(int));
cudaMalloc((void **)&d_c, N * sizeof(int));
// Initialize host array
for (int i = 0; i < N; i++) {
h_a[i] = 2 * i * i;
h_b[i] = i;
}
cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
// Kernel Call
gpuAdd<<<512, 512>>>(d_a, d_b, d_c);
cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
  // This ensures that kernel execution finishes before going forward
cudaDeviceSynchronize();
int Correct = 1;
printf("Vector addition on GPU \n");
for (int i = 0; i < N; i++) {
if ((h_a[i] + h_b[i] != h_c[i])) {
Correct = 0;
}
}
if (Correct == 1) {
printf("GPU has computed Sum Correctly\n");
} else {
printf("There is an Error in GPU Computation\n");
}
// Free up memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
d686b36517d506abc84581164f848c63bd743702.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlascl.cu normal z -> d, Fri Sep 11 18:29:21 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
dlascl_full(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_lower(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_upper(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
DLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
kl INTEGER
            Unused, for LAPACK compatibility.
@param[in]
ku KU is INTEGER
            Unused, for LAPACK compatibility.
@param[in]
cfrom DOUBLE PRECISION
@param[in]
cto DOUBLE PRECISION
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl_q(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK dlascl
// Get machine parameters
smlnum = lapackf77_dlamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
hipLaunchKernelGGL(( dlascl_lower) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( dlascl_upper) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( dlascl_full) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
cnt += 1;
}
}
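// Example (sketch, caller-provided dA/ldda/queue assumed valid): scale the lower
// triangle of an m-by-n device matrix from norm cfrom to norm cto; kl and ku are
// unused and passed as 0.
//
//     magma_int_t info = 0;
//     magmablas_dlascl_q( MagmaLower, 0, 0, cfrom, cto, m, n, dA, ldda, queue, &info );
//     if (info != 0) { /* an argument was invalid */ }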
/**
@see magmablas_dlascl_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_dlascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, magma_stream, info );
}
| d686b36517d506abc84581164f848c63bd743702.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlascl.cu normal z -> d, Fri Sep 11 18:29:21 2015
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
dlascl_full(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_lower(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_upper(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
DLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
kl INTEGER
            Unused, for LAPACK compatibility.
@param[in]
ku KU is INTEGER
            Unused, for LAPACK compatibility.
@param[in]
cfrom DOUBLE PRECISION
@param[in]
cto DOUBLE PRECISION
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl_q(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK dlascl
// Get machine parameters
smlnum = lapackf77_dlamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if ( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if ( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if ( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if ( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
dlascl_lower <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
dlascl_upper <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
dlascl_full <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
cnt += 1;
}
}
/**
@see magmablas_dlascl_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_dlascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, magma_stream, info );
}
|
4736a1ec5dcfbbd121c30d04457cf3ec523b749d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Azzam Haidar
@author Ahmad Abdelfattah
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "shuffle.cuh"
#include "sync.cuh"
#include "atomics.cuh"
#include "batched_kernel_param.h"
#define PRECISION_z
/**
Purpose
-------
LU factorization of m-by-n matrix ( m >= n ).
Each thread block caches an entire column in register.
Thread blocks communicate and synchronize through global memory.
Assumptions:
1. dA is of size MxN such that N <= M.
2. Thread block must be 1D, with TX multiple of 32 (warp size)
3. TX must be >= n
4. n must be less than the number of SMs on the GPU
**/
// =============================================================================
// init kernel
__global__ void
zgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags)
{
const int tx = threadIdx.x;
if( tx < n){
ipiv[ tx ] = 0;
}
if( tx < max(n,npages) ){
update_flags[ tx ] = 0;
}
}
// =============================================================================
// the main kernel
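// Synchronization protocol used below: thread block i owns column i of the panel.
// It publishes its pivot row by atomically writing ipiv[i] (1-based), and signals
// that the scaled column has been written back by setting update_flag[i]. Blocks
// with bx > i busy-wait on those flags before swapping and updating, which is why
// the launcher must keep all n blocks resident simultaneously (at most one block
// per multiprocessor).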
template<int TX, int NPAGES>
__global__ void
zgetf2_native_kernel( int m, int n,
magmaDoubleComplex_ptr dA, int ldda,
volatile magma_int_t *ipiv, int gbstep,
volatile int* update_flag,
volatile magma_int_t *info)
{
#ifdef HAVE_CUBLAS
const int tx = threadIdx.x;
const int bx = blockIdx.x;
magmaDoubleComplex rA[NPAGES] = {MAGMA_Z_ZERO};
magmaDoubleComplex rx, rx_max;
magmaDoubleComplex_ptr da = dA;
int rx_id, max_id, flag = 0;
double rx_abs = 0.0, rx_abs_max = 0.0;
const int m_ = m-(NPAGES-1)*TX;
if( bx >= n ) return;
__shared__ magmaDoubleComplex sx[ TX ];
__shared__ double sabs[ TX ];
__shared__ int smax_id[ TX ];
__shared__ magmaDoubleComplex sreg;
// read
dA += bx * ldda + tx;
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
rA[i] = dA[ i * TX ];
}
if( tx < m_){
rA[NPAGES-1] = dA[ (NPAGES-1) * TX ];
}
// main loop
#pragma unroll
for(int i = 0; i < n; i++){
// izamax and write pivot for the ith thread block
if(bx == i){
rx_max = rx = (tx < i) ? MAGMA_Z_ZERO : rA[0];
rx_abs_max = rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx));
max_id = rx_id = tx;
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rx = rA[j];
rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx));
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = j * TX + tx;
}
}
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
__syncthreads();
// let the first warp do the final reduction step
if(tx < 32){
#pragma unroll
for(int j = 0; j < TX; j+= 32){
rx = sx[ j + tx ];
rx_abs = sabs[ j + tx ];
rx_id = smax_id[ j + tx ];
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = rx_id;
}
}
magmablas_syncwarp();
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
magmablas_syncwarp();
#pragma unroll
for(int j = 0; j < 32; j++){
rx = sx[j];
rx_abs = sabs[j];
rx_id = smax_id[j];
if ( rx_abs > rx_abs_max ){
rx_abs_max = rx_abs;
rx_max = rx;
max_id = rx_id;
}
}
}
if(tx == 0){
sx[ 0 ] = rx_max;
sabs[ 0 ] = rx_abs_max;
smax_id[ 0 ] = max_id;
}
__syncthreads();
rx_max = sx[ 0 ];
rx_abs_max = sabs[ 0 ];
max_id = smax_id[ 0 ];
__syncthreads();
// now every thread in the i^th block has the maximum
if( tx == 0){
if( rx_abs_max == MAGMA_D_ZERO){
magmablas_iatomic_exchange( (magma_int_t*)info, (magma_int_t)(max_id + gbstep + 1) );
}
magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing
}
__syncthreads();
if( rx_abs_max == MAGMA_D_ZERO )return;
}
else{ // other thread blocks are waiting
if(tx == 0){
max_id = 0;
while( max_id == 0 ){
max_id = ipiv[i];
};
smax_id[ 0 ] = max_id;
}
__syncthreads();
max_id = smax_id[ 0 ];
max_id -= 1; // revert fortran indexing
__syncthreads();
if( (*info) != 0 ) return;
}
// swap
// swap always happens between page 0 and page x
// to avoid spilling rA to local memory, we use shared memory
if( max_id != i){
// all blocks swap in registers
// for bx < i, the column is already written in memory,
// but we have a copy in reg., so continue to swap in reg.,
// and do one final write to memory
#pragma unroll
for(int j = 0; j < NPAGES; j++){
if( j == (max_id/TX) ){
sx[ tx ] = rA[j];
__syncthreads();
if( tx == i ){
magmaDoubleComplex tmp = sx[ max_id%TX ];
sx[ max_id%TX ] = rA[0];
rA[0] = tmp;
}
__syncthreads();
if( tx == max_id%TX ){
rA[j] = sx[ tx ];
}
__syncthreads();
}
}
//__syncthreads();
}
// the ith block does scal
if(bx == i){
magmaDoubleComplex reg = MAGMA_Z_DIV(MAGMA_Z_ONE, rx_max );
// scal
if( tx > i ){
rA[0] *= reg;
}
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rA[j] *= reg;
}
// write column i to global memory
#pragma unroll
for(int j = 0; j < NPAGES-1; j++){
dA[ j * TX ] = rA[j];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
__threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why
if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1);
}
// thread blocks with ID larger than i perform ger
if(bx > i){
if( tx == i ){
sreg = rA[0];
}
// wait for scal
if( tx == 0){
flag = 0;
while( flag == 0 ){
flag = update_flag[ i ];
};
}
__syncthreads();
magmaDoubleComplex reg = sreg;
if( NPAGES == 1){
if(tx > i && tx < m_){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}else{
if(tx > i){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}
#pragma unroll
for(int j = 1; j < NPAGES-1; j++){
rA[j] -= da[ i * ldda + j * TX + tx ] * reg;
}
if( NPAGES > 1){
if( tx < m_ ){
rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg;
}
}
}
}
// all blocks write their columns again except the last one
if( bx < n-1 ){
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
dA[ i * TX ] = rA[i];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
}
#endif // HAVE_CUBLAS
}
// =============================================================================
extern "C" magma_int_t
magma_zgetf2_native_fused(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *ipiv, magma_int_t gbstep,
magma_int_t *flags,
magma_int_t *info, magma_queue_t queue )
{
magma_int_t arginfo = 0;
const magma_int_t ntx = ZGETF2_FUSED_NTH;
if( m < n || m > ZGETF2_FUSED_MAX_M ){
arginfo = -1;
}
else if( n > magma_getdevice_multiprocessor_count() ){
arginfo = -2;
}
else if( ldda < max(1, m) ){
arginfo = -4;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
magma_int_t arch = magma_getdevice_arch();
dim3 grid(n, 1, 1);
dim3 threads(ntx, 1, 1);
const magma_int_t npages = magma_ceildiv(m, ntx);
// the kernel uses communication among thread blocks
// as a safeguard, force one thread block per multiprocessor
// by allocating more than half the shared memory
magma_int_t shmem = magma_getdevice_shmem_block();
shmem = (shmem / 2);
int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t
hipLaunchKernelGGL(( zgetf2_native_init_kernel), dim3(1), dim3(max(n,npages)), 0, queue->cuda_stream() , n, npages, ipiv, update_flag);
// The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx )
switch(npages){
case 1:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 2:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 3:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 4:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 5:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 6:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 7:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 8:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 9:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 10:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 11:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 12:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 13:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 14:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 15:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 16:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 17:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 18:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 19:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 20:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#if defined(PRECISION_s) || defined(PRECISION_d)
case 21:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 22:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 23:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 24:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 25:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 26:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 27:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 28:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 29:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 30:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 30>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 31:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 32:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 33:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 33>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 34:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 34>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 35:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 35>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 36:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 36>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 37:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 37>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 38:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 38>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 39:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 39>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 40:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 40>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 41:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 41>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 42:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 42>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 43:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 43>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 44:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 44>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 45:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 45>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 46:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 46>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s) || defined(PRECISION_d)
#if defined(PRECISION_s)
case 47:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 47>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 48:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 48>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 49:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 49>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 50:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 50>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 51:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 51>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 52:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 52>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 53:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 53>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 54:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 54>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 55:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 55>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 56:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 56>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 57:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 57>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 58:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 58>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 59:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 59>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 60:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 60>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 61:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 61>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 62:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 62>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 63:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 63>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 64:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 64>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 65:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 65>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 66:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 66>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 67:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 67>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 68:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 68>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 69:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 69>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 70:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 70>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 71:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 71>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 72:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 72>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 73:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 73>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 74:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 74>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 75:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 75>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 76:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 76>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 77:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 77>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 78:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 78>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 79:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 79>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 80:hipLaunchKernelGGL(( zgetf2_native_kernel< ntx, 80>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s)
default: printf("size not supported \n");
}
return 0;
}
| 4736a1ec5dcfbbd121c30d04457cf3ec523b749d.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Azzam Haidar
@author Ahmad Abdelfattah
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "shuffle.cuh"
#include "sync.cuh"
#include "atomics.cuh"
#include "batched_kernel_param.h"
#define PRECISION_z
/**
Purpose
-------
LU factorization of m-by-n matrix ( m >= n ).
Each thread block caches an entire column in register.
Thread blocks communicate and synchronize through global memory.
Assumptions:
1. dA is of size MxN such that N <= M.
2. Thread block must be 1D, with TX multiple of 32 (warp size)
3. TX must be >= n
4. n must be less than the number of SMs on the GPU
**/
// =============================================================================
// init kernel
__global__ void
zgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags)
{
const int tx = threadIdx.x;
if( tx < n){
ipiv[ tx ] = 0;
}
if( tx < max(n,npages) ){
update_flags[ tx ] = 0;
}
}
// =============================================================================
// the main kernel
template<int TX, int NPAGES>
__global__ void
zgetf2_native_kernel( int m, int n,
magmaDoubleComplex_ptr dA, int ldda,
volatile magma_int_t *ipiv, int gbstep,
volatile int* update_flag,
volatile magma_int_t *info)
{
#ifdef HAVE_CUBLAS
const int tx = threadIdx.x;
const int bx = blockIdx.x;
magmaDoubleComplex rA[NPAGES] = {MAGMA_Z_ZERO};
magmaDoubleComplex rx, rx_max;
magmaDoubleComplex_ptr da = dA;
int rx_id, max_id, flag = 0;
double rx_abs = 0.0, rx_abs_max = 0.0;
const int m_ = m-(NPAGES-1)*TX;
if( bx >= n ) return;
__shared__ magmaDoubleComplex sx[ TX ];
__shared__ double sabs[ TX ];
__shared__ int smax_id[ TX ];
__shared__ magmaDoubleComplex sreg;
// read
dA += bx * ldda + tx;
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
rA[i] = dA[ i * TX ];
}
if( tx < m_){
rA[NPAGES-1] = dA[ (NPAGES-1) * TX ];
}
// main loop
#pragma unroll
for(int i = 0; i < n; i++){
// izamax and write pivot for the ith thread block
if(bx == i){
rx_max = rx = (tx < i) ? MAGMA_Z_ZERO : rA[0];
rx_abs_max = rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx));
max_id = rx_id = tx;
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rx = rA[j];
rx_abs = fabs(MAGMA_Z_REAL(rx)) + fabs(MAGMA_Z_IMAG(rx));
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = j * TX + tx;
}
}
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
__syncthreads();
// let the first warp do the final reduction step
if(tx < 32){
#pragma unroll
for(int j = 0; j < TX; j+= 32){
rx = sx[ j + tx ];
rx_abs = sabs[ j + tx ];
rx_id = smax_id[ j + tx ];
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = rx_id;
}
}
magmablas_syncwarp();
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
magmablas_syncwarp();
#pragma unroll
for(int j = 0; j < 32; j++){
rx = sx[j];
rx_abs = sabs[j];
rx_id = smax_id[j];
if ( rx_abs > rx_abs_max ){
rx_abs_max = rx_abs;
rx_max = rx;
max_id = rx_id;
}
}
}
if(tx == 0){
sx[ 0 ] = rx_max;
sabs[ 0 ] = rx_abs_max;
smax_id[ 0 ] = max_id;
}
__syncthreads();
rx_max = sx[ 0 ];
rx_abs_max = sabs[ 0 ];
max_id = smax_id[ 0 ];
__syncthreads();
// now every thread in the i^th block has the maximum
if( tx == 0){
if( rx_abs_max == MAGMA_D_ZERO){
magmablas_iatomic_exchange( (magma_int_t*)info, (magma_int_t)(max_id + gbstep + 1) );
}
magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing
}
__syncthreads();
if( rx_abs_max == MAGMA_D_ZERO )return;
}
else{ // other thread blocks are waiting
if(tx == 0){
max_id = 0;
while( max_id == 0 ){
max_id = ipiv[i];
};
smax_id[ 0 ] = max_id;
}
__syncthreads();
max_id = smax_id[ 0 ];
max_id -= 1; // revert fortran indexing
__syncthreads();
if( (*info) != 0 ) return;
}
// swap
// swap always happens between page 0 and page x
// to avoid spilling rA to local memory, we use shared memory
if( max_id != i){
// all blocks swap in registers
// for bx < i, the column is already written in memory,
// but we have a copy in reg., so continue to swap in reg.,
// and do one final write to memory
#pragma unroll
for(int j = 0; j < NPAGES; j++){
if( j == (max_id/TX) ){
sx[ tx ] = rA[j];
__syncthreads();
if( tx == i ){
magmaDoubleComplex tmp = sx[ max_id%TX ];
sx[ max_id%TX ] = rA[0];
rA[0] = tmp;
}
__syncthreads();
if( tx == max_id%TX ){
rA[j] = sx[ tx ];
}
__syncthreads();
}
}
//__syncthreads();
}
// the ith block does scal
if(bx == i){
magmaDoubleComplex reg = MAGMA_Z_DIV(MAGMA_Z_ONE, rx_max );
// scal
if( tx > i ){
rA[0] *= reg;
}
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rA[j] *= reg;
}
// write column i to global memory
#pragma unroll
for(int j = 0; j < NPAGES-1; j++){
dA[ j * TX ] = rA[j];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
__threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why
if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1);
}
// thread blocks with ID larger than i perform ger
if(bx > i){
if( tx == i ){
sreg = rA[0];
}
// wait for scal
if( tx == 0){
flag = 0;
while( flag == 0 ){
flag = update_flag[ i ];
};
}
__syncthreads();
magmaDoubleComplex reg = sreg;
if( NPAGES == 1){
if(tx > i && tx < m_){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}else{
if(tx > i){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}
#pragma unroll
for(int j = 1; j < NPAGES-1; j++){
rA[j] -= da[ i * ldda + j * TX + tx ] * reg;
}
if( NPAGES > 1){
if( tx < m_ ){
rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg;
}
}
}
}
// all blocks write their columns again except the last one
if( bx < n-1 ){
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
dA[ i * TX ] = rA[i];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
}
#endif // HAVE_CUBLAS
}
// =============================================================================
extern "C" magma_int_t
magma_zgetf2_native_fused(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *ipiv, magma_int_t gbstep,
magma_int_t *flags,
magma_int_t *info, magma_queue_t queue )
{
magma_int_t arginfo = 0;
const magma_int_t ntx = ZGETF2_FUSED_NTH;
if( m < n || m > ZGETF2_FUSED_MAX_M ){
arginfo = -1;
}
else if( n > magma_getdevice_multiprocessor_count() ){
arginfo = -2;
}
else if( ldda < max(1, m) ){
arginfo = -4;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
magma_int_t arch = magma_getdevice_arch();
dim3 grid(n, 1, 1);
dim3 threads(ntx, 1, 1);
const magma_int_t npages = magma_ceildiv(m, ntx);
// the kernel uses communication among thread blocks
// as a safeguard, force one thread block per multiprocessor
// by allocating more than half the shared memory
magma_int_t shmem = magma_getdevice_shmem_block();
shmem = (shmem / 2);
int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t
zgetf2_native_init_kernel<<< 1, max(n,npages), 0, queue->cuda_stream() >>>( n, npages, ipiv, update_flag);
// The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx )
switch(npages){
case 1: zgetf2_native_kernel< ntx, 1><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 2: zgetf2_native_kernel< ntx, 2><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 3: zgetf2_native_kernel< ntx, 3><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 4: zgetf2_native_kernel< ntx, 4><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 5: zgetf2_native_kernel< ntx, 5><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 6: zgetf2_native_kernel< ntx, 6><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 7: zgetf2_native_kernel< ntx, 7><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 8: zgetf2_native_kernel< ntx, 8><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 9: zgetf2_native_kernel< ntx, 9><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 10: zgetf2_native_kernel< ntx, 10><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 11: zgetf2_native_kernel< ntx, 11><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 12: zgetf2_native_kernel< ntx, 12><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 13: zgetf2_native_kernel< ntx, 13><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 14: zgetf2_native_kernel< ntx, 14><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 15: zgetf2_native_kernel< ntx, 15><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 16: zgetf2_native_kernel< ntx, 16><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 17: zgetf2_native_kernel< ntx, 17><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 18: zgetf2_native_kernel< ntx, 18><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 19: zgetf2_native_kernel< ntx, 19><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 20: zgetf2_native_kernel< ntx, 20><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
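        // larger page counts are instantiated only for real precision, where an element needs half the registers of its complex counterpart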
#if defined(PRECISION_s) || defined(PRECISION_d)
case 21: zgetf2_native_kernel< ntx, 21><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 22: zgetf2_native_kernel< ntx, 22><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 23: zgetf2_native_kernel< ntx, 23><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 24: zgetf2_native_kernel< ntx, 24><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 25: zgetf2_native_kernel< ntx, 25><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 26: zgetf2_native_kernel< ntx, 26><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 27: zgetf2_native_kernel< ntx, 27><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 28: zgetf2_native_kernel< ntx, 28><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 29: zgetf2_native_kernel< ntx, 29><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 30: zgetf2_native_kernel< ntx, 30><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 31: zgetf2_native_kernel< ntx, 31><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 32: zgetf2_native_kernel< ntx, 32><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 33: zgetf2_native_kernel< ntx, 33><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 34: zgetf2_native_kernel< ntx, 34><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 35: zgetf2_native_kernel< ntx, 35><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 36: zgetf2_native_kernel< ntx, 36><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 37: zgetf2_native_kernel< ntx, 37><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 38: zgetf2_native_kernel< ntx, 38><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 39: zgetf2_native_kernel< ntx, 39><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 40: zgetf2_native_kernel< ntx, 40><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 41: zgetf2_native_kernel< ntx, 41><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 42: zgetf2_native_kernel< ntx, 42><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 43: zgetf2_native_kernel< ntx, 43><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 44: zgetf2_native_kernel< ntx, 44><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 45: zgetf2_native_kernel< ntx, 45><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 46: zgetf2_native_kernel< ntx, 46><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s) || defined(PRECISION_d)
#if defined(PRECISION_s)
case 47: zgetf2_native_kernel< ntx, 47><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 48: zgetf2_native_kernel< ntx, 48><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 49: zgetf2_native_kernel< ntx, 49><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 50: zgetf2_native_kernel< ntx, 50><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 51: zgetf2_native_kernel< ntx, 51><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 52: zgetf2_native_kernel< ntx, 52><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 53: zgetf2_native_kernel< ntx, 53><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 54: zgetf2_native_kernel< ntx, 54><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 55: zgetf2_native_kernel< ntx, 55><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 56: zgetf2_native_kernel< ntx, 56><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 57: zgetf2_native_kernel< ntx, 57><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 58: zgetf2_native_kernel< ntx, 58><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 59: zgetf2_native_kernel< ntx, 59><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 60: zgetf2_native_kernel< ntx, 60><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 61: zgetf2_native_kernel< ntx, 61><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 62: zgetf2_native_kernel< ntx, 62><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 63: zgetf2_native_kernel< ntx, 63><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 64: zgetf2_native_kernel< ntx, 64><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 65: zgetf2_native_kernel< ntx, 65><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 66: zgetf2_native_kernel< ntx, 66><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 67: zgetf2_native_kernel< ntx, 67><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 68: zgetf2_native_kernel< ntx, 68><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 69: zgetf2_native_kernel< ntx, 69><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 70: zgetf2_native_kernel< ntx, 70><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 71: zgetf2_native_kernel< ntx, 71><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 72: zgetf2_native_kernel< ntx, 72><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 73: zgetf2_native_kernel< ntx, 73><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 74: zgetf2_native_kernel< ntx, 74><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 75: zgetf2_native_kernel< ntx, 75><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 76: zgetf2_native_kernel< ntx, 76><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 77: zgetf2_native_kernel< ntx, 77><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 78: zgetf2_native_kernel< ntx, 78><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 79: zgetf2_native_kernel< ntx, 79><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 80: zgetf2_native_kernel< ntx, 80><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s)
        default: printf("size not supported\n"); break;
}
return 0;
}
|
438bc791b50da08f383e3e682f16f837b1659b66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box3d4r-16x16-1-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 1457
#define BENCH_RAD 4
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 9 && timestep >= 1) {
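// error-checking helpers: every runtime call below is wrapped so that a failure prints the error string and aborts via assert(); cudaCheckKernel() re-checks after an asynchronous kernel launch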
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
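      // c1..c3 sweep only the interior of the cube: dimsize minus a 4-cell halo (BENCH_RAD) on each side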
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 8;
const AN5D_TYPE __side3Len = 8;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
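        // overlapped tiling: each spatial tile is widened by __halo*__side0Len cells per side so one launch can advance __side0Len time steps without exchanging halos between blocks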
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
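      // with __side0Len == 1 the main loop covers every time step, so the remainder branches below stay empty (scaffolding emitted by the AN5D generator)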
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
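    // scalar reference implementation: ping-pong between the two time planes (t%2 and (t+1)%2), updating interior points only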
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
-3.240f*A[t%2][i-4][j][k] +
0.0010f*A[t%2][i-4][j-4][k-4] +
0.0020f*A[t%2][i-4][j-4][k-3] +
0.0030f*A[t%2][i-4][j-4][k-2] +
0.0040f*A[t%2][i-4][j-4][k-1] +
0.0050f*A[t%2][i-4][j-4][k] +
0.0060f*A[t%2][i-4][j-4][k+1] +
0.0070f*A[t%2][i-4][j-4][k+2] +
0.0080f*A[t%2][i-4][j-4][k+3] +
0.0090f*A[t%2][i-4][j-4][k+4] +
0.0100f*A[t%2][i-4][j-3][k-4] +
0.0110f*A[t%2][i-4][j-3][k-3] +
0.0120f*A[t%2][i-4][j-3][k-2] +
0.0130f*A[t%2][i-4][j-3][k-1] +
0.0140f*A[t%2][i-4][j-3][k] +
0.0150f*A[t%2][i-4][j-3][k+1] +
0.0160f*A[t%2][i-4][j-3][k+2] +
0.0170f*A[t%2][i-4][j-3][k+3] +
0.0180f*A[t%2][i-4][j-3][k+4] +
0.0190f*A[t%2][i-4][j-2][k-4] +
0.0200f*A[t%2][i-4][j-2][k-3] +
0.0210f*A[t%2][i-4][j-2][k-2] +
0.0220f*A[t%2][i-4][j-2][k-1] +
0.0230f*A[t%2][i-4][j-2][k] +
0.0240f*A[t%2][i-4][j-2][k+1] +
0.0250f*A[t%2][i-4][j-2][k+2] +
0.0260f*A[t%2][i-4][j-2][k+3] +
0.0270f*A[t%2][i-4][j-2][k+4] +
0.0280f*A[t%2][i-4][j-1][k-4] +
0.0290f*A[t%2][i-4][j-1][k-3] +
0.0300f*A[t%2][i-4][j-1][k-2] +
0.0310f*A[t%2][i-4][j-1][k-1] +
0.0320f*A[t%2][i-4][j-1][k] +
0.0330f*A[t%2][i-4][j-1][k+1] +
0.0340f*A[t%2][i-4][j-1][k+2] +
0.0350f*A[t%2][i-4][j-1][k+3] +
0.0360f*A[t%2][i-4][j-1][k+4] +
0.0370f*A[t%2][i-4][j][k-4] +
0.0380f*A[t%2][i-4][j][k-3] +
0.0390f*A[t%2][i-4][j][k-2] +
0.0400f*A[t%2][i-4][j][k-1] +
0.0410f*A[t%2][i-4][j][k+1] +
0.0420f*A[t%2][i-4][j][k+2] +
0.0430f*A[t%2][i-4][j][k+3] +
0.0440f*A[t%2][i-4][j][k+4] +
0.0450f*A[t%2][i-4][j+1][k-4] +
0.0460f*A[t%2][i-4][j+1][k-3] +
0.0470f*A[t%2][i-4][j+1][k-2] +
0.0480f*A[t%2][i-4][j+1][k-1] +
0.0490f*A[t%2][i-4][j+1][k] +
0.0500f*A[t%2][i-4][j+1][k+1] +
0.0510f*A[t%2][i-4][j+1][k+2] +
0.0520f*A[t%2][i-4][j+1][k+3] +
0.0530f*A[t%2][i-4][j+1][k+4] +
0.0540f*A[t%2][i-4][j+2][k-4] +
0.0550f*A[t%2][i-4][j+2][k-3] +
0.0560f*A[t%2][i-4][j+2][k-2] +
0.0570f*A[t%2][i-4][j+2][k-1] +
0.0580f*A[t%2][i-4][j+2][k] +
0.0590f*A[t%2][i-4][j+2][k+1] +
0.0600f*A[t%2][i-4][j+2][k+2] +
0.0610f*A[t%2][i-4][j+2][k+3] +
0.0620f*A[t%2][i-4][j+2][k+4] +
0.0630f*A[t%2][i-4][j+3][k-4] +
0.0640f*A[t%2][i-4][j+3][k-3] +
0.0650f*A[t%2][i-4][j+3][k-2] +
0.0660f*A[t%2][i-4][j+3][k-1] +
0.0670f*A[t%2][i-4][j+3][k] +
0.0680f*A[t%2][i-4][j+3][k+1] +
0.0690f*A[t%2][i-4][j+3][k+2] +
0.0700f*A[t%2][i-4][j+3][k+3] +
0.0710f*A[t%2][i-4][j+3][k+4] +
0.0720f*A[t%2][i-4][j+4][k-4] +
0.0730f*A[t%2][i-4][j+4][k-3] +
0.0740f*A[t%2][i-4][j+4][k-2] +
0.0750f*A[t%2][i-4][j+4][k-1] +
0.0760f*A[t%2][i-4][j+4][k] +
0.0770f*A[t%2][i-4][j+4][k+1] +
0.0780f*A[t%2][i-4][j+4][k+2] +
0.0790f*A[t%2][i-4][j+4][k+3] +
0.0800f*A[t%2][i-4][j+4][k+4] +
-3.248f*A[t%2][i-3][j][k] +
0.0011f*A[t%2][i-3][j-4][k-4] +
0.0021f*A[t%2][i-3][j-4][k-3] +
0.0031f*A[t%2][i-3][j-4][k-2] +
0.0041f*A[t%2][i-3][j-4][k-1] +
0.0051f*A[t%2][i-3][j-4][k] +
0.0061f*A[t%2][i-3][j-4][k+1] +
0.0071f*A[t%2][i-3][j-4][k+2] +
0.0081f*A[t%2][i-3][j-4][k+3] +
0.0091f*A[t%2][i-3][j-4][k+4] +
0.0101f*A[t%2][i-3][j-3][k-4] +
0.0111f*A[t%2][i-3][j-3][k-3] +
0.0121f*A[t%2][i-3][j-3][k-2] +
0.0131f*A[t%2][i-3][j-3][k-1] +
0.0141f*A[t%2][i-3][j-3][k] +
0.0151f*A[t%2][i-3][j-3][k+1] +
0.0161f*A[t%2][i-3][j-3][k+2] +
0.0171f*A[t%2][i-3][j-3][k+3] +
0.0181f*A[t%2][i-3][j-3][k+4] +
0.0191f*A[t%2][i-3][j-2][k-4] +
0.0201f*A[t%2][i-3][j-2][k-3] +
0.0211f*A[t%2][i-3][j-2][k-2] +
0.0221f*A[t%2][i-3][j-2][k-1] +
0.0231f*A[t%2][i-3][j-2][k] +
0.0241f*A[t%2][i-3][j-2][k+1] +
0.0251f*A[t%2][i-3][j-2][k+2] +
0.0261f*A[t%2][i-3][j-2][k+3] +
0.0271f*A[t%2][i-3][j-2][k+4] +
0.0281f*A[t%2][i-3][j-1][k-4] +
0.0291f*A[t%2][i-3][j-1][k-3] +
0.0301f*A[t%2][i-3][j-1][k-2] +
0.0311f*A[t%2][i-3][j-1][k-1] +
0.0321f*A[t%2][i-3][j-1][k] +
0.0331f*A[t%2][i-3][j-1][k+1] +
0.0341f*A[t%2][i-3][j-1][k+2] +
0.0351f*A[t%2][i-3][j-1][k+3] +
0.0361f*A[t%2][i-3][j-1][k+4] +
0.0371f*A[t%2][i-3][j][k-4] +
0.0381f*A[t%2][i-3][j][k-3] +
0.0391f*A[t%2][i-3][j][k-2] +
0.0401f*A[t%2][i-3][j][k-1] +
0.0411f*A[t%2][i-3][j][k+1] +
0.0421f*A[t%2][i-3][j][k+2] +
0.0431f*A[t%2][i-3][j][k+3] +
0.0441f*A[t%2][i-3][j][k+4] +
0.0451f*A[t%2][i-3][j+1][k-4] +
0.0461f*A[t%2][i-3][j+1][k-3] +
0.0471f*A[t%2][i-3][j+1][k-2] +
0.0481f*A[t%2][i-3][j+1][k-1] +
0.0491f*A[t%2][i-3][j+1][k] +
0.0501f*A[t%2][i-3][j+1][k+1] +
0.0511f*A[t%2][i-3][j+1][k+2] +
0.0521f*A[t%2][i-3][j+1][k+3] +
0.0531f*A[t%2][i-3][j+1][k+4] +
0.0541f*A[t%2][i-3][j+2][k-4] +
0.0551f*A[t%2][i-3][j+2][k-3] +
0.0561f*A[t%2][i-3][j+2][k-2] +
0.0571f*A[t%2][i-3][j+2][k-1] +
0.0581f*A[t%2][i-3][j+2][k] +
0.0591f*A[t%2][i-3][j+2][k+1] +
0.0601f*A[t%2][i-3][j+2][k+2] +
0.0611f*A[t%2][i-3][j+2][k+3] +
0.0621f*A[t%2][i-3][j+2][k+4] +
0.0631f*A[t%2][i-3][j+3][k-4] +
0.0641f*A[t%2][i-3][j+3][k-3] +
0.0651f*A[t%2][i-3][j+3][k-2] +
0.0661f*A[t%2][i-3][j+3][k-1] +
0.0671f*A[t%2][i-3][j+3][k] +
0.0681f*A[t%2][i-3][j+3][k+1] +
0.0691f*A[t%2][i-3][j+3][k+2] +
0.0701f*A[t%2][i-3][j+3][k+3] +
0.0711f*A[t%2][i-3][j+3][k+4] +
0.0721f*A[t%2][i-3][j+4][k-4] +
0.0731f*A[t%2][i-3][j+4][k-3] +
0.0741f*A[t%2][i-3][j+4][k-2] +
0.0751f*A[t%2][i-3][j+4][k-1] +
0.0761f*A[t%2][i-3][j+4][k] +
0.0771f*A[t%2][i-3][j+4][k+1] +
0.0781f*A[t%2][i-3][j+4][k+2] +
0.0791f*A[t%2][i-3][j+4][k+3] +
0.0801f*A[t%2][i-3][j+4][k+4] +
-3.256f*A[t%2][i-2][j][k] +
0.0012f*A[t%2][i-2][j-4][k-4] +
0.0022f*A[t%2][i-2][j-4][k-3] +
0.0032f*A[t%2][i-2][j-4][k-2] +
0.0042f*A[t%2][i-2][j-4][k-1] +
0.0052f*A[t%2][i-2][j-4][k] +
0.0062f*A[t%2][i-2][j-4][k+1] +
0.0072f*A[t%2][i-2][j-4][k+2] +
0.0082f*A[t%2][i-2][j-4][k+3] +
0.0092f*A[t%2][i-2][j-4][k+4] +
0.0102f*A[t%2][i-2][j-3][k-4] +
0.0112f*A[t%2][i-2][j-3][k-3] +
0.0122f*A[t%2][i-2][j-3][k-2] +
0.0132f*A[t%2][i-2][j-3][k-1] +
0.0142f*A[t%2][i-2][j-3][k] +
0.0152f*A[t%2][i-2][j-3][k+1] +
0.0162f*A[t%2][i-2][j-3][k+2] +
0.0172f*A[t%2][i-2][j-3][k+3] +
0.0182f*A[t%2][i-2][j-3][k+4] +
0.0192f*A[t%2][i-2][j-2][k-4] +
0.0202f*A[t%2][i-2][j-2][k-3] +
0.0212f*A[t%2][i-2][j-2][k-2] +
0.0222f*A[t%2][i-2][j-2][k-1] +
0.0232f*A[t%2][i-2][j-2][k] +
0.0242f*A[t%2][i-2][j-2][k+1] +
0.0252f*A[t%2][i-2][j-2][k+2] +
0.0262f*A[t%2][i-2][j-2][k+3] +
0.0272f*A[t%2][i-2][j-2][k+4] +
0.0282f*A[t%2][i-2][j-1][k-4] +
0.0292f*A[t%2][i-2][j-1][k-3] +
0.0302f*A[t%2][i-2][j-1][k-2] +
0.0312f*A[t%2][i-2][j-1][k-1] +
0.0322f*A[t%2][i-2][j-1][k] +
0.0332f*A[t%2][i-2][j-1][k+1] +
0.0342f*A[t%2][i-2][j-1][k+2] +
0.0352f*A[t%2][i-2][j-1][k+3] +
0.0362f*A[t%2][i-2][j-1][k+4] +
0.0372f*A[t%2][i-2][j][k-4] +
0.0382f*A[t%2][i-2][j][k-3] +
0.0392f*A[t%2][i-2][j][k-2] +
0.0402f*A[t%2][i-2][j][k-1] +
0.0412f*A[t%2][i-2][j][k+1] +
0.0422f*A[t%2][i-2][j][k+2] +
0.0432f*A[t%2][i-2][j][k+3] +
0.0442f*A[t%2][i-2][j][k+4] +
0.0452f*A[t%2][i-2][j+1][k-4] +
0.0462f*A[t%2][i-2][j+1][k-3] +
0.0472f*A[t%2][i-2][j+1][k-2] +
0.0482f*A[t%2][i-2][j+1][k-1] +
0.0492f*A[t%2][i-2][j+1][k] +
0.0502f*A[t%2][i-2][j+1][k+1] +
0.0512f*A[t%2][i-2][j+1][k+2] +
0.0522f*A[t%2][i-2][j+1][k+3] +
0.0532f*A[t%2][i-2][j+1][k+4] +
0.0542f*A[t%2][i-2][j+2][k-4] +
0.0552f*A[t%2][i-2][j+2][k-3] +
0.0562f*A[t%2][i-2][j+2][k-2] +
0.0572f*A[t%2][i-2][j+2][k-1] +
0.0582f*A[t%2][i-2][j+2][k] +
0.0592f*A[t%2][i-2][j+2][k+1] +
0.0602f*A[t%2][i-2][j+2][k+2] +
0.0612f*A[t%2][i-2][j+2][k+3] +
0.0622f*A[t%2][i-2][j+2][k+4] +
0.0632f*A[t%2][i-2][j+3][k-4] +
0.0642f*A[t%2][i-2][j+3][k-3] +
0.0652f*A[t%2][i-2][j+3][k-2] +
0.0662f*A[t%2][i-2][j+3][k-1] +
0.0672f*A[t%2][i-2][j+3][k] +
0.0682f*A[t%2][i-2][j+3][k+1] +
0.0692f*A[t%2][i-2][j+3][k+2] +
0.0702f*A[t%2][i-2][j+3][k+3] +
0.0712f*A[t%2][i-2][j+3][k+4] +
0.0722f*A[t%2][i-2][j+4][k-4] +
0.0732f*A[t%2][i-2][j+4][k-3] +
0.0742f*A[t%2][i-2][j+4][k-2] +
0.0752f*A[t%2][i-2][j+4][k-1] +
0.0762f*A[t%2][i-2][j+4][k] +
0.0772f*A[t%2][i-2][j+4][k+1] +
0.0782f*A[t%2][i-2][j+4][k+2] +
0.0792f*A[t%2][i-2][j+4][k+3] +
0.0802f*A[t%2][i-2][j+4][k+4] +
-3.264f*A[t%2][i-1][j][k] +
0.0013f*A[t%2][i-1][j-4][k-4] +
0.0023f*A[t%2][i-1][j-4][k-3] +
0.0033f*A[t%2][i-1][j-4][k-2] +
0.0043f*A[t%2][i-1][j-4][k-1] +
0.0053f*A[t%2][i-1][j-4][k] +
0.0063f*A[t%2][i-1][j-4][k+1] +
0.0073f*A[t%2][i-1][j-4][k+2] +
0.0083f*A[t%2][i-1][j-4][k+3] +
0.0093f*A[t%2][i-1][j-4][k+4] +
0.0103f*A[t%2][i-1][j-3][k-4] +
0.0113f*A[t%2][i-1][j-3][k-3] +
0.0123f*A[t%2][i-1][j-3][k-2] +
0.0133f*A[t%2][i-1][j-3][k-1] +
0.0143f*A[t%2][i-1][j-3][k] +
0.0153f*A[t%2][i-1][j-3][k+1] +
0.0163f*A[t%2][i-1][j-3][k+2] +
0.0173f*A[t%2][i-1][j-3][k+3] +
0.0183f*A[t%2][i-1][j-3][k+4] +
0.0193f*A[t%2][i-1][j-2][k-4] +
0.0203f*A[t%2][i-1][j-2][k-3] +
0.0213f*A[t%2][i-1][j-2][k-2] +
0.0223f*A[t%2][i-1][j-2][k-1] +
0.0233f*A[t%2][i-1][j-2][k] +
0.0243f*A[t%2][i-1][j-2][k+1] +
0.0253f*A[t%2][i-1][j-2][k+2] +
0.0263f*A[t%2][i-1][j-2][k+3] +
0.0273f*A[t%2][i-1][j-2][k+4] +
0.0283f*A[t%2][i-1][j-1][k-4] +
0.0293f*A[t%2][i-1][j-1][k-3] +
0.0303f*A[t%2][i-1][j-1][k-2] +
0.0313f*A[t%2][i-1][j-1][k-1] +
0.0323f*A[t%2][i-1][j-1][k] +
0.0333f*A[t%2][i-1][j-1][k+1] +
0.0343f*A[t%2][i-1][j-1][k+2] +
0.0353f*A[t%2][i-1][j-1][k+3] +
0.0363f*A[t%2][i-1][j-1][k+4] +
0.0373f*A[t%2][i-1][j][k-4] +
0.0383f*A[t%2][i-1][j][k-3] +
0.0393f*A[t%2][i-1][j][k-2] +
0.0403f*A[t%2][i-1][j][k-1] +
0.0413f*A[t%2][i-1][j][k+1] +
0.0423f*A[t%2][i-1][j][k+2] +
0.0433f*A[t%2][i-1][j][k+3] +
0.0443f*A[t%2][i-1][j][k+4] +
0.0453f*A[t%2][i-1][j+1][k-4] +
0.0463f*A[t%2][i-1][j+1][k-3] +
0.0473f*A[t%2][i-1][j+1][k-2] +
0.0483f*A[t%2][i-1][j+1][k-1] +
0.0493f*A[t%2][i-1][j+1][k] +
0.0503f*A[t%2][i-1][j+1][k+1] +
0.0513f*A[t%2][i-1][j+1][k+2] +
0.0523f*A[t%2][i-1][j+1][k+3] +
0.0533f*A[t%2][i-1][j+1][k+4] +
0.0543f*A[t%2][i-1][j+2][k-4] +
0.0553f*A[t%2][i-1][j+2][k-3] +
0.0563f*A[t%2][i-1][j+2][k-2] +
0.0573f*A[t%2][i-1][j+2][k-1] +
0.0583f*A[t%2][i-1][j+2][k] +
0.0593f*A[t%2][i-1][j+2][k+1] +
0.0603f*A[t%2][i-1][j+2][k+2] +
0.0613f*A[t%2][i-1][j+2][k+3] +
0.0623f*A[t%2][i-1][j+2][k+4] +
0.0633f*A[t%2][i-1][j+3][k-4] +
0.0643f*A[t%2][i-1][j+3][k-3] +
0.0653f*A[t%2][i-1][j+3][k-2] +
0.0663f*A[t%2][i-1][j+3][k-1] +
0.0673f*A[t%2][i-1][j+3][k] +
0.0683f*A[t%2][i-1][j+3][k+1] +
0.0693f*A[t%2][i-1][j+3][k+2] +
0.0703f*A[t%2][i-1][j+3][k+3] +
0.0713f*A[t%2][i-1][j+3][k+4] +
0.0723f*A[t%2][i-1][j+4][k-4] +
0.0733f*A[t%2][i-1][j+4][k-3] +
0.0743f*A[t%2][i-1][j+4][k-2] +
0.0753f*A[t%2][i-1][j+4][k-1] +
0.0763f*A[t%2][i-1][j+4][k] +
0.0773f*A[t%2][i-1][j+4][k+1] +
0.0783f*A[t%2][i-1][j+4][k+2] +
0.0793f*A[t%2][i-1][j+4][k+3] +
0.0803f*A[t%2][i-1][j+4][k+4] +
-3.272f*A[t%2][i][j][k] +
0.0014f*A[t%2][i][j-4][k-4] +
0.0024f*A[t%2][i][j-4][k-3] +
0.0034f*A[t%2][i][j-4][k-2] +
0.0044f*A[t%2][i][j-4][k-1] +
0.0054f*A[t%2][i][j-4][k] +
0.0064f*A[t%2][i][j-4][k+1] +
0.0074f*A[t%2][i][j-4][k+2] +
0.0084f*A[t%2][i][j-4][k+3] +
0.0094f*A[t%2][i][j-4][k+4] +
0.0104f*A[t%2][i][j-3][k-4] +
0.0114f*A[t%2][i][j-3][k-3] +
0.0124f*A[t%2][i][j-3][k-2] +
0.0134f*A[t%2][i][j-3][k-1] +
0.0144f*A[t%2][i][j-3][k] +
0.0154f*A[t%2][i][j-3][k+1] +
0.0164f*A[t%2][i][j-3][k+2] +
0.0174f*A[t%2][i][j-3][k+3] +
0.0184f*A[t%2][i][j-3][k+4] +
0.0194f*A[t%2][i][j-2][k-4] +
0.0204f*A[t%2][i][j-2][k-3] +
0.0214f*A[t%2][i][j-2][k-2] +
0.0224f*A[t%2][i][j-2][k-1] +
0.0234f*A[t%2][i][j-2][k] +
0.0244f*A[t%2][i][j-2][k+1] +
0.0254f*A[t%2][i][j-2][k+2] +
0.0264f*A[t%2][i][j-2][k+3] +
0.0274f*A[t%2][i][j-2][k+4] +
0.0284f*A[t%2][i][j-1][k-4] +
0.0294f*A[t%2][i][j-1][k-3] +
0.0304f*A[t%2][i][j-1][k-2] +
0.0314f*A[t%2][i][j-1][k-1] +
0.0324f*A[t%2][i][j-1][k] +
0.0334f*A[t%2][i][j-1][k+1] +
0.0344f*A[t%2][i][j-1][k+2] +
0.0354f*A[t%2][i][j-1][k+3] +
0.0364f*A[t%2][i][j-1][k+4] +
0.0374f*A[t%2][i][j][k-4] +
0.0384f*A[t%2][i][j][k-3] +
0.0394f*A[t%2][i][j][k-2] +
0.0404f*A[t%2][i][j][k-1] +
0.0414f*A[t%2][i][j][k+1] +
0.0424f*A[t%2][i][j][k+2] +
0.0434f*A[t%2][i][j][k+3] +
0.0444f*A[t%2][i][j][k+4] +
0.0454f*A[t%2][i][j+1][k-4] +
0.0464f*A[t%2][i][j+1][k-3] +
0.0474f*A[t%2][i][j+1][k-2] +
0.0484f*A[t%2][i][j+1][k-1] +
0.0494f*A[t%2][i][j+1][k] +
0.0504f*A[t%2][i][j+1][k+1] +
0.0514f*A[t%2][i][j+1][k+2] +
0.0524f*A[t%2][i][j+1][k+3] +
0.0534f*A[t%2][i][j+1][k+4] +
0.0544f*A[t%2][i][j+2][k-4] +
0.0554f*A[t%2][i][j+2][k-3] +
0.0564f*A[t%2][i][j+2][k-2] +
0.0574f*A[t%2][i][j+2][k-1] +
0.0584f*A[t%2][i][j+2][k] +
0.0594f*A[t%2][i][j+2][k+1] +
0.0604f*A[t%2][i][j+2][k+2] +
0.0614f*A[t%2][i][j+2][k+3] +
0.0624f*A[t%2][i][j+2][k+4] +
0.0634f*A[t%2][i][j+3][k-4] +
0.0644f*A[t%2][i][j+3][k-3] +
0.0654f*A[t%2][i][j+3][k-2] +
0.0664f*A[t%2][i][j+3][k-1] +
0.0674f*A[t%2][i][j+3][k] +
0.0684f*A[t%2][i][j+3][k+1] +
0.0694f*A[t%2][i][j+3][k+2] +
0.0704f*A[t%2][i][j+3][k+3] +
0.0714f*A[t%2][i][j+3][k+4] +
0.0724f*A[t%2][i][j+4][k-4] +
0.0734f*A[t%2][i][j+4][k-3] +
0.0744f*A[t%2][i][j+4][k-2] +
0.0754f*A[t%2][i][j+4][k-1] +
0.0764f*A[t%2][i][j+4][k] +
0.0774f*A[t%2][i][j+4][k+1] +
0.0784f*A[t%2][i][j+4][k+2] +
0.0794f*A[t%2][i][j+4][k+3] +
0.0804f*A[t%2][i][j+4][k+4] +
-3.280f*A[t%2][i+1][j][k] +
0.0015f*A[t%2][i+1][j-4][k-4] +
0.0025f*A[t%2][i+1][j-4][k-3] +
0.0035f*A[t%2][i+1][j-4][k-2] +
0.0045f*A[t%2][i+1][j-4][k-1] +
0.0055f*A[t%2][i+1][j-4][k] +
0.0065f*A[t%2][i+1][j-4][k+1] +
0.0075f*A[t%2][i+1][j-4][k+2] +
0.0085f*A[t%2][i+1][j-4][k+3] +
0.0095f*A[t%2][i+1][j-4][k+4] +
0.0105f*A[t%2][i+1][j-3][k-4] +
0.0115f*A[t%2][i+1][j-3][k-3] +
0.0125f*A[t%2][i+1][j-3][k-2] +
0.0135f*A[t%2][i+1][j-3][k-1] +
0.0145f*A[t%2][i+1][j-3][k] +
0.0155f*A[t%2][i+1][j-3][k+1] +
0.0165f*A[t%2][i+1][j-3][k+2] +
0.0175f*A[t%2][i+1][j-3][k+3] +
0.0185f*A[t%2][i+1][j-3][k+4] +
0.0195f*A[t%2][i+1][j-2][k-4] +
0.0205f*A[t%2][i+1][j-2][k-3] +
0.0215f*A[t%2][i+1][j-2][k-2] +
0.0225f*A[t%2][i+1][j-2][k-1] +
0.0235f*A[t%2][i+1][j-2][k] +
0.0245f*A[t%2][i+1][j-2][k+1] +
0.0255f*A[t%2][i+1][j-2][k+2] +
0.0265f*A[t%2][i+1][j-2][k+3] +
0.0275f*A[t%2][i+1][j-2][k+4] +
0.0285f*A[t%2][i+1][j-1][k-4] +
0.0295f*A[t%2][i+1][j-1][k-3] +
0.0305f*A[t%2][i+1][j-1][k-2] +
0.0315f*A[t%2][i+1][j-1][k-1] +
0.0325f*A[t%2][i+1][j-1][k] +
0.0335f*A[t%2][i+1][j-1][k+1] +
0.0345f*A[t%2][i+1][j-1][k+2] +
0.0355f*A[t%2][i+1][j-1][k+3] +
0.0365f*A[t%2][i+1][j-1][k+4] +
0.0375f*A[t%2][i+1][j][k-4] +
0.0385f*A[t%2][i+1][j][k-3] +
0.0395f*A[t%2][i+1][j][k-2] +
0.0405f*A[t%2][i+1][j][k-1] +
0.0415f*A[t%2][i+1][j][k+1] +
0.0425f*A[t%2][i+1][j][k+2] +
0.0435f*A[t%2][i+1][j][k+3] +
0.0445f*A[t%2][i+1][j][k+4] +
0.0455f*A[t%2][i+1][j+1][k-4] +
0.0465f*A[t%2][i+1][j+1][k-3] +
0.0475f*A[t%2][i+1][j+1][k-2] +
0.0485f*A[t%2][i+1][j+1][k-1] +
0.0495f*A[t%2][i+1][j+1][k] +
0.0505f*A[t%2][i+1][j+1][k+1] +
0.0515f*A[t%2][i+1][j+1][k+2] +
0.0525f*A[t%2][i+1][j+1][k+3] +
0.0535f*A[t%2][i+1][j+1][k+4] +
0.0545f*A[t%2][i+1][j+2][k-4] +
0.0555f*A[t%2][i+1][j+2][k-3] +
0.0565f*A[t%2][i+1][j+2][k-2] +
0.0575f*A[t%2][i+1][j+2][k-1] +
0.0585f*A[t%2][i+1][j+2][k] +
0.0595f*A[t%2][i+1][j+2][k+1] +
0.0605f*A[t%2][i+1][j+2][k+2] +
0.0615f*A[t%2][i+1][j+2][k+3] +
0.0625f*A[t%2][i+1][j+2][k+4] +
0.0635f*A[t%2][i+1][j+3][k-4] +
0.0645f*A[t%2][i+1][j+3][k-3] +
0.0655f*A[t%2][i+1][j+3][k-2] +
0.0665f*A[t%2][i+1][j+3][k-1] +
0.0675f*A[t%2][i+1][j+3][k] +
0.0685f*A[t%2][i+1][j+3][k+1] +
0.0695f*A[t%2][i+1][j+3][k+2] +
0.0705f*A[t%2][i+1][j+3][k+3] +
0.0715f*A[t%2][i+1][j+3][k+4] +
0.0725f*A[t%2][i+1][j+4][k-4] +
0.0735f*A[t%2][i+1][j+4][k-3] +
0.0745f*A[t%2][i+1][j+4][k-2] +
0.0755f*A[t%2][i+1][j+4][k-1] +
0.0765f*A[t%2][i+1][j+4][k] +
0.0775f*A[t%2][i+1][j+4][k+1] +
0.0785f*A[t%2][i+1][j+4][k+2] +
0.0795f*A[t%2][i+1][j+4][k+3] +
0.0805f*A[t%2][i+1][j+4][k+4] +
-3.288f*A[t%2][i+2][j][k] +
0.0016f*A[t%2][i+2][j-4][k-4] +
0.0026f*A[t%2][i+2][j-4][k-3] +
0.0036f*A[t%2][i+2][j-4][k-2] +
0.0046f*A[t%2][i+2][j-4][k-1] +
0.0056f*A[t%2][i+2][j-4][k] +
0.0066f*A[t%2][i+2][j-4][k+1] +
0.0076f*A[t%2][i+2][j-4][k+2] +
0.0086f*A[t%2][i+2][j-4][k+3] +
0.0096f*A[t%2][i+2][j-4][k+4] +
0.0106f*A[t%2][i+2][j-3][k-4] +
0.0116f*A[t%2][i+2][j-3][k-3] +
0.0126f*A[t%2][i+2][j-3][k-2] +
0.0136f*A[t%2][i+2][j-3][k-1] +
0.0146f*A[t%2][i+2][j-3][k] +
0.0156f*A[t%2][i+2][j-3][k+1] +
0.0166f*A[t%2][i+2][j-3][k+2] +
0.0176f*A[t%2][i+2][j-3][k+3] +
0.0186f*A[t%2][i+2][j-3][k+4] +
0.0196f*A[t%2][i+2][j-2][k-4] +
0.0206f*A[t%2][i+2][j-2][k-3] +
0.0216f*A[t%2][i+2][j-2][k-2] +
0.0226f*A[t%2][i+2][j-2][k-1] +
0.0236f*A[t%2][i+2][j-2][k] +
0.0246f*A[t%2][i+2][j-2][k+1] +
0.0256f*A[t%2][i+2][j-2][k+2] +
0.0266f*A[t%2][i+2][j-2][k+3] +
0.0276f*A[t%2][i+2][j-2][k+4] +
0.0286f*A[t%2][i+2][j-1][k-4] +
0.0296f*A[t%2][i+2][j-1][k-3] +
0.0306f*A[t%2][i+2][j-1][k-2] +
0.0316f*A[t%2][i+2][j-1][k-1] +
0.0326f*A[t%2][i+2][j-1][k] +
0.0336f*A[t%2][i+2][j-1][k+1] +
0.0346f*A[t%2][i+2][j-1][k+2] +
0.0356f*A[t%2][i+2][j-1][k+3] +
0.0366f*A[t%2][i+2][j-1][k+4] +
0.0376f*A[t%2][i+2][j][k-4] +
0.0386f*A[t%2][i+2][j][k-3] +
0.0396f*A[t%2][i+2][j][k-2] +
0.0406f*A[t%2][i+2][j][k-1] +
0.0416f*A[t%2][i+2][j][k+1] +
0.0426f*A[t%2][i+2][j][k+2] +
0.0436f*A[t%2][i+2][j][k+3] +
0.0446f*A[t%2][i+2][j][k+4] +
0.0456f*A[t%2][i+2][j+1][k-4] +
0.0466f*A[t%2][i+2][j+1][k-3] +
0.0476f*A[t%2][i+2][j+1][k-2] +
0.0486f*A[t%2][i+2][j+1][k-1] +
0.0496f*A[t%2][i+2][j+1][k] +
0.0506f*A[t%2][i+2][j+1][k+1] +
0.0516f*A[t%2][i+2][j+1][k+2] +
0.0526f*A[t%2][i+2][j+1][k+3] +
0.0536f*A[t%2][i+2][j+1][k+4] +
0.0546f*A[t%2][i+2][j+2][k-4] +
0.0556f*A[t%2][i+2][j+2][k-3] +
0.0566f*A[t%2][i+2][j+2][k-2] +
0.0576f*A[t%2][i+2][j+2][k-1] +
0.0586f*A[t%2][i+2][j+2][k] +
0.0596f*A[t%2][i+2][j+2][k+1] +
0.0606f*A[t%2][i+2][j+2][k+2] +
0.0616f*A[t%2][i+2][j+2][k+3] +
0.0626f*A[t%2][i+2][j+2][k+4] +
0.0636f*A[t%2][i+2][j+3][k-4] +
0.0646f*A[t%2][i+2][j+3][k-3] +
0.0656f*A[t%2][i+2][j+3][k-2] +
0.0666f*A[t%2][i+2][j+3][k-1] +
0.0676f*A[t%2][i+2][j+3][k] +
0.0686f*A[t%2][i+2][j+3][k+1] +
0.0696f*A[t%2][i+2][j+3][k+2] +
0.0706f*A[t%2][i+2][j+3][k+3] +
0.0716f*A[t%2][i+2][j+3][k+4] +
0.0726f*A[t%2][i+2][j+4][k-4] +
0.0736f*A[t%2][i+2][j+4][k-3] +
0.0746f*A[t%2][i+2][j+4][k-2] +
0.0756f*A[t%2][i+2][j+4][k-1] +
0.0766f*A[t%2][i+2][j+4][k] +
0.0776f*A[t%2][i+2][j+4][k+1] +
0.0786f*A[t%2][i+2][j+4][k+2] +
0.0796f*A[t%2][i+2][j+4][k+3] +
0.0806f*A[t%2][i+2][j+4][k+4] +
-3.296f*A[t%2][i+3][j][k] +
0.0017f*A[t%2][i+3][j-4][k-4] +
0.0027f*A[t%2][i+3][j-4][k-3] +
0.0037f*A[t%2][i+3][j-4][k-2] +
0.0047f*A[t%2][i+3][j-4][k-1] +
0.0057f*A[t%2][i+3][j-4][k] +
0.0067f*A[t%2][i+3][j-4][k+1] +
0.0077f*A[t%2][i+3][j-4][k+2] +
0.0087f*A[t%2][i+3][j-4][k+3] +
0.0097f*A[t%2][i+3][j-4][k+4] +
0.0107f*A[t%2][i+3][j-3][k-4] +
0.0117f*A[t%2][i+3][j-3][k-3] +
0.0127f*A[t%2][i+3][j-3][k-2] +
0.0137f*A[t%2][i+3][j-3][k-1] +
0.0147f*A[t%2][i+3][j-3][k] +
0.0157f*A[t%2][i+3][j-3][k+1] +
0.0167f*A[t%2][i+3][j-3][k+2] +
0.0177f*A[t%2][i+3][j-3][k+3] +
0.0187f*A[t%2][i+3][j-3][k+4] +
0.0197f*A[t%2][i+3][j-2][k-4] +
0.0207f*A[t%2][i+3][j-2][k-3] +
0.0217f*A[t%2][i+3][j-2][k-2] +
0.0227f*A[t%2][i+3][j-2][k-1] +
0.0237f*A[t%2][i+3][j-2][k] +
0.0247f*A[t%2][i+3][j-2][k+1] +
0.0257f*A[t%2][i+3][j-2][k+2] +
0.0267f*A[t%2][i+3][j-2][k+3] +
0.0277f*A[t%2][i+3][j-2][k+4] +
0.0287f*A[t%2][i+3][j-1][k-4] +
0.0297f*A[t%2][i+3][j-1][k-3] +
0.0307f*A[t%2][i+3][j-1][k-2] +
0.0317f*A[t%2][i+3][j-1][k-1] +
0.0327f*A[t%2][i+3][j-1][k] +
0.0337f*A[t%2][i+3][j-1][k+1] +
0.0347f*A[t%2][i+3][j-1][k+2] +
0.0357f*A[t%2][i+3][j-1][k+3] +
0.0367f*A[t%2][i+3][j-1][k+4] +
0.0377f*A[t%2][i+3][j][k-4] +
0.0387f*A[t%2][i+3][j][k-3] +
0.0397f*A[t%2][i+3][j][k-2] +
0.0407f*A[t%2][i+3][j][k-1] +
0.0417f*A[t%2][i+3][j][k+1] +
0.0427f*A[t%2][i+3][j][k+2] +
0.0437f*A[t%2][i+3][j][k+3] +
0.0447f*A[t%2][i+3][j][k+4] +
0.0457f*A[t%2][i+3][j+1][k-4] +
0.0467f*A[t%2][i+3][j+1][k-3] +
0.0477f*A[t%2][i+3][j+1][k-2] +
0.0487f*A[t%2][i+3][j+1][k-1] +
0.0497f*A[t%2][i+3][j+1][k] +
0.0507f*A[t%2][i+3][j+1][k+1] +
0.0517f*A[t%2][i+3][j+1][k+2] +
0.0527f*A[t%2][i+3][j+1][k+3] +
0.0537f*A[t%2][i+3][j+1][k+4] +
0.0547f*A[t%2][i+3][j+2][k-4] +
0.0557f*A[t%2][i+3][j+2][k-3] +
0.0567f*A[t%2][i+3][j+2][k-2] +
0.0577f*A[t%2][i+3][j+2][k-1] +
0.0587f*A[t%2][i+3][j+2][k] +
0.0597f*A[t%2][i+3][j+2][k+1] +
0.0607f*A[t%2][i+3][j+2][k+2] +
0.0617f*A[t%2][i+3][j+2][k+3] +
0.0627f*A[t%2][i+3][j+2][k+4] +
0.0637f*A[t%2][i+3][j+3][k-4] +
0.0647f*A[t%2][i+3][j+3][k-3] +
0.0657f*A[t%2][i+3][j+3][k-2] +
0.0667f*A[t%2][i+3][j+3][k-1] +
0.0677f*A[t%2][i+3][j+3][k] +
0.0687f*A[t%2][i+3][j+3][k+1] +
0.0697f*A[t%2][i+3][j+3][k+2] +
0.0707f*A[t%2][i+3][j+3][k+3] +
0.0717f*A[t%2][i+3][j+3][k+4] +
0.0727f*A[t%2][i+3][j+4][k-4] +
0.0737f*A[t%2][i+3][j+4][k-3] +
0.0747f*A[t%2][i+3][j+4][k-2] +
0.0757f*A[t%2][i+3][j+4][k-1] +
0.0767f*A[t%2][i+3][j+4][k] +
0.0777f*A[t%2][i+3][j+4][k+1] +
0.0787f*A[t%2][i+3][j+4][k+2] +
0.0797f*A[t%2][i+3][j+4][k+3] +
0.0807f*A[t%2][i+3][j+4][k+4] +
-3.304f*A[t%2][i+4][j][k] +
0.0018f*A[t%2][i+4][j-4][k-4] +
0.0028f*A[t%2][i+4][j-4][k-3] +
0.0038f*A[t%2][i+4][j-4][k-2] +
0.0048f*A[t%2][i+4][j-4][k-1] +
0.0058f*A[t%2][i+4][j-4][k] +
0.0068f*A[t%2][i+4][j-4][k+1] +
0.0078f*A[t%2][i+4][j-4][k+2] +
0.0088f*A[t%2][i+4][j-4][k+3] +
0.0098f*A[t%2][i+4][j-4][k+4] +
0.0108f*A[t%2][i+4][j-3][k-4] +
0.0118f*A[t%2][i+4][j-3][k-3] +
0.0128f*A[t%2][i+4][j-3][k-2] +
0.0138f*A[t%2][i+4][j-3][k-1] +
0.0148f*A[t%2][i+4][j-3][k] +
0.0158f*A[t%2][i+4][j-3][k+1] +
0.0168f*A[t%2][i+4][j-3][k+2] +
0.0178f*A[t%2][i+4][j-3][k+3] +
0.0188f*A[t%2][i+4][j-3][k+4] +
0.0198f*A[t%2][i+4][j-2][k-4] +
0.0208f*A[t%2][i+4][j-2][k-3] +
0.0218f*A[t%2][i+4][j-2][k-2] +
0.0228f*A[t%2][i+4][j-2][k-1] +
0.0238f*A[t%2][i+4][j-2][k] +
0.0248f*A[t%2][i+4][j-2][k+1] +
0.0258f*A[t%2][i+4][j-2][k+2] +
0.0268f*A[t%2][i+4][j-2][k+3] +
0.0278f*A[t%2][i+4][j-2][k+4] +
0.0288f*A[t%2][i+4][j-1][k-4] +
0.0298f*A[t%2][i+4][j-1][k-3] +
0.0308f*A[t%2][i+4][j-1][k-2] +
0.0318f*A[t%2][i+4][j-1][k-1] +
0.0328f*A[t%2][i+4][j-1][k] +
0.0338f*A[t%2][i+4][j-1][k+1] +
0.0348f*A[t%2][i+4][j-1][k+2] +
0.0358f*A[t%2][i+4][j-1][k+3] +
0.0368f*A[t%2][i+4][j-1][k+4] +
0.0378f*A[t%2][i+4][j][k-4] +
0.0388f*A[t%2][i+4][j][k-3] +
0.0398f*A[t%2][i+4][j][k-2] +
0.0408f*A[t%2][i+4][j][k-1] +
0.0418f*A[t%2][i+4][j][k+1] +
0.0428f*A[t%2][i+4][j][k+2] +
0.0438f*A[t%2][i+4][j][k+3] +
0.0448f*A[t%2][i+4][j][k+4] +
0.0458f*A[t%2][i+4][j+1][k-4] +
0.0468f*A[t%2][i+4][j+1][k-3] +
0.0478f*A[t%2][i+4][j+1][k-2] +
0.0488f*A[t%2][i+4][j+1][k-1] +
0.0498f*A[t%2][i+4][j+1][k] +
0.0508f*A[t%2][i+4][j+1][k+1] +
0.0518f*A[t%2][i+4][j+1][k+2] +
0.0528f*A[t%2][i+4][j+1][k+3] +
0.0538f*A[t%2][i+4][j+1][k+4] +
0.0548f*A[t%2][i+4][j+2][k-4] +
0.0558f*A[t%2][i+4][j+2][k-3] +
0.0568f*A[t%2][i+4][j+2][k-2] +
0.0578f*A[t%2][i+4][j+2][k-1] +
0.0588f*A[t%2][i+4][j+2][k] +
0.0598f*A[t%2][i+4][j+2][k+1] +
0.0608f*A[t%2][i+4][j+2][k+2] +
0.0618f*A[t%2][i+4][j+2][k+3] +
0.0628f*A[t%2][i+4][j+2][k+4] +
0.0638f*A[t%2][i+4][j+3][k-4] +
0.0648f*A[t%2][i+4][j+3][k-3] +
0.0658f*A[t%2][i+4][j+3][k-2] +
0.0668f*A[t%2][i+4][j+3][k-1] +
0.0678f*A[t%2][i+4][j+3][k] +
0.0688f*A[t%2][i+4][j+3][k+1] +
0.0698f*A[t%2][i+4][j+3][k+2] +
0.0708f*A[t%2][i+4][j+3][k+3] +
0.0718f*A[t%2][i+4][j+3][k+4] +
0.0728f*A[t%2][i+4][j+4][k-4] +
0.0738f*A[t%2][i+4][j+4][k-3] +
0.0748f*A[t%2][i+4][j+4][k-2] +
0.0758f*A[t%2][i+4][j+4][k-1] +
0.0768f*A[t%2][i+4][j+4][k] +
0.0778f*A[t%2][i+4][j+4][k+1] +
0.0788f*A[t%2][i+4][j+4][k+2] +
0.0798f*A[t%2][i+4][j+4][k+3] +
0.0808f*A[t%2][i+4][j+4][k+4];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 438bc791b50da08f383e3e682f16f837b1659b66.cu | #include <assert.h>
#include <stdio.h>
#include "box3d4r-16x16-1-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 1457
#define BENCH_RAD 4
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
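    // take the GPU path only when there is at least one interior point (dimsize >= 2*BENCH_RAD + 1 = 9) and at least one time step; otherwise fall through to the scalar loop below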
if (dimsize >= 9 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
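    // a kernel launch returns no status directly, so cudaCheckKernel() polls cudaGetLastError() right after the launch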
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 8;
const AN5D_TYPE __side3Len = 8;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
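        // __c0Padr withholds one full-size pass when the step count and the pass count differ in parity and fewer than 2 steps remain; the remainder branches below would otherwise absorb the difference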
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
-3.240f*A[t%2][i-4][j][k] +
0.0010f*A[t%2][i-4][j-4][k-4] +
0.0020f*A[t%2][i-4][j-4][k-3] +
0.0030f*A[t%2][i-4][j-4][k-2] +
0.0040f*A[t%2][i-4][j-4][k-1] +
0.0050f*A[t%2][i-4][j-4][k] +
0.0060f*A[t%2][i-4][j-4][k+1] +
0.0070f*A[t%2][i-4][j-4][k+2] +
0.0080f*A[t%2][i-4][j-4][k+3] +
0.0090f*A[t%2][i-4][j-4][k+4] +
0.0100f*A[t%2][i-4][j-3][k-4] +
0.0110f*A[t%2][i-4][j-3][k-3] +
0.0120f*A[t%2][i-4][j-3][k-2] +
0.0130f*A[t%2][i-4][j-3][k-1] +
0.0140f*A[t%2][i-4][j-3][k] +
0.0150f*A[t%2][i-4][j-3][k+1] +
0.0160f*A[t%2][i-4][j-3][k+2] +
0.0170f*A[t%2][i-4][j-3][k+3] +
0.0180f*A[t%2][i-4][j-3][k+4] +
0.0190f*A[t%2][i-4][j-2][k-4] +
0.0200f*A[t%2][i-4][j-2][k-3] +
0.0210f*A[t%2][i-4][j-2][k-2] +
0.0220f*A[t%2][i-4][j-2][k-1] +
0.0230f*A[t%2][i-4][j-2][k] +
0.0240f*A[t%2][i-4][j-2][k+1] +
0.0250f*A[t%2][i-4][j-2][k+2] +
0.0260f*A[t%2][i-4][j-2][k+3] +
0.0270f*A[t%2][i-4][j-2][k+4] +
0.0280f*A[t%2][i-4][j-1][k-4] +
0.0290f*A[t%2][i-4][j-1][k-3] +
0.0300f*A[t%2][i-4][j-1][k-2] +
0.0310f*A[t%2][i-4][j-1][k-1] +
0.0320f*A[t%2][i-4][j-1][k] +
0.0330f*A[t%2][i-4][j-1][k+1] +
0.0340f*A[t%2][i-4][j-1][k+2] +
0.0350f*A[t%2][i-4][j-1][k+3] +
0.0360f*A[t%2][i-4][j-1][k+4] +
0.0370f*A[t%2][i-4][j][k-4] +
0.0380f*A[t%2][i-4][j][k-3] +
0.0390f*A[t%2][i-4][j][k-2] +
0.0400f*A[t%2][i-4][j][k-1] +
0.0410f*A[t%2][i-4][j][k+1] +
0.0420f*A[t%2][i-4][j][k+2] +
0.0430f*A[t%2][i-4][j][k+3] +
0.0440f*A[t%2][i-4][j][k+4] +
0.0450f*A[t%2][i-4][j+1][k-4] +
0.0460f*A[t%2][i-4][j+1][k-3] +
0.0470f*A[t%2][i-4][j+1][k-2] +
0.0480f*A[t%2][i-4][j+1][k-1] +
0.0490f*A[t%2][i-4][j+1][k] +
0.0500f*A[t%2][i-4][j+1][k+1] +
0.0510f*A[t%2][i-4][j+1][k+2] +
0.0520f*A[t%2][i-4][j+1][k+3] +
0.0530f*A[t%2][i-4][j+1][k+4] +
0.0540f*A[t%2][i-4][j+2][k-4] +
0.0550f*A[t%2][i-4][j+2][k-3] +
0.0560f*A[t%2][i-4][j+2][k-2] +
0.0570f*A[t%2][i-4][j+2][k-1] +
0.0580f*A[t%2][i-4][j+2][k] +
0.0590f*A[t%2][i-4][j+2][k+1] +
0.0600f*A[t%2][i-4][j+2][k+2] +
0.0610f*A[t%2][i-4][j+2][k+3] +
0.0620f*A[t%2][i-4][j+2][k+4] +
0.0630f*A[t%2][i-4][j+3][k-4] +
0.0640f*A[t%2][i-4][j+3][k-3] +
0.0650f*A[t%2][i-4][j+3][k-2] +
0.0660f*A[t%2][i-4][j+3][k-1] +
0.0670f*A[t%2][i-4][j+3][k] +
0.0680f*A[t%2][i-4][j+3][k+1] +
0.0690f*A[t%2][i-4][j+3][k+2] +
0.0700f*A[t%2][i-4][j+3][k+3] +
0.0710f*A[t%2][i-4][j+3][k+4] +
0.0720f*A[t%2][i-4][j+4][k-4] +
0.0730f*A[t%2][i-4][j+4][k-3] +
0.0740f*A[t%2][i-4][j+4][k-2] +
0.0750f*A[t%2][i-4][j+4][k-1] +
0.0760f*A[t%2][i-4][j+4][k] +
0.0770f*A[t%2][i-4][j+4][k+1] +
0.0780f*A[t%2][i-4][j+4][k+2] +
0.0790f*A[t%2][i-4][j+4][k+3] +
0.0800f*A[t%2][i-4][j+4][k+4] +
-3.248f*A[t%2][i-3][j][k] +
0.0011f*A[t%2][i-3][j-4][k-4] +
0.0021f*A[t%2][i-3][j-4][k-3] +
0.0031f*A[t%2][i-3][j-4][k-2] +
0.0041f*A[t%2][i-3][j-4][k-1] +
0.0051f*A[t%2][i-3][j-4][k] +
0.0061f*A[t%2][i-3][j-4][k+1] +
0.0071f*A[t%2][i-3][j-4][k+2] +
0.0081f*A[t%2][i-3][j-4][k+3] +
0.0091f*A[t%2][i-3][j-4][k+4] +
0.0101f*A[t%2][i-3][j-3][k-4] +
0.0111f*A[t%2][i-3][j-3][k-3] +
0.0121f*A[t%2][i-3][j-3][k-2] +
0.0131f*A[t%2][i-3][j-3][k-1] +
0.0141f*A[t%2][i-3][j-3][k] +
0.0151f*A[t%2][i-3][j-3][k+1] +
0.0161f*A[t%2][i-3][j-3][k+2] +
0.0171f*A[t%2][i-3][j-3][k+3] +
0.0181f*A[t%2][i-3][j-3][k+4] +
0.0191f*A[t%2][i-3][j-2][k-4] +
0.0201f*A[t%2][i-3][j-2][k-3] +
0.0211f*A[t%2][i-3][j-2][k-2] +
0.0221f*A[t%2][i-3][j-2][k-1] +
0.0231f*A[t%2][i-3][j-2][k] +
0.0241f*A[t%2][i-3][j-2][k+1] +
0.0251f*A[t%2][i-3][j-2][k+2] +
0.0261f*A[t%2][i-3][j-2][k+3] +
0.0271f*A[t%2][i-3][j-2][k+4] +
0.0281f*A[t%2][i-3][j-1][k-4] +
0.0291f*A[t%2][i-3][j-1][k-3] +
0.0301f*A[t%2][i-3][j-1][k-2] +
0.0311f*A[t%2][i-3][j-1][k-1] +
0.0321f*A[t%2][i-3][j-1][k] +
0.0331f*A[t%2][i-3][j-1][k+1] +
0.0341f*A[t%2][i-3][j-1][k+2] +
0.0351f*A[t%2][i-3][j-1][k+3] +
0.0361f*A[t%2][i-3][j-1][k+4] +
0.0371f*A[t%2][i-3][j][k-4] +
0.0381f*A[t%2][i-3][j][k-3] +
0.0391f*A[t%2][i-3][j][k-2] +
0.0401f*A[t%2][i-3][j][k-1] +
0.0411f*A[t%2][i-3][j][k+1] +
0.0421f*A[t%2][i-3][j][k+2] +
0.0431f*A[t%2][i-3][j][k+3] +
0.0441f*A[t%2][i-3][j][k+4] +
0.0451f*A[t%2][i-3][j+1][k-4] +
0.0461f*A[t%2][i-3][j+1][k-3] +
0.0471f*A[t%2][i-3][j+1][k-2] +
0.0481f*A[t%2][i-3][j+1][k-1] +
0.0491f*A[t%2][i-3][j+1][k] +
0.0501f*A[t%2][i-3][j+1][k+1] +
0.0511f*A[t%2][i-3][j+1][k+2] +
0.0521f*A[t%2][i-3][j+1][k+3] +
0.0531f*A[t%2][i-3][j+1][k+4] +
0.0541f*A[t%2][i-3][j+2][k-4] +
0.0551f*A[t%2][i-3][j+2][k-3] +
0.0561f*A[t%2][i-3][j+2][k-2] +
0.0571f*A[t%2][i-3][j+2][k-1] +
0.0581f*A[t%2][i-3][j+2][k] +
0.0591f*A[t%2][i-3][j+2][k+1] +
0.0601f*A[t%2][i-3][j+2][k+2] +
0.0611f*A[t%2][i-3][j+2][k+3] +
0.0621f*A[t%2][i-3][j+2][k+4] +
0.0631f*A[t%2][i-3][j+3][k-4] +
0.0641f*A[t%2][i-3][j+3][k-3] +
0.0651f*A[t%2][i-3][j+3][k-2] +
0.0661f*A[t%2][i-3][j+3][k-1] +
0.0671f*A[t%2][i-3][j+3][k] +
0.0681f*A[t%2][i-3][j+3][k+1] +
0.0691f*A[t%2][i-3][j+3][k+2] +
0.0701f*A[t%2][i-3][j+3][k+3] +
0.0711f*A[t%2][i-3][j+3][k+4] +
0.0721f*A[t%2][i-3][j+4][k-4] +
0.0731f*A[t%2][i-3][j+4][k-3] +
0.0741f*A[t%2][i-3][j+4][k-2] +
0.0751f*A[t%2][i-3][j+4][k-1] +
0.0761f*A[t%2][i-3][j+4][k] +
0.0771f*A[t%2][i-3][j+4][k+1] +
0.0781f*A[t%2][i-3][j+4][k+2] +
0.0791f*A[t%2][i-3][j+4][k+3] +
0.0801f*A[t%2][i-3][j+4][k+4] +
-3.256f*A[t%2][i-2][j][k] +
0.0012f*A[t%2][i-2][j-4][k-4] +
0.0022f*A[t%2][i-2][j-4][k-3] +
0.0032f*A[t%2][i-2][j-4][k-2] +
0.0042f*A[t%2][i-2][j-4][k-1] +
0.0052f*A[t%2][i-2][j-4][k] +
0.0062f*A[t%2][i-2][j-4][k+1] +
0.0072f*A[t%2][i-2][j-4][k+2] +
0.0082f*A[t%2][i-2][j-4][k+3] +
0.0092f*A[t%2][i-2][j-4][k+4] +
0.0102f*A[t%2][i-2][j-3][k-4] +
0.0112f*A[t%2][i-2][j-3][k-3] +
0.0122f*A[t%2][i-2][j-3][k-2] +
0.0132f*A[t%2][i-2][j-3][k-1] +
0.0142f*A[t%2][i-2][j-3][k] +
0.0152f*A[t%2][i-2][j-3][k+1] +
0.0162f*A[t%2][i-2][j-3][k+2] +
0.0172f*A[t%2][i-2][j-3][k+3] +
0.0182f*A[t%2][i-2][j-3][k+4] +
0.0192f*A[t%2][i-2][j-2][k-4] +
0.0202f*A[t%2][i-2][j-2][k-3] +
0.0212f*A[t%2][i-2][j-2][k-2] +
0.0222f*A[t%2][i-2][j-2][k-1] +
0.0232f*A[t%2][i-2][j-2][k] +
0.0242f*A[t%2][i-2][j-2][k+1] +
0.0252f*A[t%2][i-2][j-2][k+2] +
0.0262f*A[t%2][i-2][j-2][k+3] +
0.0272f*A[t%2][i-2][j-2][k+4] +
0.0282f*A[t%2][i-2][j-1][k-4] +
0.0292f*A[t%2][i-2][j-1][k-3] +
0.0302f*A[t%2][i-2][j-1][k-2] +
0.0312f*A[t%2][i-2][j-1][k-1] +
0.0322f*A[t%2][i-2][j-1][k] +
0.0332f*A[t%2][i-2][j-1][k+1] +
0.0342f*A[t%2][i-2][j-1][k+2] +
0.0352f*A[t%2][i-2][j-1][k+3] +
0.0362f*A[t%2][i-2][j-1][k+4] +
0.0372f*A[t%2][i-2][j][k-4] +
0.0382f*A[t%2][i-2][j][k-3] +
0.0392f*A[t%2][i-2][j][k-2] +
0.0402f*A[t%2][i-2][j][k-1] +
0.0412f*A[t%2][i-2][j][k+1] +
0.0422f*A[t%2][i-2][j][k+2] +
0.0432f*A[t%2][i-2][j][k+3] +
0.0442f*A[t%2][i-2][j][k+4] +
0.0452f*A[t%2][i-2][j+1][k-4] +
0.0462f*A[t%2][i-2][j+1][k-3] +
0.0472f*A[t%2][i-2][j+1][k-2] +
0.0482f*A[t%2][i-2][j+1][k-1] +
0.0492f*A[t%2][i-2][j+1][k] +
0.0502f*A[t%2][i-2][j+1][k+1] +
0.0512f*A[t%2][i-2][j+1][k+2] +
0.0522f*A[t%2][i-2][j+1][k+3] +
0.0532f*A[t%2][i-2][j+1][k+4] +
0.0542f*A[t%2][i-2][j+2][k-4] +
0.0552f*A[t%2][i-2][j+2][k-3] +
0.0562f*A[t%2][i-2][j+2][k-2] +
0.0572f*A[t%2][i-2][j+2][k-1] +
0.0582f*A[t%2][i-2][j+2][k] +
0.0592f*A[t%2][i-2][j+2][k+1] +
0.0602f*A[t%2][i-2][j+2][k+2] +
0.0612f*A[t%2][i-2][j+2][k+3] +
0.0622f*A[t%2][i-2][j+2][k+4] +
0.0632f*A[t%2][i-2][j+3][k-4] +
0.0642f*A[t%2][i-2][j+3][k-3] +
0.0652f*A[t%2][i-2][j+3][k-2] +
0.0662f*A[t%2][i-2][j+3][k-1] +
0.0672f*A[t%2][i-2][j+3][k] +
0.0682f*A[t%2][i-2][j+3][k+1] +
0.0692f*A[t%2][i-2][j+3][k+2] +
0.0702f*A[t%2][i-2][j+3][k+3] +
0.0712f*A[t%2][i-2][j+3][k+4] +
0.0722f*A[t%2][i-2][j+4][k-4] +
0.0732f*A[t%2][i-2][j+4][k-3] +
0.0742f*A[t%2][i-2][j+4][k-2] +
0.0752f*A[t%2][i-2][j+4][k-1] +
0.0762f*A[t%2][i-2][j+4][k] +
0.0772f*A[t%2][i-2][j+4][k+1] +
0.0782f*A[t%2][i-2][j+4][k+2] +
0.0792f*A[t%2][i-2][j+4][k+3] +
0.0802f*A[t%2][i-2][j+4][k+4] +
-3.264f*A[t%2][i-1][j][k] +
0.0013f*A[t%2][i-1][j-4][k-4] +
0.0023f*A[t%2][i-1][j-4][k-3] +
0.0033f*A[t%2][i-1][j-4][k-2] +
0.0043f*A[t%2][i-1][j-4][k-1] +
0.0053f*A[t%2][i-1][j-4][k] +
0.0063f*A[t%2][i-1][j-4][k+1] +
0.0073f*A[t%2][i-1][j-4][k+2] +
0.0083f*A[t%2][i-1][j-4][k+3] +
0.0093f*A[t%2][i-1][j-4][k+4] +
0.0103f*A[t%2][i-1][j-3][k-4] +
0.0113f*A[t%2][i-1][j-3][k-3] +
0.0123f*A[t%2][i-1][j-3][k-2] +
0.0133f*A[t%2][i-1][j-3][k-1] +
0.0143f*A[t%2][i-1][j-3][k] +
0.0153f*A[t%2][i-1][j-3][k+1] +
0.0163f*A[t%2][i-1][j-3][k+2] +
0.0173f*A[t%2][i-1][j-3][k+3] +
0.0183f*A[t%2][i-1][j-3][k+4] +
0.0193f*A[t%2][i-1][j-2][k-4] +
0.0203f*A[t%2][i-1][j-2][k-3] +
0.0213f*A[t%2][i-1][j-2][k-2] +
0.0223f*A[t%2][i-1][j-2][k-1] +
0.0233f*A[t%2][i-1][j-2][k] +
0.0243f*A[t%2][i-1][j-2][k+1] +
0.0253f*A[t%2][i-1][j-2][k+2] +
0.0263f*A[t%2][i-1][j-2][k+3] +
0.0273f*A[t%2][i-1][j-2][k+4] +
0.0283f*A[t%2][i-1][j-1][k-4] +
0.0293f*A[t%2][i-1][j-1][k-3] +
0.0303f*A[t%2][i-1][j-1][k-2] +
0.0313f*A[t%2][i-1][j-1][k-1] +
0.0323f*A[t%2][i-1][j-1][k] +
0.0333f*A[t%2][i-1][j-1][k+1] +
0.0343f*A[t%2][i-1][j-1][k+2] +
0.0353f*A[t%2][i-1][j-1][k+3] +
0.0363f*A[t%2][i-1][j-1][k+4] +
0.0373f*A[t%2][i-1][j][k-4] +
0.0383f*A[t%2][i-1][j][k-3] +
0.0393f*A[t%2][i-1][j][k-2] +
0.0403f*A[t%2][i-1][j][k-1] +
0.0413f*A[t%2][i-1][j][k+1] +
0.0423f*A[t%2][i-1][j][k+2] +
0.0433f*A[t%2][i-1][j][k+3] +
0.0443f*A[t%2][i-1][j][k+4] +
0.0453f*A[t%2][i-1][j+1][k-4] +
0.0463f*A[t%2][i-1][j+1][k-3] +
0.0473f*A[t%2][i-1][j+1][k-2] +
0.0483f*A[t%2][i-1][j+1][k-1] +
0.0493f*A[t%2][i-1][j+1][k] +
0.0503f*A[t%2][i-1][j+1][k+1] +
0.0513f*A[t%2][i-1][j+1][k+2] +
0.0523f*A[t%2][i-1][j+1][k+3] +
0.0533f*A[t%2][i-1][j+1][k+4] +
0.0543f*A[t%2][i-1][j+2][k-4] +
0.0553f*A[t%2][i-1][j+2][k-3] +
0.0563f*A[t%2][i-1][j+2][k-2] +
0.0573f*A[t%2][i-1][j+2][k-1] +
0.0583f*A[t%2][i-1][j+2][k] +
0.0593f*A[t%2][i-1][j+2][k+1] +
0.0603f*A[t%2][i-1][j+2][k+2] +
0.0613f*A[t%2][i-1][j+2][k+3] +
0.0623f*A[t%2][i-1][j+2][k+4] +
0.0633f*A[t%2][i-1][j+3][k-4] +
0.0643f*A[t%2][i-1][j+3][k-3] +
0.0653f*A[t%2][i-1][j+3][k-2] +
0.0663f*A[t%2][i-1][j+3][k-1] +
0.0673f*A[t%2][i-1][j+3][k] +
0.0683f*A[t%2][i-1][j+3][k+1] +
0.0693f*A[t%2][i-1][j+3][k+2] +
0.0703f*A[t%2][i-1][j+3][k+3] +
0.0713f*A[t%2][i-1][j+3][k+4] +
0.0723f*A[t%2][i-1][j+4][k-4] +
0.0733f*A[t%2][i-1][j+4][k-3] +
0.0743f*A[t%2][i-1][j+4][k-2] +
0.0753f*A[t%2][i-1][j+4][k-1] +
0.0763f*A[t%2][i-1][j+4][k] +
0.0773f*A[t%2][i-1][j+4][k+1] +
0.0783f*A[t%2][i-1][j+4][k+2] +
0.0793f*A[t%2][i-1][j+4][k+3] +
0.0803f*A[t%2][i-1][j+4][k+4] +
-3.272f*A[t%2][i][j][k] +
0.0014f*A[t%2][i][j-4][k-4] +
0.0024f*A[t%2][i][j-4][k-3] +
0.0034f*A[t%2][i][j-4][k-2] +
0.0044f*A[t%2][i][j-4][k-1] +
0.0054f*A[t%2][i][j-4][k] +
0.0064f*A[t%2][i][j-4][k+1] +
0.0074f*A[t%2][i][j-4][k+2] +
0.0084f*A[t%2][i][j-4][k+3] +
0.0094f*A[t%2][i][j-4][k+4] +
0.0104f*A[t%2][i][j-3][k-4] +
0.0114f*A[t%2][i][j-3][k-3] +
0.0124f*A[t%2][i][j-3][k-2] +
0.0134f*A[t%2][i][j-3][k-1] +
0.0144f*A[t%2][i][j-3][k] +
0.0154f*A[t%2][i][j-3][k+1] +
0.0164f*A[t%2][i][j-3][k+2] +
0.0174f*A[t%2][i][j-3][k+3] +
0.0184f*A[t%2][i][j-3][k+4] +
0.0194f*A[t%2][i][j-2][k-4] +
0.0204f*A[t%2][i][j-2][k-3] +
0.0214f*A[t%2][i][j-2][k-2] +
0.0224f*A[t%2][i][j-2][k-1] +
0.0234f*A[t%2][i][j-2][k] +
0.0244f*A[t%2][i][j-2][k+1] +
0.0254f*A[t%2][i][j-2][k+2] +
0.0264f*A[t%2][i][j-2][k+3] +
0.0274f*A[t%2][i][j-2][k+4] +
0.0284f*A[t%2][i][j-1][k-4] +
0.0294f*A[t%2][i][j-1][k-3] +
0.0304f*A[t%2][i][j-1][k-2] +
0.0314f*A[t%2][i][j-1][k-1] +
0.0324f*A[t%2][i][j-1][k] +
0.0334f*A[t%2][i][j-1][k+1] +
0.0344f*A[t%2][i][j-1][k+2] +
0.0354f*A[t%2][i][j-1][k+3] +
0.0364f*A[t%2][i][j-1][k+4] +
0.0374f*A[t%2][i][j][k-4] +
0.0384f*A[t%2][i][j][k-3] +
0.0394f*A[t%2][i][j][k-2] +
0.0404f*A[t%2][i][j][k-1] +
0.0414f*A[t%2][i][j][k+1] +
0.0424f*A[t%2][i][j][k+2] +
0.0434f*A[t%2][i][j][k+3] +
0.0444f*A[t%2][i][j][k+4] +
0.0454f*A[t%2][i][j+1][k-4] +
0.0464f*A[t%2][i][j+1][k-3] +
0.0474f*A[t%2][i][j+1][k-2] +
0.0484f*A[t%2][i][j+1][k-1] +
0.0494f*A[t%2][i][j+1][k] +
0.0504f*A[t%2][i][j+1][k+1] +
0.0514f*A[t%2][i][j+1][k+2] +
0.0524f*A[t%2][i][j+1][k+3] +
0.0534f*A[t%2][i][j+1][k+4] +
0.0544f*A[t%2][i][j+2][k-4] +
0.0554f*A[t%2][i][j+2][k-3] +
0.0564f*A[t%2][i][j+2][k-2] +
0.0574f*A[t%2][i][j+2][k-1] +
0.0584f*A[t%2][i][j+2][k] +
0.0594f*A[t%2][i][j+2][k+1] +
0.0604f*A[t%2][i][j+2][k+2] +
0.0614f*A[t%2][i][j+2][k+3] +
0.0624f*A[t%2][i][j+2][k+4] +
0.0634f*A[t%2][i][j+3][k-4] +
0.0644f*A[t%2][i][j+3][k-3] +
0.0654f*A[t%2][i][j+3][k-2] +
0.0664f*A[t%2][i][j+3][k-1] +
0.0674f*A[t%2][i][j+3][k] +
0.0684f*A[t%2][i][j+3][k+1] +
0.0694f*A[t%2][i][j+3][k+2] +
0.0704f*A[t%2][i][j+3][k+3] +
0.0714f*A[t%2][i][j+3][k+4] +
0.0724f*A[t%2][i][j+4][k-4] +
0.0734f*A[t%2][i][j+4][k-3] +
0.0744f*A[t%2][i][j+4][k-2] +
0.0754f*A[t%2][i][j+4][k-1] +
0.0764f*A[t%2][i][j+4][k] +
0.0774f*A[t%2][i][j+4][k+1] +
0.0784f*A[t%2][i][j+4][k+2] +
0.0794f*A[t%2][i][j+4][k+3] +
0.0804f*A[t%2][i][j+4][k+4] +
-3.280f*A[t%2][i+1][j][k] +
0.0015f*A[t%2][i+1][j-4][k-4] +
0.0025f*A[t%2][i+1][j-4][k-3] +
0.0035f*A[t%2][i+1][j-4][k-2] +
0.0045f*A[t%2][i+1][j-4][k-1] +
0.0055f*A[t%2][i+1][j-4][k] +
0.0065f*A[t%2][i+1][j-4][k+1] +
0.0075f*A[t%2][i+1][j-4][k+2] +
0.0085f*A[t%2][i+1][j-4][k+3] +
0.0095f*A[t%2][i+1][j-4][k+4] +
0.0105f*A[t%2][i+1][j-3][k-4] +
0.0115f*A[t%2][i+1][j-3][k-3] +
0.0125f*A[t%2][i+1][j-3][k-2] +
0.0135f*A[t%2][i+1][j-3][k-1] +
0.0145f*A[t%2][i+1][j-3][k] +
0.0155f*A[t%2][i+1][j-3][k+1] +
0.0165f*A[t%2][i+1][j-3][k+2] +
0.0175f*A[t%2][i+1][j-3][k+3] +
0.0185f*A[t%2][i+1][j-3][k+4] +
0.0195f*A[t%2][i+1][j-2][k-4] +
0.0205f*A[t%2][i+1][j-2][k-3] +
0.0215f*A[t%2][i+1][j-2][k-2] +
0.0225f*A[t%2][i+1][j-2][k-1] +
0.0235f*A[t%2][i+1][j-2][k] +
0.0245f*A[t%2][i+1][j-2][k+1] +
0.0255f*A[t%2][i+1][j-2][k+2] +
0.0265f*A[t%2][i+1][j-2][k+3] +
0.0275f*A[t%2][i+1][j-2][k+4] +
0.0285f*A[t%2][i+1][j-1][k-4] +
0.0295f*A[t%2][i+1][j-1][k-3] +
0.0305f*A[t%2][i+1][j-1][k-2] +
0.0315f*A[t%2][i+1][j-1][k-1] +
0.0325f*A[t%2][i+1][j-1][k] +
0.0335f*A[t%2][i+1][j-1][k+1] +
0.0345f*A[t%2][i+1][j-1][k+2] +
0.0355f*A[t%2][i+1][j-1][k+3] +
0.0365f*A[t%2][i+1][j-1][k+4] +
0.0375f*A[t%2][i+1][j][k-4] +
0.0385f*A[t%2][i+1][j][k-3] +
0.0395f*A[t%2][i+1][j][k-2] +
0.0405f*A[t%2][i+1][j][k-1] +
0.0415f*A[t%2][i+1][j][k+1] +
0.0425f*A[t%2][i+1][j][k+2] +
0.0435f*A[t%2][i+1][j][k+3] +
0.0445f*A[t%2][i+1][j][k+4] +
0.0455f*A[t%2][i+1][j+1][k-4] +
0.0465f*A[t%2][i+1][j+1][k-3] +
0.0475f*A[t%2][i+1][j+1][k-2] +
0.0485f*A[t%2][i+1][j+1][k-1] +
0.0495f*A[t%2][i+1][j+1][k] +
0.0505f*A[t%2][i+1][j+1][k+1] +
0.0515f*A[t%2][i+1][j+1][k+2] +
0.0525f*A[t%2][i+1][j+1][k+3] +
0.0535f*A[t%2][i+1][j+1][k+4] +
0.0545f*A[t%2][i+1][j+2][k-4] +
0.0555f*A[t%2][i+1][j+2][k-3] +
0.0565f*A[t%2][i+1][j+2][k-2] +
0.0575f*A[t%2][i+1][j+2][k-1] +
0.0585f*A[t%2][i+1][j+2][k] +
0.0595f*A[t%2][i+1][j+2][k+1] +
0.0605f*A[t%2][i+1][j+2][k+2] +
0.0615f*A[t%2][i+1][j+2][k+3] +
0.0625f*A[t%2][i+1][j+2][k+4] +
0.0635f*A[t%2][i+1][j+3][k-4] +
0.0645f*A[t%2][i+1][j+3][k-3] +
0.0655f*A[t%2][i+1][j+3][k-2] +
0.0665f*A[t%2][i+1][j+3][k-1] +
0.0675f*A[t%2][i+1][j+3][k] +
0.0685f*A[t%2][i+1][j+3][k+1] +
0.0695f*A[t%2][i+1][j+3][k+2] +
0.0705f*A[t%2][i+1][j+3][k+3] +
0.0715f*A[t%2][i+1][j+3][k+4] +
0.0725f*A[t%2][i+1][j+4][k-4] +
0.0735f*A[t%2][i+1][j+4][k-3] +
0.0745f*A[t%2][i+1][j+4][k-2] +
0.0755f*A[t%2][i+1][j+4][k-1] +
0.0765f*A[t%2][i+1][j+4][k] +
0.0775f*A[t%2][i+1][j+4][k+1] +
0.0785f*A[t%2][i+1][j+4][k+2] +
0.0795f*A[t%2][i+1][j+4][k+3] +
0.0805f*A[t%2][i+1][j+4][k+4] +
-3.288f*A[t%2][i+2][j][k] +
0.0016f*A[t%2][i+2][j-4][k-4] +
0.0026f*A[t%2][i+2][j-4][k-3] +
0.0036f*A[t%2][i+2][j-4][k-2] +
0.0046f*A[t%2][i+2][j-4][k-1] +
0.0056f*A[t%2][i+2][j-4][k] +
0.0066f*A[t%2][i+2][j-4][k+1] +
0.0076f*A[t%2][i+2][j-4][k+2] +
0.0086f*A[t%2][i+2][j-4][k+3] +
0.0096f*A[t%2][i+2][j-4][k+4] +
0.0106f*A[t%2][i+2][j-3][k-4] +
0.0116f*A[t%2][i+2][j-3][k-3] +
0.0126f*A[t%2][i+2][j-3][k-2] +
0.0136f*A[t%2][i+2][j-3][k-1] +
0.0146f*A[t%2][i+2][j-3][k] +
0.0156f*A[t%2][i+2][j-3][k+1] +
0.0166f*A[t%2][i+2][j-3][k+2] +
0.0176f*A[t%2][i+2][j-3][k+3] +
0.0186f*A[t%2][i+2][j-3][k+4] +
0.0196f*A[t%2][i+2][j-2][k-4] +
0.0206f*A[t%2][i+2][j-2][k-3] +
0.0216f*A[t%2][i+2][j-2][k-2] +
0.0226f*A[t%2][i+2][j-2][k-1] +
0.0236f*A[t%2][i+2][j-2][k] +
0.0246f*A[t%2][i+2][j-2][k+1] +
0.0256f*A[t%2][i+2][j-2][k+2] +
0.0266f*A[t%2][i+2][j-2][k+3] +
0.0276f*A[t%2][i+2][j-2][k+4] +
0.0286f*A[t%2][i+2][j-1][k-4] +
0.0296f*A[t%2][i+2][j-1][k-3] +
0.0306f*A[t%2][i+2][j-1][k-2] +
0.0316f*A[t%2][i+2][j-1][k-1] +
0.0326f*A[t%2][i+2][j-1][k] +
0.0336f*A[t%2][i+2][j-1][k+1] +
0.0346f*A[t%2][i+2][j-1][k+2] +
0.0356f*A[t%2][i+2][j-1][k+3] +
0.0366f*A[t%2][i+2][j-1][k+4] +
0.0376f*A[t%2][i+2][j][k-4] +
0.0386f*A[t%2][i+2][j][k-3] +
0.0396f*A[t%2][i+2][j][k-2] +
0.0406f*A[t%2][i+2][j][k-1] +
0.0416f*A[t%2][i+2][j][k+1] +
0.0426f*A[t%2][i+2][j][k+2] +
0.0436f*A[t%2][i+2][j][k+3] +
0.0446f*A[t%2][i+2][j][k+4] +
0.0456f*A[t%2][i+2][j+1][k-4] +
0.0466f*A[t%2][i+2][j+1][k-3] +
0.0476f*A[t%2][i+2][j+1][k-2] +
0.0486f*A[t%2][i+2][j+1][k-1] +
0.0496f*A[t%2][i+2][j+1][k] +
0.0506f*A[t%2][i+2][j+1][k+1] +
0.0516f*A[t%2][i+2][j+1][k+2] +
0.0526f*A[t%2][i+2][j+1][k+3] +
0.0536f*A[t%2][i+2][j+1][k+4] +
0.0546f*A[t%2][i+2][j+2][k-4] +
0.0556f*A[t%2][i+2][j+2][k-3] +
0.0566f*A[t%2][i+2][j+2][k-2] +
0.0576f*A[t%2][i+2][j+2][k-1] +
0.0586f*A[t%2][i+2][j+2][k] +
0.0596f*A[t%2][i+2][j+2][k+1] +
0.0606f*A[t%2][i+2][j+2][k+2] +
0.0616f*A[t%2][i+2][j+2][k+3] +
0.0626f*A[t%2][i+2][j+2][k+4] +
0.0636f*A[t%2][i+2][j+3][k-4] +
0.0646f*A[t%2][i+2][j+3][k-3] +
0.0656f*A[t%2][i+2][j+3][k-2] +
0.0666f*A[t%2][i+2][j+3][k-1] +
0.0676f*A[t%2][i+2][j+3][k] +
0.0686f*A[t%2][i+2][j+3][k+1] +
0.0696f*A[t%2][i+2][j+3][k+2] +
0.0706f*A[t%2][i+2][j+3][k+3] +
0.0716f*A[t%2][i+2][j+3][k+4] +
0.0726f*A[t%2][i+2][j+4][k-4] +
0.0736f*A[t%2][i+2][j+4][k-3] +
0.0746f*A[t%2][i+2][j+4][k-2] +
0.0756f*A[t%2][i+2][j+4][k-1] +
0.0766f*A[t%2][i+2][j+4][k] +
0.0776f*A[t%2][i+2][j+4][k+1] +
0.0786f*A[t%2][i+2][j+4][k+2] +
0.0796f*A[t%2][i+2][j+4][k+3] +
0.0806f*A[t%2][i+2][j+4][k+4] +
-3.296f*A[t%2][i+3][j][k] +
0.0017f*A[t%2][i+3][j-4][k-4] +
0.0027f*A[t%2][i+3][j-4][k-3] +
0.0037f*A[t%2][i+3][j-4][k-2] +
0.0047f*A[t%2][i+3][j-4][k-1] +
0.0057f*A[t%2][i+3][j-4][k] +
0.0067f*A[t%2][i+3][j-4][k+1] +
0.0077f*A[t%2][i+3][j-4][k+2] +
0.0087f*A[t%2][i+3][j-4][k+3] +
0.0097f*A[t%2][i+3][j-4][k+4] +
0.0107f*A[t%2][i+3][j-3][k-4] +
0.0117f*A[t%2][i+3][j-3][k-3] +
0.0127f*A[t%2][i+3][j-3][k-2] +
0.0137f*A[t%2][i+3][j-3][k-1] +
0.0147f*A[t%2][i+3][j-3][k] +
0.0157f*A[t%2][i+3][j-3][k+1] +
0.0167f*A[t%2][i+3][j-3][k+2] +
0.0177f*A[t%2][i+3][j-3][k+3] +
0.0187f*A[t%2][i+3][j-3][k+4] +
0.0197f*A[t%2][i+3][j-2][k-4] +
0.0207f*A[t%2][i+3][j-2][k-3] +
0.0217f*A[t%2][i+3][j-2][k-2] +
0.0227f*A[t%2][i+3][j-2][k-1] +
0.0237f*A[t%2][i+3][j-2][k] +
0.0247f*A[t%2][i+3][j-2][k+1] +
0.0257f*A[t%2][i+3][j-2][k+2] +
0.0267f*A[t%2][i+3][j-2][k+3] +
0.0277f*A[t%2][i+3][j-2][k+4] +
0.0287f*A[t%2][i+3][j-1][k-4] +
0.0297f*A[t%2][i+3][j-1][k-3] +
0.0307f*A[t%2][i+3][j-1][k-2] +
0.0317f*A[t%2][i+3][j-1][k-1] +
0.0327f*A[t%2][i+3][j-1][k] +
0.0337f*A[t%2][i+3][j-1][k+1] +
0.0347f*A[t%2][i+3][j-1][k+2] +
0.0357f*A[t%2][i+3][j-1][k+3] +
0.0367f*A[t%2][i+3][j-1][k+4] +
0.0377f*A[t%2][i+3][j][k-4] +
0.0387f*A[t%2][i+3][j][k-3] +
0.0397f*A[t%2][i+3][j][k-2] +
0.0407f*A[t%2][i+3][j][k-1] +
0.0417f*A[t%2][i+3][j][k+1] +
0.0427f*A[t%2][i+3][j][k+2] +
0.0437f*A[t%2][i+3][j][k+3] +
0.0447f*A[t%2][i+3][j][k+4] +
0.0457f*A[t%2][i+3][j+1][k-4] +
0.0467f*A[t%2][i+3][j+1][k-3] +
0.0477f*A[t%2][i+3][j+1][k-2] +
0.0487f*A[t%2][i+3][j+1][k-1] +
0.0497f*A[t%2][i+3][j+1][k] +
0.0507f*A[t%2][i+3][j+1][k+1] +
0.0517f*A[t%2][i+3][j+1][k+2] +
0.0527f*A[t%2][i+3][j+1][k+3] +
0.0537f*A[t%2][i+3][j+1][k+4] +
0.0547f*A[t%2][i+3][j+2][k-4] +
0.0557f*A[t%2][i+3][j+2][k-3] +
0.0567f*A[t%2][i+3][j+2][k-2] +
0.0577f*A[t%2][i+3][j+2][k-1] +
0.0587f*A[t%2][i+3][j+2][k] +
0.0597f*A[t%2][i+3][j+2][k+1] +
0.0607f*A[t%2][i+3][j+2][k+2] +
0.0617f*A[t%2][i+3][j+2][k+3] +
0.0627f*A[t%2][i+3][j+2][k+4] +
0.0637f*A[t%2][i+3][j+3][k-4] +
0.0647f*A[t%2][i+3][j+3][k-3] +
0.0657f*A[t%2][i+3][j+3][k-2] +
0.0667f*A[t%2][i+3][j+3][k-1] +
0.0677f*A[t%2][i+3][j+3][k] +
0.0687f*A[t%2][i+3][j+3][k+1] +
0.0697f*A[t%2][i+3][j+3][k+2] +
0.0707f*A[t%2][i+3][j+3][k+3] +
0.0717f*A[t%2][i+3][j+3][k+4] +
0.0727f*A[t%2][i+3][j+4][k-4] +
0.0737f*A[t%2][i+3][j+4][k-3] +
0.0747f*A[t%2][i+3][j+4][k-2] +
0.0757f*A[t%2][i+3][j+4][k-1] +
0.0767f*A[t%2][i+3][j+4][k] +
0.0777f*A[t%2][i+3][j+4][k+1] +
0.0787f*A[t%2][i+3][j+4][k+2] +
0.0797f*A[t%2][i+3][j+4][k+3] +
0.0807f*A[t%2][i+3][j+4][k+4] +
-3.304f*A[t%2][i+4][j][k] +
0.0018f*A[t%2][i+4][j-4][k-4] +
0.0028f*A[t%2][i+4][j-4][k-3] +
0.0038f*A[t%2][i+4][j-4][k-2] +
0.0048f*A[t%2][i+4][j-4][k-1] +
0.0058f*A[t%2][i+4][j-4][k] +
0.0068f*A[t%2][i+4][j-4][k+1] +
0.0078f*A[t%2][i+4][j-4][k+2] +
0.0088f*A[t%2][i+4][j-4][k+3] +
0.0098f*A[t%2][i+4][j-4][k+4] +
0.0108f*A[t%2][i+4][j-3][k-4] +
0.0118f*A[t%2][i+4][j-3][k-3] +
0.0128f*A[t%2][i+4][j-3][k-2] +
0.0138f*A[t%2][i+4][j-3][k-1] +
0.0148f*A[t%2][i+4][j-3][k] +
0.0158f*A[t%2][i+4][j-3][k+1] +
0.0168f*A[t%2][i+4][j-3][k+2] +
0.0178f*A[t%2][i+4][j-3][k+3] +
0.0188f*A[t%2][i+4][j-3][k+4] +
0.0198f*A[t%2][i+4][j-2][k-4] +
0.0208f*A[t%2][i+4][j-2][k-3] +
0.0218f*A[t%2][i+4][j-2][k-2] +
0.0228f*A[t%2][i+4][j-2][k-1] +
0.0238f*A[t%2][i+4][j-2][k] +
0.0248f*A[t%2][i+4][j-2][k+1] +
0.0258f*A[t%2][i+4][j-2][k+2] +
0.0268f*A[t%2][i+4][j-2][k+3] +
0.0278f*A[t%2][i+4][j-2][k+4] +
0.0288f*A[t%2][i+4][j-1][k-4] +
0.0298f*A[t%2][i+4][j-1][k-3] +
0.0308f*A[t%2][i+4][j-1][k-2] +
0.0318f*A[t%2][i+4][j-1][k-1] +
0.0328f*A[t%2][i+4][j-1][k] +
0.0338f*A[t%2][i+4][j-1][k+1] +
0.0348f*A[t%2][i+4][j-1][k+2] +
0.0358f*A[t%2][i+4][j-1][k+3] +
0.0368f*A[t%2][i+4][j-1][k+4] +
0.0378f*A[t%2][i+4][j][k-4] +
0.0388f*A[t%2][i+4][j][k-3] +
0.0398f*A[t%2][i+4][j][k-2] +
0.0408f*A[t%2][i+4][j][k-1] +
0.0418f*A[t%2][i+4][j][k+1] +
0.0428f*A[t%2][i+4][j][k+2] +
0.0438f*A[t%2][i+4][j][k+3] +
0.0448f*A[t%2][i+4][j][k+4] +
0.0458f*A[t%2][i+4][j+1][k-4] +
0.0468f*A[t%2][i+4][j+1][k-3] +
0.0478f*A[t%2][i+4][j+1][k-2] +
0.0488f*A[t%2][i+4][j+1][k-1] +
0.0498f*A[t%2][i+4][j+1][k] +
0.0508f*A[t%2][i+4][j+1][k+1] +
0.0518f*A[t%2][i+4][j+1][k+2] +
0.0528f*A[t%2][i+4][j+1][k+3] +
0.0538f*A[t%2][i+4][j+1][k+4] +
0.0548f*A[t%2][i+4][j+2][k-4] +
0.0558f*A[t%2][i+4][j+2][k-3] +
0.0568f*A[t%2][i+4][j+2][k-2] +
0.0578f*A[t%2][i+4][j+2][k-1] +
0.0588f*A[t%2][i+4][j+2][k] +
0.0598f*A[t%2][i+4][j+2][k+1] +
0.0608f*A[t%2][i+4][j+2][k+2] +
0.0618f*A[t%2][i+4][j+2][k+3] +
0.0628f*A[t%2][i+4][j+2][k+4] +
0.0638f*A[t%2][i+4][j+3][k-4] +
0.0648f*A[t%2][i+4][j+3][k-3] +
0.0658f*A[t%2][i+4][j+3][k-2] +
0.0668f*A[t%2][i+4][j+3][k-1] +
0.0678f*A[t%2][i+4][j+3][k] +
0.0688f*A[t%2][i+4][j+3][k+1] +
0.0698f*A[t%2][i+4][j+3][k+2] +
0.0708f*A[t%2][i+4][j+3][k+3] +
0.0718f*A[t%2][i+4][j+3][k+4] +
0.0728f*A[t%2][i+4][j+4][k-4] +
0.0738f*A[t%2][i+4][j+4][k-3] +
0.0748f*A[t%2][i+4][j+4][k-2] +
0.0758f*A[t%2][i+4][j+4][k-1] +
0.0768f*A[t%2][i+4][j+4][k] +
0.0778f*A[t%2][i+4][j+4][k+1] +
0.0788f*A[t%2][i+4][j+4][k+2] +
0.0798f*A[t%2][i+4][j+4][k+3] +
0.0808f*A[t%2][i+4][j+4][k+4];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
8b2c6d9fa07cff12a5adac08292c3774a4110b02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// each thread block processes one node
__global__ void largeKernel(int *offset, int *col_id, int *large, int sizeLarge, int *color, int currentColor)
{
__shared__ bool set[1];
// get the node from the large array
if(blockIdx.x < sizeLarge)
{
set[0]=1;
int node = large[blockIdx.x];
if(color[node]==0)
{
int neighLen = offset[node+1]-offset[node];
for(int i = threadIdx.x; i<neighLen; i=i+blockDim.x)
{
int item = col_id[offset[node]+i];
if(item >= node && color[item]==0)
set[0]=0;
}
__syncthreads();
if(threadIdx.x == 0){
if(set[0] == 1)
color[node]=currentColor;
}
}
}
}
} | 8b2c6d9fa07cff12a5adac08292c3774a4110b02.cu | // each thread block processes one node
__global__ void largeKernel(int *offset, int *col_id, int *large, int sizeLarge, int *color, int currentColor)
{
__shared__ bool set[1];
// get the node from the large array
if(blockIdx.x < sizeLarge)
{
set[0]=1;
int node = large[blockIdx.x];
if(color[node]==0)
{
int neighLen = offset[node+1]-offset[node];
for(int i = threadIdx.x; i<neighLen; i=i+blockDim.x)
{
int item = col_id[offset[node]+i];
if(item >= node && color[item]==0)
set[0]=0;
}
__syncthreads();
if(threadIdx.x == 0){
if(set[0] == 1)
color[node]=currentColor;
}
}
}
}
|
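
/*
 * Illustrative host-side driver for the largeKernel above; this is a sketch
 * under assumptions, not code shipped with either file. It assumes the graph
 * is already on the device in CSR form (d_offset, d_col_id), that h_large and
 * d_large hold the ids of the high-degree nodes this kernel handles, and that
 * d_color starts zero-initialized. A complete coloring would also run a
 * companion kernel for the remaining nodes, so this loop only terminates if
 * every node it waits on appears in h_large.
 */
#include <cuda_runtime.h>
#include <vector>

static void colorLargeNodes(int *d_offset, int *d_col_id, int *d_large,
                            const std::vector<int> &h_large,
                            int *d_color, int numNodes)
{
    int sizeLarge = (int)h_large.size();
    if (sizeLarge == 0) return;

    std::vector<int> h_color(numNodes, 0);
    bool done = false;
    for (int currentColor = 1; !done; ++currentColor) {
        // One block per candidate node; 128 threads scan its adjacency list.
        largeKernel<<<sizeLarge, 128>>>(d_offset, d_col_id, d_large, sizeLarge,
                                        d_color, currentColor);
        cudaDeviceSynchronize();

        // Copy the colors back and stop once every listed node has a color.
        cudaMemcpy(h_color.data(), d_color, numNodes * sizeof(int),
                   cudaMemcpyDeviceToHost);
        done = true;
        for (int node : h_large) {
            if (h_color[node] == 0) { done = false; break; }
        }
    }
}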
8c20b591c61038aa9408df54cf5cd0cc58a1781b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mult2_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *g_out = NULL;
hipMalloc(&g_out, XSIZE*YSIZE);
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
double *ct = NULL;
hipMalloc(&ct, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(mult2_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, g_out, a, b, ct, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(mult2_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, g_out, a, b, ct, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(mult2_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, g_out, a, b, ct, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8c20b591c61038aa9408df54cf5cd0cc58a1781b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mult2_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *g_out = NULL;
cudaMalloc(&g_out, XSIZE*YSIZE);
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
double *ct = NULL;
cudaMalloc(&ct, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mult2_kernel<<<gridBlock,threadBlock>>>(g_out,a,b,ct,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mult2_kernel<<<gridBlock,threadBlock>>>(g_out,a,b,ct,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mult2_kernel<<<gridBlock,threadBlock>>>(g_out,a,b,ct,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
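
/*
 * Both timed loops above read the host clock immediately after enqueueing the
 * launches; kernel launches are asynchronous, so that interval largely
 * measures launch overhead rather than execution time. Below is a sketch of
 * an end-to-end variant, assuming the same mult2_kernel, arguments, includes
 * and using-directives as the file above; it is an illustration, not the
 * benchmark's own code.
 */
static float timeMult2(double *g_out, double *a, double *b, double *ct, int n,
                       dim3 gridBlock, dim3 threadBlock, int iters)
{
    auto start = steady_clock::now();
    for (int it = 0; it < iters; ++it) {
        mult2_kernel<<<gridBlock, threadBlock>>>(g_out, a, b, ct, n);
    }
    // Wait for every queued launch to finish before reading the end time.
    cudaDeviceSynchronize();
    auto end = steady_clock::now();
    auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
    return usecs.count();
}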
3a150a5d3548105f17c491ef2fbb240673bf7546.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "atomic.cuh"
__global__ void atomic(int *d_bins, const int *d_in, const int BIN_COUNT) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
atomicAdd(&(d_bins[myBin]), 1);
} | 3a150a5d3548105f17c491ef2fbb240673bf7546.cu | #include "atomic.cuh"
__global__ void atomic(int *d_bins, const int *d_in, const int BIN_COUNT) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
atomicAdd(&(d_bins[myBin]), 1);
} |
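
/*
 * Illustrative host-side usage of the histogram kernel above; a sketch under
 * assumptions, not part of the original file. The sizes, test data and launch
 * shape are invented for the example, and the kernel definition above is
 * assumed to be visible. Note that the kernel has no bounds check, so the
 * launch must cover exactly ARRAY_SIZE threads.
 */
#include <cuda_runtime.h>
#include <vector>

int main()
{
    const int ARRAY_SIZE = 1 << 20;   // input size, a multiple of the block size
    const int BIN_COUNT  = 16;
    const int BLOCK      = 256;

    std::vector<int> h_in(ARRAY_SIZE);
    for (int i = 0; i < ARRAY_SIZE; ++i) h_in[i] = i;   // arbitrary test data

    int *d_in = nullptr, *d_bins = nullptr;
    cudaMalloc(&d_in, ARRAY_SIZE * sizeof(int));
    cudaMalloc(&d_bins, BIN_COUNT * sizeof(int));
    cudaMemcpy(d_in, h_in.data(), ARRAY_SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_bins, 0, BIN_COUNT * sizeof(int));

    // One thread per input element; each thread bumps its bin atomically.
    atomic<<<ARRAY_SIZE / BLOCK, BLOCK>>>(d_bins, d_in, BIN_COUNT);

    std::vector<int> h_bins(BIN_COUNT);
    cudaMemcpy(h_bins.data(), d_bins, BIN_COUNT * sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_in);
    cudaFree(d_bins);
    return 0;
}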