hip_filename:  string, lengths 5 to 84
hip_content:   string, lengths 79 to 9.69M
cuda_filename: string, lengths 4 to 83
cuda_content:  string, lengths 19 to 9.69M
f7ee835f847d61b2b0ac07820cb0079fcb85e81e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <new>

struct Foo {
    int value = 0x1234;
};

__global__ void kernel(Foo* storage, Foo** initialized) {
    Foo* start = storage + threadIdx.x * 2;
    initialized[threadIdx.x] = new (start) Foo[2];
}
f7ee835f847d61b2b0ac07820cb0079fcb85e81e.cu
#include <new>

struct Foo {
    int value = 0x1234;
};

__global__ void kernel(Foo* storage, Foo** initialized) {
    Foo* start = storage + threadIdx.x * 2;
    initialized[threadIdx.x] = new (start) Foo[2];
}
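This pair shows the smallest kind of transformation in the dataset: hipify only prepends the generated-file banner and the "hip/hip_runtime.h" include, while the kernel body (placement new of two Foo objects per thread) is unchanged. The host-side launcher below is not part of the dataset; it is a minimal sketch, assuming a HIP toolchain, of how such a kernel could be exercised. The thread count, buffer sizes, and main function are illustrative assumptions.

#include "hip/hip_runtime.h"
#include <cstdio>
#include <new>

struct Foo { int value = 0x1234; };

__global__ void kernel(Foo* storage, Foo** initialized) {
    Foo* start = storage + threadIdx.x * 2;
    initialized[threadIdx.x] = new (start) Foo[2];
}

int main() {
    const int threads = 4;                 // illustrative thread count, not from the dataset
    Foo*  dev_storage = NULL;
    Foo** dev_initialized = NULL;
    hipMalloc((void**)&dev_storage, sizeof(Foo) * threads * 2);   // two Foo slots per thread
    hipMalloc((void**)&dev_initialized, sizeof(Foo*) * threads);  // one result pointer per thread

    kernel<<<dim3(1), dim3(threads)>>>(dev_storage, dev_initialized);
    hipDeviceSynchronize();

    // Each entry should point at the first of that thread's two constructed Foo objects
    // (device addresses, meaningful for comparison only, not for host dereferencing).
    Foo* host_ptrs[threads];
    hipMemcpy(host_ptrs, dev_initialized, sizeof(host_ptrs), hipMemcpyDeviceToHost);
    for (int i = 0; i < threads; ++i)
        printf("thread %d -> %p\n", i, (void*)host_ptrs[i]);

    hipFree(dev_storage);
    hipFree(dev_initialized);
    return 0;
}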
cc38b789800fcab6dce40a3aa7b547f7f83b326a.hip
// !!! This is a file automatically generated by hipify!!! /* */ #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> #include <math.h> #include <float.h> //#include <hip/hip_runtime.h> #include "unimem.h" #include "first.h" //#define DEBUG_CUDA #define MAX_RESULTS 100000 //#define PACK_RESULTS struct boundaries { coord x_min; coord y_min; coord x_max; coord y_max; bool intersection; }; struct node* m_dev_node = NULL; __constant__ boundaries dev_bonds[1]; __constant__ unsigned dev_threads_count[1]; unsigned m_threads_count; indexer m_count_branches; int m_length_of_tree = 0; int m_multi_processor_count = 1; int m_warp_size2 = 64; //__constant__ struct branch *m_ttt_cuda_first_branch = NULL; __device__ bool cuda_check_intersection(coord p1x, coord p1y, coord p2x, coord p2y, coord p3x, coord p3y, coord p4x, coord p4y); __device__ coord cuda_distance(coord px, coord py, coord line_p0x, coord line_p0y, coord line_p1x, coord line_p1y); #ifdef PACK_RESULTS /// compare int cmp(const void* a, const void* b) { return (int)(*(indexer*)a - *(indexer*)b); } #endif extern "C" bool init_cuda_device(int deviceID, struct node* node) { if (!node) return false; //return false; struct node *nd = node; unsigned count1[64], i = 0; m_count_branches = 0; for (int j = 0; j < 64; j++) count1[j] = 0; //count1[0] = 1; struct node *stack_node[64]; int stack_pos = 0; indexer stack_idx[64]; alignas(16) struct branch *first_branch = NULL; alignas(16) struct node* stack_first_node[64]; for (unsigned i = 0; i < 64; ++i) { stack_first_node[i] = NULL; } while (i < nd->count_child_nodes) { if (!stack_first_node[stack_pos] || nd < stack_first_node[stack_pos]) stack_first_node[stack_pos] = nd; if (nd->is_last_node) { for (unsigned j = 0; j < nd->count_child_nodes; ++j) { struct branch *br = (struct branch*)(nd->child_node[j]); if (!first_branch || br < first_branch) first_branch = br; } /*if (!count_br) count_br = nd->count_child_nodes; if (count_br != nd->count_child_nodes) printf("Branches %u vs %u\n", count_br, nd->count_child_nodes);*/ m_count_branches += nd->count_child_nodes; // return from stack while (stack_pos > 0) { stack_pos--; nd = stack_node[stack_pos]; i = stack_idx[stack_pos] + 1; if (i < nd->count_child_nodes) { stack_idx[stack_pos] = i; stack_node[stack_pos] = nd; stack_pos++; nd = (struct node*)nd->child_node[i]; i = 0; break; } else { //if (count1[stack_pos]) { //if (count1[stack_pos] != nd->count_child_nodes) { // printf("Nodes %u vs %u\n", count1[stack_pos], nd->count_child_nodes); //} count1[stack_pos] += nd->count_child_nodes; //} //else { // count1[stack_pos] = nd->count_child_nodes; //} } } } else { stack_idx[stack_pos] = i; stack_node[stack_pos] = nd; stack_pos++; i = 0; nd = (struct node*)nd->child_node[i]; /*if (!count1[stack_pos]) count1[stack_pos] = nd->count_child_nodes; else count1[stack_pos] += nd->count_child_nodes;*/ } /*} else if (i < nd->count_child_nodes) { i++;*/ /*if (!count1[stack_pos]) count1[stack_pos] = nd->count_child_nodes; else count1[stack_pos] += nd->count_child_nodes; */ } //return false; int deviceCount; hipError_t er1 = hipGetDeviceCount(&deviceCount); printf("DevicecheckCudaErrors Count: %d\n", deviceCount); if (deviceID == -1) deviceID = 0; hipDeviceProp_t prop; for (int ii = 0; ii < deviceCount; ++ii) { er1 = hipGetDeviceProperties(&prop, ii); if (prop.major < 2 || prop.canMapHostMemory != 1) { printf("ERROR: calculation requires GPU devices with compute SM 2.0 or higher, or can not using MapHostMemory.\n"); printf("Current GPU device has compute SM%d.%d, 
Exiting...", prop.major, prop.minor); //exit(EXIT_WAIVED); return false; } printf("GPU device name is %s\n", prop.name); printf("GPU total memory = %.0f Mb\n", prop.totalGlobalMem / 1024.0 / 1024.0); printf("Number of multiprocessors on the device = %u\n", prop.multiProcessorCount); } er1 = hipSetDevice(deviceID); hipSetDeviceFlags(hipDeviceMapHost); er1 = hipGetDeviceProperties(&prop, deviceID); m_multi_processor_count = prop.multiProcessorCount; m_warp_size2 = prop.warpSize * 2; m_threads_count = prop.multiProcessorCount * prop.warpSize * 2; //er1 = hipMalloc((void**)&dev_threads_count, sizeof(unsigned)); er1 = hipMemcpyToSymbol(dev_threads_count, &m_threads_count, sizeof(unsigned)); // copy rtree int pos = 63; for (; pos >= 0; --pos) { if (count1[pos]) break; } m_length_of_tree = pos + 1; // allocationg memory for branches alignas(16) struct branch* tbr = (struct branch*)aligned_alloc(16, sizeof(struct branch) * m_count_branches); // struct branch* first_branch = NULL; /*nd = node; i = 0; unsigned k = 0; while (i < nd->count_child_nodes) { if (nd->is_last_node) { for (unsigned j = 0; j < nd->count_child_nodes; ++j) { struct branch *br = (struct branch*)(nd->child_node[j]); //if (!first_branch || br < first_branch) // first_branch = br; memcpy(tbr + k, br, sizeof(struct branch)); k++; } while (stack_pos > 0) { stack_pos--; nd = stack_node[stack_pos]; i = stack_idx[stack_pos] + 1; if (i < nd->count_child_nodes) { // insert to stack stack_idx[stack_pos] = i; stack_node[stack_pos] = nd; stack_pos++; nd = (struct node*)nd->child_node[i]; i = 0; break; } } } else { // insert to stack stack_idx[stack_pos] = i; stack_node[stack_pos] = nd; stack_pos++; nd = (struct node*)nd->child_node[i]; } }*/ /*nd = node; i = 0; for (int j = 0; j <= pos; ++j) nd = (struct node*)(nd->child_node[0]); for (indexer j = 0; j < count1[pos]; ++j) { for (indexer k = 0; k < nd[j].count_child_nodes; ++k) { memcpy(tbr + i, nd[j].child_node[k], sizeof(struct branch)); i++; } } */ memcpy(tbr, first_branch, sizeof(struct branch) * m_count_branches); // for debug /*printf("\n\n\n======================================================================\n"); for (indexer i = 0; i < count1[pos]; ++i) { if ((struct node*)(stack_first_node[pos + 1])[i].is_last_node) { unsigned tt = ((struct node*)(stack_first_node[pos + 1]))[i].count_child_nodes; for (indexer ii = 0; ii < tt; ++ii) { unsigned idx = (struct branch*)((struct node*)(stack_first_node[pos + 1])[i].child_node[ii]) - first_branch; if (!idx) printf("0\n"); else printf("%u\n", idx); } } else { printf("Error last node %u\n", i); } }*/ // copy data of branches to device clock_t t1 = clock(); hipStream_t stream; hipStreamCreate(&stream); for (indexer i = 0; i < m_count_branches; ++i) { void *data_ptr = tbr[i].leaf_x; er1 = hipMalloc((void**)&(tbr[i].leaf_x), sizeof(coord) * tbr[i].count_leafs); er1 = hipMemcpyAsync(tbr[i].leaf_x, data_ptr, sizeof(coord) * tbr[i].count_leafs, hipMemcpyHostToDevice, stream); data_ptr = tbr[i].leaf_y; er1 = hipMalloc((void**)&(tbr[i].leaf_y), sizeof(coord) * tbr[i].count_leafs); er1 = hipMemcpyAsync(tbr[i].leaf_y, data_ptr, sizeof(coord) * tbr[i].count_leafs, hipMemcpyHostToDevice, stream); data_ptr = tbr[i].leaf_number; er1 = hipMalloc((void**)&(tbr[i].leaf_number), sizeof(indexer) * tbr[i].count_leafs); er1 = hipMemcpyAsync(tbr[i].leaf_number, data_ptr, sizeof(indexer) * tbr[i].count_leafs, hipMemcpyHostToDevice, stream); data_ptr = tbr[i].merge_next_leaf; er1 = hipMalloc((void**)&(tbr[i].merge_next_leaf), sizeof(bool) * tbr[i].count_leafs); 
er1 = hipMemcpyAsync(tbr[i].merge_next_leaf, data_ptr, sizeof(bool) * tbr[i].count_leafs, hipMemcpyHostToDevice, stream); /*data_ptr = tbr[i].xsh_min; er1 = hipMalloc((void**)&(tbr[i].xsh_min), sizeof(coord) * tbr[i].count_shapes); er1 = hipMemcpyAsync(tbr[i].xsh_min, data_ptr, sizeof(coord) * tbr[i].count_shapes, hipMemcpyHostToDevice, stream); data_ptr = tbr[i].xsh_max; er1 = hipMalloc((void**)&(tbr[i].xsh_max), sizeof(coord) * tbr[i].count_shapes); er1 = hipMemcpyAsync(tbr[i].xsh_max, data_ptr, sizeof(coord) * tbr[i].count_shapes, hipMemcpyHostToDevice, stream); data_ptr = tbr[i].ysh_min; er1 = hipMalloc((void**)&(tbr[i].ysh_min), sizeof(coord) * tbr[i].count_shapes); er1 = hipMemcpyAsync(tbr[i].ysh_min, data_ptr, sizeof(coord) * tbr[i].count_shapes, hipMemcpyHostToDevice, stream); data_ptr = tbr[i].ysh_max; er1 = hipMalloc((void**)&(tbr[i].ysh_max), sizeof(coord) * tbr[i].count_shapes); er1 = hipMemcpyAsync(tbr[i].ysh_max, data_ptr, sizeof(coord) * tbr[i].count_shapes, hipMemcpyHostToDevice, stream); */ data_ptr = tbr[i].offset; er1 = hipMalloc((void**)&(tbr[i].offset), sizeof(indexer) * tbr[i].count_shapes); er1 = hipMemcpyAsync(tbr[i].offset, data_ptr, sizeof(indexer) * tbr[i].count_shapes, hipMemcpyHostToDevice, stream); } er1 = hipStreamSynchronize(stream); er1 = hipStreamDestroy(stream); clock_t t2 = clock(); printf("Time copying data to device = %u ms\n", t2 - t1); // copy branches to device struct branch *dev_br = NULL; er1 = hipMalloc((void**)&dev_br, sizeof(struct branch) * m_count_branches); er1 = hipMemcpy(dev_br, tbr, sizeof(struct branch) * m_count_branches, hipMemcpyHostToDevice); //hipMemcpyToSymbol(m_ttt_cuda_first_branch, &dev_br, sizeof(struct branch*)); //return false; alignas(16) struct node *to_dev_nd[65]; //void **to_dev_child[64]; struct node *dev_nd = NULL, *dev_nd_prev = NULL, *dev_ptr = NULL; // to_dev_nd[0] = (struct node*)aligned_alloc(16, sizeof(struct node)); // memcpy(to_dev_nd[0], nd, sizeof(struct node)); struct node* tnd = node; //for (unsigned j = 0; j <= pos; ++j) // tnd = (struct node*)(tnd->child_node[0]); //unsigned j = 0; //void* tmp1 = NULL; unsigned count = tnd->count_child_nodes, prev_count = 1; //printf("\n\n\n======================================================================\n"); for (int k1 = pos; k1 >= 0; --k1) { tnd = node; //for (unsigned j = 0; j <= k1; ++j) // tnd = (struct node*)(tnd->child_node[0]); // data child node to_dev_nd[k1] = (struct node*)aligned_alloc(16, sizeof(struct node) * count1[k1]); //memcpy(to_dev_nd[k1], tnd/*->child_node[0]*/, sizeof(struct node) * count1[k1]); memcpy(to_dev_nd[k1], stack_first_node[k1 + 1], sizeof(struct node) * count1[k1]); // pointer to child_node on host for (indexer j = 0; j < count1[k1]; ++j) { //(to_dev_nd[k1])[j]->child_node = (void**)aligned_alloc(16, sizeof(void*) * MAX_NODES); // tnd->count_child_nodes); //(to_dev_child[k1])[j] = to_dev_nd[j]->child_node; dev_ptr = NULL; er1 = hipMalloc((void**)&dev_ptr, sizeof(void*) * MAX_NODES); (to_dev_nd[k1])[j].child_node = (void**)dev_ptr; for (indexer k2 = 0; k2 < MAX_NODES; ++k2) { if (k1 == pos) { // copy pointer of branches //struct branch *ptr = &(dev_br[k2 + j * MAX_NODES]); unsigned idx = (struct branch*)((struct node*)(stack_first_node[k1 + 1])[j].child_node[k2]) - first_branch; //if (idx == 4899) // printf("%u\n", idx); struct branch *ptr = &(dev_br[idx]); er1 = hipMemcpy((void*)((to_dev_nd[k1])[j].child_node + k2), &ptr, sizeof(struct branch*), hipMemcpyHostToDevice); } else { // copy pointer of nodes //struct node* ptr = 
&(dev_nd_prev[k2 + j * MAX_NODES]); unsigned idx = (struct node*)(stack_first_node[k1 + 1])[j].child_node[k2] - (struct node*)(stack_first_node[k1 + 2]); //printf("%u\n", idx); struct node *ptr = &(dev_nd_prev[idx]); er1 = hipMemcpy((void*)((to_dev_nd[k1])[j].child_node + k2), &ptr, sizeof(struct node*), hipMemcpyHostToDevice); } } } //printf("==========================================\n\n\n"); // pointers of child nodes er1 = hipMalloc((void**)&dev_nd, sizeof(struct node) * count1[k1]); // tnd->count_child_nodes); hipMemcpy(dev_nd, to_dev_nd[k1], sizeof(struct node) * count1[k1], hipMemcpyHostToDevice); dev_nd_prev = dev_nd; } // copy top node (root) to_dev_nd[64] = (struct node*)aligned_alloc(16, sizeof(struct node)); memcpy(to_dev_nd[64], node/*->child_node[0]*/, sizeof(struct node)); dev_ptr = NULL; er1 = hipMalloc((void**)&dev_ptr, sizeof(void*) * node->count_child_nodes); (to_dev_nd[64])[0].child_node = (void**)dev_ptr; for (indexer k2 = 0; k2 < node->count_child_nodes; ++k2) { // copy pointer of nodes //struct node* ptr = &(dev_nd_prev[k2]); unsigned idx = (struct node*)(stack_first_node[0])[0].child_node[k2] - (struct node*)(stack_first_node[1]); struct node* ptr = &(dev_nd_prev[idx]); er1 = hipMemcpy((void*)((to_dev_nd[64])[0].child_node + k2), &ptr, sizeof(struct node*), hipMemcpyHostToDevice); } // pointers of child nodes er1 = hipMalloc((void**)&dev_nd, sizeof(struct node)); // tnd->count_child_nodes); er1 = hipMemcpy(dev_nd, to_dev_nd[64], sizeof(struct node), hipMemcpyHostToDevice); m_dev_node = dev_nd; printf("============== 0x%llx, 0x%llx, prev = 0x%llx\n", m_dev_node, dev_nd, dev_nd_prev); // free memory for (int k1 = pos; k1 >= 0; --k1) { _aligned_free(to_dev_nd[k1]); } _aligned_free(to_dev_nd[64]); // allocating memory for root //er1 = hipMalloc((void**)&m_dev_node, sizeof(struct node)); // copy to device root of tree //hipMemcpy(m_dev_node, to_dev_nd[0], sizeof(struct node), hipMemcpyHostToDevice); return true; } extern "C" bool destroy_cuda_device() { //hipFree(dev_threads_count); hipError_t er1 = hipDeviceReset(); return er1 == hipSuccess ? 
true : false; } #if defined(CALC_CIRCLE) || defined(CALC_POINT) /* searchin items in selected rectangle on cuda device */ extern "C" indexer* cuda_search_rect2(/*in*/struct node *nd, /*in*/coord x_min, /*in*/coord y_min, /*in*/coord x_max, /*in*/coord y_max, bool /*in*/intersection, /*out*/indexer *count_items); /* searchin items in selected rectangle on cuda device imlementation (nodes) */ __global__ void cuda_search_rect2_impl1(void **nd, indexer *iter_count, indexer *atomic_iter, /*out*/ void **next_nd); /* searchin items in selected rectangle on cuda device imlementation (branches) */ __global__ void cuda_search_rect2_impl2(void **br_ptr, indexer *atomic_iter, /*out*/ indexer *idxs); /* searching the nearest item to point in selected radius */ extern "C" indexer* cuda_search_nearest_item2(/*in*//*struct node *nd,*/ /*in*/coord x, /*in*/coord y, /*in*/coord radius, bool intersection, /*out*/coord *dist); /* searching the nearest item on device implementation (step 2) */ __global__ void cuda_search_nearest_item2_impl2(void **br_ptr, /*indexer *atomic_iter,*/ coord x, coord y, /*out*/ indexer *idxs, /*out*/ coord *dist); /* searching the nearest item on device implementation (step 3) */ __global__ void cuda_search_nearest_item2_impl3(/*in*/ indexer *idxs, /*in*/ coord *dist, /*in*/indexer count, /*in*/indexer *atomic_iter, /*out*/ indexer *idxs2, /*out*/ coord *dist2); #else extern "C" indexer* search_rect2(struct node *nd, coord x_min, coord y_min, coord x_max, coord y_max, bool intersection, /*out*/indexer *count_items) __global__ indexer* search_rect2_impl(void *nd_ptr, indexer iter_count, /*out*/indexer *count_items) #endif // CALC_POINT #if defined(CALC_CIRCLE) || defined(CALC_POINT) /* searchin items in selected rectangle on cuda device */ indexer* cuda_search_rect2(node * nd, coord x_min, coord y_min, coord x_max, coord y_max, bool intersection, indexer * count_items) #else indexer* search_rect2(struct node *nd, coord x_min, coord y_min, coord x_max, coord y_max, bool intersection, /*out*/indexer *count_items) #endif // CALC_POINT { // memory for result /*size_t mem_size = 256; size_t count_mem = 1; alignas(16) indexer* idxs = (indexer*)aligned_alloc(16, sizeof(indexer) * mem_size * count_mem); */ hipError_t er1; hipStream_t stream; hipStreamCreate(&stream); /*indexer *host_idxs = NULL, *dev_idxs = NULL; // , *dev_tmp_idxs = NULL;; hipHostMalloc((void**)&host_idxs, sizeof(indexer) * MAX_RESULTS, hipHostMallocMapped); hipHostGetDevicePointer((void**)&dev_idxs, host_idxs, 0); //hipMalloc((void**)&dev_tmp_idxs, sizeof(indexer) * MAX_RESULTS); */ indexer *dev_idxs = NULL; hipMalloc((void**)&dev_idxs, sizeof(indexer) * MAX_RESULTS); // searching hipEvent_t start, stop; float gtime = 0.0; int device_id; /*hipDeviceProp_t prop; er1 = hipGetDevice(&device_id); er1 = hipGetDeviceProperties(&prop, device_id); dim3 grid_size = dim3(prop.multiProcessorCount, 1, 1), block_size = dim3(prop.warpSize * 2, 1, 1); */ dim3 grid_size = dim3(m_multi_processor_count, 1, 1), block_size = dim3(m_warp_size2, 1, 1); // store boundaries boundaries b1; b1.intersection = intersection; b1.x_max = x_max; b1.x_min = x_min; b1.y_max = y_max; b1.y_min = y_min; //hipMalloc((void**)dev_bonds, sizeof(struct boundaries)); er1 = hipMemcpyToSymbolAsync(dev_bonds, &b1, sizeof(struct boundaries), 0, hipMemcpyHostToDevice, stream); // for store count of iterations to next step indexer *dev_atomic_iter = NULL; er1 = hipMalloc((void**)&dev_atomic_iter, sizeof(indexer)); er1 = hipMemsetAsync(dev_atomic_iter, 0, 
sizeof(indexer), stream); // store pointers for next step void **dev_ptr = NULL, **dev_ptr2 = NULL; er1 = hipMalloc((void**)&dev_ptr, sizeof(void*) * m_count_branches); //printf("======================= 0x%llx; 0x%llx, count_br = %u\n", &m_dev_node, m_dev_node, m_count_branches); void **tptr = (void**)(&m_dev_node); er1 = hipMemcpyAsync(dev_ptr, tptr, sizeof(void*), hipMemcpyHostToDevice, stream); er1 = hipMalloc((void**)&dev_ptr2, sizeof(void*) * m_count_branches); //printf("======================= 0x%llx; 0x%llx; dev_ptr = 0x%llx\n", &m_dev_node, m_dev_node, dev_ptr); // count items //indexer *dev_count_items = NULL; //er1 = hipMalloc((void**)&dev_count_items, sizeof(indexer)); // count of iterations indexer atomic_iter = 1; indexer *dev_iter_count = NULL; er1 = hipMalloc((void**)&dev_iter_count, sizeof(indexer)); //er1 = hipMemsetAsync(dev_iter_count, 0, sizeof(indexer), stream); //er1 = hipMemsetAsync(dev_iter_count, 1, 1, stream); er1 = hipMemcpyAsync(dev_iter_count, &atomic_iter, sizeof(indexer), hipMemcpyHostToDevice); er1 = hipStreamSynchronize(stream); #ifdef DEBUG_CUDA clock_t t1 = clock(); er1 = hipEventCreate(&start); er1 = hipEventCreate(&stop); er1 = hipEventRecord(start, stream); #endif // calculating nodes for (int i = 0; i < m_length_of_tree + 1; ++i) { er1 = hipMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); if (atomic_iter > m_warp_size2 /*prop.warpSize * 2 */) { unsigned t = (unsigned)ceil((double)atomic_iter / (double)(m_warp_size2 /*prop.warpSize * 2.0 */)); block_size = dim3(m_warp_size2 /*prop.warpSize * 2 */, 1, 1); grid_size = dim3(t, 1, 1); } else { grid_size = dim3(1, 1, 1); block_size = dim3(atomic_iter, 1, 1); } cuda_search_rect2_impl1 << <grid_size, block_size, 0, stream >> > ((void**)dev_ptr, dev_iter_count, dev_atomic_iter, dev_ptr2); er1 = hipMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), hipMemcpyDeviceToHost, stream); er1 = hipMemcpyAsync(dev_ptr, dev_ptr2, sizeof(void*) * atomic_iter, hipMemcpyDeviceToDevice, stream); er1 = hipMemcpyAsync(dev_iter_count, dev_atomic_iter, sizeof(indexer), hipMemcpyDeviceToDevice, stream); hipStreamSynchronize(stream); //printf("===== Iter %i: next = %u (%s)\n", i, atomic_iter, er1 == hipSuccess ? 
"true" : "false"); //hipDeviceSynchronize(); } #ifdef DEBUG_CUDA er1 = hipEventRecord(stop, stream); er1 = hipEventSynchronize(stop); er1 = hipEventElapsedTime(&gtime, start, stop); printf("Kernel 1 time = %f ms\n", gtime); #endif // calculating branches grid_size = dim3(atomic_iter, 1, 1); block_size = dim3(m_warp_size2 /*prop.warpSize * 2 */, 1, 1); #ifdef DEBUG_CUDA er1 = hipEventRecord(start, stream); #endif er1 = hipMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); cuda_search_rect2_impl2 << <grid_size, block_size, 0, stream >> > ((void**)dev_ptr, dev_atomic_iter, dev_idxs); er1 = hipMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), hipMemcpyDeviceToHost, stream); er1 = hipStreamSynchronize(stream); //er1 = hipDeviceSynchronize(); #ifdef DEBUG_CUDA er1 = hipEventRecord(stop, stream); er1 = hipEventSynchronize(stop); er1 = hipEventElapsedTime(&gtime, start, stop); printf("Kernel 2 time = %f ms\n", gtime); clock_t t2 = clock(); printf("All kernels time = %i ms\n", t2 - t1); #endif er1 = hipMemcpyAsync(count_items, dev_atomic_iter, sizeof(indexer), hipMemcpyDeviceToHost, stream); indexer *idxs = (indexer*)aligned_alloc(16, sizeof(indexer) * *count_items); //er1 = hipMemcpyAsync(idxs, host_idxs, sizeof(indexer) * *count_items, hipMemcpyHostToHost, stream); er1 = hipMemcpyAsync(idxs, dev_idxs, sizeof(indexer) * *count_items, hipMemcpyDeviceToHost, stream); er1 = hipStreamSynchronize(stream); #ifdef DEBUG_CUDA printf("Total results from device = %u\n", *count_items); #endif // freeing and destroying hipStreamDestroy(stream); er1 = hipFree(dev_iter_count); er1 = hipFree(dev_ptr); er1 = hipFree(dev_ptr2); //er1 = hipFree(dev_tmp_idxs); //er1 = hipFree(dev_count_items); er1 = hipFree(dev_atomic_iter); //er1 = hipHostFree(host_idxs); er1 = hipFree(dev_idxs); #ifdef DEBUG_CUDA er1 = hipEventDestroy(stop); er1 = hipEventDestroy(start); #endif #ifdef PACK_RESULTS if (*count_items) { qsort(idxs, *count_items, sizeof(indexer), cmp); indexer j = 1; indexer offset = 0; for (indexer i = 0; i < *count_items - 1 - offset; ++i) { //if (idxs[i] == 3617359) // idxs[i] = 3617359; if (idxs[i] == idxs[i + 1 + offset]) { offset++; idxs[i + 1] = idxs[i + 1 + offset]; i--; continue; } if (offset) idxs[i + 1] = idxs[i + 1 + offset]; j++; } *count_items = j; idxs = (indexer*)_aligned_realloc(idxs, sizeof(indexer) * j, 16); } #endif return idxs; } /* searchin items in selected rectangle on cuda device imlementation (step 1) */ __global__ void cuda_search_rect2_impl1(void **nd_ptr, indexer *iter_count, indexer *atomic_iter, /*out*/ void** next_nd) { int idxx = threadIdx.x; // to temporary store node index /*__shared__ indexer store[64]; store[threadIdx.x] = (indexer)-1; __shared__ int store_idx[1]; if (!threadIdx.x) store_idx[threadIdx.x] = 0; */ struct node** nd = (struct node**)nd_ptr; //indexer idx = 0; #ifdef CALC_POINT //coord tmp_dist = FLT_MAX; //indexer tmp_idx = -1; #endif // CALC_POINT indexer curr_indexer = idxx + blockIdx.x * blockDim.x; // (*dev_threads_count); if (curr_indexer < *iter_count) { struct node *curr_nd = nd[curr_indexer]; __shared__ coord nd_x1[64], nd_x2[64], nd_y1[64], nd_y2[64]; nd_x1[threadIdx.x] = curr_nd->x1; nd_x2[threadIdx.x] = curr_nd->x2; nd_y1[threadIdx.x] = curr_nd->y1; nd_y2[threadIdx.x] = curr_nd->y2; // node in bounrary or bounrary in node if (nd_x1[threadIdx.x] <= dev_bonds->x_max && nd_x2[threadIdx.x] >= dev_bonds->x_min && nd_y1[threadIdx.x] <= dev_bonds->y_max && nd_y2[threadIdx.x] >= dev_bonds->y_min) { // node isn't fully in the boundary, than add to 
calculation to next iteration indexer t3 = atomicAdd(atomic_iter, curr_nd->count_child_nodes); //printf("Increase %i: %u to %u (%u)\n", idxx, t1, *atomic_iter, nd[idxx]->count_child_nodes); for (unsigned k = t3, t2 = 0; k < t3 + curr_nd->count_child_nodes; ++k, ++t2) { next_nd[k] = curr_nd->child_node[t2]; //printf("Next index = %u\n", (struct branch*)(nd[curr_indexer]->child_node[t2]) - m_ttt_cuda_first_branch); } } else { // node and boundary isn't intersection } } } /* searchin items in selected rectangle on cuda device imlementation (step 1) */ __global__ void cuda_search_rect2_impl2(void **br_ptr, indexer *atomic_iter, /*out*/ indexer *idxs) { int idxx = threadIdx.x; int idx_gr_br = blockIdx.x; // for store temporary results __shared__ indexer temp_res[65]; // must be as blockDim.x size + 1 (for rpevious result) __shared__ char temp_res_flag[64]; temp_res[idxx] = (indexer)-1; temp_res_flag[idxx] = -1; if (!idxx) temp_res[64] = (indexer)-1; __shared__ coord leaf_x[65]; __shared__ coord leaf_y[65]; struct branch** br = (struct branch**)br_ptr; struct branch *curr_br = br[idx_gr_br]; __syncthreads(); __shared__ indexer start_num[1]; if (!idxx) start_num[0] = curr_br->leaf_number[0]; //start_num[0] = ((branch*)((struct branch**)br_ptr)[idx_gr_br])->leaf_number[0]; __syncthreads(); //if (start_num[0] != ((branch*)((struct branch**)br_ptr))->leaf_number[0]) { // start_num[0] = ((branch*)((struct branch**)br_ptr)[idx_gr_br])->leaf_number[0]; //} if (curr_br->x_min <= dev_bonds->x_max && curr_br->x_max >= dev_bonds->x_min && curr_br->y_min <= dev_bonds->y_max && curr_br->y_max >= dev_bonds->y_min) { int t = (int)ceilf((float)curr_br->count_leafs / (float)blockDim.x); for (int j = 0; j < t; ++j) { int curr_idx = idxx + j * blockDim.x; // curr_offset; if (/*j == t1 && */curr_idx < curr_br->count_leafs) { // loading frequantly using data leaf_x[idxx] = curr_br->leaf_x[curr_idx]; leaf_y[idxx] = curr_br->leaf_y[curr_idx]; if (!idxx && curr_idx + 64 < curr_br->count_leafs && curr_br->merge_next_leaf[curr_idx + 63]) { leaf_x[64] = curr_br->leaf_x[curr_idx + 64]; leaf_y[64] = curr_br->leaf_y[curr_idx + 64]; } //if (curr_br->leaf_number[curr_idx] == 3617359) // curr_idx = curr_idx; // check points to enter in boundary if (leaf_x[idxx] >= dev_bonds->x_min && leaf_x[idxx] <= dev_bonds->x_max && leaf_y[idxx] >= dev_bonds->y_min && leaf_y[idxx] <= dev_bonds->y_max) { temp_res[idxx] = curr_br->leaf_number[curr_idx]; } else if(dev_bonds->intersection) { bool fl1 = false; if (curr_br->merge_next_leaf[curr_idx]) { // last check: intersection // side 1/2 fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1], dev_bonds->x_min, dev_bonds->y_min, dev_bonds->x_max, dev_bonds->y_min); // side 2/3 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1], dev_bonds->x_max, dev_bonds->y_min, dev_bonds->x_max, dev_bonds->y_max); } // side 3/4 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1], dev_bonds->x_max, dev_bonds->y_max, dev_bonds->x_min, dev_bonds->y_max); } // side 4/1 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1], dev_bonds->x_min, dev_bonds->y_max, dev_bonds->x_min, dev_bonds->y_min); } } else { indexer curr_num = curr_br->offset[curr_br->leaf_number[curr_idx] - start_num[0]]; __shared__ coord leaf_x_offset[64]; __shared__ coord leaf_y_offset[64]; leaf_x_offset[idxx] = curr_br->leaf_x[curr_num]; 
leaf_y_offset[idxx] = curr_br->leaf_y[curr_num]; // side 1/2 fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx], dev_bonds->x_min, dev_bonds->y_min, dev_bonds->x_max, dev_bonds->y_min); // side 2/3 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx], dev_bonds->x_max, dev_bonds->y_min, dev_bonds->x_max, dev_bonds->y_max); } // side 3/4 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx], dev_bonds->x_max, dev_bonds->y_max, dev_bonds->x_min, dev_bonds->y_max); } // side 4/1 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx], dev_bonds->x_min, dev_bonds->y_max, dev_bonds->x_min, dev_bonds->y_min); } } if (fl1) temp_res[idxx] = curr_br->leaf_number[curr_idx]; } __syncthreads(); // packing temporary results if (temp_res[idxx] == temp_res[idxx + 1]) { __threadfence(); temp_res[idxx + 1] = -1; } else { //__threadfence(); } __syncthreads(); if (!idxx) { if (temp_res[64] == temp_res[0]) { temp_res[0] = -1; } } //__syncthreads(); // store temporary results to global array2 if (temp_res[idxx] != -1) { int t2 = atomicAdd(atomic_iter, 1); if (t2 >= MAX_RESULTS - 1) { // can not store result atomicSub(atomic_iter, 1); } else { // can store result (idxs2 - temporary) idxs[t2] = temp_res[idxx]; temp_res_flag[idxx] = idxx; } } __syncthreads(); // store previous result for (int t2 = blockDim.x / 2; t2 > 0; t2 >>= 1) { if (idxx < t2) { if (temp_res_flag[idxx] < temp_res_flag[idxx + t2]) temp_res_flag[idxx] = temp_res_flag[idxx + t2]; } __syncthreads(); } if (!idxx) { if (temp_res_flag[idxx] != -1) { temp_res[64] = temp_res[temp_res_flag[idxx]]; // idxs[idxx]; } } // reset temporary resulats temp_res[idxx] = (indexer)-1; //temp_res2[idxx] = -1; temp_res_flag[idxx] = -1; __syncthreads(); } } } } __device__ bool cuda_check_intersection(coord p1x, coord p1y, coord p2x, coord p2y, coord p3x, coord p3y, coord p4x, coord p4y) { coord x4x3 = p4x - p3x; coord y4y3 = p4y - p3y; coord x1x3 = p1x - p3x; coord y1y3 = p1y - p3y; coord x2x3 = p2x - p3x; coord y2y3 = p2y - p3y; coord x2x1 = p2x - p1x; coord y2y1 = p2y - p1y; coord x3x1 = p3x - p1x; coord y3y1 = p3y - p1y; coord x4x1 = p4x - p1x; coord y4y1 = p4y - p1y; coord v1 = x4x3 * y1y3 - x1x3 * y4y3; coord v2 = x4x3 * y2y3 - x2x3 * y4y3; coord v3 = x2x1 * y3y1 - x3x1 * y2y1; coord v4 = x2x1 * y4y1 - x4x1 * y2y1; coord v1t = v1 * v2; coord v2t = v3 * v4; //if ((signbit(v1t) || v1t == 0.0) && (signbit(v2t) || v2t == 0.0)) { if (v1t <= 0.0 && v2t <= 0.0) { return true; } return false; } /* searching the nearest item to point in selected radius */ indexer* cuda_search_nearest_item2(/*in*//*struct node *nd,*/ /*in*/coord x, /*in*/coord y, /*in*/coord radius, bool intersection, /*out*/coord *dist) { hipError_t er1; hipStream_t stream; hipStreamCreate(&stream); // searching hipEvent_t start, stop; float gtime = 0.0; dim3 grid_size = dim3(m_multi_processor_count, 1, 1), block_size = dim3(m_warp_size2, 1, 1); // store boundaries boundaries b1; b1.intersection = intersection; b1.x_max = x + radius; b1.x_min = x - radius; b1.y_max = y + radius; b1.y_min = y - radius; //hipMalloc((void**)dev_bonds, sizeof(struct boundaries)); er1 = hipMemcpyToSymbolAsync(dev_bonds, &b1, sizeof(struct boundaries), 0, hipMemcpyHostToDevice, stream); // for store count of iterations to next step indexer *dev_atomic_iter = NULL; er1 = hipMalloc((void**)&dev_atomic_iter, 
sizeof(indexer)); er1 = hipMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); // store pointers for next step void **dev_ptr = NULL, **dev_ptr2 = NULL; er1 = hipMalloc((void**)&dev_ptr, sizeof(void*) * m_count_branches); //printf("======================= 0x%llx; 0x%llx, count_br = %u\n", &m_dev_node, m_dev_node, m_count_branches); void **tptr = (void**)(&m_dev_node); er1 = hipMemcpyAsync(dev_ptr, tptr, sizeof(void*), hipMemcpyHostToDevice, stream); er1 = hipMalloc((void**)&dev_ptr2, sizeof(void*) * m_count_branches); //printf("======================= 0x%llx; 0x%llx; dev_ptr = 0x%llx\n", &m_dev_node, m_dev_node, dev_ptr); // count items //indexer *dev_count_items = NULL; //er1 = hipMalloc((void**)&dev_count_items, sizeof(indexer)); // count of iterations indexer atomic_iter = 1; indexer *dev_iter_count = NULL; er1 = hipMalloc((void**)&dev_iter_count, sizeof(indexer)); er1 = hipMemcpyAsync(dev_iter_count, &atomic_iter, sizeof(indexer), hipMemcpyHostToDevice); er1 = hipStreamSynchronize(stream); #ifdef DEBUG_CUDA clock_t t1 = clock(); er1 = hipEventCreate(&start); er1 = hipEventCreate(&stop); er1 = hipEventRecord(start, stream); #endif // calculating nodes for (int i = 0; i < m_length_of_tree + 1; ++i) { er1 = hipMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); if (atomic_iter > m_warp_size2 /*prop.warpSize * 2 */) { unsigned t = (unsigned)ceil((double)atomic_iter / (double)(m_warp_size2 /*prop.warpSize * 2.0 */)); block_size = dim3(m_warp_size2 /*prop.warpSize * 2 */, 1, 1); grid_size = dim3(t, 1, 1); } else { grid_size = dim3(1, 1, 1); block_size = dim3(atomic_iter, 1, 1); } cuda_search_rect2_impl1 << <grid_size, block_size, 0, stream >> > ((void**)dev_ptr, dev_iter_count, dev_atomic_iter, dev_ptr2); er1 = hipMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), hipMemcpyDeviceToHost, stream); er1 = hipMemcpyAsync(dev_ptr, dev_ptr2, sizeof(void*) * atomic_iter, hipMemcpyDeviceToDevice, stream); er1 = hipMemcpyAsync(dev_iter_count, dev_atomic_iter, sizeof(indexer), hipMemcpyDeviceToDevice, stream); hipStreamSynchronize(stream); //printf("===== Iter %i: next = %u (%s)\n", i, atomic_iter, er1 == hipSuccess ? 
"true" : "false"); //hipDeviceSynchronize(); } #ifdef DEBUG_CUDA er1 = hipEventRecord(stop, stream); er1 = hipEventSynchronize(stop); er1 = hipEventElapsedTime(&gtime, start, stop); printf("Kernel 1 time = %f ms\n", gtime); #endif // calculating branches grid_size = dim3(atomic_iter, 1, 1); block_size = dim3(m_warp_size2 /*prop.warpSize * 2 */, 1, 1); #ifdef DEBUG_CUDA er1 = hipEventRecord(start, stream); #endif indexer *dev_idxs = NULL; hipMalloc((void**)&dev_idxs, sizeof(indexer) * atomic_iter); //hipMemsetAsync(dev_idxs, 0, sizeof(indexer), stream); coord *dev_dist = NULL; hipMalloc((void**)&dev_dist, sizeof(coord) * atomic_iter); //hipMemsetAsync(dev_dist, 0, sizeof(coord), stream); //er1 = hipMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); cuda_search_nearest_item2_impl2 << <grid_size, block_size, 0, stream >> > ((void**)dev_ptr, /*dev_atomic_iter,*/ x, y, dev_idxs, dev_dist); //er1 = hipMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), hipMemcpyDeviceToHost, stream); er1 = hipStreamSynchronize(stream); //er1 = hipDeviceSynchronize(); #ifdef DEBUG_CUDA er1 = hipEventRecord(stop, stream); er1 = hipEventSynchronize(stop); er1 = hipEventElapsedTime(&gtime, start, stop); printf("Kernel 2 time = %f ms\n", gtime); clock_t t2 = clock(); printf("All kernels time = %i ms\n", t2 - t1); #endif grid_size = dim3(m_multi_processor_count, 1, 1), block_size = dim3(m_warp_size2 / 2, 1, 1); indexer *dev_idxs2 = NULL; hipMalloc((void**)&dev_idxs2, sizeof(indexer) * (size_t)ceil((double)atomic_iter / (double)m_warp_size2)); coord *dev_dist2 = NULL; hipMalloc((void**)&dev_dist2, sizeof(coord) * (size_t)ceil((double)atomic_iter / (double)m_warp_size2)); while (atomic_iter > 1) { er1 = hipMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); cuda_search_nearest_item2_impl3 << <grid_size, block_size, 0, stream >> > (dev_idxs, dev_dist, atomic_iter, dev_atomic_iter, dev_idxs2, dev_dist2); er1 = hipMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), hipMemcpyDeviceToHost, stream); er1 = hipMemcpyAsync(dev_idxs, dev_idxs2, sizeof(indexer) * atomic_iter, hipMemcpyDeviceToDevice, stream); er1 = hipMemcpyAsync(dev_dist, dev_dist2, sizeof(coord) * atomic_iter, hipMemcpyDeviceToDevice, stream); hipStreamSynchronize(stream); } er1 = hipMemcpyAsync(dist, dev_dist, sizeof(coord), hipMemcpyDeviceToHost, stream); indexer *idxs = (indexer*)aligned_alloc(16, sizeof(indexer) * 1); //er1 = hipMemcpyAsync(idxs, host_idxs, sizeof(indexer) * *count_items, hipMemcpyHostToHost, stream); /*if (*dist == (coord)0.0) idxs[0] = (indexer)-1; else*/ er1 = hipMemcpyAsync(idxs, dev_idxs, sizeof(indexer) * 1, hipMemcpyDeviceToHost, stream); er1 = hipStreamSynchronize(stream); #ifdef DEBUG_CUDA printf("Total results from device = %u\n", 1); #endif // freeing and destroying hipStreamDestroy(stream); er1 = hipFree(dev_iter_count); er1 = hipFree(dev_ptr); er1 = hipFree(dev_ptr2); //er1 = hipFree(dev_tmp_idxs); //er1 = hipFree(dev_count_items); er1 = hipFree(dev_atomic_iter); //er1 = hipHostFree(host_idxs); er1 = hipFree(dev_idxs); #ifdef DEBUG_CUDA er1 = hipEventDestroy(stop); er1 = hipEventDestroy(start); #endif return idxs; } /* searching the nearest item on device implementation */ __global__ void cuda_search_nearest_item2_impl2(void **br_ptr, /*indexer *atomic_iter,*/ coord x, coord y, /*out*/ indexer *idxs, /*out*/ coord *dist) { int idxx = threadIdx.x; int idx_gr_br = blockIdx.x; // for store temporary results __shared__ indexer temp_res[65]; // must be as blockDim.x size + 1 (for rpevious result) 
//__shared__ char temp_res_flag[64]; __shared__ coord curr_dist[65]; //temp_res_flag[idxx] = -1; if (!idxx) { temp_res[64] = (indexer)-1; curr_dist[64] = FLT_MAX; } struct branch** br = (struct branch**)br_ptr; struct branch *curr_br = br[idx_gr_br]; __syncthreads(); __shared__ indexer start_num[1]; if (!idxx) start_num[0] = curr_br->leaf_number[0]; __syncthreads(); if (curr_br->x_min <= dev_bonds->x_max && curr_br->x_max >= dev_bonds->x_min && curr_br->y_min <= dev_bonds->y_max && curr_br->y_max >= dev_bonds->y_min) { int t = (int)ceilf((float)curr_br->count_leafs / (float)blockDim.x); for (int j = 0; j < t; ++j) { curr_dist[idxx] = FLT_MAX; temp_res[idxx] = (indexer)-1; int curr_idx = idxx + j * blockDim.x; // curr_offset; if (/*j == t1 && */curr_idx < curr_br->count_leafs) { // loading frequantly using data __shared__ coord leaf_x[65]; __shared__ coord leaf_y[65]; leaf_x[idxx] = curr_br->leaf_x[curr_idx]; leaf_y[idxx] = curr_br->leaf_y[curr_idx]; if (!idxx && curr_idx + 64 < curr_br->count_leafs && curr_br->merge_next_leaf[curr_idx + 63]) { leaf_x[64] = curr_br->leaf_x[curr_idx + 64]; leaf_y[64] = curr_br->leaf_y[curr_idx + 64]; } // calculating distances if (curr_br->merge_next_leaf[curr_idx]) { curr_dist[idxx] = cuda_distance(x, y, leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1]); } else { indexer curr_num = curr_br->offset[curr_br->leaf_number[curr_idx] - start_num[0]]; __shared__ coord leaf_x_offset[64]; __shared__ coord leaf_y_offset[64]; leaf_x_offset[idxx] = curr_br->leaf_x[curr_num]; leaf_y_offset[idxx] = curr_br->leaf_y[curr_num]; curr_dist[idxx] = cuda_distance(x, y, leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx]); } temp_res[idxx] = curr_br->leaf_number[curr_idx]; __syncthreads(); // find min distance for (int k = blockDim.x / 2; k > 0; k >>= 1) { if (idxx < k) { if (curr_dist[idxx] > curr_dist[idxx + k]) { curr_dist[idxx] = curr_dist[idxx + k]; temp_res[idxx] = temp_res[idxx + k]; } } } // check previous result if (!idxx) { if (curr_dist[64] > curr_dist[0]) { curr_dist[64] = curr_dist[0]; temp_res[64] = temp_res[0]; } } __syncthreads(); } } } __syncthreads(); if (!idxx) { dist[idx_gr_br] = curr_dist[64]; idxs[idx_gr_br] = temp_res[64]; //printf("GGGPU (%i) idx = %u, dist = %e\n", idx_gr_br, temp_res[64], curr_dist[64]); } } /* calculating distance between point and line */ __device__ coord cuda_distance(coord px, coord py, coord line_p0x, coord line_p0y, coord line_p1x, coord line_p1y) { coord vx, vy, wx, wy, c1, c2, b, pbx, pby; vx = line_p1x - line_p0x; vy = line_p1y - line_p0y; wx = px - line_p0x; wy = py - line_p0y; c1 = vx * wx + vy * wy; if (c1 <= 0) { //coord t1 = p->x - line_p0->x; //coord t2 = p->y - line_p0->y; coord t1 = wx; coord t2 = wy; return (coord)sqrt(t1 * t1 + t2 * t2); } c2 = vx * vx + vy * vy; if (c2 <= c1) { //return sqrt(pow(fabs(p->x - line_p1->x), 2) + pow(fabs(p->y - line_p1->y), 2)); coord t1 = px - line_p1x; coord t2 = py - line_p1y; return (coord)sqrt(t1 * t1 + t2 * t2); } b = c1 / c2; pbx = line_p0x + b * vx; pby = line_p0y + b * vy; //return sqrt(pow(fabs(p->x - pbx), 2) + pow(fabs(p->y - pby), 2)); coord t1 = px - pbx; coord t2 = py - pby; return (coord)sqrt(t1 * t1 + t2 * t2); } /* searching the nearest item on device implementation (step 3) */ __global__ void cuda_search_nearest_item2_impl3(/*in*/ indexer *idxs, /*in*/ coord *dist, /*in*/indexer count, /*in*/indexer *atomic_iter, /*out*/ indexer *idxs2, /*out*/ coord *dist2) { indexer idxx = threadIdx.x; //indexer thr_index = idxx + blockIdx.x * 
blockDim.x; //if (thr_index > count) { // return; //} __shared__ coord d[64]; d[idxx] = FLT_MAX; d[idxx + 32] = FLT_MAX; int block_size = gridDim.x * blockDim.x * 2; int c1 = (int)ceilf((double)count / (double)block_size); for (int i = 0; i < c1; ++i) { // find min distance indexer offset = blockIdx.x * blockDim.x * 2 + i * block_size; indexer curr_index = idxx + offset; //if (curr_index + 32 > count) // break; if (curr_index < count) d[idxx] = dist[curr_index]; if (curr_index + 32 < count) d[idxx + 32] = dist[curr_index + 32]; __syncthreads(); for (int k = blockDim.x /* * 2 / 2 */; k > 0; k >>= 1) { if (idxx < k) { //if (dist[curr_index] > dist[curr_index + k]) { if (d[idxx] > d[idxx + k]) { //dist[curr_index] = dist[curr_index + k]; d[idxx] = d[idxx + k]; idxs[curr_index] = idxs[curr_index + k]; } } else { break; } __syncthreads(); /*if (!idxx) printf("%i, k = %i, idx = %u:\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n", blockIdx.x, k, curr_index, dist[curr_index], dist[curr_index + 1], dist[curr_index + 2], dist[curr_index + 3], dist[curr_index + 4], dist[curr_index + 5], dist[curr_index + 6], dist[curr_index + 7], dist[curr_index + 8], dist[curr_index + 9], dist[curr_index + 10], dist[curr_index + 11], dist[curr_index + 12], dist[curr_index + 13], dist[curr_index + 14], dist[curr_index + 15], dist[curr_index + 16], dist[curr_index + 17], dist[curr_index + 18], dist[curr_index + 19], dist[curr_index + 20], dist[curr_index + 21], dist[curr_index + 22], dist[curr_index + 23], dist[curr_index + 24], dist[curr_index + 25], dist[curr_index + 26], dist[curr_index + 27], dist[curr_index + 28], dist[curr_index + 29], dist[curr_index + 30], dist[curr_index + 31], dist[curr_index + 32], dist[curr_index + 33], dist[curr_index + 34], dist[curr_index + 35], dist[curr_index + 36], dist[curr_index + 37], dist[curr_index + 38], dist[curr_index + 39], dist[curr_index + 40], dist[curr_index + 41], dist[curr_index + 42], dist[curr_index + 43], dist[curr_index + 44], dist[curr_index + 45], dist[curr_index + 46], dist[curr_index + 47], dist[curr_index + 48], dist[curr_index + 49], dist[curr_index + 50], dist[curr_index + 51], dist[curr_index + 52], dist[curr_index + 53], dist[curr_index + 54], dist[curr_index + 55], dist[curr_index + 56], dist[curr_index + 57], dist[curr_index + 58], dist[curr_index + 59], dist[curr_index + 60], dist[curr_index + 61], dist[curr_index + 62], dist[curr_index + 63]); */ } __syncthreads(); if (!idxx && curr_index < count) { int t = atomicAdd(atomic_iter, 1); idxs2[t] = idxs[offset]; //dist2[t] = dist[offset]; dist2[t] = d[0]; } } }
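For reference, the geometric core of the search kernels above is the sign test in cuda_check_intersection: two segments intersect when each segment's endpoints lie on opposite sides of (or on) the line through the other segment, which the device code evaluates with four cross products. The snippet below is a host-side restatement of that same test, not part of the dataset; the coord alias is assumed to stand in for the project's floating-point typedef, and the sample segments are made up for illustration.

#include <cstdio>

typedef float coord;  // assumption: stand-in for the project's coord typedef

// Same opposite-sides cross-product test as cuda_check_intersection, restated for the host.
static bool segments_intersect(coord p1x, coord p1y, coord p2x, coord p2y,
                               coord p3x, coord p3y, coord p4x, coord p4y) {
    coord v1 = (p4x - p3x) * (p1y - p3y) - (p1x - p3x) * (p4y - p3y);
    coord v2 = (p4x - p3x) * (p2y - p3y) - (p2x - p3x) * (p4y - p3y);
    coord v3 = (p2x - p1x) * (p3y - p1y) - (p3x - p1x) * (p2y - p1y);
    coord v4 = (p2x - p1x) * (p4y - p1y) - (p4x - p1x) * (p2y - p1y);
    return v1 * v2 <= (coord)0.0 && v3 * v4 <= (coord)0.0;
}

int main() {
    // Horizontal segment crossing the left edge of the unit square: expect 1 (true).
    printf("%d\n", segments_intersect((coord)-0.5, (coord)0.5, (coord)0.5, (coord)0.5,
                                      (coord)0.0, (coord)0.0, (coord)0.0, (coord)1.0));
    return 0;
}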
cc38b789800fcab6dce40a3aa7b547f7f83b326a.cu
/* */ #include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> #include <math.h> #include <float.h> //#include <cuda_runtime.h> #include "unimem.h" #include "first.h" //#define DEBUG_CUDA #define MAX_RESULTS 100000 //#define PACK_RESULTS struct boundaries { coord x_min; coord y_min; coord x_max; coord y_max; bool intersection; }; struct node* m_dev_node = NULL; __constant__ boundaries dev_bonds[1]; __constant__ unsigned dev_threads_count[1]; unsigned m_threads_count; indexer m_count_branches; int m_length_of_tree = 0; int m_multi_processor_count = 1; int m_warp_size2 = 64; //__constant__ struct branch *m_ttt_cuda_first_branch = NULL; __device__ bool cuda_check_intersection(coord p1x, coord p1y, coord p2x, coord p2y, coord p3x, coord p3y, coord p4x, coord p4y); __device__ coord cuda_distance(coord px, coord py, coord line_p0x, coord line_p0y, coord line_p1x, coord line_p1y); #ifdef PACK_RESULTS /// compare int cmp(const void* a, const void* b) { return (int)(*(indexer*)a - *(indexer*)b); } #endif extern "C" bool init_cuda_device(int deviceID, struct node* node) { if (!node) return false; //return false; struct node *nd = node; unsigned count1[64], i = 0; m_count_branches = 0; for (int j = 0; j < 64; j++) count1[j] = 0; //count1[0] = 1; struct node *stack_node[64]; int stack_pos = 0; indexer stack_idx[64]; alignas(16) struct branch *first_branch = NULL; alignas(16) struct node* stack_first_node[64]; for (unsigned i = 0; i < 64; ++i) { stack_first_node[i] = NULL; } while (i < nd->count_child_nodes) { if (!stack_first_node[stack_pos] || nd < stack_first_node[stack_pos]) stack_first_node[stack_pos] = nd; if (nd->is_last_node) { for (unsigned j = 0; j < nd->count_child_nodes; ++j) { struct branch *br = (struct branch*)(nd->child_node[j]); if (!first_branch || br < first_branch) first_branch = br; } /*if (!count_br) count_br = nd->count_child_nodes; if (count_br != nd->count_child_nodes) printf("Branches %u vs %u\n", count_br, nd->count_child_nodes);*/ m_count_branches += nd->count_child_nodes; // return from stack while (stack_pos > 0) { stack_pos--; nd = stack_node[stack_pos]; i = stack_idx[stack_pos] + 1; if (i < nd->count_child_nodes) { stack_idx[stack_pos] = i; stack_node[stack_pos] = nd; stack_pos++; nd = (struct node*)nd->child_node[i]; i = 0; break; } else { //if (count1[stack_pos]) { //if (count1[stack_pos] != nd->count_child_nodes) { // printf("Nodes %u vs %u\n", count1[stack_pos], nd->count_child_nodes); //} count1[stack_pos] += nd->count_child_nodes; //} //else { // count1[stack_pos] = nd->count_child_nodes; //} } } } else { stack_idx[stack_pos] = i; stack_node[stack_pos] = nd; stack_pos++; i = 0; nd = (struct node*)nd->child_node[i]; /*if (!count1[stack_pos]) count1[stack_pos] = nd->count_child_nodes; else count1[stack_pos] += nd->count_child_nodes;*/ } /*} else if (i < nd->count_child_nodes) { i++;*/ /*if (!count1[stack_pos]) count1[stack_pos] = nd->count_child_nodes; else count1[stack_pos] += nd->count_child_nodes; */ } //return false; int deviceCount; cudaError_t er1 = cudaGetDeviceCount(&deviceCount); printf("DevicecheckCudaErrors Count: %d\n", deviceCount); if (deviceID == -1) deviceID = 0; cudaDeviceProp prop; for (int ii = 0; ii < deviceCount; ++ii) { er1 = cudaGetDeviceProperties(&prop, ii); if (prop.major < 2 || prop.canMapHostMemory != 1) { printf("ERROR: calculation requires GPU devices with compute SM 2.0 or higher, or can not using MapHostMemory.\n"); printf("Current GPU device has compute SM%d.%d, Exiting...", prop.major, prop.minor); //exit(EXIT_WAIVED); 
return false; } printf("GPU device name is %s\n", prop.name); printf("GPU total memory = %.0f Mb\n", prop.totalGlobalMem / 1024.0 / 1024.0); printf("Number of multiprocessors on the device = %u\n", prop.multiProcessorCount); } er1 = cudaSetDevice(deviceID); cudaSetDeviceFlags(cudaDeviceMapHost); er1 = cudaGetDeviceProperties(&prop, deviceID); m_multi_processor_count = prop.multiProcessorCount; m_warp_size2 = prop.warpSize * 2; m_threads_count = prop.multiProcessorCount * prop.warpSize * 2; //er1 = cudaMalloc((void**)&dev_threads_count, sizeof(unsigned)); er1 = cudaMemcpyToSymbol(dev_threads_count, &m_threads_count, sizeof(unsigned)); // copy rtree int pos = 63; for (; pos >= 0; --pos) { if (count1[pos]) break; } m_length_of_tree = pos + 1; // allocationg memory for branches alignas(16) struct branch* tbr = (struct branch*)aligned_alloc(16, sizeof(struct branch) * m_count_branches); // struct branch* first_branch = NULL; /*nd = node; i = 0; unsigned k = 0; while (i < nd->count_child_nodes) { if (nd->is_last_node) { for (unsigned j = 0; j < nd->count_child_nodes; ++j) { struct branch *br = (struct branch*)(nd->child_node[j]); //if (!first_branch || br < first_branch) // first_branch = br; memcpy(tbr + k, br, sizeof(struct branch)); k++; } while (stack_pos > 0) { stack_pos--; nd = stack_node[stack_pos]; i = stack_idx[stack_pos] + 1; if (i < nd->count_child_nodes) { // insert to stack stack_idx[stack_pos] = i; stack_node[stack_pos] = nd; stack_pos++; nd = (struct node*)nd->child_node[i]; i = 0; break; } } } else { // insert to stack stack_idx[stack_pos] = i; stack_node[stack_pos] = nd; stack_pos++; nd = (struct node*)nd->child_node[i]; } }*/ /*nd = node; i = 0; for (int j = 0; j <= pos; ++j) nd = (struct node*)(nd->child_node[0]); for (indexer j = 0; j < count1[pos]; ++j) { for (indexer k = 0; k < nd[j].count_child_nodes; ++k) { memcpy(tbr + i, nd[j].child_node[k], sizeof(struct branch)); i++; } } */ memcpy(tbr, first_branch, sizeof(struct branch) * m_count_branches); // for debug /*printf("\n\n\n======================================================================\n"); for (indexer i = 0; i < count1[pos]; ++i) { if ((struct node*)(stack_first_node[pos + 1])[i].is_last_node) { unsigned tt = ((struct node*)(stack_first_node[pos + 1]))[i].count_child_nodes; for (indexer ii = 0; ii < tt; ++ii) { unsigned idx = (struct branch*)((struct node*)(stack_first_node[pos + 1])[i].child_node[ii]) - first_branch; if (!idx) printf("0\n"); else printf("%u\n", idx); } } else { printf("Error last node %u\n", i); } }*/ // copy data of branches to device clock_t t1 = clock(); cudaStream_t stream; cudaStreamCreate(&stream); for (indexer i = 0; i < m_count_branches; ++i) { void *data_ptr = tbr[i].leaf_x; er1 = cudaMalloc((void**)&(tbr[i].leaf_x), sizeof(coord) * tbr[i].count_leafs); er1 = cudaMemcpyAsync(tbr[i].leaf_x, data_ptr, sizeof(coord) * tbr[i].count_leafs, cudaMemcpyHostToDevice, stream); data_ptr = tbr[i].leaf_y; er1 = cudaMalloc((void**)&(tbr[i].leaf_y), sizeof(coord) * tbr[i].count_leafs); er1 = cudaMemcpyAsync(tbr[i].leaf_y, data_ptr, sizeof(coord) * tbr[i].count_leafs, cudaMemcpyHostToDevice, stream); data_ptr = tbr[i].leaf_number; er1 = cudaMalloc((void**)&(tbr[i].leaf_number), sizeof(indexer) * tbr[i].count_leafs); er1 = cudaMemcpyAsync(tbr[i].leaf_number, data_ptr, sizeof(indexer) * tbr[i].count_leafs, cudaMemcpyHostToDevice, stream); data_ptr = tbr[i].merge_next_leaf; er1 = cudaMalloc((void**)&(tbr[i].merge_next_leaf), sizeof(bool) * tbr[i].count_leafs); er1 = 
cudaMemcpyAsync(tbr[i].merge_next_leaf, data_ptr, sizeof(bool) * tbr[i].count_leafs, cudaMemcpyHostToDevice, stream); /*data_ptr = tbr[i].xsh_min; er1 = cudaMalloc((void**)&(tbr[i].xsh_min), sizeof(coord) * tbr[i].count_shapes); er1 = cudaMemcpyAsync(tbr[i].xsh_min, data_ptr, sizeof(coord) * tbr[i].count_shapes, cudaMemcpyHostToDevice, stream); data_ptr = tbr[i].xsh_max; er1 = cudaMalloc((void**)&(tbr[i].xsh_max), sizeof(coord) * tbr[i].count_shapes); er1 = cudaMemcpyAsync(tbr[i].xsh_max, data_ptr, sizeof(coord) * tbr[i].count_shapes, cudaMemcpyHostToDevice, stream); data_ptr = tbr[i].ysh_min; er1 = cudaMalloc((void**)&(tbr[i].ysh_min), sizeof(coord) * tbr[i].count_shapes); er1 = cudaMemcpyAsync(tbr[i].ysh_min, data_ptr, sizeof(coord) * tbr[i].count_shapes, cudaMemcpyHostToDevice, stream); data_ptr = tbr[i].ysh_max; er1 = cudaMalloc((void**)&(tbr[i].ysh_max), sizeof(coord) * tbr[i].count_shapes); er1 = cudaMemcpyAsync(tbr[i].ysh_max, data_ptr, sizeof(coord) * tbr[i].count_shapes, cudaMemcpyHostToDevice, stream); */ data_ptr = tbr[i].offset; er1 = cudaMalloc((void**)&(tbr[i].offset), sizeof(indexer) * tbr[i].count_shapes); er1 = cudaMemcpyAsync(tbr[i].offset, data_ptr, sizeof(indexer) * tbr[i].count_shapes, cudaMemcpyHostToDevice, stream); } er1 = cudaStreamSynchronize(stream); er1 = cudaStreamDestroy(stream); clock_t t2 = clock(); printf("Time copying data to device = %u ms\n", t2 - t1); // copy branches to device struct branch *dev_br = NULL; er1 = cudaMalloc((void**)&dev_br, sizeof(struct branch) * m_count_branches); er1 = cudaMemcpy(dev_br, tbr, sizeof(struct branch) * m_count_branches, cudaMemcpyHostToDevice); //cudaMemcpyToSymbol(m_ttt_cuda_first_branch, &dev_br, sizeof(struct branch*)); //return false; alignas(16) struct node *to_dev_nd[65]; //void **to_dev_child[64]; struct node *dev_nd = NULL, *dev_nd_prev = NULL, *dev_ptr = NULL; // to_dev_nd[0] = (struct node*)aligned_alloc(16, sizeof(struct node)); // memcpy(to_dev_nd[0], nd, sizeof(struct node)); struct node* tnd = node; //for (unsigned j = 0; j <= pos; ++j) // tnd = (struct node*)(tnd->child_node[0]); //unsigned j = 0; //void* tmp1 = NULL; unsigned count = tnd->count_child_nodes, prev_count = 1; //printf("\n\n\n======================================================================\n"); for (int k1 = pos; k1 >= 0; --k1) { tnd = node; //for (unsigned j = 0; j <= k1; ++j) // tnd = (struct node*)(tnd->child_node[0]); // data child node to_dev_nd[k1] = (struct node*)aligned_alloc(16, sizeof(struct node) * count1[k1]); //memcpy(to_dev_nd[k1], tnd/*->child_node[0]*/, sizeof(struct node) * count1[k1]); memcpy(to_dev_nd[k1], stack_first_node[k1 + 1], sizeof(struct node) * count1[k1]); // pointer to child_node on host for (indexer j = 0; j < count1[k1]; ++j) { //(to_dev_nd[k1])[j]->child_node = (void**)aligned_alloc(16, sizeof(void*) * MAX_NODES); // tnd->count_child_nodes); //(to_dev_child[k1])[j] = to_dev_nd[j]->child_node; dev_ptr = NULL; er1 = cudaMalloc((void**)&dev_ptr, sizeof(void*) * MAX_NODES); (to_dev_nd[k1])[j].child_node = (void**)dev_ptr; for (indexer k2 = 0; k2 < MAX_NODES; ++k2) { if (k1 == pos) { // copy pointer of branches //struct branch *ptr = &(dev_br[k2 + j * MAX_NODES]); unsigned idx = (struct branch*)((struct node*)(stack_first_node[k1 + 1])[j].child_node[k2]) - first_branch; //if (idx == 4899) // printf("%u\n", idx); struct branch *ptr = &(dev_br[idx]); er1 = cudaMemcpy((void*)((to_dev_nd[k1])[j].child_node + k2), &ptr, sizeof(struct branch*), cudaMemcpyHostToDevice); } else { // copy pointer of nodes //struct 
node* ptr = &(dev_nd_prev[k2 + j * MAX_NODES]); unsigned idx = (struct node*)(stack_first_node[k1 + 1])[j].child_node[k2] - (struct node*)(stack_first_node[k1 + 2]); //printf("%u\n", idx); struct node *ptr = &(dev_nd_prev[idx]); er1 = cudaMemcpy((void*)((to_dev_nd[k1])[j].child_node + k2), &ptr, sizeof(struct node*), cudaMemcpyHostToDevice); } } } //printf("==========================================\n\n\n"); // pointers of child nodes er1 = cudaMalloc((void**)&dev_nd, sizeof(struct node) * count1[k1]); // tnd->count_child_nodes); cudaMemcpy(dev_nd, to_dev_nd[k1], sizeof(struct node) * count1[k1], cudaMemcpyHostToDevice); dev_nd_prev = dev_nd; } // copy top node (root) to_dev_nd[64] = (struct node*)aligned_alloc(16, sizeof(struct node)); memcpy(to_dev_nd[64], node/*->child_node[0]*/, sizeof(struct node)); dev_ptr = NULL; er1 = cudaMalloc((void**)&dev_ptr, sizeof(void*) * node->count_child_nodes); (to_dev_nd[64])[0].child_node = (void**)dev_ptr; for (indexer k2 = 0; k2 < node->count_child_nodes; ++k2) { // copy pointer of nodes //struct node* ptr = &(dev_nd_prev[k2]); unsigned idx = (struct node*)(stack_first_node[0])[0].child_node[k2] - (struct node*)(stack_first_node[1]); struct node* ptr = &(dev_nd_prev[idx]); er1 = cudaMemcpy((void*)((to_dev_nd[64])[0].child_node + k2), &ptr, sizeof(struct node*), cudaMemcpyHostToDevice); } // pointers of child nodes er1 = cudaMalloc((void**)&dev_nd, sizeof(struct node)); // tnd->count_child_nodes); er1 = cudaMemcpy(dev_nd, to_dev_nd[64], sizeof(struct node), cudaMemcpyHostToDevice); m_dev_node = dev_nd; printf("============== 0x%llx, 0x%llx, prev = 0x%llx\n", m_dev_node, dev_nd, dev_nd_prev); // free memory for (int k1 = pos; k1 >= 0; --k1) { _aligned_free(to_dev_nd[k1]); } _aligned_free(to_dev_nd[64]); // allocating memory for root //er1 = cudaMalloc((void**)&m_dev_node, sizeof(struct node)); // copy to device root of tree //cudaMemcpy(m_dev_node, to_dev_nd[0], sizeof(struct node), cudaMemcpyHostToDevice); return true; } extern "C" bool destroy_cuda_device() { //cudaFree(dev_threads_count); cudaError_t er1 = cudaDeviceReset(); return er1 == cudaSuccess ? 
true : false; } #if defined(CALC_CIRCLE) || defined(CALC_POINT) /* searchin items in selected rectangle on cuda device */ extern "C" indexer* cuda_search_rect2(/*in*/struct node *nd, /*in*/coord x_min, /*in*/coord y_min, /*in*/coord x_max, /*in*/coord y_max, bool /*in*/intersection, /*out*/indexer *count_items); /* searchin items in selected rectangle on cuda device imlementation (nodes) */ __global__ void cuda_search_rect2_impl1(void **nd, indexer *iter_count, indexer *atomic_iter, /*out*/ void **next_nd); /* searchin items in selected rectangle on cuda device imlementation (branches) */ __global__ void cuda_search_rect2_impl2(void **br_ptr, indexer *atomic_iter, /*out*/ indexer *idxs); /* searching the nearest item to point in selected radius */ extern "C" indexer* cuda_search_nearest_item2(/*in*//*struct node *nd,*/ /*in*/coord x, /*in*/coord y, /*in*/coord radius, bool intersection, /*out*/coord *dist); /* searching the nearest item on device implementation (step 2) */ __global__ void cuda_search_nearest_item2_impl2(void **br_ptr, /*indexer *atomic_iter,*/ coord x, coord y, /*out*/ indexer *idxs, /*out*/ coord *dist); /* searching the nearest item on device implementation (step 3) */ __global__ void cuda_search_nearest_item2_impl3(/*in*/ indexer *idxs, /*in*/ coord *dist, /*in*/indexer count, /*in*/indexer *atomic_iter, /*out*/ indexer *idxs2, /*out*/ coord *dist2); #else extern "C" indexer* search_rect2(struct node *nd, coord x_min, coord y_min, coord x_max, coord y_max, bool intersection, /*out*/indexer *count_items) __global__ indexer* search_rect2_impl(void *nd_ptr, indexer iter_count, /*out*/indexer *count_items) #endif // CALC_POINT #if defined(CALC_CIRCLE) || defined(CALC_POINT) /* searchin items in selected rectangle on cuda device */ indexer* cuda_search_rect2(node * nd, coord x_min, coord y_min, coord x_max, coord y_max, bool intersection, indexer * count_items) #else indexer* search_rect2(struct node *nd, coord x_min, coord y_min, coord x_max, coord y_max, bool intersection, /*out*/indexer *count_items) #endif // CALC_POINT { // memory for result /*size_t mem_size = 256; size_t count_mem = 1; alignas(16) indexer* idxs = (indexer*)aligned_alloc(16, sizeof(indexer) * mem_size * count_mem); */ cudaError_t er1; cudaStream_t stream; cudaStreamCreate(&stream); /*indexer *host_idxs = NULL, *dev_idxs = NULL; // , *dev_tmp_idxs = NULL;; cudaHostAlloc((void**)&host_idxs, sizeof(indexer) * MAX_RESULTS, cudaHostAllocMapped); cudaHostGetDevicePointer((void**)&dev_idxs, host_idxs, 0); //cudaMalloc((void**)&dev_tmp_idxs, sizeof(indexer) * MAX_RESULTS); */ indexer *dev_idxs = NULL; cudaMalloc((void**)&dev_idxs, sizeof(indexer) * MAX_RESULTS); // searching cudaEvent_t start, stop; float gtime = 0.0; int device_id; /*cudaDeviceProp prop; er1 = cudaGetDevice(&device_id); er1 = cudaGetDeviceProperties(&prop, device_id); dim3 grid_size = dim3(prop.multiProcessorCount, 1, 1), block_size = dim3(prop.warpSize * 2, 1, 1); */ dim3 grid_size = dim3(m_multi_processor_count, 1, 1), block_size = dim3(m_warp_size2, 1, 1); // store boundaries boundaries b1; b1.intersection = intersection; b1.x_max = x_max; b1.x_min = x_min; b1.y_max = y_max; b1.y_min = y_min; //cudaMalloc((void**)dev_bonds, sizeof(struct boundaries)); er1 = cudaMemcpyToSymbolAsync(dev_bonds, &b1, sizeof(struct boundaries), 0, cudaMemcpyHostToDevice, stream); // for store count of iterations to next step indexer *dev_atomic_iter = NULL; er1 = cudaMalloc((void**)&dev_atomic_iter, sizeof(indexer)); er1 = cudaMemsetAsync(dev_atomic_iter, 0, 
sizeof(indexer), stream); // store pointers for next step void **dev_ptr = NULL, **dev_ptr2 = NULL; er1 = cudaMalloc((void**)&dev_ptr, sizeof(void*) * m_count_branches); //printf("======================= 0x%llx; 0x%llx, count_br = %u\n", &m_dev_node, m_dev_node, m_count_branches); void **tptr = (void**)(&m_dev_node); er1 = cudaMemcpyAsync(dev_ptr, tptr, sizeof(void*), cudaMemcpyHostToDevice, stream); er1 = cudaMalloc((void**)&dev_ptr2, sizeof(void*) * m_count_branches); //printf("======================= 0x%llx; 0x%llx; dev_ptr = 0x%llx\n", &m_dev_node, m_dev_node, dev_ptr); // count items //indexer *dev_count_items = NULL; //er1 = cudaMalloc((void**)&dev_count_items, sizeof(indexer)); // count of iterations indexer atomic_iter = 1; indexer *dev_iter_count = NULL; er1 = cudaMalloc((void**)&dev_iter_count, sizeof(indexer)); //er1 = cudaMemsetAsync(dev_iter_count, 0, sizeof(indexer), stream); //er1 = cudaMemsetAsync(dev_iter_count, 1, 1, stream); er1 = cudaMemcpyAsync(dev_iter_count, &atomic_iter, sizeof(indexer), cudaMemcpyHostToDevice); er1 = cudaStreamSynchronize(stream); #ifdef DEBUG_CUDA clock_t t1 = clock(); er1 = cudaEventCreate(&start); er1 = cudaEventCreate(&stop); er1 = cudaEventRecord(start, stream); #endif // calculating nodes for (int i = 0; i < m_length_of_tree + 1; ++i) { er1 = cudaMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); if (atomic_iter > m_warp_size2 /*prop.warpSize * 2 */) { unsigned t = (unsigned)ceil((double)atomic_iter / (double)(m_warp_size2 /*prop.warpSize * 2.0 */)); block_size = dim3(m_warp_size2 /*prop.warpSize * 2 */, 1, 1); grid_size = dim3(t, 1, 1); } else { grid_size = dim3(1, 1, 1); block_size = dim3(atomic_iter, 1, 1); } cuda_search_rect2_impl1 << <grid_size, block_size, 0, stream >> > ((void**)dev_ptr, dev_iter_count, dev_atomic_iter, dev_ptr2); er1 = cudaMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), cudaMemcpyDeviceToHost, stream); er1 = cudaMemcpyAsync(dev_ptr, dev_ptr2, sizeof(void*) * atomic_iter, cudaMemcpyDeviceToDevice, stream); er1 = cudaMemcpyAsync(dev_iter_count, dev_atomic_iter, sizeof(indexer), cudaMemcpyDeviceToDevice, stream); cudaStreamSynchronize(stream); //printf("===== Iter %i: next = %u (%s)\n", i, atomic_iter, er1 == cudaSuccess ? 
"true" : "false"); //cudaThreadSynchronize(); } #ifdef DEBUG_CUDA er1 = cudaEventRecord(stop, stream); er1 = cudaEventSynchronize(stop); er1 = cudaEventElapsedTime(&gtime, start, stop); printf("Kernel 1 time = %f ms\n", gtime); #endif // calculating branches grid_size = dim3(atomic_iter, 1, 1); block_size = dim3(m_warp_size2 /*prop.warpSize * 2 */, 1, 1); #ifdef DEBUG_CUDA er1 = cudaEventRecord(start, stream); #endif er1 = cudaMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); cuda_search_rect2_impl2 << <grid_size, block_size, 0, stream >> > ((void**)dev_ptr, dev_atomic_iter, dev_idxs); er1 = cudaMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), cudaMemcpyDeviceToHost, stream); er1 = cudaStreamSynchronize(stream); //er1 = cudaThreadSynchronize(); #ifdef DEBUG_CUDA er1 = cudaEventRecord(stop, stream); er1 = cudaEventSynchronize(stop); er1 = cudaEventElapsedTime(&gtime, start, stop); printf("Kernel 2 time = %f ms\n", gtime); clock_t t2 = clock(); printf("All kernels time = %i ms\n", t2 - t1); #endif er1 = cudaMemcpyAsync(count_items, dev_atomic_iter, sizeof(indexer), cudaMemcpyDeviceToHost, stream); indexer *idxs = (indexer*)aligned_alloc(16, sizeof(indexer) * *count_items); //er1 = cudaMemcpyAsync(idxs, host_idxs, sizeof(indexer) * *count_items, cudaMemcpyHostToHost, stream); er1 = cudaMemcpyAsync(idxs, dev_idxs, sizeof(indexer) * *count_items, cudaMemcpyDeviceToHost, stream); er1 = cudaStreamSynchronize(stream); #ifdef DEBUG_CUDA printf("Total results from device = %u\n", *count_items); #endif // freeing and destroying cudaStreamDestroy(stream); er1 = cudaFree(dev_iter_count); er1 = cudaFree(dev_ptr); er1 = cudaFree(dev_ptr2); //er1 = cudaFree(dev_tmp_idxs); //er1 = cudaFree(dev_count_items); er1 = cudaFree(dev_atomic_iter); //er1 = cudaFreeHost(host_idxs); er1 = cudaFree(dev_idxs); #ifdef DEBUG_CUDA er1 = cudaEventDestroy(stop); er1 = cudaEventDestroy(start); #endif #ifdef PACK_RESULTS if (*count_items) { qsort(idxs, *count_items, sizeof(indexer), cmp); indexer j = 1; indexer offset = 0; for (indexer i = 0; i < *count_items - 1 - offset; ++i) { //if (idxs[i] == 3617359) // idxs[i] = 3617359; if (idxs[i] == idxs[i + 1 + offset]) { offset++; idxs[i + 1] = idxs[i + 1 + offset]; i--; continue; } if (offset) idxs[i + 1] = idxs[i + 1 + offset]; j++; } *count_items = j; idxs = (indexer*)_aligned_realloc(idxs, sizeof(indexer) * j, 16); } #endif return idxs; } /* searchin items in selected rectangle on cuda device imlementation (step 1) */ __global__ void cuda_search_rect2_impl1(void **nd_ptr, indexer *iter_count, indexer *atomic_iter, /*out*/ void** next_nd) { int idxx = threadIdx.x; // to temporary store node index /*__shared__ indexer store[64]; store[threadIdx.x] = (indexer)-1; __shared__ int store_idx[1]; if (!threadIdx.x) store_idx[threadIdx.x] = 0; */ struct node** nd = (struct node**)nd_ptr; //indexer idx = 0; #ifdef CALC_POINT //coord tmp_dist = FLT_MAX; //indexer tmp_idx = -1; #endif // CALC_POINT indexer curr_indexer = idxx + blockIdx.x * blockDim.x; // (*dev_threads_count); if (curr_indexer < *iter_count) { struct node *curr_nd = nd[curr_indexer]; __shared__ coord nd_x1[64], nd_x2[64], nd_y1[64], nd_y2[64]; nd_x1[threadIdx.x] = curr_nd->x1; nd_x2[threadIdx.x] = curr_nd->x2; nd_y1[threadIdx.x] = curr_nd->y1; nd_y2[threadIdx.x] = curr_nd->y2; // node in bounrary or bounrary in node if (nd_x1[threadIdx.x] <= dev_bonds->x_max && nd_x2[threadIdx.x] >= dev_bonds->x_min && nd_y1[threadIdx.x] <= dev_bonds->y_max && nd_y2[threadIdx.x] >= dev_bonds->y_min) { // node isn't fully 
in the boundary, than add to calculation to next iteration indexer t3 = atomicAdd(atomic_iter, curr_nd->count_child_nodes); //printf("Increase %i: %u to %u (%u)\n", idxx, t1, *atomic_iter, nd[idxx]->count_child_nodes); for (unsigned k = t3, t2 = 0; k < t3 + curr_nd->count_child_nodes; ++k, ++t2) { next_nd[k] = curr_nd->child_node[t2]; //printf("Next index = %u\n", (struct branch*)(nd[curr_indexer]->child_node[t2]) - m_ttt_cuda_first_branch); } } else { // node and boundary isn't intersection } } } /* searchin items in selected rectangle on cuda device imlementation (step 1) */ __global__ void cuda_search_rect2_impl2(void **br_ptr, indexer *atomic_iter, /*out*/ indexer *idxs) { int idxx = threadIdx.x; int idx_gr_br = blockIdx.x; // for store temporary results __shared__ indexer temp_res[65]; // must be as blockDim.x size + 1 (for rpevious result) __shared__ char temp_res_flag[64]; temp_res[idxx] = (indexer)-1; temp_res_flag[idxx] = -1; if (!idxx) temp_res[64] = (indexer)-1; __shared__ coord leaf_x[65]; __shared__ coord leaf_y[65]; struct branch** br = (struct branch**)br_ptr; struct branch *curr_br = br[idx_gr_br]; __syncthreads(); __shared__ indexer start_num[1]; if (!idxx) start_num[0] = curr_br->leaf_number[0]; //start_num[0] = ((branch*)((struct branch**)br_ptr)[idx_gr_br])->leaf_number[0]; __syncthreads(); //if (start_num[0] != ((branch*)((struct branch**)br_ptr))->leaf_number[0]) { // start_num[0] = ((branch*)((struct branch**)br_ptr)[idx_gr_br])->leaf_number[0]; //} if (curr_br->x_min <= dev_bonds->x_max && curr_br->x_max >= dev_bonds->x_min && curr_br->y_min <= dev_bonds->y_max && curr_br->y_max >= dev_bonds->y_min) { int t = (int)ceilf((float)curr_br->count_leafs / (float)blockDim.x); for (int j = 0; j < t; ++j) { int curr_idx = idxx + j * blockDim.x; // curr_offset; if (/*j == t1 && */curr_idx < curr_br->count_leafs) { // loading frequantly using data leaf_x[idxx] = curr_br->leaf_x[curr_idx]; leaf_y[idxx] = curr_br->leaf_y[curr_idx]; if (!idxx && curr_idx + 64 < curr_br->count_leafs && curr_br->merge_next_leaf[curr_idx + 63]) { leaf_x[64] = curr_br->leaf_x[curr_idx + 64]; leaf_y[64] = curr_br->leaf_y[curr_idx + 64]; } //if (curr_br->leaf_number[curr_idx] == 3617359) // curr_idx = curr_idx; // check points to enter in boundary if (leaf_x[idxx] >= dev_bonds->x_min && leaf_x[idxx] <= dev_bonds->x_max && leaf_y[idxx] >= dev_bonds->y_min && leaf_y[idxx] <= dev_bonds->y_max) { temp_res[idxx] = curr_br->leaf_number[curr_idx]; } else if(dev_bonds->intersection) { bool fl1 = false; if (curr_br->merge_next_leaf[curr_idx]) { // last check: intersection // side 1/2 fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1], dev_bonds->x_min, dev_bonds->y_min, dev_bonds->x_max, dev_bonds->y_min); // side 2/3 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1], dev_bonds->x_max, dev_bonds->y_min, dev_bonds->x_max, dev_bonds->y_max); } // side 3/4 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1], dev_bonds->x_max, dev_bonds->y_max, dev_bonds->x_min, dev_bonds->y_max); } // side 4/1 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1], dev_bonds->x_min, dev_bonds->y_max, dev_bonds->x_min, dev_bonds->y_min); } } else { indexer curr_num = curr_br->offset[curr_br->leaf_number[curr_idx] - start_num[0]]; __shared__ coord leaf_x_offset[64]; __shared__ coord leaf_y_offset[64]; leaf_x_offset[idxx] = 
curr_br->leaf_x[curr_num]; leaf_y_offset[idxx] = curr_br->leaf_y[curr_num]; // side 1/2 fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx], dev_bonds->x_min, dev_bonds->y_min, dev_bonds->x_max, dev_bonds->y_min); // side 2/3 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx], dev_bonds->x_max, dev_bonds->y_min, dev_bonds->x_max, dev_bonds->y_max); } // side 3/4 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx], dev_bonds->x_max, dev_bonds->y_max, dev_bonds->x_min, dev_bonds->y_max); } // side 4/1 if (!fl1) { fl1 = cuda_check_intersection(leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx], dev_bonds->x_min, dev_bonds->y_max, dev_bonds->x_min, dev_bonds->y_min); } } if (fl1) temp_res[idxx] = curr_br->leaf_number[curr_idx]; } __syncthreads(); // packing temporary results if (temp_res[idxx] == temp_res[idxx + 1]) { __threadfence(); temp_res[idxx + 1] = -1; } else { //__threadfence(); } __syncthreads(); if (!idxx) { if (temp_res[64] == temp_res[0]) { temp_res[0] = -1; } } //__syncthreads(); // store temporary results to global array2 if (temp_res[idxx] != -1) { int t2 = atomicAdd(atomic_iter, 1); if (t2 >= MAX_RESULTS - 1) { // can not store result atomicSub(atomic_iter, 1); } else { // can store result (idxs2 - temporary) idxs[t2] = temp_res[idxx]; temp_res_flag[idxx] = idxx; } } __syncthreads(); // store previous result for (int t2 = blockDim.x / 2; t2 > 0; t2 >>= 1) { if (idxx < t2) { if (temp_res_flag[idxx] < temp_res_flag[idxx + t2]) temp_res_flag[idxx] = temp_res_flag[idxx + t2]; } __syncthreads(); } if (!idxx) { if (temp_res_flag[idxx] != -1) { temp_res[64] = temp_res[temp_res_flag[idxx]]; // idxs[idxx]; } } // reset temporary resulats temp_res[idxx] = (indexer)-1; //temp_res2[idxx] = -1; temp_res_flag[idxx] = -1; __syncthreads(); } } } } __device__ bool cuda_check_intersection(coord p1x, coord p1y, coord p2x, coord p2y, coord p3x, coord p3y, coord p4x, coord p4y) { coord x4x3 = p4x - p3x; coord y4y3 = p4y - p3y; coord x1x3 = p1x - p3x; coord y1y3 = p1y - p3y; coord x2x3 = p2x - p3x; coord y2y3 = p2y - p3y; coord x2x1 = p2x - p1x; coord y2y1 = p2y - p1y; coord x3x1 = p3x - p1x; coord y3y1 = p3y - p1y; coord x4x1 = p4x - p1x; coord y4y1 = p4y - p1y; coord v1 = x4x3 * y1y3 - x1x3 * y4y3; coord v2 = x4x3 * y2y3 - x2x3 * y4y3; coord v3 = x2x1 * y3y1 - x3x1 * y2y1; coord v4 = x2x1 * y4y1 - x4x1 * y2y1; coord v1t = v1 * v2; coord v2t = v3 * v4; //if ((signbit(v1t) || v1t == 0.0) && (signbit(v2t) || v2t == 0.0)) { if (v1t <= 0.0 && v2t <= 0.0) { return true; } return false; } /* searching the nearest item to point in selected radius */ indexer* cuda_search_nearest_item2(/*in*//*struct node *nd,*/ /*in*/coord x, /*in*/coord y, /*in*/coord radius, bool intersection, /*out*/coord *dist) { cudaError_t er1; cudaStream_t stream; cudaStreamCreate(&stream); // searching cudaEvent_t start, stop; float gtime = 0.0; dim3 grid_size = dim3(m_multi_processor_count, 1, 1), block_size = dim3(m_warp_size2, 1, 1); // store boundaries boundaries b1; b1.intersection = intersection; b1.x_max = x + radius; b1.x_min = x - radius; b1.y_max = y + radius; b1.y_min = y - radius; //cudaMalloc((void**)dev_bonds, sizeof(struct boundaries)); er1 = cudaMemcpyToSymbolAsync(dev_bonds, &b1, sizeof(struct boundaries), 0, cudaMemcpyHostToDevice, stream); // for store count of iterations to next step indexer *dev_atomic_iter = NULL; er1 = 
cudaMalloc((void**)&dev_atomic_iter, sizeof(indexer)); er1 = cudaMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); // store pointers for next step void **dev_ptr = NULL, **dev_ptr2 = NULL; er1 = cudaMalloc((void**)&dev_ptr, sizeof(void*) * m_count_branches); //printf("======================= 0x%llx; 0x%llx, count_br = %u\n", &m_dev_node, m_dev_node, m_count_branches); void **tptr = (void**)(&m_dev_node); er1 = cudaMemcpyAsync(dev_ptr, tptr, sizeof(void*), cudaMemcpyHostToDevice, stream); er1 = cudaMalloc((void**)&dev_ptr2, sizeof(void*) * m_count_branches); //printf("======================= 0x%llx; 0x%llx; dev_ptr = 0x%llx\n", &m_dev_node, m_dev_node, dev_ptr); // count items //indexer *dev_count_items = NULL; //er1 = cudaMalloc((void**)&dev_count_items, sizeof(indexer)); // count of iterations indexer atomic_iter = 1; indexer *dev_iter_count = NULL; er1 = cudaMalloc((void**)&dev_iter_count, sizeof(indexer)); er1 = cudaMemcpyAsync(dev_iter_count, &atomic_iter, sizeof(indexer), cudaMemcpyHostToDevice); er1 = cudaStreamSynchronize(stream); #ifdef DEBUG_CUDA clock_t t1 = clock(); er1 = cudaEventCreate(&start); er1 = cudaEventCreate(&stop); er1 = cudaEventRecord(start, stream); #endif // calculating nodes for (int i = 0; i < m_length_of_tree + 1; ++i) { er1 = cudaMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); if (atomic_iter > m_warp_size2 /*prop.warpSize * 2 */) { unsigned t = (unsigned)ceil((double)atomic_iter / (double)(m_warp_size2 /*prop.warpSize * 2.0 */)); block_size = dim3(m_warp_size2 /*prop.warpSize * 2 */, 1, 1); grid_size = dim3(t, 1, 1); } else { grid_size = dim3(1, 1, 1); block_size = dim3(atomic_iter, 1, 1); } cuda_search_rect2_impl1 << <grid_size, block_size, 0, stream >> > ((void**)dev_ptr, dev_iter_count, dev_atomic_iter, dev_ptr2); er1 = cudaMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), cudaMemcpyDeviceToHost, stream); er1 = cudaMemcpyAsync(dev_ptr, dev_ptr2, sizeof(void*) * atomic_iter, cudaMemcpyDeviceToDevice, stream); er1 = cudaMemcpyAsync(dev_iter_count, dev_atomic_iter, sizeof(indexer), cudaMemcpyDeviceToDevice, stream); cudaStreamSynchronize(stream); //printf("===== Iter %i: next = %u (%s)\n", i, atomic_iter, er1 == cudaSuccess ? 
"true" : "false"); //cudaThreadSynchronize(); } #ifdef DEBUG_CUDA er1 = cudaEventRecord(stop, stream); er1 = cudaEventSynchronize(stop); er1 = cudaEventElapsedTime(&gtime, start, stop); printf("Kernel 1 time = %f ms\n", gtime); #endif // calculating branches grid_size = dim3(atomic_iter, 1, 1); block_size = dim3(m_warp_size2 /*prop.warpSize * 2 */, 1, 1); #ifdef DEBUG_CUDA er1 = cudaEventRecord(start, stream); #endif indexer *dev_idxs = NULL; cudaMalloc((void**)&dev_idxs, sizeof(indexer) * atomic_iter); //cudaMemsetAsync(dev_idxs, 0, sizeof(indexer), stream); coord *dev_dist = NULL; cudaMalloc((void**)&dev_dist, sizeof(coord) * atomic_iter); //cudaMemsetAsync(dev_dist, 0, sizeof(coord), stream); //er1 = cudaMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); cuda_search_nearest_item2_impl2 << <grid_size, block_size, 0, stream >> > ((void**)dev_ptr, /*dev_atomic_iter,*/ x, y, dev_idxs, dev_dist); //er1 = cudaMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), cudaMemcpyDeviceToHost, stream); er1 = cudaStreamSynchronize(stream); //er1 = cudaThreadSynchronize(); #ifdef DEBUG_CUDA er1 = cudaEventRecord(stop, stream); er1 = cudaEventSynchronize(stop); er1 = cudaEventElapsedTime(&gtime, start, stop); printf("Kernel 2 time = %f ms\n", gtime); clock_t t2 = clock(); printf("All kernels time = %i ms\n", t2 - t1); #endif grid_size = dim3(m_multi_processor_count, 1, 1), block_size = dim3(m_warp_size2 / 2, 1, 1); indexer *dev_idxs2 = NULL; cudaMalloc((void**)&dev_idxs2, sizeof(indexer) * (size_t)ceil((double)atomic_iter / (double)m_warp_size2)); coord *dev_dist2 = NULL; cudaMalloc((void**)&dev_dist2, sizeof(coord) * (size_t)ceil((double)atomic_iter / (double)m_warp_size2)); while (atomic_iter > 1) { er1 = cudaMemsetAsync(dev_atomic_iter, 0, sizeof(indexer), stream); cuda_search_nearest_item2_impl3 << <grid_size, block_size, 0, stream >> > (dev_idxs, dev_dist, atomic_iter, dev_atomic_iter, dev_idxs2, dev_dist2); er1 = cudaMemcpyAsync(&atomic_iter, dev_atomic_iter, sizeof(indexer), cudaMemcpyDeviceToHost, stream); er1 = cudaMemcpyAsync(dev_idxs, dev_idxs2, sizeof(indexer) * atomic_iter, cudaMemcpyDeviceToDevice, stream); er1 = cudaMemcpyAsync(dev_dist, dev_dist2, sizeof(coord) * atomic_iter, cudaMemcpyDeviceToDevice, stream); cudaStreamSynchronize(stream); } er1 = cudaMemcpyAsync(dist, dev_dist, sizeof(coord), cudaMemcpyDeviceToHost, stream); indexer *idxs = (indexer*)aligned_alloc(16, sizeof(indexer) * 1); //er1 = cudaMemcpyAsync(idxs, host_idxs, sizeof(indexer) * *count_items, cudaMemcpyHostToHost, stream); /*if (*dist == (coord)0.0) idxs[0] = (indexer)-1; else*/ er1 = cudaMemcpyAsync(idxs, dev_idxs, sizeof(indexer) * 1, cudaMemcpyDeviceToHost, stream); er1 = cudaStreamSynchronize(stream); #ifdef DEBUG_CUDA printf("Total results from device = %u\n", 1); #endif // freeing and destroying cudaStreamDestroy(stream); er1 = cudaFree(dev_iter_count); er1 = cudaFree(dev_ptr); er1 = cudaFree(dev_ptr2); //er1 = cudaFree(dev_tmp_idxs); //er1 = cudaFree(dev_count_items); er1 = cudaFree(dev_atomic_iter); //er1 = cudaFreeHost(host_idxs); er1 = cudaFree(dev_idxs); #ifdef DEBUG_CUDA er1 = cudaEventDestroy(stop); er1 = cudaEventDestroy(start); #endif return idxs; } /* searching the nearest item on device implementation */ __global__ void cuda_search_nearest_item2_impl2(void **br_ptr, /*indexer *atomic_iter,*/ coord x, coord y, /*out*/ indexer *idxs, /*out*/ coord *dist) { int idxx = threadIdx.x; int idx_gr_br = blockIdx.x; // for store temporary results __shared__ indexer temp_res[65]; // must be as 
blockDim.x size + 1 (for rpevious result) //__shared__ char temp_res_flag[64]; __shared__ coord curr_dist[65]; //temp_res_flag[idxx] = -1; if (!idxx) { temp_res[64] = (indexer)-1; curr_dist[64] = FLT_MAX; } struct branch** br = (struct branch**)br_ptr; struct branch *curr_br = br[idx_gr_br]; __syncthreads(); __shared__ indexer start_num[1]; if (!idxx) start_num[0] = curr_br->leaf_number[0]; __syncthreads(); if (curr_br->x_min <= dev_bonds->x_max && curr_br->x_max >= dev_bonds->x_min && curr_br->y_min <= dev_bonds->y_max && curr_br->y_max >= dev_bonds->y_min) { int t = (int)ceilf((float)curr_br->count_leafs / (float)blockDim.x); for (int j = 0; j < t; ++j) { curr_dist[idxx] = FLT_MAX; temp_res[idxx] = (indexer)-1; int curr_idx = idxx + j * blockDim.x; // curr_offset; if (/*j == t1 && */curr_idx < curr_br->count_leafs) { // loading frequantly using data __shared__ coord leaf_x[65]; __shared__ coord leaf_y[65]; leaf_x[idxx] = curr_br->leaf_x[curr_idx]; leaf_y[idxx] = curr_br->leaf_y[curr_idx]; if (!idxx && curr_idx + 64 < curr_br->count_leafs && curr_br->merge_next_leaf[curr_idx + 63]) { leaf_x[64] = curr_br->leaf_x[curr_idx + 64]; leaf_y[64] = curr_br->leaf_y[curr_idx + 64]; } // calculating distances if (curr_br->merge_next_leaf[curr_idx]) { curr_dist[idxx] = cuda_distance(x, y, leaf_x[idxx], leaf_y[idxx], leaf_x[idxx + 1], leaf_y[idxx + 1]); } else { indexer curr_num = curr_br->offset[curr_br->leaf_number[curr_idx] - start_num[0]]; __shared__ coord leaf_x_offset[64]; __shared__ coord leaf_y_offset[64]; leaf_x_offset[idxx] = curr_br->leaf_x[curr_num]; leaf_y_offset[idxx] = curr_br->leaf_y[curr_num]; curr_dist[idxx] = cuda_distance(x, y, leaf_x[idxx], leaf_y[idxx], leaf_x_offset[idxx], leaf_y_offset[idxx]); } temp_res[idxx] = curr_br->leaf_number[curr_idx]; __syncthreads(); // find min distance for (int k = blockDim.x / 2; k > 0; k >>= 1) { if (idxx < k) { if (curr_dist[idxx] > curr_dist[idxx + k]) { curr_dist[idxx] = curr_dist[idxx + k]; temp_res[idxx] = temp_res[idxx + k]; } } } // check previous result if (!idxx) { if (curr_dist[64] > curr_dist[0]) { curr_dist[64] = curr_dist[0]; temp_res[64] = temp_res[0]; } } __syncthreads(); } } } __syncthreads(); if (!idxx) { dist[idx_gr_br] = curr_dist[64]; idxs[idx_gr_br] = temp_res[64]; //printf("GGGPU (%i) idx = %u, dist = %e\n", idx_gr_br, temp_res[64], curr_dist[64]); } } /* calculating distance between point and line */ __device__ coord cuda_distance(coord px, coord py, coord line_p0x, coord line_p0y, coord line_p1x, coord line_p1y) { coord vx, vy, wx, wy, c1, c2, b, pbx, pby; vx = line_p1x - line_p0x; vy = line_p1y - line_p0y; wx = px - line_p0x; wy = py - line_p0y; c1 = vx * wx + vy * wy; if (c1 <= 0) { //coord t1 = p->x - line_p0->x; //coord t2 = p->y - line_p0->y; coord t1 = wx; coord t2 = wy; return (coord)sqrt(t1 * t1 + t2 * t2); } c2 = vx * vx + vy * vy; if (c2 <= c1) { //return sqrt(pow(fabs(p->x - line_p1->x), 2) + pow(fabs(p->y - line_p1->y), 2)); coord t1 = px - line_p1x; coord t2 = py - line_p1y; return (coord)sqrt(t1 * t1 + t2 * t2); } b = c1 / c2; pbx = line_p0x + b * vx; pby = line_p0y + b * vy; //return sqrt(pow(fabs(p->x - pbx), 2) + pow(fabs(p->y - pby), 2)); coord t1 = px - pbx; coord t2 = py - pby; return (coord)sqrt(t1 * t1 + t2 * t2); } /* searching the nearest item on device implementation (step 3) */ __global__ void cuda_search_nearest_item2_impl3(/*in*/ indexer *idxs, /*in*/ coord *dist, /*in*/indexer count, /*in*/indexer *atomic_iter, /*out*/ indexer *idxs2, /*out*/ coord *dist2) { indexer idxx = threadIdx.x; //indexer 
thr_index = idxx + blockIdx.x * blockDim.x; //if (thr_index > count) { // return; //} __shared__ coord d[64]; d[idxx] = FLT_MAX; d[idxx + 32] = FLT_MAX; int block_size = gridDim.x * blockDim.x * 2; int c1 = (int)ceilf((double)count / (double)block_size); for (int i = 0; i < c1; ++i) { // find min distance indexer offset = blockIdx.x * blockDim.x * 2 + i * block_size; indexer curr_index = idxx + offset; //if (curr_index + 32 > count) // break; if (curr_index < count) d[idxx] = dist[curr_index]; if (curr_index + 32 < count) d[idxx + 32] = dist[curr_index + 32]; __syncthreads(); for (int k = blockDim.x /* * 2 / 2 */; k > 0; k >>= 1) { if (idxx < k) { //if (dist[curr_index] > dist[curr_index + k]) { if (d[idxx] > d[idxx + k]) { //dist[curr_index] = dist[curr_index + k]; d[idxx] = d[idxx + k]; idxs[curr_index] = idxs[curr_index + k]; } } else { break; } __syncthreads(); /*if (!idxx) printf("%i, k = %i, idx = %u:\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n%e,%e,%e,%e,%e,%e,%e,%e\n", blockIdx.x, k, curr_index, dist[curr_index], dist[curr_index + 1], dist[curr_index + 2], dist[curr_index + 3], dist[curr_index + 4], dist[curr_index + 5], dist[curr_index + 6], dist[curr_index + 7], dist[curr_index + 8], dist[curr_index + 9], dist[curr_index + 10], dist[curr_index + 11], dist[curr_index + 12], dist[curr_index + 13], dist[curr_index + 14], dist[curr_index + 15], dist[curr_index + 16], dist[curr_index + 17], dist[curr_index + 18], dist[curr_index + 19], dist[curr_index + 20], dist[curr_index + 21], dist[curr_index + 22], dist[curr_index + 23], dist[curr_index + 24], dist[curr_index + 25], dist[curr_index + 26], dist[curr_index + 27], dist[curr_index + 28], dist[curr_index + 29], dist[curr_index + 30], dist[curr_index + 31], dist[curr_index + 32], dist[curr_index + 33], dist[curr_index + 34], dist[curr_index + 35], dist[curr_index + 36], dist[curr_index + 37], dist[curr_index + 38], dist[curr_index + 39], dist[curr_index + 40], dist[curr_index + 41], dist[curr_index + 42], dist[curr_index + 43], dist[curr_index + 44], dist[curr_index + 45], dist[curr_index + 46], dist[curr_index + 47], dist[curr_index + 48], dist[curr_index + 49], dist[curr_index + 50], dist[curr_index + 51], dist[curr_index + 52], dist[curr_index + 53], dist[curr_index + 54], dist[curr_index + 55], dist[curr_index + 56], dist[curr_index + 57], dist[curr_index + 58], dist[curr_index + 59], dist[curr_index + 60], dist[curr_index + 61], dist[curr_index + 62], dist[curr_index + 63]); */ } __syncthreads(); if (!idxx && curr_index < count) { int t = atomicAdd(atomic_iter, 1); idxs2[t] = idxs[offset]; //dist2[t] = dist[offset]; dist2[t] = d[0]; } } }
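A minimal host-side sketch of how the search entry points defined above might be driven; it is not part of the original file. It assumes first.h declares coord, indexer and struct node with the same meanings as above, that the R-tree root has already been built on the host, and that the result buffers come from aligned_alloc (as in this build) and can be released with free(); the device id, rectangle and radius values are made-up example data.

// Hypothetical driver for the extern "C" search API above (illustration only).
#include <cstdio>
#include <cstdlib>
#include "first.h"   // assumed to define coord, indexer and struct node

extern "C" bool init_cuda_device(int deviceID, struct node* root);
extern "C" bool destroy_cuda_device();
extern "C" indexer* cuda_search_rect2(struct node* nd, coord x_min, coord y_min,
                                      coord x_max, coord y_max, bool intersection,
                                      indexer* count_items);
extern "C" indexer* cuda_search_nearest_item2(coord x, coord y, coord radius,
                                              bool intersection, coord* dist);

void example_gpu_search(struct node* root)
{
    if (!init_cuda_device(0, root))        // copies the R-tree onto device 0
        return;

    // All items inside (or intersecting) the rectangle (10,10)-(20,20).
    indexer count = 0;
    indexer* hits = cuda_search_rect2(root, (coord)10, (coord)10,
                                      (coord)20, (coord)20, true, &count);
    printf("items found: %u\n", (unsigned)count);
    free(hits);                            // buffer was allocated with aligned_alloc

    // Nearest item to (15,15) within radius 5.
    coord dist = (coord)0;
    indexer* nearest = cuda_search_nearest_item2((coord)15, (coord)15,
                                                 (coord)5, true, &dist);
    printf("nearest item %u at distance %f\n", (unsigned)nearest[0], (double)dist);
    free(nearest);

    destroy_cuda_device();
}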
4cc09639a329baa9955e28b7457e8e0415d41ce0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:REPAIR_ERROR
//--blockDim=2048 --gridDim=64

struct s {
  char *p;
};

__global__ void foo(s q) {
  __requires_fresh_array(q.p);
  q.p[0] = threadIdx.x;
}
4cc09639a329baa9955e28b7457e8e0415d41ce0.cu
//xfail:REPAIR_ERROR
//--blockDim=2048 --gridDim=64

struct s {
  char *p;
};

__global__ void foo(s q) {
  __requires_fresh_array(q.p);
  q.p[0] = threadIdx.x;
}
461c923af4655dba995d1419b6b52fa964a4d9a0.hip
// !!! This is a file automatically generated by hipify!!! #include "modcusp_library.h" int cusp_biCGSTAB_solver::cusp_biCGSTAB_initDevice(indexType devID) { int deviceCount = 0; hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess) { printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id)); return OPERROR; } if (deviceCount == 0) { printf("There are no available CUDA device(s). Reverting to a CPU Solver\n"); return NODEVICE; } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); if ( devID >= deviceCount) { printf("Device id=$d not found. Maximum id=%d. Reverting to a CPU solver \n", devID, deviceCount); return NODEVICE; } else { hipSetDevice(devID); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, devID); printf("\nRunning on device %d: \"%s\"\n", devID, deviceProp.name); char msg[256]; SPRINTF(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); } } return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_allocDevice(indexType n, indexType m) { N = n; nnz = m; if(N <= 0 or nnz <= N) { printf("The size of the coeffcient matrix is not set correctly, N=%d, NNZ=%d\n.", n, m); return OPERROR; } if(hipMalloc(&cooRowIndADev, nnz*sizeof(indexType)) != hipSuccess)return OPERROR; if(hipMalloc(&cooColIndADev, nnz*sizeof(indexType)) != hipSuccess)return OPERROR; if(hipMalloc(&cooValADev, nnz*sizeof(valueType)) != hipSuccess)return OPERROR; if(hipMalloc(&xDev, N*sizeof(valueType)) != hipSuccess)return OPERROR; if(hipMalloc(&bDev, N*sizeof(valueType)) != hipSuccess)return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyH2D_AInds(indexType *rows, indexType *cols) { if(hipMemcpy(cooRowIndADev, rows, nnz*sizeof(cooRowIndADev[0]), hipMemcpyHostToDevice) != hipSuccess) return OPERROR; if(hipMemcpy(cooColIndADev, cols, nnz*sizeof(cooColIndADev[0]), hipMemcpyHostToDevice) != hipSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyH2D_system(valueType *Avals, valueType *xHost, valueType *bHost) { if(hipMemcpy(bDev, bHost, N*sizeof(bDev[0]),hipMemcpyHostToDevice) != hipSuccess) return OPERROR; if(hipMemcpy(xDev, xHost, N*sizeof(xDev[0]),hipMemcpyHostToDevice) != hipSuccess) return OPERROR; if(hipMemcpy(cooValADev, Avals, nnz*sizeof(cooValADev[0]), hipMemcpyHostToDevice) != hipSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyD2H_x(valueType *xHost) { if(hipMemcpy(xHost, xDev, N*sizeof(xHost[0]),hipMemcpyDeviceToHost) != hipSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_solveDev_system(valueType relTol, valueType absTol, indexType maxItr) { // Wrap device pointers thrust::device_ptr<indexType> wrapped_cooRowIndADev(cooRowIndADev); thrust::device_ptr<indexType> wrapped_cooColIndADev(cooColIndADev); thrust::device_ptr<valueType> wrapped_cooValADev(cooValADev); thrust::device_ptr<valueType> wrapped_xDev(xDev); thrust::device_ptr<valueType> wrapped_bDev(bDev); // Wrap in cusp array1d deviceIndexArrayView rowInds (wrapped_cooRowIndADev, wrapped_cooRowIndADev + nnz); deviceIndexArrayView colInds 
(wrapped_cooColIndADev, wrapped_cooColIndADev + nnz); deviceValueArrayView values (wrapped_cooValADev, wrapped_cooValADev + nnz); deviceValueArrayView x (wrapped_xDev, wrapped_xDev + N); deviceValueArrayView b (wrapped_bDev, wrapped_bDev + N); // Create coo_matrix_view from the 3 array1d views deviceView A(N, N, nnz, rowInds, colInds, values); // Setup a monitor and solve cusp::monitor<valueType> monitor(b, maxItr, relTol, absTol, false); cusp::precond::diagonal<valueType, devMemorySpace> M(A); cusp::krylov::bicgstab(A, x, b, monitor, M); residuals = monitor.residual_norm(); solverItr = monitor.iteration_count(); return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_getMonitor(valueType &res, indexType &nItr) { nItr = solverItr; res = residuals; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_shutdown() { if(hipFree(cooRowIndADev) != hipSuccess)return OPERROR; if(hipFree(cooColIndADev) != hipSuccess)return OPERROR; if(hipFree(cooValADev) != hipSuccess)return OPERROR; if(hipFree(xDev) != hipSuccess)return OPERROR; if(hipFree(bDev) != hipSuccess)return OPERROR; return OPSUCCESS; } /******************************************************/ //External Interfaces / /******************************************************/ extern "C" void* getInstance_cusp_biCGSTAB_solver() { cusp_biCGSTAB_solver *cusp_biCGSTAB_solver_ = new cusp_biCGSTAB_solver(0,0,0.,0.); return static_cast<void *>(cusp_biCGSTAB_solver_); } extern "C" int cusp_biCGSTAB_initDevice_intrf(void *cusp_biCGSTAB_solver_ptr, indexType devID) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_initDevice(devID); } extern "C" int cusp_biCGSTAB_allocDevice_intrf(void *cusp_biCGSTAB_solver_ptr, indexType n, indexType m) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_allocDevice(n,m); } extern "C" int cusp_biCGSTAB_copyH2D_AInds_intrf(void *cusp_biCGSTAB_solver_ptr, indexType *rows, indexType *cols) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyH2D_AInds(rows,cols); } extern "C" int cusp_biCGSTAB_copyH2D_system_intrf(void *cusp_biCGSTAB_solver_ptr,valueType *Avals, valueType *xHost, valueType *bHost) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyH2D_system(Avals, xHost, bHost); } extern "C" int cusp_biCGSTAB_copyD2H_x_intrf(void *cusp_biCGSTAB_solver_ptr,valueType *xHost) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyD2H_x(xHost); } extern "C" int cusp_biCGSTAB_solveDev_system_intrf(void *cusp_biCGSTAB_solver_ptr, valueType relTol, valueType absTol, indexType maxItr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_solveDev_system(relTol, absTol, maxItr); } extern "C" int cusp_biCGSTAB_getMonitor_intrf(void *cusp_biCGSTAB_solver_ptr, valueType &residual, indexType &nItr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_getMonitor(residual, nItr); } extern "C" int cusp_biCGSTAB_shutdown_intrf(void *cusp_biCGSTAB_solver_ptr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_shutdown(); }
461c923af4655dba995d1419b6b52fa964a4d9a0.cu
#include "modcusp_library.h" int cusp_biCGSTAB_solver::cusp_biCGSTAB_initDevice(indexType devID) { int deviceCount = 0; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess) { printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); return OPERROR; } if (deviceCount == 0) { printf("There are no available CUDA device(s). Reverting to a CPU Solver\n"); return NODEVICE; } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); if ( devID >= deviceCount) { printf("Device id=$d not found. Maximum id=%d. Reverting to a CPU solver \n", devID, deviceCount); return NODEVICE; } else { cudaSetDevice(devID); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, devID); printf("\nRunning on device %d: \"%s\"\n", devID, deviceProp.name); char msg[256]; SPRINTF(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); } } return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_allocDevice(indexType n, indexType m) { N = n; nnz = m; if(N <= 0 or nnz <= N) { printf("The size of the coeffcient matrix is not set correctly, N=%d, NNZ=%d\n.", n, m); return OPERROR; } if(cudaMalloc(&cooRowIndADev, nnz*sizeof(indexType)) != cudaSuccess)return OPERROR; if(cudaMalloc(&cooColIndADev, nnz*sizeof(indexType)) != cudaSuccess)return OPERROR; if(cudaMalloc(&cooValADev, nnz*sizeof(valueType)) != cudaSuccess)return OPERROR; if(cudaMalloc(&xDev, N*sizeof(valueType)) != cudaSuccess)return OPERROR; if(cudaMalloc(&bDev, N*sizeof(valueType)) != cudaSuccess)return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyH2D_AInds(indexType *rows, indexType *cols) { if(cudaMemcpy(cooRowIndADev, rows, nnz*sizeof(cooRowIndADev[0]), cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; if(cudaMemcpy(cooColIndADev, cols, nnz*sizeof(cooColIndADev[0]), cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyH2D_system(valueType *Avals, valueType *xHost, valueType *bHost) { if(cudaMemcpy(bDev, bHost, N*sizeof(bDev[0]),cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; if(cudaMemcpy(xDev, xHost, N*sizeof(xDev[0]),cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; if(cudaMemcpy(cooValADev, Avals, nnz*sizeof(cooValADev[0]), cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyD2H_x(valueType *xHost) { if(cudaMemcpy(xHost, xDev, N*sizeof(xHost[0]),cudaMemcpyDeviceToHost) != cudaSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_solveDev_system(valueType relTol, valueType absTol, indexType maxItr) { // Wrap device pointers thrust::device_ptr<indexType> wrapped_cooRowIndADev(cooRowIndADev); thrust::device_ptr<indexType> wrapped_cooColIndADev(cooColIndADev); thrust::device_ptr<valueType> wrapped_cooValADev(cooValADev); thrust::device_ptr<valueType> wrapped_xDev(xDev); thrust::device_ptr<valueType> wrapped_bDev(bDev); // Wrap in cusp array1d deviceIndexArrayView rowInds (wrapped_cooRowIndADev, wrapped_cooRowIndADev + nnz); deviceIndexArrayView colInds (wrapped_cooColIndADev, 
wrapped_cooColIndADev + nnz); deviceValueArrayView values (wrapped_cooValADev, wrapped_cooValADev + nnz); deviceValueArrayView x (wrapped_xDev, wrapped_xDev + N); deviceValueArrayView b (wrapped_bDev, wrapped_bDev + N); // Create coo_matrix_view from the 3 array1d views deviceView A(N, N, nnz, rowInds, colInds, values); // Setup a monitor and solve cusp::monitor<valueType> monitor(b, maxItr, relTol, absTol, false); cusp::precond::diagonal<valueType, devMemorySpace> M(A); cusp::krylov::bicgstab(A, x, b, monitor, M); residuals = monitor.residual_norm(); solverItr = monitor.iteration_count(); return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_getMonitor(valueType &res, indexType &nItr) { nItr = solverItr; res = residuals; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_shutdown() { if(cudaFree(cooRowIndADev) != cudaSuccess)return OPERROR; if(cudaFree(cooColIndADev) != cudaSuccess)return OPERROR; if(cudaFree(cooValADev) != cudaSuccess)return OPERROR; if(cudaFree(xDev) != cudaSuccess)return OPERROR; if(cudaFree(bDev) != cudaSuccess)return OPERROR; return OPSUCCESS; } /******************************************************/ //External Interfaces / /******************************************************/ extern "C" void* getInstance_cusp_biCGSTAB_solver() { cusp_biCGSTAB_solver *cusp_biCGSTAB_solver_ = new cusp_biCGSTAB_solver(0,0,0.,0.); return static_cast<void *>(cusp_biCGSTAB_solver_); } extern "C" int cusp_biCGSTAB_initDevice_intrf(void *cusp_biCGSTAB_solver_ptr, indexType devID) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_initDevice(devID); } extern "C" int cusp_biCGSTAB_allocDevice_intrf(void *cusp_biCGSTAB_solver_ptr, indexType n, indexType m) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_allocDevice(n,m); } extern "C" int cusp_biCGSTAB_copyH2D_AInds_intrf(void *cusp_biCGSTAB_solver_ptr, indexType *rows, indexType *cols) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyH2D_AInds(rows,cols); } extern "C" int cusp_biCGSTAB_copyH2D_system_intrf(void *cusp_biCGSTAB_solver_ptr,valueType *Avals, valueType *xHost, valueType *bHost) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyH2D_system(Avals, xHost, bHost); } extern "C" int cusp_biCGSTAB_copyD2H_x_intrf(void *cusp_biCGSTAB_solver_ptr,valueType *xHost) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyD2H_x(xHost); } extern "C" int cusp_biCGSTAB_solveDev_system_intrf(void *cusp_biCGSTAB_solver_ptr, valueType relTol, valueType absTol, indexType maxItr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_solveDev_system(relTol, absTol, maxItr); } extern "C" int cusp_biCGSTAB_getMonitor_intrf(void *cusp_biCGSTAB_solver_ptr, valueType &residual, indexType &nItr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_getMonitor(residual, nItr); } extern "C" int cusp_biCGSTAB_shutdown_intrf(void *cusp_biCGSTAB_solver_ptr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_shutdown(); }
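A sketch of one plausible call sequence through the extern "C" interface above, for example from a C++ test harness; it is not part of the original file. indexType, valueType and the OPSUCCESS/OPERROR/NODEVICE codes are assumed to come from modcusp_library.h, and the 3x3 tridiagonal COO system is made-up example data (cusp_biCGSTAB_allocDevice requires nnz > N).

// Hypothetical driver for the biCGSTAB wrapper above (illustration only).
#include <cstdio>
#include "modcusp_library.h"

int example_bicgstab_solve()
{
    void* slv = getInstance_cusp_biCGSTAB_solver();
    if (cusp_biCGSTAB_initDevice_intrf(slv, 0) != OPSUCCESS) return 1;

    const indexType N = 3, nnz = 7;               // simple tridiagonal test matrix
    indexType rows[] = {0, 0, 1, 1, 1, 2, 2};
    indexType cols[] = {0, 1, 0, 1, 2, 1, 2};
    valueType vals[] = {4, -1, -1, 4, -1, -1, 4};
    valueType b[]    = {1, 2, 3};
    valueType x[]    = {0, 0, 0};                 // initial guess

    if (cusp_biCGSTAB_allocDevice_intrf(slv, N, nnz) != OPSUCCESS) return 1;
    cusp_biCGSTAB_copyH2D_AInds_intrf(slv, rows, cols);
    cusp_biCGSTAB_copyH2D_system_intrf(slv, vals, x, b);
    cusp_biCGSTAB_solveDev_system_intrf(slv, 1e-8, 1e-12, 1000);

    valueType res; indexType itr;
    cusp_biCGSTAB_getMonitor_intrf(slv, res, itr);
    cusp_biCGSTAB_copyD2H_x_intrf(slv, x);        // solution back on the host
    printf("iterations = %d, residual = %g\n", (int)itr, (double)res);

    return cusp_biCGSTAB_shutdown_intrf(slv) == OPSUCCESS ? 0 : 1;
}

Note that getInstance_cusp_biCGSTAB_solver allocates the solver object with new and the interface exposes no matching destructor, so the instance itself is never freed; cusp_biCGSTAB_shutdown only releases the device buffers.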
219ef7118e6c7c8ea2bf97e1b8fd3ac88f98a1e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/roi_align_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) template <class T> __device__ T BilinearInterpolate(const T* input_data, const int height, const int width, T y, T x) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; } y = y <= 0 ? 0 : y; x = x <= 0 ? 0 : x; int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = static_cast<T>(y_low); } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = static_cast<T>(x_low); } else { x_high = x_low + 1; } T ly = y - y_low, lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; T v1 = input_data[y_low * width + x_low]; T v2 = input_data[y_low * width + x_high]; T v3 = input_data[y_high * width + x_low]; T v4 = input_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <class T> __device__ void BilinearInterpolateGradient(const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return; } y = y <= 0 ? 0 : y; x = x <= 0 ? 0 : x; *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = static_cast<T>(*y_low); } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = static_cast<T>(*x_low); } else { *x_high = *x_low + 1; } T ly = y - *y_low, lx = x - *x_low; T hy = 1. - ly, hx = 1. 
- lx; *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <class T> __global__ void GPUROIAlignForward( const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, int* roi_batch_id_data, T* output_data) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const T* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; T roi_xmin = offset_input_rois[0] * spatial_scale; T roi_ymin = offset_input_rois[1] * spatial_scale; T roi_xmax = offset_input_rois[2] * spatial_scale; T roi_ymax = offset_input_rois[3] * spatial_scale; T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.)); T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.)); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_input_data = input_data + (roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; T output_val = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_ymin + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_xmin + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = BilinearInterpolate(offset_input_data, height, width, y, x); output_val += val; } } output_val /= count; output_data[i] = output_val; } } template <typename T> __global__ void GPUROIAlignBackward(const int nthreads, const T* input_rois, const T* out_grad, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, int* roi_batch_id_data, T* input_grad) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const T* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; T roi_xmin = offset_input_rois[0] * spatial_scale; T roi_ymin = offset_input_rois[1] * spatial_scale; T roi_xmax = offset_input_rois[2] * spatial_scale; T roi_ymax = offset_input_rois[3] * spatial_scale; T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.)); T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.)); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_input_grad = input_grad + (roi_batch_ind * channels + c) * height * width; const T* offset_out_grad = out_grad + (n * channels + c) * pooled_height * pooled_width; const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw]; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_ymin + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_xmin + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1 = 0, w2 = 0, w3 = 0, w4 = 0; int x_low = -1, x_high = -1, y_low = -1, y_high = -1; BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high); T diff1 = out_grad_this_bin * w1 / count; T diff2 = out_grad_this_bin * w2 / count; T diff3 = out_grad_this_bin * w3 / count; T diff4 = out_grad_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low, diff1); platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high, diff2); platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low, diff3); platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high, diff4); } } } } } template <typename Place, typename T> class GPUROIAlignOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto sampling_ratio = ctx.Attr<int>("sampling_ratio"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, "The rois_batch_size and imgs batch_size must be the same."); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, "The rois_num from input and lod must be the same."); for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } Tensor roi_batch_id_list_gpu; framework::TensorCopySync(roi_batch_id_list, ctx.GetPlace(), &roi_batch_id_list_gpu); hipLaunchKernelGGL(( GPUROIAlignForward< T>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, roi_batch_id_list_gpu.data<int>(), out->mutable_data<T>(ctx.GetPlace())); } }; template <typename Place, typename T> class GPUROIAlignGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = 
ctx.Attr<float>("spatial_scale"); auto sampling_ratio = ctx.Attr<int>("sampling_ratio"); int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (!in_grad) { return; } Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } Tensor roi_batch_id_list_gpu; framework::TensorCopySync(roi_batch_id_list, ctx.GetPlace(), &roi_batch_id_list_gpu); in_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<Place, T> set_zero; set_zero(ctx.cuda_device_context(), in_grad, static_cast<T>(0)); int output_grad_size = out_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { hipLaunchKernelGGL(( GPUROIAlignBackward< T>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, roi_batch_id_list_gpu.data<int>(), in_grad->mutable_data<T>(ctx.GetPlace())); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( roi_align, ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( roi_align_grad, ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
219ef7118e6c7c8ea2bf97e1b8fd3ac88f98a1e7.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/roi_align_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) template <class T> __device__ T BilinearInterpolate(const T* input_data, const int height, const int width, T y, T x) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; } y = y <= 0 ? 0 : y; x = x <= 0 ? 0 : x; int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = static_cast<T>(y_low); } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = static_cast<T>(x_low); } else { x_high = x_low + 1; } T ly = y - y_low, lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; T v1 = input_data[y_low * width + x_low]; T v2 = input_data[y_low * width + x_high]; T v3 = input_data[y_high * width + x_low]; T v4 = input_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <class T> __device__ void BilinearInterpolateGradient(const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return; } y = y <= 0 ? 0 : y; x = x <= 0 ? 0 : x; *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = static_cast<T>(*y_low); } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = static_cast<T>(*x_low); } else { *x_high = *x_low + 1; } T ly = y - *y_low, lx = x - *x_low; T hy = 1. - ly, hx = 1. 
- lx; *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <class T> __global__ void GPUROIAlignForward( const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, int* roi_batch_id_data, T* output_data) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const T* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; T roi_xmin = offset_input_rois[0] * spatial_scale; T roi_ymin = offset_input_rois[1] * spatial_scale; T roi_xmax = offset_input_rois[2] * spatial_scale; T roi_ymax = offset_input_rois[3] * spatial_scale; T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.)); T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.)); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_input_data = input_data + (roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; T output_val = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_ymin + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_xmin + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = BilinearInterpolate(offset_input_data, height, width, y, x); output_val += val; } } output_val /= count; output_data[i] = output_val; } } template <typename T> __global__ void GPUROIAlignBackward(const int nthreads, const T* input_rois, const T* out_grad, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, int* roi_batch_id_data, T* input_grad) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const T* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; T roi_xmin = offset_input_rois[0] * spatial_scale; T roi_ymin = offset_input_rois[1] * spatial_scale; T roi_xmax = offset_input_rois[2] * spatial_scale; T roi_ymax = offset_input_rois[3] * spatial_scale; T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.)); T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.)); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_input_grad = input_grad + (roi_batch_ind * channels + c) * height * width; const T* offset_out_grad = out_grad + (n * channels + c) * pooled_height * pooled_width; const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw]; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_ymin + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_xmin + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1 = 0, w2 = 0, w3 = 0, w4 = 0; int x_low = -1, x_high = -1, y_low = -1, y_high = -1; BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high); T diff1 = out_grad_this_bin * w1 / count; T diff2 = out_grad_this_bin * w2 / count; T diff3 = out_grad_this_bin * w3 / count; T diff4 = out_grad_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low, diff1); platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high, diff2); platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low, diff3); platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high, diff4); } } } } } template <typename Place, typename T> class GPUROIAlignOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto sampling_ratio = ctx.Attr<int>("sampling_ratio"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, "The rois_batch_size and imgs batch_size must be the same."); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, "The rois_num from input and lod must be the same."); for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } Tensor roi_batch_id_list_gpu; framework::TensorCopySync(roi_batch_id_list, ctx.GetPlace(), &roi_batch_id_list_gpu); GPUROIAlignForward< T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, roi_batch_id_list_gpu.data<int>(), out->mutable_data<T>(ctx.GetPlace())); } }; template <typename Place, typename T> class GPUROIAlignGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto sampling_ratio = 
ctx.Attr<int>("sampling_ratio"); int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (!in_grad) { return; } Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } Tensor roi_batch_id_list_gpu; framework::TensorCopySync(roi_batch_id_list, ctx.GetPlace(), &roi_batch_id_list_gpu); in_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<Place, T> set_zero; set_zero(ctx.cuda_device_context(), in_grad, static_cast<T>(0)); int output_grad_size = out_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { GPUROIAlignBackward< T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, roi_batch_id_list_gpu.data<int>(), in_grad->mutable_data<T>(ctx.GetPlace())); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( roi_align, ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( roi_align_grad, ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
014bf2351e3246f09b94ffb065f37c3cb1289a53.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string> #include <hip/hip_runtime.h> #include <gdal_priv.h> #include <cpl_conv.h> #include "GDALRead.h" #include "GDALWrite.h" #include "basestruct.h" #include "operator.h" #include "timer.h" #include "utils.h" #include <fstream> #include <iostream> #include <map> #include <set> #include <vector> using namespace std; double LineCCLNoSplit(int *allData, int width, int height, dim3 blockSize, dim3 gridSize, int * labelMap, int *pixNum, int *perimeter, int nodata); // double LineSplitCCL2(int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end); // double LineSplitCCL(int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end); double LineSplitCCL(int** h_subDataNextBlock, dataBlock &dataBlockNext, CGDALRead* pread, int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end); double LineSplitCCL2(int** h_subDataNextBlock, dataBlock* dataBlockArray, int iBlock, CGDALRead* pread, int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end); void loadBlockData(int width, int data_height, int data_start, int** h_subData, CGDALRead* pread) { size_t nBytes_data = data_height * width * sizeof(int); checkCudaErrors(hipHostMalloc((void **)h_subData, nBytes_data, hipHostMallocDefault)); memset(*h_subData, 0, data_height * width); switch (pread->datatype()) { case GDT_Byte: { pread->readDataBlock<unsigned char>(width,data_height,0,data_start, *h_subData); break; } case GDT_UInt16: { pread->readDataBlock<unsigned short>(width,data_height,0,data_start,*h_subData); break; } case GDT_Int16: { pread->readDataBlock<short>(width,data_height,0,data_start,*h_subData); break; } case GDT_UInt32: { pread->readDataBlock<unsigned int>(width,data_height,0,data_start,*h_subData); break; } case GDT_Int32: { pread->readDataBlock<int>(width,data_height,0,data_start,*h_subData); break; } case GDT_Float32: { float* allData = pread->transforData<float>(); break; } case GDT_Float64: { double* allData = pread->transforData<double>(); break; } default: { cout << "transfor data type false!" 
<< endl; } } } int getDevideInfo(int width, int height, dataBlock** dataBlockArray) { int maxnum; // number of pixels that can be read in at once size_t freeGPU, totalGPU; hipMemGetInfo(&freeGPU, &totalGPU);//size_t* free, size_t* total cout << "(free,total)" << freeGPU << "," << totalGPU << endl; maxnum = (freeGPU) / (sizeof(int)* 6);// each pixel needs roughly 6 intermediate variables, all of type int int sub_height = maxnum / width - 5; // height of each block (sub_height) int blockNum = height / sub_height + 1; // total number of blocks *dataBlockArray = new dataBlock[blockNum]; int subIdx = 0; for (int height_all = 0; height_all < height; height_all += sub_height) { int task_start = subIdx*sub_height; int task_end; if ((subIdx + 1)*sub_height - height <= 0) task_end = (subIdx + 1)*sub_height - 1; else task_end = height - 1; int data_start, data_end; if (task_start - 1 <= 0) data_start = 0; else data_start = task_start - 1; if (task_end + 1 >= height - 1) data_end = height - 1; else data_end = task_end + 1; int data_height = data_end - data_start + 1; int task_height = task_end - task_start + 1; (*dataBlockArray)[subIdx].dataStart = data_start; (*dataBlockArray)[subIdx].dataEnd = data_end; (*dataBlockArray)[subIdx].taskStart = task_start; (*dataBlockArray)[subIdx].taskEnd = task_end; (*dataBlockArray)[subIdx].subTaskHeight = task_height; (*dataBlockArray)[subIdx].subDataHeight = data_height; subIdx++; } return blockNum; } // the GPU processes the previous block's task while the next block's data is read void exeGPUAndMemcpy( CGDALRead* pread, std::map<int, Patch> &mapPatch, UF &Quf, bool &split) { int devicesCount; hipGetDeviceCount(&devicesCount); hipSetDevice(0); int width = pread->cols(); int height = pread->rows(); dataBlock* dataBlockArray = NULL; //blockInfo int* mergeStructArray = (int* )malloc(sizeof(int) * width * 4 * width); //mergeInfo int blockNum; blockNum = getDevideInfo(width, height, &dataBlockArray);//get information of all the blocks,allocate memory for mergeArray int iBlock; int2 blockSize; blockSize.x = 32; blockSize.y = 16; dim3 blockDim(blockSize.x, blockSize.y, 1); dim3 gridDim((pread->cols() + blockSize.x - 1) / blockSize.x, (pread->rows() + blockSize.y - 1) / blockSize.y, 1); int* h_subDataNextBlock = NULL;// host-side buffer that receives the next block hipStream_t stream[blockNum]; for (int i = 0; i < blockNum; ++i) { checkCudaErrors(hipStreamCreate(&stream[i])); } for(iBlock = 0; iBlock < blockNum; iBlock++) { // current block info int data_start = dataBlockArray[iBlock].dataStart; int data_end = dataBlockArray[iBlock].dataEnd; int task_start = dataBlockArray[iBlock].taskStart; int task_end = dataBlockArray[iBlock].taskEnd; int data_height = dataBlockArray[iBlock].subDataHeight; int task_height = dataBlockArray[iBlock].subTaskHeight; size_t nBytes_task = data_height * width * sizeof(int); int* h_labelMap; checkCudaErrors(hipHostMalloc((void **)&h_labelMap, nBytes_task, hipHostMallocDefault)); memset(h_labelMap, 0, data_height * width); int* h_PixelNum; checkCudaErrors(hipHostMalloc((void **)&h_PixelNum, nBytes_task, hipHostMallocDefault)); memset(h_PixelNum, 0, data_height * width); int* h_Peri; checkCudaErrors(hipHostMalloc((void **)&h_Peri, nBytes_task, hipHostMallocDefault)); memset(h_Peri, 0, data_height * width); std::map<int, Patch> sub_mapPatch; if(iBlock == 0) { int* h_subData = NULL; loadBlockData(width, data_height, data_start, &h_subData, pread); if (!dataBlockArray[iBlock].isSplit()) // no need to split { split = 0; cout << "do not need devide the picture" << endl; LineCCLNoSplit(h_subData, width, height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue()); createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, mapPatch, data_start, data_end, task_start, task_end); } else {
split = 1; LineSplitCCL(&h_subDataNextBlock, dataBlockArray[iBlock+1], pread ,h_subData, width, data_height, task_height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue(), task_start*width, data_start, data_end, task_start, task_end); createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, sub_mapPatch, data_start, data_end, task_start, task_end); mergePatchMap(mapPatch, sub_mapPatch); memcpy(mergeStructArray+iBlock, h_labelMap + (task_height - 1) * width , sizeof(int) * width);//mlastRowLabel } checkCudaErrors(hipHostFree(h_subData)); } else { int* h_subData = h_subDataNextBlock;// h_subDataNextBlock = NULL; LineSplitCCL2(&h_subDataNextBlock, dataBlockArray, iBlock, pread, h_subData, width, data_height,task_height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue(), task_start*width, data_start, data_end, task_start, task_end); createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, sub_mapPatch, data_start, data_end, task_start, task_end); mergePatchMap(mapPatch, sub_mapPatch); memcpy(mergeStructArray+(iBlock-1)*width*4+width, h_labelMap, sizeof(int)*width);//mfirstRowLabel memcpy(mergeStructArray+(iBlock-1)*width*4+width*2, h_subData, sizeof(int)*width);//mh_subDataFirst memcpy(mergeStructArray+(iBlock-1)*width*4+width*3, h_subData+width, sizeof(int)*width);//mh_subDataSecond if(iBlock != blockNum - 1) { memcpy(mergeStructArray+iBlock*width*4, h_labelMap + width*(task_height - 1), sizeof(int)*width);//mlastRowLabel } checkCudaErrors(hipHostFree(h_subData)); } checkCudaErrors(hipHostFree(h_labelMap)); checkCudaErrors(hipHostFree(h_PixelNum)); checkCudaErrors(hipHostFree(h_Peri)); } MergePatchArray(width, blockNum, (int)pread->invalidValue(), mergeStructArray, Quf); // delete[] mergeStructArray; // mergeStructArray = NULL; free(mergeStructArray); delete[] dataBlockArray; dataBlockArray = NULL; for (int i = 0; i < blockNum; ++i) { checkCudaErrors(hipStreamDestroy(stream[i])); } } int main(int argc, char *argv[]) { if (argc < 2) { cout << "please input I/O filename. exit." << endl; return -1; } GDALAllRegister(); CPLSetConfigOption("GDAL_FILENAME_IS_UTF8", "NO"); CGDALRead* pread = new CGDALRead; // load all data to unsigned char array when read data; if (!pread->loadMetaData(argv[1])) { cout << "load error!" 
<< endl; } cout << "rows:" << pread->rows() << endl; cout << "cols:" << pread->cols() << endl; cout << "bandnum:" << pread->bandnum() << endl; cout << "datalength:" << pread->datalength() << endl; cout << "invalidValue:" << pread->invalidValue() << endl; cout << "datatype:" << GDALGetDataTypeName(pread->datatype()) << endl; cout << "projectionRef:" << pread->projectionRef() << endl; cout << "perPixelSize:" << pread->perPixelSize() << endl; int imgSize = pread->rows()*pread->cols(); // int mergeStruct::width = pread->cols(); // std::map<int, Patch> mapPatch; UF Quf(imgSize);//QuickUnion bool split = 0; exeGPUAndMemcpy(pread, mapPatch, Quf, split); if(split) outputPatchToFile(mapPatch, Quf, "LineUF"); else outputPatchToFile(mapPatch, "LineUF"); return 0; } // void devideImg( CGDALRead* pread, std::map<int, Patch> &mapPatch, UF &Quf, bool &split) // { // int devicesCount; // hipGetDeviceCount(&devicesCount); // hipSetDevice(0); // int width = pread->cols(); // int height = pread->rows(); // int maxnum; // // size_t freeGPU, totalGPU; // hipMemGetInfo(&freeGPU, &totalGPU);//size_t* free, size_t* total // cout << "(free,total)" << freeGPU << "," << totalGPU << endl; // maxnum = (freeGPU) / (sizeof(int)* 6);//pixel6int // int sub_height = maxnum / width - 5; //sub_height // int subIdx = 0; // int* lastRowLabel = new int[width]; // int* firstRowLabel = new int[width]; // int *Meg = NULL; // // int Merge_count = 0; // // int2 blockSize; blockSize.x = 32; blockSize.y = 16; // dim3 blockDim(blockSize.x, blockSize.y, 1); // dim3 gridDim((pread->cols() + blockSize.x - 1) / blockSize.x, (pread->rows() + blockSize.y - 1) / blockSize.y, 1); // { // // //the first block----------------------start------------------------------------------ // // //------------------------------------------------------------------------------------ // // //------------------------------------------------------------------------------------ // // int task_start0 = 0; // // int task_end0; // // if ((subIdx + 1)*sub_height - height <= 0) // // task_end0 = (subIdx + 1)*sub_height - 1; // // else // // task_end0 = height - 1; // // int data_start0, data_end0; // // if (task_start0 - 1 <= 0) // // data_start0 = 0; // // else // // data_start0 = task_start0 - 1; // // if (task_end0 + 1 >= height - 1) // // data_end0 = height - 1; // // else // // data_end0 = task_end0 + 1; // // int data_height0 = data_end0 - data_start0 + 1; // // int task_height0 = task_end0 - task_start0 + 1; // // int* h_subData0 = NULL; // // loadBlockData(width, data_height0, data_start0, &h_subData0, pread); // // size_t nBytes_task0 = task_height0 * width * sizeof(int); // // int* h_labelMap0; // // checkCudaErrors(hipHostMalloc((void **)&h_labelMap0, nBytes_task0, hipHostMallocDefault)); // // memset(h_labelMap0, 0, task_height0 * width); // // int* h_PixelNum0; // // checkCudaErrors(hipHostMalloc((void **)&h_PixelNum0, nBytes_task0, hipHostMallocDefault)); // // memset(h_PixelNum0, 0, task_height0 * width); // // int* h_Peri0; // // checkCudaErrors(hipHostMalloc((void **)&h_Peri0, nBytes_task0, hipHostMallocDefault)); // // memset(h_Peri0, 0, task_height0 * width); // // cout << "subIdx ,data_height:" << subIdx << "," << data_height0 << endl; // // cout << "data_start:" << data_start0 << endl; // // cout << "data___end:" << data_end0 << endl; // // cout << "task_start:" << task_start0 << endl; // // cout << "task___end:" << task_end0 << endl; // // cout << "-------------------------------------------" << endl; // // std::map<int, Patch> sub_mapPatch0; // 
// if ((task_start0 == data_start0) && (task_end0 == data_end0)) // // { // // split = 0; // // cout << "do not need devide the picture" << endl; // // LineCCLNoSplit(h_subData0, width, height, blockDim, gridDim, h_labelMap0, h_PixelNum0, h_Peri0, (int)pread->invalidValue()); // // createPatchMap(h_subData0, h_labelMap0, h_PixelNum0, h_Peri0, data_height0, width, mapPatch, data_start0, data_end0, task_start0, task_end0); // // } // // else // // { // // split = 1; // // if (task_start == 0) // // { // // LineSplitCCL(h_subData0, width, data_height0, task_height0, blockDim, gridDim, h_labelMap0, h_PixelNum0, h_Peri0, (int)pread->invalidValue(), task_start0*width, data_start0, data_end0, task_start0, task_end0); // // createPatchMap(h_subData0, h_labelMap0, h_PixelNum0, h_Peri0, data_height0, width, sub_mapPatch0, data_start0, data_end0, task_start0, task_end0); // // mergePatchMap(mapPatch, sub_mapPatch0); // // memcpy(lastRowLabel , h_labelMap0 + (task_height0-1) * width , sizeof(int)*width); // // } // // } // // //the first block----------------------end------------------------------------------ // // //---------------------------------------------------------------------------------- // // //---------------------------------------------------------------------------------- // } // for (int height_all = 0; height_all < height; height_all += sub_height) // { // int task_start = subIdx*sub_height; // int task_end; // if ((subIdx + 1)*sub_height - height <= 0) // task_end = (subIdx + 1)*sub_height - 1; // else // task_end = height - 1; // int data_start, data_end; // if (task_start - 1 <= 0) // data_start = 0; // else // data_start = task_start - 1; // if (task_end + 1 >= height - 1) // data_end = height - 1; // else // data_end = task_end + 1; // int data_height = data_end - data_start + 1; // int task_height = task_end - task_start + 1; // int* h_subData = NULL; // loadBlockData(width, data_height, data_start, &h_subData, pread); // size_t nBytes_task = task_height * width * sizeof(int); // int* h_labelMap; // checkCudaErrors(hipHostMalloc((void **)&h_labelMap, nBytes_task, hipHostMallocDefault)); // memset(h_labelMap, 0, task_height * width); // int* h_PixelNum; // checkCudaErrors(hipHostMalloc((void **)&h_PixelNum, nBytes_task, hipHostMallocDefault)); // memset(h_PixelNum, 0, task_height * width); // int* h_Peri; // checkCudaErrors(hipHostMalloc((void **)&h_Peri, nBytes_task, hipHostMallocDefault)); // memset(h_Peri, 0, task_height * width); // cout << "subIdx ,data_height:" << subIdx << "," << data_height << endl; // cout << "data_start:" << data_start << endl; // cout << "data___end:" << data_end << endl; // cout << "task_start:" << task_start << endl; // cout << "task___end:" << task_end << endl; // cout << "-------------------------------------------" << endl; // //h_subData, // std::map<int, Patch> sub_mapPatch; // if ((task_start == data_start) && (task_end == data_end)) // { // split = 0; // cout << "do not need devide the picture" << endl; // LineCCLNoSplit(h_subData, width, height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue()); // createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, mapPatch, data_start, data_end, task_start, task_end); // } // else // { // split = 1; // if (task_start == 0) // { // LineSplitCCL(h_subData, width, data_height, task_height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue(), task_start*width, data_start, data_end, task_start, task_end); // 
createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, sub_mapPatch, data_start, data_end, task_start, task_end); // mergePatchMap(mapPatch, sub_mapPatch); // memcpy(lastRowLabel , h_labelMap + (task_height-1) * width , sizeof(int)*width); // } // else // { // // // //mapPatch---patchvalue,area,peri // //Quf--- // LineSplitCCL2(h_subData, width, data_height,task_height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue(), task_start*width, data_start, data_end, task_start, task_end); // createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, sub_mapPatch, data_start, data_end, task_start, task_end); // mergePatchMap(mapPatch, sub_mapPatch); // memcpy(firstRowLabel, h_labelMap, sizeof(int)*width);//nextTaskStartLabel // int *h_subDataFirst = new int[width]; // int *h_subDataSecond = new int[width]; // memcpy(h_subDataFirst, h_subData, sizeof(int)*width);//nextTaskStartLabel // memcpy(h_subDataSecond, h_subData+width, sizeof(int)*width);//nextTaskStartLabel // //lastRowLabel // //firstRowLabel,MegUnion-find // Meg = (int *)malloc(sizeof(int) * width * 2); // Merge_count = findMerge(width,(int)pread->invalidValue(), Meg, h_subDataFirst,h_subDataSecond,lastRowLabel,firstRowLabel); // MergePatch( Meg, Merge_count, Quf); // memcpy(lastRowLabel, h_labelMap + width*(task_height - 1), sizeof(int)*width);// // delete[] Meg; // Meg = NULL; // Merge_count = 0; // } // } // checkCudaErrors(hipHostFree(h_subData)); // checkCudaErrors(hipHostFree(h_labelMap)); // checkCudaErrors(hipHostFree(h_PixelNum)); // checkCudaErrors(hipHostFree(h_Peri)); // subIdx++; // } // } // int main(int argc, char *argv[]) // { // if (argc < 2) // { // cout << "please input I/O filename. exit." << endl; // return -1; // } // GDALAllRegister(); // CPLSetConfigOption("GDAL_FILENAME_IS_UTF8", "NO"); // CGDALRead* pread = new CGDALRead; // // load all data to unsigned char array when read data; // if (!pread->loadMetaData(argv[1])) // { // cout << "load error!" << endl; // } // cout << "rows:" << pread->rows() << endl; // cout << "cols:" << pread->cols() << endl; // cout << "bandnum:" << pread->bandnum() << endl; // cout << "datalength:" << pread->datalength() << endl; // cout << "invalidValue:" << pread->invalidValue() << endl; // cout << "datatype:" << GDALGetDataTypeName(pread->datatype()) << endl; // cout << "projectionRef:" << pread->projectionRef() << endl; // cout << "perPixelSize:" << pread->perPixelSize() << endl; // int imgSize = pread->rows()*pread->cols(); // // // std::map<int, Patch> mapPatch; // UF Quf(imgSize);//QuickUnion // bool split = 0; // devideImg(pread, mapPatch, Quf, split); // if(split) // outputPatchToFile(mapPatch, Quf, "LineUF"); // else // outputPatchToFile(mapPatch, "LineUF"); // return 0; // } // int main() // { // int imgSize = 25; // // int array[25] = { 1, 1, 2, 2, 2, 1, 2, 2, 3, 2, 1, 2, 1, 3, 2, 2, 1, 3, 3, 3, 2, 3, 3, 3, 3 }; // int array[25] = { 1, 3, 3, 3, 3, // 1, 3, 3, 1, 3, // 1, 2, 1, 3, 2, // 2, 1, 3, 2, 3, // 1, 2, 2, 3, 2 }; // int* AllDataHost = new int[25]; // for (int i = 0; i < 25; i++) // { // AllDataHost[i] = array[i]; // } // std::map<int, Patch> mapPatch; // UF Quf(imgSize);//QuickUnion // // vector<set<int> >mergeSet; // PRead *pread = new PRead(5, 5, 0); // devideImg1(AllDataHost, pread, mapPatch,Quf); // outputPatchToFile(mapPatch,Quf, "LineUF"); // return 0; // }
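// A minimal sketch of the stripe arithmetic used by getDevideInfo in the file above
// (and in the CUDA original that follows): each block owns `sub_height` "task" rows,
// and its "data" range pads that by a one-row halo clamped to the image. The type and
// function names here are illustrative, not taken from the file.
#include <algorithm>

struct Stripe { int task_start, task_end, data_start, data_end; };

inline Stripe make_stripe(int idx, int sub_height, int height) {
  Stripe s;
  s.task_start = idx * sub_height;
  s.task_end   = std::min((idx + 1) * sub_height - 1, height - 1);
  s.data_start = std::max(s.task_start - 1, 0);         // halo row above
  s.data_end   = std::min(s.task_end + 1, height - 1);  // halo row below
  return s;
}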
014bf2351e3246f09b94ffb065f37c3cb1289a53.cu
#include <stdio.h> #include <stdlib.h> #include <string> #include <cuda.h> #include <gdal_priv.h> #include <cpl_conv.h> #include "GDALRead.h" #include "GDALWrite.h" #include "basestruct.h" #include "operator.h" #include "timer.h" #include "utils.h" #include <fstream> #include <iostream> #include <map> #include <set> #include <vector> using namespace std; double LineCCLNoSplit(int *allData, int width, int height, dim3 blockSize, dim3 gridSize, int * labelMap, int *pixNum, int *perimeter, int nodata); // double LineSplitCCL2(int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end); // double LineSplitCCL(int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end); double LineSplitCCL(int** h_subDataNextBlock, dataBlock &dataBlockNext, CGDALRead* pread, int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end); double LineSplitCCL2(int** h_subDataNextBlock, dataBlock* dataBlockArray, int iBlock, CGDALRead* pread, int *allData, int width, int data_height, int task_height, dim3 blockSize, dim3 gridSize, int *h_labelMap, int *pixNum, int *perimeter, int nodata, int labelStart, int data_start, int data_end, int task_start, int task_end); void loadBlockData(int width, int data_height, int data_start, int** h_subData, CGDALRead* pread) { size_t nBytes_data = data_height * width * sizeof(int); checkCudaErrors(cudaHostAlloc((void **)h_subData, nBytes_data, cudaHostAllocDefault)); memset(*h_subData, 0, data_height * width); switch (pread->datatype()) { case GDT_Byte: { pread->readDataBlock<unsigned char>(width,data_height,0,data_start, *h_subData); break; } case GDT_UInt16: { pread->readDataBlock<unsigned short>(width,data_height,0,data_start,*h_subData); break; } case GDT_Int16: { pread->readDataBlock<short>(width,data_height,0,data_start,*h_subData); break; } case GDT_UInt32: { pread->readDataBlock<unsigned int>(width,data_height,0,data_start,*h_subData); break; } case GDT_Int32: { pread->readDataBlock<int>(width,data_height,0,data_start,*h_subData); break; } case GDT_Float32: { float* allData = pread->transforData<float>(); break; } case GDT_Float64: { double* allData = pread->transforData<double>(); break; } default: { cout << "transfor data type false!" 
<< endl; } } } int getDevideInfo(int width, int height, dataBlock** dataBlockArray) { int maxnum; // number of pixels that can be read in at once size_t freeGPU, totalGPU; cudaMemGetInfo(&freeGPU, &totalGPU);//size_t* free, size_t* total cout << "(free,total)" << freeGPU << "," << totalGPU << endl; maxnum = (freeGPU) / (sizeof(int)* 6);// each pixel needs roughly 6 intermediate variables, all of type int int sub_height = maxnum / width - 5; // height of each block (sub_height) int blockNum = height / sub_height + 1; // total number of blocks *dataBlockArray = new dataBlock[blockNum]; int subIdx = 0; for (int height_all = 0; height_all < height; height_all += sub_height) { int task_start = subIdx*sub_height; int task_end; if ((subIdx + 1)*sub_height - height <= 0) task_end = (subIdx + 1)*sub_height - 1; else task_end = height - 1; int data_start, data_end; if (task_start - 1 <= 0) data_start = 0; else data_start = task_start - 1; if (task_end + 1 >= height - 1) data_end = height - 1; else data_end = task_end + 1; int data_height = data_end - data_start + 1; int task_height = task_end - task_start + 1; (*dataBlockArray)[subIdx].dataStart = data_start; (*dataBlockArray)[subIdx].dataEnd = data_end; (*dataBlockArray)[subIdx].taskStart = task_start; (*dataBlockArray)[subIdx].taskEnd = task_end; (*dataBlockArray)[subIdx].subTaskHeight = task_height; (*dataBlockArray)[subIdx].subDataHeight = data_height; subIdx++; } return blockNum; } // the GPU processes the previous block's task while the next block's data is read void exeGPUAndMemcpy( CGDALRead* pread, std::map<int, Patch> &mapPatch, UF &Quf, bool &split) { int devicesCount; cudaGetDeviceCount(&devicesCount); cudaSetDevice(0); int width = pread->cols(); int height = pread->rows(); dataBlock* dataBlockArray = NULL; //blockInfo int* mergeStructArray = (int* )malloc(sizeof(int) * width * 4 * width); //mergeInfo int blockNum; blockNum = getDevideInfo(width, height, &dataBlockArray);//get information of all the blocks,allocate memory for mergeArray int iBlock; int2 blockSize; blockSize.x = 32; blockSize.y = 16; dim3 blockDim(blockSize.x, blockSize.y, 1); dim3 gridDim((pread->cols() + blockSize.x - 1) / blockSize.x, (pread->rows() + blockSize.y - 1) / blockSize.y, 1); int* h_subDataNextBlock = NULL;// host-side buffer that receives the next block cudaStream_t stream[blockNum]; for (int i = 0; i < blockNum; ++i) { checkCudaErrors(cudaStreamCreate(&stream[i])); } for(iBlock = 0; iBlock < blockNum; iBlock++) { // current block info int data_start = dataBlockArray[iBlock].dataStart; int data_end = dataBlockArray[iBlock].dataEnd; int task_start = dataBlockArray[iBlock].taskStart; int task_end = dataBlockArray[iBlock].taskEnd; int data_height = dataBlockArray[iBlock].subDataHeight; int task_height = dataBlockArray[iBlock].subTaskHeight; size_t nBytes_task = data_height * width * sizeof(int); int* h_labelMap; checkCudaErrors(cudaHostAlloc((void **)&h_labelMap, nBytes_task, cudaHostAllocDefault)); memset(h_labelMap, 0, data_height * width); int* h_PixelNum; checkCudaErrors(cudaHostAlloc((void **)&h_PixelNum, nBytes_task, cudaHostAllocDefault)); memset(h_PixelNum, 0, data_height * width); int* h_Peri; checkCudaErrors(cudaHostAlloc((void **)&h_Peri, nBytes_task, cudaHostAllocDefault)); memset(h_Peri, 0, data_height * width); std::map<int, Patch> sub_mapPatch; if(iBlock == 0) { int* h_subData = NULL; loadBlockData(width, data_height, data_start, &h_subData, pread); if (!dataBlockArray[iBlock].isSplit()) // no need to split { split = 0; cout << "do not need devide the picture" << endl; LineCCLNoSplit(h_subData, width, height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue()); createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri,
data_height, width, mapPatch, data_start, data_end, task_start, task_end); } else { split = 1; LineSplitCCL(&h_subDataNextBlock, dataBlockArray[iBlock+1], pread ,h_subData, width, data_height, task_height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue(), task_start*width, data_start, data_end, task_start, task_end); createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, sub_mapPatch, data_start, data_end, task_start, task_end); mergePatchMap(mapPatch, sub_mapPatch); memcpy(mergeStructArray+iBlock, h_labelMap + (task_height - 1) * width , sizeof(int) * width);//mlastRowLabel } checkCudaErrors(cudaFreeHost(h_subData)); } else { int* h_subData = h_subDataNextBlock;// values for the current block h_subDataNextBlock = NULL; LineSplitCCL2(&h_subDataNextBlock, dataBlockArray, iBlock, pread, h_subData, width, data_height,task_height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue(), task_start*width, data_start, data_end, task_start, task_end); createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, sub_mapPatch, data_start, data_end, task_start, task_end); mergePatchMap(mapPatch, sub_mapPatch); memcpy(mergeStructArray+(iBlock-1)*width*4+width, h_labelMap, sizeof(int)*width);//mfirstRowLabel memcpy(mergeStructArray+(iBlock-1)*width*4+width*2, h_subData, sizeof(int)*width);//mh_subDataFirst memcpy(mergeStructArray+(iBlock-1)*width*4+width*3, h_subData+width, sizeof(int)*width);//mh_subDataSecond if(iBlock != blockNum - 1) { memcpy(mergeStructArray+iBlock*width*4, h_labelMap + width*(task_height - 1), sizeof(int)*width);//mlastRowLabel: once consumed, save the current block's last row for the next iteration } checkCudaErrors(cudaFreeHost(h_subData)); } checkCudaErrors(cudaFreeHost(h_labelMap)); checkCudaErrors(cudaFreeHost(h_PixelNum)); checkCudaErrors(cudaFreeHost(h_Peri)); } MergePatchArray(width, blockNum, (int)pread->invalidValue(), mergeStructArray, Quf); // delete[] mergeStructArray; // mergeStructArray = NULL; free(mergeStructArray); delete[] dataBlockArray; dataBlockArray = NULL; for (int i = 0; i < blockNum; ++i) { checkCudaErrors(cudaStreamDestroy(stream[i])); } } int main(int argc, char *argv[]) { if (argc < 2) { cout << "please input I/O filename. exit." << endl; return -1; } GDALAllRegister(); CPLSetConfigOption("GDAL_FILENAME_IS_UTF8", "NO"); CGDALRead* pread = new CGDALRead; // load all data to unsigned char array when read data; if (!pread->loadMetaData(argv[1])) { cout << "load error!"
<< endl; } cout << "rows:" << pread->rows() << endl; cout << "cols:" << pread->cols() << endl; cout << "bandnum:" << pread->bandnum() << endl; cout << "datalength:" << pread->datalength() << endl; cout << "invalidValue:" << pread->invalidValue() << endl; cout << "datatype:" << GDALGetDataTypeName(pread->datatype()) << endl; cout << "projectionRef:" << pread->projectionRef() << endl; cout << "perPixelSize:" << pread->perPixelSize() << endl; int imgSize = pread->rows()*pread->cols(); // int mergeStruct::width = pread->cols(); //分块 std::map<int, Patch> mapPatch; UF Quf(imgSize);//QuickUnion算法,合并斑块序号 bool split = 0; exeGPUAndMemcpy(pread, mapPatch, Quf, split); if(split) outputPatchToFile(mapPatch, Quf, "LineUF"); else outputPatchToFile(mapPatch, "LineUF"); return 0; } // void devideImg( CGDALRead* pread, std::map<int, Patch> &mapPatch, UF &Quf, bool &split) // { // int devicesCount; // cudaGetDeviceCount(&devicesCount); // cudaSetDevice(0); // int width = pread->cols(); // int height = pread->rows(); // int maxnum; //可以读入的像元的个数 // size_t freeGPU, totalGPU; // cudaMemGetInfo(&freeGPU, &totalGPU);//size_t* free, size_t* total // cout << "(free,total)" << freeGPU << "," << totalGPU << endl; // maxnum = (freeGPU) / (sizeof(int)* 6);//每个pixel基本上要开辟6个中间变量,变量类型都是int // int sub_height = maxnum / width - 5; //每个分块的高度sub_height // int subIdx = 0; // int* lastRowLabel = new int[width]; // int* firstRowLabel = new int[width]; // int *Meg = NULL; //合并数组 // int Merge_count = 0; //合并计数 // int2 blockSize; blockSize.x = 32; blockSize.y = 16; // dim3 blockDim(blockSize.x, blockSize.y, 1); // dim3 gridDim((pread->cols() + blockSize.x - 1) / blockSize.x, (pread->rows() + blockSize.y - 1) / blockSize.y, 1); // { // // //the first block----------------------start------------------------------------------ // // //------------------------------------------------------------------------------------ // // //------------------------------------------------------------------------------------ // // int task_start0 = 0; // // int task_end0; // // if ((subIdx + 1)*sub_height - height <= 0) // // task_end0 = (subIdx + 1)*sub_height - 1; // // else // // task_end0 = height - 1; // // int data_start0, data_end0; // // if (task_start0 - 1 <= 0) // // data_start0 = 0; // // else // // data_start0 = task_start0 - 1; // // if (task_end0 + 1 >= height - 1) // // data_end0 = height - 1; // // else // // data_end0 = task_end0 + 1; // // int data_height0 = data_end0 - data_start0 + 1; // // int task_height0 = task_end0 - task_start0 + 1; // // int* h_subData0 = NULL; // // loadBlockData(width, data_height0, data_start0, &h_subData0, pread); // // size_t nBytes_task0 = task_height0 * width * sizeof(int); // // int* h_labelMap0; // // checkCudaErrors(cudaHostAlloc((void **)&h_labelMap0, nBytes_task0, cudaHostAllocDefault)); // // memset(h_labelMap0, 0, task_height0 * width); // // int* h_PixelNum0; // // checkCudaErrors(cudaHostAlloc((void **)&h_PixelNum0, nBytes_task0, cudaHostAllocDefault)); // // memset(h_PixelNum0, 0, task_height0 * width); // // int* h_Peri0; // // checkCudaErrors(cudaHostAlloc((void **)&h_Peri0, nBytes_task0, cudaHostAllocDefault)); // // memset(h_Peri0, 0, task_height0 * width); // // cout << "subIdx ,data_height:" << subIdx << "," << data_height0 << endl; // // cout << "data_start:" << data_start0 << endl; // // cout << "data___end:" << data_end0 << endl; // // cout << "task_start:" << task_start0 << endl; // // cout << "task___end:" << task_end0 << endl; // // cout << 
"-------------------------------------------" << endl; // // std::map<int, Patch> sub_mapPatch0; // // if ((task_start0 == data_start0) && (task_end0 == data_end0)) // // { // // split = 0; // // cout << "do not need devide the picture" << endl; // // LineCCLNoSplit(h_subData0, width, height, blockDim, gridDim, h_labelMap0, h_PixelNum0, h_Peri0, (int)pread->invalidValue()); // // createPatchMap(h_subData0, h_labelMap0, h_PixelNum0, h_Peri0, data_height0, width, mapPatch, data_start0, data_end0, task_start0, task_end0); // // } // // else // // { // // split = 1; // // if (task_start == 0) // // { // // LineSplitCCL(h_subData0, width, data_height0, task_height0, blockDim, gridDim, h_labelMap0, h_PixelNum0, h_Peri0, (int)pread->invalidValue(), task_start0*width, data_start0, data_end0, task_start0, task_end0); // // createPatchMap(h_subData0, h_labelMap0, h_PixelNum0, h_Peri0, data_height0, width, sub_mapPatch0, data_start0, data_end0, task_start0, task_end0); // // mergePatchMap(mapPatch, sub_mapPatch0); // // memcpy(lastRowLabel , h_labelMap0 + (task_height0-1) * width , sizeof(int)*width); // // } // // } // // //the first block----------------------end------------------------------------------ // // //---------------------------------------------------------------------------------- // // //---------------------------------------------------------------------------------- // } // for (int height_all = 0; height_all < height; height_all += sub_height) // { // int task_start = subIdx*sub_height; // int task_end; // if ((subIdx + 1)*sub_height - height <= 0) // task_end = (subIdx + 1)*sub_height - 1; // else // task_end = height - 1; // int data_start, data_end; // if (task_start - 1 <= 0) // data_start = 0; // else // data_start = task_start - 1; // if (task_end + 1 >= height - 1) // data_end = height - 1; // else // data_end = task_end + 1; // int data_height = data_end - data_start + 1; // int task_height = task_end - task_start + 1; // int* h_subData = NULL; // loadBlockData(width, data_height, data_start, &h_subData, pread); // size_t nBytes_task = task_height * width * sizeof(int); // int* h_labelMap; // checkCudaErrors(cudaHostAlloc((void **)&h_labelMap, nBytes_task, cudaHostAllocDefault)); // memset(h_labelMap, 0, task_height * width); // int* h_PixelNum; // checkCudaErrors(cudaHostAlloc((void **)&h_PixelNum, nBytes_task, cudaHostAllocDefault)); // memset(h_PixelNum, 0, task_height * width); // int* h_Peri; // checkCudaErrors(cudaHostAlloc((void **)&h_Peri, nBytes_task, cudaHostAllocDefault)); // memset(h_Peri, 0, task_height * width); // cout << "subIdx ,data_height:" << subIdx << "," << data_height << endl; // cout << "data_start:" << data_start << endl; // cout << "data___end:" << data_end << endl; // cout << "task_start:" << task_start << endl; // cout << "task___end:" << task_end << endl; // cout << "-------------------------------------------" << endl; // //至此,每个分块的初始数据已经保存在h_subData中,下面调用核函数,进行计算,并将数据传回 // std::map<int, Patch> sub_mapPatch; // if ((task_start == data_start) && (task_end == data_end)) // { // split = 0; // cout << "do not need devide the picture" << endl; // LineCCLNoSplit(h_subData, width, height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue()); // createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, mapPatch, data_start, data_end, task_start, task_end); // } // else // { // split = 1; // if (task_start == 0) // { // LineSplitCCL(h_subData, width, data_height, task_height, blockDim, gridDim, h_labelMap, 
h_PixelNum, h_Peri, (int)pread->invalidValue(), task_start*width, data_start, data_end, task_start, task_end); // createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, sub_mapPatch, data_start, data_end, task_start, task_end); // mergePatchMap(mapPatch, sub_mapPatch); // memcpy(lastRowLabel , h_labelMap + (task_height-1) * width , sizeof(int)*width); // } // else // { // //所有分块共同维护 // //mapPatch---存储所有局部patch的value,area,peri // //Quf---存储要合并的信息 // LineSplitCCL2(h_subData, width, data_height,task_height, blockDim, gridDim, h_labelMap, h_PixelNum, h_Peri, (int)pread->invalidValue(), task_start*width, data_start, data_end, task_start, task_end); // createPatchMap(h_subData, h_labelMap, h_PixelNum, h_Peri, data_height, width, sub_mapPatch, data_start, data_end, task_start, task_end); // mergePatchMap(mapPatch, sub_mapPatch); // memcpy(firstRowLabel, h_labelMap, sizeof(int)*width);//取当前块的第一行用来与上一分块中的标记nextTaskStartLabel做对比生成集合 // int *h_subDataFirst = new int[width]; // int *h_subDataSecond = new int[width]; // memcpy(h_subDataFirst, h_subData, sizeof(int)*width);//取当前块的第一行用来与上一分块中的标记nextTaskStartLabel做对比生成集合 // memcpy(h_subDataSecond, h_subData+width, sizeof(int)*width);//取当前块的第一行用来与上一分块中的标记nextTaskStartLabel做对比生成集合 // //现在有上个分块最后一行的标识lastRowLabel // //当前分块第一行的标识firstRowLabel,将这两行用来生成Meg数组,并构造Union-find // Meg = (int *)malloc(sizeof(int) * width * 2); // Merge_count = findMerge(width,(int)pread->invalidValue(), Meg, h_subDataFirst,h_subDataSecond,lastRowLabel,firstRowLabel); // MergePatch( Meg, Merge_count, Quf); // memcpy(lastRowLabel, h_labelMap + width*(task_height - 1), sizeof(int)*width);//用完了之后将当前块的最后一行保存下来,下次用 // delete[] Meg; // Meg = NULL; // Merge_count = 0; // } // } // checkCudaErrors(cudaFreeHost(h_subData)); // checkCudaErrors(cudaFreeHost(h_labelMap)); // checkCudaErrors(cudaFreeHost(h_PixelNum)); // checkCudaErrors(cudaFreeHost(h_Peri)); // subIdx++; // } // } // int main(int argc, char *argv[]) // { // if (argc < 2) // { // cout << "please input I/O filename. exit." << endl; // return -1; // } // GDALAllRegister(); // CPLSetConfigOption("GDAL_FILENAME_IS_UTF8", "NO"); // CGDALRead* pread = new CGDALRead; // // load all data to unsigned char array when read data; // if (!pread->loadMetaData(argv[1])) // { // cout << "load error!" 
<< endl; // } // cout << "rows:" << pread->rows() << endl; // cout << "cols:" << pread->cols() << endl; // cout << "bandnum:" << pread->bandnum() << endl; // cout << "datalength:" << pread->datalength() << endl; // cout << "invalidValue:" << pread->invalidValue() << endl; // cout << "datatype:" << GDALGetDataTypeName(pread->datatype()) << endl; // cout << "projectionRef:" << pread->projectionRef() << endl; // cout << "perPixelSize:" << pread->perPixelSize() << endl; // int imgSize = pread->rows()*pread->cols(); // //分块 // std::map<int, Patch> mapPatch; // UF Quf(imgSize);//QuickUnion算法,合并斑块序号 // bool split = 0; // devideImg(pread, mapPatch, Quf, split); // if(split) // outputPatchToFile(mapPatch, Quf, "LineUF"); // else // outputPatchToFile(mapPatch, "LineUF"); // return 0; // } // int main() // { // int imgSize = 25; // // int array[25] = { 1, 1, 2, 2, 2, 1, 2, 2, 3, 2, 1, 2, 1, 3, 2, 2, 1, 3, 3, 3, 2, 3, 3, 3, 3 }; // int array[25] = { 1, 3, 3, 3, 3, // 1, 3, 3, 1, 3, // 1, 2, 1, 3, 2, // 2, 1, 3, 2, 3, // 1, 2, 2, 3, 2 }; // int* AllDataHost = new int[25]; // for (int i = 0; i < 25; i++) // { // AllDataHost[i] = array[i]; // } // std::map<int, Patch> mapPatch; // UF Quf(imgSize);//QuickUnion算法,合并斑块序号 // // vector<set<int> >mergeSet; // PRead *pread = new PRead(5, 5, 0); // devideImg1(AllDataHost, pread, mapPatch,Quf); // outputPatchToFile(mapPatch,Quf, "LineUF"); // return 0; // }
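// A minimal sketch of the pinned-buffer pattern used throughout the pair above,
// written with the HIP spellings; in the CUDA original the same calls appear as
// cudaMemGetInfo, cudaHostAlloc(..., cudaHostAllocDefault) and cudaFreeHost.
// The function name and the simple capacity check are illustrative assumptions,
// not code from the file.
#include <hip/hip_runtime.h>

int* alloc_pinned_rows(int rows, int width) {
  size_t free_bytes = 0, total_bytes = 0;
  hipMemGetInfo(&free_bytes, &total_bytes);                // CUDA: cudaMemGetInfo
  if (free_bytes < (size_t)rows * width * sizeof(int)) return nullptr;
  int* buf = nullptr;
  hipHostMalloc((void**)&buf, (size_t)rows * width * sizeof(int),
                hipHostMallocDefault);                     // CUDA: cudaHostAlloc(..., cudaHostAllocDefault)
  return buf;                                              // release later with hipHostFree (CUDA: cudaFreeHost)
}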
6ae75442135d3a806523996550433f7c0d9705df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/span.hpp> #include <io/utilities/hostdevice_vector.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_lists.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_vector.hpp> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <cstddef> #include <cstring> #include <string> using cudf::device_span; using cudf::host_span; using cudf::detail::device_2dspan; using cudf::detail::host_2dspan; using cudf::detail::hostdevice_2dvector; template <typename T> void expect_equivolent(host_span<T> a, host_span<T> b) { EXPECT_EQ(a.size(), b.size()); EXPECT_EQ(a.data(), b.data()); } template <typename Iterator1, typename T> void expect_match(Iterator1 expected, size_t expected_size, host_span<T> input) { EXPECT_EQ(expected_size, input.size()); for (size_t i = 0; i < expected_size; i++) { EXPECT_EQ(*(expected + i), *(input.begin() + i)); } } template <typename T> void expect_match(std::string expected, host_span<T> input) { return expect_match(expected.begin(), expected.size(), input); } std::string const hello_wold_message = "hello world"; std::vector<char> create_hello_world_message() { return std::vector<char>(hello_wold_message.begin(), hello_wold_message.end()); } class SpanTest : public cudf::test::BaseFixture { }; TEST(SpanTest, CanCreateFullSubspan) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_equivolent(message_span, message_span.subspan(0, message_span.size())); } TEST(SpanTest, CanTakeFirst) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("hello", message_span.first(5)); } TEST(SpanTest, CanTakeLast) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("world", message_span.last(5)); } TEST(SpanTest, CanTakeSubspanFull) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("hello world", message_span.subspan(0, 11)); } TEST(SpanTest, CanTakeSubspanPartial) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("lo w", message_span.subspan(3, 4)); } TEST(SpanTest, CanGetFront) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('h', message_span.front()); } TEST(SpanTest, CanGetBack) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('d', message_span.back()); } 
TEST(SpanTest, CanGetData) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ(message.data(), message_span.data()); } TEST(SpanTest, CanDetermineEmptiness) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); auto const empty_span = host_span<char>(); EXPECT_FALSE(message_span.empty()); EXPECT_TRUE(empty_span.empty()); } TEST(SpanTest, CanGetSize) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); auto const empty_span = host_span<char>(); EXPECT_EQ(static_cast<size_t>(11), message_span.size()); EXPECT_EQ(static_cast<size_t>(0), empty_span.size()); } TEST(SpanTest, CanGetSizeBytes) { auto doubles = std::vector<double>({6, 3, 2}); auto const doubles_span = host_span<double>(doubles.data(), doubles.size()); auto const empty_span = host_span<double>(); EXPECT_EQ(static_cast<size_t>(24), doubles_span.size_bytes()); EXPECT_EQ(static_cast<size_t>(0), empty_span.size_bytes()); } TEST(SpanTest, CanCopySpan) { auto message = create_hello_world_message(); host_span<char> message_span_copy; { auto const message_span = host_span<char>(message.data(), message.size()); message_span_copy = message_span; } EXPECT_EQ(message.data(), message_span_copy.data()); EXPECT_EQ(message.size(), message_span_copy.size()); } TEST(SpanTest, CanSubscriptRead) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('o', message_span[4]); } TEST(SpanTest, CanSubscriptWrite) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); message_span[4] = 'x'; EXPECT_EQ('x', message_span[4]); } TEST(SpanTest, CanConstructFromHostContainers) { auto std_vector = std::vector<int>(1); auto h_vector = thrust::host_vector<int>(1); (void)host_span<int>(std_vector); (void)host_span<int>(h_vector); auto const std_vector_c = std_vector; auto const h_vector_c = h_vector; (void)host_span<int const>(std_vector_c); (void)host_span<int const>(h_vector_c); } // This test is the only place in libcudf's test suite where using a // thrust::device_vector (and therefore the CUDA default stream) is acceptable // since we are explicitly testing conversions from thrust::device_vector. 
TEST(SpanTest, CanConstructFromDeviceContainers) { auto d_thrust_vector = thrust::device_vector<int>(1); auto d_vector = rmm::device_vector<int>(1); auto d_uvector = rmm::device_uvector<int>(1, cudf::get_default_stream()); (void)device_span<int>(d_thrust_vector); (void)device_span<int>(d_vector); (void)device_span<int>(d_uvector); auto const& d_thrust_vector_c = d_thrust_vector; auto const& d_vector_c = d_vector; auto const& d_uvector_c = d_uvector; (void)device_span<int const>(d_thrust_vector_c); (void)device_span<int const>(d_vector_c); (void)device_span<int const>(d_uvector_c); } __global__ void simple_device_kernel(device_span<bool> result) { result[0] = true; } TEST(SpanTest, CanUseDeviceSpan) { auto d_message = cudf::detail::make_zeroed_device_uvector_async<bool>(1, cudf::get_default_stream()); auto d_span = device_span<bool>(d_message.data(), d_message.size()); hipLaunchKernelGGL(( simple_device_kernel), dim3(1), dim3(1), 0, cudf::get_default_stream().value(), d_span); ASSERT_TRUE(d_message.element(0, cudf::get_default_stream())); } class MdSpanTest : public cudf::test::BaseFixture { }; TEST(MdSpanTest, CanDetermineEmptiness) { auto const vector = hostdevice_2dvector<int>(1, 2, cudf::get_default_stream()); auto const no_rows_vector = hostdevice_2dvector<int>(0, 2, cudf::get_default_stream()); auto const no_columns_vector = hostdevice_2dvector<int>(1, 0, cudf::get_default_stream()); EXPECT_FALSE(host_2dspan<int const>{vector}.is_empty()); EXPECT_FALSE(device_2dspan<int const>{vector}.is_empty()); EXPECT_TRUE(host_2dspan<int const>{no_rows_vector}.is_empty()); EXPECT_TRUE(device_2dspan<int const>{no_rows_vector}.is_empty()); EXPECT_TRUE(host_2dspan<int const>{no_columns_vector}.is_empty()); EXPECT_TRUE(device_2dspan<int const>{no_columns_vector}.is_empty()); } __global__ void readwrite_kernel(device_2dspan<int> result) { if (result[5][6] == 5) { result[5][6] *= 6; } else { result[5][6] = 5; } } TEST(MdSpanTest, DeviceReadWrite) { auto vector = hostdevice_2dvector<int>(11, 23, cudf::get_default_stream()); hipLaunchKernelGGL(( readwrite_kernel), dim3(1), dim3(1), 0, cudf::get_default_stream().value(), vector); hipLaunchKernelGGL(( readwrite_kernel), dim3(1), dim3(1), 0, cudf::get_default_stream().value(), vector); vector.device_to_host(cudf::get_default_stream(), true); EXPECT_EQ(vector[5][6], 30); } TEST(MdSpanTest, HostReadWrite) { auto vector = hostdevice_2dvector<int>(11, 23, cudf::get_default_stream()); auto span = host_2dspan<int>{vector}; span[5][6] = 5; if (span[5][6] == 5) { span[5][6] *= 6; } EXPECT_EQ(vector[5][6], 30); } TEST(MdSpanTest, CanGetSize) { auto const vector = hostdevice_2dvector<int>(1, 2, cudf::get_default_stream()); EXPECT_EQ(host_2dspan<int const>{vector}.size(), vector.size()); EXPECT_EQ(device_2dspan<int const>{vector}.size(), vector.size()); } TEST(MdSpanTest, CanGetCount) { auto const vector = hostdevice_2dvector<int>(11, 23, cudf::get_default_stream()); EXPECT_EQ(host_2dspan<int const>{vector}.count(), 11ul * 23); EXPECT_EQ(device_2dspan<int const>{vector}.count(), 11ul * 23); } CUDF_TEST_PROGRAM_MAIN()
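// The only substantive rewrite hipify made in this span-test pair is the kernel
// launch syntax, visible in simple_device_kernel and readwrite_kernel above. A
// minimal sketch of the two spellings, using a made-up kernel so it stands alone:
#include <hip/hip_runtime.h>

__global__ void toy_kernel(int* out) { out[0] = 1; }

void launch_both_ways(int* d_out, hipStream_t stream) {
  // HIP form emitted by hipify: (kernel, grid, block, shared-mem bytes, stream, args...)
  hipLaunchKernelGGL(toy_kernel, dim3(1), dim3(1), 0, stream, d_out);
  // The CUDA original (see the .cu file that follows) would read:
  //   toy_kernel<<<1, 1, 0, stream>>>(d_out);
}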
6ae75442135d3a806523996550433f7c0d9705df.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/span.hpp> #include <io/utilities/hostdevice_vector.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_lists.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_vector.hpp> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <cstddef> #include <cstring> #include <string> using cudf::device_span; using cudf::host_span; using cudf::detail::device_2dspan; using cudf::detail::host_2dspan; using cudf::detail::hostdevice_2dvector; template <typename T> void expect_equivolent(host_span<T> a, host_span<T> b) { EXPECT_EQ(a.size(), b.size()); EXPECT_EQ(a.data(), b.data()); } template <typename Iterator1, typename T> void expect_match(Iterator1 expected, size_t expected_size, host_span<T> input) { EXPECT_EQ(expected_size, input.size()); for (size_t i = 0; i < expected_size; i++) { EXPECT_EQ(*(expected + i), *(input.begin() + i)); } } template <typename T> void expect_match(std::string expected, host_span<T> input) { return expect_match(expected.begin(), expected.size(), input); } std::string const hello_wold_message = "hello world"; std::vector<char> create_hello_world_message() { return std::vector<char>(hello_wold_message.begin(), hello_wold_message.end()); } class SpanTest : public cudf::test::BaseFixture { }; TEST(SpanTest, CanCreateFullSubspan) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_equivolent(message_span, message_span.subspan(0, message_span.size())); } TEST(SpanTest, CanTakeFirst) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("hello", message_span.first(5)); } TEST(SpanTest, CanTakeLast) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("world", message_span.last(5)); } TEST(SpanTest, CanTakeSubspanFull) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("hello world", message_span.subspan(0, 11)); } TEST(SpanTest, CanTakeSubspanPartial) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("lo w", message_span.subspan(3, 4)); } TEST(SpanTest, CanGetFront) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('h', message_span.front()); } TEST(SpanTest, CanGetBack) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('d', message_span.back()); } TEST(SpanTest, CanGetData) { auto message = create_hello_world_message(); auto const 
message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ(message.data(), message_span.data()); } TEST(SpanTest, CanDetermineEmptiness) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); auto const empty_span = host_span<char>(); EXPECT_FALSE(message_span.empty()); EXPECT_TRUE(empty_span.empty()); } TEST(SpanTest, CanGetSize) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); auto const empty_span = host_span<char>(); EXPECT_EQ(static_cast<size_t>(11), message_span.size()); EXPECT_EQ(static_cast<size_t>(0), empty_span.size()); } TEST(SpanTest, CanGetSizeBytes) { auto doubles = std::vector<double>({6, 3, 2}); auto const doubles_span = host_span<double>(doubles.data(), doubles.size()); auto const empty_span = host_span<double>(); EXPECT_EQ(static_cast<size_t>(24), doubles_span.size_bytes()); EXPECT_EQ(static_cast<size_t>(0), empty_span.size_bytes()); } TEST(SpanTest, CanCopySpan) { auto message = create_hello_world_message(); host_span<char> message_span_copy; { auto const message_span = host_span<char>(message.data(), message.size()); message_span_copy = message_span; } EXPECT_EQ(message.data(), message_span_copy.data()); EXPECT_EQ(message.size(), message_span_copy.size()); } TEST(SpanTest, CanSubscriptRead) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('o', message_span[4]); } TEST(SpanTest, CanSubscriptWrite) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); message_span[4] = 'x'; EXPECT_EQ('x', message_span[4]); } TEST(SpanTest, CanConstructFromHostContainers) { auto std_vector = std::vector<int>(1); auto h_vector = thrust::host_vector<int>(1); (void)host_span<int>(std_vector); (void)host_span<int>(h_vector); auto const std_vector_c = std_vector; auto const h_vector_c = h_vector; (void)host_span<int const>(std_vector_c); (void)host_span<int const>(h_vector_c); } // This test is the only place in libcudf's test suite where using a // thrust::device_vector (and therefore the CUDA default stream) is acceptable // since we are explicitly testing conversions from thrust::device_vector. 
TEST(SpanTest, CanConstructFromDeviceContainers) { auto d_thrust_vector = thrust::device_vector<int>(1); auto d_vector = rmm::device_vector<int>(1); auto d_uvector = rmm::device_uvector<int>(1, cudf::get_default_stream()); (void)device_span<int>(d_thrust_vector); (void)device_span<int>(d_vector); (void)device_span<int>(d_uvector); auto const& d_thrust_vector_c = d_thrust_vector; auto const& d_vector_c = d_vector; auto const& d_uvector_c = d_uvector; (void)device_span<int const>(d_thrust_vector_c); (void)device_span<int const>(d_vector_c); (void)device_span<int const>(d_uvector_c); } __global__ void simple_device_kernel(device_span<bool> result) { result[0] = true; } TEST(SpanTest, CanUseDeviceSpan) { auto d_message = cudf::detail::make_zeroed_device_uvector_async<bool>(1, cudf::get_default_stream()); auto d_span = device_span<bool>(d_message.data(), d_message.size()); simple_device_kernel<<<1, 1, 0, cudf::get_default_stream().value()>>>(d_span); ASSERT_TRUE(d_message.element(0, cudf::get_default_stream())); } class MdSpanTest : public cudf::test::BaseFixture { }; TEST(MdSpanTest, CanDetermineEmptiness) { auto const vector = hostdevice_2dvector<int>(1, 2, cudf::get_default_stream()); auto const no_rows_vector = hostdevice_2dvector<int>(0, 2, cudf::get_default_stream()); auto const no_columns_vector = hostdevice_2dvector<int>(1, 0, cudf::get_default_stream()); EXPECT_FALSE(host_2dspan<int const>{vector}.is_empty()); EXPECT_FALSE(device_2dspan<int const>{vector}.is_empty()); EXPECT_TRUE(host_2dspan<int const>{no_rows_vector}.is_empty()); EXPECT_TRUE(device_2dspan<int const>{no_rows_vector}.is_empty()); EXPECT_TRUE(host_2dspan<int const>{no_columns_vector}.is_empty()); EXPECT_TRUE(device_2dspan<int const>{no_columns_vector}.is_empty()); } __global__ void readwrite_kernel(device_2dspan<int> result) { if (result[5][6] == 5) { result[5][6] *= 6; } else { result[5][6] = 5; } } TEST(MdSpanTest, DeviceReadWrite) { auto vector = hostdevice_2dvector<int>(11, 23, cudf::get_default_stream()); readwrite_kernel<<<1, 1, 0, cudf::get_default_stream().value()>>>(vector); readwrite_kernel<<<1, 1, 0, cudf::get_default_stream().value()>>>(vector); vector.device_to_host(cudf::get_default_stream(), true); EXPECT_EQ(vector[5][6], 30); } TEST(MdSpanTest, HostReadWrite) { auto vector = hostdevice_2dvector<int>(11, 23, cudf::get_default_stream()); auto span = host_2dspan<int>{vector}; span[5][6] = 5; if (span[5][6] == 5) { span[5][6] *= 6; } EXPECT_EQ(vector[5][6], 30); } TEST(MdSpanTest, CanGetSize) { auto const vector = hostdevice_2dvector<int>(1, 2, cudf::get_default_stream()); EXPECT_EQ(host_2dspan<int const>{vector}.size(), vector.size()); EXPECT_EQ(device_2dspan<int const>{vector}.size(), vector.size()); } TEST(MdSpanTest, CanGetCount) { auto const vector = hostdevice_2dvector<int>(11, 23, cudf::get_default_stream()); EXPECT_EQ(host_2dspan<int const>{vector}.count(), 11ul * 23); EXPECT_EQ(device_2dspan<int const>{vector}.count(), 11ul * 23); } CUDF_TEST_PROGRAM_MAIN()
4941d21e9cfedd5ecc93791e959fab25308eb46e.hip
// !!! This is a file automatically generated by hipify!!! /* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense vector-sparse/dense vector dot product Z=CuMatlab_dot(Sparse/Dense(X),Sparse/Dense(Y)). * Z= X.Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "SPARSEHELPER.h" #include "ERRORCHK.h" #include <omp.h> // Input Arguments #define INPUTSPARSEA prhs[0] #define INPUTDENSEB prhs[1] // Output Arguments #define OUTPUTMATRIX plhs[0] extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! \n"); return; } char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two."; char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one."; if ((nrhs!=2)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } if ((nlhs!=1)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(INPUTSPARSEA); if ((mxIsChar(INPUTSPARSEA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } char *input_buf1; input_buf1 = mxArrayToString(INPUTDENSEB); if ((mxIsChar(INPUTDENSEB))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1); } if (mxIsGPUArray(INPUTSPARSEA) && mxIsGPUArray(INPUTDENSEB)) { mxGPUArray const *INPUTSPARSEGPUA; mxGPUArray const *INPUTDENSEGPUB; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); INPUTSPARSEGPUA = mxGPUCreateFromMxArray(INPUTSPARSEA); INPUTDENSEGPUB = mxGPUCreateFromMxArray(INPUTDENSEB); if((mxGPUIsSparse(INPUTSPARSEGPUA))&& (!mxGPUIsSparse(INPUTDENSEGPUB)) ){ const mwSize *dimsGPUSA; dimsGPUSA=mxGPUGetDimensions(INPUTSPARSEGPUA); int numARows, numAColumns; numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */ numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */ const mwSize *dimsGPUSB; dimsGPUSB=mxGPUGetDimensions(INPUTDENSEGPUB); int numBRows, numBColumns; numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */ numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */ if ( (((numARows!= 1) && (numAColumns!= 1))) ||(((numBRows!= 1) && (numBColumns!= 1)))) { mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mxGPUDestroyGPUArray(INPUTDENSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, first/second arguments must be sparse/dense column/row vector."); } if ( mxGPUGetNumberOfElements(INPUTSPARSEGPUA)!=mxGPUGetNumberOfElements(INPUTDENSEGPUB)) { mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mxGPUDestroyGPUArray(INPUTDENSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, row/column number of sparse vector (first argument) must be equal to row/column number of dense vector(second argument)."); } mwIndex nnz1; mxArray * tempx = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUA); nnz1 = *(mxGetJc(tempx) + numAColumns); //nnz1=(mwSize)ceil(numARows*numAColumns); int nnz= static_cast<int> (nnz1); int *pointerrow =0; mxArray *row_sort; if (numAColumns == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Ir_DataGetSetIXY(tempx , pointerrow, nnz); } if (numARows == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Jc_GetSetIXY(tempx , pointerrow); } hipDoubleComplex *pointerval = (hipDoubleComplex *)mxGetComplexDoubles(tempx); size_t pivot_dimensionsrow[1] = {nnz}; size_t pivot_dimensionsvalue[1] = {nnz}; mxGPUArray *row_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *xrow_sortA=(int *)mxGPUGetData(row_sortA); gpuErrchk(hipMemcpy(xrow_sortA, pointerrow, nnz * sizeof(*xrow_sortA), hipMemcpyHostToDevice)); mxGPUArray *val_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); hipDoubleComplex *xval_sortA=(hipDoubleComplex*)mxGPUGetData(val_sortA); gpuErrchk(hipMemcpy(xval_sortA, pointerval, nnz * sizeof(*xval_sortA), hipMemcpyHostToDevice)); hipDoubleComplex const *d_B_dense; d_B_dense = (hipDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUB)); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxDestroyArray(row_sort); mxDestroyArray(tempx); hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); hipDoubleComplex VALOUT= make_cuDoubleComplex(0.0, 0.0); cusparseSafeCall(cusparseZdotci(handle, nnz, xval_sortA, xrow_sortA, d_B_dense, &VALOUT, HIPSPARSE_INDEX_BASE_ONE)); int nnzx=1; mwSize nnzm=(mwSize)nnzx; OUTPUTMATRIX = mxCreateSparse(1,1,nnzm,mxCOMPLEX); mwIndex *irs = static_cast<mwIndex *> (mxMalloc (nnzx * sizeof(mwIndex))); irs[0] = 0; mwIndex *jcs = static_cast<mwIndex *> (mxMalloc 
(2 * sizeof(mwIndex))); jcs[0]=jcs[1]=1; mxComplexDouble* PRS =static_cast<mxComplexDouble *> (mxMalloc (nnzx * sizeof(mxComplexDouble))); PRS[0].real = VALOUT.x; PRS[0].imag = VALOUT.y; mxFree (mxGetJc (OUTPUTMATRIX)) ; mxFree (mxGetIr (OUTPUTMATRIX)) ; mxFree (mxGetComplexDoubles (OUTPUTMATRIX)) ; mxSetIr(OUTPUTMATRIX, (mwIndex *)irs); mxSetJc(OUTPUTMATRIX, (mwIndex *)jcs); int s = mxSetComplexDoubles(OUTPUTMATRIX, (mxComplexDouble *)PRS); if ( s==0) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "the function is unsuccessful, either mxArray is not an unshared mxDOUBLE_CLASS array, or the data is not allocated with mxCalloc."); } mxGPUDestroyGPUArray(row_sortA); mxGPUDestroyGPUArray(val_sortA); hipsparseDestroyMatDescr(descrA); hipsparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } //////////////////////////////////////////////////////////////////////////////////// else if (!(mxIsGPUArray(INPUTSPARSEA)) && !(mxIsGPUArray(INPUTDENSEB))){ // if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) { // mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be hipDoubleComplex precision."); // } if((mxIsSparse(INPUTSPARSEA))&& (!mxIsSparse(INPUTDENSEB)) ){ mxInitGPU(); const mwSize *dimsCPUA; dimsCPUA=mxGetDimensions(INPUTSPARSEA); int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */ int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */ const mwSize *dimsCPUB; dimsCPUB=mxGetDimensions(INPUTDENSEB); int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */ int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */ if ( (((numARows!= 1) && (numAColumns!= 1))) ||(((numBRows!= 1) && (numBColumns!= 1)))) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, first/second arguments must be sparse/dense column/row vector."); } if ( mxGetNumberOfElements(INPUTSPARSEA)!=mxGetNumberOfElements(INPUTDENSEB)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, row/column number of sparse vector (first argument) must be equal to row/column number of dense vector(second argument)."); } mwIndex nnz1; nnz1 = *(mxGetJc(INPUTSPARSEA) + numAColumns); //int nnz = (int)nnz1; int nnz= static_cast<int> (nnz1); int *pointerrow =0; mxArray *row_sort; if (numAColumns == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Ir_DataGetSetIXY(INPUTSPARSEA , pointerrow, nnz); } if (numARows == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Jc_GetSetIXY(INPUTSPARSEA , pointerrow); } hipDoubleComplex *pointerval = (hipDoubleComplex *)mxGetComplexDoubles(INPUTSPARSEA); size_t pivot_dimensionsrow[1] = {nnz}; size_t pivot_dimensionsvalue[1] = {nnz}; mxGPUArray *row_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *xrow_sortA=(int *)mxGPUGetData(row_sortA); gpuErrchk(hipMemcpy(xrow_sortA, pointerrow, nnz * sizeof(*xrow_sortA), hipMemcpyHostToDevice)); mxGPUArray *val_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); hipDoubleComplex *xval_sortA=(hipDoubleComplex*)mxGPUGetData(val_sortA); gpuErrchk(hipMemcpy(xval_sortA, pointerval, nnz * sizeof(*xval_sortA), hipMemcpyHostToDevice)); 
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); hipDoubleComplex *h_B_dense1; h_B_dense1 = (hipDoubleComplex *)mxGetComplexDoubles(INPUTDENSEB); hipDoubleComplex *VALDENSE; mxGPUArray *VAL; if (numBColumns == 1) { size_t pivot_dimensionsvalueV[1] = {numBRows}; VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); VALDENSE = (hipDoubleComplex *)mxGPUGetData(VAL); gpuErrchk(hipMemcpy(VALDENSE, h_B_dense1, sizeof(hipDoubleComplex) * numBRows , hipMemcpyHostToDevice)); } if (numBRows == 1) { size_t pivot_dimensionsvalueV[1] = {numBColumns}; VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); VALDENSE = (hipDoubleComplex *)mxGPUGetData(VAL); gpuErrchk(hipMemcpy(VALDENSE, h_B_dense1, sizeof(hipDoubleComplex) * numBColumns , hipMemcpyHostToDevice)); } hipDoubleComplex VALOUT= make_cuDoubleComplex(0.0, 0.0); cusparseSafeCall(cusparseZdotci(handle, nnz, xval_sortA, xrow_sortA, VALDENSE, &VALOUT, HIPSPARSE_INDEX_BASE_ONE)); int nnzx=1; mwSize nnzm=(mwSize)nnzx; OUTPUTMATRIX = mxCreateSparse(1,1,nnzm,mxCOMPLEX); mwIndex *irs = static_cast<mwIndex *> (mxMalloc (nnzx * sizeof(mwIndex))); irs[0] = 0; mwIndex *jcs = static_cast<mwIndex *> (mxMalloc (2 * sizeof(mwIndex))); jcs[0]=jcs[1]=1; mxComplexDouble* PRS =static_cast<mxComplexDouble *> (mxMalloc (nnzx * sizeof(mxComplexDouble))); PRS[0].real = VALOUT.x; PRS[0].imag = VALOUT.y; mxFree (mxGetJc (OUTPUTMATRIX)) ; mxFree (mxGetIr (OUTPUTMATRIX)) ; mxFree (mxGetComplexDoubles (OUTPUTMATRIX)) ; mxSetIr(OUTPUTMATRIX, (mwIndex *)irs); mxSetJc(OUTPUTMATRIX, (mwIndex *)jcs); int s = mxSetComplexDoubles(OUTPUTMATRIX, (mxComplexDouble *)PRS); if ( s==0) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "the function is unsuccessful, either mxArray is not an unshared mxDOUBLE_CLASS array, or the data is not allocated with mxCalloc."); } mxGPUDestroyGPUArray(row_sortA); mxGPUDestroyGPUArray(val_sortA); mxDestroyArray(row_sort); mxGPUDestroyGPUArray(VAL); hipsparseDestroyMatDescr(descrA); hipsparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } }
4941d21e9cfedd5ecc93791e959fab25308eb46e.cu
/* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense vector-sparse/dense vector dot product Z=CuMatlab_dot(Sparse/Dense(X),Sparse/Dense(Y)). * Z= X.Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include <cuda.h> #include <cuda_runtime.h> #include "SPARSEHELPER.h" #include "ERRORCHK.h" #include <omp.h> // Input Arguments #define INPUTSPARSEA prhs[0] #define INPUTDENSEB prhs[1] // Output Arguments #define OUTPUTMATRIX plhs[0] extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! \n"); return; } char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two."; char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one."; if ((nrhs!=2)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } if ((nlhs!=1)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(INPUTSPARSEA); if ((mxIsChar(INPUTSPARSEA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } char *input_buf1; input_buf1 = mxArrayToString(INPUTDENSEB); if ((mxIsChar(INPUTDENSEB))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1); } if (mxIsGPUArray(INPUTSPARSEA) && mxIsGPUArray(INPUTDENSEB)) { mxGPUArray const *INPUTSPARSEGPUA; mxGPUArray const *INPUTDENSEGPUB; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); INPUTSPARSEGPUA = mxGPUCreateFromMxArray(INPUTSPARSEA); INPUTDENSEGPUB = mxGPUCreateFromMxArray(INPUTDENSEB); if((mxGPUIsSparse(INPUTSPARSEGPUA))&& (!mxGPUIsSparse(INPUTDENSEGPUB)) ){ const mwSize *dimsGPUSA; dimsGPUSA=mxGPUGetDimensions(INPUTSPARSEGPUA); int numARows, numAColumns; numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */ numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */ const mwSize *dimsGPUSB; dimsGPUSB=mxGPUGetDimensions(INPUTDENSEGPUB); int numBRows, numBColumns; numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */ numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */ if ( (((numARows!= 1) && (numAColumns!= 1))) ||(((numBRows!= 1) && (numBColumns!= 1)))) { mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mxGPUDestroyGPUArray(INPUTDENSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, first/second arguments must be sparse/dense column/row vector."); } if ( mxGPUGetNumberOfElements(INPUTSPARSEGPUA)!=mxGPUGetNumberOfElements(INPUTDENSEGPUB)) { mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mxGPUDestroyGPUArray(INPUTDENSEGPUB); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, row/column number of sparse vector (first argument) must be equal to row/column number of dense vector(second argument)."); } mwIndex nnz1; mxArray * tempx = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUA); nnz1 = *(mxGetJc(tempx) + numAColumns); //nnz1=(mwSize)ceil(numARows*numAColumns); int nnz= static_cast<int> (nnz1); int *pointerrow =0; mxArray *row_sort; if (numAColumns == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Ir_DataGetSetIXY(tempx , pointerrow, nnz); } if (numARows == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Jc_GetSetIXY(tempx , pointerrow); } cuDoubleComplex *pointerval = (cuDoubleComplex *)mxGetComplexDoubles(tempx); size_t pivot_dimensionsrow[1] = {nnz}; size_t pivot_dimensionsvalue[1] = {nnz}; mxGPUArray *row_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *xrow_sortA=(int *)mxGPUGetData(row_sortA); gpuErrchk(cudaMemcpy(xrow_sortA, pointerrow, nnz * sizeof(*xrow_sortA), cudaMemcpyHostToDevice)); mxGPUArray *val_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); cuDoubleComplex *xval_sortA=(cuDoubleComplex*)mxGPUGetData(val_sortA); gpuErrchk(cudaMemcpy(xval_sortA, pointerval, nnz * sizeof(*xval_sortA), cudaMemcpyHostToDevice)); cuDoubleComplex const *d_B_dense; d_B_dense = (cuDoubleComplex const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUB)); mxGPUDestroyGPUArray(INPUTSPARSEGPUA); mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxDestroyArray(row_sort); mxDestroyArray(tempx); cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); cuDoubleComplex VALOUT= make_cuDoubleComplex(0.0, 0.0); cusparseSafeCall(cusparseZdotci(handle, nnz, xval_sortA, xrow_sortA, d_B_dense, &VALOUT, CUSPARSE_INDEX_BASE_ONE)); int nnzx=1; mwSize nnzm=(mwSize)nnzx; OUTPUTMATRIX = mxCreateSparse(1,1,nnzm,mxCOMPLEX); mwIndex *irs = static_cast<mwIndex *> (mxMalloc (nnzx * sizeof(mwIndex))); irs[0] = 0; mwIndex *jcs = static_cast<mwIndex *> (mxMalloc (2 * 
sizeof(mwIndex))); jcs[0]=jcs[1]=1; mxComplexDouble* PRS =static_cast<mxComplexDouble *> (mxMalloc (nnzx * sizeof(mxComplexDouble))); PRS[0].real = VALOUT.x; PRS[0].imag = VALOUT.y; mxFree (mxGetJc (OUTPUTMATRIX)) ; mxFree (mxGetIr (OUTPUTMATRIX)) ; mxFree (mxGetComplexDoubles (OUTPUTMATRIX)) ; mxSetIr(OUTPUTMATRIX, (mwIndex *)irs); mxSetJc(OUTPUTMATRIX, (mwIndex *)jcs); int s = mxSetComplexDoubles(OUTPUTMATRIX, (mxComplexDouble *)PRS); if ( s==0) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "the function is unsuccessful, either mxArray is not an unshared mxDOUBLE_CLASS array, or the data is not allocated with mxCalloc."); } mxGPUDestroyGPUArray(row_sortA); mxGPUDestroyGPUArray(val_sortA); cusparseDestroyMatDescr(descrA); cusparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } //////////////////////////////////////////////////////////////////////////////////// else if (!(mxIsGPUArray(INPUTSPARSEA)) && !(mxIsGPUArray(INPUTDENSEB))){ // if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) { // mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be cuDoubleComplex precision."); // } if((mxIsSparse(INPUTSPARSEA))&& (!mxIsSparse(INPUTDENSEB)) ){ mxInitGPU(); const mwSize *dimsCPUA; dimsCPUA=mxGetDimensions(INPUTSPARSEA); int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */ int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */ const mwSize *dimsCPUB; dimsCPUB=mxGetDimensions(INPUTDENSEB); int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */ int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */ if ( (((numARows!= 1) && (numAColumns!= 1))) ||(((numBRows!= 1) && (numBColumns!= 1)))) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, first/second arguments must be sparse/dense column/row vector."); } if ( mxGetNumberOfElements(INPUTSPARSEA)!=mxGetNumberOfElements(INPUTDENSEB)) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, row/column number of sparse vector (first argument) must be equal to row/column number of dense vector(second argument)."); } mwIndex nnz1; nnz1 = *(mxGetJc(INPUTSPARSEA) + numAColumns); //int nnz = (int)nnz1; int nnz= static_cast<int> (nnz1); int *pointerrow =0; mxArray *row_sort; if (numAColumns == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Ir_DataGetSetIXY(INPUTSPARSEA , pointerrow, nnz); } if (numARows == 1) { row_sort =mxCreateNumericMatrix(nnz, 1, mxINT32_CLASS, mxREAL); pointerrow = (int *)mxGetInt32s(row_sort); Jc_GetSetIXY(INPUTSPARSEA , pointerrow); } cuDoubleComplex *pointerval = (cuDoubleComplex *)mxGetComplexDoubles(INPUTSPARSEA); size_t pivot_dimensionsrow[1] = {nnz}; size_t pivot_dimensionsvalue[1] = {nnz}; mxGPUArray *row_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *xrow_sortA=(int *)mxGPUGetData(row_sortA); gpuErrchk(cudaMemcpy(xrow_sortA, pointerrow, nnz * sizeof(*xrow_sortA), cudaMemcpyHostToDevice)); mxGPUArray *val_sortA = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); cuDoubleComplex *xval_sortA=(cuDoubleComplex*)mxGPUGetData(val_sortA); gpuErrchk(cudaMemcpy(xval_sortA, pointerval, nnz * sizeof(*xval_sortA), cudaMemcpyHostToDevice)); 
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); cuDoubleComplex *h_B_dense1; h_B_dense1 = (cuDoubleComplex *)mxGetComplexDoubles(INPUTDENSEB); cuDoubleComplex *VALDENSE; mxGPUArray *VAL; if (numBColumns == 1) { size_t pivot_dimensionsvalueV[1] = {numBRows}; VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); VALDENSE = (cuDoubleComplex *)mxGPUGetData(VAL); gpuErrchk(cudaMemcpy(VALDENSE, h_B_dense1, sizeof(cuDoubleComplex) * numBRows , cudaMemcpyHostToDevice)); } if (numBRows == 1) { size_t pivot_dimensionsvalueV[1] = {numBColumns}; VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE); VALDENSE = (cuDoubleComplex *)mxGPUGetData(VAL); gpuErrchk(cudaMemcpy(VALDENSE, h_B_dense1, sizeof(cuDoubleComplex) * numBColumns , cudaMemcpyHostToDevice)); } cuDoubleComplex VALOUT= make_cuDoubleComplex(0.0, 0.0); cusparseSafeCall(cusparseZdotci(handle, nnz, xval_sortA, xrow_sortA, VALDENSE, &VALOUT, CUSPARSE_INDEX_BASE_ONE)); int nnzx=1; mwSize nnzm=(mwSize)nnzx; OUTPUTMATRIX = mxCreateSparse(1,1,nnzm,mxCOMPLEX); mwIndex *irs = static_cast<mwIndex *> (mxMalloc (nnzx * sizeof(mwIndex))); irs[0] = 0; mwIndex *jcs = static_cast<mwIndex *> (mxMalloc (2 * sizeof(mwIndex))); jcs[0]=jcs[1]=1; mxComplexDouble* PRS =static_cast<mxComplexDouble *> (mxMalloc (nnzx * sizeof(mxComplexDouble))); PRS[0].real = VALOUT.x; PRS[0].imag = VALOUT.y; mxFree (mxGetJc (OUTPUTMATRIX)) ; mxFree (mxGetIr (OUTPUTMATRIX)) ; mxFree (mxGetComplexDoubles (OUTPUTMATRIX)) ; mxSetIr(OUTPUTMATRIX, (mwIndex *)irs); mxSetJc(OUTPUTMATRIX, (mwIndex *)jcs); int s = mxSetComplexDoubles(OUTPUTMATRIX, (mxComplexDouble *)PRS); if ( s==0) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "the function is unsuccessful, either mxArray is not an unshared mxDOUBLE_CLASS array, or the data is not allocated with mxCalloc."); } mxGPUDestroyGPUArray(row_sortA); mxGPUDestroyGPUArray(val_sortA); mxDestroyArray(row_sort); mxGPUDestroyGPUArray(VAL); cusparseDestroyMatDescr(descrA); cusparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } }
c27b07f2f8584f34330f03a03e96caa5f240a86e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>

#include "compat.cuh"
#include "utils.cuh"

#define THREADS 1024

template <typename scalar_t>
__global__ void nearest_kernel(const scalar_t *__restrict__ x,
                               const scalar_t *__restrict__ y,
                               const int64_t *__restrict__ batch_x,
                               const int64_t *__restrict__ batch_y,
                               int64_t *__restrict__ out, const size_t dim) {
  const ptrdiff_t n_x = blockIdx.x;
  const ptrdiff_t batch_idx = batch_x[n_x];
  const ptrdiff_t idx = threadIdx.x;
  const ptrdiff_t start_idx = batch_y[batch_idx];
  const ptrdiff_t end_idx = batch_y[batch_idx + 1];

  __shared__ scalar_t best_dist[THREADS];
  __shared__ int64_t best_dist_idx[THREADS];

  scalar_t best = 1e38;
  ptrdiff_t best_idx = 0;

  for (ptrdiff_t n_y = start_idx + idx; n_y < end_idx; n_y += THREADS) {
    scalar_t dist = 0;
    for (ptrdiff_t d = 0; d < dim; d++) {
      dist += (x[n_x * dim + d] - y[n_y * dim + d]) *
              (x[n_x * dim + d] - y[n_y * dim + d]);
    }
    if (dist < best) {
      best = dist;
      best_idx = n_y;
    }
  }

  best_dist[idx] = best;
  best_dist_idx[idx] = best_idx;

  for (int64_t u = 0; (1 << u) < THREADS; u++) {
    __syncthreads();
    if (idx < (THREADS >> (u + 1))) {
      int64_t idx_1 = (idx * 2) << u;
      int64_t idx_2 = (idx * 2 + 1) << u;
      if (best_dist[idx_1] > best_dist[idx_2]) {
        best_dist[idx_1] = best_dist[idx_2];
        best_dist_idx[idx_1] = best_dist_idx[idx_2];
      }
    }
  }

  __syncthreads();
  if (idx == 0) {
    out[n_x] = best_dist_idx[0];
  }
}

at::Tensor nearest_cuda(at::Tensor x, at::Tensor y, at::Tensor batch_x,
                        at::Tensor batch_y) {
  hipSetDevice(x.get_device());

  auto batch_sizes = (int64_t *)malloc(sizeof(int64_t));
  hipMemcpy(batch_sizes, batch_x[-1].DATA_PTR<int64_t>(), sizeof(int64_t),
            hipMemcpyDeviceToHost);
  auto batch_size = batch_sizes[0] + 1;

  batch_y = degree(batch_y, batch_size);
  batch_y = at::cat({at::zeros(1, batch_y.options()), batch_y.cumsum(0)}, 0);

  auto out = at::empty_like(batch_x);

  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "nearest_kernel", [&] {
    hipLaunchKernelGGL(( nearest_kernel<scalar_t>), dim3(x.size(0)), dim3(THREADS), 0, 0,
        x.DATA_PTR<scalar_t>(), y.DATA_PTR<scalar_t>(),
        batch_x.DATA_PTR<int64_t>(), batch_y.DATA_PTR<int64_t>(),
        out.DATA_PTR<int64_t>(), x.size(1));
  });

  return out;
}
c27b07f2f8584f34330f03a03e96caa5f240a86e.cu
#include <ATen/ATen.h>

#include "compat.cuh"
#include "utils.cuh"

#define THREADS 1024

template <typename scalar_t>
__global__ void nearest_kernel(const scalar_t *__restrict__ x,
                               const scalar_t *__restrict__ y,
                               const int64_t *__restrict__ batch_x,
                               const int64_t *__restrict__ batch_y,
                               int64_t *__restrict__ out, const size_t dim) {
  const ptrdiff_t n_x = blockIdx.x;
  const ptrdiff_t batch_idx = batch_x[n_x];
  const ptrdiff_t idx = threadIdx.x;
  const ptrdiff_t start_idx = batch_y[batch_idx];
  const ptrdiff_t end_idx = batch_y[batch_idx + 1];

  __shared__ scalar_t best_dist[THREADS];
  __shared__ int64_t best_dist_idx[THREADS];

  scalar_t best = 1e38;
  ptrdiff_t best_idx = 0;

  for (ptrdiff_t n_y = start_idx + idx; n_y < end_idx; n_y += THREADS) {
    scalar_t dist = 0;
    for (ptrdiff_t d = 0; d < dim; d++) {
      dist += (x[n_x * dim + d] - y[n_y * dim + d]) *
              (x[n_x * dim + d] - y[n_y * dim + d]);
    }
    if (dist < best) {
      best = dist;
      best_idx = n_y;
    }
  }

  best_dist[idx] = best;
  best_dist_idx[idx] = best_idx;

  for (int64_t u = 0; (1 << u) < THREADS; u++) {
    __syncthreads();
    if (idx < (THREADS >> (u + 1))) {
      int64_t idx_1 = (idx * 2) << u;
      int64_t idx_2 = (idx * 2 + 1) << u;
      if (best_dist[idx_1] > best_dist[idx_2]) {
        best_dist[idx_1] = best_dist[idx_2];
        best_dist_idx[idx_1] = best_dist_idx[idx_2];
      }
    }
  }

  __syncthreads();
  if (idx == 0) {
    out[n_x] = best_dist_idx[0];
  }
}

at::Tensor nearest_cuda(at::Tensor x, at::Tensor y, at::Tensor batch_x,
                        at::Tensor batch_y) {
  cudaSetDevice(x.get_device());

  auto batch_sizes = (int64_t *)malloc(sizeof(int64_t));
  cudaMemcpy(batch_sizes, batch_x[-1].DATA_PTR<int64_t>(), sizeof(int64_t),
             cudaMemcpyDeviceToHost);
  auto batch_size = batch_sizes[0] + 1;

  batch_y = degree(batch_y, batch_size);
  batch_y = at::cat({at::zeros(1, batch_y.options()), batch_y.cumsum(0)}, 0);

  auto out = at::empty_like(batch_x);

  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "nearest_kernel", [&] {
    nearest_kernel<scalar_t><<<x.size(0), THREADS>>>(
        x.DATA_PTR<scalar_t>(), y.DATA_PTR<scalar_t>(),
        batch_x.DATA_PTR<int64_t>(), batch_y.DATA_PTR<int64_t>(),
        out.DATA_PTR<int64_t>(), x.size(1));
  });

  return out;
}
0e7738e42e96b01952bed0440bff93f71102b235.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include <cstdio> #include <cstdlib> #include <math.h> #include <chrono> using namespace std; //Code used from examples and modified for activity //Adrian Biller A01018940 //matrix multiplication with 1D void initialData(int *ip, const int size) { int i; for(i = 0; i < size; i++) { ip[i] = i+1; } return; } void printArray(int * arr, int size) { int totalSize = size * size; int row = 1; for(int x = 0; x < totalSize; x++){ printf("%d ", arr[x]); if((size * row)-1 == x){ row++; printf("\n"); } } } void multiplyMatrixOnHost(int *A, int *B, int *C, const int nx, const int ny) { for(int i = 0; i < nx; i++){ for(int j = 0; j < nx ; j++){ for(int k = 0; k < nx; k++){ C[i*nx+j] += A[i*nx+k] * B[k*nx+j]; } } } return; } void checkResult(int *hostRef, int *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("host %d gpu %d\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } // grid 1D block 1D __global__ void multiplyMatrixOnGPU1D(int *MatA, int *MatB, int *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; //idx full size //ix X ix matrix //nx = width //ny height if (ix < nx){ for(int j = 0; j < nx; j++){ for(int k = 0; k < nx; k++){ // printf("%d\n",(j*nx+ix) ); MatC[ix * nx + j] += MatA[ix * nx + k] * MatB[k * nx + j]; } } } } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop"); printf("Using Device %d: %s\n", dev, deviceProp.name); SAFE_CALL(hipSetDevice(dev), "Error setting device"); // set up data size of matrix // int nx = 1 << 12; // int ny = 1 << 12; int nx = 4000; int ny = 4000; int nxy = nx * ny; int nBytes = nxy * sizeof(int); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory int *h_A, *h_B, *hostRef, *gpuRef; h_A = (int *)malloc(nBytes); h_B = (int *)malloc(nBytes); hostRef = (int *)malloc(nBytes); gpuRef = (int *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); // printArray(h_A, nx); // printf("\n"); // printArray(h_B, nx); // printf("\n"); // printArray(h_A, nx); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result SAFE_CALLs auto start_cpu = chrono::high_resolution_clock::now(); // multiplyMatrixOnHost(h_A, h_B, hostRef, nx, ny); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count()); // malloc device global memory int *d_MatA, *d_MatB, *d_MatC; SAFE_CALL(hipMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA"); SAFE_CALL(hipMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB"); SAFE_CALL(hipMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC"); // transfer data from host to device SAFE_CALL(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice), "Error copying d_MatA"); SAFE_CALL(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice), "Error copying d_MatB"); SAFE_CALL(hipMemset(d_MatC, 0, nBytes), "Error setting d_MatC to zeros"); // invoke kernel at host side int dimx = 512; dim3 block(dimx, 1); dim3 grid((nx + block.x - 1) / block.x, 1); start_cpu = 
chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( multiplyMatrixOnGPU1D), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny); SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel"); end_cpu = chrono::high_resolution_clock::now(); duration_ms = end_cpu - start_cpu; printf("multiplyMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count()); // SAFE_CALL kernel error SAFE_CALL(hipGetLastError(), "Error with last error"); // copy kernel result back to host side SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC"); // check device results // printArray(hostRef, nx); // printf("Host\n"); // printArray(gpuRef, nx); // printf("GPU\n"); // checkResult(hostRef, gpuRef, nxy); // free device global memory SAFE_CALL(hipFree(d_MatA), "Error freeing memory"); SAFE_CALL(hipFree(d_MatB), "Error freeing memory"); SAFE_CALL(hipFree(d_MatC), "Error freeing memory"); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device SAFE_CALL(hipDeviceReset(), "Error reseting"); return (0); }
0e7738e42e96b01952bed0440bff93f71102b235.cu
#include "common.h" #include <cstdio> #include <cstdlib> #include <math.h> #include <chrono> using namespace std; //Code used from examples and modified for activity //Adrian Biller A01018940 //matrix multiplication with 1D void initialData(int *ip, const int size) { int i; for(i = 0; i < size; i++) { ip[i] = i+1; } return; } void printArray(int * arr, int size) { int totalSize = size * size; int row = 1; for(int x = 0; x < totalSize; x++){ printf("%d ", arr[x]); if((size * row)-1 == x){ row++; printf("\n"); } } } void multiplyMatrixOnHost(int *A, int *B, int *C, const int nx, const int ny) { for(int i = 0; i < nx; i++){ for(int j = 0; j < nx ; j++){ for(int k = 0; k < nx; k++){ C[i*nx+j] += A[i*nx+k] * B[k*nx+j]; } } } return; } void checkResult(int *hostRef, int *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("host %d gpu %d\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } // grid 1D block 1D __global__ void multiplyMatrixOnGPU1D(int *MatA, int *MatB, int *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; //idx full size //ix X ix matrix //nx = width //ny height if (ix < nx){ for(int j = 0; j < nx; j++){ for(int k = 0; k < nx; k++){ // printf("%d\n",(j*nx+ix) ); MatC[ix * nx + j] += MatA[ix * nx + k] * MatB[k * nx + j]; } } } } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop"); printf("Using Device %d: %s\n", dev, deviceProp.name); SAFE_CALL(cudaSetDevice(dev), "Error setting device"); // set up data size of matrix // int nx = 1 << 12; // int ny = 1 << 12; int nx = 4000; int ny = 4000; int nxy = nx * ny; int nBytes = nxy * sizeof(int); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory int *h_A, *h_B, *hostRef, *gpuRef; h_A = (int *)malloc(nBytes); h_B = (int *)malloc(nBytes); hostRef = (int *)malloc(nBytes); gpuRef = (int *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); // printArray(h_A, nx); // printf("\n"); // printArray(h_B, nx); // printf("\n"); // printArray(h_A, nx); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add matrix at host side for result SAFE_CALLs auto start_cpu = chrono::high_resolution_clock::now(); // multiplyMatrixOnHost(h_A, h_B, hostRef, nx, ny); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count()); // malloc device global memory int *d_MatA, *d_MatB, *d_MatC; SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA"); SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB"); SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC"); // transfer data from host to device SAFE_CALL(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA"); SAFE_CALL(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB"); SAFE_CALL(cudaMemset(d_MatC, 0, nBytes), "Error setting d_MatC to zeros"); // invoke kernel at host side int dimx = 512; dim3 block(dimx, 1); dim3 grid((nx + block.x - 1) / block.x, 1); start_cpu = chrono::high_resolution_clock::now(); multiplyMatrixOnGPU1D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, 
nx, ny); SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel"); end_cpu = chrono::high_resolution_clock::now(); duration_ms = end_cpu - start_cpu; printf("multiplyMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count()); // SAFE_CALL kernel error SAFE_CALL(cudaGetLastError(), "Error with last error"); // copy kernel result back to host side SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC"); // check device results // printArray(hostRef, nx); // printf("Host\n"); // printArray(gpuRef, nx); // printf("GPU\n"); // checkResult(hostRef, gpuRef, nxy); // free device global memory SAFE_CALL(cudaFree(d_MatA), "Error freeing memory"); SAFE_CALL(cudaFree(d_MatB), "Error freeing memory"); SAFE_CALL(cudaFree(d_MatC), "Error freeing memory"); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); // reset device SAFE_CALL(cudaDeviceReset(), "Error reseting"); return (0); }
d61c4518f2f88db0d4fdd3ade617b0adf9bc8669.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
 *cr
 *cr            (C) Copyright 2007 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ***************************************************************************/

#include "randomc.h"

#define LOWER_MASK ((1LU << MERS_R) - 1)
#define UPPER_MASK (0xFFFFFFFF << MERS_R)

/*
  The following two functions implement the Mersenne Twister random number
  generator. The copyright notice/disclaimer, etc are related to this code.

  Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright notice,
     this list of conditions and the following disclaimer in the documentation
     and/or other materials provided with the distribution.

  3. The names of its contributors may not be used to endorse or promote
     products derived from this software without specific prior written
     permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

__device__ void RandomInit(uint32 seed)
{
  int i;

  // re-seed generator
  if(threadIdx.x == 0) {
    mt[0]= seed & 0xffffffffUL;
    for (i=1; i < MERS_N; i++) {
      mt[i] = (1812433253UL * (mt[i-1] ^ (mt[i-1] >> 30)) + i);
    }
  }
  __syncthreads();
}

__device__ void BRandom()
{
  // generate 32 random bits
  uint32 y;
  int thdx;

  // block size is 256
  // step 1: 0-226, MERS_N-MERS_M=227
  if (threadIdx.x<MERS_N-MERS_M) {
    y = (mt[threadIdx.x] & UPPER_MASK) | (mt[threadIdx.x+1] & LOWER_MASK);
    y = mt[threadIdx.x+MERS_M] ^ (y >> 1) ^ ( (y & 1)? MERS_A: 0);
  }
  __syncthreads();
  if (threadIdx.x<MERS_N-MERS_M) {
    mt[threadIdx.x] = y;
  }
  __syncthreads();

  // step 2: 227-453
  thdx = threadIdx.x + (MERS_N-MERS_M);
  if (threadIdx.x<MERS_N-MERS_M) {
    y = (mt[thdx] & UPPER_MASK) | (mt[thdx+1] & LOWER_MASK);
    y = mt[threadIdx.x] ^ (y >> 1) ^ ( (y & 1)? MERS_A: 0);
  }
  __syncthreads();
  if (threadIdx.x<MERS_N-MERS_M) {
    mt[thdx] = y;
  }
  __syncthreads();

  // step 3: 454-622
  thdx += (MERS_N-MERS_M);
  if (thdx < MERS_N-1) {
    y = (mt[thdx] & UPPER_MASK) | (mt[thdx+1] & LOWER_MASK);
    y = mt[threadIdx.x+(MERS_N-MERS_M)] ^ (y >> 1) ^ ( (y & 1)? MERS_A: 0);
  }
  __syncthreads();
  if (thdx < MERS_N-1) {
    mt[thdx] = y;
  }
  __syncthreads();

  // step 4: 623
  if (threadIdx.x == 0) {
    y = (mt[MERS_N-1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
    mt[MERS_N-1] = mt[MERS_M-1] ^ (y >> 1) ^ ( (y & 1)? MERS_A: 0);
  }
  __syncthreads();

  // Tempering (May be omitted):
  y ^= y >> MERS_U;
  y ^= (y << MERS_S) & MERS_B;
  y ^= (y << MERS_T) & MERS_C;
  y ^= y >> MERS_L;
}

// return a random in [0, max]
// #define Random(max_plus_1) (BRandom()% (max_plus_1))
d61c4518f2f88db0d4fdd3ade617b0adf9bc8669.cu
/***************************************************************************
 *cr
 *cr            (C) Copyright 2007 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ***************************************************************************/

#include "randomc.h"

#define LOWER_MASK ((1LU << MERS_R) - 1)
#define UPPER_MASK (0xFFFFFFFF << MERS_R)

/*
  The following two functions implement the Mersenne Twister random number
  generator. The copyright notice/disclaimer, etc are related to this code.

  Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright notice,
     this list of conditions and the following disclaimer in the documentation
     and/or other materials provided with the distribution.

  3. The names of its contributors may not be used to endorse or promote
     products derived from this software without specific prior written
     permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

__device__ void RandomInit(uint32 seed)
{
  int i;

  // re-seed generator
  if(threadIdx.x == 0) {
    mt[0]= seed & 0xffffffffUL;
    for (i=1; i < MERS_N; i++) {
      mt[i] = (1812433253UL * (mt[i-1] ^ (mt[i-1] >> 30)) + i);
    }
  }
  __syncthreads();
}

__device__ void BRandom()
{
  // generate 32 random bits
  uint32 y;
  int thdx;

  // block size is 256
  // step 1: 0-226, MERS_N-MERS_M=227
  if (threadIdx.x<MERS_N-MERS_M) {
    y = (mt[threadIdx.x] & UPPER_MASK) | (mt[threadIdx.x+1] & LOWER_MASK);
    y = mt[threadIdx.x+MERS_M] ^ (y >> 1) ^ ( (y & 1)? MERS_A: 0);
  }
  __syncthreads();
  if (threadIdx.x<MERS_N-MERS_M) {
    mt[threadIdx.x] = y;
  }
  __syncthreads();

  // step 2: 227-453
  thdx = threadIdx.x + (MERS_N-MERS_M);
  if (threadIdx.x<MERS_N-MERS_M) {
    y = (mt[thdx] & UPPER_MASK) | (mt[thdx+1] & LOWER_MASK);
    y = mt[threadIdx.x] ^ (y >> 1) ^ ( (y & 1)? MERS_A: 0);
  }
  __syncthreads();
  if (threadIdx.x<MERS_N-MERS_M) {
    mt[thdx] = y;
  }
  __syncthreads();

  // step 3: 454-622
  thdx += (MERS_N-MERS_M);
  if (thdx < MERS_N-1) {
    y = (mt[thdx] & UPPER_MASK) | (mt[thdx+1] & LOWER_MASK);
    y = mt[threadIdx.x+(MERS_N-MERS_M)] ^ (y >> 1) ^ ( (y & 1)? MERS_A: 0);
  }
  __syncthreads();
  if (thdx < MERS_N-1) {
    mt[thdx] = y;
  }
  __syncthreads();

  // step 4: 623
  if (threadIdx.x == 0) {
    y = (mt[MERS_N-1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
    mt[MERS_N-1] = mt[MERS_M-1] ^ (y >> 1) ^ ( (y & 1)? MERS_A: 0);
  }
  __syncthreads();

  // Tempering (May be omitted):
  y ^= y >> MERS_U;
  y ^= (y << MERS_S) & MERS_B;
  y ^= (y << MERS_T) & MERS_C;
  y ^= y >> MERS_L;
}

// return a random in [0, max]
// #define Random(max_plus_1) (BRandom()% (max_plus_1))
91808057da1ace747978d3d5db9d4cb7d5f6ef9e.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <hip/hip_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%32)<=11){ for(unsigned k=0; k<iterations;k++) { Value2= I1+I2; Value3=I1-I2; Value1-=Value2; Value3+=Value1; Value2-=Value3; Value1+=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after\n"); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); printf("gpu 
execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
91808057da1ace747978d3d5db9d4cb7d5f6ef9e.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <cuda_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=0; unsigned Value2=0; unsigned Value3=0; unsigned Value=0; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%32)<=11){ for(unsigned k=0; k<iterations;k++) { Value2= I1+I2; Value3=I1-I2; Value1-=Value2; Value3+=Value1; Value2-=Value3; Value1+=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after\n"); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch 
failure"); cudaThreadSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
8fdc5b260ae0d546a5cb68758c87a836e5912391.hip
// !!! This is a file automatically generated by hipify!!! #include "reduce.hpp" #include "checkCudaErrors.hpp" #include "cudaMemory.hpp" #include "functions.hpp" #include <hip/hip_runtime.h> #include <thrust/reduce.h> #include <thrust/extrema.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <stdexcept> #include <string> #include <limits> #include <utility> // -------------------- GPU Parallel Reduce Add (thrust) -------------------- unsigned int thrustGPUreduce( const unsigned int* const d_in, const unsigned int length ) { return thrust::reduce(thrust::device, d_in, d_in + length, 0, thrust::plus<unsigned int>()); } // -------------------- GPU Parallel Find Min Max (thrust) -------------------- void thrustGPUfindMinMaxFloat( const float* const d_in, const unsigned int length, float& min, float& max ) { thrust::pair<const float*, const float*> result = thrust::minmax_element(thrust::device, d_in, d_in + length); memcpyGPUtoCPU((void*) result.first, (void*) &min, sizeof(float)); memcpyGPUtoCPU((void*) result.second, (void*) &max, sizeof(float)); } // -------------------- GPU Parallel Reduce Add -------------------- template <typename T> __device__ __forceinline__ void warpAdd( volatile T* sh_data, const unsigned int idx ) { sh_data[idx] = sh_data[idx] + sh_data[idx + 32]; sh_data[idx] = sh_data[idx] + sh_data[idx + 16]; sh_data[idx] = sh_data[idx] + sh_data[idx + 8]; sh_data[idx] = sh_data[idx] + sh_data[idx + 4]; sh_data[idx] = sh_data[idx] + sh_data[idx + 2]; sh_data[idx] = sh_data[idx] + sh_data[idx + 1]; } template <typename T> __global__ void kernelReduceAdd( const T* const d_in, const unsigned int length, const T identity, T* const d_out ) { const unsigned int absIdx = blockIdx.x*blockDim.x*4 + threadIdx.x; const unsigned int idx = threadIdx.x; extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[]; T* sh_data = reinterpret_cast<T*>(sh_mem); if (absIdx < length) { sh_data[idx] = d_in[absIdx]; } else { sh_data[idx] = identity; } for (unsigned int i = 1; i < 4; i++) { if (absIdx + i*blockDim.x < length) sh_data[idx] = sh_data[idx] + d_in[ absIdx + i*blockDim.x ]; } __syncthreads(); for (unsigned int i = blockDim.x/4; i > 32; i >>= 2) { if (idx >= i) return; sh_data[idx] = sh_data[idx] + sh_data[ 3*idx + i ] + sh_data[ 3*idx + i + 1 ] + sh_data[ 3*idx + i + 2 ]; __syncthreads(); } if (idx >= warpSize) return; warpAdd(sh_data, idx); if (idx == 0) d_out[blockIdx.x] = sh_data[0]; } template <typename T> T pGPUreduce( const T* const d_in, const unsigned int length, const T identity ) { dim3 blockDim(1024, 1, 1); unsigned int gridX = ui_ceilDiv(length, 4*blockDim.x); dim3 gridDim(gridX, 1, 1); T* d_o; T* d_i; allocCudaMem((void**) &d_o, gridDim.x *sizeof(T)); // gpuMemFree((void**) &d_o); allocCudaMem((void**) &d_i, ui_ceilDiv(gridDim.x, 4*blockDim.x)*sizeof(T)); // gpuMemFree((void**) &d_i); hipLaunchKernelGGL(( kernelReduceAdd), dim3(gridDim), dim3(blockDim), blockDim.x*sizeof(T), 0, d_in, length, identity, d_o); while (gridDim.x > 1) { std::swap(d_o, d_i); hipLaunchKernelGGL(( kernelReduceAdd), dim3(gridDim), dim3(blockDim), blockDim.x*sizeof(T), 0, d_i, gridDim.x, identity, d_o); gridDim.x = ui_ceilDiv(gridDim.x, 4*blockDim.x); } hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); T ret; memcpyGPUtoCPU((void*) d_o, (void*) &ret, sizeof(T)); gpuMemFree((void**) &d_i); gpuMemFree((void**) &d_o); return ret; } unsigned int parallelGPUreduce( const unsigned int* const d_in, const unsigned int length ) { return pGPUreduce(d_in, length, 0U); } // 
-------------------- GPU Parallel Find Min Max -------------------- template <typename T> __device__ __forceinline__ void warpMin( volatile T* sh_data, const unsigned int idx ) { sh_data[idx] = min( sh_data[idx], sh_data[idx + 32] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 16] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 8] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 4] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 2] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 1] ); } template <typename T> __global__ void kernelReduceMin( const T* const d_in, const unsigned int length, const T identity, T* const d_out ) { const unsigned int absIdx = blockIdx.x*blockDim.x*4 + threadIdx.x; const unsigned int idx = threadIdx.x; extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[]; T* sh_data = reinterpret_cast<T*>(sh_mem); if (absIdx < length) { sh_data[idx] = d_in[absIdx]; } else { sh_data[idx] = identity; } for (unsigned int i = 1; i < 4; i++) { if (absIdx + i*blockDim.x < length) sh_data[idx] = min(sh_data[idx], d_in[ absIdx + i*blockDim.x ]); } __syncthreads(); for (unsigned int i = blockDim.x/4; i > 32; i >>= 2) { if (idx >= i) return; sh_data[idx] = min( min( sh_data[idx], sh_data[ 3*idx + i ] ), min( sh_data[ 3*idx + i + 1 ], sh_data[ 3*idx + i + 2 ] ) ); __syncthreads(); } if (idx >= warpSize) return; warpMin(sh_data, idx); if (idx == 0) d_out[blockIdx.x] = sh_data[0]; } template <typename T> __device__ __forceinline__ void warpMax( volatile T* sh_data, const unsigned int idx ) { sh_data[idx] = max( sh_data[idx], sh_data[idx + 32] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 16] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 8] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 4] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 2] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 1] ); } template <typename T> __global__ void kernelReduceMax( const T* const d_in, const unsigned int length, const T identity, T* const d_out ) { const unsigned int absIdx = blockIdx.x*blockDim.x*4 + threadIdx.x; const unsigned int idx = threadIdx.x; extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[]; T* sh_data = reinterpret_cast<T*>(sh_mem); if (absIdx < length) { sh_data[idx] = d_in[absIdx]; } else { sh_data[idx] = identity; } for (unsigned int i = 1; i < 4; i++) { if (absIdx + i*blockDim.x < length) sh_data[idx] = max(sh_data[idx], d_in[ absIdx + i*blockDim.x ]); } __syncthreads(); for (unsigned int i = blockDim.x/4; i > 32; i >>= 2) { if (idx >= i) return; sh_data[idx] = max( max( sh_data[idx], sh_data[ 3*idx + i ] ), max( sh_data[ 3*idx + i + 1 ], sh_data[ 3*idx + i + 2 ] ) ); __syncthreads(); } if (idx >= warpSize) return; warpMax(sh_data, idx); if (idx == 0) d_out[blockIdx.x] = sh_data[0]; } template <typename T> T pGPUfindMinMax( const T* const d_in, const unsigned int length, const T identity, std::string op ) { void (*kernel)(const T* const, const unsigned int, const T, T* const) = nullptr; if (op == "min") { kernel = kernelReduceMin<T>; } else if (op == "max") { kernel = kernelReduceMax<T>; } else { throw std::invalid_argument("Operators supported by parallelReduce are: min or max. 
Given: " + op); } dim3 blockDim(1024, 1, 1); unsigned int gridX = ui_ceilDiv(length, 4*blockDim.x); dim3 gridDim(gridX, 1, 1); T* d_o; T* d_i; allocCudaMem((void**) &d_o, gridDim.x *sizeof(T)); // gpuMemFree((void**) &d_o); allocCudaMem((void**) &d_i, ui_ceilDiv(gridDim.x, 4*blockDim.x)*sizeof(T)); // gpuMemFree((void**) &d_i); hipLaunchKernelGGL(( kernel), dim3(gridDim), dim3(blockDim), blockDim.x*sizeof(T), 0, d_in, length, identity, d_o); while (gridDim.x > 1) { std::swap(d_o, d_i); hipLaunchKernelGGL(( kernel), dim3(gridDim), dim3(blockDim), blockDim.x*sizeof(T), 0, d_i, gridDim.x, identity, d_o); gridDim.x = ui_ceilDiv(gridDim.x, 4*blockDim.x); } hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); T ret; memcpyGPUtoCPU((void*) d_o, (void*) &ret, sizeof(T)); gpuMemFree((void**) &d_i); gpuMemFree((void**) &d_o); return ret; } void parallelGPUfindMinMaxFloat( const float* const d_in, const float length, float& min, float& max ) { const float flt_max = std::numeric_limits<float>::max(); min = pGPUfindMinMax(d_in, length, flt_max, "min"); max = pGPUfindMinMax(d_in, length, -flt_max, "max"); } // -------------------- GPU Parallel Reduce Bit Or -------------------- template <typename T> __device__ __forceinline__ void warpBitOr( volatile T* sh_data, const unsigned int idx ) { sh_data[idx] = sh_data[idx] | sh_data[idx + 32]; sh_data[idx] = sh_data[idx] | sh_data[idx + 16]; sh_data[idx] = sh_data[idx] | sh_data[idx + 8]; sh_data[idx] = sh_data[idx] | sh_data[idx + 4]; sh_data[idx] = sh_data[idx] | sh_data[idx + 2]; sh_data[idx] = sh_data[idx] | sh_data[idx + 1]; } __device__ __forceinline__ unsigned int getKey( unsigned int key ) { return key; } __device__ __forceinline__ unsigned int getKey( thrust::pair<unsigned int, unsigned int> element ) { return element.first; } __device__ __forceinline__ unsigned int storeKey( unsigned int key, unsigned int* d_out ) { return key; } __device__ __forceinline__ thrust::pair<unsigned int, unsigned int> storeKey( unsigned int key, thrust::pair<unsigned int, unsigned int>* d_out ) { return thrust::pair<unsigned int, unsigned int>(key, 0); } template <typename T> __global__ void kernelReduceBitOr( const T* const d_in, const unsigned int length, const T identity, T* const d_out ) { const unsigned int absIdx = blockIdx.x*blockDim.x*4 + threadIdx.x; const unsigned int idx = threadIdx.x; extern __shared__ __align__(sizeof(unsigned int)) unsigned char sh_mem[]; unsigned int* sh_data = reinterpret_cast<unsigned int*>(sh_mem); T element; if (absIdx < length) { element = d_in[absIdx]; } else { element = identity; } sh_data[idx] = getKey(element); for (unsigned int i = 1; i < 4; i++) { if (absIdx + i*blockDim.x < length) sh_data[idx] = sh_data[idx] | getKey(d_in[ absIdx + i*blockDim.x ]); } __syncthreads(); for (unsigned int i = blockDim.x/4; i > 32; i >>= 2) { if (idx >= i) return; sh_data[idx] = sh_data[idx] | sh_data[ 3*idx + i ] | sh_data[ 3*idx + i + 1 ] | sh_data[ 3*idx + i + 2 ]; __syncthreads(); } if (idx >= warpSize) return; warpBitOr(sh_data, idx); if (idx == 0) d_out[blockIdx.x] = storeKey(sh_data[0], d_out); } template <typename T> T pGPUreduceBitOr( const T* const d_in, const unsigned int length, const T identity ) { dim3 blockDim(1024, 1, 1); unsigned int gridX = ui_ceilDiv(length, 4*blockDim.x); dim3 gridDim(gridX, 1, 1); T* d_o; T* d_i; allocCudaMem((void**) &d_o, gridDim.x *sizeof(T)); // gpuMemFree((void**) &d_o); allocCudaMem((void**) &d_i, ui_ceilDiv(gridDim.x, 4*blockDim.x)*sizeof(T)); // gpuMemFree((void**) &d_i); hipLaunchKernelGGL(( 
kernelReduceBitOr), dim3(gridDim), dim3(blockDim), blockDim.x*sizeof(T), 0, d_in, length, identity, d_o); while (gridDim.x > 1) { std::swap(d_o, d_i); hipLaunchKernelGGL(( kernelReduceBitOr), dim3(gridDim), dim3(blockDim), blockDim.x*sizeof(T), 0, d_i, gridDim.x, identity, d_o); gridDim.x = ui_ceilDiv(gridDim.x, 4*blockDim.x); } hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); T ret; memcpyGPUtoCPU((void*) d_o, (void*) &ret, sizeof(T)); gpuMemFree((void**) &d_i); gpuMemFree((void**) &d_o); return ret; } unsigned int parallelGPUreduceBitOr( unsigned int* d_in, const unsigned int length ) { return pGPUreduceBitOr((unsigned int*) d_in, length, 0U); } unsigned int parallelGPUreduceBitOr( std::pair<unsigned int, unsigned int>* d_in, const unsigned int length ) { auto identity = thrust::pair<unsigned int, unsigned int>(0U, 0U); return pGPUreduceBitOr((thrust::pair<unsigned int, unsigned int>*) d_in, length, identity).first; }
8fdc5b260ae0d546a5cb68758c87a836e5912391.cu
#include "reduce.hpp" #include "checkCudaErrors.hpp" #include "cudaMemory.hpp" #include "functions.hpp" #include <cuda.h> #include <thrust/reduce.h> #include <thrust/extrema.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <stdexcept> #include <string> #include <limits> #include <utility> // -------------------- GPU Parallel Reduce Add (thrust) -------------------- unsigned int thrustGPUreduce( const unsigned int* const d_in, const unsigned int length ) { return thrust::reduce(thrust::device, d_in, d_in + length, 0, thrust::plus<unsigned int>()); } // -------------------- GPU Parallel Find Min Max (thrust) -------------------- void thrustGPUfindMinMaxFloat( const float* const d_in, const unsigned int length, float& min, float& max ) { thrust::pair<const float*, const float*> result = thrust::minmax_element(thrust::device, d_in, d_in + length); memcpyGPUtoCPU((void*) result.first, (void*) &min, sizeof(float)); memcpyGPUtoCPU((void*) result.second, (void*) &max, sizeof(float)); } // -------------------- GPU Parallel Reduce Add -------------------- template <typename T> __device__ __forceinline__ void warpAdd( volatile T* sh_data, const unsigned int idx ) { sh_data[idx] = sh_data[idx] + sh_data[idx + 32]; sh_data[idx] = sh_data[idx] + sh_data[idx + 16]; sh_data[idx] = sh_data[idx] + sh_data[idx + 8]; sh_data[idx] = sh_data[idx] + sh_data[idx + 4]; sh_data[idx] = sh_data[idx] + sh_data[idx + 2]; sh_data[idx] = sh_data[idx] + sh_data[idx + 1]; } template <typename T> __global__ void kernelReduceAdd( const T* const d_in, const unsigned int length, const T identity, T* const d_out ) { const unsigned int absIdx = blockIdx.x*blockDim.x*4 + threadIdx.x; const unsigned int idx = threadIdx.x; extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[]; T* sh_data = reinterpret_cast<T*>(sh_mem); if (absIdx < length) { sh_data[idx] = d_in[absIdx]; } else { sh_data[idx] = identity; } for (unsigned int i = 1; i < 4; i++) { if (absIdx + i*blockDim.x < length) sh_data[idx] = sh_data[idx] + d_in[ absIdx + i*blockDim.x ]; } __syncthreads(); for (unsigned int i = blockDim.x/4; i > 32; i >>= 2) { if (idx >= i) return; sh_data[idx] = sh_data[idx] + sh_data[ 3*idx + i ] + sh_data[ 3*idx + i + 1 ] + sh_data[ 3*idx + i + 2 ]; __syncthreads(); } if (idx >= warpSize) return; warpAdd(sh_data, idx); if (idx == 0) d_out[blockIdx.x] = sh_data[0]; } template <typename T> T pGPUreduce( const T* const d_in, const unsigned int length, const T identity ) { dim3 blockDim(1024, 1, 1); unsigned int gridX = ui_ceilDiv(length, 4*blockDim.x); dim3 gridDim(gridX, 1, 1); T* d_o; T* d_i; allocCudaMem((void**) &d_o, gridDim.x *sizeof(T)); // gpuMemFree((void**) &d_o); allocCudaMem((void**) &d_i, ui_ceilDiv(gridDim.x, 4*blockDim.x)*sizeof(T)); // gpuMemFree((void**) &d_i); kernelReduceAdd<<<gridDim, blockDim, blockDim.x*sizeof(T)>>>(d_in, length, identity, d_o); while (gridDim.x > 1) { std::swap(d_o, d_i); kernelReduceAdd<<<gridDim, blockDim, blockDim.x*sizeof(T)>>>(d_i, gridDim.x, identity, d_o); gridDim.x = ui_ceilDiv(gridDim.x, 4*blockDim.x); } cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); T ret; memcpyGPUtoCPU((void*) d_o, (void*) &ret, sizeof(T)); gpuMemFree((void**) &d_i); gpuMemFree((void**) &d_o); return ret; } unsigned int parallelGPUreduce( const unsigned int* const d_in, const unsigned int length ) { return pGPUreduce(d_in, length, 0U); } // -------------------- GPU Parallel Find Min Max -------------------- template <typename T> __device__ __forceinline__ void warpMin( 
volatile T* sh_data, const unsigned int idx ) { sh_data[idx] = min( sh_data[idx], sh_data[idx + 32] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 16] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 8] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 4] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 2] ); sh_data[idx] = min( sh_data[idx], sh_data[idx + 1] ); } template <typename T> __global__ void kernelReduceMin( const T* const d_in, const unsigned int length, const T identity, T* const d_out ) { const unsigned int absIdx = blockIdx.x*blockDim.x*4 + threadIdx.x; const unsigned int idx = threadIdx.x; extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[]; T* sh_data = reinterpret_cast<T*>(sh_mem); if (absIdx < length) { sh_data[idx] = d_in[absIdx]; } else { sh_data[idx] = identity; } for (unsigned int i = 1; i < 4; i++) { if (absIdx + i*blockDim.x < length) sh_data[idx] = min(sh_data[idx], d_in[ absIdx + i*blockDim.x ]); } __syncthreads(); for (unsigned int i = blockDim.x/4; i > 32; i >>= 2) { if (idx >= i) return; sh_data[idx] = min( min( sh_data[idx], sh_data[ 3*idx + i ] ), min( sh_data[ 3*idx + i + 1 ], sh_data[ 3*idx + i + 2 ] ) ); __syncthreads(); } if (idx >= warpSize) return; warpMin(sh_data, idx); if (idx == 0) d_out[blockIdx.x] = sh_data[0]; } template <typename T> __device__ __forceinline__ void warpMax( volatile T* sh_data, const unsigned int idx ) { sh_data[idx] = max( sh_data[idx], sh_data[idx + 32] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 16] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 8] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 4] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 2] ); sh_data[idx] = max( sh_data[idx], sh_data[idx + 1] ); } template <typename T> __global__ void kernelReduceMax( const T* const d_in, const unsigned int length, const T identity, T* const d_out ) { const unsigned int absIdx = blockIdx.x*blockDim.x*4 + threadIdx.x; const unsigned int idx = threadIdx.x; extern __shared__ __align__(sizeof(T)) unsigned char sh_mem[]; T* sh_data = reinterpret_cast<T*>(sh_mem); if (absIdx < length) { sh_data[idx] = d_in[absIdx]; } else { sh_data[idx] = identity; } for (unsigned int i = 1; i < 4; i++) { if (absIdx + i*blockDim.x < length) sh_data[idx] = max(sh_data[idx], d_in[ absIdx + i*blockDim.x ]); } __syncthreads(); for (unsigned int i = blockDim.x/4; i > 32; i >>= 2) { if (idx >= i) return; sh_data[idx] = max( max( sh_data[idx], sh_data[ 3*idx + i ] ), max( sh_data[ 3*idx + i + 1 ], sh_data[ 3*idx + i + 2 ] ) ); __syncthreads(); } if (idx >= warpSize) return; warpMax(sh_data, idx); if (idx == 0) d_out[blockIdx.x] = sh_data[0]; } template <typename T> T pGPUfindMinMax( const T* const d_in, const unsigned int length, const T identity, std::string op ) { void (*kernel)(const T* const, const unsigned int, const T, T* const) = nullptr; if (op == "min") { kernel = kernelReduceMin<T>; } else if (op == "max") { kernel = kernelReduceMax<T>; } else { throw std::invalid_argument("Operators supported by parallelReduce are: min or max. 
Given: " + op); } dim3 blockDim(1024, 1, 1); unsigned int gridX = ui_ceilDiv(length, 4*blockDim.x); dim3 gridDim(gridX, 1, 1); T* d_o; T* d_i; allocCudaMem((void**) &d_o, gridDim.x *sizeof(T)); // gpuMemFree((void**) &d_o); allocCudaMem((void**) &d_i, ui_ceilDiv(gridDim.x, 4*blockDim.x)*sizeof(T)); // gpuMemFree((void**) &d_i); kernel<<<gridDim, blockDim, blockDim.x*sizeof(T)>>>(d_in, length, identity, d_o); while (gridDim.x > 1) { std::swap(d_o, d_i); kernel<<<gridDim, blockDim, blockDim.x*sizeof(T)>>>(d_i, gridDim.x, identity, d_o); gridDim.x = ui_ceilDiv(gridDim.x, 4*blockDim.x); } cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); T ret; memcpyGPUtoCPU((void*) d_o, (void*) &ret, sizeof(T)); gpuMemFree((void**) &d_i); gpuMemFree((void**) &d_o); return ret; } void parallelGPUfindMinMaxFloat( const float* const d_in, const float length, float& min, float& max ) { const float flt_max = std::numeric_limits<float>::max(); min = pGPUfindMinMax(d_in, length, flt_max, "min"); max = pGPUfindMinMax(d_in, length, -flt_max, "max"); } // -------------------- GPU Parallel Reduce Bit Or -------------------- template <typename T> __device__ __forceinline__ void warpBitOr( volatile T* sh_data, const unsigned int idx ) { sh_data[idx] = sh_data[idx] | sh_data[idx + 32]; sh_data[idx] = sh_data[idx] | sh_data[idx + 16]; sh_data[idx] = sh_data[idx] | sh_data[idx + 8]; sh_data[idx] = sh_data[idx] | sh_data[idx + 4]; sh_data[idx] = sh_data[idx] | sh_data[idx + 2]; sh_data[idx] = sh_data[idx] | sh_data[idx + 1]; } __device__ __forceinline__ unsigned int getKey( unsigned int key ) { return key; } __device__ __forceinline__ unsigned int getKey( thrust::pair<unsigned int, unsigned int> element ) { return element.first; } __device__ __forceinline__ unsigned int storeKey( unsigned int key, unsigned int* d_out ) { return key; } __device__ __forceinline__ thrust::pair<unsigned int, unsigned int> storeKey( unsigned int key, thrust::pair<unsigned int, unsigned int>* d_out ) { return thrust::pair<unsigned int, unsigned int>(key, 0); } template <typename T> __global__ void kernelReduceBitOr( const T* const d_in, const unsigned int length, const T identity, T* const d_out ) { const unsigned int absIdx = blockIdx.x*blockDim.x*4 + threadIdx.x; const unsigned int idx = threadIdx.x; extern __shared__ __align__(sizeof(unsigned int)) unsigned char sh_mem[]; unsigned int* sh_data = reinterpret_cast<unsigned int*>(sh_mem); T element; if (absIdx < length) { element = d_in[absIdx]; } else { element = identity; } sh_data[idx] = getKey(element); for (unsigned int i = 1; i < 4; i++) { if (absIdx + i*blockDim.x < length) sh_data[idx] = sh_data[idx] | getKey(d_in[ absIdx + i*blockDim.x ]); } __syncthreads(); for (unsigned int i = blockDim.x/4; i > 32; i >>= 2) { if (idx >= i) return; sh_data[idx] = sh_data[idx] | sh_data[ 3*idx + i ] | sh_data[ 3*idx + i + 1 ] | sh_data[ 3*idx + i + 2 ]; __syncthreads(); } if (idx >= warpSize) return; warpBitOr(sh_data, idx); if (idx == 0) d_out[blockIdx.x] = storeKey(sh_data[0], d_out); } template <typename T> T pGPUreduceBitOr( const T* const d_in, const unsigned int length, const T identity ) { dim3 blockDim(1024, 1, 1); unsigned int gridX = ui_ceilDiv(length, 4*blockDim.x); dim3 gridDim(gridX, 1, 1); T* d_o; T* d_i; allocCudaMem((void**) &d_o, gridDim.x *sizeof(T)); // gpuMemFree((void**) &d_o); allocCudaMem((void**) &d_i, ui_ceilDiv(gridDim.x, 4*blockDim.x)*sizeof(T)); // gpuMemFree((void**) &d_i); kernelReduceBitOr<<<gridDim, blockDim, blockDim.x*sizeof(T)>>>(d_in, length, identity, 
d_o); while (gridDim.x > 1) { std::swap(d_o, d_i); kernelReduceBitOr<<<gridDim, blockDim, blockDim.x*sizeof(T)>>>(d_i, gridDim.x, identity, d_o); gridDim.x = ui_ceilDiv(gridDim.x, 4*blockDim.x); } cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); T ret; memcpyGPUtoCPU((void*) d_o, (void*) &ret, sizeof(T)); gpuMemFree((void**) &d_i); gpuMemFree((void**) &d_o); return ret; } unsigned int parallelGPUreduceBitOr( unsigned int* d_in, const unsigned int length ) { return pGPUreduceBitOr((unsigned int*) d_in, length, 0U); } unsigned int parallelGPUreduceBitOr( std::pair<unsigned int, unsigned int>* d_in, const unsigned int length ) { auto identity = thrust::pair<unsigned int, unsigned int>(0U, 0U); return pGPUreduceBitOr((thrust::pair<unsigned int, unsigned int>*) d_in, length, identity).first; }
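The reduce translation unit above only defines the reduction entry points; a minimal host-side driver sketch follows, assuming reduce.hpp declares thrustGPUreduce and parallelGPUreduce as defined above. It uses the plain CUDA runtime for setup instead of the project's cudaMemory.hpp helpers, whose host-to-device copy signature is not shown in this file.

// Illustrative host-side driver (not part of the original sources).
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>
#include "reduce.hpp"   // assumed to declare thrustGPUreduce / parallelGPUreduce

int main() {
    const unsigned int length = 1u << 20;
    std::vector<unsigned int> h_in(length, 1u);   // sum should equal length

    unsigned int* d_in = nullptr;
    cudaMalloc(&d_in, length * sizeof(unsigned int));
    cudaMemcpy(d_in, h_in.data(), length * sizeof(unsigned int),
               cudaMemcpyHostToDevice);

    // Both paths should agree; the thrust version is used as a reference here.
    unsigned int ref = thrustGPUreduce(d_in, length);
    unsigned int sum = parallelGPUreduce(d_in, length);
    std::printf("thrust: %u  custom: %u\n", ref, sum);

    cudaFree(d_in);
    return 0;
}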
ab86ab453116818f1b87aa9af1e6859e9e6e16ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> __global__ void read_mem(char* state){ *state = threadIdx.x + blockIdx.x * blockDim.x + 0xffffffff; //for(int i=0; i<1; i++){ //printf("%x\n", *((char *)0x6048a0000)); //printf("%x\n", *((char *)0x6048a1000)); printf("%p\n", state); if((long)state == 0x6048a0000){ printf("Probe : Before allocated\n"); unsigned long long start_address = 0x6048a0000, curr_address = 0x0; int count = 0; printf("Probe : 0MB to 64MB before allocated\n"); while(count < 5){ curr_address = start_address - count*0x1000000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : 64MB to 72MB before allocated\n"); count = 0; start_address = curr_address; while(count < 9){ curr_address = start_address - count*0x100000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : 72MB to 73MB before allocated\n"); count = 0; start_address = curr_address; while(count < 11){ curr_address = start_address - count*0x10000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } curr_address = curr_address - 0x1; printf("%p => %x\n", curr_address, *((char *)curr_address)); } if((long)state == 0x8cd4a0000){ printf("Probe : Allocated\n"); unsigned long long start_address = 0x8cd4a0000, curr_address = 0x0; int count = 0; while(count < 10){ curr_address = start_address + count*0x100000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 1MB of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x10000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 64KB of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x1000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 4KB of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x100; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 512B of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x10; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 32B of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x1; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } } //printf("[%p] = %x\n", 0x0, *((char *)0x0)); //printf("[%p] = %x\n", 0x1, *((char *)0x1)); //*state = *((char *)i); //} //printf("state from GPU = %x (at %p)\n", *state, state); } int main(){ char *h_state, *d_state; h_state = (char *)malloc(sizeof(int)); hipError_t cerr; unsigned long long alloc_size = 1024*1024*1024; unsigned long long tot_alloc_size = 0; for(int i=0; i<11; i++){ cerr = hipMalloc(&d_state, sizeof(char)*alloc_size); if(cerr != hipSuccess){ printf("hipMalloc failed : %s\n", hipGetErrorString(cerr)); }else{ tot_alloc_size+=alloc_size; printf("Allocated another 1GB...total allocated = %llu\n", tot_alloc_size/(1024*1024*1024)); hipLaunchKernelGGL(( read_mem), dim3(1),dim3(1), 0, 0, d_state); hipDeviceSynchronize(); } } alloc_size = 1024*1024*100; tot_alloc_size = 0; for(int i=0; i<1; i++){ cerr = hipMalloc(&d_state, sizeof(char)*alloc_size); if(cerr != hipSuccess){ printf("hipMalloc 
failed : %s\n", hipGetErrorString(cerr)); }else{ tot_alloc_size+=alloc_size; printf("Allocated another 100 MB...total allocated = %llu\n", tot_alloc_size/(1024*1024*100)); hipLaunchKernelGGL(( read_mem), dim3(1),dim3(1), 0, 0, d_state); hipDeviceSynchronize(); } } alloc_size = 1024*1024*10; tot_alloc_size = 0; for(int i=0; i<5; i++){ cerr = hipMalloc(&d_state, sizeof(char)*alloc_size); if(cerr != hipSuccess){ printf("hipMalloc failed : %s\n", hipGetErrorString(cerr)); }else{ tot_alloc_size+=alloc_size; printf("Allocated another 10 MB...total allocated = %llu\n", tot_alloc_size/(1024*1024*10)); hipLaunchKernelGGL(( read_mem), dim3(1),dim3(1), 0, 0, d_state); cerr = hipDeviceSynchronize(); if(cerr != hipSuccess){ printf("hipDeviceSynchronize failed : %s\n", hipGetErrorString(cerr)); } } } #if 0 long long address = 0xF00000000; unsigned count = 0; while(count++ < 1000){ hipLaunchKernelGGL(( read_mem), dim3(1),dim3(1), 0, 0, d_state, address); address = address + 0x100000; } hipMemcpy(h_state, d_state, sizeof(int), hipMemcpyDeviceToHost); printf("state from CPU = %x\n", *h_state); #endif return 0; }
ab86ab453116818f1b87aa9af1e6859e9e6e16ef.cu
#include <stdlib.h> #include <stdio.h> __global__ void read_mem(char* state){ *state = threadIdx.x + blockIdx.x * blockDim.x + 0xffffffff; //for(int i=0; i<1; i++){ //printf("%x\n", *((char *)0x6048a0000)); //printf("%x\n", *((char *)0x6048a1000)); printf("%p\n", state); if((long)state == 0x6048a0000){ printf("Probe : Before allocated\n"); unsigned long long start_address = 0x6048a0000, curr_address = 0x0; int count = 0; printf("Probe : 0MB to 64MB before allocated\n"); while(count < 5){ curr_address = start_address - count*0x1000000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : 64MB to 72MB before allocated\n"); count = 0; start_address = curr_address; while(count < 9){ curr_address = start_address - count*0x100000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : 72MB to 73MB before allocated\n"); count = 0; start_address = curr_address; while(count < 11){ curr_address = start_address - count*0x10000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } curr_address = curr_address - 0x1; printf("%p => %x\n", curr_address, *((char *)curr_address)); } if((long)state == 0x8cd4a0000){ printf("Probe : Allocated\n"); unsigned long long start_address = 0x8cd4a0000, curr_address = 0x0; int count = 0; while(count < 10){ curr_address = start_address + count*0x100000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 1MB of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x10000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 64KB of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x1000; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 4KB of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x100; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 512B of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x10; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } printf("Probe : Last 32B of allocated\n"); count = 0; start_address = curr_address; while(count < 16){ curr_address = start_address + count*0x1; count++; printf("%p => %x\n", curr_address, *((char *)curr_address)); } } //printf("[%p] = %x\n", 0x0, *((char *)0x0)); //printf("[%p] = %x\n", 0x1, *((char *)0x1)); //*state = *((char *)i); //} //printf("state from GPU = %x (at %p)\n", *state, state); } int main(){ char *h_state, *d_state; h_state = (char *)malloc(sizeof(int)); cudaError_t cerr; unsigned long long alloc_size = 1024*1024*1024; unsigned long long tot_alloc_size = 0; for(int i=0; i<11; i++){ cerr = cudaMalloc(&d_state, sizeof(char)*alloc_size); if(cerr != cudaSuccess){ printf("cudaMalloc failed : %s\n", cudaGetErrorString(cerr)); }else{ tot_alloc_size+=alloc_size; printf("Allocated another 1GB...total allocated = %llu\n", tot_alloc_size/(1024*1024*1024)); read_mem<<<1,1>>>(d_state); cudaDeviceSynchronize(); } } alloc_size = 1024*1024*100; tot_alloc_size = 0; for(int i=0; i<1; i++){ cerr = cudaMalloc(&d_state, sizeof(char)*alloc_size); if(cerr != cudaSuccess){ printf("cudaMalloc failed : %s\n", cudaGetErrorString(cerr)); }else{ tot_alloc_size+=alloc_size; printf("Allocated another 100 MB...total 
allocated = %llu\n", tot_alloc_size/(1024*1024*100)); read_mem<<<1,1>>>(d_state); cudaDeviceSynchronize(); } } alloc_size = 1024*1024*10; tot_alloc_size = 0; for(int i=0; i<5; i++){ cerr = cudaMalloc(&d_state, sizeof(char)*alloc_size); if(cerr != cudaSuccess){ printf("cudaMalloc failed : %s\n", cudaGetErrorString(cerr)); }else{ tot_alloc_size+=alloc_size; printf("Allocated another 10 MB...total allocated = %llu\n", tot_alloc_size/(1024*1024*10)); read_mem<<<1,1>>>(d_state); cerr = cudaDeviceSynchronize(); if(cerr != cudaSuccess){ printf("cudaDeviceSynchronize failed : %s\n", cudaGetErrorString(cerr)); } } } #if 0 long long address = 0xF00000000; unsigned count = 0; while(count++ < 1000){ read_mem<<<1,1>>>(d_state, address); address = address + 0x100000; } cudaMemcpy(h_state, d_state, sizeof(int), cudaMemcpyDeviceToHost); printf("state from CPU = %x\n", *h_state); #endif return 0; }
8e2813b0775981dbc06d9c9597dc1943caf549dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <cstdlib> #include <cstdio> #include <cassert> #include <utils.h> using std::vector; __device__ __host__ int f(int i) { return i*i; } __global__ void kernel(int* out) { out[threadIdx.x] = f(threadIdx.x); } int main(int argc, const char** argv) { int N = 32; if (checkCmdLineFlag(argc, argv, "N")) { N = getCmdLineArgumentInt(argc, argv, "N"); printf("Using %d threads = %d warps\n",N, (N+31)/32); } int* d_output; /* checkCudaErrors: A wrapper function we wrote to test whether an error occurred when launching a kernel. hipMalloc: Allocated memory on device */ checkCudaErrors(hipMalloc(&d_output, sizeof(int) * N)); /* This is like a parallel for loop. kernel is the function above. d_output is the input variable. This call will execute the function kernel using N threads. Each thread gets a different threadIdx.x value. */ hipLaunchKernelGGL(( kernel), dim3(1), dim3(N), 0, 0, d_output); /* This is just to check that the kernel executed as expected. */ hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); vector<int> h_output(N); /* This function copies the data back from GPU to CPU. See hipMemcpyDeviceToHost You also have hipMemcpyHostToDevice */ checkCudaErrors(hipMemcpy(&h_output[0], d_output, sizeof(int) * N, hipMemcpyDeviceToHost)); for(int i = 0; i < N; ++i) { if (i==0 || i==N-1 || i%(N/10) == 0) printf("Entry %10d, written by thread %5d\n", h_output[i], i); assert(h_output[i] == f(i)); } /* Free memory on the device. */ checkCudaErrors(hipFree(d_output)); return 0; }
8e2813b0775981dbc06d9c9597dc1943caf549dd.cu
#include <iostream> #include <vector> #include <cstdlib> #include <cstdio> #include <cassert> #include <utils.h> using std::vector; __device__ __host__ int f(int i) { return i*i; } __global__ void kernel(int* out) { out[threadIdx.x] = f(threadIdx.x); } int main(int argc, const char** argv) { int N = 32; if (checkCmdLineFlag(argc, argv, "N")) { N = getCmdLineArgumentInt(argc, argv, "N"); printf("Using %d threads = %d warps\n",N, (N+31)/32); } int* d_output; /* checkCudaErrors: A wrapper function we wrote to test whether an error occurred when launching a kernel. cudaMalloc: Allocated memory on device */ checkCudaErrors(cudaMalloc(&d_output, sizeof(int) * N)); /* This is like a parallel for loop. kernel is the function above. d_output is the input variable. This call will execute the function kernel using N threads. Each thread gets a different threadIdx.x value. */ kernel<<<1, N>>>(d_output); /* This is just to check that the kernel executed as expected. */ cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); vector<int> h_output(N); /* This function copies the data back from GPU to CPU. See cudaMemcpyDeviceToHost You also have cudaMemcpyHostToDevice */ checkCudaErrors(cudaMemcpy(&h_output[0], d_output, sizeof(int) * N, cudaMemcpyDeviceToHost)); for(int i = 0; i < N; ++i) { if (i==0 || i==N-1 || i%(N/10) == 0) printf("Entry %10d, written by thread %5d\n", h_output[i], i); assert(h_output[i] == f(i)); } /* Free memory on the device. */ checkCudaErrors(cudaFree(d_output)); return 0; }
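The example above launches a single block, so N is effectively capped by the 1024-threads-per-block limit. A grid-stride variant of the same squaring kernel handles arbitrary N with a bounded launch configuration; the sketch below is illustrative (kernel_strided and the launch parameters are made-up names, not part of the original file) and assumes it is compiled in the same translation unit as f.

// Grid-stride variant of the squaring kernel (illustrative sketch).
__global__ void kernel_strided(int* out, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = f(i);   // same f(i) = i*i as above
    }
}

// Launch with a bounded grid; each thread then covers several elements:
// kernel_strided<<<64, 256>>>(d_output, N);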
fb2e05abfacfcfae923dea7e6dc83a9a94087d24.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix addition using threads */
#include <ctime>
#include <iostream>
#include <time.h>
#include <math.h>

#define N 90000000

using namespace std;

void generateRandom(int *h_a);
void parallelAddition();
void serialAddition();

// on the host: "h_"
// on the device: "d_"
int *h_a, *h_b, *h_c, *serialC;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
double serialTimer;
float parallelTimer;

// Kernel definition.
__global__ void vectorAdd(int *d_a, int *d_b, int *d_c){
    // multiple blocks and threads are generated,
    // so an index is needed to map each thread to an element...
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    // guard the tail: the last block may run past N
    if (index < N) {
        d_c[index] = d_a[index] + d_b[index];
    }
}

int main(int argc, char const *argv[]) {
    h_a = (int * ) malloc(size);
    h_b = (int * ) malloc(size);
    h_c = (int * ) malloc(size);
    serialC = (int * ) malloc(size);

    generateRandom(h_a);
    generateRandom(h_b);

    parallelAddition();
    serialAddition();

    free(h_a);
    free(h_b);
    free(h_c);

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    cout << "Speedup: " << (serialTimer / parallelTimer) << endl;
}

void generateRandom(int *h_a){
    srand(time(NULL));
    for (int i = 0; i < N; i++){
        h_a[i] = rand() % 101;
    }
}

void parallelAddition(){
    hipMalloc((void **) &d_a, size);
    hipMalloc((void **) &d_b, size);
    hipMalloc((void **) &d_c, size);

    // transfer data from host to device
    hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);

    int threads = 512;
    // round up so integer division does not drop the remainder elements
    int blocks = (N + threads - 1) / threads;

    // events for timing
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    // kernel call: number of blocks and threads per block
    hipLaunchKernelGGL(( vectorAdd), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&parallelTimer, start, stop);
    cout << "Elapsed parallel time: " << parallelTimer << "ms" << endl;

    hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
}

void serialAddition(){
    clock_t start = clock();
    for(int i = 0; i<N; ++i){
        h_c[i] = h_a[i] + h_b[i];
    }
    clock_t end = clock();
    serialTimer = double(end-start) / double(CLOCKS_PER_SEC);
    cout << "Elapsed Serial Time: " << serialTimer << endl;
}
fb2e05abfacfcfae923dea7e6dc83a9a94087d24.cu
/* Matrix addition using threads */
#include <ctime>
#include <iostream>
#include <time.h>
#include <math.h>

#define N 90000000

using namespace std;

void generateRandom(int *h_a);
void parallelAddition();
void serialAddition();

// on the host: "h_"
// on the device: "d_"
int *h_a, *h_b, *h_c, *serialC;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
double serialTimer;
float parallelTimer;

// Kernel definition.
__global__ void vectorAdd(int *d_a, int *d_b, int *d_c){
    // multiple blocks and threads are generated,
    // so an index is needed to map each thread to an element...
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    // guard the tail: the last block may run past N
    if (index < N) {
        d_c[index] = d_a[index] + d_b[index];
    }
}

int main(int argc, char const *argv[]) {
    h_a = (int * ) malloc(size);
    h_b = (int * ) malloc(size);
    h_c = (int * ) malloc(size);
    serialC = (int * ) malloc(size);

    generateRandom(h_a);
    generateRandom(h_b);

    parallelAddition();
    serialAddition();

    free(h_a);
    free(h_b);
    free(h_c);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    cout << "Speedup: " << (serialTimer / parallelTimer) << endl;
}

void generateRandom(int *h_a){
    srand(time(NULL));
    for (int i = 0; i < N; i++){
        h_a[i] = rand() % 101;
    }
}

void parallelAddition(){
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_b, size);
    cudaMalloc((void **) &d_c, size);

    // transfer data from host to device
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);

    int threads = 512;
    // round up so integer division does not drop the remainder elements
    int blocks = (N + threads - 1) / threads;

    // events for timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // kernel call: number of blocks and threads per block
    vectorAdd<<<blocks, threads>>>(d_a, d_b, d_c);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&parallelTimer, start, stop);
    cout << "Elapsed parallel time: " << parallelTimer << "ms" << endl;

    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
}

void serialAddition(){
    clock_t start = clock();
    for(int i = 0; i<N; ++i){
        h_c[i] = h_a[i] + h_b[i];
    }
    clock_t end = clock();
    serialTimer = double(end-start) / double(CLOCKS_PER_SEC);
    cout << "Elapsed Serial Time: " << serialTimer << endl;
}
aadeb13f7f58f23c3c84d9c3922929f61932e7b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "lite/backends/cuda/math/utils.h" #include "lite/core/op_registry.h" #include "lite/core/type_system.h" #include "lite/kernels/cuda/calib_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { __global__ void Fp32ToInt8Kernel(const int num, const float scale, const float* input, int8_t* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num) { output[index] = lite::cuda::math::from_float<int8_t>(input[index] / scale); } } __global__ void Int8ToFp32Kernel(const int num, const float scale, const int8_t* input, float* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num) { output[index] = input[index] * scale; } } void CalibComputeFp32ToInt8::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->As<CUDAContext>(); auto stream = ctx.exec_stream(); auto scale = param.scale; const auto* din = param.input->data<float>(); auto* dout = param.output->mutable_data<int8_t>(TARGET(kCUDA)); int num = static_cast<int>(param.input->numel()); int threads = 1024; int blocks = (num + threads - 1) / threads; hipLaunchKernelGGL(( Fp32ToInt8Kernel), dim3(blocks), dim3(threads), 0, stream, num, scale, din, dout); hipError_t error = hipGetLastError(); CHECK(error == hipSuccess) << hipGetErrorString(error); } void CalibComputeInt8ToFp32::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->As<CUDAContext>(); auto stream = ctx.exec_stream(); auto scale = param.scale; const auto* din = param.input->data<int8_t>(); auto* dout = param.output->mutable_data<float>(TARGET(kCUDA)); int num = static_cast<int>(param.input->numel()); int threads = 1024; int blocks = (num + threads - 1) / threads; hipLaunchKernelGGL(( Int8ToFp32Kernel), dim3(blocks), dim3(threads), 0, stream, num, scale, din, dout); hipError_t error = hipGetLastError(); CHECK(error == hipSuccess) << hipGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(calib, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::CalibComputeFp32ToInt8, fp32_to_int8) .BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kAny))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt8), DATALAYOUT(kAny))}) .Finalize(); REGISTER_LITE_KERNEL(calib, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::CalibComputeInt8ToFp32, int8_to_fp32) .BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt8), DATALAYOUT(kAny))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kAny))}) .Finalize(); REGISTER_LITE_KERNEL(calib_once, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::CalibComputeFp32ToInt8, fp32_to_int8) .BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA), 
PRECISION(kFloat), DATALAYOUT(kAny))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt8), DATALAYOUT(kAny))}) .Finalize(); REGISTER_LITE_KERNEL(calib_once, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::CalibComputeInt8ToFp32, int8_to_fp32) .BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt8), DATALAYOUT(kAny))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kAny))}) .Finalize();
aadeb13f7f58f23c3c84d9c3922929f61932e7b7.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "lite/backends/cuda/math/utils.h" #include "lite/core/op_registry.h" #include "lite/core/type_system.h" #include "lite/kernels/cuda/calib_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { __global__ void Fp32ToInt8Kernel(const int num, const float scale, const float* input, int8_t* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num) { output[index] = lite::cuda::math::from_float<int8_t>(input[index] / scale); } } __global__ void Int8ToFp32Kernel(const int num, const float scale, const int8_t* input, float* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num) { output[index] = input[index] * scale; } } void CalibComputeFp32ToInt8::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->As<CUDAContext>(); auto stream = ctx.exec_stream(); auto scale = param.scale; const auto* din = param.input->data<float>(); auto* dout = param.output->mutable_data<int8_t>(TARGET(kCUDA)); int num = static_cast<int>(param.input->numel()); int threads = 1024; int blocks = (num + threads - 1) / threads; Fp32ToInt8Kernel<<<blocks, threads, 0, stream>>>(num, scale, din, dout); cudaError_t error = cudaGetLastError(); CHECK(error == cudaSuccess) << cudaGetErrorString(error); } void CalibComputeInt8ToFp32::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->As<CUDAContext>(); auto stream = ctx.exec_stream(); auto scale = param.scale; const auto* din = param.input->data<int8_t>(); auto* dout = param.output->mutable_data<float>(TARGET(kCUDA)); int num = static_cast<int>(param.input->numel()); int threads = 1024; int blocks = (num + threads - 1) / threads; Int8ToFp32Kernel<<<blocks, threads, 0, stream>>>(num, scale, din, dout); cudaError_t error = cudaGetLastError(); CHECK(error == cudaSuccess) << cudaGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(calib, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::CalibComputeFp32ToInt8, fp32_to_int8) .BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kAny))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt8), DATALAYOUT(kAny))}) .Finalize(); REGISTER_LITE_KERNEL(calib, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::CalibComputeInt8ToFp32, int8_to_fp32) .BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt8), DATALAYOUT(kAny))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kAny))}) .Finalize(); REGISTER_LITE_KERNEL(calib_once, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::CalibComputeFp32ToInt8, fp32_to_int8) .BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kAny))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt8), DATALAYOUT(kAny))}) .Finalize(); 
REGISTER_LITE_KERNEL(calib_once, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::CalibComputeInt8ToFp32, int8_to_fp32) .BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kInt8), DATALAYOUT(kAny))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kAny))}) .Finalize();
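The two kernels above run the calibration in opposite directions: Fp32ToInt8Kernel divides by the scale and converts through lite::cuda::math::from_float<int8_t>, while Int8ToFp32Kernel multiplies back by the scale. The sketch below is a self-contained approximation of that round trip; the saturation to the symmetric int8 range and round-to-nearest behavior are assumptions about from_float, not something this file shows.

// Standalone sketch of the fp32 <-> int8 calibration round trip (illustrative).
__device__ __forceinline__ int8_t quantize(float x, float scale) {
  float q = roundf(x / scale);                 // same scaling as Fp32ToInt8Kernel
  q = fminf(fmaxf(q, -127.f), 127.f);          // clamping range is assumed
  return static_cast<int8_t>(q);
}

__device__ __forceinline__ float dequantize(int8_t q, float scale) {
  return static_cast<float>(q) * scale;        // matches Int8ToFp32Kernel
}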
e78fe7e571c596ee007a8dac53a75198b9c9e7ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/sequence_pooling.h" namespace paddle { namespace operators { namespace math { #define FLT_MAX __FLT_MAX__ template <typename T> __global__ void KeMaxSequencePool(const T* input, const size_t* starts, T* output, int* index, int64_t num_seq, int64_t dim) { int dim_idx = threadIdx.x; int seq_id = blockIdx.x; if (seq_id >= num_seq) return; size_t start = starts[seq_id]; size_t end = starts[seq_id + 1]; for (int64_t i = dim_idx; i < dim; i += blockDim.x) { T max_val = static_cast<T>(-FLT_MAX); int max_id = -1; for (size_t step_id = start; step_id < end; step_id++) { if (max_val < input[step_id * dim + i]) { max_val = input[step_id * dim + i]; max_id = step_id; } } output[seq_id * dim + i] = max_val; index[seq_id * dim + i] = max_id; } } template <typename T> class MaxSeqPoolFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::LoDTensor& input, framework::Tensor* output, framework::Tensor* index) { auto in_dims = input.dims(); auto out_dims = output->dims(); auto idx_dims = index->dims(); PADDLE_ENFORCE_GT(in_dims.size(), static_cast<int64_t>(1)); PADDLE_ENFORCE_GT(out_dims.size(), 1); for (int64_t i = 1; i < in_dims.size(); ++i) { PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]); } PADDLE_ENFORCE_EQ(idx_dims, out_dims); auto starts = input.lod()[0]; const T* in_data = input.data<T>(); T* out_data = output->data<T>(); int* max_index = index->data<int>(); int64_t num_seq = out_dims[0]; int64_t dim = output->numel() / num_seq; dim3 threads(256, 1); dim3 grid(num_seq, 1); auto stream = context.stream(); hipLaunchKernelGGL(( KeMaxSequencePool<T>), dim3(grid), dim3(threads), 0, stream, in_data, starts.cuda_data(), out_data, max_index, num_seq, dim); } }; template <typename T> __global__ void KeMaxSequencePoolGrad(const T* out_grad, const int* max_index, T* in_grad, int64_t num_seq, int64_t dim) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int col_idx = idx % dim; if (idx < num_seq * dim) { int step_id = max_index[idx]; in_grad[step_id * dim + col_idx] = out_grad[idx]; } } template <typename T> class MaxSeqPoolGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& out_grad, const framework::Tensor& index, framework::LoDTensor* in_grad) { auto og_dims = out_grad.dims(); auto idx_dims = index.dims(); auto ig_dims = in_grad->dims(); PADDLE_ENFORCE_GT(og_dims.size(), static_cast<int64_t>(1)); PADDLE_ENFORCE_GT(ig_dims.size(), static_cast<int64_t>(1)); for (int64_t i = 1; i < og_dims.size(); ++i) { PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]); } PADDLE_ENFORCE_EQ(idx_dims, og_dims); const T* og_data = out_grad.data<T>(); const int* max_index = index.data<int>(); T* ig_data = in_grad->data<T>(); 
SetConstant<platform::CUDADeviceContext, T> set_zero; set_zero(context, in_grad, static_cast<T>(0.0)); int64_t num_seq = og_dims[0]; int64_t dim = out_grad.numel() / num_seq; unsigned int blocks = (num_seq * dim + 128 - 1) / 128; dim3 threads(128, 1); dim3 grid(blocks, 1); auto stream = context.stream(); hipLaunchKernelGGL(( KeMaxSequencePoolGrad<T>), dim3(grid), dim3(threads), 0, stream, og_data, max_index, ig_data, num_seq, dim); } }; template class MaxSeqPoolFunctor<platform::CUDADeviceContext, float>; template class MaxSeqPoolFunctor<platform::CUDADeviceContext, double>; template class MaxSeqPoolGradFunctor<platform::CUDADeviceContext, float>; template class MaxSeqPoolGradFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
e78fe7e571c596ee007a8dac53a75198b9c9e7ea.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/sequence_pooling.h" namespace paddle { namespace operators { namespace math { #define FLT_MAX __FLT_MAX__ template <typename T> __global__ void KeMaxSequencePool(const T* input, const size_t* starts, T* output, int* index, int64_t num_seq, int64_t dim) { int dim_idx = threadIdx.x; int seq_id = blockIdx.x; if (seq_id >= num_seq) return; size_t start = starts[seq_id]; size_t end = starts[seq_id + 1]; for (int64_t i = dim_idx; i < dim; i += blockDim.x) { T max_val = static_cast<T>(-FLT_MAX); int max_id = -1; for (size_t step_id = start; step_id < end; step_id++) { if (max_val < input[step_id * dim + i]) { max_val = input[step_id * dim + i]; max_id = step_id; } } output[seq_id * dim + i] = max_val; index[seq_id * dim + i] = max_id; } } template <typename T> class MaxSeqPoolFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::LoDTensor& input, framework::Tensor* output, framework::Tensor* index) { auto in_dims = input.dims(); auto out_dims = output->dims(); auto idx_dims = index->dims(); PADDLE_ENFORCE_GT(in_dims.size(), static_cast<int64_t>(1)); PADDLE_ENFORCE_GT(out_dims.size(), 1); for (int64_t i = 1; i < in_dims.size(); ++i) { PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]); } PADDLE_ENFORCE_EQ(idx_dims, out_dims); auto starts = input.lod()[0]; const T* in_data = input.data<T>(); T* out_data = output->data<T>(); int* max_index = index->data<int>(); int64_t num_seq = out_dims[0]; int64_t dim = output->numel() / num_seq; dim3 threads(256, 1); dim3 grid(num_seq, 1); auto stream = context.stream(); KeMaxSequencePool<T><<<grid, threads, 0, stream>>>( in_data, starts.cuda_data(), out_data, max_index, num_seq, dim); } }; template <typename T> __global__ void KeMaxSequencePoolGrad(const T* out_grad, const int* max_index, T* in_grad, int64_t num_seq, int64_t dim) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int col_idx = idx % dim; if (idx < num_seq * dim) { int step_id = max_index[idx]; in_grad[step_id * dim + col_idx] = out_grad[idx]; } } template <typename T> class MaxSeqPoolGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& out_grad, const framework::Tensor& index, framework::LoDTensor* in_grad) { auto og_dims = out_grad.dims(); auto idx_dims = index.dims(); auto ig_dims = in_grad->dims(); PADDLE_ENFORCE_GT(og_dims.size(), static_cast<int64_t>(1)); PADDLE_ENFORCE_GT(ig_dims.size(), static_cast<int64_t>(1)); for (int64_t i = 1; i < og_dims.size(); ++i) { PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]); } PADDLE_ENFORCE_EQ(idx_dims, og_dims); const T* og_data = out_grad.data<T>(); const int* max_index = index.data<int>(); T* ig_data = in_grad->data<T>(); SetConstant<platform::CUDADeviceContext, T> set_zero; set_zero(context, in_grad, static_cast<T>(0.0)); int64_t num_seq = og_dims[0]; 
int64_t dim = out_grad.numel() / num_seq; unsigned int blocks = (num_seq * dim + 128 - 1) / 128; dim3 threads(128, 1); dim3 grid(blocks, 1); auto stream = context.stream(); KeMaxSequencePoolGrad<T><<<grid, threads, 0, stream>>>( og_data, max_index, ig_data, num_seq, dim); } }; template class MaxSeqPoolFunctor<platform::CUDADeviceContext, float>; template class MaxSeqPoolFunctor<platform::CUDADeviceContext, double>; template class MaxSeqPoolGradFunctor<platform::CUDADeviceContext, float>; template class MaxSeqPoolGradFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
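KeMaxSequencePool walks each LoD sequence (rows starts[i] .. starts[i+1]) and, for every feature dimension, keeps the maximum value together with the row index that produced it; the backward kernel then uses that index to route gradients. The CPU reference below restates the same indexing only for clarity; the function and variable names are illustrative and it assumes non-empty sequences.

// CPU reference for the max-sequence-pooling forward pass (illustrative sketch).
// input  : [total_rows, dim], rows of all sequences concatenated
// starts : [num_seq + 1], row offsets per sequence (LoD level 0)
// output : [num_seq, dim], per-sequence max
// index  : [num_seq, dim], row index of the max (consumed by the backward pass)
template <typename T>
void MaxSeqPoolRef(const T* input, const size_t* starts, int64_t num_seq,
                   int64_t dim, T* output, int* index) {
  for (int64_t seq = 0; seq < num_seq; ++seq) {
    for (int64_t d = 0; d < dim; ++d) {
      T best = input[starts[seq] * dim + d];
      int best_row = static_cast<int>(starts[seq]);
      for (size_t row = starts[seq] + 1; row < starts[seq + 1]; ++row) {
        if (input[row * dim + d] > best) {
          best = input[row * dim + d];
          best_row = static_cast<int>(row);
        }
      }
      output[seq * dim + d] = best;
      index[seq * dim + d] = best_row;
    }
  }
}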
f8f69e6b612822d24c3f1e937f5006cf985bba54.hip
// !!! This is a file automatically generated by hipify!!! /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include "cudnnUtils.h" #include <ops/declarable/helpers/convolutions.h> namespace sd { namespace ops { namespace platforms { ////////////////////////////////////////////////////////////////////////// static void conv2dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { // cudnn support only two formats for weights {oC,iC,kH,kW} and {oC,kH,kW,iC} int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); // input descriptor CudnnTensor x; if(input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); // weights descriptor FilterDesc w; w.set4D( cudnnDataType(weights->dataType()), formatW, oC, iC, kH, kW); // output descriptor CudnnTensor z; if(output->ews() == 1 && output->ordering() == 'c') z.set4D(format, cudnnDataType(output->dataType()), bS, oC, oH, oW); else z.set4DEx(cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); // description of convolution ConvolutionDesc conv; conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); // algorithm description cudnnConvolutionFwdAlgo_t algo; cudnnConvolutionFwdAlgoPerf_t algoPerf; int count = 0; //err = cudnnGetConvolutionForwardAlgorithm(*handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionForwardAlgorithm), cudnnFindConvolutionForwardAlgorithm( *handle, x, w, conv, z, 1, &count, &algoPerf)); if (count == 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed as the count is 0", 0); algo = algoPerf.algo; PointersManager manager(context, __func__ ); // allocate auxiliary device memory, abbreviation ws means workspace size_t wsSize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardWorkspaceSize), cudnnGetConvolutionForwardWorkspaceSize( *handle, x, w, conv, z, algo, &wsSize)); void* wsData = manager.allocateDevMem(wsSize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input, weights, bias}); // run calculation CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnConvolutionForward), cudnnConvolutionForward( *handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, wsData, wsSize, beta, z, output->specialBuffer())); // add bias if it is present if (bias != nullptr) { CudnnTensor b; // b.set4D(format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 
1: bias->lengthOf()); b.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnAddTensor), cudnnAddTensor( *handle, alpha, b, bias->specialBuffer(), alpha, z, output->specialBuffer())); } // cudaErr = hipStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dCUDNN: hipStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({output}, {input, weights, bias}); } ////////////////////////////////////////////////////////////////////////// static void conv2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); PointersManager manager(context, __func__ ); // input descriptor, gradO descriptor, gradI descriptor CudnnTensor x, dz, dx; if(input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); if(gradO->ews() == 1 && gradO->ordering() == 'c') dz.set4D(format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); else dz.set4DEx(cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); if(gradI->ews() == 1 && gradI->ordering() == 'c') dx.set4D(format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); else dx.set4DEx(cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); // gradW descriptor FilterDesc dw; dw.set4D( cudnnDataType(gradW->dataType()), formatW, oC, iC, kH, kW); // description of convolution ConvolutionDesc conv; conv.set2D( pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); // gradW algorithm description cudnnConvolutionBwdFilterAlgo_t algoGradW; cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; int count = 0; //err = cudnnGetConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoGradW); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionBackwardFilterAlgorithm), cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf)); if (count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed as the count is 0", 0); algoGradW = algoGradWPerf.algo; // gradI algorithm description cudnnConvolutionBwdDataAlgo_t algoGradI; 
cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; //err = cudnnGetConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoGradI); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionBackwardDataAlgorithm), cudnnFindConvolutionBackwardDataAlgorithm( *handle, dw, dz, conv, x, 1, &count, &algoGradIPerf)); if (count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed as the count is 0", 0); algoGradI = algoGradIPerf.algo; // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace size_t wsGradWSize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardFilterWorkspaceSize), cudnnGetConvolutionBackwardFilterWorkspaceSize( *handle, x, dz, conv, dw, algoGradW, &wsGradWSize)); void* wsGradWData = manager.allocateDevMem(wsGradWSize); // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace size_t wsGradISize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardDataWorkspaceSize), cudnnGetConvolutionBackwardDataWorkspaceSize( *handle, dw, dz, conv, dx, algoGradI, &wsGradISize)); void* wsGradIData = manager.allocateDevMem(wsGradISize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); // run calculation for gradB (if not nullptr) if(gradB != nullptr) { CudnnTensor db; // db.set4D(format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 1: gradB->lengthOf()); db.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(gradB->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnConvolutionBackwardBias), cudnnConvolutionBackwardBias( *handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer())); } // run calculation for gradW CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnConvolutionBackwardFilter), cudnnConvolutionBackwardFilter( *handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer())); // run calculation for gradI CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnConvolutionBackwardData), cudnnConvolutionBackwardData( *handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer())); // cudaErr = hipStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dBpCUDNN: hipStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 2 ? 
INPUT_VARIABLE(2) : nullptr; // [oC] auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<Nd4jLong> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if (bias) { REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); REQUIRE_TRUE((bias->rankOf() == 1 && bias->strideAt(0) == 1) || (bias->rankOf() == 2 && bias->sizeAt(0) == 1 && bias->strideAt(1) == 1) || (bias->rankOf() == 2 && bias->sizeAt(1) == 1 && bias->strideAt(0) == 1), 0, "CUSTOM CONV2D CUDNN OP: bias array should be contiguous in memory !"); } std::unique_ptr<NDArray> tmpWeight = {}, tmpInput = {}; NDArray* newWeights = weights; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if(0 == wFormat) { tmpWeight.reset(new NDArray(weights->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), weights->dataType(), weights->getContext())); newWeights = tmpWeight.get(); newWeights->assign(weights->permute(isNCHW ? 
std::vector<int>({3,2,0,1}) : std::vector<int>({3,0,1,2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } if(paddingMode == 1){ // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, nullptr, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); //prolong life if(tmpInput) input = tmpInput.get(); } conv2dCUDNN(block.launchContext(), input, newWeights, bias, output, kH,kW,sH,sW,pH,pW,dH,dW, paddingMode, isNCHW, wFormat); return Status::OK(); } ////////////////////////////////////////////////////////////////////////// PLATFORM_CHECK(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; const bool badBiasType = bias == nullptr ? false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); return paddingMode != 2 && !badInputType && !badWeightsType && !badBiasType; Requirements req("CUDNN CONV2d OP"); req.expectNotEq(makeInfoVariable(paddingMode,"paddingMode"), 2) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ); if(bias){ req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) ; } req.logTheSuccess(); return req; } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next auto gradI = OUTPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon auto gradW = OUTPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] int kH = INT_ARG(0); // filter(kernel) height int kW = INT_ARG(1); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? 
INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); REQUIRE_TRUE(gradO->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of output's gradients (next epsilon) array must be equal to 4, but got %i instead !", gradO->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); int trueoH, trueoW; // true output height, width ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<Nd4jLong> expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx({bS,oC,trueoH,trueoW, 0,indIOioC,indOoH,indOoH+1}); std::vector<Nd4jLong> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if(bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); std::unique_ptr<NDArray> tmpGradI = {}, tmpInput = {} , tmpWeights = {}, tmpGradW = {}; NDArray *newWeights = weights, *newGradW = gradW; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if(0 == wFormat) { tmpGradW.reset(new NDArray(gradW->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), gradW->dataType(), gradW->getContext())); tmpWeights.reset(new NDArray(weights->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), weights->dataType(), weights->getContext())); newGradW = tmpGradW.get(); newWeights = tmpWeights.get(); newWeights->assign(weights->permute(isNCHW ? 
std::vector<int>({3,2,0,1}) : std::vector<int>({3,0,1,2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } NDArray* newInput = input; NDArray* newGradI = gradI; if(paddingMode == 1){ // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, gradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); tmpGradI = std::move(std::get<1>(ret)); if(tmpInput) newInput = tmpInput.get(); if(tmpGradI) newGradI = tmpGradI.get(); } conv2dBpCUDNN(block.launchContext(), newInput, newWeights, gradO, newGradI, newGradW, gradB, kH,kW,sH,sW,pH,pW,dH,dW,paddingMode,isNCHW,wFormat); if(0 == wFormat) { newGradW->permutei(isNCHW ? std::vector<int>({2,3,1,0}) : std::vector<int>({1,2,3,0})); // (oC, iC, kH, kW --> kH, kW, iC, oC) or (oC, kH, kW, iC --> kH, kW, iC, oC) gradW->assign(newGradW); } if(newInput != input) { if(isNCHW) gradI->assign((*newGradI)({0,0, 0,0, 0,gradI->sizeAt(2), 0,gradI->sizeAt(3)})); else gradI->assign((*newGradI)({0,0, 0,gradI->sizeAt(1), 0,gradI->sizeAt(2), 0,0})); } return Status::OK(); } PLATFORM_CHECK(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC Requirements req("CUDNN CONV2d_BP OP"); req.expectNotEq(makeInfoVariable(paddingMode,"paddingMode"), 2) && req.expectTrue(makeInfoVariable(isNCHW,"isNCHW")) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ); if(bias){ req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT3), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); }else{ req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT2), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ); } req.logTheSuccess(); return req; } } } }
f8f69e6b612822d24c3f1e937f5006cf985bba54.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include "cudnnUtils.h" #include <ops/declarable/helpers/convolutions.h> namespace sd { namespace ops { namespace platforms { ////////////////////////////////////////////////////////////////////////// static void conv2dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { // cudnn support only two formats for weights {oC,iC,kH,kW} and {oC,kH,kW,iC} int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); // input descriptor CudnnTensor x; if(input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); // weights descriptor FilterDesc w; w.set4D( cudnnDataType(weights->dataType()), formatW, oC, iC, kH, kW); // output descriptor CudnnTensor z; if(output->ews() == 1 && output->ordering() == 'c') z.set4D(format, cudnnDataType(output->dataType()), bS, oC, oH, oW); else z.set4DEx(cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); // description of convolution ConvolutionDesc conv; conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); // algorithm description cudnnConvolutionFwdAlgo_t algo; cudnnConvolutionFwdAlgoPerf_t algoPerf; int count = 0; //err = cudnnGetConvolutionForwardAlgorithm(*handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionForwardAlgorithm), cudnnFindConvolutionForwardAlgorithm( *handle, x, w, conv, z, 1, &count, &algoPerf)); if (count == 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed as the count is 0", 0); algo = algoPerf.algo; PointersManager manager(context, __func__ ); // allocate auxiliary device memory, abbreviation ws means workspace size_t wsSize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardWorkspaceSize), cudnnGetConvolutionForwardWorkspaceSize( *handle, x, w, conv, z, algo, &wsSize)); void* wsData = manager.allocateDevMem(wsSize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input, weights, bias}); // run calculation CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnConvolutionForward), cudnnConvolutionForward( *handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, wsData, wsSize, beta, z, output->specialBuffer())); // add bias if it is present if (bias != nullptr) { CudnnTensor b; // b.set4D(format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 
1: bias->lengthOf()); b.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnAddTensor), cudnnAddTensor( *handle, alpha, b, bias->specialBuffer(), alpha, z, output->specialBuffer())); } // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dCUDNN: cudaStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({output}, {input, weights, bias}); } ////////////////////////////////////////////////////////////////////////// static void conv2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); PointersManager manager(context, __func__ ); // input descriptor, gradO descriptor, gradI descriptor CudnnTensor x, dz, dx; if(input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); if(gradO->ews() == 1 && gradO->ordering() == 'c') dz.set4D(format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); else dz.set4DEx(cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); if(gradI->ews() == 1 && gradI->ordering() == 'c') dx.set4D(format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); else dx.set4DEx(cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); // gradW descriptor FilterDesc dw; dw.set4D( cudnnDataType(gradW->dataType()), formatW, oC, iC, kH, kW); // description of convolution ConvolutionDesc conv; conv.set2D( pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); // gradW algorithm description cudnnConvolutionBwdFilterAlgo_t algoGradW; cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; int count = 0; //err = cudnnGetConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoGradW); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionBackwardFilterAlgorithm), cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf)); if (count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed as the count is 0", 0); algoGradW = algoGradWPerf.algo; // gradI algorithm description cudnnConvolutionBwdDataAlgo_t 
algoGradI; cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; //err = cudnnGetConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoGradI); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionBackwardDataAlgorithm), cudnnFindConvolutionBackwardDataAlgorithm( *handle, dw, dz, conv, x, 1, &count, &algoGradIPerf)); if (count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed as the count is 0", 0); algoGradI = algoGradIPerf.algo; // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace size_t wsGradWSize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardFilterWorkspaceSize), cudnnGetConvolutionBackwardFilterWorkspaceSize( *handle, x, dz, conv, dw, algoGradW, &wsGradWSize)); void* wsGradWData = manager.allocateDevMem(wsGradWSize); // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace size_t wsGradISize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardDataWorkspaceSize), cudnnGetConvolutionBackwardDataWorkspaceSize( *handle, dw, dz, conv, dx, algoGradI, &wsGradISize)); void* wsGradIData = manager.allocateDevMem(wsGradISize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); // run calculation for gradB (if not nullptr) if(gradB != nullptr) { CudnnTensor db; // db.set4D(format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 1: gradB->lengthOf()); db.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(gradB->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnConvolutionBackwardBias), cudnnConvolutionBackwardBias( *handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer())); } // run calculation for gradW CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnConvolutionBackwardFilter), cudnnConvolutionBackwardFilter( *handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer())); // run calculation for gradI CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnConvolutionBackwardData), cudnnConvolutionBackwardData( *handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer())); // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 2 ? 
INPUT_VARIABLE(2) : nullptr; // [oC] auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<Nd4jLong> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if (bias) { REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); REQUIRE_TRUE((bias->rankOf() == 1 && bias->strideAt(0) == 1) || (bias->rankOf() == 2 && bias->sizeAt(0) == 1 && bias->strideAt(1) == 1) || (bias->rankOf() == 2 && bias->sizeAt(1) == 1 && bias->strideAt(0) == 1), 0, "CUSTOM CONV2D CUDNN OP: bias array should be contiguous in memory !"); } std::unique_ptr<NDArray> tmpWeight = {}, tmpInput = {}; NDArray* newWeights = weights; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if(0 == wFormat) { tmpWeight.reset(new NDArray(weights->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), weights->dataType(), weights->getContext())); newWeights = tmpWeight.get(); newWeights->assign(weights->permute(isNCHW ? 
std::vector<int>({3,2,0,1}) : std::vector<int>({3,0,1,2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } if(paddingMode == 1){ // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, nullptr, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); //prolong life if(tmpInput) input = tmpInput.get(); } conv2dCUDNN(block.launchContext(), input, newWeights, bias, output, kH,kW,sH,sW,pH,pW,dH,dW, paddingMode, isNCHW, wFormat); return Status::OK(); } ////////////////////////////////////////////////////////////////////////// PLATFORM_CHECK(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; const bool badBiasType = bias == nullptr ? false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); return paddingMode != 2 && !badInputType && !badWeightsType && !badBiasType; Requirements req("CUDNN CONV2d OP"); req.expectNotEq(makeInfoVariable(paddingMode,"paddingMode"), 2) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ); if(bias){ req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) ; } req.logTheSuccess(); return req; } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next auto gradI = OUTPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon auto gradW = OUTPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] int kH = INT_ARG(0); // filter(kernel) height int kW = INT_ARG(1); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? 
INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); REQUIRE_TRUE(gradO->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of output's gradients (next epsilon) array must be equal to 4, but got %i instead !", gradO->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); int trueoH, trueoW; // true output height, width ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<Nd4jLong> expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx({bS,oC,trueoH,trueoW, 0,indIOioC,indOoH,indOoH+1}); std::vector<Nd4jLong> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if(bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); std::unique_ptr<NDArray> tmpGradI = {}, tmpInput = {} , tmpWeights = {}, tmpGradW = {}; NDArray *newWeights = weights, *newGradW = gradW; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if(0 == wFormat) { tmpGradW.reset(new NDArray(gradW->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), gradW->dataType(), gradW->getContext())); tmpWeights.reset(new NDArray(weights->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), weights->dataType(), weights->getContext())); newGradW = tmpGradW.get(); newWeights = tmpWeights.get(); newWeights->assign(weights->permute(isNCHW ? 
std::vector<int>({3,2,0,1}) : std::vector<int>({3,0,1,2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } NDArray* newInput = input; NDArray* newGradI = gradI; if(paddingMode == 1){ // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, gradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); tmpGradI = std::move(std::get<1>(ret)); if(tmpInput) newInput = tmpInput.get(); if(tmpGradI) newGradI = tmpGradI.get(); } conv2dBpCUDNN(block.launchContext(), newInput, newWeights, gradO, newGradI, newGradW, gradB, kH,kW,sH,sW,pH,pW,dH,dW,paddingMode,isNCHW,wFormat); if(0 == wFormat) { newGradW->permutei(isNCHW ? std::vector<int>({2,3,1,0}) : std::vector<int>({1,2,3,0})); // (oC, iC, kH, kW --> kH, kW, iC, oC) or (oC, kH, kW, iC --> kH, kW, iC, oC) gradW->assign(newGradW); } if(newInput != input) { if(isNCHW) gradI->assign((*newGradI)({0,0, 0,0, 0,gradI->sizeAt(2), 0,gradI->sizeAt(3)})); else gradI->assign((*newGradI)({0,0, 0,gradI->sizeAt(1), 0,gradI->sizeAt(2), 0,0})); } return Status::OK(); } PLATFORM_CHECK(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC Requirements req("CUDNN CONV2d_BP OP"); req.expectNotEq(makeInfoVariable(paddingMode,"paddingMode"), 2) && req.expectTrue(makeInfoVariable(isNCHW,"isNCHW")) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ); if(bias){ req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT3), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); }else{ req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT2), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE} ); } req.logTheSuccess(); return req; } } } }
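The file pairs in this listing illustrate the mechanical rewrite that hipify applies to a CUDA source: headers and runtime types/calls are renamed one-to-one (cudaStream_t -> hipStream_t, cudaStreamSynchronize -> hipStreamSynchronize, cudaGetLastError -> hipGetLastError), and kernel launches move from the <<<...>>> syntax to hipLaunchKernelGGL, as in the fused-LAMB pair that follows. Below is a minimal sketch of that launch-site mapping; scaleKernel and launchScale are made-up illustrative names, not code taken from either file.

#include <hip/hip_runtime.h>

// Toy kernel, illustrative only: scales n floats in place.
__global__ void scaleKernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // identical index math in CUDA and HIP
    if (i < n) data[i] *= factor;
}

void launchScale(float* d_data, float factor, int n, hipStream_t stream)
{
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;
    // CUDA original:
    //   scaleKernel<<<blocks, threads, 0, stream>>>(d_data, factor, n);
    // hipify-generated equivalent: grid/block sizes wrapped in dim3, shared-memory size and
    // stream passed explicitly, kernel arguments appended at the end.
    hipLaunchKernelGGL(scaleKernel, dim3(blocks), dim3(threads), 0, stream, d_data, factor, n);
}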
79c4889658b9042ea69e9c91f90fa97dfb9dfbea.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2019 The Microsoft DeepSpeed Team */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <cmath> #include "ATen/ATen.h" #include "ATen/TensorUtils.h" #include "ATen/hip/HIPContext.h" #include "ATen/hip/detail/IndexUtils.cuh" // #include "ATen/Type.h" #include "ATen/AccumulateType.h" #include <iostream> // #include <helper_functions.h> #if defined(__HIP_PLATFORM_HCC__) && HIP_VERSION > 305 #include <hip/hip_cooperative_groups.h> #else #include <hip/hip_cooperative_groups.h> #endif #include <hip/hip_runtime_api.h> #include <stdio.h> namespace cg = cooperative_groups; // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type namespace { // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. template <typename T> struct SharedMemory { // Ensure that we won't compile any un-specialized types __device__ inline operator T*() { #ifndef _WIN32 extern __device__ void error(void); error(); #endif return NULL; } }; template <> struct SharedMemory<float> { __device__ inline operator float*() { extern __shared__ float s_float[]; return s_float; } }; template <> struct SharedMemory<double> { __device__ inline operator double*() { extern __shared__ double s_double[]; return s_double; } }; } // namespace #include "type_shim.h" typedef enum { ADAM_MODE_0 = 0, // eps under square root ADAM_MODE_1 = 1 // eps outside square root } adamMode_t; // s_a and s_b are in shared memory // g_a and g_b are in shared memory template <typename T, int blockSize> __device__ void reduce_block_in_shared_memory(T* s_a, T* s_b, T* g_a, T* g_b) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // perform block reduction in shared memory, unsigned int tid = cta.thread_rank(); T a_sum = s_a[tid]; T b_sum = s_b[tid]; cg::sync(cta); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { s_a[tid] = a_sum = a_sum + s_a[tid + 256]; s_b[tid] = b_sum = b_sum + s_b[tid + 256]; } cg::sync(cta); if ((blockSize >= 256) && (tid < 128)) { s_a[tid] = a_sum = a_sum + s_a[tid + 128]; s_b[tid] = b_sum = b_sum + s_b[tid + 128]; } cg::sync(cta); if ((blockSize >= 128) && (tid < 64)) { s_a[tid] = a_sum = a_sum + s_a[tid + 64]; s_b[tid] = b_sum = b_sum + s_b[tid + 64]; } cg::sync(cta); #if (__CUDA_ARCH__ >= 300) || (defined(__HIP_PLATFORM_HCC__) && HIP_VERSION >= 502) if (tid < 32) { cg::coalesced_group active = cg::coalesced_threads(); // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) { a_sum = a_sum + s_a[tid + 32]; b_sum = b_sum + s_b[tid + 32]; } // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { a_sum += active.shfl_down(a_sum, offset); b_sum += active.shfl_down(b_sum, offset); } } #else if ((blockSize >= 64) && (tid < 32)) { s_a[tid] = a_sum = a_sum + s_a[tid + 32]; s_b[tid] = b_sum = b_sum + s_b[tid + 32]; } cg::sync(cta); if ((blockSize >= 32) && (tid < 16)) { s_a[tid] = a_sum = a_sum + s_a[tid + 16]; s_b[tid] = b_sum = b_sum + s_b[tid + 16]; } cg::sync(cta); if ((blockSize >= 16) && (tid < 8)) { s_a[tid] = a_sum = a_sum + s_a[tid + 8]; s_b[tid] = b_sum = b_sum + s_b[tid + 8]; } cg::sync(cta); if ((blockSize >= 8) && (tid < 4)) { s_a[tid] = a_sum = a_sum + s_a[tid + 4]; s_b[tid] = b_sum = b_sum + s_b[tid + 4]; } cg::sync(cta); if ((blockSize >= 4) && (tid < 2)) { s_a[tid] = 
a_sum = a_sum + s_a[tid + 2]; s_b[tid] = b_sum = b_sum + s_b[tid + 2]; } cg::sync(cta); if ((blockSize >= 2) && (tid < 1)) { s_a[tid] = a_sum = a_sum + s_a[tid + 1]; s_b[tid] = b_sum = b_sum + s_b[tid + 1]; } cg::sync(cta); #endif // write result for this block to global mem if (tid == 0) { g_a[blockIdx.x] = (T)a_sum; g_b[blockIdx.x] = (T)b_sum; } } template <typename T, int blockSize> __device__ void reduce_two_vectors_in_register(T a, T b, T* g_a, T* g_b) { const int threadIdInBlock = cg::this_thread_block().thread_rank(); T* s_a = SharedMemory<T>(); T* s_b = SharedMemory<T>() + cg::this_thread_block().size(); s_a[threadIdInBlock] = a; s_b[threadIdInBlock] = b; reduce_block_in_shared_memory<T, blockSize>(s_a, s_b, g_a, g_b); } template <typename T, typename GRAD_T, int blockSize> __global__ void lamb_cuda_kernel_part1( T* __restrict__ p, GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed T* __restrict__ m, T* __restrict__ v, const GRAD_T* __restrict__ g, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay, T* __restrict__ w_l2_i, T* __restrict__ u_l2_i) { // Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = cg::this_thread_block().thread_rank(); const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x * gridDim.y * threadsPerBlock; T reg_w = 0; T reg_u = 0; for (int j = i; j < tsize; j += totThreads) { T scaled_grad = g[j] / grad_scale; T pj = p[j]; m[j] = b1 * m[j] + (1 - b1) * scaled_grad; v[j] = b2 * v[j] + (1 - b2) * scaled_grad * scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(v[j] + eps); else // Mode 1 denom = sqrtf(v[j]) + eps; T update = (m[j] / denom) + (decay * p[j]); reg_u += update * update; reg_w += pj * pj; } reduce_two_vectors_in_register<T, blockSize>(reg_w, reg_u, w_l2_i, u_l2_i); } template <typename T, typename GRAD_T, int blockSize> __global__ void lamb_cuda_kernel_part2(const size_t tsize, T* __restrict__ g_a, T* __restrict__ g_b) { T* s_a = SharedMemory<T>(); T* s_b = SharedMemory<T>() + cg::this_thread_block().size(); const int threadIdInBlock = cg::this_thread_block().thread_rank(); s_a[threadIdInBlock] = g_a[threadIdInBlock]; s_b[threadIdInBlock] = g_b[threadIdInBlock]; if (threadIdInBlock >= tsize) { s_a[threadIdInBlock] = 0.0; s_b[threadIdInBlock] = 0.0; } reduce_block_in_shared_memory<T, blockSize>(s_a, s_b, g_a, g_b); } template <typename T, typename GRAD_T> __global__ void lamb_cuda_kernel_part3( T* __restrict__ p, GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed T* __restrict__ m, T* __restrict__ v, const GRAD_T* __restrict__ g, const float b1, const float b2, const float max_coeff, const float min_coeff, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay, T* __restrict__ w_l2_i, T* __restrict__ u_l2_i, T* __restrict__ lamb_coeff_val) { // Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = cg::this_thread_block().thread_rank(); const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x * gridDim.y * threadsPerBlock; T reg_w = sqrtf(w_l2_i[0]); T reg_u = sqrtf(u_l2_i[0]); float lamb_coeff = 1.0; if 
(reg_w != 0 && reg_u != 0) { lamb_coeff = reg_w / reg_u; if (lamb_coeff > max_coeff) { lamb_coeff = max_coeff; } if (lamb_coeff < min_coeff) { lamb_coeff = min_coeff; } } if (blockId == 0 && threadIdInBlock == 0) { lamb_coeff_val[0] = lamb_coeff; // printf("Cuda Lamb Coeff is %.6f \n",lamb_coeff); } for (int j = i; j < tsize; j += totThreads) { T pj = (float)p[j]; T mj = m[j]; T vj = v[j]; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(vj + eps); else // Mode 1 denom = sqrtf(vj) + eps; T update = (mj / denom) + (decay * pj); pj = pj - (step_size * lamb_coeff * update); p[j] = pj; if (p_copy != NULL) p_copy[j] = (GRAD_T)pj; } } void fused_lamb_cuda(at::Tensor& p, at::Tensor& p_copy, at::Tensor& m, at::Tensor& v, at::Tensor& g, float lr, float beta1, float beta2, float max_coeff, float min_coeff, float eps, float grad_scale, int step, int mode, int bias_correction, float decay, at::Tensor& w_l2_i, at::Tensor& u_l2_i, at::Tensor& lamb_coeff) { // using namespace at; // Get tensor size int tsize = p.numel(); // Determine #threads and #blocks const int threadsPerBlock = 512; int num_blocks = (tsize + threadsPerBlock - 1) / threadsPerBlock; if (num_blocks > 512) num_blocks = 512; int smemsize = 0; if (p.type().scalarType() == at::ScalarType::Double) smemsize = 2 * threadsPerBlock * sizeof(double); else smemsize = 2 * threadsPerBlock * sizeof(float); const dim3 blocks(num_blocks); const dim3 threads(threadsPerBlock); AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32"); // Constants float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - ::pow(beta1, step); const float bias_correction2 = 1 - ::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2) / bias_correction1; } else { step_size = lr; } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (g.type().scalarType() == at::ScalarType::Half) { // all other values should be fp32 for half gradients AT_ASSERTM(p.type().scalarType() == at::ScalarType::Float, "expected parameter to be of float type"); // dispatch is done on the gradient type using namespace at; // prevents "toString is undefined" errors AT_DISPATCH_FLOATING_TYPES_AND_HALF( g.scalar_type(), "lamb_cuda_kernel", ([&] { using accscalar_t = at::acc_type<scalar_t, true>; hipLaunchKernelGGL(( lamb_cuda_kernel_part1<accscalar_t, scalar_t, threadsPerBlock>) , dim3(blocks), dim3(threadsPerBlock), smemsize, stream, p.data<accscalar_t>(), p_copy.numel() ? p_copy.data<scalar_t>() : NULL, m.data<accscalar_t>(), v.data<accscalar_t>(), g.data<scalar_t>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t)mode, decay, w_l2_i.data<accscalar_t>(), u_l2_i.data<accscalar_t>()); hipLaunchKernelGGL(( lamb_cuda_kernel_part2<accscalar_t, scalar_t, threadsPerBlock>) , dim3(1), dim3(threadsPerBlock), smemsize, stream, num_blocks, w_l2_i.data<accscalar_t>(), u_l2_i.data<accscalar_t>()); hipLaunchKernelGGL(( lamb_cuda_kernel_part3<accscalar_t, scalar_t>) , dim3(blocks), dim3(threadsPerBlock), smemsize, stream, p.data<accscalar_t>(), p_copy.numel() ? 
p_copy.data<scalar_t>() : NULL, m.data<accscalar_t>(), v.data<accscalar_t>(), g.data<scalar_t>(), beta1, beta2, max_coeff, min_coeff, eps, grad_scale, step_size, tsize, (adamMode_t)mode, decay, w_l2_i.data<accscalar_t>(), u_l2_i.data<accscalar_t>(), lamb_coeff.data<accscalar_t>()); })); } else { using namespace at; AT_DISPATCH_FLOATING_TYPES( g.scalar_type(), "lamb_cuda_kernel", ([&] { hipLaunchKernelGGL(( lamb_cuda_kernel_part1<scalar_t, scalar_t, threadsPerBlock>) , dim3(blocks), dim3(threadsPerBlock), smemsize, stream, p.data<scalar_t>(), NULL, // don't output p_copy for fp32, it's wasted write m.data<scalar_t>(), v.data<scalar_t>(), g.data<scalar_t>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t)mode, decay, w_l2_i.data<scalar_t>(), u_l2_i.data<scalar_t>()); hipLaunchKernelGGL(( lamb_cuda_kernel_part2<scalar_t, scalar_t, threadsPerBlock>) , dim3(1), dim3(threadsPerBlock), smemsize, stream, num_blocks, w_l2_i.data<scalar_t>(), u_l2_i.data<scalar_t>()); hipLaunchKernelGGL(( lamb_cuda_kernel_part3<scalar_t, scalar_t>) , dim3(blocks), dim3(threadsPerBlock), smemsize, stream, p.data<scalar_t>(), NULL, // don't output p_copy for fp32, it's wasted write m.data<scalar_t>(), v.data<scalar_t>(), g.data<scalar_t>(), beta1, beta2, max_coeff, min_coeff, eps, grad_scale, step_size, tsize, (adamMode_t)mode, decay, w_l2_i.data<scalar_t>(), u_l2_i.data<scalar_t>(), lamb_coeff.data<scalar_t>()); })); } C10_HIP_CHECK(hipGetLastError()); } // template __device__ void reduce_two_vectors_in_register<float,512>(float a, float b, float* g_a, // float* g_b, cg::grid_group &cgg);
79c4889658b9042ea69e9c91f90fa97dfb9dfbea.cu
/* Copyright 2019 The Microsoft DeepSpeed Team */ #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <cmath> #include "ATen/ATen.h" #include "ATen/TensorUtils.h" #include "ATen/cuda/CUDAContext.h" #include "ATen/cuda/detail/IndexUtils.cuh" // #include "ATen/Type.h" #include "ATen/AccumulateType.h" #include <iostream> // #include <helper_functions.h> #if defined(__HIP_PLATFORM_HCC__) && HIP_VERSION > 305 #include <hip/hip_cooperative_groups.h> #else #include <cooperative_groups.h> #endif #include <cuda_runtime_api.h> #include <stdio.h> namespace cg = cooperative_groups; // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type namespace { // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. template <typename T> struct SharedMemory { // Ensure that we won't compile any un-specialized types __device__ inline operator T*() { #ifndef _WIN32 extern __device__ void error(void); error(); #endif return NULL; } }; template <> struct SharedMemory<float> { __device__ inline operator float*() { extern __shared__ float s_float[]; return s_float; } }; template <> struct SharedMemory<double> { __device__ inline operator double*() { extern __shared__ double s_double[]; return s_double; } }; } // namespace #include "type_shim.h" typedef enum { ADAM_MODE_0 = 0, // eps under square root ADAM_MODE_1 = 1 // eps outside square root } adamMode_t; // s_a and s_b are in shared memory // g_a and g_b are in shared memory template <typename T, int blockSize> __device__ void reduce_block_in_shared_memory(T* s_a, T* s_b, T* g_a, T* g_b) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); // perform block reduction in shared memory, unsigned int tid = cta.thread_rank(); T a_sum = s_a[tid]; T b_sum = s_b[tid]; cg::sync(cta); // do reduction in shared mem if ((blockSize >= 512) && (tid < 256)) { s_a[tid] = a_sum = a_sum + s_a[tid + 256]; s_b[tid] = b_sum = b_sum + s_b[tid + 256]; } cg::sync(cta); if ((blockSize >= 256) && (tid < 128)) { s_a[tid] = a_sum = a_sum + s_a[tid + 128]; s_b[tid] = b_sum = b_sum + s_b[tid + 128]; } cg::sync(cta); if ((blockSize >= 128) && (tid < 64)) { s_a[tid] = a_sum = a_sum + s_a[tid + 64]; s_b[tid] = b_sum = b_sum + s_b[tid + 64]; } cg::sync(cta); #if (__CUDA_ARCH__ >= 300) || (defined(__HIP_PLATFORM_HCC__) && HIP_VERSION >= 502) if (tid < 32) { cg::coalesced_group active = cg::coalesced_threads(); // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) { a_sum = a_sum + s_a[tid + 32]; b_sum = b_sum + s_b[tid + 32]; } // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { a_sum += active.shfl_down(a_sum, offset); b_sum += active.shfl_down(b_sum, offset); } } #else if ((blockSize >= 64) && (tid < 32)) { s_a[tid] = a_sum = a_sum + s_a[tid + 32]; s_b[tid] = b_sum = b_sum + s_b[tid + 32]; } cg::sync(cta); if ((blockSize >= 32) && (tid < 16)) { s_a[tid] = a_sum = a_sum + s_a[tid + 16]; s_b[tid] = b_sum = b_sum + s_b[tid + 16]; } cg::sync(cta); if ((blockSize >= 16) && (tid < 8)) { s_a[tid] = a_sum = a_sum + s_a[tid + 8]; s_b[tid] = b_sum = b_sum + s_b[tid + 8]; } cg::sync(cta); if ((blockSize >= 8) && (tid < 4)) { s_a[tid] = a_sum = a_sum + s_a[tid + 4]; s_b[tid] = b_sum = b_sum + s_b[tid + 4]; } cg::sync(cta); if ((blockSize >= 4) && (tid < 2)) { s_a[tid] = a_sum = a_sum + s_a[tid + 2]; s_b[tid] = b_sum = b_sum + s_b[tid + 2]; } 
cg::sync(cta); if ((blockSize >= 2) && (tid < 1)) { s_a[tid] = a_sum = a_sum + s_a[tid + 1]; s_b[tid] = b_sum = b_sum + s_b[tid + 1]; } cg::sync(cta); #endif // write result for this block to global mem if (tid == 0) { g_a[blockIdx.x] = (T)a_sum; g_b[blockIdx.x] = (T)b_sum; } } template <typename T, int blockSize> __device__ void reduce_two_vectors_in_register(T a, T b, T* g_a, T* g_b) { const int threadIdInBlock = cg::this_thread_block().thread_rank(); T* s_a = SharedMemory<T>(); T* s_b = SharedMemory<T>() + cg::this_thread_block().size(); s_a[threadIdInBlock] = a; s_b[threadIdInBlock] = b; reduce_block_in_shared_memory<T, blockSize>(s_a, s_b, g_a, g_b); } template <typename T, typename GRAD_T, int blockSize> __global__ void lamb_cuda_kernel_part1( T* __restrict__ p, GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed T* __restrict__ m, T* __restrict__ v, const GRAD_T* __restrict__ g, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay, T* __restrict__ w_l2_i, T* __restrict__ u_l2_i) { // Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = cg::this_thread_block().thread_rank(); const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x * gridDim.y * threadsPerBlock; T reg_w = 0; T reg_u = 0; for (int j = i; j < tsize; j += totThreads) { T scaled_grad = g[j] / grad_scale; T pj = p[j]; m[j] = b1 * m[j] + (1 - b1) * scaled_grad; v[j] = b2 * v[j] + (1 - b2) * scaled_grad * scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(v[j] + eps); else // Mode 1 denom = sqrtf(v[j]) + eps; T update = (m[j] / denom) + (decay * p[j]); reg_u += update * update; reg_w += pj * pj; } reduce_two_vectors_in_register<T, blockSize>(reg_w, reg_u, w_l2_i, u_l2_i); } template <typename T, typename GRAD_T, int blockSize> __global__ void lamb_cuda_kernel_part2(const size_t tsize, T* __restrict__ g_a, T* __restrict__ g_b) { T* s_a = SharedMemory<T>(); T* s_b = SharedMemory<T>() + cg::this_thread_block().size(); const int threadIdInBlock = cg::this_thread_block().thread_rank(); s_a[threadIdInBlock] = g_a[threadIdInBlock]; s_b[threadIdInBlock] = g_b[threadIdInBlock]; if (threadIdInBlock >= tsize) { s_a[threadIdInBlock] = 0.0; s_b[threadIdInBlock] = 0.0; } reduce_block_in_shared_memory<T, blockSize>(s_a, s_b, g_a, g_b); } template <typename T, typename GRAD_T> __global__ void lamb_cuda_kernel_part3( T* __restrict__ p, GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed T* __restrict__ m, T* __restrict__ v, const GRAD_T* __restrict__ g, const float b1, const float b2, const float max_coeff, const float min_coeff, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay, T* __restrict__ w_l2_i, T* __restrict__ u_l2_i, T* __restrict__ lamb_coeff_val) { // Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = cg::this_thread_block().thread_rank(); const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x * gridDim.y * threadsPerBlock; T reg_w = sqrtf(w_l2_i[0]); T reg_u = sqrtf(u_l2_i[0]); float lamb_coeff = 1.0; if (reg_w != 0 && reg_u != 0) { lamb_coeff = reg_w / reg_u; if (lamb_coeff > 
max_coeff) { lamb_coeff = max_coeff; } if (lamb_coeff < min_coeff) { lamb_coeff = min_coeff; } } if (blockId == 0 && threadIdInBlock == 0) { lamb_coeff_val[0] = lamb_coeff; // printf("Cuda Lamb Coeff is %.6f \n",lamb_coeff); } for (int j = i; j < tsize; j += totThreads) { T pj = (float)p[j]; T mj = m[j]; T vj = v[j]; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(vj + eps); else // Mode 1 denom = sqrtf(vj) + eps; T update = (mj / denom) + (decay * pj); pj = pj - (step_size * lamb_coeff * update); p[j] = pj; if (p_copy != NULL) p_copy[j] = (GRAD_T)pj; } } void fused_lamb_cuda(at::Tensor& p, at::Tensor& p_copy, at::Tensor& m, at::Tensor& v, at::Tensor& g, float lr, float beta1, float beta2, float max_coeff, float min_coeff, float eps, float grad_scale, int step, int mode, int bias_correction, float decay, at::Tensor& w_l2_i, at::Tensor& u_l2_i, at::Tensor& lamb_coeff) { // using namespace at; // Get tensor size int tsize = p.numel(); // Determine #threads and #blocks const int threadsPerBlock = 512; int num_blocks = (tsize + threadsPerBlock - 1) / threadsPerBlock; if (num_blocks > 512) num_blocks = 512; int smemsize = 0; if (p.type().scalarType() == at::ScalarType::Double) smemsize = 2 * threadsPerBlock * sizeof(double); else smemsize = 2 * threadsPerBlock * sizeof(float); const dim3 blocks(num_blocks); const dim3 threads(threadsPerBlock); AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32"); // Constants float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - std::pow(beta1, step); const float bias_correction2 = 1 - std::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2) / bias_correction1; } else { step_size = lr; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (g.type().scalarType() == at::ScalarType::Half) { // all other values should be fp32 for half gradients AT_ASSERTM(p.type().scalarType() == at::ScalarType::Float, "expected parameter to be of float type"); // dispatch is done on the gradient type using namespace at; // prevents "toString is undefined" errors AT_DISPATCH_FLOATING_TYPES_AND_HALF( g.scalar_type(), "lamb_cuda_kernel", ([&] { using accscalar_t = at::acc_type<scalar_t, true>; lamb_cuda_kernel_part1<accscalar_t, scalar_t, threadsPerBlock> <<<blocks, threadsPerBlock, smemsize, stream>>>( p.data<accscalar_t>(), p_copy.numel() ? p_copy.data<scalar_t>() : NULL, m.data<accscalar_t>(), v.data<accscalar_t>(), g.data<scalar_t>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t)mode, decay, w_l2_i.data<accscalar_t>(), u_l2_i.data<accscalar_t>()); lamb_cuda_kernel_part2<accscalar_t, scalar_t, threadsPerBlock> <<<1, threadsPerBlock, smemsize, stream>>>( num_blocks, w_l2_i.data<accscalar_t>(), u_l2_i.data<accscalar_t>()); lamb_cuda_kernel_part3<accscalar_t, scalar_t> <<<blocks, threadsPerBlock, smemsize, stream>>>( p.data<accscalar_t>(), p_copy.numel() ? 
p_copy.data<scalar_t>() : NULL, m.data<accscalar_t>(), v.data<accscalar_t>(), g.data<scalar_t>(), beta1, beta2, max_coeff, min_coeff, eps, grad_scale, step_size, tsize, (adamMode_t)mode, decay, w_l2_i.data<accscalar_t>(), u_l2_i.data<accscalar_t>(), lamb_coeff.data<accscalar_t>()); })); } else { using namespace at; AT_DISPATCH_FLOATING_TYPES( g.scalar_type(), "lamb_cuda_kernel", ([&] { lamb_cuda_kernel_part1<scalar_t, scalar_t, threadsPerBlock> <<<blocks, threadsPerBlock, smemsize, stream>>>( p.data<scalar_t>(), NULL, // don't output p_copy for fp32, it's wasted write m.data<scalar_t>(), v.data<scalar_t>(), g.data<scalar_t>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t)mode, decay, w_l2_i.data<scalar_t>(), u_l2_i.data<scalar_t>()); lamb_cuda_kernel_part2<scalar_t, scalar_t, threadsPerBlock> <<<1, threadsPerBlock, smemsize, stream>>>( num_blocks, w_l2_i.data<scalar_t>(), u_l2_i.data<scalar_t>()); lamb_cuda_kernel_part3<scalar_t, scalar_t> <<<blocks, threadsPerBlock, smemsize, stream>>>( p.data<scalar_t>(), NULL, // don't output p_copy for fp32, it's wasted write m.data<scalar_t>(), v.data<scalar_t>(), g.data<scalar_t>(), beta1, beta2, max_coeff, min_coeff, eps, grad_scale, step_size, tsize, (adamMode_t)mode, decay, w_l2_i.data<scalar_t>(), u_l2_i.data<scalar_t>(), lamb_coeff.data<scalar_t>()); })); } C10_CUDA_CHECK(cudaGetLastError()); } // template __device__ void reduce_two_vectors_in_register<float,512>(float a, float b, float* g_a, // float* g_b, cg::grid_group &cgg);
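/*
 * Hedged reference sketch, not part of the original source above: a serial CPU
 * version of the math the three lamb_cuda_kernel_part* kernels perform, written
 * only to make the trust-ratio (lamb_coeff) computation easy to follow.
 * grad_scale and the fp16 p_copy path are omitted, ADAM_MODE_1 is assumed, and
 * every name here (lamb_reference_step, w_l2, u_l2, ...) is an illustrative
 * assumption rather than an API of the file above.
 */
#include <cmath>
#include <cstddef>

static void lamb_reference_step(float* p, float* m, float* v, const float* g,
                                std::size_t n, float b1, float b2, float eps,
                                float step_size, float decay,
                                float max_coeff, float min_coeff)
{
    // Part 1: update the Adam moments and accumulate the squared L2 norms of
    // the parameters (w_l2) and of the proposed updates (u_l2).
    double w_l2 = 0.0, u_l2 = 0.0;
    for (std::size_t j = 0; j < n; ++j) {
        m[j] = b1 * m[j] + (1.0f - b1) * g[j];
        v[j] = b2 * v[j] + (1.0f - b2) * g[j] * g[j];
        float update = m[j] / (std::sqrt(v[j]) + eps) + decay * p[j];
        w_l2 += (double)p[j] * p[j];
        u_l2 += (double)update * update;
    }
    // Part 3: form the clamped trust ratio ||w|| / ||u|| and apply the step.
    float coeff = 1.0f;
    if (w_l2 != 0.0 && u_l2 != 0.0) {
        coeff = (float)(std::sqrt(w_l2) / std::sqrt(u_l2));
        if (coeff > max_coeff) coeff = max_coeff;
        if (coeff < min_coeff) coeff = min_coeff;
    }
    for (std::size_t j = 0; j < n; ++j) {
        float update = m[j] / (std::sqrt(v[j]) + eps) + decay * p[j];
        p[j] -= step_size * coeff * update;
    }
}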
833f21f007d2b0031833f2e95926c2b9802afaac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <cstdlib> #include <ctime> #include <math.h> #include "../headers/graph.h" #define THREADS_PER_BLOCK_X 32 #define THREADS_PER_BLOCK_Y 32 #define MAX_THREADS_PER_BLOCK 1024 #define MAX_SHARED_MEM_PER_BLOCK 1024 #define S_MATRIX_SIZE 32 #define AFW_CONST 1 #define BFW_CONST 2 #define CFW_CONST 3 #define DFW_CONST 4 using namespace std; int ** copy_matrix_to_host(int ** dev_matrix, int n); __global__ void AloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m){ int tx = threadIdx.x, ty = threadIdx.y; int sum; int rowsPerThread = m / blockDim.x; int colsPerThread = m / blockDim.y; int r_offset_start = threadIdx.x * rowsPerThread; int r_offset_end = r_offset_start + rowsPerThread - 1; int c_offset_start = threadIdx.y * colsPerThread; int c_offset_end = c_offset_start + colsPerThread - 1; //Copy to shared memory __shared__ int s_x[S_MATRIX_SIZE][S_MATRIX_SIZE]; s_x[tx][ty] = d_x[x_row_st + tx][x_col_st + ty]; syncthreads(); for(int k = 0; k < m; k++){ for(int i = r_offset_start; i <= r_offset_end; i++){ if(i == k) continue; for(int j = c_offset_start; j <= c_offset_end; j++){ if(j == k) continue; sum = s_x[i][k] + s_x[k][j]; if(s_x[i][j] > sum) s_x[i][j] = sum; } } syncthreads(); }//outer k for loop syncthreads(); d_x[x_row_st + tx][x_col_st + ty] = s_x[tx][ty]; } __global__ void BloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){ //Change the start cells, of x, u ,v x_row_st += submatrix_offset * m; x_col_st += blockIdx.y * m; u_row_st += submatrix_offset * m; u_col_st += submatrix_offset * m; v_row_st += submatrix_offset * m; v_col_st += blockIdx.y * m; if(!(blockIdx.y == submatrix_offset && parent == AFW_CONST)){ int tx = threadIdx.x, ty = threadIdx.y; int sum; int rowsPerThread = m / blockDim.x; int colsPerThread = m / blockDim.y; int r_offset_start = threadIdx.x * rowsPerThread; int r_offset_end = r_offset_start + rowsPerThread - 1; int c_offset_start = threadIdx.y * colsPerThread; int c_offset_end = c_offset_start + colsPerThread - 1; __shared__ int s_x[S_MATRIX_SIZE][S_MATRIX_SIZE]; __shared__ int s_u[S_MATRIX_SIZE][S_MATRIX_SIZE]; s_x[tx][ty] = d_x[x_row_st + tx][x_col_st + ty]; s_u[tx][ty] = d_x[u_row_st + tx][u_col_st + ty]; syncthreads(); for(int k=0; k < m; k++){ //Update the other cells. 
for(int i = r_offset_start; i <= r_offset_end; i++){ if(i == k) continue; for(int j = c_offset_start; j <= c_offset_end; j++){ sum = s_u[i][k] + s_x[k][j]; if(s_x[i][j] > sum) s_x[i][j] = sum; } } syncthreads(); }//outer k for loop syncthreads(); d_x[x_row_st + tx][x_col_st + ty] = s_x[tx][ty]; } } __global__ void CloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){ x_row_st += blockIdx.x * m; x_col_st += submatrix_offset * m; u_row_st += blockIdx.x * m; u_col_st += submatrix_offset * m; v_row_st += submatrix_offset * m; v_col_st += submatrix_offset * m; if(!(blockIdx.x == submatrix_offset && parent == AFW_CONST)){ int tx = threadIdx.x, ty = threadIdx.y; int sum; int rowsPerThread = m / blockDim.x; int colsPerThread = m / blockDim.y; int r_offset_start = threadIdx.x * rowsPerThread; int r_offset_end = r_offset_start + rowsPerThread - 1; int c_offset_start = threadIdx.y * colsPerThread; int c_offset_end = c_offset_start + colsPerThread - 1; __shared__ int s_x[S_MATRIX_SIZE][S_MATRIX_SIZE]; __shared__ int s_v[S_MATRIX_SIZE][S_MATRIX_SIZE]; s_x[tx][ty] = d_x[x_row_st + tx][x_col_st + ty]; s_v[tx][ty] = d_x[v_row_st + tx][v_col_st + ty]; syncthreads(); for(int k=0; k < m; k++){ for(int i = r_offset_start; i <= r_offset_end; i++){ for(int j = c_offset_start; j <= c_offset_end; j++){ if(j == k) continue; sum = s_x[i][k] + s_v[k][j]; if(s_x[i][j] > sum) s_x[i][j] = sum; } } syncthreads(); }//outer k loop syncthreads(); d_x[x_row_st + tx][x_col_st + ty] = s_x[tx][ty]; } } __global__ void DloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){ x_row_st += blockIdx.x * m; x_col_st += blockIdx.y * m; u_row_st += blockIdx.x * m; u_col_st += submatrix_offset * m; v_row_st += submatrix_offset * m; v_col_st += blockIdx.y * m; //AFW_PARENT -> blockIdx.x != submatrix_offset && blockIdx.y != submatrix_offset //BFW_PARENT -> blockIdx.x != submatrix_offset //CFW_PARENT -> blockIdx.y != submatrix_offset int flag1 = parent == AFW_CONST && blockIdx.x == submatrix_offset && blockIdx.y == submatrix_offset; int flag2 = parent == BFW_CONST && blockIdx.x == submatrix_offset; int flag3 = parent == CFW_CONST && blockIdx.y == submatrix_offset; if(!(flag1 || flag2 || flag3)){ int tx = threadIdx.x, ty = threadIdx.y; int sum; int rowsPerThread = m / blockDim.x; int colsPerThread = m / blockDim.y; int r_offset_start = threadIdx.x * rowsPerThread; int r_offset_end = r_offset_start + rowsPerThread - 1; int c_offset_start = threadIdx.y * colsPerThread; int c_offset_end = c_offset_start + colsPerThread - 1; __shared__ int s_x[S_MATRIX_SIZE][S_MATRIX_SIZE]; __shared__ int s_u[S_MATRIX_SIZE][S_MATRIX_SIZE]; __shared__ int s_v[S_MATRIX_SIZE][S_MATRIX_SIZE]; s_x[tx][ty] = d_x[x_row_st + tx][x_col_st + ty]; s_u[tx][ty] = d_x[u_row_st + tx][u_col_st + ty]; s_v[tx][ty] = d_x[v_row_st + tx][v_col_st + ty]; syncthreads(); for(int k = 0; k < m; k++){ for(int i = r_offset_start; i <= r_offset_end; i++){ for(int j = c_offset_start; j <= c_offset_end; j++){ sum = s_u[i][k] + s_v[k][j]; if(s_x[i][j] > sum) s_x[i][j] = sum; } } syncthreads(); }//outer k for loop syncthreads(); d_x[x_row_st + tx][x_col_st + ty] = s_x[tx][ty]; } } void DFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){ int r = tilesize[depth]; if(r > n){ printf("ERR DFW: Shouldn't reach here.\n"); /* int threadX = 
min(n, THREADS_PER_BLOCK_X); int threadY = min(n, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); //Execute base case DloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n); */ } else{ int sub_size = n / r; if(sub_size < tilesize[depth + 1]){ int threadX = min(sub_size, THREADS_PER_BLOCK_X); int threadY = min(sub_size, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); for(int k = 0; k < r; k++){ //Update all submatrices with Dloop dim3 blocksPerGrid_D(r, r); hipLaunchKernelGGL(( DloopFW), dim3(blocksPerGrid_D), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, DFW_CONST); hipDeviceSynchronize(); } } else{ for(int k = 0; k < r; k++){ int offset = k*sub_size; for(int i = 0; i < r; i++){ for(int j = 0; j < r; j++){ DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } } hipDeviceSynchronize(); }//outer k loop } } } void CFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){ int r = tilesize[depth]; if(r > n){ printf("ERR CFW: Shouldn't reach here.\n"); /* int threadX = min(n, THREADS_PER_BLOCK_X); int threadY = min(n, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); //Execute base case CloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n); */ } else{ int sub_size = n / r; if(sub_size < tilesize[depth + 1]){ int threadX = min(sub_size, THREADS_PER_BLOCK_X); int threadY = min(sub_size, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); for(int k = 0; k < r; k++){ //Update kth col with Cloop dim3 blocksPerGrid_C(r, 1); hipLaunchKernelGGL(( CloopFW), dim3(blocksPerGrid_C), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, CFW_CONST); hipDeviceSynchronize(); //Update remaining cells with Dloop dim3 blocksPerGrid_D(r, r); hipLaunchKernelGGL(( DloopFW), dim3(blocksPerGrid_D), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, CFW_CONST); hipDeviceSynchronize(); } } else{ for(int k = 0; k < r; k++){ int offset = k*sub_size; for(int i = 0; i < r; i++){ CFW(d_x, x_row_st + i*sub_size, x_col_st + offset, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize); } for(int i = 0; i < r; i++){ for(int j = 0; j < r; j++){ if(j == k) continue; DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } } }//outer k loop } } } void BFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){ int r = tilesize[depth]; if(r > n){ printf("ERR BFW: Shouldn't reach here.\n"); /* int threadX = min(n, THREADS_PER_BLOCK_X); int threadY = min(n, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); //Execute base case BloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n); */ } else{ int sub_size = n / r; if(sub_size < tilesize[depth + 1]){ int threadX = min(sub_size, THREADS_PER_BLOCK_X); int threadY = min(sub_size, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); for(int k = 0; k < r; k++){ //Update kth row with Bloop 
dim3 blocksPerGrid_B(1, r); hipLaunchKernelGGL(( BloopFW), dim3(blocksPerGrid_B), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, BFW_CONST); hipDeviceSynchronize(); //Update remaining cells with Dloop dim3 blocksPerGrid_D(r, r); hipLaunchKernelGGL(( DloopFW), dim3(blocksPerGrid_D), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, BFW_CONST); hipDeviceSynchronize(); } } else{ for(int k = 0; k < r; k++){ int offset = k*sub_size; for(int j = 0; j < r; j++){ BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } for(int i = 0; i < r; i++){ if(i == k) continue; for(int j = 0; j < r; j++){ DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } } }//outer k loop } } } //Figure 4 implementation : HW 5 void AFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){ int r = tilesize[depth]; if(r > n){ printf("ERR AFW: Shouldn't reach here.\n"); /* int threadX = min(n, THREADS_PER_BLOCK_X); int threadY = min(n, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); //Execute base case hipLaunchKernelGGL(( AloopFW), dim3(1), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n); */ } else{ int sub_size = n / r; if(sub_size < tilesize[depth+1]){ int threadX = min(sub_size, THREADS_PER_BLOCK_X); int threadY = min(sub_size, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); for(int k = 0; k < r; k++){ int offset = k * sub_size; hipLaunchKernelGGL(( AloopFW), dim3(1), dim3(threadsPerBlock), 0, 0, d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size); hipDeviceSynchronize(); //Update kth row submatrices and kth col submatrices in parallel dim3 blocksPerGrid_B(1, r); hipLaunchKernelGGL(( BloopFW), dim3(blocksPerGrid_B), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST); hipDeviceSynchronize(); dim3 blocksPerGrid_C(r, 1); hipLaunchKernelGGL(( CloopFW), dim3(blocksPerGrid_C), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST); hipDeviceSynchronize(); //update remaining submatrices dim3 blocksPerGrid_D(r, r); hipLaunchKernelGGL(( DloopFW), dim3(blocksPerGrid_D), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST); hipDeviceSynchronize(); } } else{ for(int k = 0; k < r; k++){ int offset = k*sub_size; AFW(d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize); for(int j = 0; j < r; j++){ if(j == k) continue; BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); CFW(d_x, x_row_st + j*sub_size, x_col_st + offset, u_row_st + j*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize); } for(int i = 0; i < r; i++){ if(i == k) continue; for(int j = 0; j < r; j++){ if(j == k) continue; DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, 
u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } } }//outer for }//else } }//AFW int ** copy_matrix_to_host(int ** dev_matrix, int n){ int ** new_matrix = new int*[n+1]; for(int i=1;i <= n; i++){ new_matrix[i] = new int[n+1]; int * begin; hipMemcpy(&begin, &dev_matrix[i], sizeof (int *), hipMemcpyDeviceToHost); hipMemcpy(new_matrix[i], begin, (n+1) * sizeof(int), hipMemcpyDeviceToHost); } return new_matrix; } int ** copy_matrix_to_device(int ** host_matrix, int n){ //int ** dev_matrix = new int*[n+1]; int ** dev_matrix; hipError_t err = hipMalloc(&dev_matrix, (n+1) * sizeof(int *)); if(err != hipSuccess){ printf("Error allocating memory on device."); return NULL; } for(int i = 1; i <= n; i++){ //printf("%x\n", &addr[i]); int * start; err = hipMalloc(&start, (n+1)*sizeof(int)); if(err != hipSuccess){ printf("Error allocating memory on device."); return NULL; } hipMemcpy(dev_matrix+i, &start, sizeof(int *), hipMemcpyHostToDevice); hipMemcpy(start, host_matrix[i], (n+1) * sizeof(int), hipMemcpyHostToDevice); } return dev_matrix; } void fw_iterative_serial(int ** matrix, int n){ int i,j,k = 0; for(k = 1; k <= n; k++){ for(i = 1; i <= n; i++){ for(j = 1; j <= n; j++){ if(matrix[i][j] > matrix[i][k] + matrix[k][j]) matrix[i][j] = matrix[i][k] + matrix[k][j]; } } } }//end of iterative int compare(int ** orig, int ** new_matrix, int n){ fw_iterative_serial(orig, n); for(int i=1; i <= n; i++){ for(int j=1; j <= n; j++){ if(orig[i][j] != new_matrix[i][j]){ return 0; } } } return 1; } int main(int argc, char * argv[]) { //Matrix int n = atoi(argv[1]); int ** matrix = generate_matrix(n); int ** dev_matrix = copy_matrix_to_device(matrix, n); if(dev_matrix == NULL) return 0; // fw_iterative_outer(dev_matrix, n); /* if(n <= 32){ printf("Original matrix: \n"); print_matrix(matrix, n); } */ long long start, end; //int tilesize[2] = {2, INT_MAX}; int tilesize[3] = {2, n/S_MATRIX_SIZE, INT_MAX}; start = clock(); AFW(dev_matrix, 1, 1, 1, 1, 1, 1, n, 0, tilesize); end = clock(); /* if(n <= 32){ int ** new_matrix = copy_matrix_to_host(dev_matrix, n); printf("\nWith updated distances: \n"); print_matrix(new_matrix, n); delete[] new_matrix; } */ /* if(n <= 1024){ int ** new_matrix = copy_matrix_to_host(dev_matrix, n); int ans = compare(matrix, new_matrix, n); if(ans) printf("ANSWER: CORRECT\n"); else printf("ANSWER: WRONG\n"); delete[] new_matrix; }*/ cout << "Runtime: " << double(end-start)/double(CLOCKS_PER_SEC) << endl; hipFree(dev_matrix); delete[] matrix; return 0; }
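/*
 * Hedged sketch, not part of the original file: the generalized min-plus
 * update that the AloopFW/BloopFW/CloopFW/DloopFW kernels above specialize.
 * It relaxes one m x m tile x through the m pivots supplied by tiles u and v
 * (passing x for all three arguments reproduces the AloopFW case). The tile is
 * stored flat in row-major order, the launch assumes a single m x m thread
 * block with m <= 32, and all names here are illustrative.
 */
__global__ void fw_tile_update(int* x, const int* u, const int* v, int m)
{
    int i = threadIdx.x;   // row handled by this thread
    int j = threadIdx.y;   // column handled by this thread
    for (int k = 0; k < m; ++k) {
        // Read phase: cost of the path i -> k -> j using the previous pivot's values.
        int via = u[i * m + k] + v[k * m + j];
        __syncthreads();
        // Write phase: keep the shorter of the current and relaxed distances.
        if (via < x[i * m + j]) x[i * m + j] = via;
        __syncthreads();
    }
}
// Example launch for a self-contained tile (assumed names):
//   fw_tile_update<<<1, dim3(m, m)>>>(d_tile, d_tile, d_tile, m);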
833f21f007d2b0031833f2e95926c2b9802afaac.cu
#include <stdio.h> #include <iostream> #include <cstdlib> #include <ctime> #include <math.h> #include "../headers/graph.h" #define THREADS_PER_BLOCK_X 32 #define THREADS_PER_BLOCK_Y 32 #define MAX_THREADS_PER_BLOCK 1024 #define MAX_SHARED_MEM_PER_BLOCK 1024 #define S_MATRIX_SIZE 32 #define AFW_CONST 1 #define BFW_CONST 2 #define CFW_CONST 3 #define DFW_CONST 4 using namespace std; int ** copy_matrix_to_host(int ** dev_matrix, int n); __global__ void AloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m){ int tx = threadIdx.x, ty = threadIdx.y; int sum; int rowsPerThread = m / blockDim.x; int colsPerThread = m / blockDim.y; int r_offset_start = threadIdx.x * rowsPerThread; int r_offset_end = r_offset_start + rowsPerThread - 1; int c_offset_start = threadIdx.y * colsPerThread; int c_offset_end = c_offset_start + colsPerThread - 1; //Copy to shared memory __shared__ int s_x[S_MATRIX_SIZE][S_MATRIX_SIZE]; s_x[tx][ty] = d_x[x_row_st + tx][x_col_st + ty]; syncthreads(); for(int k = 0; k < m; k++){ for(int i = r_offset_start; i <= r_offset_end; i++){ if(i == k) continue; for(int j = c_offset_start; j <= c_offset_end; j++){ if(j == k) continue; sum = s_x[i][k] + s_x[k][j]; if(s_x[i][j] > sum) s_x[i][j] = sum; } } syncthreads(); }//outer k for loop syncthreads(); d_x[x_row_st + tx][x_col_st + ty] = s_x[tx][ty]; } __global__ void BloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){ //Change the start cells, of x, u ,v x_row_st += submatrix_offset * m; x_col_st += blockIdx.y * m; u_row_st += submatrix_offset * m; u_col_st += submatrix_offset * m; v_row_st += submatrix_offset * m; v_col_st += blockIdx.y * m; if(!(blockIdx.y == submatrix_offset && parent == AFW_CONST)){ int tx = threadIdx.x, ty = threadIdx.y; int sum; int rowsPerThread = m / blockDim.x; int colsPerThread = m / blockDim.y; int r_offset_start = threadIdx.x * rowsPerThread; int r_offset_end = r_offset_start + rowsPerThread - 1; int c_offset_start = threadIdx.y * colsPerThread; int c_offset_end = c_offset_start + colsPerThread - 1; __shared__ int s_x[S_MATRIX_SIZE][S_MATRIX_SIZE]; __shared__ int s_u[S_MATRIX_SIZE][S_MATRIX_SIZE]; s_x[tx][ty] = d_x[x_row_st + tx][x_col_st + ty]; s_u[tx][ty] = d_x[u_row_st + tx][u_col_st + ty]; syncthreads(); for(int k=0; k < m; k++){ //Update the other cells. 
for(int i = r_offset_start; i <= r_offset_end; i++){ if(i == k) continue; for(int j = c_offset_start; j <= c_offset_end; j++){ sum = s_u[i][k] + s_x[k][j]; if(s_x[i][j] > sum) s_x[i][j] = sum; } } syncthreads(); }//outer k for loop syncthreads(); d_x[x_row_st + tx][x_col_st + ty] = s_x[tx][ty]; } } __global__ void CloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){ x_row_st += blockIdx.x * m; x_col_st += submatrix_offset * m; u_row_st += blockIdx.x * m; u_col_st += submatrix_offset * m; v_row_st += submatrix_offset * m; v_col_st += submatrix_offset * m; if(!(blockIdx.x == submatrix_offset && parent == AFW_CONST)){ int tx = threadIdx.x, ty = threadIdx.y; int sum; int rowsPerThread = m / blockDim.x; int colsPerThread = m / blockDim.y; int r_offset_start = threadIdx.x * rowsPerThread; int r_offset_end = r_offset_start + rowsPerThread - 1; int c_offset_start = threadIdx.y * colsPerThread; int c_offset_end = c_offset_start + colsPerThread - 1; __shared__ int s_x[S_MATRIX_SIZE][S_MATRIX_SIZE]; __shared__ int s_v[S_MATRIX_SIZE][S_MATRIX_SIZE]; s_x[tx][ty] = d_x[x_row_st + tx][x_col_st + ty]; s_v[tx][ty] = d_x[v_row_st + tx][v_col_st + ty]; syncthreads(); for(int k=0; k < m; k++){ for(int i = r_offset_start; i <= r_offset_end; i++){ for(int j = c_offset_start; j <= c_offset_end; j++){ if(j == k) continue; sum = s_x[i][k] + s_v[k][j]; if(s_x[i][j] > sum) s_x[i][j] = sum; } } syncthreads(); }//outer k loop syncthreads(); d_x[x_row_st + tx][x_col_st + ty] = s_x[tx][ty]; } } __global__ void DloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){ x_row_st += blockIdx.x * m; x_col_st += blockIdx.y * m; u_row_st += blockIdx.x * m; u_col_st += submatrix_offset * m; v_row_st += submatrix_offset * m; v_col_st += blockIdx.y * m; //AFW_PARENT -> blockIdx.x != submatrix_offset && blockIdx.y != submatrix_offset //BFW_PARENT -> blockIdx.x != submatrix_offset //CFW_PARENT -> blockIdx.y != submatrix_offset int flag1 = parent == AFW_CONST && blockIdx.x == submatrix_offset && blockIdx.y == submatrix_offset; int flag2 = parent == BFW_CONST && blockIdx.x == submatrix_offset; int flag3 = parent == CFW_CONST && blockIdx.y == submatrix_offset; if(!(flag1 || flag2 || flag3)){ int tx = threadIdx.x, ty = threadIdx.y; int sum; int rowsPerThread = m / blockDim.x; int colsPerThread = m / blockDim.y; int r_offset_start = threadIdx.x * rowsPerThread; int r_offset_end = r_offset_start + rowsPerThread - 1; int c_offset_start = threadIdx.y * colsPerThread; int c_offset_end = c_offset_start + colsPerThread - 1; __shared__ int s_x[S_MATRIX_SIZE][S_MATRIX_SIZE]; __shared__ int s_u[S_MATRIX_SIZE][S_MATRIX_SIZE]; __shared__ int s_v[S_MATRIX_SIZE][S_MATRIX_SIZE]; s_x[tx][ty] = d_x[x_row_st + tx][x_col_st + ty]; s_u[tx][ty] = d_x[u_row_st + tx][u_col_st + ty]; s_v[tx][ty] = d_x[v_row_st + tx][v_col_st + ty]; syncthreads(); for(int k = 0; k < m; k++){ for(int i = r_offset_start; i <= r_offset_end; i++){ for(int j = c_offset_start; j <= c_offset_end; j++){ sum = s_u[i][k] + s_v[k][j]; if(s_x[i][j] > sum) s_x[i][j] = sum; } } syncthreads(); }//outer k for loop syncthreads(); d_x[x_row_st + tx][x_col_st + ty] = s_x[tx][ty]; } } void DFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){ int r = tilesize[depth]; if(r > n){ printf("ERR DFW: Shouldn't reach here.\n"); /* int threadX = 
min(n, THREADS_PER_BLOCK_X); int threadY = min(n, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); //Execute base case DloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n); */ } else{ int sub_size = n / r; if(sub_size < tilesize[depth + 1]){ int threadX = min(sub_size, THREADS_PER_BLOCK_X); int threadY = min(sub_size, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); for(int k = 0; k < r; k++){ //Update all submatrices with Dloop dim3 blocksPerGrid_D(r, r); DloopFW<<<blocksPerGrid_D, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, DFW_CONST); cudaDeviceSynchronize(); } } else{ for(int k = 0; k < r; k++){ int offset = k*sub_size; for(int i = 0; i < r; i++){ for(int j = 0; j < r; j++){ DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } } cudaDeviceSynchronize(); }//outer k loop } } } void CFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){ int r = tilesize[depth]; if(r > n){ printf("ERR CFW: Shouldn't reach here.\n"); /* int threadX = min(n, THREADS_PER_BLOCK_X); int threadY = min(n, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); //Execute base case CloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n); */ } else{ int sub_size = n / r; if(sub_size < tilesize[depth + 1]){ int threadX = min(sub_size, THREADS_PER_BLOCK_X); int threadY = min(sub_size, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); for(int k = 0; k < r; k++){ //Update kth col with Cloop dim3 blocksPerGrid_C(r, 1); CloopFW<<<blocksPerGrid_C, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, CFW_CONST); cudaDeviceSynchronize(); //Update remaining cells with Dloop dim3 blocksPerGrid_D(r, r); DloopFW<<<blocksPerGrid_D, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, CFW_CONST); cudaDeviceSynchronize(); } } else{ for(int k = 0; k < r; k++){ int offset = k*sub_size; for(int i = 0; i < r; i++){ CFW(d_x, x_row_st + i*sub_size, x_col_st + offset, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize); } for(int i = 0; i < r; i++){ for(int j = 0; j < r; j++){ if(j == k) continue; DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } } }//outer k loop } } } void BFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){ int r = tilesize[depth]; if(r > n){ printf("ERR BFW: Shouldn't reach here.\n"); /* int threadX = min(n, THREADS_PER_BLOCK_X); int threadY = min(n, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); //Execute base case BloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n); */ } else{ int sub_size = n / r; if(sub_size < tilesize[depth + 1]){ int threadX = min(sub_size, THREADS_PER_BLOCK_X); int threadY = min(sub_size, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); for(int k = 0; k < r; k++){ //Update kth row with Bloop dim3 blocksPerGrid_B(1, r); BloopFW<<<blocksPerGrid_B, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, 
u_col_st, v_row_st, v_col_st, sub_size, k, BFW_CONST); cudaDeviceSynchronize(); //Update remaining cells with Dloop dim3 blocksPerGrid_D(r, r); DloopFW<<<blocksPerGrid_D, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, BFW_CONST); cudaDeviceSynchronize(); } } else{ for(int k = 0; k < r; k++){ int offset = k*sub_size; for(int j = 0; j < r; j++){ BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } for(int i = 0; i < r; i++){ if(i == k) continue; for(int j = 0; j < r; j++){ DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } } }//outer k loop } } } //Figure 4 implementation : HW 5 void AFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){ int r = tilesize[depth]; if(r > n){ printf("ERR AFW: Shouldn't reach here.\n"); /* int threadX = min(n, THREADS_PER_BLOCK_X); int threadY = min(n, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); //Execute base case AloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n); */ } else{ int sub_size = n / r; if(sub_size < tilesize[depth+1]){ int threadX = min(sub_size, THREADS_PER_BLOCK_X); int threadY = min(sub_size, THREADS_PER_BLOCK_Y); dim3 threadsPerBlock(threadX, threadY); for(int k = 0; k < r; k++){ int offset = k * sub_size; AloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size); cudaDeviceSynchronize(); //Update kth row submatrices and kth col submatrices in parallel dim3 blocksPerGrid_B(1, r); BloopFW<<<blocksPerGrid_B, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST); cudaDeviceSynchronize(); dim3 blocksPerGrid_C(r, 1); CloopFW<<<blocksPerGrid_C, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST); cudaDeviceSynchronize(); //update remaining submatrices dim3 blocksPerGrid_D(r, r); DloopFW<<<blocksPerGrid_D, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST); cudaDeviceSynchronize(); } } else{ for(int k = 0; k < r; k++){ int offset = k*sub_size; AFW(d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize); for(int j = 0; j < r; j++){ if(j == k) continue; BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); CFW(d_x, x_row_st + j*sub_size, x_col_st + offset, u_row_st + j*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize); } for(int i = 0; i < r; i++){ if(i == k) continue; for(int j = 0; j < r; j++){ if(j == k) continue; DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize); } } }//outer for }//else } }//AFW int ** copy_matrix_to_host(int ** dev_matrix, int n){ int ** new_matrix = new int*[n+1]; for(int i=1;i <= n; i++){ new_matrix[i] = new int[n+1]; int * begin; cudaMemcpy(&begin, &dev_matrix[i], sizeof (int *), 
cudaMemcpyDeviceToHost); cudaMemcpy(new_matrix[i], begin, (n+1) * sizeof(int), cudaMemcpyDeviceToHost); } return new_matrix; } int ** copy_matrix_to_device(int ** host_matrix, int n){ //int ** dev_matrix = new int*[n+1]; int ** dev_matrix; cudaError_t err = cudaMalloc(&dev_matrix, (n+1) * sizeof(int *)); if(err != cudaSuccess){ printf("Error allocating memory on device."); return NULL; } for(int i = 1; i <= n; i++){ //printf("%x\n", &addr[i]); int * start; err = cudaMalloc(&start, (n+1)*sizeof(int)); if(err != cudaSuccess){ printf("Error allocating memory on device."); return NULL; } cudaMemcpy(dev_matrix+i, &start, sizeof(int *), cudaMemcpyHostToDevice); cudaMemcpy(start, host_matrix[i], (n+1) * sizeof(int), cudaMemcpyHostToDevice); } return dev_matrix; } void fw_iterative_serial(int ** matrix, int n){ int i,j,k = 0; for(k = 1; k <= n; k++){ for(i = 1; i <= n; i++){ for(j = 1; j <= n; j++){ if(matrix[i][j] > matrix[i][k] + matrix[k][j]) matrix[i][j] = matrix[i][k] + matrix[k][j]; } } } }//end of iterative int compare(int ** orig, int ** new_matrix, int n){ fw_iterative_serial(orig, n); for(int i=1; i <= n; i++){ for(int j=1; j <= n; j++){ if(orig[i][j] != new_matrix[i][j]){ return 0; } } } return 1; } int main(int argc, char * argv[]) { //Matrix int n = atoi(argv[1]); int ** matrix = generate_matrix(n); int ** dev_matrix = copy_matrix_to_device(matrix, n); if(dev_matrix == NULL) return 0; // fw_iterative_outer(dev_matrix, n); /* if(n <= 32){ printf("Original matrix: \n"); print_matrix(matrix, n); } */ long long start, end; //int tilesize[2] = {2, INT_MAX}; int tilesize[3] = {2, n/S_MATRIX_SIZE, INT_MAX}; start = clock(); AFW(dev_matrix, 1, 1, 1, 1, 1, 1, n, 0, tilesize); end = clock(); /* if(n <= 32){ int ** new_matrix = copy_matrix_to_host(dev_matrix, n); printf("\nWith updated distances: \n"); print_matrix(new_matrix, n); delete[] new_matrix; } */ /* if(n <= 1024){ int ** new_matrix = copy_matrix_to_host(dev_matrix, n); int ans = compare(matrix, new_matrix, n); if(ans) printf("ANSWER: CORRECT\n"); else printf("ANSWER: WRONG\n"); delete[] new_matrix; }*/ cout << "Runtime: " << double(end-start)/double(CLOCKS_PER_SEC) << endl; cudaFree(dev_matrix); delete[] matrix; return 0; }
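/*
 * Hedged alternative sketch, not part of the original file: copy_matrix_to_device
 * above builds a device array of row pointers, so every d_x[row][col] access in
 * the kernels performs an extra dependent pointer load. A flat, row-major
 * allocation indexed as row * (n + 1) + col avoids that indirection. The helper
 * below is an illustrative assumption (its name and layout are not used by the
 * code above) and keeps the 1-based row/column convention of the original.
 */
#include <cstddef>
#include <cuda_runtime.h>

static int* copy_matrix_to_device_flat(int** host_matrix, int n)
{
    const size_t stride = (size_t)(n + 1);   // elements per row, including unused index 0
    int* d_flat = NULL;
    if (cudaMalloc(&d_flat, stride * stride * sizeof(int)) != cudaSuccess) return NULL;
    // The host rows are separate allocations, so copy them one at a time.
    for (int i = 1; i <= n; ++i) {
        cudaMemcpy(d_flat + (size_t)i * stride, host_matrix[i],
                   stride * sizeof(int), cudaMemcpyHostToDevice);
    }
    return d_flat;   // element (i, j) lives at d_flat[i * stride + j]
}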
baf2e10eadd55a70d2d3fd671b3ab18c918cab2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. // //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/filters.hpp" #include <float.h> namespace cv { namespace gpu { namespace device { namespace surf { //////////////////////////////////////////////////////////////////////// // Global parameters // The maximum number of features (before subpixel interpolation) that memory is reserved for. __constant__ int c_max_candidates; // The maximum number of features that memory is reserved for. __constant__ int c_max_features; // The image size. __constant__ int c_img_rows; __constant__ int c_img_cols; // The number of layers. __constant__ int c_nOctaveLayers; // The hessian threshold. __constant__ float c_hessianThreshold; // The current octave. __constant__ int c_octave; // The current layer size. 
__constant__ int c_layer_rows; __constant__ int c_layer_cols; void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold) { cudaSafeCall( hipMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) ); cudaSafeCall( hipMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( hipMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) ); cudaSafeCall( hipMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) ); cudaSafeCall( hipMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) ); cudaSafeCall( hipMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) ); } void loadOctaveConstants(int octave, int layer_rows, int layer_cols) { cudaSafeCall( hipMemcpyToSymbol(c_octave, &octave, sizeof(octave)) ); cudaSafeCall( hipMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) ); cudaSafeCall( hipMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) ); } //////////////////////////////////////////////////////////////////////// // Integral image texture texture<unsigned char, 2, hipReadModeElementType> imgTex(0, hipFilterModePoint, hipAddressModeClamp); texture<unsigned int, 2, hipReadModeElementType> sumTex(0, hipFilterModePoint, hipAddressModeClamp); texture<unsigned int, 2, hipReadModeElementType> maskSumTex(0, hipFilterModePoint, hipAddressModeClamp); void bindImgTex(PtrStepSzb img) { bindTexture(&imgTex, img); } void bindSumTex(PtrStepSz<uint> sum) { bindTexture(&sumTex, sum); } void bindMaskSumTex(PtrStepSz<uint> maskSum) { bindTexture(&maskSumTex, maskSum); } template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200 typedef double real_t; #else typedef float real_t; #endif float ratio = (float)newSize / oldSize; real_t d = 0; #pragma unroll for (int k = 0; k < N; ++k) { int dx1 = __float2int_rn(ratio * src[k][0]); int dy1 = __float2int_rn(ratio * src[k][1]); int dx2 = __float2int_rn(ratio * src[k][2]); int dy2 = __float2int_rn(ratio * src[k][3]); real_t t = 0; t += tex2D(sumTex, x + dx1, y + dy1); t -= tex2D(sumTex, x + dx1, y + dy2); t -= tex2D(sumTex, x + dx2, y + dy1); t += tex2D(sumTex, x + dx2, y + dy2); d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); } return (float)d; } //////////////////////////////////////////////////////////////////////// // Hessian __constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} }; __constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} }; __constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} }; __host__ __device__ __forceinline__ int calcSize(int octave, int layer) { /* Wavelet size at first layer of first octave. */ const int HAAR_SIZE0 = 9; /* Wavelet size increment between layers. This should be an even number, such that the wavelet sizes in an octave are either all even or all odd. This ensures that when looking for the neighbours of a sample, the layers above and below are aligned correctly. 
*/ const int HAAR_SIZE_INC = 6; return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave; } __global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace) { // Determine the indices const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2); const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int j = threadIdx.x + blockIdx.x * blockDim.x; const int i = threadIdx.y + blockIdx_y * blockDim.y; const int layer = blockIdx_z; const int size = calcSize(c_octave, layer); const int samples_i = 1 + ((c_img_rows - size) >> c_octave); const int samples_j = 1 + ((c_img_cols - size) >> c_octave); // Ignore pixels where some of the kernel is outside the image const int margin = (size >> 1) >> c_octave; if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j) { const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, i << c_octave, j << c_octave); const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, i << c_octave, j << c_octave); const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, i << c_octave, j << c_octave); det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy; trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy; } } void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols, int octave, int nOctaveLayers) { const int min_size = calcSize(octave, 0); const int max_samples_i = 1 + ((img_rows - min_size) >> octave); const int max_samples_j = 1 + ((img_cols - min_size) >> octave); dim3 threads(16, 16); dim3 grid; grid.x = divUp(max_samples_j, threads.x); grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2); hipLaunchKernelGGL(( icvCalcLayerDetAndTrace), dim3(grid), dim3(threads), 0, 0, det, trace); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // NONMAX __constant__ float c_DM[5] = {0, 0, 9, 9, 1}; struct WithMask { static __device__ bool check(int sum_i, int sum_j, int size) { float ratio = (float)size / 9.0f; float d = 0; int dx1 = __float2int_rn(ratio * c_DM[0]); int dy1 = __float2int_rn(ratio * c_DM[1]); int dx2 = __float2int_rn(ratio * c_DM[2]); int dy2 = __float2int_rn(ratio * c_DM[3]); float t = 0; t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1); t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2); t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1); t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2); d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1)); return (d >= 0.5f); } }; template <typename Mask> __global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer, unsigned int* maxCounter) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 extern __shared__ float N9[]; // The hidx variables are the indices to the hessian buffer. const int gridDim_y = gridDim.y / c_nOctaveLayers; const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int layer = blockIdx_z + 1; const int size = calcSize(c_octave, layer); // Ignore pixels without a 3x3x3 neighbourhood in the layer above const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1; const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1; const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1; // Is this thread within the hessian buffer? 
const int zoff = blockDim.x * blockDim.y; const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff; N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; __syncthreads(); if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1) { float val0 = N9[localLin]; if (val0 > c_hessianThreshold) { // Coordinates for the start of the wavelet in the sum image. There // is some integer division involved, so don't try to simplify this // (cancel out sampleStep) without checking the result is the same const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave; if (Mask::check(sum_i, sum_j, size)) { // Check to see if we have a max (in its 26 neighbours) const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff] && val0 > N9[localLin - blockDim.x - zoff] && val0 > N9[localLin + 1 - blockDim.x - zoff] && val0 > N9[localLin - 1 - zoff] && val0 > N9[localLin - zoff] && val0 > N9[localLin + 1 - zoff] && val0 > N9[localLin - 1 + blockDim.x - zoff] && val0 > N9[localLin + blockDim.x - zoff] && val0 > N9[localLin + 1 + blockDim.x - zoff] && val0 > N9[localLin - 1 - blockDim.x] && val0 > N9[localLin - blockDim.x] && val0 > N9[localLin + 1 - blockDim.x] && val0 > N9[localLin - 1 ] && val0 > N9[localLin + 1 ] && val0 > N9[localLin - 1 + blockDim.x] && val0 > N9[localLin + blockDim.x] && val0 > N9[localLin + 1 + blockDim.x] && val0 > N9[localLin - 1 - blockDim.x + zoff] && val0 > N9[localLin - blockDim.x + zoff] && val0 > N9[localLin + 1 - blockDim.x + zoff] && val0 > N9[localLin - 1 + zoff] && val0 > N9[localLin + zoff] && val0 > N9[localLin + 1 + zoff] && val0 > N9[localLin - 1 + blockDim.x + zoff] && val0 > N9[localLin + blockDim.x + zoff] && val0 > N9[localLin + 1 + blockDim.x + zoff] ; if(condmax) { unsigned int ind = atomicInc(maxCounter,(unsigned int) -1); if (ind < c_max_candidates) { const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]); maxPosBuffer[ind] = make_int4(j, i, layer, laplacian); } } } } } #endif } void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter, int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers) { const int layer_rows = img_rows >> octave; const int layer_cols = img_cols >> octave; const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1; dim3 threads(16, 16); dim3 grid; grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2); grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers; const size_t smem_size = threads.x * threads.y * 3 * sizeof(float); if (use_mask) hipLaunchKernelGGL(( icvFindMaximaInLayer<WithMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter); else hipLaunchKernelGGL(( icvFindMaximaInLayer<WithOutMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // INTERPOLATION __global__ void 
icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 const int4 maxPos = maxPosBuffer[blockIdx.x]; const int j = maxPos.x - 1 + threadIdx.x; const int i = maxPos.y - 1 + threadIdx.y; const int layer = maxPos.z - 1 + threadIdx.z; __shared__ float N9[3][3][3]; N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j]; __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __shared__ float dD[3]; //dx dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]); //dy dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]); //ds dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]); __shared__ float H[3][3]; //dxx H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2]; //dxy H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]); //dxs H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]); //dyx = dxy H[1][0] = H[0][1]; //dyy H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1]; //dys H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]); //dsx = dxs H[2][0] = H[0][2]; //dsy = dys H[2][1] = H[1][2]; //dss H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1]; __shared__ float x[3]; if (solve3x3(H, dD, x)) { if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f) { // if the step is within the interpolation region, perform it const int size = calcSize(c_octave, maxPos.z); const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave; const float center_i = sum_i + (float)(size - 1) / 2; const float center_j = sum_j + (float)(size - 1) / 2; const float px = center_j + x[0] * (1 << c_octave); const float py = center_i + x[1] * (1 << c_octave); const int ds = size - calcSize(c_octave, maxPos.z - 1); const float psize = roundf(size + x[2] * ds); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = psize * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size) { // Get a new feature index. unsigned int ind = atomicInc(featureCounter, (unsigned int)-1); if (ind < c_max_features) { featureX[ind] = px; featureY[ind] = py; featureLaplacian[ind] = maxPos.w; featureOctave[ind] = c_octave; featureSize[ind] = psize; featureHessian[ind] = N9[1][1][1]; } } // grad_wav_size check } // If the subpixel interpolation worked } } // If this is thread 0. 
#endif } void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { dim3 threads; threads.x = 3; threads.y = 3; threads.z = 3; dim3 grid; grid.x = maxCounter; hipLaunchKernelGGL(( icvInterpolateKeypoint), dim3(grid), dim3(threads), 0, 0, det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Orientation #define ORI_SEARCH_INC 5 #define ORI_WIN 60 #define ORI_SAMPLES 113 __constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6}; __constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0}; __constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 
0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f}; __constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}}; __constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}}; __global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir) { __shared__ float s_X[128]; __shared__ float s_Y[128]; __shared__ float s_angle[128]; __shared__ float s_sumx[32 * 4]; __shared__ float s_sumy[32 * 4]; /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = featureSize[blockIdx.x] * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size) return; // Calc X, Y, angle and store it to shared memory const int tid = threadIdx.y * blockDim.x + threadIdx.x; float X = 0.0f, Y = 0.0f, angle = 0.0f; if (tid < ORI_SAMPLES) { const float margin = (float)(grad_wav_size - 1) / 2.0f; const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin); const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin); if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size && x >= 0 && x < (c_img_cols + 1) - grad_wav_size) { X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x); Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x); angle = atan2f(Y, X); if (angle < 0) angle += 2.0f * CV_PI_F; angle *= 180.0f / CV_PI_F; } } s_X[tid] = X; s_Y[tid] = Y; s_angle[tid] = angle; __syncthreads(); float bestx = 0, besty = 0, best_mod = 0; #pragma unroll for (int i = 0; i < 18; ++i) { const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC; float sumx = 0.0f, sumy = 0.0f; int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx = s_X[threadIdx.x]; sumy = s_Y[threadIdx.x]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 32]; sumy += s_Y[threadIdx.x + 32]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 64]; sumy += s_Y[threadIdx.x + 64]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 96]; sumy += s_Y[threadIdx.x + 96]; } device::reduce<32>(s_sumx + threadIdx.y * 32, sumx, threadIdx.x, plus<volatile float>()); device::reduce<32>(s_sumy + threadIdx.y * 32, sumy, threadIdx.x, plus<volatile float>()); const float 
temp_mod = sumx * sumx + sumy * sumy; if (temp_mod > best_mod) { best_mod = temp_mod; bestx = sumx; besty = sumy; } __syncthreads(); } if (threadIdx.x == 0) { s_X[threadIdx.y] = bestx; s_Y[threadIdx.y] = besty; s_angle[threadIdx.y] = best_mod; } __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { int bestIdx = 0; if (s_angle[1] > s_angle[bestIdx]) bestIdx = 1; if (s_angle[2] > s_angle[bestIdx]) bestIdx = 2; if (s_angle[3] > s_angle[bestIdx]) bestIdx = 3; float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]); if (kp_dir < 0) kp_dir += 2.0f * CV_PI_F; kp_dir *= 180.0f / CV_PI_F; kp_dir = 360.0f - kp_dir; if (abs(kp_dir - 360.f) < FLT_EPSILON) kp_dir = 0.f; featureDir[blockIdx.x] = kp_dir; } } #undef ORI_SEARCH_INC #undef ORI_WIN #undef ORI_SAMPLES void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures) { dim3 threads; threads.x = 32; threads.y = 4; dim3 grid; grid.x = nFeatures; hipLaunchKernelGGL(( icvCalcOrientation), dim3(grid), dim3(threads), 0, 0, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Descriptors #define PATCH_SZ 20 __constant__ float c_DW[PATCH_SZ * PATCH_SZ] = { 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 
0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 
0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 
5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f }; struct WinReader { typedef uchar elem_type; __device__ __forceinline__ WinReader(float centerX_, float centerY_, float win_offset_, float cos_dir_, float sin_dir_) : centerX(centerX_), centerY(centerY_), win_offset(win_offset_), cos_dir(cos_dir_), sin_dir(sin_dir_) { } __device__ __forceinline__ uchar operator ()(int i, int j) const { float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir; float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir; return tex2D(imgTex, pixel_x, pixel_y); } float centerX; float centerY; float win_offset; float cos_dir; float sin_dir; }; __device__ void calc_dx_dy(float s_dx_bin[25], float s_dy_bin[25], const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { __shared__ float s_PATCH[6][6]; const float centerX = featureX[blockIdx.x]; const float centerY = featureY[blockIdx.x]; const float size = featureSize[blockIdx.x]; float descriptor_dir = 360.0f - featureDir[blockIdx.x]; if (std::abs(descriptor_dir - 360.f) < FLT_EPSILON) descriptor_dir = 0.f; descriptor_dir *= (float)(CV_PI_F / 180.0f); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = size * 1.2f / 9.0f; /* Extract a window of pixels around the keypoint of size 20s */ const int win_size = (int)((PATCH_SZ + 1) * s); float sin_dir; float cos_dir; sincosf(descriptor_dir, &sin_dir, &cos_dir); /* Nearest neighbour version (faster) */ const float win_offset = -(float)(win_size - 1) / 2; // Compute sampling points // since grids are 2D, need to compute xBlock and yBlock indices const int xBlock = (blockIdx.y & 3); // blockIdx.y % 4 const int yBlock = (blockIdx.y >> 2); // floor(blockIdx.y/4) const int xIndex = xBlock * 5 + threadIdx.x; const int yIndex = yBlock * 5 + threadIdx.y; const float icoo = ((float)yIndex / (PATCH_SZ + 1)) * win_size; const float jcoo = ((float)xIndex / (PATCH_SZ + 1)) * win_size; LinearFilter<WinReader> filter(WinReader(centerX, centerY, win_offset, cos_dir, sin_dir)); s_PATCH[threadIdx.y][threadIdx.x] = filter(icoo, jcoo); __syncthreads(); if (threadIdx.x < 5 && threadIdx.y < 5) { const int tid = threadIdx.y * 5 + threadIdx.x; const float dw = c_DW[yIndex * PATCH_SZ + xIndex]; const float vx = (s_PATCH[threadIdx.y ][threadIdx.x + 1] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y + 1][threadIdx.x ]) * dw; const float vy = (s_PATCH[threadIdx.y + 1][threadIdx.x ] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y ][threadIdx.x + 1]) * dw; s_dx_bin[tid] = vx; s_dy_bin[tid] = vy; } } __device__ void reduce_sum25(volatile float* sdata1, volatile float* sdata2, volatile float* sdata3, volatile float* sdata4, int tid) { // first step is to reduce from 25 to 16 if (tid < 9) // use 9 threads { sdata1[tid] += sdata1[tid + 16]; sdata2[tid] += sdata2[tid + 16]; sdata3[tid] += sdata3[tid + 16]; sdata4[tid] += sdata4[tid + 16]; } // sum (reduce) from 16 to 1 (unrolled - aligned to a half-warp) if (tid < 8) { sdata1[tid] += sdata1[tid + 8]; sdata1[tid] += sdata1[tid + 4]; sdata1[tid] += sdata1[tid + 2]; sdata1[tid] += sdata1[tid + 1]; sdata2[tid] += sdata2[tid + 8]; sdata2[tid] += sdata2[tid + 4]; sdata2[tid] += sdata2[tid + 2]; sdata2[tid] += sdata2[tid + 1]; sdata3[tid] += 
sdata3[tid + 8]; sdata3[tid] += sdata3[tid + 4]; sdata3[tid] += sdata3[tid + 2]; sdata3[tid] += sdata3[tid + 1]; sdata4[tid] += sdata4[tid + 8]; sdata4[tid] += sdata4[tid + 4]; sdata4[tid] += sdata4[tid + 2]; sdata4[tid] += sdata4[tid + 1]; } } __global__ void compute_descriptors64(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region) __shared__ float sdx[25]; __shared__ float sdy[25]; __shared__ float sdxabs[25]; __shared__ float sdyabs[25]; calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir); __syncthreads(); const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < 25) { sdxabs[tid] = ::fabs(sdx[tid]); // |dx| array sdyabs[tid] = ::fabs(sdy[tid]); // |dy| array __syncthreads(); reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid); __syncthreads(); float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 2); // write dx, dy, |dx|, |dy| if (tid == 0) { descriptors_block[0] = sdx[0]; descriptors_block[1] = sdy[0]; descriptors_block[2] = sdxabs[0]; descriptors_block[3] = sdyabs[0]; } } } __global__ void compute_descriptors128(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region) __shared__ float sdx[25]; __shared__ float sdy[25]; // sum (reduce) 5x5 area response __shared__ float sd1[25]; __shared__ float sd2[25]; __shared__ float sdabs1[25]; __shared__ float sdabs2[25]; calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir); __syncthreads(); const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < 25) { if (sdy[tid] >= 0) { sd1[tid] = sdx[tid]; sdabs1[tid] = ::fabs(sdx[tid]); sd2[tid] = 0; sdabs2[tid] = 0; } else { sd1[tid] = 0; sdabs1[tid] = 0; sd2[tid] = sdx[tid]; sdabs2[tid] = ::fabs(sdx[tid]); } __syncthreads(); reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); __syncthreads(); float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 3); // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0) if (tid == 0) { descriptors_block[0] = sd1[0]; descriptors_block[1] = sdabs1[0]; descriptors_block[2] = sd2[0]; descriptors_block[3] = sdabs2[0]; } __syncthreads(); if (sdx[tid] >= 0) { sd1[tid] = sdy[tid]; sdabs1[tid] = ::fabs(sdy[tid]); sd2[tid] = 0; sdabs2[tid] = 0; } else { sd1[tid] = 0; sdabs1[tid] = 0; sd2[tid] = sdy[tid]; sdabs2[tid] = ::fabs(sdy[tid]); } __syncthreads(); reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); __syncthreads(); // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0) if (tid == 0) { descriptors_block[4] = sd1[0]; descriptors_block[5] = sdabs1[0]; descriptors_block[6] = sd2[0]; descriptors_block[7] = sdabs2[0]; } } } template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors) { // no need for thread ID float* descriptor_base = descriptors.ptr(blockIdx.x); // read in the unnormalized descriptor values (squared) __shared__ float sqDesc[BLOCK_DIM_X]; const float lookup = descriptor_base[threadIdx.x]; sqDesc[threadIdx.x] = lookup * lookup; __syncthreads(); if (BLOCK_DIM_X >= 128) { if (threadIdx.x < 64) sqDesc[threadIdx.x] += sqDesc[threadIdx.x + 64]; __syncthreads(); } // reduction to get total if (threadIdx.x < 32) { volatile float* smem = sqDesc; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; 
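        // Remaining strides (4, 2, 1) of the warp-synchronous reduction; the volatile
        // shared-memory pointer keeps the compiler from caching partial sums in registers
        // between these steps, which this pre-Volta-era pattern relies on.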
smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } // compute length (square root) __shared__ float len; if (threadIdx.x == 0) { len = sqrtf(sqDesc[0]); } __syncthreads(); // normalize and store in output descriptor_base[threadIdx.x] = lookup / len; } void compute_descriptors_gpu(const PtrStepSzf& descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures) { // compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D if (descriptors.cols == 64) { hipLaunchKernelGGL(( compute_descriptors64), dim3(dim3(nFeatures, 16, 1)), dim3(dim3(6, 6, 1)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); hipLaunchKernelGGL(( normalize_descriptors<64>), dim3(dim3(nFeatures, 1, 1)), dim3(dim3(64, 1, 1)), 0, 0, descriptors); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } else { hipLaunchKernelGGL(( compute_descriptors128), dim3(dim3(nFeatures, 16, 1)), dim3(dim3(6, 6, 1)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); hipLaunchKernelGGL(( normalize_descriptors<128>), dim3(dim3(nFeatures, 1, 1)), dim3(dim3(128, 1, 1)), 0, 0, descriptors); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } } // namespace surf }}} // namespace cv { namespace gpu { namespace device
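The entry points above are meant to be driven per octave from the host. Below is a minimal, hedged sketch of that call order (not part of either file in this record): it assumes compilation in the same translation unit as the file above, that every buffer (det/trace layers, candidate buffer, feature arrays, counters) is pre-allocated by the caller as cv::gpu::SURF_GPU does, and all variable names are illustrative only.

using namespace cv::gpu;               // PtrStep* wrapper types
using namespace cv::gpu::device::surf; // entry points defined in the file above

void surf_pipeline_sketch(PtrStepSzb img, PtrStepSz<unsigned int> sum,
                          PtrStepf det, PtrStepf trace,
                          int4* maxPosBuffer, unsigned int* d_maxCounter, unsigned int* d_featureCounter,
                          float* featureX, float* featureY, int* featureLaplacian, int* featureOctave,
                          float* featureSize, float* featureHessian, float* featureDir,
                          PtrStepSzf descriptors,
                          int img_rows, int img_cols, int nOctaves, int nOctaveLayers,
                          float hessianThreshold, int maxCandidates, int maxFeatures)
{
    loadGlobalConstants(maxCandidates, maxFeatures, img_rows, img_cols, nOctaveLayers, hessianThreshold);
    bindImgTex(img);   // raw 8-bit image, sampled when building descriptors
    bindSumTex(sum);   // integral image, sampled by the Haar-pattern responses

    for (int octave = 0; octave < nOctaves; ++octave)
    {
        loadOctaveConstants(octave, img_rows >> octave, img_cols >> octave);

        icvCalcLayerDetAndTrace_gpu(det, trace, img_rows, img_cols, octave, nOctaveLayers);
        icvFindMaximaInLayer_gpu(det, trace, maxPosBuffer, d_maxCounter,
                                 img_rows, img_cols, octave, /*use_mask=*/false, nOctaveLayers);

        unsigned int maxCounter = 0; // <-- copy *d_maxCounter back to the host here (hipMemcpy / cudaMemcpy)
        if (maxCounter > 0)
            icvInterpolateKeypoint_gpu(det, maxPosBuffer, maxCounter,
                                       featureX, featureY, featureLaplacian, featureOctave,
                                       featureSize, featureHessian, d_featureCounter);
    }

    int nFeatures = 0; // <-- copy *d_featureCounter back to the host here
    if (nFeatures > 0)
    {
        icvCalcOrientation_gpu(featureX, featureY, featureSize, featureDir, nFeatures);
        compute_descriptors_gpu(descriptors, featureX, featureY, featureSize, featureDir, nFeatures);
    }
}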
baf2e10eadd55a70d2d3fd671b3ab18c918cab2b.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. // //M*/ #include "internal_shared.hpp" #include "opencv2/gpu/device/limits.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/utility.hpp" #include "opencv2/gpu/device/functional.hpp" #include "opencv2/gpu/device/filters.hpp" #include <float.h> namespace cv { namespace gpu { namespace device { namespace surf { //////////////////////////////////////////////////////////////////////// // Global parameters // The maximum number of features (before subpixel interpolation) that memory is reserved for. __constant__ int c_max_candidates; // The maximum number of features that memory is reserved for. __constant__ int c_max_features; // The image size. __constant__ int c_img_rows; __constant__ int c_img_cols; // The number of layers. __constant__ int c_nOctaveLayers; // The hessian threshold. __constant__ float c_hessianThreshold; // The current octave. __constant__ int c_octave; // The current layer size. 
__constant__ int c_layer_rows; __constant__ int c_layer_cols; void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold) { cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) ); cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) ); cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) ); cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) ); cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) ); cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) ); } void loadOctaveConstants(int octave, int layer_rows, int layer_cols) { cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) ); cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) ); cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) ); } //////////////////////////////////////////////////////////////////////// // Integral image texture texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp); void bindImgTex(PtrStepSzb img) { bindTexture(&imgTex, img); } void bindSumTex(PtrStepSz<uint> sum) { bindTexture(&sumTex, sum); } void bindMaskSumTex(PtrStepSz<uint> maskSum) { bindTexture(&maskSumTex, maskSum); } template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200 typedef double real_t; #else typedef float real_t; #endif float ratio = (float)newSize / oldSize; real_t d = 0; #pragma unroll for (int k = 0; k < N; ++k) { int dx1 = __float2int_rn(ratio * src[k][0]); int dy1 = __float2int_rn(ratio * src[k][1]); int dx2 = __float2int_rn(ratio * src[k][2]); int dy2 = __float2int_rn(ratio * src[k][3]); real_t t = 0; t += tex2D(sumTex, x + dx1, y + dy1); t -= tex2D(sumTex, x + dx1, y + dy2); t -= tex2D(sumTex, x + dx2, y + dy1); t += tex2D(sumTex, x + dx2, y + dy2); d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1)); } return (float)d; } //////////////////////////////////////////////////////////////////////// // Hessian __constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} }; __constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} }; __constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} }; __host__ __device__ __forceinline__ int calcSize(int octave, int layer) { /* Wavelet size at first layer of first octave. */ const int HAAR_SIZE0 = 9; /* Wavelet size increment between layers. This should be an even number, such that the wavelet sizes in an octave are either all even or all odd. This ensures that when looking for the neighbours of a sample, the layers above and below are aligned correctly. 
*/ const int HAAR_SIZE_INC = 6; return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave; } __global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace) { // Determine the indices const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2); const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int j = threadIdx.x + blockIdx.x * blockDim.x; const int i = threadIdx.y + blockIdx_y * blockDim.y; const int layer = blockIdx_z; const int size = calcSize(c_octave, layer); const int samples_i = 1 + ((c_img_rows - size) >> c_octave); const int samples_j = 1 + ((c_img_cols - size) >> c_octave); // Ignore pixels where some of the kernel is outside the image const int margin = (size >> 1) >> c_octave; if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j) { const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, i << c_octave, j << c_octave); const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, i << c_octave, j << c_octave); const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, i << c_octave, j << c_octave); det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy; trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy; } } void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols, int octave, int nOctaveLayers) { const int min_size = calcSize(octave, 0); const int max_samples_i = 1 + ((img_rows - min_size) >> octave); const int max_samples_j = 1 + ((img_cols - min_size) >> octave); dim3 threads(16, 16); dim3 grid; grid.x = divUp(max_samples_j, threads.x); grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2); icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // NONMAX __constant__ float c_DM[5] = {0, 0, 9, 9, 1}; struct WithMask { static __device__ bool check(int sum_i, int sum_j, int size) { float ratio = (float)size / 9.0f; float d = 0; int dx1 = __float2int_rn(ratio * c_DM[0]); int dy1 = __float2int_rn(ratio * c_DM[1]); int dx2 = __float2int_rn(ratio * c_DM[2]); int dy2 = __float2int_rn(ratio * c_DM[3]); float t = 0; t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1); t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2); t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1); t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2); d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1)); return (d >= 0.5f); } }; template <typename Mask> __global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer, unsigned int* maxCounter) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 extern __shared__ float N9[]; // The hidx variables are the indices to the hessian buffer. const int gridDim_y = gridDim.y / c_nOctaveLayers; const int blockIdx_y = blockIdx.y % gridDim_y; const int blockIdx_z = blockIdx.y / gridDim_y; const int layer = blockIdx_z + 1; const int size = calcSize(c_octave, layer); // Ignore pixels without a 3x3x3 neighbourhood in the layer above const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1; const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1; const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1; // Is this thread within the hessian buffer? 
const int zoff = blockDim.x * blockDim.y; const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff; N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)]; __syncthreads(); if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1) { float val0 = N9[localLin]; if (val0 > c_hessianThreshold) { // Coordinates for the start of the wavelet in the sum image. There // is some integer division involved, so don't try to simplify this // (cancel out sampleStep) without checking the result is the same const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave; if (Mask::check(sum_i, sum_j, size)) { // Check to see if we have a max (in its 26 neighbours) const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff] && val0 > N9[localLin - blockDim.x - zoff] && val0 > N9[localLin + 1 - blockDim.x - zoff] && val0 > N9[localLin - 1 - zoff] && val0 > N9[localLin - zoff] && val0 > N9[localLin + 1 - zoff] && val0 > N9[localLin - 1 + blockDim.x - zoff] && val0 > N9[localLin + blockDim.x - zoff] && val0 > N9[localLin + 1 + blockDim.x - zoff] && val0 > N9[localLin - 1 - blockDim.x] && val0 > N9[localLin - blockDim.x] && val0 > N9[localLin + 1 - blockDim.x] && val0 > N9[localLin - 1 ] && val0 > N9[localLin + 1 ] && val0 > N9[localLin - 1 + blockDim.x] && val0 > N9[localLin + blockDim.x] && val0 > N9[localLin + 1 + blockDim.x] && val0 > N9[localLin - 1 - blockDim.x + zoff] && val0 > N9[localLin - blockDim.x + zoff] && val0 > N9[localLin + 1 - blockDim.x + zoff] && val0 > N9[localLin - 1 + zoff] && val0 > N9[localLin + zoff] && val0 > N9[localLin + 1 + zoff] && val0 > N9[localLin - 1 + blockDim.x + zoff] && val0 > N9[localLin + blockDim.x + zoff] && val0 > N9[localLin + 1 + blockDim.x + zoff] ; if(condmax) { unsigned int ind = atomicInc(maxCounter,(unsigned int) -1); if (ind < c_max_candidates) { const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]); maxPosBuffer[ind] = make_int4(j, i, layer, laplacian); } } } } } #endif } void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter, int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers) { const int layer_rows = img_rows >> octave; const int layer_cols = img_cols >> octave; const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1; dim3 threads(16, 16); dim3 grid; grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2); grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers; const size_t smem_size = threads.x * threads.y * 3 * sizeof(float); if (use_mask) icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter); else icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // INTERPOLATION __global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer, float* 
featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 const int4 maxPos = maxPosBuffer[blockIdx.x]; const int j = maxPos.x - 1 + threadIdx.x; const int i = maxPos.y - 1 + threadIdx.y; const int layer = maxPos.z - 1 + threadIdx.z; __shared__ float N9[3][3][3]; N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j]; __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __shared__ float dD[3]; //dx dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]); //dy dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]); //ds dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]); __shared__ float H[3][3]; //dxx H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2]; //dxy H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]); //dxs H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]); //dyx = dxy H[1][0] = H[0][1]; //dyy H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1]; //dys H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]); //dsx = dxs H[2][0] = H[0][2]; //dsy = dys H[2][1] = H[1][2]; //dss H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1]; __shared__ float x[3]; if (solve3x3(H, dD, x)) { if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f) { // if the step is within the interpolation region, perform it const int size = calcSize(c_octave, maxPos.z); const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave; const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave; const float center_i = sum_i + (float)(size - 1) / 2; const float center_j = sum_j + (float)(size - 1) / 2; const float px = center_j + x[0] * (1 << c_octave); const float py = center_i + x[1] * (1 << c_octave); const int ds = size - calcSize(c_octave, maxPos.z - 1); const float psize = roundf(size + x[2] * ds); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = psize * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size) { // Get a new feature index. unsigned int ind = atomicInc(featureCounter, (unsigned int)-1); if (ind < c_max_features) { featureX[ind] = px; featureY[ind] = py; featureLaplacian[ind] = maxPos.w; featureOctave[ind] = c_octave; featureSize[ind] = psize; featureHessian[ind] = N9[1][1][1]; } } // grad_wav_size check } // If the subpixel interpolation worked } } // If this is thread 0. 
#endif } void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter, float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian, unsigned int* featureCounter) { dim3 threads; threads.x = 3; threads.y = 3; threads.z = 3; dim3 grid; grid.x = maxCounter; icvInterpolateKeypoint<<<grid, threads>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Orientation #define ORI_SEARCH_INC 5 #define ORI_WIN 60 #define ORI_SAMPLES 113 __constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6}; __constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0}; __constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 
0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f}; __constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}}; __constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}}; __global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir) { __shared__ float s_X[128]; __shared__ float s_Y[128]; __shared__ float s_angle[128]; __shared__ float s_sumx[32 * 4]; __shared__ float s_sumy[32 * 4]; /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = featureSize[blockIdx.x] * 1.2f / 9.0f; /* To find the dominant orientation, the gradients in x and y are sampled in a circle of radius 6s using wavelets of size 4s. We ensure the gradient wavelet size is even to ensure the wavelet pattern is balanced and symmetric around its center */ const int grad_wav_size = 2 * __float2int_rn(2.0f * s); // check when grad_wav_size is too big if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size) return; // Calc X, Y, angle and store it to shared memory const int tid = threadIdx.y * blockDim.x + threadIdx.x; float X = 0.0f, Y = 0.0f, angle = 0.0f; if (tid < ORI_SAMPLES) { const float margin = (float)(grad_wav_size - 1) / 2.0f; const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin); const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin); if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size && x >= 0 && x < (c_img_cols + 1) - grad_wav_size) { X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x); Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x); angle = atan2f(Y, X); if (angle < 0) angle += 2.0f * CV_PI_F; angle *= 180.0f / CV_PI_F; } } s_X[tid] = X; s_Y[tid] = Y; s_angle[tid] = angle; __syncthreads(); float bestx = 0, besty = 0, best_mod = 0; #pragma unroll for (int i = 0; i < 18; ++i) { const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC; float sumx = 0.0f, sumy = 0.0f; int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx = s_X[threadIdx.x]; sumy = s_Y[threadIdx.x]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 32]; sumy += s_Y[threadIdx.x + 32]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 64]; sumy += s_Y[threadIdx.x + 64]; } d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir); if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2) { sumx += s_X[threadIdx.x + 96]; sumy += s_Y[threadIdx.x + 96]; } device::reduce<32>(s_sumx + threadIdx.y * 32, sumx, threadIdx.x, plus<volatile float>()); device::reduce<32>(s_sumy + threadIdx.y * 32, sumy, threadIdx.x, plus<volatile float>()); const float temp_mod = sumx * sumx + sumy * sumy; if 
(temp_mod > best_mod) { best_mod = temp_mod; bestx = sumx; besty = sumy; } __syncthreads(); } if (threadIdx.x == 0) { s_X[threadIdx.y] = bestx; s_Y[threadIdx.y] = besty; s_angle[threadIdx.y] = best_mod; } __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { int bestIdx = 0; if (s_angle[1] > s_angle[bestIdx]) bestIdx = 1; if (s_angle[2] > s_angle[bestIdx]) bestIdx = 2; if (s_angle[3] > s_angle[bestIdx]) bestIdx = 3; float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]); if (kp_dir < 0) kp_dir += 2.0f * CV_PI_F; kp_dir *= 180.0f / CV_PI_F; kp_dir = 360.0f - kp_dir; if (abs(kp_dir - 360.f) < FLT_EPSILON) kp_dir = 0.f; featureDir[blockIdx.x] = kp_dir; } } #undef ORI_SEARCH_INC #undef ORI_WIN #undef ORI_SAMPLES void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures) { dim3 threads; threads.x = 32; threads.y = 4; dim3 grid; grid.x = nFeatures; icvCalcOrientation<<<grid, threads>>>(featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // Descriptors #define PATCH_SZ 20 __constant__ float c_DW[PATCH_SZ * PATCH_SZ] = { 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 
0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 
0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f, 0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f, 9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f, 5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f, 3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 
8.444558261544444e-006f, 3.695352233989979e-006f }; struct WinReader { typedef uchar elem_type; __device__ __forceinline__ WinReader(float centerX_, float centerY_, float win_offset_, float cos_dir_, float sin_dir_) : centerX(centerX_), centerY(centerY_), win_offset(win_offset_), cos_dir(cos_dir_), sin_dir(sin_dir_) { } __device__ __forceinline__ uchar operator ()(int i, int j) const { float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir; float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir; return tex2D(imgTex, pixel_x, pixel_y); } float centerX; float centerY; float win_offset; float cos_dir; float sin_dir; }; __device__ void calc_dx_dy(float s_dx_bin[25], float s_dy_bin[25], const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { __shared__ float s_PATCH[6][6]; const float centerX = featureX[blockIdx.x]; const float centerY = featureY[blockIdx.x]; const float size = featureSize[blockIdx.x]; float descriptor_dir = 360.0f - featureDir[blockIdx.x]; if (std::abs(descriptor_dir - 360.f) < FLT_EPSILON) descriptor_dir = 0.f; descriptor_dir *= (float)(CV_PI_F / 180.0f); /* The sampling intervals and wavelet sized for selecting an orientation and building the keypoint descriptor are defined relative to 's' */ const float s = size * 1.2f / 9.0f; /* Extract a window of pixels around the keypoint of size 20s */ const int win_size = (int)((PATCH_SZ + 1) * s); float sin_dir; float cos_dir; sincosf(descriptor_dir, &sin_dir, &cos_dir); /* Nearest neighbour version (faster) */ const float win_offset = -(float)(win_size - 1) / 2; // Compute sampling points // since grids are 2D, need to compute xBlock and yBlock indices const int xBlock = (blockIdx.y & 3); // blockIdx.y % 4 const int yBlock = (blockIdx.y >> 2); // floor(blockIdx.y/4) const int xIndex = xBlock * 5 + threadIdx.x; const int yIndex = yBlock * 5 + threadIdx.y; const float icoo = ((float)yIndex / (PATCH_SZ + 1)) * win_size; const float jcoo = ((float)xIndex / (PATCH_SZ + 1)) * win_size; LinearFilter<WinReader> filter(WinReader(centerX, centerY, win_offset, cos_dir, sin_dir)); s_PATCH[threadIdx.y][threadIdx.x] = filter(icoo, jcoo); __syncthreads(); if (threadIdx.x < 5 && threadIdx.y < 5) { const int tid = threadIdx.y * 5 + threadIdx.x; const float dw = c_DW[yIndex * PATCH_SZ + xIndex]; const float vx = (s_PATCH[threadIdx.y ][threadIdx.x + 1] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y + 1][threadIdx.x ]) * dw; const float vy = (s_PATCH[threadIdx.y + 1][threadIdx.x ] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y ][threadIdx.x + 1]) * dw; s_dx_bin[tid] = vx; s_dy_bin[tid] = vy; } } __device__ void reduce_sum25(volatile float* sdata1, volatile float* sdata2, volatile float* sdata3, volatile float* sdata4, int tid) { // first step is to reduce from 25 to 16 if (tid < 9) // use 9 threads { sdata1[tid] += sdata1[tid + 16]; sdata2[tid] += sdata2[tid + 16]; sdata3[tid] += sdata3[tid + 16]; sdata4[tid] += sdata4[tid + 16]; } // sum (reduce) from 16 to 1 (unrolled - aligned to a half-warp) if (tid < 8) { sdata1[tid] += sdata1[tid + 8]; sdata1[tid] += sdata1[tid + 4]; sdata1[tid] += sdata1[tid + 2]; sdata1[tid] += sdata1[tid + 1]; sdata2[tid] += sdata2[tid + 8]; sdata2[tid] += sdata2[tid + 4]; sdata2[tid] += sdata2[tid + 2]; sdata2[tid] += sdata2[tid + 1]; sdata3[tid] += sdata3[tid + 8]; sdata3[tid] += sdata3[tid + 4]; sdata3[tid] += sdata3[tid 
+ 2]; sdata3[tid] += sdata3[tid + 1]; sdata4[tid] += sdata4[tid + 8]; sdata4[tid] += sdata4[tid + 4]; sdata4[tid] += sdata4[tid + 2]; sdata4[tid] += sdata4[tid + 1]; } } __global__ void compute_descriptors64(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region) __shared__ float sdx[25]; __shared__ float sdy[25]; __shared__ float sdxabs[25]; __shared__ float sdyabs[25]; calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir); __syncthreads(); const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < 25) { sdxabs[tid] = ::fabs(sdx[tid]); // |dx| array sdyabs[tid] = ::fabs(sdy[tid]); // |dy| array __syncthreads(); reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid); __syncthreads(); float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 2); // write dx, dy, |dx|, |dy| if (tid == 0) { descriptors_block[0] = sdx[0]; descriptors_block[1] = sdy[0]; descriptors_block[2] = sdxabs[0]; descriptors_block[3] = sdyabs[0]; } } } __global__ void compute_descriptors128(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir) { // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region) __shared__ float sdx[25]; __shared__ float sdy[25]; // sum (reduce) 5x5 area response __shared__ float sd1[25]; __shared__ float sd2[25]; __shared__ float sdabs1[25]; __shared__ float sdabs2[25]; calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir); __syncthreads(); const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < 25) { if (sdy[tid] >= 0) { sd1[tid] = sdx[tid]; sdabs1[tid] = ::fabs(sdx[tid]); sd2[tid] = 0; sdabs2[tid] = 0; } else { sd1[tid] = 0; sdabs1[tid] = 0; sd2[tid] = sdx[tid]; sdabs2[tid] = ::fabs(sdx[tid]); } __syncthreads(); reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); __syncthreads(); float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 3); // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0) if (tid == 0) { descriptors_block[0] = sd1[0]; descriptors_block[1] = sdabs1[0]; descriptors_block[2] = sd2[0]; descriptors_block[3] = sdabs2[0]; } __syncthreads(); if (sdx[tid] >= 0) { sd1[tid] = sdy[tid]; sdabs1[tid] = ::fabs(sdy[tid]); sd2[tid] = 0; sdabs2[tid] = 0; } else { sd1[tid] = 0; sdabs1[tid] = 0; sd2[tid] = sdy[tid]; sdabs2[tid] = ::fabs(sdy[tid]); } __syncthreads(); reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid); __syncthreads(); // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0) if (tid == 0) { descriptors_block[4] = sd1[0]; descriptors_block[5] = sdabs1[0]; descriptors_block[6] = sd2[0]; descriptors_block[7] = sdabs2[0]; } } } template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors) { // no need for thread ID float* descriptor_base = descriptors.ptr(blockIdx.x); // read in the unnormalized descriptor values (squared) __shared__ float sqDesc[BLOCK_DIM_X]; const float lookup = descriptor_base[threadIdx.x]; sqDesc[threadIdx.x] = lookup * lookup; __syncthreads(); if (BLOCK_DIM_X >= 128) { if (threadIdx.x < 64) sqDesc[threadIdx.x] += sqDesc[threadIdx.x + 64]; __syncthreads(); } // reduction to get total if (threadIdx.x < 32) { volatile float* smem = sqDesc; smem[threadIdx.x] += smem[threadIdx.x + 32]; smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; 
smem[threadIdx.x] += smem[threadIdx.x + 1]; } // compute length (square root) __shared__ float len; if (threadIdx.x == 0) { len = sqrtf(sqDesc[0]); } __syncthreads(); // normalize and store in output descriptor_base[threadIdx.x] = lookup / len; } void compute_descriptors_gpu(const PtrStepSzf& descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures) { // compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D if (descriptors.cols == 64) { compute_descriptors64<<<dim3(nFeatures, 16, 1), dim3(6, 6, 1)>>>(descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); normalize_descriptors<64><<<dim3(nFeatures, 1, 1), dim3(64, 1, 1)>>>(descriptors); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } else { compute_descriptors128<<<dim3(nFeatures, 16, 1), dim3(6, 6, 1)>>>(descriptors, featureX, featureY, featureSize, featureDir); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); normalize_descriptors<128><<<dim3(nFeatures, 1, 1), dim3(128, 1, 1)>>>(descriptors); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } } // namespace surf }}} // namespace cv { namespace gpu { namespace device
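// For reference: a minimal host-side sketch of the per-keypoint L2 normalization that
// normalize_descriptors performs above, assuming the descriptors sit in a plain row-major
// float array (the PtrStepSzf pitch handling is omitted) and that sqrtf from <math.h> is
// available. It is a verification aid under those assumptions, not the GPU code path.
static void normalize_descriptors_host(float* descriptors, int nFeatures, int descriptor_size)
{
    for (int i = 0; i < nFeatures; ++i)
    {
        float* row = descriptors + i * descriptor_size;
        float sq_sum = 0.0f;
        for (int j = 0; j < descriptor_size; ++j)
            sq_sum += row[j] * row[j];   // same squared-sum reduction the kernel does in shared memory
        const float len = sqrtf(sq_sum);
        for (int j = 0; j < descriptor_size; ++j)
            row[j] /= len;               // divide each element by the descriptor length
    }
}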
db716e25baee96678654bbd84de336fff7c4a8b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Date: 15-12-2016 Author: Omer Anjum Description: RK integration 55-Point Comments: Omer Anjum: Changed the 19-point RK integration Kernel to 55-Point integration Kernel without changing the requirements of shared memory and simultaneously reducing the global memory traffic. The technique applied to achieve this is "scattering". Sep 09, 2017: Fixing many error */ #define EXTERN extern #include "dconsts.cuh" #include "../cparam_c.h" #include "smem.cuh" #include "hydro.cuh" #include "continuity.cuh" #include "forcing.cuh" #include "shear.cuh" #include "diff.cuh" //DEBUG #include "diagnostics.cuh" /* * Notes: * -diff functions are defined here, so that * these __device__ functions can be optimized * by the compiler when compiling rungekutta_steps. * This results in a very large speedup with the cost * of larger source files. * * -__launch_bounds__(maximum threads per block, minimum number of blocks we want to multithread on SMs) * tells the compiler how many registers we want to use: the compiler calculates the maximum amount of * registers it can use in order not to hit the register cap when we want to have certain amount of * thread blocks running on the SM. F.ex. max number of registers per SM is 65536 and we have 128-sized * thread blocks and want to multithread 8 blocks => max registers per thread = 65536 / (128*8) = 64 * * -restrict keyword tells the compiler that only one pointer is used to reference a certain value. * This enables the compiler to optimize some memory fetches to read-only cache and registers because * restrict keyword tells that the value temporarily stored to faster memory is always up-to-date and * is only modified with that specific pointer. * * -sid_column maps to threadIdx.x and sid_row maps to threadIdx.y. This is done because c++ arrays * are row-major and nearby threads access a contiguous memory area (when computing der_scalx). * e.g. the shared memory block is arranged like s_scal[Y-direction][X-direction] where X and Y * go to the same direction as X and Y in the device grids (d_lnrho etc.) * * */ //------------------------------------------------------------------------------------------------------ // // Derivative operators, 1st order // __device__ float der_scalx( int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Single derivative in x-direction // float res ; res = ( - s_scal[sid_row][sid_column-3] + d_FLT_9 * s_scal[sid_row][sid_column-2] - d_FLT_45 * s_scal[sid_row][sid_column-1] + d_FLT_45 * s_scal[sid_row][sid_column+1] - d_FLT_9 * s_scal[sid_row][sid_column+2] + s_scal[sid_row][sid_column+3] ) * d_DIFF1_DX_DIV; // / ( d_FLT_60*d_DX ); return res; } __device__ float der_scaly( int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Single derivative in y-direction // float res ; res = ( - s_scal[sid_row-3][sid_column] + d_FLT_9 * s_scal[sid_row-2][sid_column] - d_FLT_45 * s_scal[sid_row-1][sid_column] + d_FLT_45 * s_scal[sid_row+1][sid_column] - d_FLT_9 * s_scal[sid_row+2][sid_column] + s_scal[sid_row+3][sid_column] ) * d_DIFF1_DY_DIV; // / ( d_FLT_60*d_DY ); //MV: Made these divisions to go away. -> need only be calculated once and used as a constant. 
return res; } __device__ float der_scalz( float behind3, float behind2, float behind1, float infront1, float infront2, float infront3) { // // Single derivative in z-direction // float res ; res = ( - behind3 + d_FLT_9 * behind2 - d_FLT_45 * behind1 + d_FLT_45 * infront1 - d_FLT_9 * infront2 + infront3 ) * d_DIFF1_DZ_DIV; // / ( d_FLT_60*d_DZ ); return res; } //------------------------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------------------------ // // Derivative operators, 2nd order // __device__ float der2_scalx(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Double derivative in x-direction // float res; res = ( d_FLT_2 * s_scal[sid_row][sid_column-3] - d_FLT_27 * s_scal[sid_row][sid_column-2] + d_FLT_270 * s_scal[sid_row][sid_column-1] - d_FLT_490 * s_scal[sid_row][sid_column ] + d_FLT_270 * s_scal[sid_row][sid_column+1] - d_FLT_27 * s_scal[sid_row][sid_column+2] + d_FLT_2 * s_scal[sid_row][sid_column+3] ) * d_DIFF2_DX_DIV; // / ( d_FLT_180*d_DX*d_DX ); return res; } __device__ float der2_scaly(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Double derivative in y-direction // float res; res = ( d_FLT_2 * s_scal[sid_row-3][sid_column] - d_FLT_27 * s_scal[sid_row-2][sid_column] + d_FLT_270 * s_scal[sid_row-1][sid_column] - d_FLT_490 * s_scal[sid_row ][sid_column] + d_FLT_270 * s_scal[sid_row+1][sid_column] - d_FLT_27 * s_scal[sid_row+2][sid_column] + d_FLT_2 * s_scal[sid_row+3][sid_column] ) * d_DIFF2_DY_DIV; // / ( d_FLT_180*d_DY*d_DY ); return res; } __device__ float der2_scalz(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL], float behind3, float behind2, float behind1, float infront1, float infront2, float infront3) { // // Double derivative in z-direction // float res; res = ( d_FLT_2 * behind3 - d_FLT_27 * behind2 + d_FLT_270 * behind1 - d_FLT_490 * s_scal[sid_row][sid_column] + d_FLT_270 * infront1 - d_FLT_27 * infront2 + d_FLT_2 * infront3 ) * d_DIFF2_DZ_DIV; // / ( d_FLT_180*d_DY*d_DY ); return res; } __device__ float der2_scalxy(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Double derivative in xy-direction // float res; res = ( (float) 2.0 * ( s_scal[sid_row - 3][sid_column - 3] -s_scal[sid_row + 3][sid_column - 3] +s_scal[sid_row + 3][sid_column + 3] -s_scal[sid_row - 3][sid_column + 3]) - (float) 27.0 * ( s_scal[sid_row - 2][sid_column - 2] -s_scal[sid_row + 2][sid_column - 2] +s_scal[sid_row + 2][sid_column + 2] -s_scal[sid_row - 2][sid_column + 2]) + (float) 270.0 * ( s_scal[sid_row - 1][sid_column - 1]//ok -s_scal[sid_row + 1][sid_column - 1]//ok +s_scal[sid_row + 1][sid_column + 1]//ok -s_scal[sid_row - 1][sid_column + 1])//ok )* d_DIFFMN_DXDY_DIV; return res; } __device__ float der2_scalxz(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL], float res[]) { // // Double derivative in xz-direction // res[0] = d_DIFFMN_DXDZ_DIV*d_FLT_2 * (-s_scal[sid_row ][sid_column + 3] + s_scal[sid_row ][sid_column - 3]); res[1] = -d_DIFFMN_DXDZ_DIV*d_FLT_27 * (-s_scal[sid_row ][sid_column + 2] + s_scal[sid_row ][sid_column - 2]); res[2] = d_DIFFMN_DXDZ_DIV*d_FLT_270 * (-s_scal[sid_row ][sid_column + 1] + s_scal[sid_row ][sid_column - 1]); return 0; } __device__ float der2_scalyz(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL], float res[]) { // // Double derivative in 
yz-direction // res[0] = d_DIFFMN_DYDZ_DIV*d_FLT_2 * (-s_scal[sid_row + 3][sid_column] + s_scal[sid_row - 3][sid_column]); res[1] = -d_DIFFMN_DYDZ_DIV*d_FLT_27 * (-s_scal[sid_row + 2][sid_column] + s_scal[sid_row - 2][sid_column]); res[2] = d_DIFFMN_DYDZ_DIV*d_FLT_270 * (-s_scal[sid_row + 1][sid_column] + s_scal[sid_row - 1][sid_column]); return 0; } static __device__ void nebla_nebla_div(int sid_row, int sid_column, float s_uu_x[][SHARED_SIZE_COL], float s_uu_y[][SHARED_SIZE_COL], float s_uu_z[][SHARED_SIZE_COL], float div_z_partial_ux[], float div_z_partial_uy[], float div_z_partial_uz[], int zplane){ //Calculate front if(zplane - 3 >= 0 && zplane - 3 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[0] += d_DIFFMN_DXDZ_DIV*(float) 2.0 * (s_uu_z[sid_row ][sid_column + 3]- s_uu_z[sid_row ][sid_column - 3]); div_z_partial_uy[0] += d_DIFFMN_DYDZ_DIV*(float) 2.0 * (s_uu_z[sid_row + 3][sid_column]- s_uu_z[sid_row - 3][sid_column]); div_z_partial_uz[0] += (d_DIFFMN_DXDZ_DIV*(float) 2.0 * (s_uu_x[sid_row ][sid_column + 3]- s_uu_x[sid_row ][sid_column - 3])+ d_DIFFMN_DYDZ_DIV*(float) 2.0 * (s_uu_y[sid_row + 3][sid_column]- s_uu_y[sid_row - 3][sid_column])); } if(zplane - 2 >= 0 && zplane - 2 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[1] += -d_DIFFMN_DXDZ_DIV*(float) 27.0 * (s_uu_z[sid_row ][sid_column + 2]- s_uu_z[sid_row ][sid_column - 2]); div_z_partial_uy[1] += -d_DIFFMN_DYDZ_DIV*(float) 27.0 * (s_uu_z[sid_row + 2][sid_column]- s_uu_z[sid_row - 2][sid_column]); div_z_partial_uz[1] += (-d_DIFFMN_DXDZ_DIV*(float) 27.0 * (s_uu_x[sid_row ][sid_column + 2]- s_uu_x[sid_row ][sid_column - 2])+ (-d_DIFFMN_DYDZ_DIV)*(float) 27.0 * (s_uu_y[sid_row + 2][sid_column]- s_uu_y[sid_row - 2][sid_column])); } if(zplane - 1 >= 0 && zplane - 1 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[2] += d_DIFFMN_DXDZ_DIV*(float) 270.0 * (s_uu_z[sid_row ][sid_column + 1]- s_uu_z[sid_row ][sid_column - 1]); div_z_partial_uy[2] += d_DIFFMN_DYDZ_DIV*(float) 270.0 * (s_uu_z[sid_row + 1][sid_column]- s_uu_z[sid_row - 1][sid_column]); div_z_partial_uz[2] += (d_DIFFMN_DXDZ_DIV*(float) 270.0 * (s_uu_x[sid_row ][sid_column + 1]- s_uu_x[sid_row ][sid_column - 1])+ d_DIFFMN_DYDZ_DIV*(float) 270.0 * (s_uu_y[sid_row + 1][sid_column]- s_uu_y[sid_row - 1][sid_column])); } // div_z_partial_xx[3] += 0; if(zplane + 1 >= 0 && zplane + 1 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[4] -= d_DIFFMN_DXDZ_DIV*(float) 270.0 * (s_uu_z[sid_row ][sid_column + 1]- s_uu_z[sid_row ][sid_column - 1]); div_z_partial_uy[4] -= d_DIFFMN_DYDZ_DIV*(float) 270.0 * (s_uu_z[sid_row + 1][sid_column]- s_uu_z[sid_row - 1][sid_column]); div_z_partial_uz[4] -= (d_DIFFMN_DXDZ_DIV*(float) 270.0 * (s_uu_x[sid_row ][sid_column + 1]- s_uu_x[sid_row ][sid_column - 1])+ d_DIFFMN_DYDZ_DIV*(float) 270.0 * ( s_uu_y[sid_row + 1][sid_column]- s_uu_y[sid_row - 1][sid_column])); } if(zplane + 2 >= 0 && zplane + 2 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[5] -= -d_DIFFMN_DXDZ_DIV*(float) 27.0 * (s_uu_z[sid_row ][sid_column + 2]- s_uu_z[sid_row ][sid_column - 2]); div_z_partial_uy[5] -= -d_DIFFMN_DYDZ_DIV*(float) 27.0 * (s_uu_z[sid_row + 2][sid_column]- s_uu_z[sid_row - 2][sid_column]); div_z_partial_uz[5] -= (-d_DIFFMN_DXDZ_DIV*(float) 27.0 * (s_uu_x[sid_row ][sid_column + 2]- s_uu_x[sid_row ][sid_column - 2])+ (-d_DIFFMN_DYDZ_DIV)*(float) 27.0 * (s_uu_y[sid_row + 2][sid_column]- s_uu_y[sid_row - 2][sid_column])); } if(zplane + 3 >= 0 && zplane + 3 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[6] = -d_DIFFMN_DXDZ_DIV*(float) 2.0 * (s_uu_z[sid_row ][sid_column + 3]- 
s_uu_z[sid_row ][sid_column - 3]); div_z_partial_uy[6] = -d_DIFFMN_DYDZ_DIV*(float) 2.0 * (s_uu_z[sid_row + 3][sid_column]- s_uu_z[sid_row - 3][sid_column]); div_z_partial_uz[6] = -(d_DIFFMN_DXDZ_DIV*(float) 2.0 * (s_uu_x[sid_row ][sid_column + 3]- s_uu_x[sid_row ][sid_column - 3])+ d_DIFFMN_DYDZ_DIV*(float) 2.0 * (s_uu_y[sid_row + 3][sid_column]- s_uu_y[sid_row - 3][sid_column])); } } //------------------------------------------------------------------------------------------------------ template <int step_number> __global__ void __launch_bounds__(RK_THREADS_PER_BLOCK, 4) rungekutta_step_first_half(const float* __restrict__ d_lnrho, const float* __restrict__ d_uu_x, const float* __restrict__ d_uu_y, const float* __restrict__ d_uu_z, float* __restrict__ d_w_lnrho, float* __restrict__ d_w_uu_x, float* __restrict__ d_w_uu_y, float* __restrict__ d_w_uu_z, float* __restrict__ d_lnrho_dest, float* __restrict__ d_uu_x_dest, float* __restrict__ d_uu_y_dest, float* __restrict__ d_uu_z_dest, int isubstep) { float ALPHA, BETA; switch (isubstep) { case 1: ALPHA = d_ALPHA1; BETA = d_BETA1; break; case 2: ALPHA = d_ALPHA2; BETA = d_BETA2; break; case 3: ALPHA = d_ALPHA3; BETA = d_BETA3; break; } __shared__ float s_lnrho[SHARED_SIZE_ROW][SHARED_SIZE_COL]; //SHARED_SIZE_ROW (RK_THREADS_Y + 2*BOUND_SIZE) = (4 + 2*3) = 10 __shared__ float s_uu_x [SHARED_SIZE_ROW][SHARED_SIZE_COL]; //SHARED_SIZE_COL (RK_THREADS_X + 2*BOUND_SIZE) = (32 + 2*3) = 38 __shared__ float s_uu_y [SHARED_SIZE_ROW][SHARED_SIZE_COL]; __shared__ float s_uu_z [SHARED_SIZE_ROW][SHARED_SIZE_COL]; float w_lnrho = NAN; float w_uu_x = NAN; float w_uu_y = NAN; float w_uu_z = NAN; const int grid_idx_x = threadIdx.x + blockIdx.x*blockDim.x; const int grid_idx_y = threadIdx.y + blockIdx.y*blockDim.y; const int grid_idx_z = threadIdx.z + blockIdx.z*blockDim.z*RK_ELEMS_PER_THREAD_FIRST; const int sid_col = threadIdx.x + BOUND_SIZE; //Varies between (3, blockDim.x + 3) if BOUND_SIZE == 3 const int sid_row = threadIdx.y + BOUND_SIZE; //Varies between (3, blockDim.y + 3) //Index in the partial result array (doesn't have boundary zones) int w_grid_idx = (grid_idx_x) + (grid_idx_y)*d_W_GRID_Y_OFFSET + (grid_idx_z)*d_W_GRID_Z_OFFSET; //Index in the final result array (offset to start from first index of //the computational domain) //int grid_idx = (grid_idx_x + d_CX_BOT) + // (grid_idx_y + d_CY_BOT)*d_GRID_Y_OFFSET + // (grid_idx_z + d_CZ_BOT)*d_GRID_Z_OFFSET; int grid_idx = (grid_idx_x + d_CX_BOT) + (grid_idx_y + d_CY_BOT)*d_GRID_Y_OFFSET + (grid_idx_z + 0)*d_GRID_Z_OFFSET; // Only in zplane we are in halo zone float current_lnrho = d_lnrho[grid_idx]; float current_uu_x = d_uu_x[grid_idx]; float current_uu_y = d_uu_y[grid_idx]; float current_uu_z = d_uu_z[grid_idx]; float infront1_lnrho = d_lnrho[grid_idx + 1*d_GRID_Z_OFFSET]; float infront2_lnrho = d_lnrho[grid_idx + 2*d_GRID_Z_OFFSET]; float infront3_lnrho = d_lnrho[grid_idx + 3*d_GRID_Z_OFFSET]; float infront1_uu_x = d_uu_x[grid_idx + 1*d_GRID_Z_OFFSET]; float infront2_uu_x = d_uu_x[grid_idx + 2*d_GRID_Z_OFFSET]; float infront3_uu_x = d_uu_x[grid_idx + 3*d_GRID_Z_OFFSET]; float infront1_uu_y = d_uu_y[grid_idx + 1*d_GRID_Z_OFFSET]; float infront2_uu_y = d_uu_y[grid_idx + 2*d_GRID_Z_OFFSET]; float infront3_uu_y = d_uu_y[grid_idx + 3*d_GRID_Z_OFFSET]; float infront1_uu_z = d_uu_z[grid_idx + 1*d_GRID_Z_OFFSET]; float infront2_uu_z = d_uu_z[grid_idx + 2*d_GRID_Z_OFFSET]; float infront3_uu_z = d_uu_z[grid_idx + 3*d_GRID_Z_OFFSET]; float behind3_lnrho = NAN; float behind2_lnrho = NAN; float behind1_lnrho = 
NAN; float behind3_uu_x = NAN; float behind2_uu_x = NAN; float behind1_uu_x = NAN; float behind3_uu_y = NAN; float behind2_uu_y = NAN; float behind1_uu_y = NAN; float behind3_uu_z = NAN; float behind2_uu_z = NAN; float behind1_uu_z = NAN; //--------------------------------------------------------- float div_z_partial_ux[(2*BOUND_SIZE) + 1] = {NAN}; float div_z_partial_uy[(2*BOUND_SIZE) + 1] = {NAN}; float div_z_partial_uz[(2*BOUND_SIZE) + 1] = {NAN}; __shared__ float mom_x[RK_THREADS_PER_BLOCK][BOUND_SIZE+1]; __shared__ float mom_y[RK_THREADS_PER_BLOCK][BOUND_SIZE+1]; __shared__ float mom_z[RK_THREADS_PER_BLOCK][BOUND_SIZE+1]; //--------------------------------------------------------- for(int zplane = -3 ; zplane < RK_ELEMS_PER_THREAD_FIRST + 3; zplane++) { switch (isubstep) { case 1: w_lnrho = 0.0f; w_uu_x = 0.0f; w_uu_y = 0.0f; w_uu_z = 0.0f; break; default: if (zplane >= 0 && zplane < RK_ELEMS_PER_THREAD_FIRST) { w_lnrho = d_w_lnrho[w_grid_idx]; }else { w_lnrho = NAN; } if(zplane - 3 >= 0 && zplane -3 < RK_ELEMS_PER_THREAD_FIRST) { const int mature_w_idx = w_grid_idx-3*d_W_GRID_Z_OFFSET; w_uu_x = d_w_uu_x [mature_w_idx]; w_uu_y = d_w_uu_y [mature_w_idx]; w_uu_z = d_w_uu_z [mature_w_idx]; }else { w_uu_x = NAN; w_uu_y = NAN; w_uu_z = NAN; } break; } //Load the previous step to shared memory s_lnrho[sid_row][sid_col] = current_lnrho; s_uu_x [sid_row][sid_col] = current_uu_x; s_uu_y [sid_row][sid_col] = current_uu_y; s_uu_z [sid_row][sid_col] = current_uu_z; //Load halos (not optimal) if (threadIdx.x < BOUND_SIZE) { //Load left s_lnrho[sid_row][sid_col-BOUND_SIZE] = d_lnrho[grid_idx - BOUND_SIZE]; // Omer: Filling in halozones of shared memory s_uu_x [sid_row][sid_col-BOUND_SIZE] = d_uu_x [grid_idx - BOUND_SIZE]; s_uu_y [sid_row][sid_col-BOUND_SIZE] = d_uu_y [grid_idx - BOUND_SIZE]; s_uu_z [sid_row][sid_col-BOUND_SIZE] = d_uu_z [grid_idx - BOUND_SIZE]; //Load right s_lnrho[sid_row][sid_col+RK_THREADS_X] = d_lnrho[grid_idx+RK_THREADS_X]; s_uu_x [sid_row][sid_col+RK_THREADS_X] = d_uu_x [grid_idx+RK_THREADS_X]; s_uu_y [sid_row][sid_col+RK_THREADS_X] = d_uu_y [grid_idx+RK_THREADS_X]; s_uu_z [sid_row][sid_col+RK_THREADS_X] = d_uu_z [grid_idx+RK_THREADS_X]; } if (threadIdx.y < BOUND_SIZE) { //Load down s_lnrho[sid_row-BOUND_SIZE][sid_col] = d_lnrho[grid_idx - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_x [sid_row-BOUND_SIZE][sid_col] = d_uu_x [grid_idx - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_y [sid_row-BOUND_SIZE][sid_col] = d_uu_y [grid_idx - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_z [sid_row-BOUND_SIZE][sid_col] = d_uu_z [grid_idx - BOUND_SIZE*d_GRID_Y_OFFSET]; //Load up s_lnrho[sid_row+RK_THREADS_Y][sid_col] = d_lnrho[grid_idx + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_x [sid_row+RK_THREADS_Y][sid_col] = d_uu_x [grid_idx + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_y [sid_row+RK_THREADS_Y][sid_col] = d_uu_y [grid_idx + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_z [sid_row+RK_THREADS_Y][sid_col] = d_uu_z [grid_idx + RK_THREADS_Y*d_GRID_Y_OFFSET]; } if(threadIdx.x < BOUND_SIZE && threadIdx.y < BOUND_SIZE){ //Load corners of size 3x3 of halo zones not loaded above in shared memory //Left Up s_lnrho[sid_row-BOUND_SIZE][sid_col-BOUND_SIZE] = d_lnrho[grid_idx - BOUND_SIZE - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_x [sid_row-BOUND_SIZE][sid_col-BOUND_SIZE] = d_uu_x[grid_idx - BOUND_SIZE - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_y [sid_row-BOUND_SIZE][sid_col-BOUND_SIZE] = d_uu_y[grid_idx - BOUND_SIZE - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_z [sid_row-BOUND_SIZE][sid_col-BOUND_SIZE] = d_uu_z[grid_idx - BOUND_SIZE - BOUND_SIZE*d_GRID_Y_OFFSET]; //Left 
Down s_lnrho[sid_row+RK_THREADS_Y][sid_col-BOUND_SIZE] = d_lnrho[grid_idx - BOUND_SIZE + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_x [sid_row+RK_THREADS_Y][sid_col-BOUND_SIZE] = d_uu_x[grid_idx - BOUND_SIZE + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_y [sid_row+RK_THREADS_Y][sid_col-BOUND_SIZE] = d_uu_y[grid_idx - BOUND_SIZE + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_z [sid_row+RK_THREADS_Y][sid_col-BOUND_SIZE] = d_uu_z[grid_idx - BOUND_SIZE + RK_THREADS_Y*d_GRID_Y_OFFSET]; //Right Up s_lnrho[sid_row-BOUND_SIZE][sid_col+RK_THREADS_X] = d_lnrho[grid_idx + RK_THREADS_X - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_x [sid_row-BOUND_SIZE][sid_col+RK_THREADS_X] = d_uu_x[grid_idx + RK_THREADS_X - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_y [sid_row-BOUND_SIZE][sid_col+RK_THREADS_X] = d_uu_y[grid_idx + RK_THREADS_X - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_z [sid_row-BOUND_SIZE][sid_col+RK_THREADS_X] = d_uu_z[grid_idx + RK_THREADS_X - BOUND_SIZE*d_GRID_Y_OFFSET]; //Right Down s_lnrho[sid_row+RK_THREADS_Y][sid_col + RK_THREADS_X] = d_lnrho[grid_idx + RK_THREADS_X + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_x [sid_row+RK_THREADS_Y][sid_col + RK_THREADS_X] = d_uu_x[grid_idx + RK_THREADS_X + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_y [sid_row+RK_THREADS_Y][sid_col + RK_THREADS_X] = d_uu_y[grid_idx + RK_THREADS_X + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_z [sid_row+RK_THREADS_Y][sid_col + RK_THREADS_X] = d_uu_z[grid_idx + RK_THREADS_X + RK_THREADS_Y*d_GRID_Y_OFFSET]; } __syncthreads(); //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% nebla_nebla_div(sid_row, sid_col, s_uu_x, s_uu_y, s_uu_z, div_z_partial_ux, div_z_partial_uy, div_z_partial_uz, zplane); if(zplane >= 0 && zplane < RK_ELEMS_PER_THREAD_FIRST){ const float d2x_uu_x = der2_scalx(sid_row, sid_col, s_uu_x); const float d2xy_uu_y = der2_scalxy(sid_row, sid_col, s_uu_y); const float d2xy_uu_x = der2_scalxy(sid_row, sid_col, s_uu_x); const float d2y_uu_y = der2_scaly(sid_row, sid_col, s_uu_y); const float d2z_uu_z = der2_scalz(sid_row, sid_col, s_uu_z, behind3_uu_z, behind2_uu_z, behind1_uu_z, infront1_uu_z, infront2_uu_z, infront3_uu_z); //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% //Solve derivatives const float ddz_lnrho = der_scalz( behind3_lnrho, behind2_lnrho, behind1_lnrho, infront1_lnrho, infront2_lnrho, infront3_lnrho ); const float ddz_uu_x = der_scalz( behind3_uu_x, behind2_uu_x, behind1_uu_x, infront1_uu_x, infront2_uu_x, infront3_uu_x ); const float ddz_uu_y = der_scalz( behind3_uu_y, behind2_uu_y, behind1_uu_y, infront1_uu_y, infront2_uu_y, infront3_uu_y ); const float ddz_uu_z = der_scalz( behind3_uu_z, behind2_uu_z, behind1_uu_z, infront1_uu_z, infront2_uu_z, infront3_uu_z ); const float ddx_lnrho = der_scalx(sid_row, sid_col, s_lnrho); const float ddx_uu_x = der_scalx(sid_row, sid_col, s_uu_x); const float ddx_uu_y = der_scalx(sid_row, sid_col, s_uu_y); const float ddx_uu_z = der_scalx(sid_row, sid_col, s_uu_z); const float ddy_lnrho = der_scaly(sid_row, sid_col, s_lnrho); const float ddy_uu_x = der_scaly(sid_row, sid_col, s_uu_x); const float ddy_uu_y = der_scaly(sid_row, sid_col, s_uu_y); const float ddy_uu_z = der_scaly(sid_row, sid_col, s_uu_z); //Save the divergence field of uu to global memory //d_div_uu[grid_idx] = ddx_uu_x + ddy_uu_y + ddz_uu_z; // Omer: nebla.u_i Eq(.1) //Continuity const float cont_res = - (s_uu_x[sid_row][sid_col] * ddx_lnrho + s_uu_y[sid_row][sid_col] * ddy_lnrho + s_uu_z[sid_row][sid_col] * ddz_lnrho) - (ddx_uu_x + ddy_uu_y + ddz_uu_z); // Omer: -(u.nebla)rho - nebla.u Eq(.2) //ILP: compute 
nu_const_uu and S_grad_lnrho before using cont_res //Omer: Eq(.6) const float nu_const_uu_x = der2_scalx(sid_row, sid_col, s_uu_x) + der2_scaly(sid_row, sid_col, s_uu_x) + der2_scalz(sid_row, sid_col, s_uu_x, behind3_uu_x, behind2_uu_x, behind1_uu_x, infront1_uu_x, infront2_uu_x, infront3_uu_x); const float nu_const_uu_y = der2_scalx(sid_row, sid_col, s_uu_y) + der2_scaly(sid_row, sid_col, s_uu_y) + der2_scalz(sid_row, sid_col, s_uu_y, behind3_uu_y, behind2_uu_y, behind1_uu_y, infront1_uu_y, infront2_uu_y, infront3_uu_y); const float nu_const_uu_z = der2_scalx(sid_row, sid_col, s_uu_z) + der2_scaly(sid_row, sid_col, s_uu_z) + der2_scalz(sid_row, sid_col, s_uu_z, behind3_uu_z, behind2_uu_z, behind1_uu_z, infront1_uu_z, infront2_uu_z, infront3_uu_z); //S_grad_lnrho //Eq(.9) const float Sxx = (2.0f/3.0f)*ddx_uu_x - (1.0f/3.0f)*(ddy_uu_y + ddz_uu_z); const float Sxy = 0.5f*(ddy_uu_x + ddx_uu_y); const float Sxz = 0.5f*(ddz_uu_x + ddx_uu_z); const float Syy = (2.0f/3.0f)*ddy_uu_y - (1.0f/3.0f)*(ddx_uu_x + ddz_uu_z); const float Syz = 0.5f*(ddz_uu_y + ddy_uu_z); const float Szz = (2.0f/3.0f)*ddz_uu_z - (1.0f/3.0f)*(ddx_uu_x + ddy_uu_y); //Use cont_res to compute w_lnrho w_lnrho = ALPHA*w_lnrho + d_DT*cont_res; //Omer: Second line Algo. 3 updating rho //Navier-Stokes //if ( blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){ // printf("%d---writing mom_[%d] \n", zplane , zplane%4); //} mom_x[(threadIdx.y*RK_THREADS_X)+threadIdx.x][zplane%4] = - (s_uu_x[sid_row][sid_col] * ddx_uu_x + //vec_dot_nabla_scal s_uu_y[sid_row][sid_col] * ddy_uu_x + s_uu_z[sid_row][sid_col] * ddz_uu_x) - d_CS2_SOUND*ddx_lnrho //ddx part of grad lnrho + d_NU_VISC * nu_const_uu_x //nu_const + 2.0f*d_NU_VISC*(Sxx*ddx_lnrho + Sxy*ddy_lnrho + Sxz*ddz_lnrho)+d_NU_VISC*(1.0f/3.0f)*(d2x_uu_x + d2xy_uu_y); //S_grad_lnrho mom_y[(threadIdx.y*RK_THREADS_X)+threadIdx.x][zplane%4] = - (s_uu_x[sid_row][sid_col] * ddx_uu_y + //vec_dot_nabla_scal s_uu_y[sid_row][sid_col] * ddy_uu_y + s_uu_z[sid_row][sid_col] * ddz_uu_y) - d_CS2_SOUND*ddy_lnrho //ddy part of grad lnrho + d_NU_VISC * nu_const_uu_y //nu_const + 2.0f*d_NU_VISC*(Sxy*ddx_lnrho + Syy*ddy_lnrho + Syz*ddz_lnrho)+d_NU_VISC*(1.0f/3.0f)*(d2xy_uu_x + d2y_uu_y); //S_grad_lnrho mom_z[(threadIdx.y*RK_THREADS_X)+threadIdx.x][zplane%4] = - (s_uu_x[sid_row][sid_col] * ddx_uu_z + //vec_dot_nabla_scal s_uu_y[sid_row][sid_col] * ddy_uu_z + s_uu_z[sid_row][sid_col] * ddz_uu_z) - d_CS2_SOUND*ddz_lnrho //ddz part of grad lnrho + d_NU_VISC * nu_const_uu_z //nu_const + 2.0f*d_NU_VISC*(Sxz*ddx_lnrho + Syz*ddy_lnrho + Szz*ddz_lnrho)+d_NU_VISC*(1.0f/3.0f)*d2z_uu_z; //S_grad_lnrho d_lnrho_dest[grid_idx] = s_lnrho[sid_row][sid_col] + BETA*w_lnrho; d_w_lnrho[w_grid_idx] = w_lnrho; } //use the output which is mature now if(zplane - 3 >= 0 && zplane - 3 < RK_ELEMS_PER_THREAD_FIRST) { const float div_uux = d_NU_VISC*(1.0f/3.0f)*(div_z_partial_ux[0]); const float div_uuy = d_NU_VISC*(1.0f/3.0f)*(div_z_partial_uy[0]); const float div_uuz = d_NU_VISC*(1.0f/3.0f)*(div_z_partial_uz[0]); w_uu_x = ALPHA*w_uu_x + d_DT*(mom_x[(threadIdx.y*RK_THREADS_X)+threadIdx.x][(4+zplane+1)%4] + div_uux); w_uu_y = ALPHA*w_uu_y + d_DT*(mom_y[(threadIdx.y*RK_THREADS_X)+threadIdx.x][(4+zplane+1)%4] + div_uuy); w_uu_z = ALPHA*w_uu_z + d_DT*(mom_z[(threadIdx.y*RK_THREADS_X)+threadIdx.x][(4+zplane+1)%4] + div_uuz); d_uu_x_dest [grid_idx-3*d_GRID_Z_OFFSET] = behind3_uu_x + BETA*w_uu_x; d_uu_y_dest [grid_idx-3*d_GRID_Z_OFFSET] = behind3_uu_y + BETA*w_uu_y; d_uu_z_dest 
[grid_idx-3*d_GRID_Z_OFFSET] = behind3_uu_z + BETA*w_uu_z; d_w_uu_x [w_grid_idx-3*d_W_GRID_Z_OFFSET] = w_uu_x; d_w_uu_y [w_grid_idx-3*d_W_GRID_Z_OFFSET] = w_uu_y; d_w_uu_z [w_grid_idx-3*d_W_GRID_Z_OFFSET] = w_uu_z; } // Shift div_z_partial_ux[0] = div_z_partial_ux[1]; div_z_partial_ux[1] = div_z_partial_ux[2]; div_z_partial_ux[2] = div_z_partial_ux[3]; div_z_partial_ux[3] = div_z_partial_ux[4]; div_z_partial_ux[4] = div_z_partial_ux[5]; div_z_partial_ux[5] = div_z_partial_ux[6]; div_z_partial_ux[6] = NAN; div_z_partial_uy[0] = div_z_partial_uy[1]; div_z_partial_uy[1] = div_z_partial_uy[2]; div_z_partial_uy[2] = div_z_partial_uy[3]; div_z_partial_uy[3] = div_z_partial_uy[4]; div_z_partial_uy[4] = div_z_partial_uy[5]; div_z_partial_uy[5] = div_z_partial_uy[6]; div_z_partial_uy[6] = NAN; div_z_partial_uz[0] = div_z_partial_uz[1]; div_z_partial_uz[1] = div_z_partial_uz[2]; div_z_partial_uz[2] = div_z_partial_uz[3]; div_z_partial_uz[3] = div_z_partial_uz[4]; div_z_partial_uz[4] = div_z_partial_uz[5]; div_z_partial_uz[5] = div_z_partial_uz[6]; div_z_partial_uz[6] = NAN; //else continue grid_idx += d_GRID_Z_OFFSET; if (zplane >= 0) w_grid_idx += d_W_GRID_Z_OFFSET; //Reuse data in registers and update infront3 behind3_lnrho = behind2_lnrho; behind2_lnrho = behind1_lnrho; behind1_lnrho = s_lnrho[sid_row][sid_col]; current_lnrho = infront1_lnrho; infront1_lnrho = infront2_lnrho; infront2_lnrho = infront3_lnrho; behind3_uu_x = behind2_uu_x; behind2_uu_x = behind1_uu_x; behind1_uu_x = s_uu_x[sid_row][sid_col]; current_uu_x = infront1_uu_x; infront1_uu_x = infront2_uu_x; infront2_uu_x = infront3_uu_x; behind3_uu_y = behind2_uu_y; behind2_uu_y = behind1_uu_y; behind1_uu_y = s_uu_y[sid_row][sid_col]; current_uu_y = infront1_uu_y; infront1_uu_y = infront2_uu_y; infront2_uu_y = infront3_uu_y; behind3_uu_z = behind2_uu_z; behind2_uu_z = behind1_uu_z; behind1_uu_z = s_uu_z[sid_row][sid_col]; current_uu_z = infront1_uu_z; infront1_uu_z = infront2_uu_z; infront2_uu_z = infront3_uu_z; if(zplane < RK_ELEMS_PER_THREAD_FIRST-1){ infront3_lnrho = d_lnrho[grid_idx + 3*d_GRID_Z_OFFSET]; infront3_uu_x = d_uu_x[grid_idx + 3*d_GRID_Z_OFFSET]; infront3_uu_y = d_uu_y[grid_idx + 3*d_GRID_Z_OFFSET]; infront3_uu_z = d_uu_z[grid_idx + 3*d_GRID_Z_OFFSET]; } else{ infront3_lnrho = NAN; infront3_uu_x = NAN; infront3_uu_y = NAN; infront3_uu_z = NAN; } __syncthreads(); }// loop ends } //---------------------------------------------------------- // Manages the calculation on 2N-Runge-Kutta for a single timestep //---------------------------------------------------------- void rungekutta2N_cuda( float* d_lnrho, float* d_uu_x, float* d_uu_y, float* d_uu_z, float* d_w_lnrho, float* d_w_uu_x, float* d_w_uu_y, float* d_w_uu_z, float* d_lnrho_dest, float* d_uu_x_dest, float* d_uu_y_dest, float* d_uu_z_dest, int isubstep) { //Determine threadblock dims (TODO better solution, define?) 
static dim3 threadsPerBlock, blocksPerGridFirst, blocksPerGridSecond; threadsPerBlock.x = RK_THREADS_X; //RK_THREADS_X = 32 threadsPerBlock.y = RK_THREADS_Y; //RK_THREADS_Y = 4 threadsPerBlock.z = RK_THREADS_Z; //RK_THREADS_Z = 1 assert(RK_THREADS_Z == 1); blocksPerGridFirst.x = ceil((float) COMP_DOMAIN_SIZE_X / (float)threadsPerBlock.x); //128 / 32 = 4 blocksPerGridFirst.y = ceil((float) COMP_DOMAIN_SIZE_Y / (float)threadsPerBlock.y); //128 / 4 = 32 blocksPerGridFirst.z = ceil((float) COMP_DOMAIN_SIZE_Z / (float)(threadsPerBlock.z*RK_ELEMS_PER_THREAD_FIRST)); //128 / (1*8) = 16 blocksPerGridSecond.x = ceil((float) COMP_DOMAIN_SIZE_X / (float)threadsPerBlock.x); blocksPerGridSecond.y = ceil((float) COMP_DOMAIN_SIZE_Y / (float)threadsPerBlock.y); blocksPerGridSecond.z = ceil((float) COMP_DOMAIN_SIZE_Z / (float)(threadsPerBlock.z*RK_ELEMS_PER_THREAD_SECOND)); //Calculate steps in kernels // Step 1: //------------------------------------------------------------------------------------------------------------------------------- //FIRST HALF hipLaunchKernelGGL(( rungekutta_step_first_half<0>), dim3(blocksPerGridFirst), dim3(threadsPerBlock), 0, 0, d_lnrho, d_uu_x, d_uu_y, d_uu_z, d_w_lnrho, d_w_uu_x, d_w_uu_y, d_w_uu_z, d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, isubstep); /* //hipDeviceSynchronize(); //checkKernelErr(); //SECOND HALF ////periodic_boundcond_scal_cuda(d_div_uu); //Boundary conditions for the divergence field //hipDeviceSynchronize(); //checkKernelErr(); ////rungekutta_step_second_half<0><<<blocksPerGridSecond, threadsPerBlock>>>(d_div_uu, //// d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, //// d_w_uu_x, d_w_uu_y, d_w_uu_z); //hipDeviceSynchronize(); //checkKernelErr(); boundcond_cuda(d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest); //hipDeviceSynchronize(); //checkKernelErr(); //------------------------------------------------------------------------------------------------------------------------------- // Step 2: //------------------------------------------------------------------------------------------------------------------------------- //FIRST HALF rungekutta_step_first_half<1><<<blocksPerGridFirst, threadsPerBlock>>>(d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, d_w_lnrho, d_w_uu_x, d_w_uu_y, d_w_uu_z, d_lnrho, d_uu_x, d_uu_y, d_uu_z, d_div_uu); //hipDeviceSynchronize(); //checkKernelErr(); //SECOND HALF ////periodic_boundcond_scal_cuda(d_div_uu); //Boundary conditions for the divergence field //hipDeviceSynchronize(); //checkKernelErr(); ////rungekutta_step_second_half<1><<<blocksPerGridSecond, threadsPerBlock>>>(d_div_uu, //// d_uu_x, d_uu_y, d_uu_z, //// d_w_uu_x, d_w_uu_y, d_w_uu_z); //hipDeviceSynchronize(); //checkKernelErr(); boundcond_cuda(d_lnrho, d_uu_x, d_uu_y, d_uu_z); //hipDeviceSynchronize(); //checkKernelErr(); //------------------------------------------------------------------------------------------------------------------------------- //TIME START hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); // Step 3: //------------------------------------------------------------------------------------------------------------------------------- //FIRST HALF rungekutta_step_first_half<2><<<blocksPerGridFirst, threadsPerBlock>>>(d_lnrho, d_uu_x, d_uu_y, d_uu_z, d_w_lnrho, d_w_uu_x, d_w_uu_y, d_w_uu_z, d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, d_div_uu); //hipDeviceSynchronize(); //checkKernelErr(); //SECOND HALF ////periodic_boundcond_scal_cuda(d_div_uu);; //Boundary conditions 
for the divergence field //hipDeviceSynchronize(); //checkKernelErr(); ////rungekutta_step_second_half<2><<<blocksPerGridSecond, threadsPerBlock>>>(d_div_uu, //// d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, //// d_w_uu_x, d_w_uu_y, d_w_uu_z); //hipDeviceSynchronize(); //checkKernelErr(); boundcond_cuda(d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest); //hipDeviceSynchronize(); //checkKernelErr(); //------------------------------------------------------------------------------------------------------------------------------- //TIME END hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); printf("A Single rungekutta step time elapsed: \t%f ms\n", time); */ hipDeviceSynchronize(); }
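// For reference: a minimal scalar sketch of the 2N-storage Runge-Kutta update that
// rungekutta_step_first_half applies per substep, i.e. w <- ALPHA*w + dt*rhs(f) followed by
// f <- f + BETA*w. The device constants d_ALPHA1..3 / d_BETA1..3 are defined elsewhere
// (dconsts.cuh) and are not shown in this file; the coefficients below are the standard
// Williamson low-storage RK3 values and are only assumed here for illustration.
static float rk3_2N_sketch(float f, float dt, float (*rhs)(float))
{
    const float alpha[3] = { 0.0f, -5.0f/9.0f, -153.0f/128.0f };
    const float beta [3] = { 1.0f/3.0f, 15.0f/16.0f, 8.0f/15.0f };
    float w = 0.0f;                          // substep 1 starts from w = 0, as in the kernel
    for (int isubstep = 0; isubstep < 3; ++isubstep)
    {
        w = alpha[isubstep]*w + dt*rhs(f);   // accumulate the weighted right-hand side
        f = f + beta[isubstep]*w;            // advance the field with the substep weight
    }
    return f;
}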
db716e25baee96678654bbd84de336fff7c4a8b0.cu
/* Date: 15-12-2016 Author: Omer Anjum Description: RK integration 55-Point Comments: Omer Anjum: Changed the 19-point RK integration Kernel to 55-Point integration Kernel without changing the requirements of shared memory and simultaneously reducing the global memory traffic. The technique applied to achieve this is "scattering". Sep 09, 2017: Fixing many error */ #define EXTERN extern #include "dconsts.cuh" #include "../cparam_c.h" #include "smem.cuh" #include "hydro.cuh" #include "continuity.cuh" #include "forcing.cuh" #include "shear.cuh" #include "diff.cuh" //DEBUG #include "diagnostics.cuh" /* * Notes: * -diff functions are defined here, so that * these __device__ functions can be optimized * by the compiler when compiling rungekutta_steps. * This results in a very large speedup with the cost * of larger source files. * * -__launch_bounds__(maximum threads per block, minimum number of blocks we want to multithread on SMs) * tells the compiler how many registers we want to use: the compiler calculates the maximum amount of * registers it can use in order not to hit the register cap when we want to have certain amount of * thread blocks running on the SM. F.ex. max number of registers per SM is 65536 and we have 128-sized * thread blocks and want to multithread 8 blocks => max registers per thread = 65536 / (128*8) = 64 * * -restrict keyword tells the compiler that only one pointer is used to reference a certain value. * This enables the compiler to optimize some memory fetches to read-only cache and registers because * restrict keyword tells that the value temporarily stored to faster memory is always up-to-date and * is only modified with that specific pointer. * * -sid_column maps to threadIdx.x and sid_row maps to threadIdx.y. This is done because c++ arrays * are row-major and nearby threads access a contiguous memory area (when computing der_scalx). * e.g. the shared memory block is arranged like s_scal[Y-direction][X-direction] where X and Y * go to the same direction as X and Y in the device grids (d_lnrho etc.) * * */ //------------------------------------------------------------------------------------------------------ // // Derivative operators, 1st order // __device__ float der_scalx( int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Single derivative in x-direction // float res ; res = ( - s_scal[sid_row][sid_column-3] + d_FLT_9 * s_scal[sid_row][sid_column-2] - d_FLT_45 * s_scal[sid_row][sid_column-1] + d_FLT_45 * s_scal[sid_row][sid_column+1] - d_FLT_9 * s_scal[sid_row][sid_column+2] + s_scal[sid_row][sid_column+3] ) * d_DIFF1_DX_DIV; // / ( d_FLT_60*d_DX ); return res; } __device__ float der_scaly( int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Single derivative in y-direction // float res ; res = ( - s_scal[sid_row-3][sid_column] + d_FLT_9 * s_scal[sid_row-2][sid_column] - d_FLT_45 * s_scal[sid_row-1][sid_column] + d_FLT_45 * s_scal[sid_row+1][sid_column] - d_FLT_9 * s_scal[sid_row+2][sid_column] + s_scal[sid_row+3][sid_column] ) * d_DIFF1_DY_DIV; // / ( d_FLT_60*d_DY ); //MV: Made these divisions to go away. -> need only be calculated once and used as a constant. 
return res; } __device__ float der_scalz( float behind3, float behind2, float behind1, float infront1, float infront2, float infront3) { // // Single derivative in z-direction // float res ; res = ( - behind3 + d_FLT_9 * behind2 - d_FLT_45 * behind1 + d_FLT_45 * infront1 - d_FLT_9 * infront2 + infront3 ) * d_DIFF1_DZ_DIV; // / ( d_FLT_60*d_DZ ); return res; } //------------------------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------------------------ // // Derivative operators, 2nd order // __device__ float der2_scalx(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Double derivative in x-direction // float res; res = ( d_FLT_2 * s_scal[sid_row][sid_column-3] - d_FLT_27 * s_scal[sid_row][sid_column-2] + d_FLT_270 * s_scal[sid_row][sid_column-1] - d_FLT_490 * s_scal[sid_row][sid_column ] + d_FLT_270 * s_scal[sid_row][sid_column+1] - d_FLT_27 * s_scal[sid_row][sid_column+2] + d_FLT_2 * s_scal[sid_row][sid_column+3] ) * d_DIFF2_DX_DIV; // / ( d_FLT_180*d_DX*d_DX ); return res; } __device__ float der2_scaly(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Double derivative in y-direction // float res; res = ( d_FLT_2 * s_scal[sid_row-3][sid_column] - d_FLT_27 * s_scal[sid_row-2][sid_column] + d_FLT_270 * s_scal[sid_row-1][sid_column] - d_FLT_490 * s_scal[sid_row ][sid_column] + d_FLT_270 * s_scal[sid_row+1][sid_column] - d_FLT_27 * s_scal[sid_row+2][sid_column] + d_FLT_2 * s_scal[sid_row+3][sid_column] ) * d_DIFF2_DY_DIV; // / ( d_FLT_180*d_DY*d_DY ); return res; } __device__ float der2_scalz(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL], float behind3, float behind2, float behind1, float infront1, float infront2, float infront3) { // // Double derivative in z-direction // float res; res = ( d_FLT_2 * behind3 - d_FLT_27 * behind2 + d_FLT_270 * behind1 - d_FLT_490 * s_scal[sid_row][sid_column] + d_FLT_270 * infront1 - d_FLT_27 * infront2 + d_FLT_2 * infront3 ) * d_DIFF2_DZ_DIV; // / ( d_FLT_180*d_DY*d_DY ); return res; } __device__ float der2_scalxy(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL]) { // // Double derivative in xy-direction // float res; res = ( (float) 2.0 * ( s_scal[sid_row - 3][sid_column - 3] -s_scal[sid_row + 3][sid_column - 3] +s_scal[sid_row + 3][sid_column + 3] -s_scal[sid_row - 3][sid_column + 3]) - (float) 27.0 * ( s_scal[sid_row - 2][sid_column - 2] -s_scal[sid_row + 2][sid_column - 2] +s_scal[sid_row + 2][sid_column + 2] -s_scal[sid_row - 2][sid_column + 2]) + (float) 270.0 * ( s_scal[sid_row - 1][sid_column - 1]//ok -s_scal[sid_row + 1][sid_column - 1]//ok +s_scal[sid_row + 1][sid_column + 1]//ok -s_scal[sid_row - 1][sid_column + 1])//ok )* d_DIFFMN_DXDY_DIV; return res; } __device__ float der2_scalxz(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL], float res[]) { // // Double derivative in xz-direction // res[0] = d_DIFFMN_DXDZ_DIV*d_FLT_2 * (-s_scal[sid_row ][sid_column + 3] + s_scal[sid_row ][sid_column - 3]); res[1] = -d_DIFFMN_DXDZ_DIV*d_FLT_27 * (-s_scal[sid_row ][sid_column + 2] + s_scal[sid_row ][sid_column - 2]); res[2] = d_DIFFMN_DXDZ_DIV*d_FLT_270 * (-s_scal[sid_row ][sid_column + 1] + s_scal[sid_row ][sid_column - 1]); return 0; } __device__ float der2_scalyz(int sid_row, int sid_column, float s_scal[SHARED_SIZE_ROW][SHARED_SIZE_COL], float res[]) { // // Double derivative in 
yz-direction // res[0] = d_DIFFMN_DYDZ_DIV*d_FLT_2 * (-s_scal[sid_row + 3][sid_column] + s_scal[sid_row - 3][sid_column]); res[1] = -d_DIFFMN_DYDZ_DIV*d_FLT_27 * (-s_scal[sid_row + 2][sid_column] + s_scal[sid_row - 2][sid_column]); res[2] = d_DIFFMN_DYDZ_DIV*d_FLT_270 * (-s_scal[sid_row + 1][sid_column] + s_scal[sid_row - 1][sid_column]); return 0; } static __device__ void nebla_nebla_div(int sid_row, int sid_column, float s_uu_x[][SHARED_SIZE_COL], float s_uu_y[][SHARED_SIZE_COL], float s_uu_z[][SHARED_SIZE_COL], float div_z_partial_ux[], float div_z_partial_uy[], float div_z_partial_uz[], int zplane){ //Calculate front if(zplane - 3 >= 0 && zplane - 3 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[0] += d_DIFFMN_DXDZ_DIV*(float) 2.0 * (s_uu_z[sid_row ][sid_column + 3]- s_uu_z[sid_row ][sid_column - 3]); div_z_partial_uy[0] += d_DIFFMN_DYDZ_DIV*(float) 2.0 * (s_uu_z[sid_row + 3][sid_column]- s_uu_z[sid_row - 3][sid_column]); div_z_partial_uz[0] += (d_DIFFMN_DXDZ_DIV*(float) 2.0 * (s_uu_x[sid_row ][sid_column + 3]- s_uu_x[sid_row ][sid_column - 3])+ d_DIFFMN_DYDZ_DIV*(float) 2.0 * (s_uu_y[sid_row + 3][sid_column]- s_uu_y[sid_row - 3][sid_column])); } if(zplane - 2 >= 0 && zplane - 2 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[1] += -d_DIFFMN_DXDZ_DIV*(float) 27.0 * (s_uu_z[sid_row ][sid_column + 2]- s_uu_z[sid_row ][sid_column - 2]); div_z_partial_uy[1] += -d_DIFFMN_DYDZ_DIV*(float) 27.0 * (s_uu_z[sid_row + 2][sid_column]- s_uu_z[sid_row - 2][sid_column]); div_z_partial_uz[1] += (-d_DIFFMN_DXDZ_DIV*(float) 27.0 * (s_uu_x[sid_row ][sid_column + 2]- s_uu_x[sid_row ][sid_column - 2])+ (-d_DIFFMN_DYDZ_DIV)*(float) 27.0 * (s_uu_y[sid_row + 2][sid_column]- s_uu_y[sid_row - 2][sid_column])); } if(zplane - 1 >= 0 && zplane - 1 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[2] += d_DIFFMN_DXDZ_DIV*(float) 270.0 * (s_uu_z[sid_row ][sid_column + 1]- s_uu_z[sid_row ][sid_column - 1]); div_z_partial_uy[2] += d_DIFFMN_DYDZ_DIV*(float) 270.0 * (s_uu_z[sid_row + 1][sid_column]- s_uu_z[sid_row - 1][sid_column]); div_z_partial_uz[2] += (d_DIFFMN_DXDZ_DIV*(float) 270.0 * (s_uu_x[sid_row ][sid_column + 1]- s_uu_x[sid_row ][sid_column - 1])+ d_DIFFMN_DYDZ_DIV*(float) 270.0 * (s_uu_y[sid_row + 1][sid_column]- s_uu_y[sid_row - 1][sid_column])); } // div_z_partial_xx[3] += 0; if(zplane + 1 >= 0 && zplane + 1 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[4] -= d_DIFFMN_DXDZ_DIV*(float) 270.0 * (s_uu_z[sid_row ][sid_column + 1]- s_uu_z[sid_row ][sid_column - 1]); div_z_partial_uy[4] -= d_DIFFMN_DYDZ_DIV*(float) 270.0 * (s_uu_z[sid_row + 1][sid_column]- s_uu_z[sid_row - 1][sid_column]); div_z_partial_uz[4] -= (d_DIFFMN_DXDZ_DIV*(float) 270.0 * (s_uu_x[sid_row ][sid_column + 1]- s_uu_x[sid_row ][sid_column - 1])+ d_DIFFMN_DYDZ_DIV*(float) 270.0 * ( s_uu_y[sid_row + 1][sid_column]- s_uu_y[sid_row - 1][sid_column])); } if(zplane + 2 >= 0 && zplane + 2 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[5] -= -d_DIFFMN_DXDZ_DIV*(float) 27.0 * (s_uu_z[sid_row ][sid_column + 2]- s_uu_z[sid_row ][sid_column - 2]); div_z_partial_uy[5] -= -d_DIFFMN_DYDZ_DIV*(float) 27.0 * (s_uu_z[sid_row + 2][sid_column]- s_uu_z[sid_row - 2][sid_column]); div_z_partial_uz[5] -= (-d_DIFFMN_DXDZ_DIV*(float) 27.0 * (s_uu_x[sid_row ][sid_column + 2]- s_uu_x[sid_row ][sid_column - 2])+ (-d_DIFFMN_DYDZ_DIV)*(float) 27.0 * (s_uu_y[sid_row + 2][sid_column]- s_uu_y[sid_row - 2][sid_column])); } if(zplane + 3 >= 0 && zplane + 3 < RK_ELEMS_PER_THREAD_FIRST) { div_z_partial_ux[6] = -d_DIFFMN_DXDZ_DIV*(float) 2.0 * (s_uu_z[sid_row ][sid_column + 3]- 
s_uu_z[sid_row ][sid_column - 3]); div_z_partial_uy[6] = -d_DIFFMN_DYDZ_DIV*(float) 2.0 * (s_uu_z[sid_row + 3][sid_column]- s_uu_z[sid_row - 3][sid_column]); div_z_partial_uz[6] = -(d_DIFFMN_DXDZ_DIV*(float) 2.0 * (s_uu_x[sid_row ][sid_column + 3]- s_uu_x[sid_row ][sid_column - 3])+ d_DIFFMN_DYDZ_DIV*(float) 2.0 * (s_uu_y[sid_row + 3][sid_column]- s_uu_y[sid_row - 3][sid_column])); } } //------------------------------------------------------------------------------------------------------ template <int step_number> __global__ void __launch_bounds__(RK_THREADS_PER_BLOCK, 4) rungekutta_step_first_half(const float* __restrict__ d_lnrho, const float* __restrict__ d_uu_x, const float* __restrict__ d_uu_y, const float* __restrict__ d_uu_z, float* __restrict__ d_w_lnrho, float* __restrict__ d_w_uu_x, float* __restrict__ d_w_uu_y, float* __restrict__ d_w_uu_z, float* __restrict__ d_lnrho_dest, float* __restrict__ d_uu_x_dest, float* __restrict__ d_uu_y_dest, float* __restrict__ d_uu_z_dest, int isubstep) { float ALPHA, BETA; switch (isubstep) { case 1: ALPHA = d_ALPHA1; BETA = d_BETA1; break; case 2: ALPHA = d_ALPHA2; BETA = d_BETA2; break; case 3: ALPHA = d_ALPHA3; BETA = d_BETA3; break; } __shared__ float s_lnrho[SHARED_SIZE_ROW][SHARED_SIZE_COL]; //SHARED_SIZE_ROW (RK_THREADS_Y + 2*BOUND_SIZE) = (4 + 2*3) = 10 __shared__ float s_uu_x [SHARED_SIZE_ROW][SHARED_SIZE_COL]; //SHARED_SIZE_COL (RK_THREADS_X + 2*BOUND_SIZE) = (32 + 2*3) = 38 __shared__ float s_uu_y [SHARED_SIZE_ROW][SHARED_SIZE_COL]; __shared__ float s_uu_z [SHARED_SIZE_ROW][SHARED_SIZE_COL]; float w_lnrho = NAN; float w_uu_x = NAN; float w_uu_y = NAN; float w_uu_z = NAN; const int grid_idx_x = threadIdx.x + blockIdx.x*blockDim.x; const int grid_idx_y = threadIdx.y + blockIdx.y*blockDim.y; const int grid_idx_z = threadIdx.z + blockIdx.z*blockDim.z*RK_ELEMS_PER_THREAD_FIRST; const int sid_col = threadIdx.x + BOUND_SIZE; //Varies between (3, blockDim.x + 3) if BOUND_SIZE == 3 const int sid_row = threadIdx.y + BOUND_SIZE; //Varies between (3, blockDim.y + 3) //Index in the partial result array (doesn't have boundary zones) int w_grid_idx = (grid_idx_x) + (grid_idx_y)*d_W_GRID_Y_OFFSET + (grid_idx_z)*d_W_GRID_Z_OFFSET; //Index in the final result array (offset to start from first index of //the computational domain) //int grid_idx = (grid_idx_x + d_CX_BOT) + // (grid_idx_y + d_CY_BOT)*d_GRID_Y_OFFSET + // (grid_idx_z + d_CZ_BOT)*d_GRID_Z_OFFSET; int grid_idx = (grid_idx_x + d_CX_BOT) + (grid_idx_y + d_CY_BOT)*d_GRID_Y_OFFSET + (grid_idx_z + 0)*d_GRID_Z_OFFSET; // Only in zplane we are in halo zone float current_lnrho = d_lnrho[grid_idx]; float current_uu_x = d_uu_x[grid_idx]; float current_uu_y = d_uu_y[grid_idx]; float current_uu_z = d_uu_z[grid_idx]; float infront1_lnrho = d_lnrho[grid_idx + 1*d_GRID_Z_OFFSET]; float infront2_lnrho = d_lnrho[grid_idx + 2*d_GRID_Z_OFFSET]; float infront3_lnrho = d_lnrho[grid_idx + 3*d_GRID_Z_OFFSET]; float infront1_uu_x = d_uu_x[grid_idx + 1*d_GRID_Z_OFFSET]; float infront2_uu_x = d_uu_x[grid_idx + 2*d_GRID_Z_OFFSET]; float infront3_uu_x = d_uu_x[grid_idx + 3*d_GRID_Z_OFFSET]; float infront1_uu_y = d_uu_y[grid_idx + 1*d_GRID_Z_OFFSET]; float infront2_uu_y = d_uu_y[grid_idx + 2*d_GRID_Z_OFFSET]; float infront3_uu_y = d_uu_y[grid_idx + 3*d_GRID_Z_OFFSET]; float infront1_uu_z = d_uu_z[grid_idx + 1*d_GRID_Z_OFFSET]; float infront2_uu_z = d_uu_z[grid_idx + 2*d_GRID_Z_OFFSET]; float infront3_uu_z = d_uu_z[grid_idx + 3*d_GRID_Z_OFFSET]; float behind3_lnrho = NAN; float behind2_lnrho = NAN; float behind1_lnrho = 
NAN; float behind3_uu_x = NAN; float behind2_uu_x = NAN; float behind1_uu_x = NAN; float behind3_uu_y = NAN; float behind2_uu_y = NAN; float behind1_uu_y = NAN; float behind3_uu_z = NAN; float behind2_uu_z = NAN; float behind1_uu_z = NAN; //--------------------------------------------------------- float div_z_partial_ux[(2*BOUND_SIZE) + 1] = {NAN}; float div_z_partial_uy[(2*BOUND_SIZE) + 1] = {NAN}; float div_z_partial_uz[(2*BOUND_SIZE) + 1] = {NAN}; __shared__ float mom_x[RK_THREADS_PER_BLOCK][BOUND_SIZE+1]; __shared__ float mom_y[RK_THREADS_PER_BLOCK][BOUND_SIZE+1]; __shared__ float mom_z[RK_THREADS_PER_BLOCK][BOUND_SIZE+1]; //--------------------------------------------------------- for(int zplane = -3 ; zplane < RK_ELEMS_PER_THREAD_FIRST + 3; zplane++) { switch (isubstep) { case 1: w_lnrho = 0.0f; w_uu_x = 0.0f; w_uu_y = 0.0f; w_uu_z = 0.0f; break; default: if (zplane >= 0 && zplane < RK_ELEMS_PER_THREAD_FIRST) { w_lnrho = d_w_lnrho[w_grid_idx]; }else { w_lnrho = NAN; } if(zplane - 3 >= 0 && zplane -3 < RK_ELEMS_PER_THREAD_FIRST) { const int mature_w_idx = w_grid_idx-3*d_W_GRID_Z_OFFSET; w_uu_x = d_w_uu_x [mature_w_idx]; w_uu_y = d_w_uu_y [mature_w_idx]; w_uu_z = d_w_uu_z [mature_w_idx]; }else { w_uu_x = NAN; w_uu_y = NAN; w_uu_z = NAN; } break; } //Load the previous step to shared memory s_lnrho[sid_row][sid_col] = current_lnrho; s_uu_x [sid_row][sid_col] = current_uu_x; s_uu_y [sid_row][sid_col] = current_uu_y; s_uu_z [sid_row][sid_col] = current_uu_z; //Load halos (not optimal) if (threadIdx.x < BOUND_SIZE) { //Load left s_lnrho[sid_row][sid_col-BOUND_SIZE] = d_lnrho[grid_idx - BOUND_SIZE]; // Omer: Filling in halozones of shared memory s_uu_x [sid_row][sid_col-BOUND_SIZE] = d_uu_x [grid_idx - BOUND_SIZE]; s_uu_y [sid_row][sid_col-BOUND_SIZE] = d_uu_y [grid_idx - BOUND_SIZE]; s_uu_z [sid_row][sid_col-BOUND_SIZE] = d_uu_z [grid_idx - BOUND_SIZE]; //Load right s_lnrho[sid_row][sid_col+RK_THREADS_X] = d_lnrho[grid_idx+RK_THREADS_X]; s_uu_x [sid_row][sid_col+RK_THREADS_X] = d_uu_x [grid_idx+RK_THREADS_X]; s_uu_y [sid_row][sid_col+RK_THREADS_X] = d_uu_y [grid_idx+RK_THREADS_X]; s_uu_z [sid_row][sid_col+RK_THREADS_X] = d_uu_z [grid_idx+RK_THREADS_X]; } if (threadIdx.y < BOUND_SIZE) { //Load down s_lnrho[sid_row-BOUND_SIZE][sid_col] = d_lnrho[grid_idx - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_x [sid_row-BOUND_SIZE][sid_col] = d_uu_x [grid_idx - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_y [sid_row-BOUND_SIZE][sid_col] = d_uu_y [grid_idx - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_z [sid_row-BOUND_SIZE][sid_col] = d_uu_z [grid_idx - BOUND_SIZE*d_GRID_Y_OFFSET]; //Load up s_lnrho[sid_row+RK_THREADS_Y][sid_col] = d_lnrho[grid_idx + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_x [sid_row+RK_THREADS_Y][sid_col] = d_uu_x [grid_idx + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_y [sid_row+RK_THREADS_Y][sid_col] = d_uu_y [grid_idx + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_z [sid_row+RK_THREADS_Y][sid_col] = d_uu_z [grid_idx + RK_THREADS_Y*d_GRID_Y_OFFSET]; } if(threadIdx.x < BOUND_SIZE && threadIdx.y < BOUND_SIZE){ //Load corners of size 3x3 of halo zones not loaded above in shared memory //Left Up s_lnrho[sid_row-BOUND_SIZE][sid_col-BOUND_SIZE] = d_lnrho[grid_idx - BOUND_SIZE - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_x [sid_row-BOUND_SIZE][sid_col-BOUND_SIZE] = d_uu_x[grid_idx - BOUND_SIZE - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_y [sid_row-BOUND_SIZE][sid_col-BOUND_SIZE] = d_uu_y[grid_idx - BOUND_SIZE - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_z [sid_row-BOUND_SIZE][sid_col-BOUND_SIZE] = d_uu_z[grid_idx - BOUND_SIZE - BOUND_SIZE*d_GRID_Y_OFFSET]; //Left 
Down s_lnrho[sid_row+RK_THREADS_Y][sid_col-BOUND_SIZE] = d_lnrho[grid_idx - BOUND_SIZE + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_x [sid_row+RK_THREADS_Y][sid_col-BOUND_SIZE] = d_uu_x[grid_idx - BOUND_SIZE + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_y [sid_row+RK_THREADS_Y][sid_col-BOUND_SIZE] = d_uu_y[grid_idx - BOUND_SIZE + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_z [sid_row+RK_THREADS_Y][sid_col-BOUND_SIZE] = d_uu_z[grid_idx - BOUND_SIZE + RK_THREADS_Y*d_GRID_Y_OFFSET]; //Right Up s_lnrho[sid_row-BOUND_SIZE][sid_col+RK_THREADS_X] = d_lnrho[grid_idx + RK_THREADS_X - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_x [sid_row-BOUND_SIZE][sid_col+RK_THREADS_X] = d_uu_x[grid_idx + RK_THREADS_X - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_y [sid_row-BOUND_SIZE][sid_col+RK_THREADS_X] = d_uu_y[grid_idx + RK_THREADS_X - BOUND_SIZE*d_GRID_Y_OFFSET]; s_uu_z [sid_row-BOUND_SIZE][sid_col+RK_THREADS_X] = d_uu_z[grid_idx + RK_THREADS_X - BOUND_SIZE*d_GRID_Y_OFFSET]; //Right Down s_lnrho[sid_row+RK_THREADS_Y][sid_col + RK_THREADS_X] = d_lnrho[grid_idx + RK_THREADS_X + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_x [sid_row+RK_THREADS_Y][sid_col + RK_THREADS_X] = d_uu_x[grid_idx + RK_THREADS_X + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_y [sid_row+RK_THREADS_Y][sid_col + RK_THREADS_X] = d_uu_y[grid_idx + RK_THREADS_X + RK_THREADS_Y*d_GRID_Y_OFFSET]; s_uu_z [sid_row+RK_THREADS_Y][sid_col + RK_THREADS_X] = d_uu_z[grid_idx + RK_THREADS_X + RK_THREADS_Y*d_GRID_Y_OFFSET]; } __syncthreads(); //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% nebla_nebla_div(sid_row, sid_col, s_uu_x, s_uu_y, s_uu_z, div_z_partial_ux, div_z_partial_uy, div_z_partial_uz, zplane); if(zplane >= 0 && zplane < RK_ELEMS_PER_THREAD_FIRST){ const float d2x_uu_x = der2_scalx(sid_row, sid_col, s_uu_x); const float d2xy_uu_y = der2_scalxy(sid_row, sid_col, s_uu_y); const float d2xy_uu_x = der2_scalxy(sid_row, sid_col, s_uu_x); const float d2y_uu_y = der2_scaly(sid_row, sid_col, s_uu_y); const float d2z_uu_z = der2_scalz(sid_row, sid_col, s_uu_z, behind3_uu_z, behind2_uu_z, behind1_uu_z, infront1_uu_z, infront2_uu_z, infront3_uu_z); //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% //Solve derivatives const float ddz_lnrho = der_scalz( behind3_lnrho, behind2_lnrho, behind1_lnrho, infront1_lnrho, infront2_lnrho, infront3_lnrho ); const float ddz_uu_x = der_scalz( behind3_uu_x, behind2_uu_x, behind1_uu_x, infront1_uu_x, infront2_uu_x, infront3_uu_x ); const float ddz_uu_y = der_scalz( behind3_uu_y, behind2_uu_y, behind1_uu_y, infront1_uu_y, infront2_uu_y, infront3_uu_y ); const float ddz_uu_z = der_scalz( behind3_uu_z, behind2_uu_z, behind1_uu_z, infront1_uu_z, infront2_uu_z, infront3_uu_z ); const float ddx_lnrho = der_scalx(sid_row, sid_col, s_lnrho); const float ddx_uu_x = der_scalx(sid_row, sid_col, s_uu_x); const float ddx_uu_y = der_scalx(sid_row, sid_col, s_uu_y); const float ddx_uu_z = der_scalx(sid_row, sid_col, s_uu_z); const float ddy_lnrho = der_scaly(sid_row, sid_col, s_lnrho); const float ddy_uu_x = der_scaly(sid_row, sid_col, s_uu_x); const float ddy_uu_y = der_scaly(sid_row, sid_col, s_uu_y); const float ddy_uu_z = der_scaly(sid_row, sid_col, s_uu_z); //Save the divergence field of uu to global memory //d_div_uu[grid_idx] = ddx_uu_x + ddy_uu_y + ddz_uu_z; // Omer: nebla.u_i Eq(.1) //Continuity const float cont_res = - (s_uu_x[sid_row][sid_col] * ddx_lnrho + s_uu_y[sid_row][sid_col] * ddy_lnrho + s_uu_z[sid_row][sid_col] * ddz_lnrho) - (ddx_uu_x + ddy_uu_y + ddz_uu_z); // Omer: -(u.nebla)rho - nebla.u Eq(.2) //ILP: compute 
nu_const_uu and S_grad_lnrho before using cont_res //Omer: Eq(.6) const float nu_const_uu_x = der2_scalx(sid_row, sid_col, s_uu_x) + der2_scaly(sid_row, sid_col, s_uu_x) + der2_scalz(sid_row, sid_col, s_uu_x, behind3_uu_x, behind2_uu_x, behind1_uu_x, infront1_uu_x, infront2_uu_x, infront3_uu_x); const float nu_const_uu_y = der2_scalx(sid_row, sid_col, s_uu_y) + der2_scaly(sid_row, sid_col, s_uu_y) + der2_scalz(sid_row, sid_col, s_uu_y, behind3_uu_y, behind2_uu_y, behind1_uu_y, infront1_uu_y, infront2_uu_y, infront3_uu_y); const float nu_const_uu_z = der2_scalx(sid_row, sid_col, s_uu_z) + der2_scaly(sid_row, sid_col, s_uu_z) + der2_scalz(sid_row, sid_col, s_uu_z, behind3_uu_z, behind2_uu_z, behind1_uu_z, infront1_uu_z, infront2_uu_z, infront3_uu_z); //S_grad_lnrho //Eq(.9) const float Sxx = (2.0f/3.0f)*ddx_uu_x - (1.0f/3.0f)*(ddy_uu_y + ddz_uu_z); const float Sxy = 0.5f*(ddy_uu_x + ddx_uu_y); const float Sxz = 0.5f*(ddz_uu_x + ddx_uu_z); const float Syy = (2.0f/3.0f)*ddy_uu_y - (1.0f/3.0f)*(ddx_uu_x + ddz_uu_z); const float Syz = 0.5f*(ddz_uu_y + ddy_uu_z); const float Szz = (2.0f/3.0f)*ddz_uu_z - (1.0f/3.0f)*(ddx_uu_x + ddy_uu_y); //Use cont_res to compute w_lnrho w_lnrho = ALPHA*w_lnrho + d_DT*cont_res; //Omer: Second line Algo. 3 updating rho //Navier-Stokes //if ( blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){ // printf("%d---writing mom_[%d] \n", zplane , zplane%4); //} mom_x[(threadIdx.y*RK_THREADS_X)+threadIdx.x][zplane%4] = - (s_uu_x[sid_row][sid_col] * ddx_uu_x + //vec_dot_nabla_scal s_uu_y[sid_row][sid_col] * ddy_uu_x + s_uu_z[sid_row][sid_col] * ddz_uu_x) - d_CS2_SOUND*ddx_lnrho //ddx part of grad lnrho + d_NU_VISC * nu_const_uu_x //nu_const + 2.0f*d_NU_VISC*(Sxx*ddx_lnrho + Sxy*ddy_lnrho + Sxz*ddz_lnrho)+d_NU_VISC*(1.0f/3.0f)*(d2x_uu_x + d2xy_uu_y); //S_grad_lnrho mom_y[(threadIdx.y*RK_THREADS_X)+threadIdx.x][zplane%4] = - (s_uu_x[sid_row][sid_col] * ddx_uu_y + //vec_dot_nabla_scal s_uu_y[sid_row][sid_col] * ddy_uu_y + s_uu_z[sid_row][sid_col] * ddz_uu_y) - d_CS2_SOUND*ddy_lnrho //ddy part of grad lnrho + d_NU_VISC * nu_const_uu_y //nu_const + 2.0f*d_NU_VISC*(Sxy*ddx_lnrho + Syy*ddy_lnrho + Syz*ddz_lnrho)+d_NU_VISC*(1.0f/3.0f)*(d2xy_uu_x + d2y_uu_y); //S_grad_lnrho mom_z[(threadIdx.y*RK_THREADS_X)+threadIdx.x][zplane%4] = - (s_uu_x[sid_row][sid_col] * ddx_uu_z + //vec_dot_nabla_scal s_uu_y[sid_row][sid_col] * ddy_uu_z + s_uu_z[sid_row][sid_col] * ddz_uu_z) - d_CS2_SOUND*ddz_lnrho //ddz part of grad lnrho + d_NU_VISC * nu_const_uu_z //nu_const + 2.0f*d_NU_VISC*(Sxz*ddx_lnrho + Syz*ddy_lnrho + Szz*ddz_lnrho)+d_NU_VISC*(1.0f/3.0f)*d2z_uu_z; //S_grad_lnrho d_lnrho_dest[grid_idx] = s_lnrho[sid_row][sid_col] + BETA*w_lnrho; d_w_lnrho[w_grid_idx] = w_lnrho; } //use the output which is mature now if(zplane - 3 >= 0 && zplane - 3 < RK_ELEMS_PER_THREAD_FIRST) { const float div_uux = d_NU_VISC*(1.0f/3.0f)*(div_z_partial_ux[0]); const float div_uuy = d_NU_VISC*(1.0f/3.0f)*(div_z_partial_uy[0]); const float div_uuz = d_NU_VISC*(1.0f/3.0f)*(div_z_partial_uz[0]); w_uu_x = ALPHA*w_uu_x + d_DT*(mom_x[(threadIdx.y*RK_THREADS_X)+threadIdx.x][(4+zplane+1)%4] + div_uux); w_uu_y = ALPHA*w_uu_y + d_DT*(mom_y[(threadIdx.y*RK_THREADS_X)+threadIdx.x][(4+zplane+1)%4] + div_uuy); w_uu_z = ALPHA*w_uu_z + d_DT*(mom_z[(threadIdx.y*RK_THREADS_X)+threadIdx.x][(4+zplane+1)%4] + div_uuz); d_uu_x_dest [grid_idx-3*d_GRID_Z_OFFSET] = behind3_uu_x + BETA*w_uu_x; d_uu_y_dest [grid_idx-3*d_GRID_Z_OFFSET] = behind3_uu_y + BETA*w_uu_y; d_uu_z_dest 
[grid_idx-3*d_GRID_Z_OFFSET] = behind3_uu_z + BETA*w_uu_z; d_w_uu_x [w_grid_idx-3*d_W_GRID_Z_OFFSET] = w_uu_x; d_w_uu_y [w_grid_idx-3*d_W_GRID_Z_OFFSET] = w_uu_y; d_w_uu_z [w_grid_idx-3*d_W_GRID_Z_OFFSET] = w_uu_z; } // Shift div_z_partial_ux[0] = div_z_partial_ux[1]; div_z_partial_ux[1] = div_z_partial_ux[2]; div_z_partial_ux[2] = div_z_partial_ux[3]; div_z_partial_ux[3] = div_z_partial_ux[4]; div_z_partial_ux[4] = div_z_partial_ux[5]; div_z_partial_ux[5] = div_z_partial_ux[6]; div_z_partial_ux[6] = NAN; div_z_partial_uy[0] = div_z_partial_uy[1]; div_z_partial_uy[1] = div_z_partial_uy[2]; div_z_partial_uy[2] = div_z_partial_uy[3]; div_z_partial_uy[3] = div_z_partial_uy[4]; div_z_partial_uy[4] = div_z_partial_uy[5]; div_z_partial_uy[5] = div_z_partial_uy[6]; div_z_partial_uy[6] = NAN; div_z_partial_uz[0] = div_z_partial_uz[1]; div_z_partial_uz[1] = div_z_partial_uz[2]; div_z_partial_uz[2] = div_z_partial_uz[3]; div_z_partial_uz[3] = div_z_partial_uz[4]; div_z_partial_uz[4] = div_z_partial_uz[5]; div_z_partial_uz[5] = div_z_partial_uz[6]; div_z_partial_uz[6] = NAN; //else continue grid_idx += d_GRID_Z_OFFSET; if (zplane >= 0) w_grid_idx += d_W_GRID_Z_OFFSET; //Reuse data in registers and update infront3 behind3_lnrho = behind2_lnrho; behind2_lnrho = behind1_lnrho; behind1_lnrho = s_lnrho[sid_row][sid_col]; current_lnrho = infront1_lnrho; infront1_lnrho = infront2_lnrho; infront2_lnrho = infront3_lnrho; behind3_uu_x = behind2_uu_x; behind2_uu_x = behind1_uu_x; behind1_uu_x = s_uu_x[sid_row][sid_col]; current_uu_x = infront1_uu_x; infront1_uu_x = infront2_uu_x; infront2_uu_x = infront3_uu_x; behind3_uu_y = behind2_uu_y; behind2_uu_y = behind1_uu_y; behind1_uu_y = s_uu_y[sid_row][sid_col]; current_uu_y = infront1_uu_y; infront1_uu_y = infront2_uu_y; infront2_uu_y = infront3_uu_y; behind3_uu_z = behind2_uu_z; behind2_uu_z = behind1_uu_z; behind1_uu_z = s_uu_z[sid_row][sid_col]; current_uu_z = infront1_uu_z; infront1_uu_z = infront2_uu_z; infront2_uu_z = infront3_uu_z; if(zplane < RK_ELEMS_PER_THREAD_FIRST-1){ infront3_lnrho = d_lnrho[grid_idx + 3*d_GRID_Z_OFFSET]; infront3_uu_x = d_uu_x[grid_idx + 3*d_GRID_Z_OFFSET]; infront3_uu_y = d_uu_y[grid_idx + 3*d_GRID_Z_OFFSET]; infront3_uu_z = d_uu_z[grid_idx + 3*d_GRID_Z_OFFSET]; } else{ infront3_lnrho = NAN; infront3_uu_x = NAN; infront3_uu_y = NAN; infront3_uu_z = NAN; } __syncthreads(); }// loop ends } //---------------------------------------------------------- // Manages the calculation on 2N-Runge-Kutta for a single timestep //---------------------------------------------------------- void rungekutta2N_cuda( float* d_lnrho, float* d_uu_x, float* d_uu_y, float* d_uu_z, float* d_w_lnrho, float* d_w_uu_x, float* d_w_uu_y, float* d_w_uu_z, float* d_lnrho_dest, float* d_uu_x_dest, float* d_uu_y_dest, float* d_uu_z_dest, int isubstep) { //Determine threadblock dims (TODO better solution, define?) 
static dim3 threadsPerBlock, blocksPerGridFirst, blocksPerGridSecond; threadsPerBlock.x = RK_THREADS_X; //RK_THREADS_X = 32 threadsPerBlock.y = RK_THREADS_Y; //RK_THREADS_Y = 4 threadsPerBlock.z = RK_THREADS_Z; //RK_THREADS_Z = 1 assert(RK_THREADS_Z == 1); blocksPerGridFirst.x = ceil((float) COMP_DOMAIN_SIZE_X / (float)threadsPerBlock.x); //128 / 32 = 4 blocksPerGridFirst.y = ceil((float) COMP_DOMAIN_SIZE_Y / (float)threadsPerBlock.y); //128 / 4 = 32 blocksPerGridFirst.z = ceil((float) COMP_DOMAIN_SIZE_Z / (float)(threadsPerBlock.z*RK_ELEMS_PER_THREAD_FIRST)); //128 / (1*8) = 16 blocksPerGridSecond.x = ceil((float) COMP_DOMAIN_SIZE_X / (float)threadsPerBlock.x); blocksPerGridSecond.y = ceil((float) COMP_DOMAIN_SIZE_Y / (float)threadsPerBlock.y); blocksPerGridSecond.z = ceil((float) COMP_DOMAIN_SIZE_Z / (float)(threadsPerBlock.z*RK_ELEMS_PER_THREAD_SECOND)); //Calculate steps in kernels // Step 1: //------------------------------------------------------------------------------------------------------------------------------- //FIRST HALF rungekutta_step_first_half<0><<<blocksPerGridFirst, threadsPerBlock>>>(d_lnrho, d_uu_x, d_uu_y, d_uu_z, d_w_lnrho, d_w_uu_x, d_w_uu_y, d_w_uu_z, d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, isubstep); /* //cudaDeviceSynchronize(); //checkKernelErr(); //SECOND HALF ////periodic_boundcond_scal_cuda(d_div_uu); //Boundary conditions for the divergence field //cudaDeviceSynchronize(); //checkKernelErr(); ////rungekutta_step_second_half<0><<<blocksPerGridSecond, threadsPerBlock>>>(d_div_uu, //// d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, //// d_w_uu_x, d_w_uu_y, d_w_uu_z); //cudaDeviceSynchronize(); //checkKernelErr(); boundcond_cuda(d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest); //cudaDeviceSynchronize(); //checkKernelErr(); //------------------------------------------------------------------------------------------------------------------------------- // Step 2: //------------------------------------------------------------------------------------------------------------------------------- //FIRST HALF rungekutta_step_first_half<1><<<blocksPerGridFirst, threadsPerBlock>>>(d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, d_w_lnrho, d_w_uu_x, d_w_uu_y, d_w_uu_z, d_lnrho, d_uu_x, d_uu_y, d_uu_z, d_div_uu); //cudaDeviceSynchronize(); //checkKernelErr(); //SECOND HALF ////periodic_boundcond_scal_cuda(d_div_uu); //Boundary conditions for the divergence field //cudaDeviceSynchronize(); //checkKernelErr(); ////rungekutta_step_second_half<1><<<blocksPerGridSecond, threadsPerBlock>>>(d_div_uu, //// d_uu_x, d_uu_y, d_uu_z, //// d_w_uu_x, d_w_uu_y, d_w_uu_z); //cudaDeviceSynchronize(); //checkKernelErr(); boundcond_cuda(d_lnrho, d_uu_x, d_uu_y, d_uu_z); //cudaDeviceSynchronize(); //checkKernelErr(); //------------------------------------------------------------------------------------------------------------------------------- //TIME START cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); // Step 3: //------------------------------------------------------------------------------------------------------------------------------- //FIRST HALF rungekutta_step_first_half<2><<<blocksPerGridFirst, threadsPerBlock>>>(d_lnrho, d_uu_x, d_uu_y, d_uu_z, d_w_lnrho, d_w_uu_x, d_w_uu_y, d_w_uu_z, d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, d_div_uu); //cudaDeviceSynchronize(); //checkKernelErr(); //SECOND HALF ////periodic_boundcond_scal_cuda(d_div_uu);; //Boundary conditions for the divergence field 
//cudaDeviceSynchronize(); //checkKernelErr(); ////rungekutta_step_second_half<2><<<blocksPerGridSecond, threadsPerBlock>>>(d_div_uu, //// d_uu_x_dest, d_uu_y_dest, d_uu_z_dest, //// d_w_uu_x, d_w_uu_y, d_w_uu_z); //cudaDeviceSynchronize(); //checkKernelErr(); boundcond_cuda(d_lnrho_dest, d_uu_x_dest, d_uu_y_dest, d_uu_z_dest); //cudaDeviceSynchronize(); //checkKernelErr(); //------------------------------------------------------------------------------------------------------------------------------- //TIME END cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); printf("A Single rungekutta step time elapsed: \t%f ms\n", time); */ cudaDeviceSynchronize(); }
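The comments in rungekutta2N_cuda above spell out the grid-size arithmetic (128/32 = 4, 128/4 = 32, 128/(1*8) = 16 blocks). Below is a minimal host-side sketch that reproduces that arithmetic, assuming the constants quoted in those comments (a 128^3 computational domain, 32x4x1 thread blocks, RK_ELEMS_PER_THREAD_FIRST = 8); the constant names are illustrative stand-ins for the real configuration macros.

#include <cstdio>
#include <cmath>

int main() {
    // Assumed values, taken from the inline comments in rungekutta2N_cuda.
    const int COMP_DOMAIN_SIZE_X = 128, COMP_DOMAIN_SIZE_Y = 128, COMP_DOMAIN_SIZE_Z = 128;
    const int RK_THREADS_X = 32, RK_THREADS_Y = 4, RK_THREADS_Z = 1;
    const int RK_ELEMS_PER_THREAD_FIRST = 8;

    // Same ceil-of-ratio computation the kernel launch uses.
    int gx = (int)ceil((float)COMP_DOMAIN_SIZE_X / (float)RK_THREADS_X);                               // 4
    int gy = (int)ceil((float)COMP_DOMAIN_SIZE_Y / (float)RK_THREADS_Y);                               // 32
    int gz = (int)ceil((float)COMP_DOMAIN_SIZE_Z / (float)(RK_THREADS_Z * RK_ELEMS_PER_THREAD_FIRST)); // 16
    printf("blocksPerGridFirst = (%d, %d, %d)\n", gx, gy, gz);
    return 0;
}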
97dda833a70a14207338f9a9cf0ca58edbf900a6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hetu_gpu_table.h"
#include "common/helper_cuda.h"

using namespace hetuCTR;

unsigned long hetuCTR::__seed = 0;

void HetuTable::pushPull(embed_t *grad, embed_t *dst) {
    checkCudaErrors(hipSetDevice(device_id_));
    // If no grad is provided, then this batch is considered an inference batch.
    // Set shapes in previous batch to 0, so that no kernel will be launched and no data will be sent and received
    if (grad == nullptr) {
        prev_batch_.batch_size = 0;
        prev_batch_.unique_size = 0;
        for (int i = 0; i <= nrank_; i++) {
            prev_batch_.h_shape[i] = 0;
            prev_batch_.h_shape_exchanged[i] = 0;
        }
    }
    generateGradient(grad);
    generateQuery();
    all2allExchangeQuery();
    handleGradient();
    handleQuery();
    checkCudaErrors(hipStreamSynchronize(stream_main_));
    all2allReturnValue();
    writeBack(dst);
    checkCudaErrors(hipStreamSynchronize(stream_main_));
    return;
}

void HetuTable::preprocess(index_t *data_ptr, size_t batch_size) {
    checkCudaErrors(hipSetDevice(device_id_));
    std::swap(cur_batch_, prev_batch_);
    if (batch_size > batch_size_reserved_) {
        allocateAuxillaryMemory(batch_size);
    }
    if (batch_size > cur_batch_.allocate_size) {
        INFO("ReAllocate cuda memory for batch ", cur_batch_.batch_size, "->" , batch_size);
        freePreprocessData(cur_batch_);
        createPreprocessData(cur_batch_, batch_size, nrank_);
    }
    cur_batch_.batch_size = batch_size;
    // sync data with this pointer on device
    checkCudaErrors(hipMemcpyAsync(
        d_this, this, sizeof(HetuTable), hipMemcpyHostToDevice, stream_main_));
    preprocessIndex(data_ptr, batch_size);
    preprocessGradient();
    checkCudaErrors(hipStreamSynchronize(stream_main_));
}
97dda833a70a14207338f9a9cf0ca58edbf900a6.cu
#include "hetu_gpu_table.h" #include "common/helper_cuda.h" using namespace hetuCTR; unsigned long hetuCTR::__seed = 0; void HetuTable::pushPull(embed_t *grad, embed_t *dst) { checkCudaErrors(cudaSetDevice(device_id_)); // If no grad is provided, than this batch is considered as inference batch. // Set shapes in previous batch to 0, so that no kernel will be launched and no data will be sent and received if (grad == nullptr) { prev_batch_.batch_size = 0; prev_batch_.unique_size = 0; for (int i = 0; i <= nrank_; i++) { prev_batch_.h_shape[i] = 0; prev_batch_.h_shape_exchanged[i] = 0; } } generateGradient(grad); generateQuery(); all2allExchangeQuery(); handleGradient(); handleQuery(); checkCudaErrors(cudaStreamSynchronize(stream_main_)); all2allReturnValue(); writeBack(dst); checkCudaErrors(cudaStreamSynchronize(stream_main_)); return; } void HetuTable::preprocess(index_t *data_ptr, size_t batch_size) { checkCudaErrors(cudaSetDevice(device_id_)); std::swap(cur_batch_, prev_batch_); if (batch_size > batch_size_reserved_) { allocateAuxillaryMemory(batch_size); } if (batch_size > cur_batch_.allocate_size) { INFO("ReAllocate cuda memory for batch ", cur_batch_.batch_size, "->" , batch_size); freePreprocessData(cur_batch_); createPreprocessData(cur_batch_, batch_size, nrank_); } cur_batch_.batch_size = batch_size; // sync data with this pointer on device checkCudaErrors(cudaMemcpyAsync( d_this, this, sizeof(HetuTable), cudaMemcpyHostToDevice, stream_main_)); preprocessIndex(data_ptr, batch_size); preprocessGradient(); checkCudaErrors(cudaStreamSynchronize(stream_main_)); }
765937d4b217e76958eb0359ea9cf9276e7789e3.hip
// !!! This is a file automatically generated by hipify!!! // // Copyright (c) 2010-2015, Raymond Tay, Singapore // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <thrust/host_vector.h> #if __CUDA_ARCH__ == 200 #include <thrust/device_vector.h> #elif __CUDA_ARCH__ == 100 #include <thrust/device_ptr.h> #endif #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <cstdlib> #include <cmath> #include <iostream> struct powfunctor { __host__ __device__ double operator()(const float& p, const float& q) const { return pow( (float)(p - q), 2); } //double operator()(const float& p, const float& q) const { // return p+q; //} }; // gold solution to compute x + y template <typename InputIterator1, typename InputIterator2> double computeGold_2(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2) { double sum = 0.0; for(; (first1 != last1) && (first2 != last2); ++first1, ++first2) { sum += *first1 + *first2; } std::cout << "Gold=" << sum << std::endl; return sum; } template <typename InputIterator1, typename InputIterator2> double computeGold_1(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2) { float sum = 0.0; for(; (first1 != last1) && (first2 != last2); ++first1, ++first2) { sum += pow((float)(*first1 - *first2), 2); } float s1 = sqrt(sum); std::cout << "Gold=" << s1 << std::endl; return s1; } int main(void) { #if __CUDA_ARCH__ == 200 thrust::device_vector<float> p_vec(1 << 20); thrust::device_vector<float> q_vec(1 << 20); thrust::device_vector<float> r_vec(1 << 20); thrust::generate(p_vec.begin(), p_vec.end(), rand); thrust::generate(q_vec.begin(), q_vec.end(), rand); // Current Thrust's transformations supports 2 input vectors, so we use it thrust::transform(p_vec.begin(), p_vec.end(), q_vec.begin(), r_vec.begin(), powfunctor()); float sum = thrust::reduce(r_vec.begin(), r_vec.end(), (int)0, thrust::plus<float>()); std::cout << "sqrt(" << sum << ")=" << sqrt(sum) << std::endl; // #elif __CUDA_ARCH__ == 100 #else unsigned int 
N = 1 << 20; thrust::host_vector<float> p_vec(N); thrust::host_vector<float> q_vec(N); thrust::host_vector<float> r_vec(N); srand(0); thrust::generate(p_vec.begin(), p_vec.end(), rand); thrust::generate(q_vec.begin(), q_vec.end(), rand); float referenceSoln = computeGold_1(p_vec.begin(), p_vec.end(), q_vec.begin(), q_vec.end()); // device memory 'raw' pointers float* raw_ptr_P; float* raw_ptr_Q; float* raw_ptr_R; hipMalloc( (void**)&raw_ptr_P, (N)*sizeof(float)); hipMalloc( (void**)&raw_ptr_Q, (N)*sizeof(float)); hipMalloc( (void**)&raw_ptr_R, (N)*sizeof(float)); thrust::device_ptr<float> dev_ptr_P(raw_ptr_P); thrust::device_ptr<float> dev_ptr_Q(raw_ptr_Q); thrust::device_ptr<float> dev_ptr_R(raw_ptr_R); thrust::copy(p_vec.begin(), p_vec.end(), dev_ptr_P); thrust::copy(q_vec.begin(), q_vec.end(), dev_ptr_Q); // uncommenting the following will produce errors for 1.x devices // complaining that CUDA doesn't support function pointers and function // templates. reason is because a host function like 'rand' cannot be // executed in the device i.e. GPU //thrust::generate(dev_ptr_P, dev_ptr_Q + N, rand); //thrust::generate(dev_ptr_Q, dev_ptr_Q + N, rand); thrust::transform(dev_ptr_P, dev_ptr_P + N, dev_ptr_Q, dev_ptr_R, powfunctor()); float sum = thrust::reduce(dev_ptr_R, dev_ptr_R + N, (float)0, thrust::plus<float>()); std::cout << "1. GPU " << sqrt(sum) << std::endl; std::cout << "2. CPU " << referenceSoln << std::endl; #endif std::cout << "END" << std::endl; return 0; }
765937d4b217e76958eb0359ea9cf9276e7789e3.cu
// // Copyright (c) 2010-2015, Raymond Tay, Singapore // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <thrust/host_vector.h> #if __CUDA_ARCH__ == 200 #include <thrust/device_vector.h> #elif __CUDA_ARCH__ == 100 #include <thrust/device_ptr.h> #endif #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <cstdlib> #include <cmath> #include <iostream> struct powfunctor { __host__ __device__ double operator()(const float& p, const float& q) const { return pow( (float)(p - q), 2); } //double operator()(const float& p, const float& q) const { // return p+q; //} }; // gold solution to compute x + y template <typename InputIterator1, typename InputIterator2> double computeGold_2(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2) { double sum = 0.0; for(; (first1 != last1) && (first2 != last2); ++first1, ++first2) { sum += *first1 + *first2; } std::cout << "Gold=" << sum << std::endl; return sum; } template <typename InputIterator1, typename InputIterator2> double computeGold_1(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2) { float sum = 0.0; for(; (first1 != last1) && (first2 != last2); ++first1, ++first2) { sum += pow((float)(*first1 - *first2), 2); } float s1 = sqrt(sum); std::cout << "Gold=" << s1 << std::endl; return s1; } int main(void) { #if __CUDA_ARCH__ == 200 thrust::device_vector<float> p_vec(1 << 20); thrust::device_vector<float> q_vec(1 << 20); thrust::device_vector<float> r_vec(1 << 20); thrust::generate(p_vec.begin(), p_vec.end(), rand); thrust::generate(q_vec.begin(), q_vec.end(), rand); // Current Thrust's transformations supports 2 input vectors, so we use it thrust::transform(p_vec.begin(), p_vec.end(), q_vec.begin(), r_vec.begin(), powfunctor()); float sum = thrust::reduce(r_vec.begin(), r_vec.end(), (int)0, thrust::plus<float>()); std::cout << "sqrt(" << sum << ")=" << sqrt(sum) << std::endl; // #elif __CUDA_ARCH__ == 100 #else unsigned int N = 1 << 20; thrust::host_vector<float> p_vec(N); 
thrust::host_vector<float> q_vec(N); thrust::host_vector<float> r_vec(N); srand(0); thrust::generate(p_vec.begin(), p_vec.end(), rand); thrust::generate(q_vec.begin(), q_vec.end(), rand); float referenceSoln = computeGold_1(p_vec.begin(), p_vec.end(), q_vec.begin(), q_vec.end()); // device memory 'raw' pointers float* raw_ptr_P; float* raw_ptr_Q; float* raw_ptr_R; cudaMalloc( (void**)&raw_ptr_P, (N)*sizeof(float)); cudaMalloc( (void**)&raw_ptr_Q, (N)*sizeof(float)); cudaMalloc( (void**)&raw_ptr_R, (N)*sizeof(float)); thrust::device_ptr<float> dev_ptr_P(raw_ptr_P); thrust::device_ptr<float> dev_ptr_Q(raw_ptr_Q); thrust::device_ptr<float> dev_ptr_R(raw_ptr_R); thrust::copy(p_vec.begin(), p_vec.end(), dev_ptr_P); thrust::copy(q_vec.begin(), q_vec.end(), dev_ptr_Q); // uncommenting the following will produce errors for 1.x devices // complaining that CUDA doesn't support function pointers and function // templates. reason is because a host function like 'rand' cannot be // executed in the device i.e. GPU //thrust::generate(dev_ptr_P, dev_ptr_Q + N, rand); //thrust::generate(dev_ptr_Q, dev_ptr_Q + N, rand); thrust::transform(dev_ptr_P, dev_ptr_P + N, dev_ptr_Q, dev_ptr_R, powfunctor()); float sum = thrust::reduce(dev_ptr_R, dev_ptr_R + N, (float)0, thrust::plus<float>()); std::cout << "1. GPU " << sqrt(sum) << std::endl; std::cout << "2. CPU " << referenceSoln << std::endl; #endif std::cout << "END" << std::endl; return 0; }
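One detail worth flagging in the device_vector branch of this example: thrust::reduce is seeded with (int)0, so the reduction accumulates in int rather than float. Below is a minimal sketch, not from the original file, of the same sum-of-squared-differences reduction with a float seed and the two passes fused into a single thrust::transform_reduce; the functor name and vector contents are illustrative.

#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <cmath>
#include <iostream>

struct sq_diff {
    __host__ __device__ float operator()(const thrust::tuple<float, float>& t) const {
        float d = thrust::get<0>(t) - thrust::get<1>(t);
        return d * d;  // squared difference of one element pair
    }
};

int main() {
    thrust::device_vector<float> p(1 << 20, 1.0f), q(1 << 20, 3.0f);
    auto first = thrust::make_zip_iterator(thrust::make_tuple(p.begin(), q.begin()));
    auto last  = thrust::make_zip_iterator(thrust::make_tuple(p.end(),   q.end()));
    // Float seed keeps the accumulation in floating point.
    float sum = thrust::transform_reduce(first, last, sq_diff(), 0.0f, thrust::plus<float>());
    std::cout << "distance = " << std::sqrt(sum) << std::endl;  // sqrt(4 * 2^20) = 2048
    return 0;
}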
16439f3341324da040a072d4a84679c2913b364a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hipfft.h> #include "helper_cuda.hpp" #include "cu_image.hpp" #include <opencv2/highgui.hpp> #include <opencv2/imgproc.hpp> #include <iostream> // The images contained are padded. // fullSquareSize is with overlap, but without padding. // Actual image sizes are pitch by pitch, which includes half zeros along each axis struct ImagePatches { float *buf; int fullSquareSize, overlap, stepSize; int n, nw, nh, pitch; void release(); }; void ImagePatches::release() { hipFree(buf); } static void show_patches(const ImagePatches& ps, const char* name, int wait=0) { //int w = ps.nw * ps.fullSquareSize; //int h = ps.nh * ps.fullSquareSize; //cv::Mat dimg(h,w, CV_32F); int s = ps.fullSquareSize; int p = ps.pitch; for (int yy=0; yy<ps.nh; yy++) for (int xx=0; xx<ps.nw; xx++) { //hipMemcpy(dimg(cv::Rect{xx*s,yy*s,s,s}).data, ps.buf+(yy*ps.nw+xx)*s*s, sizeof(float)*s*s, hipMemcpyDeviceToHost); cv::Mat dimg(s,s,CV_32F); //hipMemcpy(dimg.data, ps.buf+(yy*ps.nw+xx)*p*p, sizeof(float)*s*s, hipMemcpyDeviceToHost); hipMemcpy2D(dimg.data, s*sizeof(float), ps.buf+(yy*ps.nw+xx)*p*p, p*sizeof(float), sizeof(float)*s,s, hipMemcpyDeviceToHost); hipDeviceSynchronize(); double min, max; cv::minMaxLoc(dimg,&min,&max); //std::cout << " - min max " << min << " " << max << "\n"; cv::normalize(dimg,dimg, 0, 255, cv::NORM_MINMAX, CV_8UC1); cv::imshow(name, dimg); cv::waitKey(wait); } //double min, max; cv::minMaxLoc(dimg,&min,&max); //std::cout << " - min max " << min << " " << max << "\n"; //cv::normalize(dimg,dimg, 0, 255, cv::NORM_MINMAX, CV_8UC1); //cv::resize(dimg, dimg, cv::Size{1000, 1000*dimg.rows/dimg.cols}); //cv::imshow(name, dimg); cv::waitKey(wait); } static void show_corr(const ImagePatches& pa, const ImagePatches& pb, const float *corr, const char* name, int wait=0) { hipDeviceSynchronize(); int s = pa.fullSquareSize; int p = pa.pitch; cv::Mat dimg(s,3*s,CV_32F); cv::Mat dimgs[5]; dimgs[0] = cv::Mat(p,p,CV_8UC1); dimgs[1] = cv::Mat(p,p,CV_8UC1); dimgs[2] = cv::Mat(p,p,CV_8UC1); dimgs[3] = cv::Mat(p,p,CV_32F); dimgs[4] = cv::Mat(p,p,CV_32F); for (int yy=0; yy<pa.nh; yy++) for (int xx=0; xx<pa.nw; xx++) { //hipMemcpy2D(dimg.data+0*s*sizeof(float), 3*s*sizeof(float), pa.buf+(yy*pa.nw+xx)*p*p, p*sizeof(float), sizeof(float)*s,s, hipMemcpyDeviceToHost); //hipMemcpy2D(dimg.data+1*s*sizeof(float), 3*s*sizeof(float), pb.buf+(yy*pb.nw+xx)*p*p, p*sizeof(float), sizeof(float)*s,s, hipMemcpyDeviceToHost); //hipMemcpy2D(dimg.data+2*s*sizeof(float), 3*s*sizeof(float), corr+(yy*pa.nw+xx)*p*p, p*sizeof(float), sizeof(float)*s,s, hipMemcpyDeviceToHost); hipMemcpy2D(dimgs[3].data, 1*p*sizeof(float), pa.buf+(yy*pa.nw+xx)*p*p, p*sizeof(float), sizeof(float)*p,p, hipMemcpyDeviceToHost); cv::normalize(dimgs[3],dimgs[1], 0, 255, cv::NORM_MINMAX, CV_8UC1); hipMemcpy2D(dimgs[3].data, 1*p*sizeof(float), pb.buf+(yy*pb.nw+xx)*p*p, p*sizeof(float), sizeof(float)*p,p, hipMemcpyDeviceToHost); cv::normalize(dimgs[3],dimgs[2], 0, 255, cv::NORM_MINMAX, CV_8UC1); hipMemcpy2D(dimgs[3].data, 1*p*sizeof(float), corr+(yy*pa.nw+xx)*p*p, p*sizeof(float), sizeof(float)*p,p, hipMemcpyDeviceToHost); // FFT Shift //dimgs[3](cv::Rect{0,0,p/2,p/2}).copyTo(dimgs[4](cv::Rect{p/2,p/2,p/2,p/2})); //dimgs[3](cv::Rect{p/2,0,p/2,p/2}).copyTo(dimgs[4](cv::Rect{0,p/2,p/2,p/2})); //dimgs[3](cv::Rect{0,p/2,p/2,p/2}).copyTo(dimgs[4](cv::Rect{p/2,0,p/2,p/2})); //dimgs[3](cv::Rect{p/2,p/2,p/2,p/2}).copyTo(dimgs[4](cv::Rect{0,0,p/2,p/2})); dimgs[4] = dimgs[3].clone(); 
cv::Point mini,maxi; double min, max; cv::minMaxLoc(dimgs[4],&min,&max,&mini,&maxi); //std::cout << " - corr min max " << min << " " << max << " | " << maxi << "\n"; cv::normalize(dimgs[4],dimgs[0], 0, 255, cv::NORM_MINMAX, CV_8UC1); cv::hconcat(dimgs, 3, dimg); cv::cvtColor(dimg,dimg, cv::COLOR_GRAY2BGR); cv::circle(dimg, maxi, 5, cv::Scalar{0,255,0}, 1); cv::imshow(name, dimg); cv::waitKey(wait); } } __global__ void slice_and_pad_(float* outs, int squaresWide, int squareSize, int overlapSize, int pitch, float* inImg, int w, int h, bool flip) { int qy = blockIdx.x; int qx = blockIdx.y; int fullSquareSize = squareSize + 2 * overlapSize; int rowOffset = threadIdx.x + blockIdx.x * (squareSize); int colOffset = blockIdx.y * squareSize; //int inRowOff = qy * squareSize / (fullSquareSize); //int inColOff = qx * squareSize / (fullSquareSize); // Apply linear fall-of at edges of big image, and Hamming window on each patch float edy = min(1.f, ( min(rowOffset, h - rowOffset)/32.f)); float ham_y = pow(sin(M_PI*threadIdx.x/fullSquareSize), 2.0); for (int c=0; c<fullSquareSize; c++) { float v = 0; if (colOffset+c < w and rowOffset < h) v = inImg[rowOffset*w+colOffset+c]; float window_v = 1; // Window the original image boundaries! float edx = min(1.f, ( min(colOffset+c, w - colOffset-c)/32.f)); v = v * edx * edy; // Window the patch boundaries! float ham_x = pow(sin(M_PI*c/fullSquareSize), 2.0); v = v * ham_x * ham_y; int yy = (pitch-fullSquareSize)/2+threadIdx.x; int xx = c + (pitch-fullSquareSize)/2; if (flip) { yy = pitch - 1 - yy; xx = pitch - 1 - xx; } outs[(qy*squaresWide+qx)*pitch*pitch + (yy)*pitch + xx] = v; } } ImagePatches slice_and_pad(CuImage<float>& inImg, bool flip) { int squareSize = 256, overlapSize = 128/2; int fullSquareSize = squareSize + 2 * overlapSize; //int inW = 1024, inH = 1024; int inW = inImg.w, inH = inImg.h; int squaresHigh = (inH + squareSize - 1) / squareSize; int squaresWide = (inW + squareSize - 1) / squareSize; // Strictly speaking, I should append W zeros along both axes. 
// However, with the hamming window it appears to be good enough //int pitch = (fullSquareSize * 1); int pitch = (fullSquareSize * 2); ImagePatches outs; outs.n = squaresWide * squaresHigh; outs.nw = squaresWide; outs.nh = squaresHigh; outs.fullSquareSize = fullSquareSize; outs.overlap = overlapSize; outs.pitch = pitch; outs.stepSize = squareSize; printf(" - Slicing img (%d %d) to get (%d * %d**2) patches.\n", inH,inW, outs.n, outs.fullSquareSize); hipMalloc(&outs.buf, pitch*pitch*squaresWide*squaresHigh*sizeof(float)); hipMemset(outs.buf, 0,pitch*pitch*squaresWide*squaresHigh*sizeof(float)); hipDeviceSynchronize(); dim3 blk(squaresHigh, squaresWide); dim3 thr(fullSquareSize); getLastCudaError("pre"); hipLaunchKernelGGL(( slice_and_pad_), dim3(blk),dim3(thr), 0, 0, outs.buf, squaresWide, squareSize, overlapSize, pitch, inImg.buf,inImg.w,inImg.h,flip); getLastCudaError("post slice"); return outs; } __device__ float2 cmplx_mult(const float2& u, const float2& v, const float& scale) { return make_float2( scale * (u.x*v.x - u.y*v.y), scale * (u.x*v.y + u.y*v.x) ); } #include <thrust/execution_policy.h> #include <thrust/transform.h> #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> void multiply_spectra(float2* out, const float2* a, const float2* b, int l, int n) { float scale = 1.f / n; auto it = thrust::make_zip_iterator(thrust::make_tuple(a,b)); thrust::transform(thrust::device, it, it+l*n, out, [=]__device__(const auto& a) { return cmplx_mult(thrust::get<0>(a), thrust::get<1>(a), scale); }); } void fft_shift(float* out, float* corrs, int n, int w, int h) { auto it = thrust::make_transform_iterator(thrust::counting_iterator<int>(0), [=]__device__(const int& i) { int b = i / (w*h); int y = (i % (w*h)) / w; int x = (i % (w*h)) % w; y = (y - h / 2); x = (x - w / 2); if (y < 0) y += h; if (x < 0) x += w; return b*w*h + y*w + x; }); thrust::gather(thrust::device, it, it+n*w*h, corrs, out); } void fft_get_max_location(int2* outs, float* corrs, int n, int w, int h) { thrust::tuple<int,float>* tmp; hipMallocManaged(&tmp, sizeof(thrust::tuple<int,float>)*n); auto it = thrust::make_zip_iterator(thrust::make_tuple( thrust::counting_iterator<int>(0), corrs)); auto it2 = thrust::reduce_by_key(thrust::device, it, it+n*w*h, it, thrust::make_discard_iterator(), tmp, [=]__device__(const auto& a, const auto& b) { return thrust::get<0>(a) / (w*h) == thrust::get<0>(b) / (w*h); }, [=]__device__(const auto& a, const auto& b) { //printf(" - comparing %f %f\n", thrust::get<1>(a) , thrust::get<1>(b)); return thrust::get<1>(a) > thrust::get<1>(b) ? 
a : b; }); //std::cout << " - Had " << it2.second - tmp << " keys.\n"; hipDeviceSynchronize(); for (int i=0; i<n; i++) { //std::cout << " - ind " << thrust::get<0>(tmp[i]) //<< " i " << thrust::get<0>(tmp[i]) % (w*h) //<< " x " << (thrust::get<0>(tmp[i]) % (w*h)) % w //<< " y " << (thrust::get<0>(tmp[i]) % (w*h)) / w //<< " wh " << w << " " << h //<< "\n"; int xx = (thrust::get<0>(tmp[i]) % (w*h)) % w - w/2; int yy = (thrust::get<0>(tmp[i]) % (w*h)) / w - h/2; outs[i] = make_int2(xx,yy); } hipFree(tmp); } #define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__) inline void __cufftSafeCall(hipfftResult err, const char *file, const int line) { if( HIPFFT_SUCCESS != err) { fprintf(stderr, "CUFFT error in file '%s', line %d\n %s\nerror %d: %s\nterminating!\n",__FILE__, __LINE__,err, \ _cudaGetErrorEnum(err)); \ hipDeviceReset(); assert(0); \ } } void do_corr(CuImage<float>& imga, CuImage<float>& imgb) { ImagePatches patchesa = slice_and_pad(imga, false); ImagePatches patchesb = slice_and_pad(imgb, true); hipDeviceSynchronize(); show_patches(patchesa, "Patches", 1); show_patches(patchesb, "Patches", 1); std::cout << " - Showed Patches." << std::endl; //int size = patchesa.fullSquareSize; int size = patchesa.pitch; hipfftHandle planFwd, planInv; //checkCudaErrors(hipfftPlan2d(&fftPlanFwd, size, size, HIPFFT_R2C)); //checkCudaErrors(hipfftPlan2d(&fftPlanInv, size, size, HIPFFT_C2R)); int sizes[2] = { size, size }; size_t workSize1, workSize2; size_t batch = patchesa.n; int inembed[2] = {size, size}; int onembed[2] = {size, size}; int istride=1, ostride=1, idist=size*size, odist=size*size; std::cout << " - Making plans." << std::endl; //cufftSafeCall(hipfftMakePlanMany(planFwd, 2, sizes, inembed,istride,idist, onembed,ostride,odist, HIPFFT_R2C, batch, &workSize1)); //cufftSafeCall(hipfftMakePlanMany(planInv, 2, sizes, inembed,istride,idist, onembed,ostride,odist, HIPFFT_R2C, batch, &workSize2)); cufftSafeCall(hipfftPlanMany(&planFwd, 2, sizes, inembed,istride,idist, onembed,ostride,odist, HIPFFT_R2C, batch)); cufftSafeCall(hipfftPlanMany(&planInv, 2, sizes, inembed,istride,idist, onembed,ostride,odist, HIPFFT_C2R, batch)); std::cout << " - Made plans." << std::endl; float2 *fa, *fb, *fab; int n = patchesa.n; int l = size*size; checkCudaErrors(hipMalloc(&fa, sizeof(float2)*l*n)); checkCudaErrors(hipMalloc(&fb, sizeof(float2)*l*n)); checkCudaErrors(hipMalloc(&fab, sizeof(float2)*l*n)); checkCudaErrors(hipMemset(fab, 0, sizeof(float2)*l*n)); std::cout << " - Allocated buffers." << std::endl; cufftSafeCall(hipfftExecR2C(planFwd, (hipfftReal*)patchesa.buf, (hipfftComplex*)fa)); cufftSafeCall(hipfftExecR2C(planFwd, (hipfftReal*)patchesb.buf, (hipfftComplex*)fb)); std::cout << " - Exec'ed ffts." << std::endl; multiply_spectra(fab, fa, fb, l, n); std::cout << " - Multiplied spectra." << std::endl; //hipMemset(fa, 0,patchesa.pitch*patchesa.pitch*patchesa.nw*patchesa.nh*sizeof(float)); cufftSafeCall(hipfftExecC2R(planInv, (hipfftComplex*)fab, (hipfftReal*)fab)); std::cout << " - Exec'ed ifft." 
<< std::endl; float* ifab = (float*)fa; fft_shift(ifab, (float*)fab, n, size, size); //show_corr(patchesa, patchesb, (float*)ifab, "corr", 0); int2 maxLocations[n]; fft_get_max_location(maxLocations, (float*)ifab, n, size, size); //for (int i=0; i<n; i++) std::cout << " - max at " << maxLocations[i].x << " " << maxLocations[i].y << "\n"; // Show offsets cv::Mat dimg0(imga.h,imga.w,CV_32F), dimga, dimgb; hipMemcpy(dimg0.data, imga.buf, sizeof(float)*imga.w*imga.h, hipMemcpyDeviceToHost); cv::normalize(dimg0,dimga, 0, 255, cv::NORM_MINMAX, CV_8UC1); hipMemcpy(dimg0.data, imgb.buf, sizeof(float)*imgb.w*imgb.h, hipMemcpyDeviceToHost); cv::normalize(dimg0,dimgb, 0, 255, cv::NORM_MINMAX, CV_8UC1); cv::Mat dimg(imga.h, imga.w, CV_8UC3); for (int y=0; y<imga.h; y++) for (int x=0; x<imga.w; x++) { uint8_t b = dimga.at<uint8_t>(y,x); uint8_t g = dimgb.at<uint8_t>(y,x); dimg.at<cv::Vec3b>(y,x) = cv::Vec3b{b,g,0}; } for (int yy=0; yy<patchesa.nh; yy++) for (int xx=0; xx<patchesa.nw; xx++) { int i = yy*patchesa.nw + xx; int y0 = yy * patchesa.stepSize + patchesa.stepSize / 2 + maxLocations[i].x; int x0 = xx * patchesa.stepSize + patchesa.stepSize / 2 + maxLocations[i].y; cv::Point pt0 { y0 , x0 }; for (int dy=-1; dy<1; dy++) for (int dx=-1; dx<1; dx++) { if (dy == 0 and dx == 0) continue; if (dy == dx) continue; if (x0+dx > 0 and y0+dy > 0) { int j = (yy+dy)*patchesa.nw + (dx+xx); int y1 = (yy + dy) * patchesa.stepSize + patchesa.stepSize / 2 + maxLocations[j].x; int x1 = (xx + dx) * patchesa.stepSize + patchesa.stepSize / 2 + maxLocations[j].y; cv::Point pt1 { y1 , x1 }; std::cout << " - line " << y0 << " " << x0 << " " << pt0 << " " << pt1 << " " << patchesa.stepSize << "\n"; cv::line(dimg, pt0, pt1, cv::Scalar{0,255,0}, 1); } } } //cv::resize(dimg,dimg, cv::Size{1024, 1024*dimg.rows/dimg.cols}); cv::pyrDown(dimg,dimg); cv::imshow("Grid", dimg); cv::waitKey(0); hipFree(fab); hipFree(fb); hipFree(fa); patchesa.release(); patchesb.release(); }
16439f3341324da040a072d4a84679c2913b364a.cu
#include <cufft.h> #include "helper_cuda.hpp" #include "cu_image.hpp" #include <opencv2/highgui.hpp> #include <opencv2/imgproc.hpp> #include <iostream> // The images contained are padded. // fullSquareSize is with overlap, but without padding. // Actual image sizes are pitch by pitch, which includes half zeros along each axis struct ImagePatches { float *buf; int fullSquareSize, overlap, stepSize; int n, nw, nh, pitch; void release(); }; void ImagePatches::release() { cudaFree(buf); } static void show_patches(const ImagePatches& ps, const char* name, int wait=0) { //int w = ps.nw * ps.fullSquareSize; //int h = ps.nh * ps.fullSquareSize; //cv::Mat dimg(h,w, CV_32F); int s = ps.fullSquareSize; int p = ps.pitch; for (int yy=0; yy<ps.nh; yy++) for (int xx=0; xx<ps.nw; xx++) { //cudaMemcpy(dimg(cv::Rect{xx*s,yy*s,s,s}).data, ps.buf+(yy*ps.nw+xx)*s*s, sizeof(float)*s*s, cudaMemcpyDeviceToHost); cv::Mat dimg(s,s,CV_32F); //cudaMemcpy(dimg.data, ps.buf+(yy*ps.nw+xx)*p*p, sizeof(float)*s*s, cudaMemcpyDeviceToHost); cudaMemcpy2D(dimg.data, s*sizeof(float), ps.buf+(yy*ps.nw+xx)*p*p, p*sizeof(float), sizeof(float)*s,s, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); double min, max; cv::minMaxLoc(dimg,&min,&max); //std::cout << " - min max " << min << " " << max << "\n"; cv::normalize(dimg,dimg, 0, 255, cv::NORM_MINMAX, CV_8UC1); cv::imshow(name, dimg); cv::waitKey(wait); } //double min, max; cv::minMaxLoc(dimg,&min,&max); //std::cout << " - min max " << min << " " << max << "\n"; //cv::normalize(dimg,dimg, 0, 255, cv::NORM_MINMAX, CV_8UC1); //cv::resize(dimg, dimg, cv::Size{1000, 1000*dimg.rows/dimg.cols}); //cv::imshow(name, dimg); cv::waitKey(wait); } static void show_corr(const ImagePatches& pa, const ImagePatches& pb, const float *corr, const char* name, int wait=0) { cudaDeviceSynchronize(); int s = pa.fullSquareSize; int p = pa.pitch; cv::Mat dimg(s,3*s,CV_32F); cv::Mat dimgs[5]; dimgs[0] = cv::Mat(p,p,CV_8UC1); dimgs[1] = cv::Mat(p,p,CV_8UC1); dimgs[2] = cv::Mat(p,p,CV_8UC1); dimgs[3] = cv::Mat(p,p,CV_32F); dimgs[4] = cv::Mat(p,p,CV_32F); for (int yy=0; yy<pa.nh; yy++) for (int xx=0; xx<pa.nw; xx++) { //cudaMemcpy2D(dimg.data+0*s*sizeof(float), 3*s*sizeof(float), pa.buf+(yy*pa.nw+xx)*p*p, p*sizeof(float), sizeof(float)*s,s, cudaMemcpyDeviceToHost); //cudaMemcpy2D(dimg.data+1*s*sizeof(float), 3*s*sizeof(float), pb.buf+(yy*pb.nw+xx)*p*p, p*sizeof(float), sizeof(float)*s,s, cudaMemcpyDeviceToHost); //cudaMemcpy2D(dimg.data+2*s*sizeof(float), 3*s*sizeof(float), corr+(yy*pa.nw+xx)*p*p, p*sizeof(float), sizeof(float)*s,s, cudaMemcpyDeviceToHost); cudaMemcpy2D(dimgs[3].data, 1*p*sizeof(float), pa.buf+(yy*pa.nw+xx)*p*p, p*sizeof(float), sizeof(float)*p,p, cudaMemcpyDeviceToHost); cv::normalize(dimgs[3],dimgs[1], 0, 255, cv::NORM_MINMAX, CV_8UC1); cudaMemcpy2D(dimgs[3].data, 1*p*sizeof(float), pb.buf+(yy*pb.nw+xx)*p*p, p*sizeof(float), sizeof(float)*p,p, cudaMemcpyDeviceToHost); cv::normalize(dimgs[3],dimgs[2], 0, 255, cv::NORM_MINMAX, CV_8UC1); cudaMemcpy2D(dimgs[3].data, 1*p*sizeof(float), corr+(yy*pa.nw+xx)*p*p, p*sizeof(float), sizeof(float)*p,p, cudaMemcpyDeviceToHost); // FFT Shift //dimgs[3](cv::Rect{0,0,p/2,p/2}).copyTo(dimgs[4](cv::Rect{p/2,p/2,p/2,p/2})); //dimgs[3](cv::Rect{p/2,0,p/2,p/2}).copyTo(dimgs[4](cv::Rect{0,p/2,p/2,p/2})); //dimgs[3](cv::Rect{0,p/2,p/2,p/2}).copyTo(dimgs[4](cv::Rect{p/2,0,p/2,p/2})); //dimgs[3](cv::Rect{p/2,p/2,p/2,p/2}).copyTo(dimgs[4](cv::Rect{0,0,p/2,p/2})); dimgs[4] = dimgs[3].clone(); cv::Point mini,maxi; double min, max; 
cv::minMaxLoc(dimgs[4],&min,&max,&mini,&maxi); //std::cout << " - corr min max " << min << " " << max << " | " << maxi << "\n"; cv::normalize(dimgs[4],dimgs[0], 0, 255, cv::NORM_MINMAX, CV_8UC1); cv::hconcat(dimgs, 3, dimg); cv::cvtColor(dimg,dimg, cv::COLOR_GRAY2BGR); cv::circle(dimg, maxi, 5, cv::Scalar{0,255,0}, 1); cv::imshow(name, dimg); cv::waitKey(wait); } } __global__ void slice_and_pad_(float* outs, int squaresWide, int squareSize, int overlapSize, int pitch, float* inImg, int w, int h, bool flip) { int qy = blockIdx.x; int qx = blockIdx.y; int fullSquareSize = squareSize + 2 * overlapSize; int rowOffset = threadIdx.x + blockIdx.x * (squareSize); int colOffset = blockIdx.y * squareSize; //int inRowOff = qy * squareSize / (fullSquareSize); //int inColOff = qx * squareSize / (fullSquareSize); // Apply linear fall-of at edges of big image, and Hamming window on each patch float edy = min(1.f, ( min(rowOffset, h - rowOffset)/32.f)); float ham_y = pow(sin(M_PI*threadIdx.x/fullSquareSize), 2.0); for (int c=0; c<fullSquareSize; c++) { float v = 0; if (colOffset+c < w and rowOffset < h) v = inImg[rowOffset*w+colOffset+c]; float window_v = 1; // Window the original image boundaries! float edx = min(1.f, ( min(colOffset+c, w - colOffset-c)/32.f)); v = v * edx * edy; // Window the patch boundaries! float ham_x = pow(sin(M_PI*c/fullSquareSize), 2.0); v = v * ham_x * ham_y; int yy = (pitch-fullSquareSize)/2+threadIdx.x; int xx = c + (pitch-fullSquareSize)/2; if (flip) { yy = pitch - 1 - yy; xx = pitch - 1 - xx; } outs[(qy*squaresWide+qx)*pitch*pitch + (yy)*pitch + xx] = v; } } ImagePatches slice_and_pad(CuImage<float>& inImg, bool flip) { int squareSize = 256, overlapSize = 128/2; int fullSquareSize = squareSize + 2 * overlapSize; //int inW = 1024, inH = 1024; int inW = inImg.w, inH = inImg.h; int squaresHigh = (inH + squareSize - 1) / squareSize; int squaresWide = (inW + squareSize - 1) / squareSize; // Strictly speaking, I should append W zeros along both axes. 
// However, with the hamming window it appears to be good enough //int pitch = (fullSquareSize * 1); int pitch = (fullSquareSize * 2); ImagePatches outs; outs.n = squaresWide * squaresHigh; outs.nw = squaresWide; outs.nh = squaresHigh; outs.fullSquareSize = fullSquareSize; outs.overlap = overlapSize; outs.pitch = pitch; outs.stepSize = squareSize; printf(" - Slicing img (%d %d) to get (%d * %d**2) patches.\n", inH,inW, outs.n, outs.fullSquareSize); cudaMalloc(&outs.buf, pitch*pitch*squaresWide*squaresHigh*sizeof(float)); cudaMemset(outs.buf, 0,pitch*pitch*squaresWide*squaresHigh*sizeof(float)); cudaDeviceSynchronize(); dim3 blk(squaresHigh, squaresWide); dim3 thr(fullSquareSize); getLastCudaError("pre"); slice_and_pad_<<<blk,thr>>>(outs.buf, squaresWide, squareSize, overlapSize, pitch, inImg.buf,inImg.w,inImg.h,flip); getLastCudaError("post slice"); return outs; } __device__ float2 cmplx_mult(const float2& u, const float2& v, const float& scale) { return make_float2( scale * (u.x*v.x - u.y*v.y), scale * (u.x*v.y + u.y*v.x) ); } #include <thrust/execution_policy.h> #include <thrust/transform.h> #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> void multiply_spectra(float2* out, const float2* a, const float2* b, int l, int n) { float scale = 1.f / n; auto it = thrust::make_zip_iterator(thrust::make_tuple(a,b)); thrust::transform(thrust::device, it, it+l*n, out, [=]__device__(const auto& a) { return cmplx_mult(thrust::get<0>(a), thrust::get<1>(a), scale); }); } void fft_shift(float* out, float* corrs, int n, int w, int h) { auto it = thrust::make_transform_iterator(thrust::counting_iterator<int>(0), [=]__device__(const int& i) { int b = i / (w*h); int y = (i % (w*h)) / w; int x = (i % (w*h)) % w; y = (y - h / 2); x = (x - w / 2); if (y < 0) y += h; if (x < 0) x += w; return b*w*h + y*w + x; }); thrust::gather(thrust::device, it, it+n*w*h, corrs, out); } void fft_get_max_location(int2* outs, float* corrs, int n, int w, int h) { thrust::tuple<int,float>* tmp; cudaMallocManaged(&tmp, sizeof(thrust::tuple<int,float>)*n); auto it = thrust::make_zip_iterator(thrust::make_tuple( thrust::counting_iterator<int>(0), corrs)); auto it2 = thrust::reduce_by_key(thrust::device, it, it+n*w*h, it, thrust::make_discard_iterator(), tmp, [=]__device__(const auto& a, const auto& b) { return thrust::get<0>(a) / (w*h) == thrust::get<0>(b) / (w*h); }, [=]__device__(const auto& a, const auto& b) { //printf(" - comparing %f %f\n", thrust::get<1>(a) , thrust::get<1>(b)); return thrust::get<1>(a) > thrust::get<1>(b) ? 
a : b; }); //std::cout << " - Had " << it2.second - tmp << " keys.\n"; cudaDeviceSynchronize(); for (int i=0; i<n; i++) { //std::cout << " - ind " << thrust::get<0>(tmp[i]) //<< " i " << thrust::get<0>(tmp[i]) % (w*h) //<< " x " << (thrust::get<0>(tmp[i]) % (w*h)) % w //<< " y " << (thrust::get<0>(tmp[i]) % (w*h)) / w //<< " wh " << w << " " << h //<< "\n"; int xx = (thrust::get<0>(tmp[i]) % (w*h)) % w - w/2; int yy = (thrust::get<0>(tmp[i]) % (w*h)) / w - h/2; outs[i] = make_int2(xx,yy); } cudaFree(tmp); } #define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__) inline void __cufftSafeCall(cufftResult err, const char *file, const int line) { if( CUFFT_SUCCESS != err) { fprintf(stderr, "CUFFT error in file '%s', line %d\n %s\nerror %d: %s\nterminating!\n",__FILE__, __LINE__,err, \ _cudaGetErrorEnum(err)); \ cudaDeviceReset(); assert(0); \ } } void do_corr(CuImage<float>& imga, CuImage<float>& imgb) { ImagePatches patchesa = slice_and_pad(imga, false); ImagePatches patchesb = slice_and_pad(imgb, true); cudaDeviceSynchronize(); show_patches(patchesa, "Patches", 1); show_patches(patchesb, "Patches", 1); std::cout << " - Showed Patches." << std::endl; //int size = patchesa.fullSquareSize; int size = patchesa.pitch; cufftHandle planFwd, planInv; //checkCudaErrors(cufftPlan2d(&fftPlanFwd, size, size, CUFFT_R2C)); //checkCudaErrors(cufftPlan2d(&fftPlanInv, size, size, CUFFT_C2R)); int sizes[2] = { size, size }; size_t workSize1, workSize2; size_t batch = patchesa.n; int inembed[2] = {size, size}; int onembed[2] = {size, size}; int istride=1, ostride=1, idist=size*size, odist=size*size; std::cout << " - Making plans." << std::endl; //cufftSafeCall(cufftMakePlanMany(planFwd, 2, sizes, inembed,istride,idist, onembed,ostride,odist, CUFFT_R2C, batch, &workSize1)); //cufftSafeCall(cufftMakePlanMany(planInv, 2, sizes, inembed,istride,idist, onembed,ostride,odist, CUFFT_R2C, batch, &workSize2)); cufftSafeCall(cufftPlanMany(&planFwd, 2, sizes, inembed,istride,idist, onembed,ostride,odist, CUFFT_R2C, batch)); cufftSafeCall(cufftPlanMany(&planInv, 2, sizes, inembed,istride,idist, onembed,ostride,odist, CUFFT_C2R, batch)); std::cout << " - Made plans." << std::endl; float2 *fa, *fb, *fab; int n = patchesa.n; int l = size*size; checkCudaErrors(cudaMalloc(&fa, sizeof(float2)*l*n)); checkCudaErrors(cudaMalloc(&fb, sizeof(float2)*l*n)); checkCudaErrors(cudaMalloc(&fab, sizeof(float2)*l*n)); checkCudaErrors(cudaMemset(fab, 0, sizeof(float2)*l*n)); std::cout << " - Allocated buffers." << std::endl; cufftSafeCall(cufftExecR2C(planFwd, (cufftReal*)patchesa.buf, (cufftComplex*)fa)); cufftSafeCall(cufftExecR2C(planFwd, (cufftReal*)patchesb.buf, (cufftComplex*)fb)); std::cout << " - Exec'ed ffts." << std::endl; multiply_spectra(fab, fa, fb, l, n); std::cout << " - Multiplied spectra." << std::endl; //cudaMemset(fa, 0,patchesa.pitch*patchesa.pitch*patchesa.nw*patchesa.nh*sizeof(float)); cufftSafeCall(cufftExecC2R(planInv, (cufftComplex*)fab, (cufftReal*)fab)); std::cout << " - Exec'ed ifft." 
<< std::endl; float* ifab = (float*)fa; fft_shift(ifab, (float*)fab, n, size, size); //show_corr(patchesa, patchesb, (float*)ifab, "corr", 0); int2 maxLocations[n]; fft_get_max_location(maxLocations, (float*)ifab, n, size, size); //for (int i=0; i<n; i++) std::cout << " - max at " << maxLocations[i].x << " " << maxLocations[i].y << "\n"; // Show offsets cv::Mat dimg0(imga.h,imga.w,CV_32F), dimga, dimgb; cudaMemcpy(dimg0.data, imga.buf, sizeof(float)*imga.w*imga.h, cudaMemcpyDeviceToHost); cv::normalize(dimg0,dimga, 0, 255, cv::NORM_MINMAX, CV_8UC1); cudaMemcpy(dimg0.data, imgb.buf, sizeof(float)*imgb.w*imgb.h, cudaMemcpyDeviceToHost); cv::normalize(dimg0,dimgb, 0, 255, cv::NORM_MINMAX, CV_8UC1); cv::Mat dimg(imga.h, imga.w, CV_8UC3); for (int y=0; y<imga.h; y++) for (int x=0; x<imga.w; x++) { uint8_t b = dimga.at<uint8_t>(y,x); uint8_t g = dimgb.at<uint8_t>(y,x); dimg.at<cv::Vec3b>(y,x) = cv::Vec3b{b,g,0}; } for (int yy=0; yy<patchesa.nh; yy++) for (int xx=0; xx<patchesa.nw; xx++) { int i = yy*patchesa.nw + xx; int y0 = yy * patchesa.stepSize + patchesa.stepSize / 2 + maxLocations[i].x; int x0 = xx * patchesa.stepSize + patchesa.stepSize / 2 + maxLocations[i].y; cv::Point pt0 { y0 , x0 }; for (int dy=-1; dy<1; dy++) for (int dx=-1; dx<1; dx++) { if (dy == 0 and dx == 0) continue; if (dy == dx) continue; if (x0+dx > 0 and y0+dy > 0) { int j = (yy+dy)*patchesa.nw + (dx+xx); int y1 = (yy + dy) * patchesa.stepSize + patchesa.stepSize / 2 + maxLocations[j].x; int x1 = (xx + dx) * patchesa.stepSize + patchesa.stepSize / 2 + maxLocations[j].y; cv::Point pt1 { y1 , x1 }; std::cout << " - line " << y0 << " " << x0 << " " << pt0 << " " << pt1 << " " << patchesa.stepSize << "\n"; cv::line(dimg, pt0, pt1, cv::Scalar{0,255,0}, 1); } } } //cv::resize(dimg,dimg, cv::Size{1024, 1024*dimg.rows/dimg.cols}); cv::pyrDown(dimg,dimg); cv::imshow("Grid", dimg); cv::waitKey(0); cudaFree(fab); cudaFree(fb); cudaFree(fa); patchesa.release(); patchesb.release(); }
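do_corr above builds one batched pair of 2-D plans instead of looping over patches: cufftPlanMany with rank 2 over contiguous size x size tiles, where size is the pitch (2 x 384 = 768 for the 256-pixel squares with 64-pixel overlap) and batch is patchesa.n. A stripped-down sketch of just that plan setup, with an illustrative batch count and the error handling reduced to asserts, is shown below. Note that for R2C transforms cuFFT writes only size x (size/2 + 1) complex values per tile, so the full size x size float2 buffers allocated in the file are more than large enough.

#include <cufft.h>
#include <cassert>
#include <cstdio>

int main() {
    const int size  = 768;   // pitch used by do_corr (2 * fullSquareSize)
    const int batch = 16;    // illustrative patch count
    int dims[2]    = { size, size };
    int inembed[2] = { size, size };
    int onembed[2] = { size, size };
    const int dist = size * size;  // tiles are stored contiguously, one after another

    cufftHandle fwd, inv;
    // Batched forward R2C and inverse C2R plans over all tiles at once.
    assert(cufftPlanMany(&fwd, 2, dims, inembed, 1, dist, onembed, 1, dist,
                         CUFFT_R2C, batch) == CUFFT_SUCCESS);
    assert(cufftPlanMany(&inv, 2, dims, inembed, 1, dist, onembed, 1, dist,
                         CUFFT_C2R, batch) == CUFFT_SUCCESS);
    printf("created batched plans for %d tiles of %dx%d\n", batch, size, size);
    cufftDestroy(fwd);
    cufftDestroy(inv);
    return 0;
}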
544efb54c6b9ec412f0738113455307ca955c8f8.hip
// !!! This is a file automatically generated by hipify!!! // // Created by Jacob Austin on 5/17/18. // #define GLM_FORCE_PURE #include "mass.h" Mass::Mass() { m = 1.0; dt = 0.0001; damping = 1.0; extduration = 0; force = Vec(0., 0., 0.); extforce = Vec(0., 0., 0.); maxforce = Vec(0, 0, 0); T = 0; density = 1.0; valid = true; arrayptr = nullptr; ref_count = 0; spring_count = 0; #ifdef GRAPHICS color = Vec(1.0, 0.2, 0.2); #endif } // constructor TODO fix timing void Mass::operator=(CUDA_MASS & mass) { m = mass.m; dt = mass.dt; T = mass.T; damping = mass.damping; extduration = mass.extduration; pos = mass.pos; vel = mass.vel; acc = mass.acc; force = mass.force; extforce = mass.extforce; maxforce = mass.maxforce; valid = mass.valid; ref_count = this -> ref_count; arrayptr = this -> arrayptr; #ifdef CONSTRAINTS constraints = this -> constraints; #endif #ifdef GRAPHICS color = mass.color; #endif } // Copy constructor Mass::Mass(const Mass &other) { m = other.m; pos = other.pos; origpos = other.origpos; dt = other.dt; vel = other.vel; acc = other.acc; force = other.force; index = other.index; T = other.T; density = other.density; damping = other.damping; extduration = other.extduration; extforce = other.extforce; maxforce = other.maxforce; valid = other.valid; arrayptr = nullptr; ref_count = other.ref_count; spring_count = other.spring_count; constraints.fixed = other.constraints.fixed; } Mass::Mass(const Vec & position, double mass, bool fixed, double dt) { m = mass; pos = position; origpos = position; this -> dt = dt; T = 0; damping = 1.0; density = 1.0; force = Vec(0., 0., 0.); extduration = 0; extforce = Vec(0., 0., 0.); maxforce = Vec(0, 0, 0); valid = true; arrayptr = nullptr; ref_count = 0; spring_count = 0; #ifdef GRAPHICS color = Vec(1.0, 0.2, 0.2); #endif } CUDA_MASS::CUDA_MASS(Mass &mass) { m = mass.m; dt = mass.dt; T = mass.T; damping = mass.damping; extduration = mass.extduration; pos = mass.pos; vel = mass.vel; acc = mass.acc; force = mass.force; extforce = mass.extforce; maxforce = mass.maxforce; valid = true; #ifdef CONSTRAINTS constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints); #endif #ifdef GRAPHICS color = mass.color; #endif } #ifdef CONSTRAINTS void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient if (type == 0) { this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num)); this -> constraints.num_constraint_planes++; this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data()); } else if (type == 1) { this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num)); this -> constraints.num_contact_planes++; this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data()); } else if (type == 2) { this -> constraints.ball.push_back(CudaBall(vec, num)); this -> constraints.num_balls++; this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data()); } else if (type == 3) { this -> constraints.direction.push_back(CudaDirection(vec, num)); this -> constraints.num_directions++; this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data()); } } void Mass::clearConstraints(CONSTRAINT_TYPE type) { if (type == 0) { this -> constraints.constraint_plane.clear(); this -> constraints.constraint_plane.shrink_to_fit(); this -> constraints.num_constraint_planes = 0; } else if (type == 1) { this -> constraints.contact_plane.clear(); this -> constraints.contact_plane.shrink_to_fit(); 
this -> constraints.num_contact_planes = 0; } else if (type == 2) { this -> constraints.ball.clear(); this -> constraints.ball.shrink_to_fit(); this -> constraints.num_balls = 0; } else if (type == 3) { this -> constraints.direction.clear(); this -> constraints.direction.shrink_to_fit(); this -> constraints.num_directions = 0; } } void Mass::clearConstraints() { clearConstraints(CONSTRAINT_PLANE); clearConstraints(CONTACT_PLANE); clearConstraints(DIRECTION); clearConstraints(BALL); } void Mass::fix() { this -> constraints.fixed = true; } void Mass::unfix() { this -> constraints.fixed = false; } void Mass::setDrag(double C) { this -> constraints.drag_coefficient = C; } #endif void Mass::decrementRefCount() { if (--ref_count == 0) { if (arrayptr) { hipFree(arrayptr); } delete this; } }
544efb54c6b9ec412f0738113455307ca955c8f8.cu
// // Created by Jacob Austin on 5/17/18. // #define GLM_FORCE_PURE #include "mass.h" Mass::Mass() { m = 1.0; dt = 0.0001; damping = 1.0; extduration = 0; force = Vec(0., 0., 0.); extforce = Vec(0., 0., 0.); maxforce = Vec(0, 0, 0); T = 0; density = 1.0; valid = true; arrayptr = nullptr; ref_count = 0; spring_count = 0; #ifdef GRAPHICS color = Vec(1.0, 0.2, 0.2); #endif } // constructor TODO fix timing void Mass::operator=(CUDA_MASS & mass) { m = mass.m; dt = mass.dt; T = mass.T; damping = mass.damping; extduration = mass.extduration; pos = mass.pos; vel = mass.vel; acc = mass.acc; force = mass.force; extforce = mass.extforce; maxforce = mass.maxforce; valid = mass.valid; ref_count = this -> ref_count; arrayptr = this -> arrayptr; #ifdef CONSTRAINTS constraints = this -> constraints; #endif #ifdef GRAPHICS color = mass.color; #endif } // Copy constructor Mass::Mass(const Mass &other) { m = other.m; pos = other.pos; origpos = other.origpos; dt = other.dt; vel = other.vel; acc = other.acc; force = other.force; index = other.index; T = other.T; density = other.density; damping = other.damping; extduration = other.extduration; extforce = other.extforce; maxforce = other.maxforce; valid = other.valid; arrayptr = nullptr; ref_count = other.ref_count; spring_count = other.spring_count; constraints.fixed = other.constraints.fixed; } Mass::Mass(const Vec & position, double mass, bool fixed, double dt) { m = mass; pos = position; origpos = position; this -> dt = dt; T = 0; damping = 1.0; density = 1.0; force = Vec(0., 0., 0.); extduration = 0; extforce = Vec(0., 0., 0.); maxforce = Vec(0, 0, 0); valid = true; arrayptr = nullptr; ref_count = 0; spring_count = 0; #ifdef GRAPHICS color = Vec(1.0, 0.2, 0.2); #endif } CUDA_MASS::CUDA_MASS(Mass &mass) { m = mass.m; dt = mass.dt; T = mass.T; damping = mass.damping; extduration = mass.extduration; pos = mass.pos; vel = mass.vel; acc = mass.acc; force = mass.force; extforce = mass.extforce; maxforce = mass.maxforce; valid = true; #ifdef CONSTRAINTS constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints); #endif #ifdef GRAPHICS color = mass.color; #endif } #ifdef CONSTRAINTS void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient if (type == 0) { this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num)); this -> constraints.num_constraint_planes++; this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data()); } else if (type == 1) { this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num)); this -> constraints.num_contact_planes++; this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data()); } else if (type == 2) { this -> constraints.ball.push_back(CudaBall(vec, num)); this -> constraints.num_balls++; this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data()); } else if (type == 3) { this -> constraints.direction.push_back(CudaDirection(vec, num)); this -> constraints.num_directions++; this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data()); } } void Mass::clearConstraints(CONSTRAINT_TYPE type) { if (type == 0) { this -> constraints.constraint_plane.clear(); this -> constraints.constraint_plane.shrink_to_fit(); this -> constraints.num_constraint_planes = 0; } else if (type == 1) { this -> constraints.contact_plane.clear(); this -> constraints.contact_plane.shrink_to_fit(); this -> constraints.num_contact_planes = 0; } else if (type == 
2) { this -> constraints.ball.clear(); this -> constraints.ball.shrink_to_fit(); this -> constraints.num_balls = 0; } else if (type == 3) { this -> constraints.direction.clear(); this -> constraints.direction.shrink_to_fit(); this -> constraints.num_directions = 0; } } void Mass::clearConstraints() { clearConstraints(CONSTRAINT_PLANE); clearConstraints(CONTACT_PLANE); clearConstraints(DIRECTION); clearConstraints(BALL); } void Mass::fix() { this -> constraints.fixed = true; } void Mass::unfix() { this -> constraints.fixed = false; } void Mass::setDrag(double C) { this -> constraints.drag_coefficient = C; } #endif void Mass::decrementRefCount() { if (--ref_count == 0) { if (arrayptr) { cudaFree(arrayptr); } delete this; } }
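Note on the mass.cu pair above: the only runtime-API change hipify makes here is cudaFree -> hipFree inside decrementRefCount (plus the hipify banner comment), since the rest of the file is host-side C++. The following is a minimal stand-alone sketch of that allocate/copy/free pattern, not code from the repository; the struct name DevicePayload and the values are illustrative assumptions, with the HIP equivalents noted in comments.

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical stand-in for a small device-mirrored struct such as CUDA_MASS.
struct DevicePayload { double m; double dt; };

int main() {
    DevicePayload host{1.0, 0.0001};
    DevicePayload *dev = nullptr;                                  // plays the role of Mass::arrayptr
    cudaMalloc((void**)&dev, sizeof(DevicePayload));               // HIP: hipMalloc
    cudaMemcpy(dev, &host, sizeof(DevicePayload),
               cudaMemcpyHostToDevice);                            // HIP: hipMemcpy, hipMemcpyHostToDevice
    cudaFree(dev);                                                 // HIP: hipFree, as in decrementRefCount above
    printf("allocated and freed %zu bytes on the device\n", sizeof(DevicePayload));
    return 0;
}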
b0554e08765aa6c0d0e4255d96d50dac88c4f95d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <stdio.h> #include <hip/hip_runtime.h> #define N (1024*1024) #define FULL_DATA_SIZE (N*20) #ifndef checkCudaErrors #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) void __checkCudaErrors(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "checkCudaErrors() Driver API error = %04d \"%s\" from file <%s>, line %i.\n", err, hipGetErrorString(err), file, line); exit(EXIT_FAILURE); } } #endif __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { hipDeviceProp_t prop; int whichDevice; checkCudaErrors( hipGetDevice( &whichDevice ) ); checkCudaErrors( hipGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); return 0; } hipEvent_t start, stop; float elapsedTime; hipStream_t stream0, stream1; int *host_a, *host_b, *host_c; int *dev_a0, *dev_b0, *dev_c0; int *dev_a1, *dev_b1, *dev_c1; // start the timers checkCudaErrors( hipEventCreate( &start ) ); checkCudaErrors( hipEventCreate( &stop ) ); // initialize the streams checkCudaErrors( hipStreamCreate( &stream0 ) ); checkCudaErrors( hipStreamCreate( &stream1 ) ); // allocate the memory on the GPU checkCudaErrors( hipMalloc( (void**)&dev_a0, N * sizeof(int) ) ); checkCudaErrors( hipMalloc( (void**)&dev_b0, N * sizeof(int) ) ); checkCudaErrors( hipMalloc( (void**)&dev_c0, N * sizeof(int) ) ); checkCudaErrors( hipMalloc( (void**)&dev_a1, N * sizeof(int) ) ); checkCudaErrors( hipMalloc( (void**)&dev_b1, N * sizeof(int) ) ); checkCudaErrors( hipMalloc( (void**)&dev_c1, N * sizeof(int) ) ); // allocate host locked memory, used to stream checkCudaErrors( hipHostMalloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) ); checkCudaErrors( hipHostMalloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) ); checkCudaErrors( hipHostMalloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } checkCudaErrors( hipEventRecord( start, 0 ) ); // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N*2) { // enqueue copies of a in stream0 and stream1 checkCudaErrors( hipMemcpyAsync( dev_a0, host_a+i, N * sizeof(int), hipMemcpyHostToDevice, stream0 ) ); checkCudaErrors( hipMemcpyAsync( dev_a1, host_a+i+N, N * sizeof(int), hipMemcpyHostToDevice, stream1 ) ); // enqueue copies of b in stream0 and stream1 checkCudaErrors( hipMemcpyAsync( dev_b0, host_b+i, N * sizeof(int), hipMemcpyHostToDevice, stream0 ) ); checkCudaErrors( hipMemcpyAsync( dev_b1, 
host_b+i+N, N * sizeof(int), hipMemcpyHostToDevice, stream1 ) ); // enqueue kernels in stream0 and stream1 hipLaunchKernelGGL(( kernel), dim3(N/256),dim3(256),0,stream0, dev_a0, dev_b0, dev_c0 ); hipLaunchKernelGGL(( kernel), dim3(N/256),dim3(256),0,stream1, dev_a1, dev_b1, dev_c1 ); // enqueue copies of c from device to locked memory checkCudaErrors( hipMemcpyAsync( host_c+i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0 ) ); checkCudaErrors( hipMemcpyAsync( host_c+i+N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1 ) ); } checkCudaErrors( hipStreamSynchronize( stream0 ) ); checkCudaErrors( hipStreamSynchronize( stream1 ) ); checkCudaErrors( hipEventRecord( stop, 0 ) ); checkCudaErrors( hipEventSynchronize( stop ) ); checkCudaErrors( hipEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time taken: %3.1f ms\n", elapsedTime ); // cleanup the streams and memory checkCudaErrors( hipHostFree( host_a ) ); checkCudaErrors( hipHostFree( host_b ) ); checkCudaErrors( hipHostFree( host_c ) ); checkCudaErrors( hipFree( dev_a0 ) ); checkCudaErrors( hipFree( dev_b0 ) ); checkCudaErrors( hipFree( dev_c0 ) ); checkCudaErrors( hipFree( dev_a1 ) ); checkCudaErrors( hipFree( dev_b1 ) ); checkCudaErrors( hipFree( dev_c1 ) ); checkCudaErrors( hipStreamDestroy( stream0 ) ); checkCudaErrors( hipStreamDestroy( stream1 ) ); return 0; }
b0554e08765aa6c0d0e4255d96d50dac88c4f95d.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <stdio.h> #include <cuda_runtime.h> #define N (1024*1024) #define FULL_DATA_SIZE (N*20) #ifndef checkCudaErrors #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) void __checkCudaErrors(cudaError_t err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "checkCudaErrors() Driver API error = %04d \"%s\" from file <%s>, line %i.\n", err, cudaGetErrorString(err), file, line); exit(EXIT_FAILURE); } } #endif __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { cudaDeviceProp prop; int whichDevice; checkCudaErrors( cudaGetDevice( &whichDevice ) ); checkCudaErrors( cudaGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); return 0; } cudaEvent_t start, stop; float elapsedTime; cudaStream_t stream0, stream1; int *host_a, *host_b, *host_c; int *dev_a0, *dev_b0, *dev_c0; int *dev_a1, *dev_b1, *dev_c1; // start the timers checkCudaErrors( cudaEventCreate( &start ) ); checkCudaErrors( cudaEventCreate( &stop ) ); // initialize the streams checkCudaErrors( cudaStreamCreate( &stream0 ) ); checkCudaErrors( cudaStreamCreate( &stream1 ) ); // allocate the memory on the GPU checkCudaErrors( cudaMalloc( (void**)&dev_a0, N * sizeof(int) ) ); checkCudaErrors( cudaMalloc( (void**)&dev_b0, N * sizeof(int) ) ); checkCudaErrors( cudaMalloc( (void**)&dev_c0, N * sizeof(int) ) ); checkCudaErrors( cudaMalloc( (void**)&dev_a1, N * sizeof(int) ) ); checkCudaErrors( cudaMalloc( (void**)&dev_b1, N * sizeof(int) ) ); checkCudaErrors( cudaMalloc( (void**)&dev_c1, N * sizeof(int) ) ); // allocate host locked memory, used to stream checkCudaErrors( cudaHostAlloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); checkCudaErrors( cudaHostAlloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); checkCudaErrors( cudaHostAlloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } checkCudaErrors( cudaEventRecord( start, 0 ) ); // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N*2) { // enqueue copies of a in stream0 and stream1 checkCudaErrors( cudaMemcpyAsync( dev_a0, host_a+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) ); checkCudaErrors( cudaMemcpyAsync( dev_a1, host_a+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) ); // enqueue copies of b in stream0 and stream1 checkCudaErrors( cudaMemcpyAsync( dev_b0, host_b+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) ); checkCudaErrors( cudaMemcpyAsync( dev_b1, host_b+i+N, N * sizeof(int), 
cudaMemcpyHostToDevice, stream1 ) ); // enqueue kernels in stream0 and stream1 kernel<<<N/256,256,0,stream0>>>( dev_a0, dev_b0, dev_c0 ); kernel<<<N/256,256,0,stream1>>>( dev_a1, dev_b1, dev_c1 ); // enqueue copies of c from device to locked memory checkCudaErrors( cudaMemcpyAsync( host_c+i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0 ) ); checkCudaErrors( cudaMemcpyAsync( host_c+i+N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1 ) ); } checkCudaErrors( cudaStreamSynchronize( stream0 ) ); checkCudaErrors( cudaStreamSynchronize( stream1 ) ); checkCudaErrors( cudaEventRecord( stop, 0 ) ); checkCudaErrors( cudaEventSynchronize( stop ) ); checkCudaErrors( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time taken: %3.1f ms\n", elapsedTime ); // cleanup the streams and memory checkCudaErrors( cudaFreeHost( host_a ) ); checkCudaErrors( cudaFreeHost( host_b ) ); checkCudaErrors( cudaFreeHost( host_c ) ); checkCudaErrors( cudaFree( dev_a0 ) ); checkCudaErrors( cudaFree( dev_b0 ) ); checkCudaErrors( cudaFree( dev_c0 ) ); checkCudaErrors( cudaFree( dev_a1 ) ); checkCudaErrors( cudaFree( dev_b1 ) ); checkCudaErrors( cudaFree( dev_c1 ) ); checkCudaErrors( cudaStreamDestroy( stream0 ) ); checkCudaErrors( cudaStreamDestroy( stream1 ) ); return 0; }
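The stream example above overlaps transfers and compute by walking FULL_DATA_SIZE in N-sized chunks that alternate between stream0 and stream1, and the HIP side replaces each <<<grid,block,shmem,stream>>> launch with hipLaunchKernelGGL. Below is a minimal double-buffered sketch of the same pattern, assuming a trivial kernel; the kernel name scale, the chunk size, and the buffer contents are illustrative, not taken from the file.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(const float *in, float *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * in[i];
}

int main() {
    const int chunk = 1 << 20, chunks = 4;
    float *h_in, *h_out, *d_in[2], *d_out[2];
    cudaStream_t s[2];
    // pinned host memory so the async copies can actually overlap (HIP: hipHostMalloc / hipHostMallocDefault)
    cudaHostAlloc((void**)&h_in,  chunk * chunks * sizeof(float), cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_out, chunk * chunks * sizeof(float), cudaHostAllocDefault);
    for (int i = 0; i < chunk * chunks; ++i) h_in[i] = (float)i;
    for (int j = 0; j < 2; ++j) {
        cudaMalloc((void**)&d_in[j],  chunk * sizeof(float));
        cudaMalloc((void**)&d_out[j], chunk * sizeof(float));
        cudaStreamCreate(&s[j]);
    }
    for (int c = 0; c < chunks; ++c) {
        int j = c & 1;   // alternate streams, as stream0/stream1 do above
        cudaMemcpyAsync(d_in[j], h_in + c * chunk, chunk * sizeof(float), cudaMemcpyHostToDevice, s[j]);
        // HIP: hipLaunchKernelGGL(scale, dim3((chunk+255)/256), dim3(256), 0, s[j], d_in[j], d_out[j], chunk);
        scale<<<(chunk + 255) / 256, 256, 0, s[j]>>>(d_in[j], d_out[j], chunk);
        cudaMemcpyAsync(h_out + c * chunk, d_out[j], chunk * sizeof(float), cudaMemcpyDeviceToHost, s[j]);
    }
    cudaStreamSynchronize(s[0]);
    cudaStreamSynchronize(s[1]);
    printf("h_out[1] = %f (expected 2.0)\n", h_out[1]);
    for (int j = 0; j < 2; ++j) { cudaFree(d_in[j]); cudaFree(d_out[j]); cudaStreamDestroy(s[j]); }
    cudaFreeHost(h_in); cudaFreeHost(h_out);
    return 0;
}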
146a94b3c6a4a3f6e326f8f154aa844f8e59af5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <system/op_boilerplate.h> #include <loops/reduce_long.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> #include <types/types.h> #include <execution/LaunchContext.h> #include <exceptions/cuda_exception.h> #include <loops/scalar.h> using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __device__ void reduceSimpleGeneric(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::reduce::ReduceLongFunction<X,Z>::template transformCudaXD<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __device__ void reduceScalarGeneric(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { functions::reduce::ReduceLongFunction<X, Z>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __global__ void simpleReduce(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { reduceSimpleGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __global__ void simpleScalar(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { reduceScalarGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } namespace functions { namespace reduce { //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceLongFunction<X,Z>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) { // start the shared memory loop on the next 
power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. auto sPartials = reinterpret_cast<Z*>(vsPartials); auto extraParams = reinterpret_cast<X*>(vextraParams); Nd4jLong floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (tid >= floorPow2) sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceLongFunction<X,Z>::transformCudaXD( void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer); //shared memory space for storing intermediate results __shared__ Z* sPartials; __shared__ int tadLength, numTads; __shared__ bool isPlainOutput; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<Z*>(shmem); isPlainOutput = shape::order(zShapeInfo) == 'c' && shape::elementWiseStride(zShapeInfo) == 1; tadLength = shape::length(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(x + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) z[isPlainOutput ? 
r : shape::getIndexOffset(r, zShapeInfo)] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceLongFunction<X,Z>::execScalarCuda(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer); auto tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results __shared__ Z* sPartials; __shared__ Nd4jLong xEws; __shared__ Nd4jLong len; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<Z*>(shmem); xEws = shape::elementWiseStride(xShapeInfo); len = shape::length(xShapeInfo); } __syncthreads(); sPartials[threadIdx.x] = OpType::startingValue(x); if (xEws > 0) for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); else for (int i = tid; i < len; i += blockDim.x * gridDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, len), extraParams); __syncthreads(); if (gridDim.x > 1) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams); __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(x); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } else { if (threadIdx.x == 0) { auto tc = reinterpret_cast<unsigned int*>(reductionBuffer); tc[16384] = 0; z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template<typename OpType> __host__ void ReduceLongFunction<X,Z>::intermediateXD(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShapeInfo, Nd4jLong *hXShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, Nd4jLong *hZShapeInfo, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if(shape::isEmpty(hXShapeInfo)) { if(shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<X*>(x))); auto res = hipMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream); if (res != 0) throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res); auto 
ptr = sd::LaunchContext::defaultContext()->getScalarPointer(); // scalar assign functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, zShapeInfo, hXShapeInfo, z, zShapeInfo, hZShapeInfo, ptr, nullptr); } else { hipLaunchKernelGGL(( simpleReduce<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template<typename OpType> __host__ void ReduceLongFunction<X,Z>::intermediateScalar(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShapeInfo, Nd4jLong *hXShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, Nd4jLong *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { if (shape::isEmpty(hXShapeInfo)) { if (shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<X*>(x))); auto res = hipMemcpyAsync(z, &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream); if (res != 0) throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateScalar: failed to copy resulting scalar", res); } else { hipLaunchKernelGGL(( simpleScalar<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> _CUDA_H void ReduceLongFunction<X,Y>::execReduceScalar(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, Nd4jLong* hXShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, Nd4jLong* hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_LONG_OPS)); sd::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> _CUDA_H void ReduceLongFunction<X,Y>::execReduceXD(dim3 launchDims, hipStream_t *stream, int opNum, int rank, void *x, Nd4jLong *xShapeInfo, Nd4jLong* hXShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, Nd4jLong* hZShapeInfo, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_LONG_OPS)); DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// template <typename X> __device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(X); X *sPartialsDeref = (X *) *sPartials; for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0]; } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceLongFunction, , LIBND4J_TYPES, LONG_TYPES); } }
146a94b3c6a4a3f6e326f8f154aa844f8e59af5c.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <system/op_boilerplate.h> #include <loops/reduce_long.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> #include <types/types.h> #include <execution/LaunchContext.h> #include <exceptions/cuda_exception.h> #include <loops/scalar.h> using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __device__ void reduceSimpleGeneric(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::reduce::ReduceLongFunction<X,Z>::template transformCudaXD<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __device__ void reduceScalarGeneric(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { functions::reduce::ReduceLongFunction<X, Z>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __global__ void simpleReduce(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { reduceSimpleGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> __global__ void simpleScalar(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { reduceScalarGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } namespace functions { namespace reduce { //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceLongFunction<X,Z>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. 
If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. auto sPartials = reinterpret_cast<Z*>(vsPartials); auto extraParams = reinterpret_cast<X*>(vextraParams); Nd4jLong floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (tid >= floorPow2) sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceLongFunction<X,Z>::transformCudaXD( void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer); //shared memory space for storing intermediate results __shared__ Z* sPartials; __shared__ int tadLength, numTads; __shared__ bool isPlainOutput; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<Z*>(shmem); isPlainOutput = shape::order(zShapeInfo) == 'c' && shape::elementWiseStride(zShapeInfo) == 1; tadLength = shape::length(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(x + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) z[isPlainOutput ? 
r : shape::getIndexOffset(r, zShapeInfo)] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> __device__ void ReduceLongFunction<X,Z>::execScalarCuda(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<Z*>(vreductionBuffer); auto tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results __shared__ Z* sPartials; __shared__ Nd4jLong xEws; __shared__ Nd4jLong len; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<Z*>(shmem); xEws = shape::elementWiseStride(xShapeInfo); len = shape::length(xShapeInfo); } __syncthreads(); sPartials[threadIdx.x] = OpType::startingValue(x); if (xEws > 0) for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); else for (int i = tid; i < len; i += blockDim.x * gridDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, len), extraParams); __syncthreads(); if (gridDim.x > 1) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams); __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(x); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } else { if (threadIdx.x == 0) { auto tc = reinterpret_cast<unsigned int*>(reductionBuffer); tc[16384] = 0; z[0] = OpType::postProcess(sPartials[0], len, extraParams); } } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template<typename OpType> __host__ void ReduceLongFunction<X,Z>::intermediateXD(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShapeInfo, Nd4jLong *hXShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, Nd4jLong *hZShapeInfo, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if(shape::isEmpty(hXShapeInfo)) { if(shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<X*>(x))); auto res = cudaMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream); if (res != 0) throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res); auto 
ptr = sd::LaunchContext::defaultContext()->getScalarPointer(); // scalar assign functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, zShapeInfo, hXShapeInfo, z, zShapeInfo, hZShapeInfo, ptr, nullptr); } else { simpleReduce<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template<typename OpType> __host__ void ReduceLongFunction<X,Z>::intermediateScalar(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShapeInfo, Nd4jLong *hXShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, Nd4jLong *hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { if (shape::isEmpty(hXShapeInfo)) { if (shape::isEmpty(hZShapeInfo)) return; const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<X*>(x))); auto res = cudaMemcpyAsync(z, &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream); if (res != 0) throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateScalar: failed to copy resulting scalar", res); } else { simpleScalar<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> _CUDA_H void ReduceLongFunction<X,Y>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, Nd4jLong* hXShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, Nd4jLong* hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_TT(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_LONG_OPS)); sd::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> _CUDA_H void ReduceLongFunction<X,Y>::execReduceXD(dim3 launchDims, cudaStream_t *stream, int opNum, int rank, void *x, Nd4jLong *xShapeInfo, Nd4jLong* hXShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, Nd4jLong* hZShapeInfo, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateXD, PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_LONG_OPS)); DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// template <typename X> __device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(X); X *sPartialsDeref = (X *) *sPartials; for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0]; } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceLongFunction, , LIBND4J_TYPES, LONG_TYPES); } }
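aggregatePartials in the reduce_long pair above handles an item count that need not be a power of two: it first folds the elements above the largest power of two back onto the front of the shared buffer, then runs the usual halving tree reduction. The following self-contained sketch reproduces that scheme with a plain float sum in place of OpType::update; the names blockSum and sumKernel and the 192-thread (non-power-of-two) block are illustrative assumptions, not code from the library.

#include <cuda_runtime.h>
#include <cstdio>

// Block-wide shared-memory sum tolerant of a non-power-of-two numItems,
// following the fold-then-tree scheme of aggregatePartials above.
__device__ void blockSum(float *sPartials, int tid, int numItems) {
    int floorPow2 = numItems;
    if (floorPow2 & (floorPow2 - 1)) {
        while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1;   // largest power of two <= numItems
        if (tid >= floorPow2 && tid < numItems)
            sPartials[tid - floorPow2] += sPartials[tid];                 // fold the tail onto the front
        __syncthreads();
    }
    for (int active = floorPow2 >> 1; active; active >>= 1) {             // standard tree reduction
        if (tid < active && tid + active < numItems)
            sPartials[tid] += sPartials[tid + active];
        __syncthreads();
    }
}

__global__ void sumKernel(const float *x, int n, float *out) {
    extern __shared__ float sh[];
    int tid = threadIdx.x;
    float acc = 0.0f;
    for (int i = tid; i < n; i += blockDim.x) acc += x[i];                // per-thread partial sum
    sh[tid] = acc;
    __syncthreads();
    blockSum(sh, tid, blockDim.x);
    if (tid == 0) *out = sh[0];
}

int main() {
    const int n = 1000;
    float h[n], *d, *dOut, result;
    for (int i = 0; i < n; ++i) h[i] = 1.0f;
    cudaMalloc((void**)&d, n * sizeof(float));
    cudaMalloc((void**)&dOut, sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
    sumKernel<<<1, 192, 192 * sizeof(float)>>>(d, n, dOut);               // 192 threads: not a power of two
    cudaMemcpy(&result, dOut, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %d)\n", result, n);
    cudaFree(d); cudaFree(dOut);
    return 0;
}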
a4b56bfeac2f1769ef3a48db877116788598640f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This program generates 2 N*N matrices and then multiplies them on a GPU */ #include<stdio.h> #include<stdlib.h> #include<math.h> #include<cuda.h> #include<unistd.h> //#define N 100 __global__ void multiply(float* A, float* B, float* C, int K) { /* The Kernel is a 2D grid. Tried doing the same with a 1D grid but it requires 2 for loops */ int index1=blockIdx.x*blockDim.x+threadIdx.x; int index2=blockIdx.y*blockDim.y+threadIdx.y; float sum=0.0; for (int i=0;i<K;i++) { sum+=A[index2*K+i]*B[i*K+index1]; } C[index2*K+index1]=sum; } float** Make2DfloatArray(int arraySizeX, int arraySizeY) { /* Generates a 2D matrix of dimension arraySizeX * arraySizeY */ float** theArray; theArray = (float**) malloc(arraySizeX*sizeof(float*)); int i; for (i = 0; i < arraySizeX; i++) theArray[i] = (float*) malloc(arraySizeY*sizeof(float)); int j; for (i=0;i<arraySizeX;i++) { for (j=0;j<arraySizeY;j++) { theArray[i][j]=rand()%5; } } return theArray; } void init_zeros(float** matrix, int K) { /* Initializes a matrix to zeros */ int i,j; for (i=0;i<K;i++) { for (j=0;j<K;j++) { matrix[i][j]=0; } } } float* Make1DfloatArray(int arraySizeX) { /* Generates a 1D float array of size arraySizeX */ float* theArray; theArray = (float*)malloc(arraySizeX*sizeof(float)); int i; for (i=0;i<arraySizeX;i++) { theArray[i]=0.0; } return theArray; } void printmat(float** matrix, int K) { /* To print matrix on display */ int i,j; for (i=0;i<K;i++) { printf("\n"); for (j=0;j<K;j++) { printf("%f \t",matrix[i][j]); } } printf("\n"); } void printtofile(float** matrix, int K, char* filename) { /* Prints original 2D matrices to file */ FILE *fp; fp=fopen(filename,"wt"); int i,j; for (i=0;i<K;i++) { fprintf(fp, "\n"); for (j=0;j<K;j++) { fprintf(fp, "%f\t", matrix[i][j]); } } } void printtofile1D(float* matrix, int K, char* filename) { /* Prints resultant matrix to a file */ FILE *fp; fp=fopen(filename,"wt"); int i,j; int counters=0; for (i=0;i<K;i++) { fprintf(fp, "\n"); for (j=0;j<K;j++) { fprintf(fp, "%f \t", matrix[counters]); counters++; } } } void freese(int sizeX, float** ptr) { /* Function used to free up all the 2D matrices created */ int i; for (i=0;i<sizeX;i++) free(ptr[i]); free(ptr); } int main(int argc, char *argv[]) { const int K = 100; const int blocks=K/20; const int threadblocks=K/blocks; float** M1=Make2DfloatArray(K,K); float** M2=Make2DfloatArray(K,K); float** Prod=Make2DfloatArray(K,K); hipEvent_t start, stop, start_kernel, stop_kernel; float time, time_kernel; hipEventCreate(&start); hipEventCreate(&stop); hipEventCreate(&start_kernel); hipEventCreate(&stop_kernel); init_zeros(Prod, K); float* M1_host_flat=Make1DfloatArray(K*K); float* M2_host_flat=Make1DfloatArray(K*K); float* Prod_host_flat=Make1DfloatArray(K*K); float* M1_device_flat; float* M2_device_flat; float* Prod_device_flat; int* K_device; printf("\n Everything initialized"); printtofile(M1,K,"M1.txt"); printtofile(M2,K,"M2.txt"); printtofile(Prod,K,"Prod.txt"); int counter=0; int i,j; for(i=0;i<K;i++) { for(j=0;j<K;j++) { M1_host_flat[counter]=M1[i][j]; M2_host_flat[counter]=M2[i][j]; Prod_host_flat[counter]=Prod[i][j]; counter+=1; } } //printf("\n Converted to flat"); //Transferring matrices from Host to Device hipEventRecord(start,0); hipMalloc((void **) &M1_device_flat, sizeof(float)*K*K); hipMalloc((void **) &M2_device_flat, sizeof(float)*K*K); hipMalloc((void **) &Prod_device_flat, sizeof(float)*K*K); hipMalloc((void **) &K_device, sizeof(int)); 
hipMemcpy(M1_device_flat, M1_host_flat, sizeof(float)*K*K, hipMemcpyHostToDevice); hipMemcpy(M2_device_flat, M2_host_flat, sizeof(float)*K*K, hipMemcpyHostToDevice); hipMemcpy(Prod_device_flat, Prod_host_flat, sizeof(float)*K*K, hipMemcpyHostToDevice); hipMemcpy(K_device, &K, sizeof(int), hipMemcpyHostToDevice); //Kernel call dim3 threads(threadblocks,threadblocks); dim3 grid(blocks,blocks); hipEventRecord(start_kernel,0); hipLaunchKernelGGL(( multiply), dim3(grid),dim3(threads), 0, 0, M1_device_flat,M2_device_flat,Prod_device_flat, K); hipEventRecord(stop_kernel,0); //Transferring result matrix from Device to Host hipMemcpy(Prod_host_flat, Prod_device_flat, sizeof(int)*K*K, hipMemcpyDeviceToHost); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventElapsedTime(&time_kernel, start_kernel, stop_kernel); printf("\nTime for kernel with data transfer = %f ms \n", time); printf("\nTime for kernel without data transfer = %f ms \n", time_kernel); printtofile1D(Prod_host_flat,K,"Prod_result.txt"); //Freeing up all the memory that was used hipFree(M1_device_flat); hipFree(M2_device_flat); hipFree(Prod_device_flat); hipFree(K_device); freese(K,M1); freese(K,M2); freese(K,Prod); free(M1_host_flat); free(M2_host_flat); free(Prod_host_flat); return 0; }
a4b56bfeac2f1769ef3a48db877116788598640f.cu
/* This program generates 2 N*N matrices and then multiplies them on a GPU */ #include<stdio.h> #include<stdlib.h> #include<math.h> #include<cuda.h> #include<unistd.h> //#define N 100 __global__ void multiply(float* A, float* B, float* C, int K) { /* The Kernel is a 2D grid. Tried doing the same with a 1D grid but it requires 2 for loops */ int index1=blockIdx.x*blockDim.x+threadIdx.x; int index2=blockIdx.y*blockDim.y+threadIdx.y; float sum=0.0; for (int i=0;i<K;i++) { sum+=A[index2*K+i]*B[i*K+index1]; } C[index2*K+index1]=sum; } float** Make2DfloatArray(int arraySizeX, int arraySizeY) { /* Generates a 2D matrix of dimension arraySizeX * arraySizeY */ float** theArray; theArray = (float**) malloc(arraySizeX*sizeof(float*)); int i; for (i = 0; i < arraySizeX; i++) theArray[i] = (float*) malloc(arraySizeY*sizeof(float)); int j; for (i=0;i<arraySizeX;i++) { for (j=0;j<arraySizeY;j++) { theArray[i][j]=rand()%5; } } return theArray; } void init_zeros(float** matrix, int K) { /* Initializes a matrix to zeros */ int i,j; for (i=0;i<K;i++) { for (j=0;j<K;j++) { matrix[i][j]=0; } } } float* Make1DfloatArray(int arraySizeX) { /* Generates a 1D float array of size arraySizeX */ float* theArray; theArray = (float*)malloc(arraySizeX*sizeof(float)); int i; for (i=0;i<arraySizeX;i++) { theArray[i]=0.0; } return theArray; } void printmat(float** matrix, int K) { /* To print matrix on display */ int i,j; for (i=0;i<K;i++) { printf("\n"); for (j=0;j<K;j++) { printf("%f \t",matrix[i][j]); } } printf("\n"); } void printtofile(float** matrix, int K, char* filename) { /* Prints original 2D matrices to file */ FILE *fp; fp=fopen(filename,"wt"); int i,j; for (i=0;i<K;i++) { fprintf(fp, "\n"); for (j=0;j<K;j++) { fprintf(fp, "%f\t", matrix[i][j]); } } } void printtofile1D(float* matrix, int K, char* filename) { /* Prints resultant matrix to a file */ FILE *fp; fp=fopen(filename,"wt"); int i,j; int counters=0; for (i=0;i<K;i++) { fprintf(fp, "\n"); for (j=0;j<K;j++) { fprintf(fp, "%f \t", matrix[counters]); counters++; } } } void freese(int sizeX, float** ptr) { /* Function used to free up all the 2D matrices created */ int i; for (i=0;i<sizeX;i++) free(ptr[i]); free(ptr); } int main(int argc, char *argv[]) { const int K = 100; const int blocks=K/20; const int threadblocks=K/blocks; float** M1=Make2DfloatArray(K,K); float** M2=Make2DfloatArray(K,K); float** Prod=Make2DfloatArray(K,K); cudaEvent_t start, stop, start_kernel, stop_kernel; float time, time_kernel; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&start_kernel); cudaEventCreate(&stop_kernel); init_zeros(Prod, K); float* M1_host_flat=Make1DfloatArray(K*K); float* M2_host_flat=Make1DfloatArray(K*K); float* Prod_host_flat=Make1DfloatArray(K*K); float* M1_device_flat; float* M2_device_flat; float* Prod_device_flat; int* K_device; printf("\n Everything initialized"); printtofile(M1,K,"M1.txt"); printtofile(M2,K,"M2.txt"); printtofile(Prod,K,"Prod.txt"); int counter=0; int i,j; for(i=0;i<K;i++) { for(j=0;j<K;j++) { M1_host_flat[counter]=M1[i][j]; M2_host_flat[counter]=M2[i][j]; Prod_host_flat[counter]=Prod[i][j]; counter+=1; } } //printf("\n Converted to flat"); //Transferring matrices from Host to Device cudaEventRecord(start,0); cudaMalloc((void **) &M1_device_flat, sizeof(float)*K*K); cudaMalloc((void **) &M2_device_flat, sizeof(float)*K*K); cudaMalloc((void **) &Prod_device_flat, sizeof(float)*K*K); cudaMalloc((void **) &K_device, sizeof(int)); cudaMemcpy(M1_device_flat, M1_host_flat, sizeof(float)*K*K, cudaMemcpyHostToDevice); 
cudaMemcpy(M2_device_flat, M2_host_flat, sizeof(float)*K*K, cudaMemcpyHostToDevice); cudaMemcpy(Prod_device_flat, Prod_host_flat, sizeof(float)*K*K, cudaMemcpyHostToDevice); cudaMemcpy(K_device, &K, sizeof(int), cudaMemcpyHostToDevice); //Kernel call dim3 threads(threadblocks,threadblocks); dim3 grid(blocks,blocks); cudaEventRecord(start_kernel,0); multiply<<<grid,threads>>>(M1_device_flat,M2_device_flat,Prod_device_flat, K); cudaEventRecord(stop_kernel,0); //Transferring result matrix from Device to Host cudaMemcpy(Prod_host_flat, Prod_device_flat, sizeof(int)*K*K, cudaMemcpyDeviceToHost); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventElapsedTime(&time_kernel, start_kernel, stop_kernel); printf("\nTime for kernel with data transfer = %f ms \n", time); printf("\nTime for kernel without data transfer = %f ms \n", time_kernel); printtofile1D(Prod_host_flat,K,"Prod_result.txt"); //Freeing up all the memory that was used cudaFree(M1_device_flat); cudaFree(M2_device_flat); cudaFree(Prod_device_flat); cudaFree(K_device); freese(K,M1); freese(K,M2); freese(K,Prod); free(M1_host_flat); free(M2_host_flat); free(Prod_host_flat); return 0; }
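The multiply kernel in the pair above writes C[index2*K+index1] with no bounds check, which is safe here only because K = 100 splits exactly into a 5x5 grid of 20x20 blocks, giving one thread per output element. Below is a hedged sketch of a bounds-checked variant that works for arbitrary K; the name multiplyChecked, the 16x16 block shape, and the test values are illustrative assumptions, not code from the file.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Bounds-checked variant of the multiply kernel above: row/col correspond to
// index2/index1, and out-of-range threads simply return.
__global__ void multiplyChecked(const float *A, const float *B, float *C, int K) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < K && col < K) {
        float sum = 0.0f;
        for (int i = 0; i < K; ++i) sum += A[row * K + i] * B[i * K + col];
        C[row * K + col] = sum;
    }
}

int main() {
    const int K = 37;                                   // deliberately not a multiple of the block size
    size_t bytes = (size_t)K * K * sizeof(float);
    float *hA = (float*)malloc(bytes), *hB = (float*)malloc(bytes), *hC = (float*)malloc(bytes);
    for (int i = 0; i < K * K; ++i) { hA[i] = 1.0f; hB[i] = 2.0f; }
    float *dA, *dB, *dC;
    cudaMalloc((void**)&dA, bytes); cudaMalloc((void**)&dB, bytes); cudaMalloc((void**)&dC, bytes);
    cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, bytes, cudaMemcpyHostToDevice);
    dim3 threads(16, 16);
    dim3 grid((K + 15) / 16, (K + 15) / 16);            // round the grid up, kernel guards the remainder
    multiplyChecked<<<grid, threads>>>(dA, dB, dC, K);
    cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost);
    printf("C[0][0] = %f (expected %f)\n", hC[0], 2.0f * K);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    free(hA); free(hB); free(hC);
    return 0;
}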
2da82949d586d64dfc721dc057547c7a086c081c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <torch/types.h> #include <hipsparse.h> #define checkCudaError( a ) do { \ if (hipSuccess != (a)) { \ fprintf(stderr, "Cuda runTime error in line %d of file %s \ : %s \n", __LINE__, __FILE__, hipGetErrorString(hipGetLastError()) ); \ exit(EXIT_FAILURE); \ } \ } while(0) #define checkCuSparseError( a ) do { \ if (HIPSPARSE_STATUS_SUCCESS != (a)) { \ fprintf(stderr, "CuSparse runTime error in line %d of file %s \ : %s \n", __LINE__, __FILE__, hipGetErrorString(hipGetLastError()) ); \ exit(EXIT_FAILURE); \ } \ } while (0) __device__ __forceinline__ float sum_reduce(float acc, float x) { return acc + x; } __device__ __forceinline__ float sum_init() { return 0; } __global__ void topoCacheCoarsenSPMMKernel( int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C ) { extern __shared__ int sh[]; int sm_offset = (threadIdx.y<<5); int thread_idx = sm_offset+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<m) { int cid = (blockIdx.y<<6)+threadIdx.x; int lb = A_indptr[rid]; int hb = A_indptr[rid+1]; int ptr = lb+threadIdx.x; int offset; float acc1 = sum_init(); float acc2 = sum_init(); if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { sh[thread_idx] = A_indices[ptr]*k; // sh[thread_idx] = __ldg(A_indices+ptr)*k; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = sh[(sm_offset+kk)] + cid; acc1 = sum_reduce(acc1, B[offset]); acc2 = sum_reduce(acc2, B[(offset+32)]); // acc1 = sum_reduce(acc1, __ldg(B+offset)); // acc2 = sum_reduce(acc2, __ldg(B+offset+32)); } __syncwarp(); } offset = rid*k+cid; C[offset] = acc1; C[offset+32] = acc2; } else { // threadIdx.y==blockDim.y-1 int nout = (k-cid+31)/32; for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { sh[thread_idx] = A_indices[ptr]*k; // sh[thread_idx] = __ldg(A_indices+ptr)*k; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = sh[(sm_offset+kk)] + cid; if (nout>0) { acc1 = sum_reduce(acc1, B[offset]);} // acc1 = sum_reduce(acc1, __ldg(B+offset)); } if (nout>1) { acc2 = sum_reduce(acc2, B[(offset+32)]);} // acc2 = sum_reduce(acc2, __ldg(B+offset+32));} } __syncwarp(); } offset = rid*k+cid; if (nout>0) { C[offset] = acc1;} if (nout>1) { C[offset+32] = acc2;} } } } __global__ void topoCacheSPMMKernel( int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C ) { extern __shared__ int sh[]; int sm_offset = (threadIdx.y<<5); int thread_idx = sm_offset + threadIdx.x; int cid = (blockIdx.y<<5)+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<m) { int lb = A_indptr[rid]; int hb = A_indptr[(rid+1)]; int offset; int ptr = lb+threadIdx.x; float acc1 = sum_init(); if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { sh[thread_idx] = A_indices[ptr]*k; // sh[thread_idx] = __ldg(A_indices+ptr)*k; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = sh[sm_offset+kk]+cid; acc1 = sum_reduce(acc1, B[offset]); // acc1 = sum_reduce(acc1, __ldg(B+offset)); } __syncwarp(); } offset = rid*k+cid; C[offset] = acc1; } else { // threadIdx.y==blockDim.y-1 int nout = (k-cid+31)/32; for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { sh[thread_idx] = A_indices[ptr]*k; // sh[thread_idx] = __ldg(A_indices+ptr)*k; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = sh[(sm_offset+kk)] + cid; if (nout>0) { acc1 = sum_reduce(acc1, B[offset]);} // acc1 = sum_reduce(acc1, 
__ldg(B+offset)); } } __syncwarp(); } offset = rid*k+cid; if (nout>0) { C[offset] = acc1;} } } } __global__ void topoSimpleSPMMKernel( int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C ) { int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<m) { int lb = A_indptr[rid]; int hb = A_indptr[(rid+1)]; float acc1 = sum_init(); int offset; for (int ptr=lb; ptr<hb; ptr++) { // offset = __ldg(A_indices+ptr)*k+threadIdx.x; // acc1 = sum_reduce(acc1, __ldg(B+offset)); offset = A_indices[ptr]*k+threadIdx.x; acc1 = sum_reduce(acc1, B[offset]); } C[(rid*k+threadIdx.x)] = acc1; } } torch::Tensor spmm_cuda_no_edge_value( torch::Tensor rowptr, torch::Tensor colind, torch::Tensor dense ) { const auto m = rowptr.size(0)-1; const auto k = dense.size(1); auto devid = dense.device().index(); auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid); auto out = torch::empty({m,k}, options); if (k<32) { const int row_per_block = 128/k; const int n_block = (m+row_per_block-1)/row_per_block; hipLaunchKernelGGL(( topoSimpleSPMMKernel), dim3(dim3(n_block,1,1)),dim3(dim3(k, row_per_block, 1)), 0, 0, m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } if (k<64) { const int tile_k = (k+31)/32; const int n_block = (m+3)/4; hipLaunchKernelGGL(( topoCacheSPMMKernel), dim3(dim3(n_block,tile_k,1)), dim3(dim3(32,4,1)), 128*sizeof(int), 0, m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } else { const int tile_k = (k+63)/64; const int n_block = (m+8-1)/8; hipLaunchKernelGGL(( topoCacheCoarsenSPMMKernel), dim3(dim3(n_block,tile_k,1)), dim3(dim3(32,8,1)), 8*32*sizeof(int), 0, m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } } __global__ void spmm_test0( int A_nrows, int B_ncols, int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal, float* B_dnVal, float* C_dnVal ) { int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<A_nrows) { int cid = (blockIdx.y<<5)+threadIdx.x; int lb = A_csrRowPtr[rid]; int hb = A_csrRowPtr[(rid+1)]; int offset = 0; float acc=0; if (blockIdx.y!=gridDim.y-1){ for (int ptr = lb; ptr<hb; ptr++) { offset = A_csrColInd[ptr]*B_ncols+cid; acc += A_csrVal[ptr]*B_dnVal[offset]; } C_dnVal[(rid*B_ncols+cid)] = acc; } else { for (int ptr = lb; ptr<hb; ptr++) { if (cid<B_ncols) { offset = A_csrColInd[ptr]*B_ncols+cid;} acc += A_csrVal[ptr]*B_dnVal[offset]; } if (cid<B_ncols) { C_dnVal[(rid*B_ncols+cid)] = acc;} } } } __global__ void spmm_test1( int A_nrows, int B_ncols, int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal, float* B_dnVal, float* C_dnVal ) { extern __shared__ int sh[]; int *colInd_sh = sh; float *val_sh = (float *)&sh[(blockDim.y<<5)]; int shmem_offset = (threadIdx.y<<5); int thread_idx = shmem_offset+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<A_nrows) { int cid = (blockIdx.y<<5)+threadIdx.x; int lb = A_csrRowPtr[rid]; int hb = A_csrRowPtr[(rid+1)]; int ptr = lb+threadIdx.x; int offset; float acc=0; if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = colInd_sh[(shmem_offset+kk)] + cid; acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset]; } __syncwarp(); } C_dnVal[(rid*B_ncols+cid)] = acc; } else { for (int jj=lb; jj<hb; jj+=32) { if 
(ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = colInd_sh[(shmem_offset+kk)] + cid; if (cid<B_ncols) { acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset]; } } __syncwarp(); } if (cid<B_ncols) { C_dnVal[(rid*B_ncols+cid)] = acc; } } } } __global__ void spmm_test2( int A_nrows, int B_ncols, int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal, float* B_dnVal, float* C_dnVal ) { extern __shared__ int sh[]; int *colInd_sh = sh; float *val_sh = (float *)&sh[(blockDim.y<<5)]; int shmem_offset = (threadIdx.y<<5); int thread_idx = shmem_offset+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<A_nrows) { int cid = (blockIdx.y<<6)+threadIdx.x; int lb = A_csrRowPtr[rid]; int hb = A_csrRowPtr[(rid+1)]; int ptr = lb+threadIdx.x; int offset; float acc1=0, acc2=0, val; if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = colInd_sh[(shmem_offset+kk)] + cid; val = val_sh[(shmem_offset+kk)]; acc1 += val*B_dnVal[offset]; acc2 += val*B_dnVal[offset+32]; } __syncwarp(); } offset = rid*B_ncols+cid; C_dnVal[offset] = acc1; C_dnVal[offset+32] = acc2; } else { int nout = (B_ncols-cid+31)/32; for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { val = val_sh[(shmem_offset+kk)]; offset = colInd_sh[(shmem_offset+kk)] + cid; if (nout>0) { acc1 += val*B_dnVal[offset]; } if (nout>1) { acc2 += val*B_dnVal[offset+32]; } } __syncwarp(); } offset = rid*B_ncols+cid; if (nout>0) { C_dnVal[offset] = acc1; } if (nout>1) { C_dnVal[(offset+32)] = acc2; } } } } void csr2cscKernel(int m, int n, int nnz, int *csrRowPtr, int *csrColInd, float *csrVal, int *cscColPtr, int *cscRowInd, float *cscVal ) { hipsparseHandle_t handle; size_t bufferSize = 0; void* buffer = NULL; checkCuSparseError(hipsparseCsr2cscEx2_bufferSize(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, HIP_R_32F, HIPSPARSE_ACTION_SYMBOLIC, HIPSPARSE_INDEX_BASE_ZERO, HIPSPARSE_CSR2CSC_ALG1, &bufferSize )); checkCudaError(hipMalloc((void**)&buffer, bufferSize * sizeof(float))); checkCuSparseError(hipsparseCsr2cscEx2(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, HIP_R_32F, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO, HIPSPARSE_CSR2CSC_ALG1, buffer )); checkCudaError(hipFree(buffer)); } torch::Tensor spmm_cuda( torch::Tensor rowptr, torch::Tensor colind, torch::Tensor values, torch::Tensor dense ) { const auto m = rowptr.size(0)-1; const auto k = dense.size(1); auto devid = dense.device().index(); auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid); auto out = torch::empty({m,k}, options); if (k<32) { const int row_per_block = 128/k; const int n_block = (m+row_per_block-1)/row_per_block; hipLaunchKernelGGL(( spmm_test0), dim3(dim3(n_block,1,1)),dim3(dim3(k, row_per_block, 1)), 0, 0, m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } if (k<64) { const int tile_k = (k+31)/32; const int n_block = (m+4-1)/4; hipLaunchKernelGGL(( spmm_test1), dim3(dim3(n_block, tile_k, 1)), dim3(dim3(32, 4, 1)), 
32*4*(sizeof(int)+sizeof(float)), 0, m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } else { const int tile_k = (k+63)/64; const int n_block = (m+8-1)/8; hipLaunchKernelGGL(( spmm_test2), dim3(dim3(n_block, tile_k, 1)), dim3(dim3(32, 8, 1)), 32*8*(sizeof(int)+sizeof(float)), 0, m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } } torch::Tensor csr2csc_cuda( torch::Tensor csrRowPtr, torch::Tensor csrColInd, torch::Tensor csrVal, torch::Tensor cscColPtr, torch::Tensor cscRowInd ) { const auto m = csrRowPtr.size(0) - 1; const auto n = cscColPtr.size(0) - 1; const auto nnz = csrColInd.size(0); auto devid = csrRowPtr.device().index(); auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid); auto cscVal = torch::empty({nnz}, options); csr2cscKernel(m, n, nnz, csrRowPtr.data_ptr<int>(), csrColInd.data_ptr<int>(), csrVal.data_ptr<float>(), cscColPtr.data_ptr<int>(), cscRowInd.data_ptr<int>(), cscVal.data_ptr<float>()); return cscVal; }
2da82949d586d64dfc721dc057547c7a086c081c.cu
#include <cuda.h> #include <torch/types.h> #include <cusparse.h> #define checkCudaError( a ) do { \ if (cudaSuccess != (a)) { \ fprintf(stderr, "Cuda runTime error in line %d of file %s \ : %s \n", __LINE__, __FILE__, cudaGetErrorString(cudaGetLastError()) ); \ exit(EXIT_FAILURE); \ } \ } while(0) #define checkCuSparseError( a ) do { \ if (CUSPARSE_STATUS_SUCCESS != (a)) { \ fprintf(stderr, "CuSparse runTime error in line %d of file %s \ : %s \n", __LINE__, __FILE__, cudaGetErrorString(cudaGetLastError()) ); \ exit(EXIT_FAILURE); \ } \ } while (0) __device__ __forceinline__ float sum_reduce(float acc, float x) { return acc + x; } __device__ __forceinline__ float sum_init() { return 0; } __global__ void topoCacheCoarsenSPMMKernel( int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C ) { extern __shared__ int sh[]; int sm_offset = (threadIdx.y<<5); int thread_idx = sm_offset+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<m) { int cid = (blockIdx.y<<6)+threadIdx.x; int lb = A_indptr[rid]; int hb = A_indptr[rid+1]; int ptr = lb+threadIdx.x; int offset; float acc1 = sum_init(); float acc2 = sum_init(); if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { sh[thread_idx] = A_indices[ptr]*k; // sh[thread_idx] = __ldg(A_indices+ptr)*k; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = sh[(sm_offset+kk)] + cid; acc1 = sum_reduce(acc1, B[offset]); acc2 = sum_reduce(acc2, B[(offset+32)]); // acc1 = sum_reduce(acc1, __ldg(B+offset)); // acc2 = sum_reduce(acc2, __ldg(B+offset+32)); } __syncwarp(); } offset = rid*k+cid; C[offset] = acc1; C[offset+32] = acc2; } else { // threadIdx.y==blockDim.y-1 int nout = (k-cid+31)/32; for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { sh[thread_idx] = A_indices[ptr]*k; // sh[thread_idx] = __ldg(A_indices+ptr)*k; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = sh[(sm_offset+kk)] + cid; if (nout>0) { acc1 = sum_reduce(acc1, B[offset]);} // acc1 = sum_reduce(acc1, __ldg(B+offset)); } if (nout>1) { acc2 = sum_reduce(acc2, B[(offset+32)]);} // acc2 = sum_reduce(acc2, __ldg(B+offset+32));} } __syncwarp(); } offset = rid*k+cid; if (nout>0) { C[offset] = acc1;} if (nout>1) { C[offset+32] = acc2;} } } } __global__ void topoCacheSPMMKernel( int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C ) { extern __shared__ int sh[]; int sm_offset = (threadIdx.y<<5); int thread_idx = sm_offset + threadIdx.x; int cid = (blockIdx.y<<5)+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<m) { int lb = A_indptr[rid]; int hb = A_indptr[(rid+1)]; int offset; int ptr = lb+threadIdx.x; float acc1 = sum_init(); if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { sh[thread_idx] = A_indices[ptr]*k; // sh[thread_idx] = __ldg(A_indices+ptr)*k; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = sh[sm_offset+kk]+cid; acc1 = sum_reduce(acc1, B[offset]); // acc1 = sum_reduce(acc1, __ldg(B+offset)); } __syncwarp(); } offset = rid*k+cid; C[offset] = acc1; } else { // threadIdx.y==blockDim.y-1 int nout = (k-cid+31)/32; for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { sh[thread_idx] = A_indices[ptr]*k; // sh[thread_idx] = __ldg(A_indices+ptr)*k; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = sh[(sm_offset+kk)] + cid; if (nout>0) { acc1 = sum_reduce(acc1, B[offset]);} // acc1 = sum_reduce(acc1, __ldg(B+offset)); } } __syncwarp(); } offset = rid*k+cid; if (nout>0) { 
C[offset] = acc1;} } } } __global__ void topoSimpleSPMMKernel( int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C ) { int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<m) { int lb = A_indptr[rid]; int hb = A_indptr[(rid+1)]; float acc1 = sum_init(); int offset; for (int ptr=lb; ptr<hb; ptr++) { // offset = __ldg(A_indices+ptr)*k+threadIdx.x; // acc1 = sum_reduce(acc1, __ldg(B+offset)); offset = A_indices[ptr]*k+threadIdx.x; acc1 = sum_reduce(acc1, B[offset]); } C[(rid*k+threadIdx.x)] = acc1; } } torch::Tensor spmm_cuda_no_edge_value( torch::Tensor rowptr, torch::Tensor colind, torch::Tensor dense ) { const auto m = rowptr.size(0)-1; const auto k = dense.size(1); auto devid = dense.device().index(); auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid); auto out = torch::empty({m,k}, options); if (k<32) { const int row_per_block = 128/k; const int n_block = (m+row_per_block-1)/row_per_block; topoSimpleSPMMKernel<<< dim3(n_block,1,1),dim3(k, row_per_block, 1)>>>( m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } if (k<64) { const int tile_k = (k+31)/32; const int n_block = (m+3)/4; topoCacheSPMMKernel<<< dim3(n_block,tile_k,1), dim3(32,4,1), 128*sizeof(int)>>>( m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } else { const int tile_k = (k+63)/64; const int n_block = (m+8-1)/8; topoCacheCoarsenSPMMKernel<<< dim3(n_block,tile_k,1), dim3(32,8,1), 8*32*sizeof(int)>>>( m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } } __global__ void spmm_test0( int A_nrows, int B_ncols, int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal, float* B_dnVal, float* C_dnVal ) { int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<A_nrows) { int cid = (blockIdx.y<<5)+threadIdx.x; int lb = A_csrRowPtr[rid]; int hb = A_csrRowPtr[(rid+1)]; int offset = 0; float acc=0; if (blockIdx.y!=gridDim.y-1){ for (int ptr = lb; ptr<hb; ptr++) { offset = A_csrColInd[ptr]*B_ncols+cid; acc += A_csrVal[ptr]*B_dnVal[offset]; } C_dnVal[(rid*B_ncols+cid)] = acc; } else { for (int ptr = lb; ptr<hb; ptr++) { if (cid<B_ncols) { offset = A_csrColInd[ptr]*B_ncols+cid;} acc += A_csrVal[ptr]*B_dnVal[offset]; } if (cid<B_ncols) { C_dnVal[(rid*B_ncols+cid)] = acc;} } } } __global__ void spmm_test1( int A_nrows, int B_ncols, int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal, float* B_dnVal, float* C_dnVal ) { extern __shared__ int sh[]; int *colInd_sh = sh; float *val_sh = (float *)&sh[(blockDim.y<<5)]; int shmem_offset = (threadIdx.y<<5); int thread_idx = shmem_offset+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<A_nrows) { int cid = (blockIdx.y<<5)+threadIdx.x; int lb = A_csrRowPtr[rid]; int hb = A_csrRowPtr[(rid+1)]; int ptr = lb+threadIdx.x; int offset; float acc=0; if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = colInd_sh[(shmem_offset+kk)] + cid; acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset]; } __syncwarp(); } C_dnVal[(rid*B_ncols+cid)] = acc; } else { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = 
colInd_sh[(shmem_offset+kk)] + cid; if (cid<B_ncols) { acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset]; } } __syncwarp(); } if (cid<B_ncols) { C_dnVal[(rid*B_ncols+cid)] = acc; } } } } __global__ void spmm_test2( int A_nrows, int B_ncols, int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal, float* B_dnVal, float* C_dnVal ) { extern __shared__ int sh[]; int *colInd_sh = sh; float *val_sh = (float *)&sh[(blockDim.y<<5)]; int shmem_offset = (threadIdx.y<<5); int thread_idx = shmem_offset+threadIdx.x; int rid = blockDim.y*blockIdx.x+threadIdx.y; if (rid<A_nrows) { int cid = (blockIdx.y<<6)+threadIdx.x; int lb = A_csrRowPtr[rid]; int hb = A_csrRowPtr[(rid+1)]; int ptr = lb+threadIdx.x; int offset; float acc1=0, acc2=0, val; if (blockIdx.y != gridDim.y-1) { for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { offset = colInd_sh[(shmem_offset+kk)] + cid; val = val_sh[(shmem_offset+kk)]; acc1 += val*B_dnVal[offset]; acc2 += val*B_dnVal[offset+32]; } __syncwarp(); } offset = rid*B_ncols+cid; C_dnVal[offset] = acc1; C_dnVal[offset+32] = acc2; } else { int nout = (B_ncols-cid+31)/32; for (int jj=lb; jj<hb; jj+=32) { if (ptr<hb) { val_sh[thread_idx] = A_csrVal[ptr]; colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr]; } __syncwarp(); ptr += 32; for (int kk=0; kk<32&&jj+kk<hb; kk++) { val = val_sh[(shmem_offset+kk)]; offset = colInd_sh[(shmem_offset+kk)] + cid; if (nout>0) { acc1 += val*B_dnVal[offset]; } if (nout>1) { acc2 += val*B_dnVal[offset+32]; } } __syncwarp(); } offset = rid*B_ncols+cid; if (nout>0) { C_dnVal[offset] = acc1; } if (nout>1) { C_dnVal[(offset+32)] = acc2; } } } } void csr2cscKernel(int m, int n, int nnz, int *csrRowPtr, int *csrColInd, float *csrVal, int *cscColPtr, int *cscRowInd, float *cscVal ) { cusparseHandle_t handle; size_t bufferSize = 0; void* buffer = NULL; checkCuSparseError(cusparseCsr2cscEx2_bufferSize(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, CUDA_R_32F, CUSPARSE_ACTION_SYMBOLIC, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_CSR2CSC_ALG1, &bufferSize )); checkCudaError(cudaMalloc((void**)&buffer, bufferSize * sizeof(float))); checkCuSparseError(cusparseCsr2cscEx2(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, CUDA_R_32F, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_CSR2CSC_ALG1, buffer )); checkCudaError(cudaFree(buffer)); } torch::Tensor spmm_cuda( torch::Tensor rowptr, torch::Tensor colind, torch::Tensor values, torch::Tensor dense ) { const auto m = rowptr.size(0)-1; const auto k = dense.size(1); auto devid = dense.device().index(); auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid); auto out = torch::empty({m,k}, options); if (k<32) { const int row_per_block = 128/k; const int n_block = (m+row_per_block-1)/row_per_block; spmm_test0<<<dim3(n_block,1,1),dim3(k, row_per_block, 1)>>>( m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } if (k<64) { const int tile_k = (k+31)/32; const int n_block = (m+4-1)/4; spmm_test1<<<dim3(n_block, tile_k, 1), dim3(32, 4, 1), 32*4*(sizeof(int)+sizeof(float))>>> ( m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } else { const int tile_k = (k+63)/64; const int n_block = (m+8-1)/8; 
spmm_test2<<<dim3(n_block, tile_k, 1), dim3(32, 8, 1), 32*8*(sizeof(int)+sizeof(float))>>> ( m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>()); return out; } } torch::Tensor csr2csc_cuda( torch::Tensor csrRowPtr, torch::Tensor csrColInd, torch::Tensor csrVal, torch::Tensor cscColPtr, torch::Tensor cscRowInd ) { const auto m = csrRowPtr.size(0) - 1; const auto n = cscColPtr.size(0) - 1; const auto nnz = csrColInd.size(0); auto devid = csrRowPtr.device().index(); auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid); auto cscVal = torch::empty({nnz}, options); csr2cscKernel(m, n, nnz, csrRowPtr.data_ptr<int>(), csrColInd.data_ptr<int>(), csrVal.data_ptr<float>(), cscColPtr.data_ptr<int>(), cscRowInd.data_ptr<int>(), cscVal.data_ptr<float>()); return cscVal; }
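// Both csr2cscKernel variants above declare the cuSPARSE/hipSPARSE handle but never create it
// before handing it to the conversion routines. A minimal corrected sketch (assumed, not part of
// the original files; hypothetical name, shown for the CUDA version -- the HIP one is analogous
// with the hipsparse* equivalents):
void csr2cscWithHandleSketch(int m, int n, int nnz,
                             int *csrRowPtr, int *csrColInd, float *csrVal,
                             int *cscColPtr, int *cscRowInd, float *cscVal)
{
    cusparseHandle_t handle;
    checkCuSparseError(cusparseCreate(&handle));     // missing in csr2cscKernel above
    size_t bufferSize = 0;
    void *buffer = NULL;
    checkCuSparseError(cusparseCsr2cscEx2_bufferSize(handle, m, n, nnz,
        csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd,
        CUDA_R_32F, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO,
        CUSPARSE_CSR2CSC_ALG1, &bufferSize));
    checkCudaError(cudaMalloc(&buffer, bufferSize)); // bufferSize is already in bytes
    checkCuSparseError(cusparseCsr2cscEx2(handle, m, n, nnz,
        csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd,
        CUDA_R_32F, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO,
        CUSPARSE_CSR2CSC_ALG1, buffer));
    checkCudaError(cudaFree(buffer));
    checkCuSparseError(cusparseDestroy(handle));     // release the handle when done
}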
9e9b1db41c5f619dee05cfbab802d2e274044add.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void cube(float * d_out, float * d_in){
    // Todo: Fill in this function
    int x = threadIdx.x;
    float f = d_in[x];
    d_out[x] = f*f*f;
}

int main(int argc, char ** argv) {
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // declare GPU memory pointers
    float * d_in;
    float * d_out;

    // allocate GPU memory
    hipMalloc((void**) &d_in, ARRAY_BYTES);
    hipMalloc((void**) &d_out, ARRAY_BYTES);

    // transfer the array to the GPU
    hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);

    // launch the kernel
    hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);

    // copy back the result array to the CPU
    hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

    // print out the resulting array
    for (int i =0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    hipFree(d_in);
    hipFree(d_out);

    return 0;
}
9e9b1db41c5f619dee05cfbab802d2e274044add.cu
#include <stdio.h>

__global__ void cube(float * d_out, float * d_in){
    // Todo: Fill in this function
    int x = threadIdx.x;
    float f = d_in[x];
    d_out[x] = f*f*f;
}

int main(int argc, char ** argv) {
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // generate the input array on the host
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    // declare GPU memory pointers
    float * d_in;
    float * d_out;

    // allocate GPU memory
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);

    // transfer the array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // launch the kernel
    cube<<<1, ARRAY_SIZE>>>(d_out, d_in);

    // copy back the result array to the CPU
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // print out the resulting array
    for (int i =0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
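// A small launch-checking sketch (assumed, not part of either file above): neither version of
// main verifies that the kernel launch or the device copies succeeded before printing h_out.
static void launch_cube_checked(float *d_out, float *d_in, int n)
{
    cube<<<1, n>>>(d_out, d_in);
    cudaError_t err = cudaGetLastError();       // catches launch-configuration errors
    if (err == cudaSuccess) {
        err = cudaDeviceSynchronize();          // catches errors raised while the kernel ran
    }
    if (err != cudaSuccess) {
        fprintf(stderr, "cube kernel failed: %s\n", cudaGetErrorString(err));
    }
}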
coalesced4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void coalesced4(float *A, float *C, const int N)
{
    int i = (blockIdx.x * blockDim.x + threadIdx.x)*4;
    if (i+3 < N) {
        C[i]   = A[i];
        C[i+1] = A[i+1];
        C[i+2] = A[i+2];
        C[i+3] = A[i+3];
    }
}
coalesced4.cu
#include "includes.h" __global__ void coalesced4(float *A, float *C, const int N) { int i = (blockIdx.x * blockDim.x + threadIdx.x)*4; if (i+3 < N) { C[i] = A[i]; C[i+1] = A[i+1]; C[i+2] = A[i+2]; C[i+3] = A[i+3];} }
13962ea580b5fd501198c5dca27cf0e36965422d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
//#include<gl/glew.h>
#include"glew.h"
#include<gl/GL.h>
#include"vmath.h"
#include"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\include\cuda.h"
#include"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\include\cuda_runtime.h"
#include"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\include\cuda_gl_interop.h"
#include"sample.cuh"

__global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

    // calculate uv coordinates
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;

    // calculate simple sine wave pattern
    float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;

    // write output vertex
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
}

void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time)
{
    __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time);

    // execute the kernel
    dim3 block(8, 8, 1);
    dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
    simple_vbo_kernel <<< grid, block >> > (pos, mesh_width, mesh_height, time);
}
13962ea580b5fd501198c5dca27cf0e36965422d.cu
#include<stdio.h>
//#include<gl/glew.h>
#include"glew.h"
#include<gl/GL.h>
#include"vmath.h"
#include"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\include\cuda.h"
#include"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\include\cuda_runtime.h"
#include"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\include\cuda_gl_interop.h"
#include"sample.cuh"

__global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

    // calculate uv coordinates
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;

    // calculate simple sine wave pattern
    float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;

    // write output vertex
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
}

void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time)
{
    __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time);

    // execute the kernel
    dim3 block(8, 8, 1);
    dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
    simple_vbo_kernel <<< grid, block >> > (pos, mesh_width, mesh_height, time);
}
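// A typical interop sequence around launch_kernel (assumed, not part of the original file): the
// float4 buffer it fills would normally be an OpenGL VBO that has been registered with the CUDA
// graphics-interop API and is mapped for the duration of the launch.
void run_frame(cudaGraphicsResource_t vbo_resource,
               unsigned int mesh_width, unsigned int mesh_height, float time)
{
    float4 *dptr = NULL;
    size_t num_bytes = 0;
    cudaGraphicsMapResources(1, &vbo_resource, 0);                        // take the VBO from GL
    cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, vbo_resource);
    launch_kernel(dptr, mesh_width, mesh_height, time);                   // write the new vertices
    cudaGraphicsUnmapResources(1, &vbo_resource, 0);                      // give it back to GL
}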
aad95623ed8b65bde4b0aac86eecfed6bff5c481.hip
// !!! This is a file automatically generated by hipify!!! // 3D Bouyant Ball viscous code // nvcc -arch=sm_70 -O3 visBouy_accel_3d.cu // run: ./a.out #include "stdio.h" #include "stdlib.h" #include "math.h" #include "hip/hip_runtime.h" #define USE_SINGLE_PRECISION /* Comment this line using "//" if you want to use double precision. */ #ifdef USE_SINGLE_PRECISION #define DAT float #define PRECIS 4 #else #define DAT double #define PRECIS 8 #endif #define GPU_ID 0 #define OVERLENGTH_X 1 #define OVERLENGTH_Y 1 #define OVERLENGTH_Z 1 #define zeros(A,nx,ny,nz) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc(((nx)*(ny)*(nz))*sizeof(DAT)); \ for(i=0; i < ((nx)*(ny)*(nz)); i++){ A##_h[i]=(DAT)0.0; } \ hipMalloc(&A##_d ,((nx)*(ny)*(nz))*sizeof(DAT)); \ hipMemcpy( A##_d,A##_h,((nx)*(ny)*(nz))*sizeof(DAT),hipMemcpyHostToDevice); #define free_all(A) free(A##_h); hipFree(A##_d); #define gather(A,nx,ny,nz) hipMemcpy( A##_h,A##_d,((nx)*(ny)*(nz))*sizeof(DAT),hipMemcpyDeviceToHost); // --------------------------------------------------------------------- // // Physics const DAT Lx = 10.0; const DAT Ly = 10.0; const DAT Lz = 10.0; const DAT k = 1.0; const DAT rhoi= 10.0; const DAT g = -10.0; const DAT eta = 1.0; const DAT nu = 6.0; const DAT epsi= 1.0e-6; // Numerics #define BLOCK_X 8 #define BLOCK_Y 8 #define BLOCK_Z 8 #define GRID_X 4 #define GRID_Y 4 #define GRID_Z 4 const int nx = BLOCK_X*GRID_X - OVERLENGTH_X; const int ny = BLOCK_Y*GRID_Y - OVERLENGTH_Y; const int nz = BLOCK_Z*GRID_Z - OVERLENGTH_Z; const int nt = 200000; const int nmax = 100; const DAT dx = Lx/((DAT)nx); const DAT dy = Ly/((DAT)ny); const DAT dz = Lz/((DAT)nz); const DAT dtV = (min(dx, min(dy,dz))*min(dx,min(dy,dz)))/(eta*4.1*((DAT)4)); const DAT dtP = 4.1*eta/((DAT)(4*ny)); // --------------------------------------------------------------------- // void save_info(int me, const int nx, const int ny, const int nz){ FILE* fid; if (me==0){ fid=fopen("0_nxyz.inf" ,"w"); fprintf(fid,"%d %d %d %d", PRECIS, nx, ny, nz); fclose(fid); } } #define save_info() save_info(me, nx, ny, nz); void save_array(DAT* A, int nx, int ny, int nz, int me, const char A_name[]){ char* fname; FILE* fid; asprintf(&fname, "%d_%s.res" , me, A_name); fid=fopen(fname, "wb"); fwrite(A, sizeof(DAT), (nx)*(ny)*(nz), fid); fclose(fid); free(fname); } #define SaveArray(A,nx,ny,nz,A_name) gather(A,nx,ny,nz); save_array(A##_h,nx,ny,nz,me,A_name); void clean_cuda(){ hipError_t ce = hipGetLastError(); if(ce != hipSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", hipGetErrorString(ce)); hipDeviceReset();} } // Timer #include "sys/time.h" double timer_start = 0; double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; } void tic(){ timer_start = cpu_sec(); } double toc(){ return cpu_sec()-timer_start; } void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); } // MIN and MAX function // DAT device_MAX=0.0; #define NB_THREADS (BLOCK_X*BLOCK_Y*BLOCK_Z) #define blockId (blockIdx.x + blockIdx.y *gridDim.x + blockIdx.z*gridDim.y*gridDim.x) #define threadId (threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x) #define isBlockMaster (threadIdx.x==0 && threadIdx.y==0 && threadIdx.z ==0) // maxval // #define block_max_init() DAT __thread_maxval=0.0; #define __thread_max(A,nx_A,ny_A,nz_A) if (iy<ny_A && ix<nx_A && iz<nz_A){ __thread_maxval = max((__thread_maxval) , abs(A[ix + iy*nx_A +iz*ny_A*nx_A])); } __shared__ volatile DAT __block_maxval; 
#define __block_max(A,nx_A,ny_A,nz_A) __thread_max(A,nx_A,ny_A,nz_A); if (isBlockMaster){ __block_maxval=0; } __syncthreads(); \ for (int i=0; i < (NB_THREADS); i++){ if (i==threadId){ __block_maxval = max(__block_maxval,__thread_maxval); } __syncthreads(); } __global__ void __device_max_d(DAT*A, const int nx_A,const int ny_A, const int nz_A, DAT*__device_maxval){ block_max_init(); int ix = blockIdx.x*blockDim.x + threadIdx.x; int iy = blockIdx.y*blockDim.y + threadIdx.y; int iz = blockIdx.z*blockDim.z + threadIdx.z; // find the maxval for each block __block_max(A,nx_A,ny_A,nz_A); __device_maxval[blockId] = __block_maxval; } #define __DEVICE_max(A,nx_A,ny_A,nz_A) hipLaunchKernelGGL(( __device_max_d), dim3(grid), dim3(block), 0, 0, A##_d, nx_A, ny_A, nz_A, __device_maxval_d); \ gather(__device_maxval,grid.x,grid.y,grid.z); device_MAX=(DAT)0.0; \ for (int i=0; i < (grid.x*grid.y*grid.z); i++){ \ device_MAX = max(device_MAX,__device_maxval_h[i]); \ } \ A##_MAX = (device_MAX); // --------------------------------------------------------------------- // // Computing physics kernels __global__ void init(DAT* x, DAT* y, DAT* z, DAT* rho, const DAT Lx, const DAT Ly, const DAT Lz, const DAT dx, const DAT dy, const DAT dz, const int nx, const int ny, const int nz){ int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z if (iy<ny && ix<nx && iz<nz){ x[ix+iy*nx+iz*nx*ny] = (DAT)ix*dx + (-Lx+dx)/2.0; } if (iy<ny && ix<nx && iz<nz){ y[ix+iy*nx+iz*nx*ny] = (DAT)iy*dy + (-Ly+dy)/2.0; } if (iy<ny && ix<nx && iz<nz){ z[ix+iy*nx+iz*nx*ny] = (DAT)iz*dz + (-Lz+dz)/2.0; } if (iy<ny && ix<nx && iz<nz){ if(x[ix+iy*nx+iz*nx*ny]*x[ix+iy*nx+iz*nx*ny] + y[ix+iy*nx+iz*nx*ny]*y[ix+iy*nx+iz*nx*ny] + z[ix+iy*nx+iz*nx*ny]*z[ix+iy*nx+iz*nx*ny] < 1){ rho[ix+iy*nx+iz*nx*ny]=rhoi; } } } __global__ void compute_V(DAT* Vx, DAT* Vy, DAT* Vz, DAT* P, DAT* Txx, DAT* Tyy, DAT* Tzz, DAT* Txy, DAT* Txz, DAT* Tyz, DAT* dVxdt, DAT* dVydt, DAT* dVzdt, DAT* Rx, DAT* Ry, DAT* Rz, DAT* rho, const DAT dtV, const DAT g, const DAT dx, const DAT dy, const DAT dz, const int nx, const int ny, const int nz){ int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z if (ix>0 && iy<ny && ix<nx && iz<nz){ Rx[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] = 1 * ( -1*(P[ ix +(iy )* nx +(iz )* nx * ny ] - P[(ix-1)+(iy )* nx +(iz )* nx * ny ])/dx + (Txx[ ix +(iy )* nx +(iz )* nx * ny ] - Txx[(ix-1)+(iy )* nx +(iz )* nx * ny ])/dx + (Txy[(ix)+(iy+1)*(nx+1)+(iz )*(nx+1)*(ny+1)] - Txy[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny+1)])/dy + (Txz[(ix)+(iy )*(nx+1)+(iz+1)*(nx+1)*(ny )] - Txz[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dz); dVxdt[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] = (1-nu/nx)*dVxdt[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] + Rx[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny]; Vx[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] = Vx[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] + dtV*dVxdt[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny]; } if (iy>0 && iy<ny && ix<nx && iz<nz){ Ry[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] = 1 * ( -1*(P[(ix )+(iy )* nx +(iz )* nx * ny ] - P[(ix )+(iy-1)* nx +(iz )* nx * ny ])/dy + (Tyy[(ix )+(iy )* nx +(iz )* nx * ny ] - Tyy[(ix )+(iy-1)* nx +(iz )* nx * ny ])/dy + (Txy[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny+1)] - Txy[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny+1)])/dx + (Tyz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny+1)] - Tyz[(ix )+(iy 
)*(nx )+(iz )*(nx )*(ny+1)])/dz + .5*g*(rho[(ix )+(iy )* nx +(iz )* nx * ny ] + rho[(ix )+(iy-1)* nx +(iz )* nx * ny ])); dVydt[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] = (1-nu/ny)*dVydt[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] + Ry[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)]; Vy[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] = Vy[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] + dtV*dVydt[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)]; } if (iz>0 && iy<ny && ix<nx && iz<nz){ Rz[ix+(iy)*(nx )+(iz)*(nx )*(ny )] = 1 * ( -1*(P[(ix )+(iy )* nx +(iz )* nx * ny ] - P[(ix )+(iy )* nx +(iz-1)* nx * ny ])/dz + (Tzz[(ix )+(iy )* nx +(iz )* nx * ny ] - Tzz[(ix )+(iy )* nx +(iz-1)* nx * ny ])/dz + (Txz[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )] - Txz[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx + (Tyz[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)] - Tyz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy); dVzdt[ix+(iy)*(nx )+(iz)*(nx )*(ny )] = (1-nu/nz)*dVzdt[ix+(iy)*(nx )+(iz)*(nx )*(ny )] + Rz[ix+(iy)*(nx )+(iz)*(nx )*(ny )]; Vz[ix+(iy)*(nx )+(iz)*(nx )*(ny )] = Vz[ix+(iy)*(nx )+(iz)*(nx )*(ny )] + dtV*dVzdt[ix+(iy)*(nx )+(iz)*(nx )*(ny )]; } } __global__ void compute_P(DAT* Vx, DAT* Vy, DAT* Vz, DAT* P, const DAT dtP, const DAT k, const DAT dx, const DAT dy, const DAT dz, const int nx, const int ny, const int nz){ int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z if (iy<ny && ix<nx && iz<nz){ P[ix+(iy)*nx+(iz)*nx*ny] = P[ix+(iy)*nx+(iz)*nx*ny] - dtP*k*( (Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx+ (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy+ (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz); } } __global__ void compute_T(DAT* Vx, DAT* Vy, DAT* Vz, DAT* Txx, DAT* Tyy, DAT* Tzz, DAT* Txy, DAT* Txz, DAT* Tyz, const DAT eta, const DAT dx, const DAT dy, const DAT dz, const int nx, const int ny, const int nz){ int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z if (iy<ny && ix<nx && iz<nz){ Txx[ix+(iy)*nx+(iz)*nx*ny] = 2*eta*( (Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx - ((Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx+ (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy+ (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz)/((DAT)3)); Tyy[ix+(iy)*nx+(iz)*nx*ny] = 2*eta*( (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy - ((Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx+ (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy+ (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz)/((DAT)3)); Tzz[ix+(iy)*nx+(iz)*nx*ny] = 2*eta*( (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz - ((Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx+ (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy+ (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz)/((DAT)3)); } if(iy<ny && ix<nx && iz<nz && ix>0 && iy >0){ Txy[(ix)+(iy 
)*(nx+1)+(iz )*(nx+1)*(ny+1)] = eta*( (Vx[(ix)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )] - Vx[(ix )+(iy-1)*(nx+1)+(iz )*(nx+1)*(ny )])/dy + (Vy[(ix)+(iy )*(nx )+(iz )*(nx )*(ny+1)] - Vy[(ix-1)+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dx); } if(iy<ny && ix<nx && iz<nz && ix>0 && iz >0){ Txz[(ix)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )] = eta*( (Vx[(ix)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )] - Vx[(ix )+(iy )*(nx+1)+(iz-1)*(nx+1)*(ny )])/dz + (Vz[(ix)+(iy )*(nx )+(iz )*(nx )*(ny )] - Vz[(ix-1)+(iy )*(nx )+(iz )*(nx )*(ny )])/dx); } if(iy<ny && ix<nx && iz<nz && iy>0 && iz >0){ Tyz[(ix)+(iy )*(nx )+(iz )*(nx )*(ny+1)] = eta*( (Vy[(ix)+(iy )*(nx )+(iz )*(nx )*(ny+1)] - Vy[(ix)+(iy )*(nx )+(iz-1)*(nx )*(ny+1)])/dz + (Vz[(ix)+(iy )*(nx )+(iz )*(nx )*(ny )] - Vz[(ix)+(iy-1)*(nx )+(iz )*(nx )*(ny )])/dy); } } int main(){ int i, it; size_t N=nx*ny*nz, mem=N*sizeof(DAT); // Set up GPU int gpu_id=-1; int me = 0; dim3 grid, block; block.x = BLOCK_X; grid.x = GRID_X; block.y = BLOCK_Y; grid.y = GRID_Y; block.z = BLOCK_Z; grid.z = GRID_Z; gpu_id = GPU_ID; hipSetDevice(gpu_id); hipGetDevice(&gpu_id); hipDeviceReset(); hipDeviceSetCacheConfig(hipFuncCachePreferL1); // set L1 to prefered printf("Process uses GPU with id %d.\n",gpu_id); printf("%dx%dx%d, %1.3f GB, %d iterations.\n", nx,ny,nz, 20*mem/1024./1024./1024., nt); printf("Launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n", grid.x, grid.y, grid.z, block.x, block.y, block.z); // Initial arrays zeros(x ,nx ,ny ,nz ); zeros(y ,nx ,ny ,nz ); zeros(z ,nx ,ny ,nz ); zeros(rho ,nx ,ny ,nz ); zeros(P ,nx ,ny ,nz ); zeros(Txx ,nx ,ny ,nz ); zeros(Tyy ,nx ,ny ,nz ); zeros(Tzz ,nx ,ny ,nz ); zeros(Txy ,nx+1,ny+1,nz ); zeros(Txz ,nx+1,ny ,nz+1); zeros(Tyz ,nx ,ny+1,nz+1); zeros(Vx ,nx+1,ny ,nz ); zeros(Vy ,nx ,ny+1,nz ); zeros(Vz ,nx+1,ny ,nz+1); zeros(dVxdt,nx+1,ny ,nz ); zeros(dVydt,nx ,ny+1,nz ); zeros(dVzdt,nx+1,ny ,nz+1); zeros(Rx ,nx+1,ny ,nz ); zeros(Ry ,nx ,ny+1,nz ); zeros(Rz ,nx+1,ny ,nz+1); zeros(__device_maxval ,grid.x,grid.y,grid.z); DAT Rx_MAX = 1.0; DAT Ry_MAX = 0.0; DAT Rz_MAX = 0.0; // Initial conditions hipLaunchKernelGGL(( init), dim3(grid),dim3(block), 0, 0, x_d, y_d, z_d, rho_d, Lx, Ly, Lz, dx, dy, dz, nx, ny, nz); hipDeviceSynchronize(); // Action for (it=0;it<nt;it++){ if (it==1){ tic(); } hipLaunchKernelGGL(( compute_P), dim3(grid),dim3(block), 0, 0, Vx_d, Vy_d, Vz_d, P_d, dtP, k, dx, dy, dz, nx, ny, nz); hipDeviceSynchronize(); hipLaunchKernelGGL(( compute_T), dim3(grid),dim3(block), 0, 0, Vx_d, Vy_d, Vz_d, Txx_d, Tyy_d, Tzz_d, Txy_d, Txz_d, Tyz_d, eta, dx, dy, dz, nx, ny, nz); hipDeviceSynchronize(); hipLaunchKernelGGL(( compute_V), dim3(grid),dim3(block), 0, 0, Vx_d, Vy_d, Vz_d, P_d, Txx_d, Tyy_d, Tzz_d, Txy_d, Txz_d, Tyz_d, dVxdt_d, dVydt_d, dVzdt_d, Rx_d, Ry_d, Rz_d, rho_d, dtV, g, dx ,dy ,dz ,nx ,ny ,nz ); hipDeviceSynchronize(); __DEVICE_max(Rx,nx,ny,nz); __DEVICE_max(Ry,nx,ny,nz); __DEVICE_max(Rz,nx,ny,nz); // if (it%nmax==0){ printf("max(Rx,Ry,Rz)=%1.3e, %1.3e, %1.3e \n", Rx_MAX, Ry_MAX, Rz_MAX); } if (Rx_MAX < epsi && Ry_MAX < epsi && Rz_MAX < epsi && it > nmax){ printf("Broke on iteration %d \n",it); printf("max(Rx,Ry,Rz)=%1.3e, %1.3e, %1.3e \n", Rx_MAX, Ry_MAX, Rz_MAX); break; } }//it tim("Time (s), Effective MTP (GB/s)",20*mem*(it-3)*20/1024./1024./1024.); save_info(); SaveArray(P ,nx ,ny ,nz ,"P" ); SaveArray(Vx,nx+1,ny ,nz ,"Vx"); SaveArray(Vy,nx ,ny+1,nz ,"Vy"); SaveArray(Vz,nx ,ny ,nz+1,"Vz"); SaveArray(Rx,nx+1,ny ,nz ,"Rx"); SaveArray(Ry,nx ,ny+1,nz ,"Ry"); SaveArray(Rz,nx ,ny ,nz+1,"Rz"); SaveArray(Txx,nx ,ny ,nz ,"Txx"); SaveArray(Tyy,nx ,ny ,nz 
,"Tyy"); SaveArray(Tzz,nx ,ny ,nz ,"Tzz"); SaveArray(Txy,nx+1,ny+1,nz ,"Txy"); SaveArray(Txz,nx+1,ny ,nz+1,"Txz"); SaveArray(Tyz,nx ,ny+1,nz+1,"Tyz"); free_all(x ); free_all(y ); free_all(z ); free_all(rho); free_all(P ); free_all(Vx); free_all(Vy); free_all(Vz); free_all(dVxdt); free_all(dVydt); free_all(dVzdt); free_all(Rx); free_all(Ry); free_all(Rz); free_all(Txx); free_all(Tyy); free_all(Tzz); free_all(Txy); free_all(Txz); free_all(Tyz); clean_cuda(); }
aad95623ed8b65bde4b0aac86eecfed6bff5c481.cu
// 3D Bouyant Ball viscous code // nvcc -arch=sm_70 -O3 visBouy_accel_3d.cu // run: ./a.out #include "stdio.h" #include "stdlib.h" #include "math.h" #include "cuda.h" #define USE_SINGLE_PRECISION /* Comment this line using "//" if you want to use double precision. */ #ifdef USE_SINGLE_PRECISION #define DAT float #define PRECIS 4 #else #define DAT double #define PRECIS 8 #endif #define GPU_ID 0 #define OVERLENGTH_X 1 #define OVERLENGTH_Y 1 #define OVERLENGTH_Z 1 #define zeros(A,nx,ny,nz) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc(((nx)*(ny)*(nz))*sizeof(DAT)); \ for(i=0; i < ((nx)*(ny)*(nz)); i++){ A##_h[i]=(DAT)0.0; } \ cudaMalloc(&A##_d ,((nx)*(ny)*(nz))*sizeof(DAT)); \ cudaMemcpy( A##_d,A##_h,((nx)*(ny)*(nz))*sizeof(DAT),cudaMemcpyHostToDevice); #define free_all(A) free(A##_h); cudaFree(A##_d); #define gather(A,nx,ny,nz) cudaMemcpy( A##_h,A##_d,((nx)*(ny)*(nz))*sizeof(DAT),cudaMemcpyDeviceToHost); // --------------------------------------------------------------------- // // Physics const DAT Lx = 10.0; const DAT Ly = 10.0; const DAT Lz = 10.0; const DAT k = 1.0; const DAT rhoi= 10.0; const DAT g = -10.0; const DAT eta = 1.0; const DAT nu = 6.0; const DAT epsi= 1.0e-6; // Numerics #define BLOCK_X 8 #define BLOCK_Y 8 #define BLOCK_Z 8 #define GRID_X 4 #define GRID_Y 4 #define GRID_Z 4 const int nx = BLOCK_X*GRID_X - OVERLENGTH_X; const int ny = BLOCK_Y*GRID_Y - OVERLENGTH_Y; const int nz = BLOCK_Z*GRID_Z - OVERLENGTH_Z; const int nt = 200000; const int nmax = 100; const DAT dx = Lx/((DAT)nx); const DAT dy = Ly/((DAT)ny); const DAT dz = Lz/((DAT)nz); const DAT dtV = (min(dx, min(dy,dz))*min(dx,min(dy,dz)))/(eta*4.1*((DAT)4)); const DAT dtP = 4.1*eta/((DAT)(4*ny)); // --------------------------------------------------------------------- // void save_info(int me, const int nx, const int ny, const int nz){ FILE* fid; if (me==0){ fid=fopen("0_nxyz.inf" ,"w"); fprintf(fid,"%d %d %d %d", PRECIS, nx, ny, nz); fclose(fid); } } #define save_info() save_info(me, nx, ny, nz); void save_array(DAT* A, int nx, int ny, int nz, int me, const char A_name[]){ char* fname; FILE* fid; asprintf(&fname, "%d_%s.res" , me, A_name); fid=fopen(fname, "wb"); fwrite(A, sizeof(DAT), (nx)*(ny)*(nz), fid); fclose(fid); free(fname); } #define SaveArray(A,nx,ny,nz,A_name) gather(A,nx,ny,nz); save_array(A##_h,nx,ny,nz,me,A_name); void clean_cuda(){ cudaError_t ce = cudaGetLastError(); if(ce != cudaSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", cudaGetErrorString(ce)); cudaDeviceReset();} } // Timer #include "sys/time.h" double timer_start = 0; double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; } void tic(){ timer_start = cpu_sec(); } double toc(){ return cpu_sec()-timer_start; } void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); } // MIN and MAX function // DAT device_MAX=0.0; #define NB_THREADS (BLOCK_X*BLOCK_Y*BLOCK_Z) #define blockId (blockIdx.x + blockIdx.y *gridDim.x + blockIdx.z*gridDim.y*gridDim.x) #define threadId (threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x) #define isBlockMaster (threadIdx.x==0 && threadIdx.y==0 && threadIdx.z ==0) // maxval // #define block_max_init() DAT __thread_maxval=0.0; #define __thread_max(A,nx_A,ny_A,nz_A) if (iy<ny_A && ix<nx_A && iz<nz_A){ __thread_maxval = max((__thread_maxval) , abs(A[ix + iy*nx_A +iz*ny_A*nx_A])); } __shared__ volatile DAT __block_maxval; #define __block_max(A,nx_A,ny_A,nz_A) 
__thread_max(A,nx_A,ny_A,nz_A); if (isBlockMaster){ __block_maxval=0; } __syncthreads(); \ for (int i=0; i < (NB_THREADS); i++){ if (i==threadId){ __block_maxval = max(__block_maxval,__thread_maxval); } __syncthreads(); } __global__ void __device_max_d(DAT*A, const int nx_A,const int ny_A, const int nz_A, DAT*__device_maxval){ block_max_init(); int ix = blockIdx.x*blockDim.x + threadIdx.x; int iy = blockIdx.y*blockDim.y + threadIdx.y; int iz = blockIdx.z*blockDim.z + threadIdx.z; // find the maxval for each block __block_max(A,nx_A,ny_A,nz_A); __device_maxval[blockId] = __block_maxval; } #define __DEVICE_max(A,nx_A,ny_A,nz_A) __device_max_d<<<grid, block>>>(A##_d, nx_A, ny_A, nz_A, __device_maxval_d); \ gather(__device_maxval,grid.x,grid.y,grid.z); device_MAX=(DAT)0.0; \ for (int i=0; i < (grid.x*grid.y*grid.z); i++){ \ device_MAX = max(device_MAX,__device_maxval_h[i]); \ } \ A##_MAX = (device_MAX); // --------------------------------------------------------------------- // // Computing physics kernels __global__ void init(DAT* x, DAT* y, DAT* z, DAT* rho, const DAT Lx, const DAT Ly, const DAT Lz, const DAT dx, const DAT dy, const DAT dz, const int nx, const int ny, const int nz){ int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z if (iy<ny && ix<nx && iz<nz){ x[ix+iy*nx+iz*nx*ny] = (DAT)ix*dx + (-Lx+dx)/2.0; } if (iy<ny && ix<nx && iz<nz){ y[ix+iy*nx+iz*nx*ny] = (DAT)iy*dy + (-Ly+dy)/2.0; } if (iy<ny && ix<nx && iz<nz){ z[ix+iy*nx+iz*nx*ny] = (DAT)iz*dz + (-Lz+dz)/2.0; } if (iy<ny && ix<nx && iz<nz){ if(x[ix+iy*nx+iz*nx*ny]*x[ix+iy*nx+iz*nx*ny] + y[ix+iy*nx+iz*nx*ny]*y[ix+iy*nx+iz*nx*ny] + z[ix+iy*nx+iz*nx*ny]*z[ix+iy*nx+iz*nx*ny] < 1){ rho[ix+iy*nx+iz*nx*ny]=rhoi; } } } __global__ void compute_V(DAT* Vx, DAT* Vy, DAT* Vz, DAT* P, DAT* Txx, DAT* Tyy, DAT* Tzz, DAT* Txy, DAT* Txz, DAT* Tyz, DAT* dVxdt, DAT* dVydt, DAT* dVzdt, DAT* Rx, DAT* Ry, DAT* Rz, DAT* rho, const DAT dtV, const DAT g, const DAT dx, const DAT dy, const DAT dz, const int nx, const int ny, const int nz){ int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z if (ix>0 && iy<ny && ix<nx && iz<nz){ Rx[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] = 1 * ( -1*(P[ ix +(iy )* nx +(iz )* nx * ny ] - P[(ix-1)+(iy )* nx +(iz )* nx * ny ])/dx + (Txx[ ix +(iy )* nx +(iz )* nx * ny ] - Txx[(ix-1)+(iy )* nx +(iz )* nx * ny ])/dx + (Txy[(ix)+(iy+1)*(nx+1)+(iz )*(nx+1)*(ny+1)] - Txy[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny+1)])/dy + (Txz[(ix)+(iy )*(nx+1)+(iz+1)*(nx+1)*(ny )] - Txz[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dz); dVxdt[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] = (1-nu/nx)*dVxdt[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] + Rx[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny]; Vx[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] = Vx[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny] + dtV*dVxdt[ix+(iy)*(nx+1)+(iz)*(nx+1)*ny]; } if (iy>0 && iy<ny && ix<nx && iz<nz){ Ry[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] = 1 * ( -1*(P[(ix )+(iy )* nx +(iz )* nx * ny ] - P[(ix )+(iy-1)* nx +(iz )* nx * ny ])/dy + (Tyy[(ix )+(iy )* nx +(iz )* nx * ny ] - Tyy[(ix )+(iy-1)* nx +(iz )* nx * ny ])/dy + (Txy[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny+1)] - Txy[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny+1)])/dx + (Tyz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny+1)] - Tyz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dz + .5*g*(rho[(ix )+(iy )* nx +(iz )* nx * 
ny ] + rho[(ix )+(iy-1)* nx +(iz )* nx * ny ])); dVydt[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] = (1-nu/ny)*dVydt[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] + Ry[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)]; Vy[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] = Vy[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)] + dtV*dVydt[ix+(iy)*(nx )+(iz)*(nx )*(ny+1)]; } if (iz>0 && iy<ny && ix<nx && iz<nz){ Rz[ix+(iy)*(nx )+(iz)*(nx )*(ny )] = 1 * ( -1*(P[(ix )+(iy )* nx +(iz )* nx * ny ] - P[(ix )+(iy )* nx +(iz-1)* nx * ny ])/dz + (Tzz[(ix )+(iy )* nx +(iz )* nx * ny ] - Tzz[(ix )+(iy )* nx +(iz-1)* nx * ny ])/dz + (Txz[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )] - Txz[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx + (Tyz[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)] - Tyz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy); dVzdt[ix+(iy)*(nx )+(iz)*(nx )*(ny )] = (1-nu/nz)*dVzdt[ix+(iy)*(nx )+(iz)*(nx )*(ny )] + Rz[ix+(iy)*(nx )+(iz)*(nx )*(ny )]; Vz[ix+(iy)*(nx )+(iz)*(nx )*(ny )] = Vz[ix+(iy)*(nx )+(iz)*(nx )*(ny )] + dtV*dVzdt[ix+(iy)*(nx )+(iz)*(nx )*(ny )]; } } __global__ void compute_P(DAT* Vx, DAT* Vy, DAT* Vz, DAT* P, const DAT dtP, const DAT k, const DAT dx, const DAT dy, const DAT dz, const int nx, const int ny, const int nz){ int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z if (iy<ny && ix<nx && iz<nz){ P[ix+(iy)*nx+(iz)*nx*ny] = P[ix+(iy)*nx+(iz)*nx*ny] - dtP*k*( (Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx+ (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy+ (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz); } } __global__ void compute_T(DAT* Vx, DAT* Vy, DAT* Vz, DAT* Txx, DAT* Tyy, DAT* Tzz, DAT* Txy, DAT* Txz, DAT* Tyz, const DAT eta, const DAT dx, const DAT dy, const DAT dz, const int nx, const int ny, const int nz){ int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z if (iy<ny && ix<nx && iz<nz){ Txx[ix+(iy)*nx+(iz)*nx*ny] = 2*eta*( (Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx - ((Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx+ (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy+ (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz)/((DAT)3)); Tyy[ix+(iy)*nx+(iz)*nx*ny] = 2*eta*( (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy - ((Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx+ (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy+ (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz)/((DAT)3)); Tzz[ix+(iy)*nx+(iz)*nx*ny] = 2*eta*( (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz - ((Vx[(ix+1)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )]-Vx[(ix )+(iy )*(nx+1)+(iz )*(nx+1)*(ny )])/dx+ (Vy[(ix )+(iy+1)*(nx )+(iz )*(nx )*(ny+1)]-Vy[(ix )+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dy+ (Vz[(ix )+(iy )*(nx )+(iz+1)*(nx )*(ny )]-Vz[(ix )+(iy )*(nx )+(iz )*(nx )*(ny )])/dz)/((DAT)3)); } if(iy<ny && ix<nx && iz<nz && ix>0 && iy >0){ Txy[(ix)+(iy )*(nx+1)+(iz )*(nx+1)*(ny+1)] = eta*( (Vx[(ix)+(iy )*(nx+1)+(iz 
)*(nx+1)*(ny )] - Vx[(ix )+(iy-1)*(nx+1)+(iz )*(nx+1)*(ny )])/dy + (Vy[(ix)+(iy )*(nx )+(iz )*(nx )*(ny+1)] - Vy[(ix-1)+(iy )*(nx )+(iz )*(nx )*(ny+1)])/dx); } if(iy<ny && ix<nx && iz<nz && ix>0 && iz >0){ Txz[(ix)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )] = eta*( (Vx[(ix)+(iy )*(nx+1)+(iz )*(nx+1)*(ny )] - Vx[(ix )+(iy )*(nx+1)+(iz-1)*(nx+1)*(ny )])/dz + (Vz[(ix)+(iy )*(nx )+(iz )*(nx )*(ny )] - Vz[(ix-1)+(iy )*(nx )+(iz )*(nx )*(ny )])/dx); } if(iy<ny && ix<nx && iz<nz && iy>0 && iz >0){ Tyz[(ix)+(iy )*(nx )+(iz )*(nx )*(ny+1)] = eta*( (Vy[(ix)+(iy )*(nx )+(iz )*(nx )*(ny+1)] - Vy[(ix)+(iy )*(nx )+(iz-1)*(nx )*(ny+1)])/dz + (Vz[(ix)+(iy )*(nx )+(iz )*(nx )*(ny )] - Vz[(ix)+(iy-1)*(nx )+(iz )*(nx )*(ny )])/dy); } } int main(){ int i, it; size_t N=nx*ny*nz, mem=N*sizeof(DAT); // Set up GPU int gpu_id=-1; int me = 0; dim3 grid, block; block.x = BLOCK_X; grid.x = GRID_X; block.y = BLOCK_Y; grid.y = GRID_Y; block.z = BLOCK_Z; grid.z = GRID_Z; gpu_id = GPU_ID; cudaSetDevice(gpu_id); cudaGetDevice(&gpu_id); cudaDeviceReset(); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); // set L1 to prefered printf("Process uses GPU with id %d.\n",gpu_id); printf("%dx%dx%d, %1.3f GB, %d iterations.\n", nx,ny,nz, 20*mem/1024./1024./1024., nt); printf("Launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n", grid.x, grid.y, grid.z, block.x, block.y, block.z); // Initial arrays zeros(x ,nx ,ny ,nz ); zeros(y ,nx ,ny ,nz ); zeros(z ,nx ,ny ,nz ); zeros(rho ,nx ,ny ,nz ); zeros(P ,nx ,ny ,nz ); zeros(Txx ,nx ,ny ,nz ); zeros(Tyy ,nx ,ny ,nz ); zeros(Tzz ,nx ,ny ,nz ); zeros(Txy ,nx+1,ny+1,nz ); zeros(Txz ,nx+1,ny ,nz+1); zeros(Tyz ,nx ,ny+1,nz+1); zeros(Vx ,nx+1,ny ,nz ); zeros(Vy ,nx ,ny+1,nz ); zeros(Vz ,nx+1,ny ,nz+1); zeros(dVxdt,nx+1,ny ,nz ); zeros(dVydt,nx ,ny+1,nz ); zeros(dVzdt,nx+1,ny ,nz+1); zeros(Rx ,nx+1,ny ,nz ); zeros(Ry ,nx ,ny+1,nz ); zeros(Rz ,nx+1,ny ,nz+1); zeros(__device_maxval ,grid.x,grid.y,grid.z); DAT Rx_MAX = 1.0; DAT Ry_MAX = 0.0; DAT Rz_MAX = 0.0; // Initial conditions init<<<grid,block>>>(x_d, y_d, z_d, rho_d, Lx, Ly, Lz, dx, dy, dz, nx, ny, nz); cudaDeviceSynchronize(); // Action for (it=0;it<nt;it++){ if (it==1){ tic(); } compute_P<<<grid,block>>>(Vx_d, Vy_d, Vz_d, P_d, dtP, k, dx, dy, dz, nx, ny, nz); cudaDeviceSynchronize(); compute_T<<<grid,block>>>(Vx_d, Vy_d, Vz_d, Txx_d, Tyy_d, Tzz_d, Txy_d, Txz_d, Tyz_d, eta, dx, dy, dz, nx, ny, nz); cudaDeviceSynchronize(); compute_V<<<grid,block>>>(Vx_d, Vy_d, Vz_d, P_d, Txx_d, Tyy_d, Tzz_d, Txy_d, Txz_d, Tyz_d, dVxdt_d, dVydt_d, dVzdt_d, Rx_d, Ry_d, Rz_d, rho_d, dtV, g, dx ,dy ,dz ,nx ,ny ,nz ); cudaDeviceSynchronize(); __DEVICE_max(Rx,nx,ny,nz); __DEVICE_max(Ry,nx,ny,nz); __DEVICE_max(Rz,nx,ny,nz); // if (it%nmax==0){ printf("max(Rx,Ry,Rz)=%1.3e, %1.3e, %1.3e \n", Rx_MAX, Ry_MAX, Rz_MAX); } if (Rx_MAX < epsi && Ry_MAX < epsi && Rz_MAX < epsi && it > nmax){ printf("Broke on iteration %d \n",it); printf("max(Rx,Ry,Rz)=%1.3e, %1.3e, %1.3e \n", Rx_MAX, Ry_MAX, Rz_MAX); break; } }//it tim("Time (s), Effective MTP (GB/s)",20*mem*(it-3)*20/1024./1024./1024.); save_info(); SaveArray(P ,nx ,ny ,nz ,"P" ); SaveArray(Vx,nx+1,ny ,nz ,"Vx"); SaveArray(Vy,nx ,ny+1,nz ,"Vy"); SaveArray(Vz,nx ,ny ,nz+1,"Vz"); SaveArray(Rx,nx+1,ny ,nz ,"Rx"); SaveArray(Ry,nx ,ny+1,nz ,"Ry"); SaveArray(Rz,nx ,ny ,nz+1,"Rz"); SaveArray(Txx,nx ,ny ,nz ,"Txx"); SaveArray(Tyy,nx ,ny ,nz ,"Tyy"); SaveArray(Tzz,nx ,ny ,nz ,"Tzz"); SaveArray(Txy,nx+1,ny+1,nz ,"Txy"); SaveArray(Txz,nx+1,ny ,nz+1,"Txz"); SaveArray(Tyz,nx ,ny+1,nz+1,"Tyz"); free_all(x ); free_all(y ); free_all(z ); 
free_all(rho); free_all(P ); free_all(Vx); free_all(Vy); free_all(Vz); free_all(dVxdt); free_all(dVydt); free_all(dVzdt); free_all(Rx); free_all(Ry); free_all(Rz); free_all(Txx); free_all(Tyy); free_all(Tzz); free_all(Txy); free_all(Txz); free_all(Tyz); clean_cuda(); }
c2bd70fce78eb5050c45b81d6cd014ba5b78bd89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/extract_image_patches_impl.cuh" template <typename T> __global__ void ExtractImagePatches(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const T *input, T *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < output_size; pos += blockDim.x * gridDim.x) { const int64_t batch_index = need_batch ? (static_cast<int64_t>(pos) / other_stride) : 0; const int64_t inner_index = need_batch ? (static_cast<int64_t>(pos) - batch_index * other_stride) : static_cast<int64_t>(pos); // inner index const int64_t patch_index = inner_index / patch_stride; const int64_t patch_offset = (inner_index - patch_index * patch_stride) / output_depth; // row const int64_t row_index = patch_index / output_cols; const int64_t row_offset = patch_offset / row_stride; const int64_t input_row = row_index * stride_row + row_offset * rate_row - row_padding_top; if (input_row < 0 || input_row >= input_row_size) { output[pos] = static_cast<T>(0); return; } // col const int64_t col_index = patch_index - row_index * output_cols; const int64_t col_offset = patch_offset - row_offset * row_stride; const int64_t input_col = col_index * stride_col + col_offset * rate_col - col_padding_left; if (input_col < 0 || input_col >= input_col_size) { output[pos] = static_cast<T>(0); return; } // depth const int64_t depth = inner_index - (inner_index / output_depth) * output_depth; // input index const int64_t input_index = depth + input_col * col_input_stride + input_row * row_input_stride + batch_index * patch_input_stride; output[pos] = input[static_cast<size_t>(input_index)]; } return; } template <typename T> void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const T *input, T *output, hipStream_t stream) { hipLaunchKernelGGL(( ExtractImagePatches), dim3(GET_BLOCKS(output_size)), dim3(GET_THREADS), 0, stream, output_size, stride_row, stride_col, rate_row, rate_col, output_cols, need_batch, row_stride, patch_stride, other_stride, input_row_size, input_col_size, row_padding_top, col_padding_left, col_input_stride, row_input_stride, patch_input_stride, output_depth, input, output); } 
template void CalExtractImagePatchesNHWC<int>(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int *input, int *output, hipStream_t stream); template void CalExtractImagePatchesNHWC<float>(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const float *input, float *output, hipStream_t stream); template void CalExtractImagePatchesNHWC<half>(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const half *input, half *output, hipStream_t stream); template void CalExtractImagePatchesNHWC<double>(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const double *input, double *output, hipStream_t stream);
c2bd70fce78eb5050c45b81d6cd014ba5b78bd89.cu
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/extract_image_patches_impl.cuh" template <typename T> __global__ void ExtractImagePatches(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const T *input, T *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < output_size; pos += blockDim.x * gridDim.x) { const int64_t batch_index = need_batch ? (static_cast<int64_t>(pos) / other_stride) : 0; const int64_t inner_index = need_batch ? (static_cast<int64_t>(pos) - batch_index * other_stride) : static_cast<int64_t>(pos); // inner index const int64_t patch_index = inner_index / patch_stride; const int64_t patch_offset = (inner_index - patch_index * patch_stride) / output_depth; // row const int64_t row_index = patch_index / output_cols; const int64_t row_offset = patch_offset / row_stride; const int64_t input_row = row_index * stride_row + row_offset * rate_row - row_padding_top; if (input_row < 0 || input_row >= input_row_size) { output[pos] = static_cast<T>(0); return; } // col const int64_t col_index = patch_index - row_index * output_cols; const int64_t col_offset = patch_offset - row_offset * row_stride; const int64_t input_col = col_index * stride_col + col_offset * rate_col - col_padding_left; if (input_col < 0 || input_col >= input_col_size) { output[pos] = static_cast<T>(0); return; } // depth const int64_t depth = inner_index - (inner_index / output_depth) * output_depth; // input index const int64_t input_index = depth + input_col * col_input_stride + input_row * row_input_stride + batch_index * patch_input_stride; output[pos] = input[static_cast<size_t>(input_index)]; } return; } template <typename T> void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const T *input, T *output, cudaStream_t stream) { ExtractImagePatches<<<GET_BLOCKS(output_size), GET_THREADS, 0, stream>>>( output_size, stride_row, stride_col, rate_row, rate_col, output_cols, need_batch, row_stride, patch_stride, other_stride, input_row_size, input_col_size, row_padding_top, col_padding_left, col_input_stride, row_input_stride, patch_input_stride, output_depth, input, output); } template void CalExtractImagePatchesNHWC<int>(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, 
int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int *input, int *output, cudaStream_t stream); template void CalExtractImagePatchesNHWC<float>(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const float *input, float *output, cudaStream_t stream); template void CalExtractImagePatchesNHWC<half>(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const half *input, half *output, cudaStream_t stream); template void CalExtractImagePatchesNHWC<double>(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const double *input, double *output, cudaStream_t stream);
95baa823f97672b1ef43609a465be6d0dcf7e62a.hip
// !!! This is a file automatically generated by hipify!!!
#include <sys/types.h>
#include <math.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <jansson.h>
#include <sys/time.h>
#include "point.h"
#include "kmeans.h"
#include "config.h"

int NUMBER_OF_POINTS = 100000;
int NUMBER_OF_CENTROIDS = 10;

/**
 * Print centroid information
 */
void printCentroids( Centroid* centroids )
{
    for (int i = 0; i < NUMBER_OF_CENTROIDS; i++)
    {
        printf("[x=%lf, y=%lf, x_sum=%lf, y_sum=%lf, num_points=%i]\n",
            centroids[i].x, centroids[i].y, centroids[i].x_sum,
            centroids[i].y_sum, centroids[i].num_points);
    }
    printf("--------------------------------------------------\n");
}

/**
 * Experiment code, used for timing and repeated experiments
 */
float runKmeans( Point* points, Centroid* centroids )
{
    struct timeval time_before, time_after, time_result;

    // Start timing
    gettimeofday(&time_before, NULL);

    for (int i = 0; i < REPEAT; i++)
    {
        // Initialize the centroids with the first k points
        for (int ci = 0; ci < NUMBER_OF_CENTROIDS; ci++)
        {
            centroids[ci].x = points[ci].x;
            centroids[ci].y = points[ci].y;
        }

        kmeans(points, centroids, NUMBER_OF_POINTS, NUMBER_OF_CENTROIDS);

        if (i + 1 == REPEAT)
        {
            printCentroids(centroids);
        }
    }

    // Stop timing
    gettimeofday(&time_after, NULL);
    timersub(&time_after, &time_before, &time_result);
    float repeat_time = (time_result.tv_sec*1000.0) + (time_result.tv_usec/1000.0);
    return repeat_time / REPEAT;
}

int main()
{
    json_t *json;
    json_error_t error;
    json_t *value;
    size_t index;
    float total_time = 0;

    // Initialize the environment
    hipSetDevice(0);

    // Initialize memory and allocate variables
    Point* points = (Point*) malloc(NUMBER_OF_POINTS * sizeof(Point));
    Centroid* centroids = (Centroid*) malloc(NUMBER_OF_CENTROIDS * sizeof(Centroid));

    // Read the dataset from the json file
    json = json_load_file("../points.json", 0, &error);
    if (!json)
    {
        printf("Error parsing Json file");
        fflush(stdout);
        return -1;
    }
    else
    {
        json_array_foreach(json, index, value)
        {
            float x = json_number_value(json_array_get(value, 0));
            float y = json_number_value(json_array_get(value, 1));
            points[index].x = x;
            points[index].y = y;
        }
    }

    // Start the experiment
    total_time = runKmeans(points, centroids);

    printf("Iterations: %d\n", ITERATIONS);
    printf("Average Time: %f ms\n", total_time);

    // Free memory
    free(centroids);
    free(points);
    hipDeviceReset();

    return 0;
}
95baa823f97672b1ef43609a465be6d0dcf7e62a.cu
#include <sys/types.h>
#include <math.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <jansson.h>
#include <sys/time.h>
#include "point.h"
#include "kmeans.h"
#include "config.h"

int NUMBER_OF_POINTS = 100000;
int NUMBER_OF_CENTROIDS = 10;

/**
 * Print centroid information
 */
void printCentroids( Centroid* centroids )
{
    for (int i = 0; i < NUMBER_OF_CENTROIDS; i++)
    {
        printf("[x=%lf, y=%lf, x_sum=%lf, y_sum=%lf, num_points=%i]\n",
            centroids[i].x, centroids[i].y, centroids[i].x_sum,
            centroids[i].y_sum, centroids[i].num_points);
    }
    printf("--------------------------------------------------\n");
}

/**
 * Experiment code, used for timing and repeated experiments
 */
float runKmeans( Point* points, Centroid* centroids )
{
    struct timeval time_before, time_after, time_result;

    // Start timing
    gettimeofday(&time_before, NULL);

    for (int i = 0; i < REPEAT; i++)
    {
        // Initialize the centroids with the first k points
        for (int ci = 0; ci < NUMBER_OF_CENTROIDS; ci++)
        {
            centroids[ci].x = points[ci].x;
            centroids[ci].y = points[ci].y;
        }

        kmeans(points, centroids, NUMBER_OF_POINTS, NUMBER_OF_CENTROIDS);

        if (i + 1 == REPEAT)
        {
            printCentroids(centroids);
        }
    }

    // Stop timing
    gettimeofday(&time_after, NULL);
    timersub(&time_after, &time_before, &time_result);
    float repeat_time = (time_result.tv_sec*1000.0) + (time_result.tv_usec/1000.0);
    return repeat_time / REPEAT;
}

int main()
{
    json_t *json;
    json_error_t error;
    json_t *value;
    size_t index;
    float total_time = 0;

    // Initialize the environment
    cudaSetDevice(0);

    // Initialize memory and allocate variables
    Point* points = (Point*) malloc(NUMBER_OF_POINTS * sizeof(Point));
    Centroid* centroids = (Centroid*) malloc(NUMBER_OF_CENTROIDS * sizeof(Centroid));

    // Read the dataset from the json file
    json = json_load_file("../points.json", 0, &error);
    if (!json)
    {
        printf("Error parsing Json file");
        fflush(stdout);
        return -1;
    }
    else
    {
        json_array_foreach(json, index, value)
        {
            float x = json_number_value(json_array_get(value, 0));
            float y = json_number_value(json_array_get(value, 1));
            points[index].x = x;
            points[index].y = y;
        }
    }

    // Start the experiment
    total_time = runKmeans(points, centroids);

    printf("Iterations: %d\n", ITERATIONS);
    printf("Average Time: %f ms\n", total_time);

    // Free memory
    free(centroids);
    free(points);
    cudaDeviceReset();

    return 0;
}
d0e088242e0f0379df68d34610fc7391febce62d.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/testframework.h> #include <unittest/cuda/testframework.h> #include <thrust/system/hip/memory.h> #include <hip/hip_runtime.h> #include <numeric> __global__ void dummy_kernel() {} bool binary_exists_for_current_device() { // check against the dummy_kernel // if we're unable to get the attributes, then // we didn't compile a binary compatible with the current device hipFuncAttributes attr; hipError_t error = hipFuncGetAttributes(&attr, dummy_kernel); // clear the CUDA global error state if we just set it, so that // check_cuda_error doesn't complain if (hipSuccess != error) (void)hipGetLastError(); return hipSuccess == error; } void list_devices(void) { int deviceCount; hipGetDeviceCount(&deviceCount); if(deviceCount == 0) { std::cout << "There is no device supporting CUDA" << std::endl; } int selected_device; hipGetDevice(&selected_device); for (int dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); if(dev == 0) { if(deviceProp.major == 9999 && deviceProp.minor == 9999) std::cout << "There is no device supporting CUDA." << std::endl; else if(deviceCount == 1) std::cout << "There is 1 device supporting CUDA" << std:: endl; else std::cout << "There are " << deviceCount << " devices supporting CUDA" << std:: endl; } std::cout << "\nDevice " << dev << ": \"" << deviceProp.name << "\""; if(dev == selected_device) std::cout << " [SELECTED]"; std::cout << std::endl; std::cout << " Major revision number: " << deviceProp.major << std::endl; std::cout << " Minor revision number: " << deviceProp.minor << std::endl; std::cout << " Total amount of global memory: " << deviceProp.totalGlobalMem << " bytes" << std::endl; } std::cout << std::endl; } // provide next, which c++03 doesn't have template<typename Iterator> Iterator my_next(Iterator iter) { return ++iter; } std::vector<int> CUDATestDriver::target_devices(const ArgumentMap &kwargs) { std::vector<int> result; // by default, test all devices in the system (device id -1) int device_id = kwargs.count("device") ? 
atoi(kwargs.find("device")->second.c_str()) : -1; if(device_id < 0) { // target all devices in the system int count = 0; hipGetDeviceCount(&count); result.resize(count); std::iota(result.begin(), result.end(), 0); } else { // target the specified device result = std::vector<int>(1,device_id); } return result; } bool CUDATestDriver::check_cuda_error(bool concise) { hipError_t const error = hipGetLastError(); if(hipSuccess != error) { if(!concise) { std::cout << "[ERROR] CUDA error detected before running tests: [" << std::string(hipGetErrorName(error)) << ": " << std::string(hipGetErrorString(error)) << "]" << std::endl; } } return hipSuccess != error; } bool CUDATestDriver::post_test_smoke_check(const UnitTest &test, bool concise) { hipError_t const error = hipDeviceSynchronize(); if(hipSuccess != error) { if(!concise) { std::cout << "\t[ERROR] CUDA error detected after running " << test.name << ": [" << std::string(hipGetErrorName(error)) << ": " << std::string(hipGetErrorString(error)) << "]" << std::endl; } } return hipSuccess == error; } bool CUDATestDriver::run_tests(const ArgumentSet &args, const ArgumentMap &kwargs) { bool verbose = kwargs.count("verbose"); bool concise = kwargs.count("concise"); if(verbose && concise) { std::cout << "--verbose and --concise cannot be used together" << std::endl; exit(EXIT_FAILURE); } // check error status before doing anything if(check_cuda_error(concise)) return false; bool result = true; if(kwargs.count("verbose")) { list_devices(); } // figure out which devices to target std::vector<int> devices = target_devices(kwargs); // target each device for(std::vector<int>::iterator device = devices.begin(); device != devices.end(); ++device) { hipDeviceSynchronize(); // set the device hipSetDevice(*device); // check if a binary exists for this device // if none exists, skip the device silently unless this is the only one we're targeting if(devices.size() > 1 && !binary_exists_for_current_device()) { // note which device we're skipping hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, *device); std::cout << "Skipping Device " << *device << ": \"" << deviceProp.name << "\"" << std::endl; continue; } if(!concise) { // note which device we're testing hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, *device); std::cout << "Testing Device " << *device << ": \"" << deviceProp.name << "\"" << std::endl; } // check error status before running any tests if(check_cuda_error(concise)) return false; // run tests result &= UnitTestDriver::run_tests(args, kwargs); if(!concise && my_next(device) != devices.end()) { // provide some separation between the output of separate tests std::cout << std::endl; } } return result; } int CUDATestDriver::current_device_architecture() const { int current = -1; hipGetDevice(&current); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, current); return 100 * deviceProp.major + 10 * deviceProp.minor; } UnitTestDriver &driver_instance(thrust::system::cuda::tag) { static CUDATestDriver s_instance; return s_instance; }
d0e088242e0f0379df68d34610fc7391febce62d.cu
#include <unittest/testframework.h> #include <unittest/cuda/testframework.h> #include <thrust/system/cuda/memory.h> #include <cuda_runtime.h> #include <numeric> __global__ void dummy_kernel() {} bool binary_exists_for_current_device() { // check against the dummy_kernel // if we're unable to get the attributes, then // we didn't compile a binary compatible with the current device cudaFuncAttributes attr; cudaError_t error = cudaFuncGetAttributes(&attr, dummy_kernel); // clear the CUDA global error state if we just set it, so that // check_cuda_error doesn't complain if (cudaSuccess != error) (void)cudaGetLastError(); return cudaSuccess == error; } void list_devices(void) { int deviceCount; cudaGetDeviceCount(&deviceCount); if(deviceCount == 0) { std::cout << "There is no device supporting CUDA" << std::endl; } int selected_device; cudaGetDevice(&selected_device); for (int dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if(dev == 0) { if(deviceProp.major == 9999 && deviceProp.minor == 9999) std::cout << "There is no device supporting CUDA." << std::endl; else if(deviceCount == 1) std::cout << "There is 1 device supporting CUDA" << std:: endl; else std::cout << "There are " << deviceCount << " devices supporting CUDA" << std:: endl; } std::cout << "\nDevice " << dev << ": \"" << deviceProp.name << "\""; if(dev == selected_device) std::cout << " [SELECTED]"; std::cout << std::endl; std::cout << " Major revision number: " << deviceProp.major << std::endl; std::cout << " Minor revision number: " << deviceProp.minor << std::endl; std::cout << " Total amount of global memory: " << deviceProp.totalGlobalMem << " bytes" << std::endl; } std::cout << std::endl; } // provide next, which c++03 doesn't have template<typename Iterator> Iterator my_next(Iterator iter) { return ++iter; } std::vector<int> CUDATestDriver::target_devices(const ArgumentMap &kwargs) { std::vector<int> result; // by default, test all devices in the system (device id -1) int device_id = kwargs.count("device") ? 
atoi(kwargs.find("device")->second.c_str()) : -1; if(device_id < 0) { // target all devices in the system int count = 0; cudaGetDeviceCount(&count); result.resize(count); std::iota(result.begin(), result.end(), 0); } else { // target the specified device result = std::vector<int>(1,device_id); } return result; } bool CUDATestDriver::check_cuda_error(bool concise) { cudaError_t const error = cudaGetLastError(); if(cudaSuccess != error) { if(!concise) { std::cout << "[ERROR] CUDA error detected before running tests: [" << std::string(cudaGetErrorName(error)) << ": " << std::string(cudaGetErrorString(error)) << "]" << std::endl; } } return cudaSuccess != error; } bool CUDATestDriver::post_test_smoke_check(const UnitTest &test, bool concise) { cudaError_t const error = cudaDeviceSynchronize(); if(cudaSuccess != error) { if(!concise) { std::cout << "\t[ERROR] CUDA error detected after running " << test.name << ": [" << std::string(cudaGetErrorName(error)) << ": " << std::string(cudaGetErrorString(error)) << "]" << std::endl; } } return cudaSuccess == error; } bool CUDATestDriver::run_tests(const ArgumentSet &args, const ArgumentMap &kwargs) { bool verbose = kwargs.count("verbose"); bool concise = kwargs.count("concise"); if(verbose && concise) { std::cout << "--verbose and --concise cannot be used together" << std::endl; exit(EXIT_FAILURE); } // check error status before doing anything if(check_cuda_error(concise)) return false; bool result = true; if(kwargs.count("verbose")) { list_devices(); } // figure out which devices to target std::vector<int> devices = target_devices(kwargs); // target each device for(std::vector<int>::iterator device = devices.begin(); device != devices.end(); ++device) { cudaDeviceSynchronize(); // set the device cudaSetDevice(*device); // check if a binary exists for this device // if none exists, skip the device silently unless this is the only one we're targeting if(devices.size() > 1 && !binary_exists_for_current_device()) { // note which device we're skipping cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, *device); std::cout << "Skipping Device " << *device << ": \"" << deviceProp.name << "\"" << std::endl; continue; } if(!concise) { // note which device we're testing cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, *device); std::cout << "Testing Device " << *device << ": \"" << deviceProp.name << "\"" << std::endl; } // check error status before running any tests if(check_cuda_error(concise)) return false; // run tests result &= UnitTestDriver::run_tests(args, kwargs); if(!concise && my_next(device) != devices.end()) { // provide some separation between the output of separate tests std::cout << std::endl; } } return result; } int CUDATestDriver::current_device_architecture() const { int current = -1; cudaGetDevice(&current); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, current); return 100 * deviceProp.major + 10 * deviceProp.minor; } UnitTestDriver &driver_instance(thrust::system::cuda::tag) { static CUDATestDriver s_instance; return s_instance; }
28869aadfdc54ab193373942d7698b7ddff0255d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ bool checkBoundary(int blockIdx, int blockDim, int threadIdx){
    int x = threadIdx;
    int y = blockIdx;
    return (x == 0 || x == (blockDim-1) || y == 0 || y == 479);
}

__global__ void mGradient_TwoDim(float *u_dimX, float *u_dimY, float *scalar, float coeffX, float coeffY)
{
    if(checkBoundary(blockIdx.x, blockDim.x, threadIdx.x)) return;

    int Idx = blockIdx.x * blockDim.x + threadIdx.x;
    int Left = Idx - 1;
    int Right = Idx + 1;
    int Top = Idx + blockDim.x;
    int Bottom = Idx - blockDim.x;

    u_dimX[Idx] -= (scalar[Right] - scalar[Left])*coeffX;
    u_dimY[Idx] -= (scalar[Top] - scalar[Bottom])*coeffY;
}
28869aadfdc54ab193373942d7698b7ddff0255d.cu
#include "includes.h"

__device__ bool checkBoundary(int blockIdx, int blockDim, int threadIdx){
    int x = threadIdx;
    int y = blockIdx;
    return (x == 0 || x == (blockDim-1) || y == 0 || y == 479);
}

__global__ void mGradient_TwoDim(float *u_dimX, float *u_dimY, float *scalar, float coeffX, float coeffY)
{
    if(checkBoundary(blockIdx.x, blockDim.x, threadIdx.x)) return;

    int Idx = blockIdx.x * blockDim.x + threadIdx.x;
    int Left = Idx - 1;
    int Right = Idx + 1;
    int Top = Idx + blockDim.x;
    int Bottom = Idx - blockDim.x;

    u_dimX[Idx] -= (scalar[Right] - scalar[Left])*coeffX;
    u_dimY[Idx] -= (scalar[Top] - scalar[Bottom])*coeffY;
}
a7f1ea528f6bff9874689e2298d3aa35023bed55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <sys/time.h> #include<stdio.h> #include<cuda.h> #include<math.h> #define SQRT_TWO_PI 2.506628274631000 #define BLOCK_D1 1024 #define BLOCK_D2 1 #define BLOCK_D3 1 // Note: Needs compute capability >= 2.0 for calculation with doubles, so compile with: // nvcc kernelExample-pinned.cu -arch=compute_20 -code=sm_20,compute_20 -o kernelExample-pinned // -use_fast_math // CUDA kernel: __global__ void calc_loglik(double* vals, int n, double mu, double sigma) { // note that this assumes no third dimension to the grid // id of the block int myblock = blockIdx.x + blockIdx.y * gridDim.x; // size of each block (within grid of blocks) int blocksize = blockDim.x * blockDim.y * blockDim.z; // id of thread in a given block int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; // assign overall id/index of the thread int idx = myblock * blocksize + subthread; if(idx < n) { double std = (vals[idx] - mu)/sigma; double e = exp( - 0.5 * std * std); vals[idx] = e / ( sigma * SQRT_TWO_PI); } } int calc_loglik_cpu(double* vals, int n, double mu, double sigma) { double std, e; for(int idx = 0; idx < n; idx++) { std = (vals[idx] - mu)/sigma; e = exp( - 0.5 * std * std); vals[idx] = e / ( sigma * SQRT_TWO_PI); } return 0; } /* --------------------------- host code ------------------------------*/ void fill( double *p, int n ) { int i; srand48(0); for( i = 0; i < n; i++ ) p[i] = 2*drand48()-1; } double read_timer() { struct timeval end; gettimeofday( &end, NULL ); return end.tv_sec+1.e-6*end.tv_usec; } int main (int argc, char *argv[]) { double* cpu_vals; double* gpu_vals; int n; hipError_t cudaStat; printf("====================================================\n"); for( n = 32768; n <= 134217728; n*=8 ) { // allocated pinned and mapped memory on CPU hipSetDeviceFlags(hipDeviceMapHost); hipHostMalloc((void**)&cpu_vals, n*sizeof(double), hipHostMallocMapped); // map the CPU storage to the GPU to the CPU storage cudaStat = hipHostGetDevicePointer(&gpu_vals, cpu_vals, 0); if(cudaStat != hipSuccess) { printf ("device memory mapping failed"); return EXIT_FAILURE; } const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3); int tmp = ceil(pow(n/BLOCK_D1, 0.5)); printf("Grid dimension is %i x %i\n", tmp, tmp); dim3 gridSize(tmp, tmp, 1); int nthreads = BLOCK_D1*BLOCK_D2*BLOCK_D3*tmp*tmp; if (nthreads < n){ printf("\n============ NOT ENOUGH THREADS TO COVER n=%d ===============\n\n",n); } else { printf("Launching %d threads (n=%d)\n", nthreads, n); } double mu = 0.0; double sigma = 1.0; // simulate 'data' fill(cpu_vals, n); printf("Input values: %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]); hipDeviceSynchronize(); double tInit = read_timer(); // do the calculation hipLaunchKernelGGL(( calc_loglik), dim3(gridSize), dim3(blockSize), 0, 0, gpu_vals, n, mu, sigma); hipDeviceSynchronize(); double tCalc = read_timer(); printf("Output values: %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]); // do calculation on CPU for comparison (unfair as this will only use one core) fill(cpu_vals, n); double tInit2 = read_timer(); calc_loglik_cpu(cpu_vals, n, mu, sigma); double tCalcCPU = read_timer(); printf("Output values (CPU): %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]); printf("Timing results for n = %d\n", n); printf("Calculation time (GPU): %f\n", tCalc - tInit); printf("Calculation time (CPU): %f\n", tCalcCPU - tInit2); printf("Freeing memory...\n"); 
printf("====================================================\n"); hipHostFree(cpu_vals); } printf("\n\nFinished.\n\n"); return 0; }
a7f1ea528f6bff9874689e2298d3aa35023bed55.cu
#include <stdlib.h> #include <sys/time.h> #include<stdio.h> #include<cuda.h> #include<math.h> #define SQRT_TWO_PI 2.506628274631000 #define BLOCK_D1 1024 #define BLOCK_D2 1 #define BLOCK_D3 1 // Note: Needs compute capability >= 2.0 for calculation with doubles, so compile with: // nvcc kernelExample-pinned.cu -arch=compute_20 -code=sm_20,compute_20 -o kernelExample-pinned // -use_fast_math // CUDA kernel: __global__ void calc_loglik(double* vals, int n, double mu, double sigma) { // note that this assumes no third dimension to the grid // id of the block int myblock = blockIdx.x + blockIdx.y * gridDim.x; // size of each block (within grid of blocks) int blocksize = blockDim.x * blockDim.y * blockDim.z; // id of thread in a given block int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; // assign overall id/index of the thread int idx = myblock * blocksize + subthread; if(idx < n) { double std = (vals[idx] - mu)/sigma; double e = exp( - 0.5 * std * std); vals[idx] = e / ( sigma * SQRT_TWO_PI); } } int calc_loglik_cpu(double* vals, int n, double mu, double sigma) { double std, e; for(int idx = 0; idx < n; idx++) { std = (vals[idx] - mu)/sigma; e = exp( - 0.5 * std * std); vals[idx] = e / ( sigma * SQRT_TWO_PI); } return 0; } /* --------------------------- host code ------------------------------*/ void fill( double *p, int n ) { int i; srand48(0); for( i = 0; i < n; i++ ) p[i] = 2*drand48()-1; } double read_timer() { struct timeval end; gettimeofday( &end, NULL ); return end.tv_sec+1.e-6*end.tv_usec; } int main (int argc, char *argv[]) { double* cpu_vals; double* gpu_vals; int n; cudaError_t cudaStat; printf("====================================================\n"); for( n = 32768; n <= 134217728; n*=8 ) { // allocated pinned and mapped memory on CPU cudaSetDeviceFlags(cudaDeviceMapHost); cudaHostAlloc((void**)&cpu_vals, n*sizeof(double), cudaHostAllocMapped); // map the CPU storage to the GPU to the CPU storage cudaStat = cudaHostGetDevicePointer(&gpu_vals, cpu_vals, 0); if(cudaStat != cudaSuccess) { printf ("device memory mapping failed"); return EXIT_FAILURE; } const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3); int tmp = ceil(pow(n/BLOCK_D1, 0.5)); printf("Grid dimension is %i x %i\n", tmp, tmp); dim3 gridSize(tmp, tmp, 1); int nthreads = BLOCK_D1*BLOCK_D2*BLOCK_D3*tmp*tmp; if (nthreads < n){ printf("\n============ NOT ENOUGH THREADS TO COVER n=%d ===============\n\n",n); } else { printf("Launching %d threads (n=%d)\n", nthreads, n); } double mu = 0.0; double sigma = 1.0; // simulate 'data' fill(cpu_vals, n); printf("Input values: %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]); cudaDeviceSynchronize(); double tInit = read_timer(); // do the calculation calc_loglik<<<gridSize, blockSize>>>(gpu_vals, n, mu, sigma); cudaDeviceSynchronize(); double tCalc = read_timer(); printf("Output values: %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]); // do calculation on CPU for comparison (unfair as this will only use one core) fill(cpu_vals, n); double tInit2 = read_timer(); calc_loglik_cpu(cpu_vals, n, mu, sigma); double tCalcCPU = read_timer(); printf("Output values (CPU): %f %f %f...\n", cpu_vals[0], cpu_vals[1], cpu_vals[2]); printf("Timing results for n = %d\n", n); printf("Calculation time (GPU): %f\n", tCalc - tInit); printf("Calculation time (CPU): %f\n", tCalcCPU - tInit2); printf("Freeing memory...\n"); printf("====================================================\n"); cudaFreeHost(cpu_vals); } printf("\n\nFinished.\n\n"); return 0; }
a227a11e3846afcb1e250d120611a025a7b2507c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <malloc.h>
#include <hip/hip_runtime.h>

#define SIZE_thread 1024

__global__ void VectorAdd(int *A, int *B, int *C,int n)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if(i<n) C[i]=A[i]+B[i];
}

int main()
{
    int n = 3000;
    clock_t start = clock();

    int *a, *b, *c;
    int *d_a, *d_b, *d_c;

    a = (int *)malloc(n*sizeof(int));
    b = (int *)malloc(n*sizeof(int));
    c = (int *)malloc(n*sizeof(int));

    hipMalloc(&d_a, n*sizeof(int));
    hipMalloc(&d_b, n*sizeof(int));
    hipMalloc(&d_c, n*sizeof(int));

    for(int i=0;i<n;i++)
    {
        a[i]=i;
        b[i]=i;
        c[i]=0;
    }

    hipMemcpy(d_a, a, n*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, n*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_c, c, n*sizeof(int), hipMemcpyHostToDevice);

    dim3 dimGrid(ceil(n/float(SIZE_thread)),1,1);
    dim3 dimblock(SIZE_thread,1,1);

    hipLaunchKernelGGL(( VectorAdd), dim3(dimGrid),dim3(dimblock), 0, 0, d_a, d_b, d_c,n);

    hipMemcpy(c, d_c, n*sizeof(int), hipMemcpyDeviceToHost);

    for(int i=0;i<10; i++)
        printf("%d ",c[i]);

    free(a);
    free(b);
    free(c);

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    printf("Tiempo transcurrido: %f \n ",((double)clock() - start) / CLOCKS_PER_SEC);
    return 0;
}
a227a11e3846afcb1e250d120611a025a7b2507c.cu
#include <stdio.h>
#include <malloc.h>
#include <cuda.h>

#define SIZE_thread 1024

__global__ void VectorAdd(int *A, int *B, int *C,int n)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if(i<n) C[i]=A[i]+B[i];
}

int main()
{
    int n = 3000;
    clock_t start = clock();

    int *a, *b, *c;
    int *d_a, *d_b, *d_c;

    a = (int *)malloc(n*sizeof(int));
    b = (int *)malloc(n*sizeof(int));
    c = (int *)malloc(n*sizeof(int));

    cudaMalloc(&d_a, n*sizeof(int));
    cudaMalloc(&d_b, n*sizeof(int));
    cudaMalloc(&d_c, n*sizeof(int));

    for(int i=0;i<n;i++)
    {
        a[i]=i;
        b[i]=i;
        c[i]=0;
    }

    cudaMemcpy(d_a, a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c, n*sizeof(int), cudaMemcpyHostToDevice);

    dim3 dimGrid(ceil(n/float(SIZE_thread)),1,1);
    dim3 dimblock(SIZE_thread,1,1);

    VectorAdd<<<dimGrid,dimblock>>>(d_a, d_b, d_c,n);

    cudaMemcpy(c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);

    for(int i=0;i<10; i++)
        printf("%d ",c[i]);

    free(a);
    free(b);
    free(c);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    printf("Tiempo transcurrido: %f \n ",((double)clock() - start) / CLOCKS_PER_SEC);
    return 0;
}
d70bcfbd0c3e39e7adc7cd147cae3526a623ba25.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> // Print device properties void printDevProp(hipDeviceProp_t devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %zu\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %zu\n", devProp.totalConstMem); printf("Texture alignment: %zu\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } void printLimits() { size_t val; hipDeviceGetLimit(&val, hipLimitStackSize); printf("Stack size limit: %zu\n", val); hipDeviceGetLimit(&val, hipLimitPrintfFifoSize); printf("Printf fifo limit: %zu\n", val); hipDeviceGetLimit(&val, hipLimitMallocHeapSize); printf("Heap size limit: %zu\n", val); } int main() { // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDevProp(devProp); size_t f, t; hipMemGetInfo(&f, &t); printf("\n%zu free, %zu total memory\n", f, t); printLimits(); } printf("\nPress any key to exit..."); char c; scanf("%c", &c); return 0; }
d70bcfbd0c3e39e7adc7cd147cae3526a623ba25.cu
#include <stdio.h> // Print device properties void printDevProp(cudaDeviceProp devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %zu\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %zu\n", devProp.totalConstMem); printf("Texture alignment: %zu\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } void printLimits() { size_t val; cudaDeviceGetLimit(&val, cudaLimitStackSize); printf("Stack size limit: %zu\n", val); cudaDeviceGetLimit(&val, cudaLimitPrintfFifoSize); printf("Printf fifo limit: %zu\n", val); cudaDeviceGetLimit(&val, cudaLimitMallocHeapSize); printf("Heap size limit: %zu\n", val); } int main() { // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDevProp(devProp); size_t f, t; cudaMemGetInfo(&f, &t); printf("\n%zu free, %zu total memory\n", f, t); printLimits(); } printf("\nPress any key to exit..."); char c; scanf("%c", &c); return 0; }
0a401cb91a78da6656f36d2c5d287bb4a1a0b5eb.hip
// !!! This is a file automatically generated by hipify!!! // the GPU code can be found in power_gpu.cu // jiabing jin, sept 2017 //////////////////////////////////////////// #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "hip/hip_runtime.h" const int BLOCK_SIZE =256; // #include "power_gpu.cu" // Input Array Variables float* h_MatA = NULL; float* d_MatA = NULL; // Output Array float* h_VecV = NULL; float* d_VecV = NULL; float* h_VecW = NULL; float* d_VecW = NULL; float* h_NormW = NULL; float* d_NormW = NULL; // Variables to change int GlobalSize = 8000; // this is the dimension of the matrix, GlobalSize*GlobalSize int BlockSize = 256; // number of threads in each block const float EPS = 0.000005; // tolerence of the error int max_iteration = 100; // the maximum iteration steps // Functions void Cleanup(void); void InitOne(float*, int); void UploadArray(float*, int); float CPUReduce(float*, int); void Arguments(int, char**); void checkCardVersion(void); void ParseArguments(int, char**); // Kernels __global__ void Av_Product(float* , float* , float* , int ); __global__ void FindNormW(float* , float* , int ); __global__ void NormalizeW(float* ,float* , int ); __global__ void ComputeLamda( float* ,float* , float* ,int ); void CPU_AvProduct() { int N = GlobalSize; int matIndex =0; for(int i=0;i<N;i++) { h_VecW[i] = 0; for(int j=0;j<N;j++) { matIndex = i*N + j; h_VecW[i] += h_MatA[matIndex] * h_VecV[j]; } } } void CPU_NormalizeW() { int N = GlobalSize; float normW=0; for(int i=0;i<N;i++) normW += h_VecW[i] * h_VecW[i]; normW = sqrt(normW); for(int i=0;i<N;i++) h_VecV[i] = h_VecW[i]/normW; } float CPU_ComputeLamda() { int N = GlobalSize; float lamda =0; for(int i=0;i<N;i++) lamda += h_VecV[i] * h_VecW[i]; return lamda; } void RunCPUPowerMethod() { printf("*************************************\n"); float oldLamda =0; float lamda=0; //AvProduct CPU_AvProduct(); //power loop for (int i=0;i<max_iteration;i++) { CPU_NormalizeW(); CPU_AvProduct(); lamda= CPU_ComputeLamda(); printf("CPU lamda at %d: %f \n", i, lamda); // If residual is lass than epsilon break if(abs(oldLamda - lamda) < EPS) break; oldLamda = lamda; } printf("*************************************\n"); } __global__ void Av_Product(float* g_MatA, float* g_VecV, float* g_VecW, int N) { // Block index int bx = blockIdx.x; // Thread index int tx = threadIdx.x; int aBegin = N * BLOCK_SIZE * bx; int aEnd = aBegin + N - 1; int step = BLOCK_SIZE; int bBegin = 0;//BLOCK_SIZE * bx; int bIndex=0; int aIndex =0; float Csub = 0; for (int a = aBegin, b = bBegin; a <= aEnd; a += step, b += step) { __shared__ float As[BLOCK_SIZE]; __shared__ float bs[BLOCK_SIZE]; aIndex = a + tx; if( aIndex < N*N) As[tx] = g_MatA[aIndex]; else As[tx] = 0; bIndex = b+tx; if(bIndex<N) bs[tx] = g_VecV[bIndex]; else bs[tx] = 0; __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[k] * bs[k]; }//} __syncthreads(); } g_VecW[ BLOCK_SIZE * bx + tx] = Csub; } __global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N) { // shared memory size declared at kernel launch extern __shared__ float sdataVW[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; // For thread ids greater than data space if (globalid < N) { sdataVW[tid] = g_VecV[globalid] * g_VecW[globalid]; } else { sdataVW[tid] = 0; // Case of extra threads above N } // each thread loads one element from global to shared mem __syncthreads(); // do reduction in shared mem for (unsigned int 
s=blockDim.x / 2; s > 0; s = s >> 1) { if (tid < s) { sdataVW[tid] = sdataVW[tid] + sdataVW[tid+ s]; } __syncthreads(); } // atomic operations: if (tid == 0) atomicAdd(g_Lamda,sdataVW[0]); } __global__ void FindNormW(float* g_VecW, float * g_NormW, int N) { // shared memory size declared at kernel launch extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; // For thread ids greater than data space if (globalid < N) { sdata[tid] = g_VecW[globalid]; } else { sdata[tid] = 0; // Case of extra threads above N } // each thread loads one element from global to shared mem __syncthreads(); sdata[tid] = sdata[tid] * sdata[tid]; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) { if (tid < s) { sdata[tid] = sdata[tid] + sdata[tid+ s]; } __syncthreads(); } // atomic operations: if (tid == 0) atomicAdd(g_NormW,sdata[0]); } __global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N) { // shared memory size declared at kernel launch extern __shared__ float sNormData[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; if(tid==0) sNormData[0] = g_NormW[0]; __syncthreads(); // For thread ids greater than data space if (globalid < N) { g_VecV[globalid] = g_VecW[globalid]/sNormData[0]; } } // Host code int main(int argc, char** argv) { struct timespec t_start,t_end,t_start2; double runtime; double Av_runtime = 0.0; double Find_runtime = 0.0; double Norm_runtime = 0.0; double Compute_runtime = 0.0; ParseArguments(argc, argv); int N = GlobalSize; printf("Matrix size %d X %d \n", N, N); size_t vec_size = N * sizeof(float); size_t mat_size = N * N * sizeof(float); size_t norm_size = sizeof(float); // Allocate normalized value in host memory h_NormW = (float*)malloc(norm_size); // Allocate input matrix in host memory h_MatA = (float*)malloc(mat_size); // Allocate initial vector V in host memory h_VecV = (float*)malloc(vec_size); // Allocate W vector for computations h_VecW = (float*)malloc(vec_size); // Initialize input matrix UploadArray(h_MatA, N); InitOne(h_VecV,N); printf("Power method in CPU starts\n"); clock_gettime(CLOCK_REALTIME,&t_start); RunCPUPowerMethod(); // the lamda is already solved here clock_gettime(CLOCK_REALTIME,&t_end); runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec); printf("CPU: run time = %f secs.\n",runtime); printf("Power method in CPU is finished\n"); ///////////////////////////////////////////////// // This is the starting points of GPU printf("Power method in GPU starts\n"); checkCardVersion(); // Initialize input matrix InitOne(h_VecV,N); clock_gettime(CLOCK_REALTIME,&t_start); // Here I start to count // Set the kernel arguments int threadsPerBlock = BlockSize; int sharedMemSize = threadsPerBlock * sizeof(float); // in per block, the memory is shared int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; /* Host variables for convergence */ float OldLamda = 0; float temp = 0; /* Device variable for convergence */ float* Lamda = 0; // Allocate matrix and vectors in device memory hipMalloc((void**)&d_MatA, mat_size); hipMalloc((void**)&d_VecV, vec_size); hipMalloc((void**)&d_VecW, vec_size); // This vector is only used by the device hipMalloc((void**)&d_NormW, norm_size); /* Allocate memory for device lamda */ hipMalloc((void**)&Lamda, sizeof(float)); //Copy from host memory to device memory hipMemcpy(d_MatA, h_MatA, mat_size, hipMemcpyHostToDevice); 
hipMemcpy(d_VecV, h_VecV, vec_size, hipMemcpyHostToDevice); hipMemcpy(Lamda, &OldLamda, sizeof(float), hipMemcpyHostToDevice); // cutilCheckError(cutStopTimer(timer_mem)); //Power method loops clock_gettime(CLOCK_REALTIME,&t_start2); /* First matrix vector multiplication */ hipLaunchKernelGGL(( Av_Product), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemSize, 0, d_MatA, d_VecV, d_VecW, N); /* Measure time for one matrix vector multiplication */ clock_gettime(CLOCK_REALTIME,&t_end); Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); hipDeviceSynchronize(); //Needed, kind of barrier to sychronize all threads // This part is the main code of the iteration process for the Power Method in GPU. // Please finish this part based on the given code. Do not forget the command line // hipDeviceSynchronize() after callig the function every time in CUDA to synchoronize the threads //////////////////////////////////////////// // /// // // // // // // // // //power loop for (int i=0;i<max_iteration;i++) { /* Measure time for creating the normalized vector */ clock_gettime(CLOCK_REALTIME,&t_start2); hipLaunchKernelGGL(( FindNormW), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemSize, 0, d_VecW, d_NormW, N); clock_gettime(CLOCK_REALTIME,&t_end); Find_runtime += Find_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); hipDeviceSynchronize(); /* Measure time for normalizing the vector */ clock_gettime(CLOCK_REALTIME,&t_start2); hipLaunchKernelGGL(( NormalizeW), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemSize, 0, d_VecW, d_NormW, d_VecV, N); clock_gettime(CLOCK_REALTIME,&t_end); Norm_runtime += Norm_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); hipDeviceSynchronize(); /* Measure time for matrix vector multiplication */ clock_gettime(CLOCK_REALTIME,&t_start2); hipLaunchKernelGGL(( Av_Product), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemSize, 0, d_MatA, d_VecV, d_VecW, N); clock_gettime(CLOCK_REALTIME,&t_end); Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); hipDeviceSynchronize(); /* Measure time computing the Lamda */ clock_gettime(CLOCK_REALTIME,&t_start2); hipLaunchKernelGGL(( ComputeLamda), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemSize, 0, d_VecV, d_VecW, Lamda, N); clock_gettime(CLOCK_REALTIME,&t_end); Compute_runtime += Compute_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); hipDeviceSynchronize(); // If residual is lass than epsilon break hipMemcpy(&OldLamda, Lamda, sizeof(float), hipMemcpyDeviceToHost); printf("GPU lamda at %d: %f \n", i, OldLamda); if(abs(OldLamda - temp) < EPS){ break; } temp = OldLamda; } // // // // // // // // // // // // // /// // /// // // // // // // // // clock_gettime(CLOCK_REALTIME,&t_end); runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec); printf("GPU: run time = %f secs.\n",runtime); printf("GPU: Av time = %f secs.\n",Av_runtime); printf("GPU: compute time = %f secs.\n",Compute_runtime); printf("GPU: find time = %f secs.\n",Find_runtime); printf("GPU: norm time = %f secs.\n",Norm_runtime); // printf("Overall CPU Execution Time: %f (ms) \n", cutGetTimerValue(timer_CPU)); if (Lamda) hipFree(Lamda); Cleanup(); } void Cleanup(void) { // Free device memory if (d_MatA) hipFree(d_MatA); if (d_VecV) hipFree(d_VecV); if (d_VecW) hipFree(d_VecW); if (d_NormW) hipFree(d_NormW); // Free host 
memory if (h_MatA) free(h_MatA); if (h_VecV) free(h_VecV); if (h_VecW) free(h_VecW); if (h_NormW) free(h_NormW); exit(0); } // Allocates an array with zero value. void InitOne(float* data, int n) { for (int i = 0; i < n; i++) data[i] = 0; data[0]=1; } void UploadArray(float* data, int n) { int total = n*n; int value=1; for (int i = 0; i < total; i++) { data[i] = (int) (rand() % (int)(101));//1;//value; value ++; if(value>n) value =1; // data[i] = 1; } } // Obtain program arguments void Arguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) { GlobalSize = atoi(argv[i+1]); i = i + 1; } if (strcmp(argv[i], "--max_iteration") == 0 || strcmp(argv[i], "-max_iteration") == 0) { max_iteration = atoi(argv[i+1]); i = i + 1; } } } void checkCardVersion() { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); printf("This GPU has major architecture %d, minor %d \n",prop.major,prop.minor); if(prop.major < 2) { fprintf(stderr,"Need compute capability 2 or higher.\n"); exit(1); } } // Parse program arguments void ParseArguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) { GlobalSize = atoi(argv[i+1]); i = i + 1; } // if (strcmp(argv[i], "--blocksize") == 0 || strcmp(argv[i], "-blocksize") == 0) { // BlockSize = atoi(argv[i+1]); // i = i + 1; // } } }
0a401cb91a78da6656f36d2c5d287bb4a1a0b5eb.cu
// the GPU code can be found in power_gpu.cu // jiabing jin, sept 2017 //////////////////////////////////////////// #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "cuda.h" const int BLOCK_SIZE =256; // #include "power_gpu.cu" // Input Array Variables float* h_MatA = NULL; float* d_MatA = NULL; // Output Array float* h_VecV = NULL; float* d_VecV = NULL; float* h_VecW = NULL; float* d_VecW = NULL; float* h_NormW = NULL; float* d_NormW = NULL; // Variables to change int GlobalSize = 8000; // this is the dimension of the matrix, GlobalSize*GlobalSize int BlockSize = 256; // number of threads in each block const float EPS = 0.000005; // tolerence of the error int max_iteration = 100; // the maximum iteration steps // Functions void Cleanup(void); void InitOne(float*, int); void UploadArray(float*, int); float CPUReduce(float*, int); void Arguments(int, char**); void checkCardVersion(void); void ParseArguments(int, char**); // Kernels __global__ void Av_Product(float* , float* , float* , int ); __global__ void FindNormW(float* , float* , int ); __global__ void NormalizeW(float* ,float* , int ); __global__ void ComputeLamda( float* ,float* , float* ,int ); void CPU_AvProduct() { int N = GlobalSize; int matIndex =0; for(int i=0;i<N;i++) { h_VecW[i] = 0; for(int j=0;j<N;j++) { matIndex = i*N + j; h_VecW[i] += h_MatA[matIndex] * h_VecV[j]; } } } void CPU_NormalizeW() { int N = GlobalSize; float normW=0; for(int i=0;i<N;i++) normW += h_VecW[i] * h_VecW[i]; normW = sqrt(normW); for(int i=0;i<N;i++) h_VecV[i] = h_VecW[i]/normW; } float CPU_ComputeLamda() { int N = GlobalSize; float lamda =0; for(int i=0;i<N;i++) lamda += h_VecV[i] * h_VecW[i]; return lamda; } void RunCPUPowerMethod() { printf("*************************************\n"); float oldLamda =0; float lamda=0; //AvProduct CPU_AvProduct(); //power loop for (int i=0;i<max_iteration;i++) { CPU_NormalizeW(); CPU_AvProduct(); lamda= CPU_ComputeLamda(); printf("CPU lamda at %d: %f \n", i, lamda); // If residual is lass than epsilon break if(abs(oldLamda - lamda) < EPS) break; oldLamda = lamda; } printf("*************************************\n"); } __global__ void Av_Product(float* g_MatA, float* g_VecV, float* g_VecW, int N) { // Block index int bx = blockIdx.x; // Thread index int tx = threadIdx.x; int aBegin = N * BLOCK_SIZE * bx; int aEnd = aBegin + N - 1; int step = BLOCK_SIZE; int bBegin = 0;//BLOCK_SIZE * bx; int bIndex=0; int aIndex =0; float Csub = 0; for (int a = aBegin, b = bBegin; a <= aEnd; a += step, b += step) { __shared__ float As[BLOCK_SIZE]; __shared__ float bs[BLOCK_SIZE]; aIndex = a + tx; if( aIndex < N*N) As[tx] = g_MatA[aIndex]; else As[tx] = 0; bIndex = b+tx; if(bIndex<N) bs[tx] = g_VecV[bIndex]; else bs[tx] = 0; __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[k] * bs[k]; }//} __syncthreads(); } g_VecW[ BLOCK_SIZE * bx + tx] = Csub; } __global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N) { // shared memory size declared at kernel launch extern __shared__ float sdataVW[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; // For thread ids greater than data space if (globalid < N) { sdataVW[tid] = g_VecV[globalid] * g_VecW[globalid]; } else { sdataVW[tid] = 0; // Case of extra threads above N } // each thread loads one element from global to shared mem __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) { if (tid < s) { sdataVW[tid] = 
sdataVW[tid] + sdataVW[tid+ s]; } __syncthreads(); } // atomic operations: if (tid == 0) atomicAdd(g_Lamda,sdataVW[0]); } __global__ void FindNormW(float* g_VecW, float * g_NormW, int N) { // shared memory size declared at kernel launch extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; // For thread ids greater than data space if (globalid < N) { sdata[tid] = g_VecW[globalid]; } else { sdata[tid] = 0; // Case of extra threads above N } // each thread loads one element from global to shared mem __syncthreads(); sdata[tid] = sdata[tid] * sdata[tid]; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) { if (tid < s) { sdata[tid] = sdata[tid] + sdata[tid+ s]; } __syncthreads(); } // atomic operations: if (tid == 0) atomicAdd(g_NormW,sdata[0]); } __global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N) { // shared memory size declared at kernel launch extern __shared__ float sNormData[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; if(tid==0) sNormData[0] = g_NormW[0]; __syncthreads(); // For thread ids greater than data space if (globalid < N) { g_VecV[globalid] = g_VecW[globalid]/sNormData[0]; } } // Host code int main(int argc, char** argv) { struct timespec t_start,t_end,t_start2; double runtime; double Av_runtime = 0.0; double Find_runtime = 0.0; double Norm_runtime = 0.0; double Compute_runtime = 0.0; ParseArguments(argc, argv); int N = GlobalSize; printf("Matrix size %d X %d \n", N, N); size_t vec_size = N * sizeof(float); size_t mat_size = N * N * sizeof(float); size_t norm_size = sizeof(float); // Allocate normalized value in host memory h_NormW = (float*)malloc(norm_size); // Allocate input matrix in host memory h_MatA = (float*)malloc(mat_size); // Allocate initial vector V in host memory h_VecV = (float*)malloc(vec_size); // Allocate W vector for computations h_VecW = (float*)malloc(vec_size); // Initialize input matrix UploadArray(h_MatA, N); InitOne(h_VecV,N); printf("Power method in CPU starts\n"); clock_gettime(CLOCK_REALTIME,&t_start); RunCPUPowerMethod(); // the lamda is already solved here clock_gettime(CLOCK_REALTIME,&t_end); runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec); printf("CPU: run time = %f secs.\n",runtime); printf("Power method in CPU is finished\n"); ///////////////////////////////////////////////// // This is the starting points of GPU printf("Power method in GPU starts\n"); checkCardVersion(); // Initialize input matrix InitOne(h_VecV,N); clock_gettime(CLOCK_REALTIME,&t_start); // Here I start to count // Set the kernel arguments int threadsPerBlock = BlockSize; int sharedMemSize = threadsPerBlock * sizeof(float); // in per block, the memory is shared int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; /* Host variables for convergence */ float OldLamda = 0; float temp = 0; /* Device variable for convergence */ float* Lamda = 0; // Allocate matrix and vectors in device memory cudaMalloc((void**)&d_MatA, mat_size); cudaMalloc((void**)&d_VecV, vec_size); cudaMalloc((void**)&d_VecW, vec_size); // This vector is only used by the device cudaMalloc((void**)&d_NormW, norm_size); /* Allocate memory for device lamda */ cudaMalloc((void**)&Lamda, sizeof(float)); //Copy from host memory to device memory cudaMemcpy(d_MatA, h_MatA, mat_size, cudaMemcpyHostToDevice); cudaMemcpy(d_VecV, h_VecV, vec_size, cudaMemcpyHostToDevice); 
cudaMemcpy(Lamda, &OldLamda, sizeof(float), cudaMemcpyHostToDevice); // cutilCheckError(cutStopTimer(timer_mem)); //Power method loops clock_gettime(CLOCK_REALTIME,&t_start2); /* First matrix vector multiplication */ Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N); /* Measure time for one matrix vector multiplication */ clock_gettime(CLOCK_REALTIME,&t_end); Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); cudaThreadSynchronize(); //Needed, kind of barrier to sychronize all threads // This part is the main code of the iteration process for the Power Method in GPU. // Please finish this part based on the given code. Do not forget the command line // cudaThreadSynchronize() after callig the function every time in CUDA to synchoronize the threads //////////////////////////////////////////// // /// // // // // // // // // //power loop for (int i=0;i<max_iteration;i++) { /* Measure time for creating the normalized vector */ clock_gettime(CLOCK_REALTIME,&t_start2); FindNormW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, N); clock_gettime(CLOCK_REALTIME,&t_end); Find_runtime += Find_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); cudaThreadSynchronize(); /* Measure time for normalizing the vector */ clock_gettime(CLOCK_REALTIME,&t_start2); NormalizeW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, d_VecV, N); clock_gettime(CLOCK_REALTIME,&t_end); Norm_runtime += Norm_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); cudaThreadSynchronize(); /* Measure time for matrix vector multiplication */ clock_gettime(CLOCK_REALTIME,&t_start2); Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N); clock_gettime(CLOCK_REALTIME,&t_end); Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); cudaThreadSynchronize(); /* Measure time computing the Lamda */ clock_gettime(CLOCK_REALTIME,&t_start2); ComputeLamda<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecV, d_VecW, Lamda, N); clock_gettime(CLOCK_REALTIME,&t_end); Compute_runtime += Compute_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec); cudaThreadSynchronize(); // If residual is lass than epsilon break cudaMemcpy(&OldLamda, Lamda, sizeof(float), cudaMemcpyDeviceToHost); printf("GPU lamda at %d: %f \n", i, OldLamda); if(abs(OldLamda - temp) < EPS){ break; } temp = OldLamda; } // // // // // // // // // // // // // /// // /// // // // // // // // // clock_gettime(CLOCK_REALTIME,&t_end); runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec); printf("GPU: run time = %f secs.\n",runtime); printf("GPU: Av time = %f secs.\n",Av_runtime); printf("GPU: compute time = %f secs.\n",Compute_runtime); printf("GPU: find time = %f secs.\n",Find_runtime); printf("GPU: norm time = %f secs.\n",Norm_runtime); // printf("Overall CPU Execution Time: %f (ms) \n", cutGetTimerValue(timer_CPU)); if (Lamda) cudaFree(Lamda); Cleanup(); } void Cleanup(void) { // Free device memory if (d_MatA) cudaFree(d_MatA); if (d_VecV) cudaFree(d_VecV); if (d_VecW) cudaFree(d_VecW); if (d_NormW) cudaFree(d_NormW); // Free host memory if (h_MatA) free(h_MatA); if (h_VecV) free(h_VecV); if (h_VecW) free(h_VecW); if (h_NormW) free(h_NormW); exit(0); } // Allocates an array with zero value. 
void InitOne(float* data, int n) { for (int i = 0; i < n; i++) data[i] = 0; data[0]=1; } void UploadArray(float* data, int n) { int total = n*n; int value=1; for (int i = 0; i < total; i++) { data[i] = (int) (rand() % (int)(101));//1;//value; value ++; if(value>n) value =1; // data[i] = 1; } } // Obtain program arguments void Arguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) { GlobalSize = atoi(argv[i+1]); i = i + 1; } if (strcmp(argv[i], "--max_iteration") == 0 || strcmp(argv[i], "-max_iteration") == 0) { max_iteration = atoi(argv[i+1]); i = i + 1; } } } void checkCardVersion() { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); printf("This GPU has major architecture %d, minor %d \n",prop.major,prop.minor); if(prop.major < 2) { fprintf(stderr,"Need compute capability 2 or higher.\n"); exit(1); } } // Parse program arguments void ParseArguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) { GlobalSize = atoi(argv[i+1]); i = i + 1; } // if (strcmp(argv[i], "--blocksize") == 0 || strcmp(argv[i], "-blocksize") == 0) { // BlockSize = atoi(argv[i+1]); // i = i + 1; // } } }
0d9463ad31afcdb54bdfd822b6aac688e23e2589.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ void kMul(double* a, double* b, double* dest, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        dest[idx] = a[idx] * b[idx];
    }
}

extern "C" __global__ void kFillArray(double* a, int m, double* dest, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        dest[idx] = a[idx % m];
    }
}

extern "C" __global__ void kFill(double v, double* dest, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        dest[idx] = v;
    }
}

extern "C" __global__ void kSigmoid(double* a, double* dest, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        dest[idx] = 1/(1+ exp(-1*a[idx]));
    }
}

extern "C" __global__ void kTanh(double* a, double* dest, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        dest[idx] = tanh(a[idx]);
    }
}

extern "C" __global__ void kPow(double* a, double y, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        a[idx] = pow(a[idx], y);
    }
}

extern "C" __global__ void kInverseElements(double* a, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        a[idx] = (a[idx]==0.0)?0.0:1.0/a[idx];
    }
}

extern "C" __global__ void kSqrt(double* a, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        a[idx] = sqrt(a[idx]);
    }
}

extern "C" __global__ void kDivByColumnVector(double *a, int m, double* dest, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        dest[idx] = (a[idx/m]==0.0)?0.0:dest[idx]/a[idx/m];
    }
}

extern "C" __global__ void kMulByColumnVector(double *a, int m, double* dest, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<n) {
        dest[idx] = dest[idx]*a[idx/m];
    }
}
0d9463ad31afcdb54bdfd822b6aac688e23e2589.cu
extern "C" __global__ void kMul(double* a, double* b, double* dest, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { dest[idx] = a[idx] * b[idx]; } } extern "C" __global__ void kFillArray(double* a, int m, double* dest, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { dest[idx] = a[idx % m]; } } extern "C" __global__ void kFill(double v, double* dest, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { dest[idx] = v; } } extern "C" __global__ void kSigmoid(double* a, double* dest, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { dest[idx] = 1/(1+ exp(-1*a[idx])); } } extern "C" __global__ void kTanh(double* a, double* dest, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { dest[idx] = tanh(a[idx]); } } extern "C" __global__ void kPow(double* a, double y, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { a[idx] = pow(a[idx], y); } } extern "C" __global__ void kInverseElements(double* a, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { a[idx] = (a[idx]==0.0)?0.0:1.0/a[idx]; } } extern "C" __global__ void kSqrt(double* a, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { a[idx] = sqrt(a[idx]); } } extern "C" __global__ void kDivByColumnVector(double *a, int m, double* dest, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { dest[idx] = (a[idx/m]==0.0)?0.0:dest[idx]/a[idx/m]; } } extern "C" __global__ void kMulByColumnVector(double *a, int m, double* dest, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx<n) { dest[idx] = dest[idx]*a[idx/m]; } }
fdbc6ec8e7ac511e49071d3b5171d173b41e29a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file depth_convert.cu * @copyright Copyright (c) 2020 University of Turku, MIT License * @author Nicolas Pope */ #include <ftl/codecs/depth_convert_cuda.hpp> #include "../Utils/ColorSpace.h" #include <opencv2/core/cuda_stream_accessor.hpp> // Encoding __device__ inline float clamp(float v) { return max(0.0f, min(1.0f, v)); } __device__ inline float clampC(float v, float t=255.0f) { return max(0.0f, min(t, v)); } /* * See: Pece F., Kautz J., Weyrich T. 2011. Adapting standard video codecs for * depth streaming. Joint Virtual Reality Conference of EGVE 2011 - * The 17th Eurographics Symposium on Virtual Environments, EuroVR 2011 - * The 8th EuroVR (INTUITION) Conference, , pp. 59-66. * */ // Assumes 8 (256) bit output channels and 14bit (16384) depth static constexpr float P = (2.0f * 256.0f) / 16384.0f; /* Convert single float to L Ha Hb. */ __device__ inline float3 depth2yuv(float depth, float maxdepth) { // Normalise float d = max(0.0f,depth); if (d >= maxdepth) d = 0.0f; float L = d / maxdepth; const float p = P; float Ha1 = fmodf((L / (p/2.0f)), 2.0f); float Ha = (Ha1 <= 1.0f) ? Ha1 : 2.0f - Ha1; float Hb1 = fmodf(((L - (p/4.0f)) / (p/2.0f)), 2.0f); float Hb = (Hb1 <= 1.0f) ? Hb1 : 2.0f - Hb1; return {L, Ha, Hb}; } __global__ void depth_to_vuya_kernel(cv::cuda::PtrStepSz<float> depth, cv::cuda::PtrStepSz<uchar4> rgba, float maxdepth) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < depth.cols && y < depth.rows) { float3 yuv = depth2yuv(depth(y,x), maxdepth); rgba(y,x) = make_uchar4(yuv.z*255.0f,yuv.y*255.0f,yuv.x*255.0f, 0.0f); } } void ftl::cuda::depth_to_vuya(const cv::cuda::PtrStepSz<float> &depth, const cv::cuda::PtrStepSz<uchar4> &rgba, float maxdepth, cv::cuda::Stream &stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth.cols + THREADS_X - 1)/THREADS_X, (depth.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); hipLaunchKernelGGL(( depth_to_vuya_kernel), dim3(gridSize), dim3(blockSize), 0, cv::cuda::StreamAccessor::getStream(stream), depth, rgba, maxdepth); cudaSafeCall( hipGetLastError() ); } /* Planar 10bit version */ __global__ void depth_to_nv12_10_kernel(cv::cuda::PtrStepSz<float> depth, ushort* luminance, ushort* chroma, int pitch, float maxdepth) { const unsigned int x = (blockIdx.x*blockDim.x + threadIdx.x) * 2; const unsigned int y = (blockIdx.y*blockDim.y + threadIdx.y) * 2; if (x < depth.cols && y < depth.rows) { // Process all 4 pixels at same time, due to 4:2:0 format float3 yuv1 = depth2yuv(depth(y,x), maxdepth); float3 yuv2 = depth2yuv(depth(y,x+1), maxdepth); float3 yuv3 = depth2yuv(depth(y+1,x), maxdepth); float3 yuv4 = depth2yuv(depth(y+1,x+1), maxdepth); // TODO: Something better than just average! // Bad ones are discarded anyway... 
float Ha = (yuv1.y+yuv2.y+yuv3.y+yuv4.y) / 4.0f * 255.0f; float Hb = (yuv1.z+yuv2.z+yuv3.z+yuv4.z) / 4.0f * 255.0f; // Use upper 8 bits only for luma luminance[y*pitch+x] = ushort(yuv1.x*255.0f) << 8; luminance[y*pitch+x+1] = ushort(yuv2.x*255.0f) << 8; luminance[(y+1)*pitch+x] = ushort(yuv3.x*255.0f) << 8; luminance[(y+1)*pitch+x+1] = ushort(yuv4.x*255.0f) << 8; chroma[(y/2)*pitch+x] = ushort(Ha) << 8; chroma[(y/2)*pitch+x+1] = ushort(Hb) << 8; } } void ftl::cuda::depth_to_nv12_10(const cv::cuda::PtrStepSz<float> &depth, ushort* luminance, ushort* chroma, int pitch, float maxdepth, cv::cuda::Stream &stream) { static constexpr int THREADS_X = 8; // TODO: (nick) tune static constexpr int THREADS_Y = 8; const dim3 gridSize((depth.cols/2 + THREADS_X - 1)/THREADS_X, (depth.rows/2 + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); hipLaunchKernelGGL(( depth_to_nv12_10_kernel), dim3(gridSize), dim3(blockSize), 0, cv::cuda::StreamAccessor::getStream(stream), depth, luminance, chroma, pitch, maxdepth); cudaSafeCall( hipGetLastError() ); } // ============================================================================= // Decoding /* * See: Pece F., Kautz J., Weyrich T. 2011. Adapting standard video codecs for * depth streaming. Joint Virtual Reality Conference of EGVE 2011 - * The 17th Eurographics Symposium on Virtual Environments, EuroVR 2011 - * The 8th EuroVR (INTUITION) Conference, , pp. 59-66. * */ __device__ inline ushort round8(ushort v) { return (v >> 8) + ((v >> 7) & 0x1); // Note: Make no PSNR difference //return v >> 8; } __device__ inline uchar round8(uchar v) { return v; } /* Convert single L Ha Hb to float depth */ __device__ inline float yuv2depth(float L, float Ha, float Hb) { const float p = P; int m = int(floor(4.0f*(L/p) - 0.5f)) % 4; float L0 = L - fmodf((L-(p/8.0f)), p) + (p/4.0f)*float(m) - (p/8.0f); float s = 0.0f; if (m == 0) s = (p/2.0f)*Ha; if (m == 1) s = (p/2.0f)*Hb; if (m == 2) s = (p/2.0f)*(1.0f - Ha); if (m == 3) s = (p/2.0f)*(1.0f - Hb); return (L0+s); // Not denormalised! } // Video is assumed to be 10bit encoded, returning ushort instead of uchar. __global__ void vuya_to_depth_kernel(cv::cuda::PtrStepSz<float> depth, cv::cuda::PtrStepSz<ushort4> rgba, float maxdepth) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < depth.cols && y < depth.rows) { ushort4 in = rgba(y,x); // Only the top 8 bits contain any data float L = float(round8(in.z)) / 255.0f; float Ha = float(round8(in.y)) / 255.0f; float Hb = float(round8(in.x)) / 255.0f; depth(y,x) = yuv2depth(L, Ha, Hb) * maxdepth; } } void ftl::cuda::vuya_to_depth(const cv::cuda::PtrStepSz<float> &depth, const cv::cuda::PtrStepSz<ushort4> &rgba, float maxdepth, cv::cuda::Stream &stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth.cols + THREADS_X - 1)/THREADS_X, (depth.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); hipLaunchKernelGGL(( vuya_to_depth_kernel), dim3(gridSize), dim3(blockSize), 0, cv::cuda::StreamAccessor::getStream(stream), depth, rgba, maxdepth); cudaSafeCall( hipGetLastError() ); } // ==== Planar 4:2:0 version =================================================== // Typed pair to combine memory read template <typename T> struct T2 { T x; T y; }; /* Read both chroma values together. 
*/ template <typename T> __device__ inline ushort2 readChroma(const T* __restrict__ chroma, int pitch, uint x, uint y) { T2<T> c = *(T2<T>*)(&chroma[(y/2)*pitch+x]); return { ushort(round8(c.x)), ushort(round8(c.y)) }; } __device__ inline float2 norm_float(const ushort2 &v) { return make_float2(float(v.x)/255.0f, float(v.y)/255.0f); } /* * Interpolate the chroma, but only if the luminance is the same. This smooths * the decoded output but without crossing discontinuities. If luma values are * themselves inconsistent then the data is marked invalid as it has been * corrupted by the compression. * * Unused, has been rewritten into kernel directly. */ template <typename T> __device__ inline float2 bilinChroma(const T* __restrict__ chroma, const T* __restrict__ luminance, int pitch, uchar L, uint x, uint y, const ushort2 &D, int dx, int dy, int width, int height, bool consistent) { if (uint(x+dx) >= width || uint(y+dy) >= height) return {float(D.x)/255.0f, float(D.y)/255.0f}; float w = 0.0f; float2 R = {0.0f,0.0f}; if (round8(luminance[(y+dy)*pitch+x+dx]) == L) { R += 0.0625f * norm_float(readChroma(chroma, pitch, x+dx, y+dy)); w += 0.0625f; } if (round8(luminance[(y+dy)*pitch+x]) == L) { R += 0.1875f * norm_float(readChroma(chroma, pitch, x, y+dy)); w += 0.1875f; } if (round8(luminance[(y)*pitch+x+dx]) == L) { R += 0.1875f * norm_float(readChroma(chroma, pitch, x+dx, y)); w += 0.1875f; } // TODO: (nick) Find way to correct data rather than discard it. if (consistent) { R.x += 0.5625f * (float(D.x) / 255.0f); R.y += 0.5625f * (float(D.y) / 255.0f); w += 0.5625f; } return R / w; // TODO: Check W isn't 0? } /** * See: J. Korhonen, IMPROVING IMAGE FIDELITY BY LUMA-ASSISTED CHROMA * SUBSAMPLING Jari Korhonen Department of Photonics Engineering , * Technical University of Denmark.. * * For the closest published version of the chroma upsampling applied here. * Difference is we can make assumptions about the depth data so have slightly * modified the algorithm to prevent unwanted interpolation at edges. */ // Video is assumed to be 10bit encoded, returning ushort instead of uchar. // 4:2:0 10bit template <typename T, int THREADS_X, int THREADS_Y> __global__ void vuya_to_depth_kernel(cv::cuda::PtrStepSz<float> depth, const T* __restrict__ luminance, const T* __restrict__ chroma, int pitch, float maxdepth) { __shared__ uchar4 lum_s[THREADS_Y+2][64]; __shared__ ushort2 chroma_s[THREADS_Y+2][64]; __shared__ int consistent_s[THREADS_Y+2][64]; for (int i=threadIdx.x + threadIdx.y*THREADS_X; i<((THREADS_X+2))*((THREADS_Y+2)); i += THREADS_X*THREADS_Y) { const int y = i/((THREADS_X+2)); const int x = i%((THREADS_X+2)); const int gx = (x + blockIdx.x*blockDim.x - 1)*2; const int gy = (y + blockIdx.y*blockDim.y - 1)*2; bool valid = (gx >= 0 && gy >= 0 && gx < depth.cols-1 && gy < depth.rows-1); const ushort2 v1 = (valid) ? *(const ushort2*)(&luminance[gy*pitch+gx]) : make_ushort2(0,0); const ushort2 v2 = (valid) ? *(const ushort2*)(&luminance[(gy+1)*pitch+gx]) : make_ushort2(0,0); short4 L = make_short4( round8(v1.x), round8(v1.y), round8(v2.x), round8(v2.y) ); lum_s[y][x] = make_uchar4(L.x,L.y,L.z,L.w); chroma_s[y][x] = (valid) ? 
readChroma(chroma, pitch, gx, gy) : make_ushort2(0,0); bool consistent = true; if (abs(L.x-L.y) > 1.0f) consistent = false; if (abs(L.x-L.z) > 1.0f) consistent = false; if (abs(L.w-L.y) > 1.0f) consistent = false; if (abs(L.w-L.z) > 1.0f) consistent = false; consistent_s[y][x] = int(consistent); } __syncthreads(); const unsigned int x = (blockIdx.x*blockDim.x + threadIdx.x)*2; const unsigned int y = (blockIdx.y*blockDim.y + threadIdx.y)*2; uchar4 L = lum_s[threadIdx.y+1][threadIdx.x+1]; const ushort2 H = chroma_s[threadIdx.y+1][threadIdx.x+1]; float d[4] = {0.0f, 0.0f, 0.0f, 0.0f}; float2 H2; float w; bool consistent = consistent_s[threadIdx.y+1][threadIdx.x+1]; // Do a bilinear interpolation of chroma, combined with a luma consistency // check to not smooth over boundaries, and to remove inconsistent values // that can be assumed to have been corrupted by the compression. w = 0.0f; H2 = {0.0f,0.0f}; if (consistent_s[threadIdx.y+1-1][threadIdx.x+1-1] && L.x == lum_s[threadIdx.y+1-1][threadIdx.x+1-1].w) { H2 += 0.0625f * norm_float(chroma_s[threadIdx.y+1-1][threadIdx.x+1-1]); w += 0.0625f; } if (consistent_s[threadIdx.y+1-1][threadIdx.x+1] && L.x == lum_s[threadIdx.y+1-1][threadIdx.x+1].z) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1-1][threadIdx.x+1]); w += 0.1875f; } if (consistent_s[threadIdx.y+1][threadIdx.x+1-1] && L.x == lum_s[threadIdx.y+1][threadIdx.x+1-1].y) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1][threadIdx.x+1-1]); w += 0.1875f; } if (consistent) { H2 += 0.5625f * norm_float(H); w += 0.5625f; } if (w > 0.0f) d[0] = yuv2depth(float(L.x) / 255.0f, H2.x/w, H2.y/w) * maxdepth; w = 0.0f; H2 = {0.0f,0.0f}; if (consistent_s[threadIdx.y+1-1][threadIdx.x+1+1] && L.y == lum_s[threadIdx.y+1-1][threadIdx.x+1+1].z) { H2 += 0.0625f * norm_float(chroma_s[threadIdx.y+1-1][threadIdx.x+1+1]); w += 0.0625f; } if (consistent_s[threadIdx.y+1-1][threadIdx.x+1] && L.y == lum_s[threadIdx.y+1-1][threadIdx.x+1].w) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1-1][threadIdx.x+1]); w += 0.1875f; } if (consistent_s[threadIdx.y+1][threadIdx.x+1+1] && L.y == lum_s[threadIdx.y+1][threadIdx.x+1+1].x) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1][threadIdx.x+1+1]); w += 0.1875f; } if (consistent) { H2 += 0.5625f * norm_float(H); w += 0.5625f; } if (w > 0.0f) d[1] = yuv2depth(float(L.y) / 255.0f, H2.x/w, H2.y/w) * maxdepth; w = 0.0f; H2 = {0.0f,0.0f}; if (consistent_s[threadIdx.y+1+1][threadIdx.x+1-1] && L.z == lum_s[threadIdx.y+1+1][threadIdx.x+1-1].y) { H2 += 0.0625f * norm_float(chroma_s[threadIdx.y+1+1][threadIdx.x+1-1]); w += 0.0625f; } if (consistent_s[threadIdx.y+1+1][threadIdx.x+1] && L.z == lum_s[threadIdx.y+1+1][threadIdx.x+1].x) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1+1][threadIdx.x+1]); w += 0.1875f; } if (consistent_s[threadIdx.y+1][threadIdx.x+1-1] && L.z == lum_s[threadIdx.y+1][threadIdx.x+1-1].w) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1][threadIdx.x+1-1]); w += 0.1875f; } if (consistent) { H2 += 0.5625f * norm_float(H); w += 0.5625f; } if (w > 0.0f) d[2] = yuv2depth(float(L.z) / 255.0f, H2.x/w, H2.y/w) * maxdepth; w = 0.0f; H2 = {0.0f,0.0f}; if (consistent_s[threadIdx.y+1+1][threadIdx.x+1+1] && L.w == lum_s[threadIdx.y+1+1][threadIdx.x+1+1].x) { H2 += 0.0625f * norm_float(chroma_s[threadIdx.y+1+1][threadIdx.x+1+1]); w += 0.0625f; } if (consistent_s[threadIdx.y+1+1][threadIdx.x+1] && L.w == lum_s[threadIdx.y+1+1][threadIdx.x+1].y) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1+1][threadIdx.x+1]); w += 0.1875f; } if 
(consistent_s[threadIdx.y+1][threadIdx.x+1+1] && L.w == lum_s[threadIdx.y+1][threadIdx.x+1+1].z) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1][threadIdx.x+1+1]); w += 0.1875f; } if (consistent_s[threadIdx.y+1][threadIdx.x+1]) { H2 += 0.5625f * norm_float(H); w += 0.5625f; } if (w > 0.0f) d[3] = yuv2depth(float(L.w) / 255.0f, H2.x/w, H2.y/w) * maxdepth; if (x < depth.cols && y < depth.rows) { depth(y,x) = d[0]; depth(y,x+1) = d[1]; depth(y+1,x) = d[2]; depth(y+1,x+1) = d[3]; } } void ftl::cuda::vuya_to_depth(const cv::cuda::PtrStepSz<float> &depth, const cv::cuda::PtrStepSz<ushort> &luminance, const cv::cuda::PtrStepSz<ushort> &chroma, float maxdepth, cv::cuda::Stream &stream) { static const int THREADS_X = 16; static const int THREADS_Y = 8; const dim3 gridSize((depth.cols/2 + THREADS_X - 1)/THREADS_X, (depth.rows/2 + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); hipLaunchKernelGGL(( vuya_to_depth_kernel<ushort,THREADS_X,THREADS_Y>), dim3(gridSize), dim3(blockSize), 0, cv::cuda::StreamAccessor::getStream(stream), depth, luminance.data, chroma.data, int(luminance.step/sizeof(ushort)), maxdepth); cudaSafeCall( hipGetLastError() ); } void ftl::cuda::smooth_y(const cv::cuda::PtrStepSz<ushort4> &rgba, cv::cuda::Stream &stream) { // REMOVED!! } // ==== Colour conversions ===================================================== // Some of the following comes from the defunct NvPipe library. It has been // modified by us. __constant__ float matYuv2Rgb[3][3]; __constant__ float matRgb2Yuv[3][3]; static void inline GetConstants(int iMatrix, float &wr, float &wb, int &black, int &white, int &max) { // Default is BT709 wr = 0.2126f; wb = 0.0722f; black = 16; white = 235; max = 255; if (iMatrix == ColorSpaceStandard_BT601) { wr = 0.2990f; wb = 0.1140f; } else if (iMatrix == ColorSpaceStandard_BT2020) { wr = 0.2627f; wb = 0.0593f; // 10-bit only black = 64 << 6; white = 940 << 6; max = (1 << 16) - 1; } } // Full-range BT.709 and BT.2020 are the default matrices used for YUV to RGB conversion for 8-bit and 10/12-bit encoded streams, respectively. // If color primaries are encoded/embedded in the bitstream, the client should use those color primaries in the conversion matrices for more accurate color reproduction. static void SetMatYuv2Rgb(int iMatrix) { float wr, wb; int black, white, max; GetConstants(iMatrix, wr, wb, black, white, max); float mat[3][3] = { 1.0f, 0.0f, (1.0f - wr) / 0.5f, 1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr), 1.0f, (1.0f - wb) / 0.5f, 0.0f, }; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]); } } hipMemcpyToSymbol(matYuv2Rgb, mat, sizeof(mat)); } template<class T> __device__ static T Clamp(T x, T lower, T upper) { return x < lower ? lower : (x > upper ? 
upper : x); } template<class Rgb, class YuvUnit> __device__ inline Rgb YuvToRgbForPixel(YuvUnit y, YuvUnit u, YuvUnit v) { const int low = 1 << (sizeof(YuvUnit) * 8 - 4), mid = 1 << (sizeof(YuvUnit) * 8 - 1); float fy = (int)y - low, fu = (int)u - mid, fv = (int)v - mid; const float maxf = (1 << sizeof(YuvUnit) * 8) - 1.0f; YuvUnit r = (YuvUnit)Clamp(matYuv2Rgb[0][0] * fy + matYuv2Rgb[0][1] * fu + matYuv2Rgb[0][2] * fv, 0.0f, maxf), g = (YuvUnit)Clamp(matYuv2Rgb[1][0] * fy + matYuv2Rgb[1][1] * fu + matYuv2Rgb[1][2] * fv, 0.0f, maxf), b = (YuvUnit)Clamp(matYuv2Rgb[2][0] * fy + matYuv2Rgb[2][1] * fu + matYuv2Rgb[2][2] * fv, 0.0f, maxf); Rgb rgb{}; const int nShift = abs((int)sizeof(YuvUnit) - (int)sizeof(rgb.c.r)) * 8; if (sizeof(YuvUnit) >= sizeof(rgb.c.r)) { rgb.c.r = r >> nShift; rgb.c.g = g >> nShift; rgb.c.b = b >> nShift; } else { rgb.c.r = r << nShift; rgb.c.g = g << nShift; rgb.c.b = b << nShift; } return rgb; } template<class YuvUnitx2, class Rgb, class RgbIntx2> __global__ static void YuvToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; if (x + 1 >= nWidth || y + 1 >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch); YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch); *(RgbIntx2 *)pDst = RgbIntx2 { YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y).d, YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y).d, }; *(RgbIntx2 *)(pDst + nRgbPitch) = RgbIntx2 { YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y).d, YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y).d, }; } template <class COLOR32> void Nv12ToColor32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix, hipStream_t s) { SetMatYuv2Rgb(iMatrix); hipLaunchKernelGGL(( YuvToRgbKernel<uchar2, COLOR32, uint2>) , dim3(dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2)), dim3(dim3(32, 2)), 0, 0, dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template void Nv12ToColor32<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix, hipStream_t); template void Nv12ToColor32<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix, hipStream_t); __global__ static void nv12_to_float(const uint8_t* __restrict__ src, uint32_t srcPitch, float* dst, uint32_t dstPitch, uint32_t width, uint32_t height) { const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { const uint32_t i = y * srcPitch + x; const uint32_t j = y * dstPitch + x; // Copy higher byte from left half of Y channel ushort value = (src[i]) + (src[i+width]<<8); //dst[j] = src[i]; // Copy lower byte from right half of Y channel //dst[j + 1] = src[i + width]; dst[j] = float(value) / 1000.0f; } } void ftl::cuda::nv12_to_float(const uint8_t* src, uint32_t srcPitch, float* dst, uint32_t dstPitch, uint32_t width, uint32_t height, hipStream_t s) { static const int THREADS_X = 16; static const int THREADS_Y = 16; dim3 gridSize(width / THREADS_X + 1, height / THREADS_Y + 1); dim3 blockSize(THREADS_X, THREADS_Y); ::nv12_to_float << <gridSize, blockSize, 0, s >> > (src, srcPitch, dst, dstPitch, width, height); } __global__ void 
float_to_nv12_16bit(const float* __restrict__ src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) { const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { const uint32_t i = y * srcPitch + x; const uint32_t j = y * dstPitch + x; float d = src[i]; ushort ds = ushort(d*1000.0f); // Copy higher byte to left half of Y channel dst[j] = ds & 0xFF; // Copy lower byte to right half of Y channel dst[j + width] = ds >> 8; // Blank UV channel if (y < height / 2) { uint8_t* UV = dst + dstPitch * (height + y); UV[2 * x + 0] = 0; UV[2 * x + 1] = 0; } } } void ftl::cuda::float_to_nv12_16bit(const float* src, uint32_t srcPitch, uchar* dst, uint32_t dstPitch, uint32_t width, uint32_t height, hipStream_t s) { static const int THREADS_X = 16; static const int THREADS_Y = 16; dim3 gridSize(width / THREADS_X + 1, height / THREADS_Y + 1); dim3 blockSize(THREADS_X, THREADS_Y); ::float_to_nv12_16bit << <gridSize, blockSize, 0, s >> > (src, srcPitch, dst, dstPitch, width, height); }
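To make the Pece et al. encoding in this file easier to follow, here is a host-only restatement of the depth2yuv / yuv2depth pair that round-trips a range of depths through simulated 8-bit quantisation. It is an editor-added sketch, not part of the dataset entry; the 16 m working range and the 0.01 m step are arbitrary assumptions.

#include <algorithm>
#include <cmath>
#include <cstdio>

static const float P = (2.0f * 256.0f) / 16384.0f;   // same constant as the file above

// Host restatement of the device-side depth2yuv.
static void encode(float depth, float maxdepth, float& L, float& Ha, float& Hb) {
    float d = std::max(0.0f, depth);
    if (d >= maxdepth) d = 0.0f;
    L = d / maxdepth;
    float Ha1 = fmodf(L / (P / 2.0f), 2.0f);
    Ha = (Ha1 <= 1.0f) ? Ha1 : 2.0f - Ha1;
    float Hb1 = fmodf((L - P / 4.0f) / (P / 2.0f), 2.0f);
    Hb = (Hb1 <= 1.0f) ? Hb1 : 2.0f - Hb1;
}

// Host restatement of the device-side yuv2depth (denormalised by maxdepth here).
static float decode(float L, float Ha, float Hb, float maxdepth) {
    int m = int(std::floor(4.0f * (L / P) - 0.5f)) % 4;
    float L0 = L - fmodf(L - P / 8.0f, P) + (P / 4.0f) * float(m) - P / 8.0f;
    float s = 0.0f;
    if (m == 0) s = (P / 2.0f) * Ha;
    if (m == 1) s = (P / 2.0f) * Hb;
    if (m == 2) s = (P / 2.0f) * (1.0f - Ha);
    if (m == 3) s = (P / 2.0f) * (1.0f - Hb);
    return (L0 + s) * maxdepth;
}

int main() {
    const float maxdepth = 16.0f;                     // assumed working range in metres
    float worst = 0.0f;
    for (float d = 0.1f; d < maxdepth; d += 0.01f) {
        float L, Ha, Hb;
        encode(d, maxdepth, L, Ha, Hb);
        // Simulate the 8-bit quantisation applied before transport.
        L  = std::round(L  * 255.0f) / 255.0f;
        Ha = std::round(Ha * 255.0f) / 255.0f;
        Hb = std::round(Hb * 255.0f) / 255.0f;
        worst = std::max(worst, std::fabs(decode(L, Ha, Hb, maxdepth) - d));
    }
    printf("worst round-trip error: %f m\n", worst);
    return 0;
}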
fdbc6ec8e7ac511e49071d3b5171d173b41e29a9.cu
/** * @file depth_convert.cu * @copyright Copyright (c) 2020 University of Turku, MIT License * @author Nicolas Pope */ #include <ftl/codecs/depth_convert_cuda.hpp> #include "../Utils/ColorSpace.h" #include <opencv2/core/cuda_stream_accessor.hpp> // Encoding __device__ inline float clamp(float v) { return max(0.0f, min(1.0f, v)); } __device__ inline float clampC(float v, float t=255.0f) { return max(0.0f, min(t, v)); } /* * See: Pece F., Kautz J., Weyrich T. 2011. Adapting standard video codecs for * depth streaming. Joint Virtual Reality Conference of EGVE 2011 - * The 17th Eurographics Symposium on Virtual Environments, EuroVR 2011 - * The 8th EuroVR (INTUITION) Conference, , pp. 59-66. * */ // Assumes 8 (256) bit output channels and 14bit (16384) depth static constexpr float P = (2.0f * 256.0f) / 16384.0f; /* Convert single float to L Ha Hb. */ __device__ inline float3 depth2yuv(float depth, float maxdepth) { // Normalise float d = max(0.0f,depth); if (d >= maxdepth) d = 0.0f; float L = d / maxdepth; const float p = P; float Ha1 = fmodf((L / (p/2.0f)), 2.0f); float Ha = (Ha1 <= 1.0f) ? Ha1 : 2.0f - Ha1; float Hb1 = fmodf(((L - (p/4.0f)) / (p/2.0f)), 2.0f); float Hb = (Hb1 <= 1.0f) ? Hb1 : 2.0f - Hb1; return {L, Ha, Hb}; } __global__ void depth_to_vuya_kernel(cv::cuda::PtrStepSz<float> depth, cv::cuda::PtrStepSz<uchar4> rgba, float maxdepth) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < depth.cols && y < depth.rows) { float3 yuv = depth2yuv(depth(y,x), maxdepth); rgba(y,x) = make_uchar4(yuv.z*255.0f,yuv.y*255.0f,yuv.x*255.0f, 0.0f); } } void ftl::cuda::depth_to_vuya(const cv::cuda::PtrStepSz<float> &depth, const cv::cuda::PtrStepSz<uchar4> &rgba, float maxdepth, cv::cuda::Stream &stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth.cols + THREADS_X - 1)/THREADS_X, (depth.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); depth_to_vuya_kernel<<<gridSize, blockSize, 0, cv::cuda::StreamAccessor::getStream(stream)>>>(depth, rgba, maxdepth); cudaSafeCall( cudaGetLastError() ); } /* Planar 10bit version */ __global__ void depth_to_nv12_10_kernel(cv::cuda::PtrStepSz<float> depth, ushort* luminance, ushort* chroma, int pitch, float maxdepth) { const unsigned int x = (blockIdx.x*blockDim.x + threadIdx.x) * 2; const unsigned int y = (blockIdx.y*blockDim.y + threadIdx.y) * 2; if (x < depth.cols && y < depth.rows) { // Process all 4 pixels at same time, due to 4:2:0 format float3 yuv1 = depth2yuv(depth(y,x), maxdepth); float3 yuv2 = depth2yuv(depth(y,x+1), maxdepth); float3 yuv3 = depth2yuv(depth(y+1,x), maxdepth); float3 yuv4 = depth2yuv(depth(y+1,x+1), maxdepth); // TODO: Something better than just average! // Bad ones are discarded anyway... 
float Ha = (yuv1.y+yuv2.y+yuv3.y+yuv4.y) / 4.0f * 255.0f; float Hb = (yuv1.z+yuv2.z+yuv3.z+yuv4.z) / 4.0f * 255.0f; // Use upper 8 bits only for luma luminance[y*pitch+x] = ushort(yuv1.x*255.0f) << 8; luminance[y*pitch+x+1] = ushort(yuv2.x*255.0f) << 8; luminance[(y+1)*pitch+x] = ushort(yuv3.x*255.0f) << 8; luminance[(y+1)*pitch+x+1] = ushort(yuv4.x*255.0f) << 8; chroma[(y/2)*pitch+x] = ushort(Ha) << 8; chroma[(y/2)*pitch+x+1] = ushort(Hb) << 8; } } void ftl::cuda::depth_to_nv12_10(const cv::cuda::PtrStepSz<float> &depth, ushort* luminance, ushort* chroma, int pitch, float maxdepth, cv::cuda::Stream &stream) { static constexpr int THREADS_X = 8; // TODO: (nick) tune static constexpr int THREADS_Y = 8; const dim3 gridSize((depth.cols/2 + THREADS_X - 1)/THREADS_X, (depth.rows/2 + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); depth_to_nv12_10_kernel<<<gridSize, blockSize, 0, cv::cuda::StreamAccessor::getStream(stream)>>>(depth, luminance, chroma, pitch, maxdepth); cudaSafeCall( cudaGetLastError() ); } // ============================================================================= // Decoding /* * See: Pece F., Kautz J., Weyrich T. 2011. Adapting standard video codecs for * depth streaming. Joint Virtual Reality Conference of EGVE 2011 - * The 17th Eurographics Symposium on Virtual Environments, EuroVR 2011 - * The 8th EuroVR (INTUITION) Conference, , pp. 59-66. * */ __device__ inline ushort round8(ushort v) { return (v >> 8) + ((v >> 7) & 0x1); // Note: Make no PSNR difference //return v >> 8; } __device__ inline uchar round8(uchar v) { return v; } /* Convert single L Ha Hb to float depth */ __device__ inline float yuv2depth(float L, float Ha, float Hb) { const float p = P; int m = int(floor(4.0f*(L/p) - 0.5f)) % 4; float L0 = L - fmodf((L-(p/8.0f)), p) + (p/4.0f)*float(m) - (p/8.0f); float s = 0.0f; if (m == 0) s = (p/2.0f)*Ha; if (m == 1) s = (p/2.0f)*Hb; if (m == 2) s = (p/2.0f)*(1.0f - Ha); if (m == 3) s = (p/2.0f)*(1.0f - Hb); return (L0+s); // Not denormalised! } // Video is assumed to be 10bit encoded, returning ushort instead of uchar. __global__ void vuya_to_depth_kernel(cv::cuda::PtrStepSz<float> depth, cv::cuda::PtrStepSz<ushort4> rgba, float maxdepth) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < depth.cols && y < depth.rows) { ushort4 in = rgba(y,x); // Only the top 8 bits contain any data float L = float(round8(in.z)) / 255.0f; float Ha = float(round8(in.y)) / 255.0f; float Hb = float(round8(in.x)) / 255.0f; depth(y,x) = yuv2depth(L, Ha, Hb) * maxdepth; } } void ftl::cuda::vuya_to_depth(const cv::cuda::PtrStepSz<float> &depth, const cv::cuda::PtrStepSz<ushort4> &rgba, float maxdepth, cv::cuda::Stream &stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth.cols + THREADS_X - 1)/THREADS_X, (depth.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); vuya_to_depth_kernel<<<gridSize, blockSize, 0, cv::cuda::StreamAccessor::getStream(stream)>>>(depth, rgba, maxdepth); cudaSafeCall( cudaGetLastError() ); } // ==== Planar 4:2:0 version =================================================== // Typed pair to combine memory read template <typename T> struct T2 { T x; T y; }; /* Read both chroma values together. 
*/ template <typename T> __device__ inline ushort2 readChroma(const T* __restrict__ chroma, int pitch, uint x, uint y) { T2<T> c = *(T2<T>*)(&chroma[(y/2)*pitch+x]); return { ushort(round8(c.x)), ushort(round8(c.y)) }; } __device__ inline float2 norm_float(const ushort2 &v) { return make_float2(float(v.x)/255.0f, float(v.y)/255.0f); } /* * Interpolate the chroma, but only if the luminance is the same. This smooths * the decoded output but without crossing discontinuities. If luma values are * themselves inconsistent then the data is marked invalid as it has been * corrupted by the compression. * * Unused, has been rewritten into kernel directly. */ template <typename T> __device__ inline float2 bilinChroma(const T* __restrict__ chroma, const T* __restrict__ luminance, int pitch, uchar L, uint x, uint y, const ushort2 &D, int dx, int dy, int width, int height, bool consistent) { if (uint(x+dx) >= width || uint(y+dy) >= height) return {float(D.x)/255.0f, float(D.y)/255.0f}; float w = 0.0f; float2 R = {0.0f,0.0f}; if (round8(luminance[(y+dy)*pitch+x+dx]) == L) { R += 0.0625f * norm_float(readChroma(chroma, pitch, x+dx, y+dy)); w += 0.0625f; } if (round8(luminance[(y+dy)*pitch+x]) == L) { R += 0.1875f * norm_float(readChroma(chroma, pitch, x, y+dy)); w += 0.1875f; } if (round8(luminance[(y)*pitch+x+dx]) == L) { R += 0.1875f * norm_float(readChroma(chroma, pitch, x+dx, y)); w += 0.1875f; } // TODO: (nick) Find way to correct data rather than discard it. if (consistent) { R.x += 0.5625f * (float(D.x) / 255.0f); R.y += 0.5625f * (float(D.y) / 255.0f); w += 0.5625f; } return R / w; // TODO: Check W isn't 0? } /** * See: J. Korhonen, “IMPROVING IMAGE FIDELITY BY LUMA-ASSISTED CHROMA * SUBSAMPLING Jari Korhonen Department of Photonics Engineering , * Technical University of Denmark.”. * * For the closest published version of the chroma upsampling applied here. * Difference is we can make assumptions about the depth data so have slightly * modified the algorithm to prevent unwanted interpolation at edges. */ // Video is assumed to be 10bit encoded, returning ushort instead of uchar. // 4:2:0 10bit template <typename T, int THREADS_X, int THREADS_Y> __global__ void vuya_to_depth_kernel(cv::cuda::PtrStepSz<float> depth, const T* __restrict__ luminance, const T* __restrict__ chroma, int pitch, float maxdepth) { __shared__ uchar4 lum_s[THREADS_Y+2][64]; __shared__ ushort2 chroma_s[THREADS_Y+2][64]; __shared__ int consistent_s[THREADS_Y+2][64]; for (int i=threadIdx.x + threadIdx.y*THREADS_X; i<((THREADS_X+2))*((THREADS_Y+2)); i += THREADS_X*THREADS_Y) { const int y = i/((THREADS_X+2)); const int x = i%((THREADS_X+2)); const int gx = (x + blockIdx.x*blockDim.x - 1)*2; const int gy = (y + blockIdx.y*blockDim.y - 1)*2; bool valid = (gx >= 0 && gy >= 0 && gx < depth.cols-1 && gy < depth.rows-1); const ushort2 v1 = (valid) ? *(const ushort2*)(&luminance[gy*pitch+gx]) : make_ushort2(0,0); const ushort2 v2 = (valid) ? *(const ushort2*)(&luminance[(gy+1)*pitch+gx]) : make_ushort2(0,0); short4 L = make_short4( round8(v1.x), round8(v1.y), round8(v2.x), round8(v2.y) ); lum_s[y][x] = make_uchar4(L.x,L.y,L.z,L.w); chroma_s[y][x] = (valid) ? 
readChroma(chroma, pitch, gx, gy) : make_ushort2(0,0); bool consistent = true; if (abs(L.x-L.y) > 1.0f) consistent = false; if (abs(L.x-L.z) > 1.0f) consistent = false; if (abs(L.w-L.y) > 1.0f) consistent = false; if (abs(L.w-L.z) > 1.0f) consistent = false; consistent_s[y][x] = int(consistent); } __syncthreads(); const unsigned int x = (blockIdx.x*blockDim.x + threadIdx.x)*2; const unsigned int y = (blockIdx.y*blockDim.y + threadIdx.y)*2; uchar4 L = lum_s[threadIdx.y+1][threadIdx.x+1]; const ushort2 H = chroma_s[threadIdx.y+1][threadIdx.x+1]; float d[4] = {0.0f, 0.0f, 0.0f, 0.0f}; float2 H2; float w; bool consistent = consistent_s[threadIdx.y+1][threadIdx.x+1]; // Do a bilinear interpolation of chroma, combined with a luma consistency // check to not smooth over boundaries, and to remove inconsistent values // that can be assumed to have been corrupted by the compression. w = 0.0f; H2 = {0.0f,0.0f}; if (consistent_s[threadIdx.y+1-1][threadIdx.x+1-1] && L.x == lum_s[threadIdx.y+1-1][threadIdx.x+1-1].w) { H2 += 0.0625f * norm_float(chroma_s[threadIdx.y+1-1][threadIdx.x+1-1]); w += 0.0625f; } if (consistent_s[threadIdx.y+1-1][threadIdx.x+1] && L.x == lum_s[threadIdx.y+1-1][threadIdx.x+1].z) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1-1][threadIdx.x+1]); w += 0.1875f; } if (consistent_s[threadIdx.y+1][threadIdx.x+1-1] && L.x == lum_s[threadIdx.y+1][threadIdx.x+1-1].y) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1][threadIdx.x+1-1]); w += 0.1875f; } if (consistent) { H2 += 0.5625f * norm_float(H); w += 0.5625f; } if (w > 0.0f) d[0] = yuv2depth(float(L.x) / 255.0f, H2.x/w, H2.y/w) * maxdepth; w = 0.0f; H2 = {0.0f,0.0f}; if (consistent_s[threadIdx.y+1-1][threadIdx.x+1+1] && L.y == lum_s[threadIdx.y+1-1][threadIdx.x+1+1].z) { H2 += 0.0625f * norm_float(chroma_s[threadIdx.y+1-1][threadIdx.x+1+1]); w += 0.0625f; } if (consistent_s[threadIdx.y+1-1][threadIdx.x+1] && L.y == lum_s[threadIdx.y+1-1][threadIdx.x+1].w) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1-1][threadIdx.x+1]); w += 0.1875f; } if (consistent_s[threadIdx.y+1][threadIdx.x+1+1] && L.y == lum_s[threadIdx.y+1][threadIdx.x+1+1].x) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1][threadIdx.x+1+1]); w += 0.1875f; } if (consistent) { H2 += 0.5625f * norm_float(H); w += 0.5625f; } if (w > 0.0f) d[1] = yuv2depth(float(L.y) / 255.0f, H2.x/w, H2.y/w) * maxdepth; w = 0.0f; H2 = {0.0f,0.0f}; if (consistent_s[threadIdx.y+1+1][threadIdx.x+1-1] && L.z == lum_s[threadIdx.y+1+1][threadIdx.x+1-1].y) { H2 += 0.0625f * norm_float(chroma_s[threadIdx.y+1+1][threadIdx.x+1-1]); w += 0.0625f; } if (consistent_s[threadIdx.y+1+1][threadIdx.x+1] && L.z == lum_s[threadIdx.y+1+1][threadIdx.x+1].x) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1+1][threadIdx.x+1]); w += 0.1875f; } if (consistent_s[threadIdx.y+1][threadIdx.x+1-1] && L.z == lum_s[threadIdx.y+1][threadIdx.x+1-1].w) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1][threadIdx.x+1-1]); w += 0.1875f; } if (consistent) { H2 += 0.5625f * norm_float(H); w += 0.5625f; } if (w > 0.0f) d[2] = yuv2depth(float(L.z) / 255.0f, H2.x/w, H2.y/w) * maxdepth; w = 0.0f; H2 = {0.0f,0.0f}; if (consistent_s[threadIdx.y+1+1][threadIdx.x+1+1] && L.w == lum_s[threadIdx.y+1+1][threadIdx.x+1+1].x) { H2 += 0.0625f * norm_float(chroma_s[threadIdx.y+1+1][threadIdx.x+1+1]); w += 0.0625f; } if (consistent_s[threadIdx.y+1+1][threadIdx.x+1] && L.w == lum_s[threadIdx.y+1+1][threadIdx.x+1].y) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1+1][threadIdx.x+1]); w += 0.1875f; } if 
(consistent_s[threadIdx.y+1][threadIdx.x+1+1] && L.w == lum_s[threadIdx.y+1][threadIdx.x+1+1].z) { H2 += 0.1875f * norm_float(chroma_s[threadIdx.y+1][threadIdx.x+1+1]); w += 0.1875f; } if (consistent_s[threadIdx.y+1][threadIdx.x+1]) { H2 += 0.5625f * norm_float(H); w += 0.5625f; } if (w > 0.0f) d[3] = yuv2depth(float(L.w) / 255.0f, H2.x/w, H2.y/w) * maxdepth; if (x < depth.cols && y < depth.rows) { depth(y,x) = d[0]; depth(y,x+1) = d[1]; depth(y+1,x) = d[2]; depth(y+1,x+1) = d[3]; } } void ftl::cuda::vuya_to_depth(const cv::cuda::PtrStepSz<float> &depth, const cv::cuda::PtrStepSz<ushort> &luminance, const cv::cuda::PtrStepSz<ushort> &chroma, float maxdepth, cv::cuda::Stream &stream) { static const int THREADS_X = 16; static const int THREADS_Y = 8; const dim3 gridSize((depth.cols/2 + THREADS_X - 1)/THREADS_X, (depth.rows/2 + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); vuya_to_depth_kernel<ushort,THREADS_X,THREADS_Y><<<gridSize, blockSize, 0, cv::cuda::StreamAccessor::getStream(stream)>>>(depth, luminance.data, chroma.data, int(luminance.step/sizeof(ushort)), maxdepth); cudaSafeCall( cudaGetLastError() ); } void ftl::cuda::smooth_y(const cv::cuda::PtrStepSz<ushort4> &rgba, cv::cuda::Stream &stream) { // REMOVED!! } // ==== Colour conversions ===================================================== // Some of the following comes from the defunct NvPipe library. It has been // modified by us. __constant__ float matYuv2Rgb[3][3]; __constant__ float matRgb2Yuv[3][3]; static void inline GetConstants(int iMatrix, float &wr, float &wb, int &black, int &white, int &max) { // Default is BT709 wr = 0.2126f; wb = 0.0722f; black = 16; white = 235; max = 255; if (iMatrix == ColorSpaceStandard_BT601) { wr = 0.2990f; wb = 0.1140f; } else if (iMatrix == ColorSpaceStandard_BT2020) { wr = 0.2627f; wb = 0.0593f; // 10-bit only black = 64 << 6; white = 940 << 6; max = (1 << 16) - 1; } } // Full-range BT.709 and BT.2020 are the default matrices used for YUV to RGB conversion for 8-bit and 10/12-bit encoded streams, respectively. // If color primaries are encoded/embedded in the bitstream, the client should use those color primaries in the conversion matrices for more accurate color reproduction. static void SetMatYuv2Rgb(int iMatrix) { float wr, wb; int black, white, max; GetConstants(iMatrix, wr, wb, black, white, max); float mat[3][3] = { 1.0f, 0.0f, (1.0f - wr) / 0.5f, 1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr), 1.0f, (1.0f - wb) / 0.5f, 0.0f, }; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]); } } cudaMemcpyToSymbol(matYuv2Rgb, mat, sizeof(mat)); } template<class T> __device__ static T Clamp(T x, T lower, T upper) { return x < lower ? lower : (x > upper ? 
upper : x); } template<class Rgb, class YuvUnit> __device__ inline Rgb YuvToRgbForPixel(YuvUnit y, YuvUnit u, YuvUnit v) { const int low = 1 << (sizeof(YuvUnit) * 8 - 4), mid = 1 << (sizeof(YuvUnit) * 8 - 1); float fy = (int)y - low, fu = (int)u - mid, fv = (int)v - mid; const float maxf = (1 << sizeof(YuvUnit) * 8) - 1.0f; YuvUnit r = (YuvUnit)Clamp(matYuv2Rgb[0][0] * fy + matYuv2Rgb[0][1] * fu + matYuv2Rgb[0][2] * fv, 0.0f, maxf), g = (YuvUnit)Clamp(matYuv2Rgb[1][0] * fy + matYuv2Rgb[1][1] * fu + matYuv2Rgb[1][2] * fv, 0.0f, maxf), b = (YuvUnit)Clamp(matYuv2Rgb[2][0] * fy + matYuv2Rgb[2][1] * fu + matYuv2Rgb[2][2] * fv, 0.0f, maxf); Rgb rgb{}; const int nShift = abs((int)sizeof(YuvUnit) - (int)sizeof(rgb.c.r)) * 8; if (sizeof(YuvUnit) >= sizeof(rgb.c.r)) { rgb.c.r = r >> nShift; rgb.c.g = g >> nShift; rgb.c.b = b >> nShift; } else { rgb.c.r = r << nShift; rgb.c.g = g << nShift; rgb.c.b = b << nShift; } return rgb; } template<class YuvUnitx2, class Rgb, class RgbIntx2> __global__ static void YuvToRgbKernel(uint8_t *pYuv, int nYuvPitch, uint8_t *pRgb, int nRgbPitch, int nWidth, int nHeight) { int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; if (x + 1 >= nWidth || y + 1 >= nHeight) { return; } uint8_t *pSrc = pYuv + x * sizeof(YuvUnitx2) / 2 + y * nYuvPitch; uint8_t *pDst = pRgb + x * sizeof(Rgb) + y * nRgbPitch; YuvUnitx2 l0 = *(YuvUnitx2 *)pSrc; YuvUnitx2 l1 = *(YuvUnitx2 *)(pSrc + nYuvPitch); YuvUnitx2 ch = *(YuvUnitx2 *)(pSrc + (nHeight - y / 2) * nYuvPitch); *(RgbIntx2 *)pDst = RgbIntx2 { YuvToRgbForPixel<Rgb>(l0.x, ch.x, ch.y).d, YuvToRgbForPixel<Rgb>(l0.y, ch.x, ch.y).d, }; *(RgbIntx2 *)(pDst + nRgbPitch) = RgbIntx2 { YuvToRgbForPixel<Rgb>(l1.x, ch.x, ch.y).d, YuvToRgbForPixel<Rgb>(l1.y, ch.x, ch.y).d, }; } template <class COLOR32> void Nv12ToColor32(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix, cudaStream_t s) { SetMatYuv2Rgb(iMatrix); YuvToRgbKernel<uchar2, COLOR32, uint2> <<<dim3((nWidth + 63) / 32 / 2, (nHeight + 3) / 2 / 2), dim3(32, 2)>>> (dpNv12, nNv12Pitch, dpBgra, nBgraPitch, nWidth, nHeight); } template void Nv12ToColor32<BGRA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix, cudaStream_t); template void Nv12ToColor32<RGBA32>(uint8_t *dpNv12, int nNv12Pitch, uint8_t *dpBgra, int nBgraPitch, int nWidth, int nHeight, int iMatrix, cudaStream_t); __global__ static void nv12_to_float(const uint8_t* __restrict__ src, uint32_t srcPitch, float* dst, uint32_t dstPitch, uint32_t width, uint32_t height) { const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { const uint32_t i = y * srcPitch + x; const uint32_t j = y * dstPitch + x; // Copy higher byte from left half of Y channel ushort value = (src[i]) + (src[i+width]<<8); //dst[j] = src[i]; // Copy lower byte from right half of Y channel //dst[j + 1] = src[i + width]; dst[j] = float(value) / 1000.0f; } } void ftl::cuda::nv12_to_float(const uint8_t* src, uint32_t srcPitch, float* dst, uint32_t dstPitch, uint32_t width, uint32_t height, cudaStream_t s) { static const int THREADS_X = 16; static const int THREADS_Y = 16; dim3 gridSize(width / THREADS_X + 1, height / THREADS_Y + 1); dim3 blockSize(THREADS_X, THREADS_Y); ::nv12_to_float << <gridSize, blockSize, 0, s >> > (src, srcPitch, dst, dstPitch, width, height); } __global__ void float_to_nv12_16bit(const float* __restrict__ 
src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) { const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { const uint32_t i = y * srcPitch + x; const uint32_t j = y * dstPitch + x; float d = src[i]; ushort ds = ushort(d*1000.0f); // Copy higher byte to left half of Y channel dst[j] = ds & 0xFF; // Copy lower byte to right half of Y channel dst[j + width] = ds >> 8; // Blank UV channel if (y < height / 2) { uint8_t* UV = dst + dstPitch * (height + y); UV[2 * x + 0] = 0; UV[2 * x + 1] = 0; } } } void ftl::cuda::float_to_nv12_16bit(const float* src, uint32_t srcPitch, uchar* dst, uint32_t dstPitch, uint32_t width, uint32_t height, cudaStream_t s) { static const int THREADS_X = 16; static const int THREADS_Y = 16; dim3 gridSize(width / THREADS_X + 1, height / THREADS_Y + 1); dim3 blockSize(THREADS_X, THREADS_Y); ::float_to_nv12_16bit << <gridSize, blockSize, 0, s >> > (src, srcPitch, dst, dstPitch, width, height); }
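The float_to_nv12_16bit / nv12_to_float pair above packs depth as 16-bit millimetres split across the left and right halves of each Y row (low byte at x, high byte at x + width). The toy sketch below shows that byte split on a single value; the row width and sample depth are arbitrary assumptions made for illustration.

#include <cstdint>
#include <cstdio>

int main() {
    // Pack a depth value the way float_to_nv12_16bit does: metres -> millimetres,
    // low byte in the left half of the Y row, high byte in the right half.
    const int width = 4;                              // toy row width (assumption)
    uint8_t row[2 * width] = {0};
    float depth = 1.234f;                             // metres
    uint16_t mm = uint16_t(depth * 1000.0f);          // same truncating conversion as the kernel
    int x = 1;                                        // column inside the left half
    row[x]         = mm & 0xFF;                       // low byte
    row[x + width] = mm >> 8;                         // high byte

    // Unpack the way nv12_to_float does.
    uint16_t back = uint16_t(row[x]) + (uint16_t(row[x + width]) << 8);
    printf("decoded depth = %f m\n", back / 1000.0f); // expect ~1.234
    return 0;
}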
d952bbd3ac7e955b7b498442dcd0b8ad2719e1c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <bits/stdc++.h> #include <mpi.h> #include <omp.h> using namespace std; #define CSC(call) \ do { \ hipError_t res = call; \ if (res != hipSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, hipGetErrorString(res)); \ exit(0); \ } \ } while(0) struct vec3 { double x; double y; double z; __device__ __host__ vec3() {} __device__ __host__ vec3(double x, double y, double z) : x(x), y(y), z(z) {} }; struct polygon { vec3 x; vec3 y; vec3 z; uchar4 color; __device__ __host__ polygon() {} __device__ __host__ polygon(vec3 points[], uchar4 color) { x = points[0]; y = points[1]; z = points[2]; this->color = color; } __device__ __host__ polygon(vec3 a, vec3 b, vec3 c, uchar4 color) { x = a; y = b; z = c; this->color = color; } }; __device__ __host__ double dot(vec3 a, vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __device__ __host__ vec3 prod(vec3 a, vec3 b) { return vec3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); } __device__ __host__ vec3 mult_by_number(vec3 a, double num) { return vec3(a.x * num, a.y * num, a.z * num); } __device__ __host__ vec3 norm(vec3 v) { double l = sqrt(dot(v, v)); return vec3(v.x / l, v.y / l, v.z / l); } __device__ __host__ vec3 diff(vec3 a, vec3 b) { return vec3(a.x - b.x, a.y - b.y, a.z - b.z); } __device__ __host__ vec3 add(vec3 a, vec3 b) { return vec3(a.x + b.x, a.y + b.y, a.z + b.z); } __device__ __host__ vec3 mult(vec3 a, vec3 b, vec3 c, vec3 v) { return vec3(a.x * v.x + b.x * v.y + c.x * v.z, a.y * v.x + b.y * v.y + c.y * v.z, a.z * v.x + b.z * v.y + c.z * v.z); } void scene(polygon polygons[], vec3 points[], vec3 color) { uchar4 true_color = make_uchar4( color.x * 255, color.y * 255, color.z * 255, 0); polygons[0] = polygon(points[0], points[1], points[2], true_color); polygons[1] = polygon(points[0], points[2], points[3], true_color); } void hexahedron(polygon polygons[], double radius, vec3 center, vec3 color) { uchar4 true_color = make_uchar4( color.x * 255, color.y * 255, color.z * 255, 0); double a = 2 * radius / sqrt(3); vec3 vertex(center.x - a / 2, center.y - a / 2, center.z - a / 2); vec3 vertices[] = { vec3( vertex.x, vertex.y, vertex.z ), vec3( vertex.x, vertex.y + a, vertex.z ), vec3( vertex.x + a, vertex.y + a, vertex.z ), vec3( vertex.x + a, vertex.y, vertex.z ), vec3( vertex.x, vertex.y, vertex.z + a ), vec3( vertex.x, vertex.y + a, vertex.z + a ), vec3( vertex.x + a, vertex.y + a, vertex.z + a ), vec3( vertex.x + a, vertex.y, vertex.z + a ) }; polygons[2] = polygon(vertices[0], vertices[1], vertices[2], true_color); polygons[3] = polygon(vertices[2], vertices[3], vertices[0], true_color); polygons[4] = polygon(vertices[6], vertices[7], vertices[3], true_color); polygons[5] = polygon(vertices[3], vertices[2], vertices[6], true_color); polygons[6] = polygon(vertices[2], vertices[1], vertices[5], true_color); polygons[7] = polygon(vertices[5], vertices[6], vertices[2], true_color); polygons[8] = polygon(vertices[4], vertices[5], vertices[1], true_color); polygons[9] = polygon(vertices[1], vertices[0], vertices[4], true_color); polygons[10] = polygon(vertices[3], vertices[7], vertices[4], true_color); polygons[11] = polygon(vertices[4], vertices[0], vertices[3], true_color); polygons[12] = polygon(vertices[6], vertices[5], vertices[4], true_color); polygons[13] = polygon(vertices[4], vertices[7], vertices[6], true_color); } void octahedron(polygon polygons[], double radius, vec3 center, vec3 
color) { uchar4 true_color = make_uchar4( color.x * 255, color.y * 255, color.z * 255, 0); vec3 vertices[] = { vec3( center.x + radius, center.y, center.z ), vec3( center.x - radius, center.y, center.z ), vec3( center.x, center.y + radius, center.z ), vec3( center.x, center.y - radius, center.z ), vec3( center.x, center.y, center.z + radius ), vec3( center.x, center.y, center.z - radius ) }; polygons[14] = polygon(vertices[5], vertices[2], vertices[0], true_color); polygons[15] = polygon(vertices[5], vertices[0], vertices[3], true_color); polygons[16] = polygon(vertices[5], vertices[3], vertices[1], true_color); polygons[17] = polygon(vertices[5], vertices[1], vertices[2], true_color); polygons[18] = polygon(vertices[4], vertices[3], vertices[0], true_color); polygons[19] = polygon(vertices[4], vertices[1], vertices[3], true_color); polygons[20] = polygon(vertices[4], vertices[2], vertices[1], true_color); polygons[21] = polygon(vertices[4], vertices[0], vertices[2], true_color); } void dodecahedron(polygon polygons[], double radius, vec3 center, vec3 color) { uchar4 true_color = make_uchar4( color.x * 255, color.y * 255, color.z * 255, 0); double phi = (1 + sqrt(5)) / 2; //sorry for that vec3 vertices[] = { vec3(center.x + (-1/phi / sqrt(3) * radius), center.y + ( 0 * radius), center.z + ( phi / sqrt(3) * radius) ), vec3(center.x + ( 1/phi / sqrt(3) * radius), center.y + ( 0 * radius), center.z + ( phi / sqrt(3) * radius) ), vec3(center.x + (-1 / sqrt(3) * radius), center.y + ( 1 / sqrt(3) * radius), center.z + ( 1 / sqrt(3) * radius) ), vec3(center.x + ( 1 / sqrt(3) * radius), center.y + ( 1 / sqrt(3) * radius), center.z + ( 1 / sqrt(3) * radius) ), vec3(center.x + ( 1 / sqrt(3) * radius), center.y + (-1 / sqrt(3) * radius), center.z + ( 1 / sqrt(3) * radius) ), vec3(center.x + (-1 / sqrt(3) * radius), center.y + (-1 / sqrt(3) * radius), center.z + ( 1 / sqrt(3) * radius) ), vec3(center.x + ( 0 * radius), center.y + (-phi / sqrt(3) * radius), center.z + ( 1/phi / sqrt(3) * radius) ), vec3(center.x + ( 0 * radius), center.y + ( phi / sqrt(3) * radius), center.z + ( 1/phi / sqrt(3) * radius) ), vec3(center.x + (-phi / sqrt(3) * radius), center.y + (-1/phi / sqrt(3) * radius), center.z + ( 0 * radius) ), vec3(center.x + (-phi / sqrt(3) * radius), center.y + ( 1/phi / sqrt(3) * radius), center.z + ( 0 * radius) ), vec3(center.x + ( phi / sqrt(3) * radius), center.y + ( 1/phi / sqrt(3) * radius), center.z + ( 0 * radius) ), vec3(center.x + ( phi / sqrt(3) * radius), center.y + (-1/phi / sqrt(3) * radius), center.z + ( 0 * radius) ), vec3(center.x + ( 0 * radius), center.y + (-phi / sqrt(3) * radius), center.z + (-1/phi / sqrt(3) * radius) ), vec3(center.x + ( 0 * radius), center.y + ( phi / sqrt(3) * radius), center.z + (-1/phi / sqrt(3) * radius) ), vec3(center.x + ( 1 / sqrt(3) * radius), center.y + ( 1 / sqrt(3) * radius), center.z + (-1 / sqrt(3) * radius) ), vec3(center.x + ( 1 / sqrt(3) * radius), center.y + (-1 / sqrt(3) * radius), center.z + (-1 / sqrt(3) * radius) ), vec3(center.x + (-1 / sqrt(3) * radius), center.y + (-1 / sqrt(3) * radius), center.z + (-1 / sqrt(3) * radius) ), vec3(center.x + (-1 / sqrt(3) * radius), center.y + ( 1 / sqrt(3) * radius), center.z + (-1 / sqrt(3) * radius) ), vec3(center.x + ( 1/phi / sqrt(3) * radius), center.y + ( 0 * radius), center.z + (-phi / sqrt(3) * radius) ), vec3(center.x + (-1/phi / sqrt(3) * radius), center.y + ( 0 * radius), center.z + (-phi / sqrt(3) * radius) ) }; polygons[22] = polygon(vertices[4], vertices[0], vertices[6], 
true_color); polygons[23] = polygon(vertices[0], vertices[5], vertices[6], true_color); polygons[24] = polygon(vertices[0], vertices[4], vertices[1], true_color); polygons[25] = polygon(vertices[0], vertices[3], vertices[7], true_color); polygons[26] = polygon(vertices[2], vertices[0], vertices[7], true_color); polygons[27] = polygon(vertices[0], vertices[1], vertices[3], true_color); polygons[28] = polygon(vertices[10], vertices[1], vertices[11], true_color); polygons[29] = polygon(vertices[3], vertices[1], vertices[10], true_color); polygons[30] = polygon(vertices[1], vertices[4], vertices[11], true_color); polygons[31] = polygon(vertices[5], vertices[0], vertices[8], true_color); polygons[32] = polygon(vertices[0], vertices[2], vertices[9], true_color); polygons[33] = polygon(vertices[8], vertices[0], vertices[9], true_color); polygons[34] = polygon(vertices[5], vertices[8], vertices[16], true_color); polygons[35] = polygon(vertices[6], vertices[5], vertices[12], true_color); polygons[36] = polygon(vertices[12], vertices[5], vertices[16], true_color); polygons[37] = polygon(vertices[4], vertices[12], vertices[15], true_color); polygons[38] = polygon(vertices[4], vertices[6], vertices[12], true_color); polygons[39] = polygon(vertices[11], vertices[4], vertices[15], true_color); polygons[40] = polygon(vertices[2], vertices[13], vertices[17], true_color); polygons[41] = polygon(vertices[2], vertices[7], vertices[13], true_color); polygons[42] = polygon(vertices[9], vertices[2], vertices[17], true_color); polygons[43] = polygon(vertices[13], vertices[3], vertices[14], true_color); polygons[44] = polygon(vertices[7], vertices[3], vertices[13], true_color); polygons[45] = polygon(vertices[3], vertices[10], vertices[14], true_color); polygons[46] = polygon(vertices[8], vertices[17], vertices[19], true_color); polygons[47] = polygon(vertices[16], vertices[8], vertices[19], true_color); polygons[48] = polygon(vertices[8], vertices[9], vertices[17], true_color); polygons[49] = polygon(vertices[14], vertices[11], vertices[18], true_color); polygons[50] = polygon(vertices[11], vertices[15], vertices[18], true_color); polygons[51] = polygon(vertices[10], vertices[11], vertices[14], true_color); polygons[52] = polygon(vertices[12], vertices[19], vertices[18], true_color); polygons[53] = polygon(vertices[15], vertices[12], vertices[18], true_color); polygons[54] = polygon(vertices[12], vertices[16], vertices[19], true_color); polygons[55] = polygon(vertices[19], vertices[13], vertices[18], true_color); polygons[56] = polygon(vertices[17], vertices[13], vertices[19], true_color); polygons[57] = polygon(vertices[13], vertices[14], vertices[18], true_color); } __device__ __host__ uchar4 ray(vec3 pos, vec3 dir, vec3 light_src, vec3 light_color, polygon polygons[], int len) { int min_value = -1; double ts_min; for (int i = 0; i < len; i++) { vec3 e1 = diff(polygons[i].y, polygons[i].x); vec3 e2 = diff(polygons[i].z, polygons[i].x); vec3 p = prod(dir, e2); double div = dot(p, e1); if (fabs(div) < 1e-10) continue; vec3 t = diff(pos, polygons[i].x); double u = dot(p, t) / div; if (u < 0.0 || u > 1.0) continue; vec3 q = prod(t, e1); double v = dot(q, dir) / div; if (v < 0.0 || v + u > 1.0) continue; double ts = dot(q, e2) / div; if (ts < 0.0) continue; if (min_value == -1 || ts < ts_min) { min_value = i; ts_min = ts; } } if (min_value == -1) return make_uchar4(0, 0, 0, 0); // To calculate light pos = add(mult_by_number(dir, ts_min), pos); dir = diff(light_src, pos); double length = sqrt(dot(dir, dir)); dir = 
norm(dir); for (int i = 0; i < len; i++) { vec3 e1 = diff(polygons[i].y, polygons[i].x); vec3 e2 = diff(polygons[i].z, polygons[i].x); vec3 p = prod(dir, e2); double div = dot(p, e1); if (fabs(div) < 1e-10) continue; vec3 t = diff(pos, polygons[i].x); double u = dot(p, t) / div; if (u < 0.0 || u > 1.0) continue; vec3 q = prod(t, e1); double v = dot(q, dir) / div; if (v < 0.0 || v + u > 1.0) continue; double ts = dot(q, e2) / div; if (ts > 0.0 && ts < length && i != min_value) { return make_uchar4(0, 0, 0, 0); } } uchar4 color_min = polygons[min_value].color; color_min.x = color_min.x * light_color.x; color_min.y = color_min.y * light_color.y; color_min.z = color_min.z * light_color.z; return color_min; } void render_cpu(vec3 pc, vec3 pv, int w, int h, double angle, uchar4 *data, vec3 light_src, vec3 light_color, polygon polygons[], int len) { int i, j; double dw = 2.0 / (w - 1.0); double dh = 2.0 / (h - 1.0); double z = 1.0 / tan(angle * M_PI / 360.0); vec3 bz = norm(diff(pv, pc)); vec3 bx = norm(prod(bz, vec3(0.0, 0.0, 1.0))); vec3 by = norm(prod(bx, bz)); #pragma omp parallel for for (j = 0; j < h; j++) { for (i = 0; i < w; i++) { vec3 v; v.x = -1.0 + dw * i; v.y = (-1.0 + dh * j) * h / w; v.z = z; vec3 dir = mult(bx, by, bz, v); data[(h - 1 - j) * w + i] = ray(pc, norm(dir), light_src, light_color, polygons, len); } } } __global__ void render(vec3 pc, vec3 pv, int w, int h, double angle, uchar4 *data, vec3 light_src, vec3 light_color, polygon polygons[], int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int i, j; double dw = 2.0 / (w - 1.0); double dh = 2.0 / (h - 1.0); double z = 1.0 / tan(angle * M_PI / 360.0); vec3 bz = norm(diff(pv, pc)); vec3 bx = norm(prod(bz, vec3(0.0, 0.0, 1.0))); vec3 by = norm(prod(bx, bz)); for (j = idy; j < h; j += offsety) { for (i = idx; i < w; i += offsetx) { vec3 v = vec3(-1.0 + dw * i, (-1.0 + dh * j) * h / w, z); vec3 dir = mult(bx, by, bz, v); data[(h - 1 - j) * w + i] = ray(pc, norm(dir), light_src, light_color, polygons, len); } } } void ssaa_cpu(uchar4 *src, uchar4 *out, int w, int h, int wScale, int hScale) { int n = wScale * hScale; int x, y, i, j; uchar4 p; uint4 s; // #pragma omp parallel for for(y = 0; y < h; y += 1) { for(x = 0; x < w; x += 1) { s = make_uint4(0,0,0,0); for (i = 0; i < wScale; ++i) { for (j = 0; j < hScale; ++j){ p = src[ w * wScale * (y * hScale + j) + (x * wScale + i) ]; s.x += p.x; s.y += p.y; s.z += p.z; } } s.x /= n; s.y /= n; s.z /= n; out[y * w + x] = make_uchar4(s.x, s.y, s.z, s.w); } } } __global__ void ssaa(uchar4 *src, uchar4 *out, int w, int h, int wScale, int hScale) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int n = wScale * hScale; int x, y, i, j; uchar4 p; uint4 s; for(y = idy; y < h; y += offsety) { for(x = idx; x < w; x += offsetx) { s = make_uint4(0,0,0,0); for (i = 0; i < wScale; ++i) { for (j = 0; j < hScale; ++j){ p = src[ w * wScale * (y * hScale + j) + (x * wScale + i) ]; s.x += p.x; s.y += p.y; s.z += p.z; } } s.x /= n; s.y /= n; s.z /= n; out[y * w + x] = make_uchar4(s.x, s.y, s.z, s.w); } } } int main(int argc, char *argv[]) { ios_base::sync_with_stdio(false); cin.tie(NULL); cout.tie(NULL); bool gpu = true; // default values int frames = 30; char out[256] = "out/%d.data"; int width = 640, height = 480; double fov = 120; double 
r_center_0 = 2, z_center_0 = 2, f_center_0 = .5, A_center_r = 1, A_center_z = 1.5, w_center_r = 1, w_center_z = .5, w_center_f = 1, p_center_r = 0, p_center_z = 1.5, r_direction_0 = .5, z_direction_0 = .5, f_direction_0 = .1, A_direction_r = 2, A_direction_z = .5, w_direction_r = 2, w_direction_z = .5, w_direction_f = 2, p_direction_r = 0, p_direction_z = 0; vec3 hexahedron_center = vec3(4, 0, 0), hexahedron_color = vec3(1, 0, 0); double hexahedron_radius = 2; vec3 octahedron_center = vec3(0, 3, 0), octahedron_color = vec3(0, 1, 0); double octahedron_radius = 1; vec3 dodecahedron_center = vec3(-2, 1, 1), dodecahedron_color = vec3(0, 0, 1); double dodecahedron_radius = 1; vec3 scene_points[] = {vec3(-10, -10, -1), vec3(-10, 10, -1), vec3(10, 10, -1), vec3(10, -10, -1)}; vec3 scene_color = vec3(0.952, 0.635, 0.070); int light = 1; vec3 light_src = vec3(100, 100, 100), light_color = vec3(1, 1, 1); int multiplier = 1; // fillers double _; string _str; // check parameters if (argc == 1 || argc == 2) { if (argc == 2) { // check gpu flag if ((string(argv[1]) == "--gpu") || string(argv[1]) == "--default") { gpu = true; } else if (string(argv[1]) == "--cpu") { gpu = false; } else { cerr << "Invalid command line parameter\n"; cerr << "Expected one of this:\n" "\t--gpu\n" "\t--default\n" "\t--cpu\n"; exit(1); } // check input if ((string(argv[1]) == "--gpu") || string(argv[1]) == "--cpu") { cin >> frames >> out >> width >> height >> fov; cin >> r_center_0 >> z_center_0 >> f_center_0 >> A_center_r >> A_center_z >> w_center_r >> w_center_z >> w_center_f >> p_center_r >> p_center_z; cin >> r_direction_0 >> z_direction_0 >> f_direction_0 >> A_direction_r >> A_direction_z >> w_direction_r >> w_direction_z >> w_direction_f >> p_direction_r >> p_direction_z; cin >> hexahedron_center.x >> hexahedron_center.y >> hexahedron_center.z >> hexahedron_color.x >> hexahedron_color.y >> hexahedron_color.z >> hexahedron_radius >> _ >> _ >> _; cin >> octahedron_center.x >> octahedron_center.y >> octahedron_center.z >> octahedron_color.x >> octahedron_color.y >> octahedron_color.z >> octahedron_radius >> _ >> _ >> _; cin >> dodecahedron_center.x >> dodecahedron_center.y >> dodecahedron_center.z >> dodecahedron_color.x >> dodecahedron_color.y >> dodecahedron_color.z >> dodecahedron_radius >> _ >> _ >> _; cin >> scene_points[0].x >> scene_points[0].y >> scene_points[0].z >> scene_points[1].x >> scene_points[1].y >> scene_points[1].z >> scene_points[2].x >> scene_points[2].y >> scene_points[2].z >> scene_points[3].x >> scene_points[3].y >> scene_points[3].z >> _str; cin >> scene_color.x >> scene_color.y >> scene_color.z >> _; cin >> light; assert(light == 1); cin >> light_src.x >> light_src.y >> light_src.z >> light_color.x >> light_color.y >> light_color.z >> _ >> multiplier; } } } else { cerr << "Wrong number of command line parameters, expected extra one or no one\n"; exit(1); } int numproc, id; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Bcast(&frames, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(out, 256, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast(&width, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&height, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&fov, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&r_center_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&z_center_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&f_center_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&A_center_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&A_center_z, 1, MPI_DOUBLE, 0, 
MPI_COMM_WORLD); MPI_Bcast(&w_center_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_center_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_center_f, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&p_center_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&p_center_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&r_direction_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&z_direction_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&f_direction_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&A_direction_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&A_direction_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_direction_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_direction_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_direction_f, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&p_direction_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&p_direction_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&hexahedron_center, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&hexahedron_color, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&hexahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&octahedron_center, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&octahedron_color, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&octahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&dodecahedron_center, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&dodecahedron_color, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&dodecahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)scene_points, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)scene_points + 1, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)scene_points + 2, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)scene_points + 3, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&scene_color, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&light, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&light_src, light * 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&light_color, light * 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&multiplier, 1, MPI_INT, 0, MPI_COMM_WORLD); int polygons_sz = 58; polygon polygons[polygons_sz], *cuda_polygons; uchar4 *data = (uchar4 *) malloc(multiplier * multiplier * width * height * sizeof(uchar4)), *ssaa_data = (uchar4 *) malloc(width * height * sizeof(uchar4)), *cuda_data, *ssaa_cuda_data; if (gpu) { CSC(hipMalloc((polygon **) (&cuda_polygons), polygons_sz * sizeof(polygon))); CSC(hipMalloc((uchar4 * *)(&cuda_data), multiplier * multiplier * width * height * sizeof(uchar4))); CSC(hipMalloc((uchar4 * *)(&ssaa_cuda_data), width * height * sizeof(uchar4))); } // fill polygons hexahedron(polygons, hexahedron_radius, hexahedron_center, hexahedron_color); octahedron(polygons, octahedron_radius, octahedron_center, octahedron_color); dodecahedron(polygons, dodecahedron_radius, dodecahedron_center, dodecahedron_color); scene(polygons, scene_points, scene_color); if (gpu) { CSC(hipMemcpy(cuda_polygons, polygons, polygons_sz * sizeof(polygon), hipMemcpyHostToDevice)); } vec3 pc, pv; char buff[256]; for (int iter = id; iter < frames; iter += numproc) { double step = 2 * M_PI * iter / frames; double r_center = A_center_r * sin(w_center_r * step + p_center_r) + r_center_0; double z_center = A_center_z * sin(w_center_z * step + p_center_z) + z_center_0; double f_center = w_center_f * step + f_center_0; double r_direction = A_direction_r * sin(w_direction_r * step + p_direction_r) + r_direction_0; 
        double z_direction = A_direction_z * sin(w_direction_z * step + p_direction_z) + z_direction_0;
        double f_direction = w_direction_f * step + f_direction_0;

        pc.x = cos(f_center) * r_center;
        pc.y = sin(f_center) * r_center;
        pc.z = z_center;

        pv.x = cos(f_direction) * r_direction;
        pv.y = sin(f_direction) * r_direction;
        pv.z = z_direction;

        // time to process one frame
        hipEvent_t start, stop;
        float gpu_time = 0.0;
        hipEventCreate(&start);
        hipEventCreate(&stop);
        hipEventRecord(start, 0);

        if (gpu) {
            hipLaunchKernelGGL(render, dim3(16, 16), dim3(16, 16), 0, 0,
                               pc, pv, width * multiplier, height * multiplier, fov,
                               cuda_data, light_src, light_color, cuda_polygons, polygons_sz);
            CSC(hipGetLastError());

            hipLaunchKernelGGL(ssaa, dim3(16, 16), dim3(16, 16), 0, 0,
                               cuda_data, ssaa_cuda_data, width, height, multiplier, multiplier);
            CSC(hipGetLastError());

            CSC(hipMemcpy(data, ssaa_cuda_data, sizeof(uchar4) * width * height, hipMemcpyDeviceToHost));
        } else {
            render_cpu(pc, pv, width * multiplier, height * multiplier, fov, data, light_src, light_color, polygons, polygons_sz);
            ssaa_cpu(data, ssaa_data, width, height, multiplier, multiplier);
            memcpy(data, ssaa_data, sizeof(uchar4) * width * height);
        }

        // time to process one frame
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&gpu_time, start, stop);

        sprintf(buff, out, iter);
        cerr << id << ": " << iter << "\t" << gpu_time << "\t" << width * height * multiplier * multiplier << endl;

        FILE *out = fopen(buff, "w");
        fwrite(&width, sizeof(int), 1, out);
        fwrite(&height, sizeof(int), 1, out);
        fwrite(data, sizeof(uchar4), width * height, out);
        fclose(out);
    }

    if (gpu) {
        CSC(hipFree(cuda_data));
        CSC(hipFree(ssaa_cuda_data));
        CSC(hipFree(cuda_polygons));
    }

    free(data);
    free(ssaa_data);
    MPI_Finalize();
    return 0;
}
d952bbd3ac7e955b7b498442dcd0b8ad2719e1c2.cu
#include <bits/stdc++.h> #include <mpi.h> #include <omp.h> using namespace std; #define CSC(call) \ do { \ cudaError_t res = call; \ if (res != cudaSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, cudaGetErrorString(res)); \ exit(0); \ } \ } while(0) struct vec3 { double x; double y; double z; __device__ __host__ vec3() {} __device__ __host__ vec3(double x, double y, double z) : x(x), y(y), z(z) {} }; struct polygon { vec3 x; vec3 y; vec3 z; uchar4 color; __device__ __host__ polygon() {} __device__ __host__ polygon(vec3 points[], uchar4 color) { x = points[0]; y = points[1]; z = points[2]; this->color = color; } __device__ __host__ polygon(vec3 a, vec3 b, vec3 c, uchar4 color) { x = a; y = b; z = c; this->color = color; } }; __device__ __host__ double dot(vec3 a, vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __device__ __host__ vec3 prod(vec3 a, vec3 b) { return vec3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); } __device__ __host__ vec3 mult_by_number(vec3 a, double num) { return vec3(a.x * num, a.y * num, a.z * num); } __device__ __host__ vec3 norm(vec3 v) { double l = sqrt(dot(v, v)); return vec3(v.x / l, v.y / l, v.z / l); } __device__ __host__ vec3 diff(vec3 a, vec3 b) { return vec3(a.x - b.x, a.y - b.y, a.z - b.z); } __device__ __host__ vec3 add(vec3 a, vec3 b) { return vec3(a.x + b.x, a.y + b.y, a.z + b.z); } __device__ __host__ vec3 mult(vec3 a, vec3 b, vec3 c, vec3 v) { return vec3(a.x * v.x + b.x * v.y + c.x * v.z, a.y * v.x + b.y * v.y + c.y * v.z, a.z * v.x + b.z * v.y + c.z * v.z); } void scene(polygon polygons[], vec3 points[], vec3 color) { uchar4 true_color = make_uchar4( color.x * 255, color.y * 255, color.z * 255, 0); polygons[0] = polygon(points[0], points[1], points[2], true_color); polygons[1] = polygon(points[0], points[2], points[3], true_color); } void hexahedron(polygon polygons[], double radius, vec3 center, vec3 color) { uchar4 true_color = make_uchar4( color.x * 255, color.y * 255, color.z * 255, 0); double a = 2 * radius / sqrt(3); vec3 vertex(center.x - a / 2, center.y - a / 2, center.z - a / 2); vec3 vertices[] = { vec3( vertex.x, vertex.y, vertex.z ), vec3( vertex.x, vertex.y + a, vertex.z ), vec3( vertex.x + a, vertex.y + a, vertex.z ), vec3( vertex.x + a, vertex.y, vertex.z ), vec3( vertex.x, vertex.y, vertex.z + a ), vec3( vertex.x, vertex.y + a, vertex.z + a ), vec3( vertex.x + a, vertex.y + a, vertex.z + a ), vec3( vertex.x + a, vertex.y, vertex.z + a ) }; polygons[2] = polygon(vertices[0], vertices[1], vertices[2], true_color); polygons[3] = polygon(vertices[2], vertices[3], vertices[0], true_color); polygons[4] = polygon(vertices[6], vertices[7], vertices[3], true_color); polygons[5] = polygon(vertices[3], vertices[2], vertices[6], true_color); polygons[6] = polygon(vertices[2], vertices[1], vertices[5], true_color); polygons[7] = polygon(vertices[5], vertices[6], vertices[2], true_color); polygons[8] = polygon(vertices[4], vertices[5], vertices[1], true_color); polygons[9] = polygon(vertices[1], vertices[0], vertices[4], true_color); polygons[10] = polygon(vertices[3], vertices[7], vertices[4], true_color); polygons[11] = polygon(vertices[4], vertices[0], vertices[3], true_color); polygons[12] = polygon(vertices[6], vertices[5], vertices[4], true_color); polygons[13] = polygon(vertices[4], vertices[7], vertices[6], true_color); } void octahedron(polygon polygons[], double radius, vec3 center, vec3 color) { uchar4 true_color = make_uchar4( color.x * 255, color.y * 255, color.z * 255, 
0); vec3 vertices[] = { vec3( center.x + radius, center.y, center.z ), vec3( center.x - radius, center.y, center.z ), vec3( center.x, center.y + radius, center.z ), vec3( center.x, center.y - radius, center.z ), vec3( center.x, center.y, center.z + radius ), vec3( center.x, center.y, center.z - radius ) }; polygons[14] = polygon(vertices[5], vertices[2], vertices[0], true_color); polygons[15] = polygon(vertices[5], vertices[0], vertices[3], true_color); polygons[16] = polygon(vertices[5], vertices[3], vertices[1], true_color); polygons[17] = polygon(vertices[5], vertices[1], vertices[2], true_color); polygons[18] = polygon(vertices[4], vertices[3], vertices[0], true_color); polygons[19] = polygon(vertices[4], vertices[1], vertices[3], true_color); polygons[20] = polygon(vertices[4], vertices[2], vertices[1], true_color); polygons[21] = polygon(vertices[4], vertices[0], vertices[2], true_color); } void dodecahedron(polygon polygons[], double radius, vec3 center, vec3 color) { uchar4 true_color = make_uchar4( color.x * 255, color.y * 255, color.z * 255, 0); double phi = (1 + sqrt(5)) / 2; //sorry for that vec3 vertices[] = { vec3(center.x + (-1/phi / sqrt(3) * radius), center.y + ( 0 * radius), center.z + ( phi / sqrt(3) * radius) ), vec3(center.x + ( 1/phi / sqrt(3) * radius), center.y + ( 0 * radius), center.z + ( phi / sqrt(3) * radius) ), vec3(center.x + (-1 / sqrt(3) * radius), center.y + ( 1 / sqrt(3) * radius), center.z + ( 1 / sqrt(3) * radius) ), vec3(center.x + ( 1 / sqrt(3) * radius), center.y + ( 1 / sqrt(3) * radius), center.z + ( 1 / sqrt(3) * radius) ), vec3(center.x + ( 1 / sqrt(3) * radius), center.y + (-1 / sqrt(3) * radius), center.z + ( 1 / sqrt(3) * radius) ), vec3(center.x + (-1 / sqrt(3) * radius), center.y + (-1 / sqrt(3) * radius), center.z + ( 1 / sqrt(3) * radius) ), vec3(center.x + ( 0 * radius), center.y + (-phi / sqrt(3) * radius), center.z + ( 1/phi / sqrt(3) * radius) ), vec3(center.x + ( 0 * radius), center.y + ( phi / sqrt(3) * radius), center.z + ( 1/phi / sqrt(3) * radius) ), vec3(center.x + (-phi / sqrt(3) * radius), center.y + (-1/phi / sqrt(3) * radius), center.z + ( 0 * radius) ), vec3(center.x + (-phi / sqrt(3) * radius), center.y + ( 1/phi / sqrt(3) * radius), center.z + ( 0 * radius) ), vec3(center.x + ( phi / sqrt(3) * radius), center.y + ( 1/phi / sqrt(3) * radius), center.z + ( 0 * radius) ), vec3(center.x + ( phi / sqrt(3) * radius), center.y + (-1/phi / sqrt(3) * radius), center.z + ( 0 * radius) ), vec3(center.x + ( 0 * radius), center.y + (-phi / sqrt(3) * radius), center.z + (-1/phi / sqrt(3) * radius) ), vec3(center.x + ( 0 * radius), center.y + ( phi / sqrt(3) * radius), center.z + (-1/phi / sqrt(3) * radius) ), vec3(center.x + ( 1 / sqrt(3) * radius), center.y + ( 1 / sqrt(3) * radius), center.z + (-1 / sqrt(3) * radius) ), vec3(center.x + ( 1 / sqrt(3) * radius), center.y + (-1 / sqrt(3) * radius), center.z + (-1 / sqrt(3) * radius) ), vec3(center.x + (-1 / sqrt(3) * radius), center.y + (-1 / sqrt(3) * radius), center.z + (-1 / sqrt(3) * radius) ), vec3(center.x + (-1 / sqrt(3) * radius), center.y + ( 1 / sqrt(3) * radius), center.z + (-1 / sqrt(3) * radius) ), vec3(center.x + ( 1/phi / sqrt(3) * radius), center.y + ( 0 * radius), center.z + (-phi / sqrt(3) * radius) ), vec3(center.x + (-1/phi / sqrt(3) * radius), center.y + ( 0 * radius), center.z + (-phi / sqrt(3) * radius) ) }; polygons[22] = polygon(vertices[4], vertices[0], vertices[6], true_color); polygons[23] = polygon(vertices[0], vertices[5], vertices[6], true_color); 
polygons[24] = polygon(vertices[0], vertices[4], vertices[1], true_color); polygons[25] = polygon(vertices[0], vertices[3], vertices[7], true_color); polygons[26] = polygon(vertices[2], vertices[0], vertices[7], true_color); polygons[27] = polygon(vertices[0], vertices[1], vertices[3], true_color); polygons[28] = polygon(vertices[10], vertices[1], vertices[11], true_color); polygons[29] = polygon(vertices[3], vertices[1], vertices[10], true_color); polygons[30] = polygon(vertices[1], vertices[4], vertices[11], true_color); polygons[31] = polygon(vertices[5], vertices[0], vertices[8], true_color); polygons[32] = polygon(vertices[0], vertices[2], vertices[9], true_color); polygons[33] = polygon(vertices[8], vertices[0], vertices[9], true_color); polygons[34] = polygon(vertices[5], vertices[8], vertices[16], true_color); polygons[35] = polygon(vertices[6], vertices[5], vertices[12], true_color); polygons[36] = polygon(vertices[12], vertices[5], vertices[16], true_color); polygons[37] = polygon(vertices[4], vertices[12], vertices[15], true_color); polygons[38] = polygon(vertices[4], vertices[6], vertices[12], true_color); polygons[39] = polygon(vertices[11], vertices[4], vertices[15], true_color); polygons[40] = polygon(vertices[2], vertices[13], vertices[17], true_color); polygons[41] = polygon(vertices[2], vertices[7], vertices[13], true_color); polygons[42] = polygon(vertices[9], vertices[2], vertices[17], true_color); polygons[43] = polygon(vertices[13], vertices[3], vertices[14], true_color); polygons[44] = polygon(vertices[7], vertices[3], vertices[13], true_color); polygons[45] = polygon(vertices[3], vertices[10], vertices[14], true_color); polygons[46] = polygon(vertices[8], vertices[17], vertices[19], true_color); polygons[47] = polygon(vertices[16], vertices[8], vertices[19], true_color); polygons[48] = polygon(vertices[8], vertices[9], vertices[17], true_color); polygons[49] = polygon(vertices[14], vertices[11], vertices[18], true_color); polygons[50] = polygon(vertices[11], vertices[15], vertices[18], true_color); polygons[51] = polygon(vertices[10], vertices[11], vertices[14], true_color); polygons[52] = polygon(vertices[12], vertices[19], vertices[18], true_color); polygons[53] = polygon(vertices[15], vertices[12], vertices[18], true_color); polygons[54] = polygon(vertices[12], vertices[16], vertices[19], true_color); polygons[55] = polygon(vertices[19], vertices[13], vertices[18], true_color); polygons[56] = polygon(vertices[17], vertices[13], vertices[19], true_color); polygons[57] = polygon(vertices[13], vertices[14], vertices[18], true_color); } __device__ __host__ uchar4 ray(vec3 pos, vec3 dir, vec3 light_src, vec3 light_color, polygon polygons[], int len) { int min_value = -1; double ts_min; for (int i = 0; i < len; i++) { vec3 e1 = diff(polygons[i].y, polygons[i].x); vec3 e2 = diff(polygons[i].z, polygons[i].x); vec3 p = prod(dir, e2); double div = dot(p, e1); if (fabs(div) < 1e-10) continue; vec3 t = diff(pos, polygons[i].x); double u = dot(p, t) / div; if (u < 0.0 || u > 1.0) continue; vec3 q = prod(t, e1); double v = dot(q, dir) / div; if (v < 0.0 || v + u > 1.0) continue; double ts = dot(q, e2) / div; if (ts < 0.0) continue; if (min_value == -1 || ts < ts_min) { min_value = i; ts_min = ts; } } if (min_value == -1) return make_uchar4(0, 0, 0, 0); // To calculate light pos = add(mult_by_number(dir, ts_min), pos); dir = diff(light_src, pos); double length = sqrt(dot(dir, dir)); dir = norm(dir); for (int i = 0; i < len; i++) { vec3 e1 = diff(polygons[i].y, polygons[i].x); 
vec3 e2 = diff(polygons[i].z, polygons[i].x); vec3 p = prod(dir, e2); double div = dot(p, e1); if (fabs(div) < 1e-10) continue; vec3 t = diff(pos, polygons[i].x); double u = dot(p, t) / div; if (u < 0.0 || u > 1.0) continue; vec3 q = prod(t, e1); double v = dot(q, dir) / div; if (v < 0.0 || v + u > 1.0) continue; double ts = dot(q, e2) / div; if (ts > 0.0 && ts < length && i != min_value) { return make_uchar4(0, 0, 0, 0); } } uchar4 color_min = polygons[min_value].color; color_min.x = color_min.x * light_color.x; color_min.y = color_min.y * light_color.y; color_min.z = color_min.z * light_color.z; return color_min; } void render_cpu(vec3 pc, vec3 pv, int w, int h, double angle, uchar4 *data, vec3 light_src, vec3 light_color, polygon polygons[], int len) { int i, j; double dw = 2.0 / (w - 1.0); double dh = 2.0 / (h - 1.0); double z = 1.0 / tan(angle * M_PI / 360.0); vec3 bz = norm(diff(pv, pc)); vec3 bx = norm(prod(bz, vec3(0.0, 0.0, 1.0))); vec3 by = norm(prod(bx, bz)); #pragma omp parallel for for (j = 0; j < h; j++) { for (i = 0; i < w; i++) { vec3 v; v.x = -1.0 + dw * i; v.y = (-1.0 + dh * j) * h / w; v.z = z; vec3 dir = mult(bx, by, bz, v); data[(h - 1 - j) * w + i] = ray(pc, norm(dir), light_src, light_color, polygons, len); } } } __global__ void render(vec3 pc, vec3 pv, int w, int h, double angle, uchar4 *data, vec3 light_src, vec3 light_color, polygon polygons[], int len) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int i, j; double dw = 2.0 / (w - 1.0); double dh = 2.0 / (h - 1.0); double z = 1.0 / tan(angle * M_PI / 360.0); vec3 bz = norm(diff(pv, pc)); vec3 bx = norm(prod(bz, vec3(0.0, 0.0, 1.0))); vec3 by = norm(prod(bx, bz)); for (j = idy; j < h; j += offsety) { for (i = idx; i < w; i += offsetx) { vec3 v = vec3(-1.0 + dw * i, (-1.0 + dh * j) * h / w, z); vec3 dir = mult(bx, by, bz, v); data[(h - 1 - j) * w + i] = ray(pc, norm(dir), light_src, light_color, polygons, len); } } } void ssaa_cpu(uchar4 *src, uchar4 *out, int w, int h, int wScale, int hScale) { int n = wScale * hScale; int x, y, i, j; uchar4 p; uint4 s; // #pragma omp parallel for for(y = 0; y < h; y += 1) { for(x = 0; x < w; x += 1) { s = make_uint4(0,0,0,0); for (i = 0; i < wScale; ++i) { for (j = 0; j < hScale; ++j){ p = src[ w * wScale * (y * hScale + j) + (x * wScale + i) ]; s.x += p.x; s.y += p.y; s.z += p.z; } } s.x /= n; s.y /= n; s.z /= n; out[y * w + x] = make_uchar4(s.x, s.y, s.z, s.w); } } } __global__ void ssaa(uchar4 *src, uchar4 *out, int w, int h, int wScale, int hScale) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int n = wScale * hScale; int x, y, i, j; uchar4 p; uint4 s; for(y = idy; y < h; y += offsety) { for(x = idx; x < w; x += offsetx) { s = make_uint4(0,0,0,0); for (i = 0; i < wScale; ++i) { for (j = 0; j < hScale; ++j){ p = src[ w * wScale * (y * hScale + j) + (x * wScale + i) ]; s.x += p.x; s.y += p.y; s.z += p.z; } } s.x /= n; s.y /= n; s.z /= n; out[y * w + x] = make_uchar4(s.x, s.y, s.z, s.w); } } } int main(int argc, char *argv[]) { ios_base::sync_with_stdio(false); cin.tie(NULL); cout.tie(NULL); bool gpu = true; // default values int frames = 30; char out[256] = "out/%d.data"; int width = 640, height = 480; double fov = 120; double r_center_0 = 2, z_center_0 = 2, f_center_0 = .5, A_center_r = 1, A_center_z = 1.5, w_center_r = 1, 
w_center_z = .5, w_center_f = 1, p_center_r = 0, p_center_z = 1.5, r_direction_0 = .5, z_direction_0 = .5, f_direction_0 = .1, A_direction_r = 2, A_direction_z = .5, w_direction_r = 2, w_direction_z = .5, w_direction_f = 2, p_direction_r = 0, p_direction_z = 0; vec3 hexahedron_center = vec3(4, 0, 0), hexahedron_color = vec3(1, 0, 0); double hexahedron_radius = 2; vec3 octahedron_center = vec3(0, 3, 0), octahedron_color = vec3(0, 1, 0); double octahedron_radius = 1; vec3 dodecahedron_center = vec3(-2, 1, 1), dodecahedron_color = vec3(0, 0, 1); double dodecahedron_radius = 1; vec3 scene_points[] = {vec3(-10, -10, -1), vec3(-10, 10, -1), vec3(10, 10, -1), vec3(10, -10, -1)}; vec3 scene_color = vec3(0.952, 0.635, 0.070); int light = 1; vec3 light_src = vec3(100, 100, 100), light_color = vec3(1, 1, 1); int multiplier = 1; // fillers double _; string _str; // check parameters if (argc == 1 || argc == 2) { if (argc == 2) { // check gpu flag if ((string(argv[1]) == "--gpu") || string(argv[1]) == "--default") { gpu = true; } else if (string(argv[1]) == "--cpu") { gpu = false; } else { cerr << "Invalid command line parameter\n"; cerr << "Expected one of this:\n" "\t--gpu\n" "\t--default\n" "\t--cpu\n"; exit(1); } // check input if ((string(argv[1]) == "--gpu") || string(argv[1]) == "--cpu") { cin >> frames >> out >> width >> height >> fov; cin >> r_center_0 >> z_center_0 >> f_center_0 >> A_center_r >> A_center_z >> w_center_r >> w_center_z >> w_center_f >> p_center_r >> p_center_z; cin >> r_direction_0 >> z_direction_0 >> f_direction_0 >> A_direction_r >> A_direction_z >> w_direction_r >> w_direction_z >> w_direction_f >> p_direction_r >> p_direction_z; cin >> hexahedron_center.x >> hexahedron_center.y >> hexahedron_center.z >> hexahedron_color.x >> hexahedron_color.y >> hexahedron_color.z >> hexahedron_radius >> _ >> _ >> _; cin >> octahedron_center.x >> octahedron_center.y >> octahedron_center.z >> octahedron_color.x >> octahedron_color.y >> octahedron_color.z >> octahedron_radius >> _ >> _ >> _; cin >> dodecahedron_center.x >> dodecahedron_center.y >> dodecahedron_center.z >> dodecahedron_color.x >> dodecahedron_color.y >> dodecahedron_color.z >> dodecahedron_radius >> _ >> _ >> _; cin >> scene_points[0].x >> scene_points[0].y >> scene_points[0].z >> scene_points[1].x >> scene_points[1].y >> scene_points[1].z >> scene_points[2].x >> scene_points[2].y >> scene_points[2].z >> scene_points[3].x >> scene_points[3].y >> scene_points[3].z >> _str; cin >> scene_color.x >> scene_color.y >> scene_color.z >> _; cin >> light; assert(light == 1); cin >> light_src.x >> light_src.y >> light_src.z >> light_color.x >> light_color.y >> light_color.z >> _ >> multiplier; } } } else { cerr << "Wrong number of command line parameters, expected extra one or no one\n"; exit(1); } int numproc, id; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Bcast(&frames, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(out, 256, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast(&width, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&height, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&fov, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&r_center_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&z_center_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&f_center_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&A_center_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&A_center_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_center_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_center_z, 1, 
MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_center_f, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&p_center_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&p_center_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&r_direction_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&z_direction_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&f_direction_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&A_direction_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&A_direction_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_direction_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_direction_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&w_direction_f, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&p_direction_r, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&p_direction_z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&hexahedron_center, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&hexahedron_color, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&hexahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&octahedron_center, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&octahedron_color, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&octahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&dodecahedron_center, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&dodecahedron_color, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&dodecahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)scene_points, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)scene_points + 1, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)scene_points + 2, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)scene_points + 3, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&scene_color, 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&light, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&light_src, light * 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast((double *)&light_color, light * 3, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&multiplier, 1, MPI_INT, 0, MPI_COMM_WORLD); int polygons_sz = 58; polygon polygons[polygons_sz], *cuda_polygons; uchar4 *data = (uchar4 *) malloc(multiplier * multiplier * width * height * sizeof(uchar4)), *ssaa_data = (uchar4 *) malloc(width * height * sizeof(uchar4)), *cuda_data, *ssaa_cuda_data; if (gpu) { CSC(cudaMalloc((polygon **) (&cuda_polygons), polygons_sz * sizeof(polygon))); CSC(cudaMalloc((uchar4 * *)(&cuda_data), multiplier * multiplier * width * height * sizeof(uchar4))); CSC(cudaMalloc((uchar4 * *)(&ssaa_cuda_data), width * height * sizeof(uchar4))); } // fill polygons hexahedron(polygons, hexahedron_radius, hexahedron_center, hexahedron_color); octahedron(polygons, octahedron_radius, octahedron_center, octahedron_color); dodecahedron(polygons, dodecahedron_radius, dodecahedron_center, dodecahedron_color); scene(polygons, scene_points, scene_color); if (gpu) { CSC(cudaMemcpy(cuda_polygons, polygons, polygons_sz * sizeof(polygon), cudaMemcpyHostToDevice)); } vec3 pc, pv; char buff[256]; for (int iter = id; iter < frames; iter += numproc) { double step = 2 * M_PI * iter / frames; double r_center = A_center_r * sin(w_center_r * step + p_center_r) + r_center_0; double z_center = A_center_z * sin(w_center_z * step + p_center_z) + z_center_0; double f_center = w_center_f * step + f_center_0; double r_direction = A_direction_r * sin(w_direction_r * step + p_direction_r) + r_direction_0; double z_direction = A_direction_z * sin(w_direction_z * step + p_direction_z) + z_direction_0; 
double f_direction = w_direction_f * step + f_direction_0; pc.x = cos(f_center) * r_center; pc.y = sin(f_center) * r_center; pc.z = z_center; pv.x = cos(f_direction) * r_direction; pv.y = sin(f_direction) * r_direction; pv.z = z_direction; // time to process one frame cudaEvent_t start, stop; float gpu_time = 0.0; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); if (gpu) { render<<<dim3(16, 16), dim3(16, 16)>>> (pc, pv, width * multiplier, height * multiplier, fov, cuda_data, light_src, light_color, cuda_polygons, polygons_sz); CSC(cudaGetLastError()); ssaa<<<dim3(16, 16), dim3(16, 16)>>> (cuda_data, ssaa_cuda_data, width, height, multiplier, multiplier); CSC(cudaGetLastError()); CSC(cudaMemcpy(data, ssaa_cuda_data, sizeof(uchar4) * width * height, cudaMemcpyDeviceToHost)); } else { render_cpu (pc, pv, width * multiplier, height * multiplier, fov, data, light_src, light_color, polygons, polygons_sz); ssaa_cpu (data, ssaa_data, width, height, multiplier, multiplier); memcpy(data, ssaa_data, sizeof(uchar4) * width * height); } // time to process one frame cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpu_time, start, stop); sprintf(buff, out, iter); cerr << id << ": " << iter << "\t" << gpu_time << "\t" << width * height * multiplier * multiplier << endl; FILE *out = fopen(buff, "w"); fwrite(&width, sizeof(int), 1, out); fwrite(&height, sizeof(int), 1, out); fwrite(data, sizeof(uchar4), width * height, out); fclose(out); } if (gpu) { CSC(cudaFree(cuda_data)); CSC(cudaFree(ssaa_cuda_data)); CSC(cudaFree(cuda_polygons)); } free(data); free(ssaa_data); MPI_Finalize(); return 0; }
c4b6d6dbbc33199a071c98c40179c6846ef5b297.hip
// !!! This is a file automatically generated by hipify!!!
#include <processing.h>
#include <hip/hip_runtime.h>
#include <stdio.h>

using namespace std;

__global__ void gaussianKernel(int *original, int width, int height, int *imgblur){
    int id = threadIdx.x + blockDim.x * blockIdx.x;

    // Declarations
    unsigned int blurpixel;
    signed int dx, dy;
    unsigned int pixelweight;
    unsigned int pixel;

    // Weight array for the Gaussian blur
    int weighting[5][5] = {
        {2, 4, 5, 4, 2},
        {4, 9, 12, 9, 4},
        {5, 12, 15, 12, 5},
        {4, 9, 12, 9, 4},
        {2, 4, 5, 4, 2}
    };

    if(id < width - 2){
        // Apply the filter to every pixel
        for (int y = 2; y <= height - 2; y++){
            // Clear blurpixel
            blurpixel = 0;

            // Visit the +-2 neighbourhood of each pixel and accumulate the weighted values
            for (dx = -2; dx <= 2; dx++){
                for (dy = -2; dy <= 2; dy++){
                    pixelweight = weighting[dx + 2][dy + 2];

                    // Fetch the neighbour pixel; fall back to the centre pixel when the
                    // neighbour lies outside the image (the id + dx < 0 and y + dy < 0
                    // checks avoid reading before the start of the buffer)
                    if(id + dx < 0 || id + dx >= width || y + dy < 0 || y + dy >= height){
                        pixel = *(original + id * height + y);
                    }
                    else{
                        pixel = *(original + (id + dx) * height + (y + dy));
                    }

                    // Apply the weight
                    blurpixel = blurpixel + pixel * pixelweight;
                }
            }

            // Write the blurred pixel (159 is the sum of all weights)
            *(imgblur + id * height + y) = (blurpixel / 159);
        }
    }
}

__global__ void sobelFilter(int *original, int width, int height, int *imggrad, int *imggraddir){
    int id = threadIdx.x + blockDim.x * blockIdx.x;

    // Declarations
    int pix[3];
    int gradx, grady;
    int graddir, grad;

    if(id < width - 1){
        // Fetch the pixels and compute the gradient and its direction
        for (int y = 1; y <= height - 1; y++){
            // Source pixels used to compute the direction and intensity
            pix[0] = *(original + id * height + y);                           // centre pixel
            pix[1] = (id > 0) ? *(original + (id - 1) * height + y) : pix[0]; // left pixel (clamped at id == 0 to avoid an out-of-bounds read)
            pix[2] = *(original + id * height + (y - 1));                     // pixel above

            // Gradient along x
            gradx = pix[0] - pix[1];

            // Gradient along y
            grady = pix[0] - pix[2];

            // Compute the gradient direction
            // We want to round it to 0, 1, 2, 3, representing 0, 45, 90 and 135 degrees
            graddir = (int)(abs(atan2f(grady, gradx)) + 0.22) * 80;

            // Store the gradient direction
            *(imggraddir + id * height + y) = graddir;

            // Compute the gradient magnitude
            grad = (int)sqrtf(gradx * gradx + grady * grady) * 2;

            // Store the pixel
            *(imggrad + id * height + y) = grad;
        }

        *(imggrad + id * height + 0) = 0;
        *(imggrad + id * height + 1) = 0;
        *(imggrad + id * height + 2) = 0;
        *(imggrad + id * height + (height - 1)) = 0;

        for(int y = 0; y < height; y++){
            *(imggrad + 0 * height + y) = 0;
            *(imggrad + 1 * height + y) = 0;
            *(imggrad + 2 * height + y) = 0;
            *(imggrad + (width - 1) * height + y) = 0;
            *(imggrad + (width - 2) * height + y) = 0;
            *(imggrad + (width - 3) * height + y) = 0;
        }
    }
}

void edgeDetection(int *image_pointer, int width, int height){
    // <<< number of blocks, number of threads >>>
    dim3 unBloque(64, 1, 1);
    dim3 bloques((width / 64) + 1, 1, 1);

    int *gpu_img = NULL;
    // Image that stores the blurred result
    int *imgblur = NULL;
    // Image that stores the gradient magnitude
    int *imggrad = NULL;
    // Image that stores the gradient direction
    int *imggraddir = NULL;

    // Allocate GPU memory
    hipMalloc((void **) &gpu_img, sizeof(int) * (width * height));
    hipMalloc((void **) &imgblur, sizeof(int) * (width * height));
    hipMalloc((void **) &imggrad, sizeof(int) * (width * height));
    hipMalloc((void **) &imggraddir, sizeof(int) * (width * height));

    // Copy the input image to the GPU
    hipMemcpy(gpu_img, image_pointer, sizeof(int) * (width * height), hipMemcpyHostToDevice);

    // Launch the kernels
    hipLaunchKernelGGL(gaussianKernel, bloques, unBloque, 0, 0, gpu_img, width, height, imgblur);
    hipDeviceSynchronize();

    hipLaunchKernelGGL(sobelFilter, bloques, unBloque, 0, 0, imgblur, width, height, imggrad, imggraddir);
    hipDeviceSynchronize();

    hipMemcpy(image_pointer, imggrad, sizeof(int) * (width * height), hipMemcpyDeviceToHost);

    hipFree(gpu_img);
    hipFree(imgblur);
    hipFree(imggrad);
    hipFree(imggraddir);
}
c4b6d6dbbc33199a071c98c40179c6846ef5b297.cu
#include <processing.h>
#include <cuda.h>
#include <stdio.h>

using namespace std;

__global__ void gaussianKernel(int *original, int width, int height, int *imgblur){
    int id = threadIdx.x + blockDim.x * blockIdx.x;

    // Declarations
    unsigned int blurpixel;
    signed int dx, dy;
    unsigned int pixelweight;
    unsigned int pixel;

    // Weight array for the Gaussian blur
    int weighting[5][5] = {
        {2, 4, 5, 4, 2},
        {4, 9, 12, 9, 4},
        {5, 12, 15, 12, 5},
        {4, 9, 12, 9, 4},
        {2, 4, 5, 4, 2}
    };

    if(id < width - 2){
        // Apply the filter to every pixel
        for (int y = 2; y <= height - 2; y++){
            // Clear blurpixel
            blurpixel = 0;

            // Visit the +-2 neighbourhood of each pixel and accumulate the weighted values
            for (dx = -2; dx <= 2; dx++){
                for (dy = -2; dy <= 2; dy++){
                    pixelweight = weighting[dx + 2][dy + 2];

                    // Fetch the neighbour pixel; fall back to the centre pixel when the
                    // neighbour lies outside the image (the id + dx < 0 and y + dy < 0
                    // checks avoid reading before the start of the buffer)
                    if(id + dx < 0 || id + dx >= width || y + dy < 0 || y + dy >= height){
                        pixel = *(original + id * height + y);
                    }
                    else{
                        pixel = *(original + (id + dx) * height + (y + dy));
                    }

                    // Apply the weight
                    blurpixel = blurpixel + pixel * pixelweight;
                }
            }

            // Write the blurred pixel (159 is the sum of all weights)
            *(imgblur + id * height + y) = (blurpixel / 159);
        }
    }
}

__global__ void sobelFilter(int *original, int width, int height, int *imggrad, int *imggraddir){
    int id = threadIdx.x + blockDim.x * blockIdx.x;

    // Declarations
    int pix[3];
    int gradx, grady;
    int graddir, grad;

    if(id < width - 1){
        // Fetch the pixels and compute the gradient and its direction
        for (int y = 1; y <= height - 1; y++){
            // Source pixels used to compute the direction and intensity
            pix[0] = *(original + id * height + y);                           // centre pixel
            pix[1] = (id > 0) ? *(original + (id - 1) * height + y) : pix[0]; // left pixel (clamped at id == 0 to avoid an out-of-bounds read)
            pix[2] = *(original + id * height + (y - 1));                     // pixel above

            // Gradient along x
            gradx = pix[0] - pix[1];

            // Gradient along y
            grady = pix[0] - pix[2];

            // Compute the gradient direction
            // We want to round it to 0, 1, 2, 3, representing 0, 45, 90 and 135 degrees
            graddir = (int)(abs(atan2f(grady, gradx)) + 0.22) * 80;

            // Store the gradient direction
            *(imggraddir + id * height + y) = graddir;

            // Compute the gradient magnitude
            grad = (int)sqrtf(gradx * gradx + grady * grady) * 2;

            // Store the pixel
            *(imggrad + id * height + y) = grad;
        }

        *(imggrad + id * height + 0) = 0;
        *(imggrad + id * height + 1) = 0;
        *(imggrad + id * height + 2) = 0;
        *(imggrad + id * height + (height - 1)) = 0;

        for(int y = 0; y < height; y++){
            *(imggrad + 0 * height + y) = 0;
            *(imggrad + 1 * height + y) = 0;
            *(imggrad + 2 * height + y) = 0;
            *(imggrad + (width - 1) * height + y) = 0;
            *(imggrad + (width - 2) * height + y) = 0;
            *(imggrad + (width - 3) * height + y) = 0;
        }
    }
}

void edgeDetection(int *image_pointer, int width, int height){
    // <<< number of blocks, number of threads >>>
    dim3 unBloque(64, 1, 1);
    dim3 bloques((width / 64) + 1, 1, 1);

    int *gpu_img = NULL;
    // Image that stores the blurred result
    int *imgblur = NULL;
    // Image that stores the gradient magnitude
    int *imggrad = NULL;
    // Image that stores the gradient direction
    int *imggraddir = NULL;

    // Allocate GPU memory
    cudaMalloc((void **) &gpu_img, sizeof(int) * (width * height));
    cudaMalloc((void **) &imgblur, sizeof(int) * (width * height));
    cudaMalloc((void **) &imggrad, sizeof(int) * (width * height));
    cudaMalloc((void **) &imggraddir, sizeof(int) * (width * height));

    // Copy the input image to the GPU
    cudaMemcpy(gpu_img, image_pointer, sizeof(int) * (width * height), cudaMemcpyHostToDevice);

    // Launch the kernels
    gaussianKernel<<< bloques, unBloque >>>(gpu_img, width, height, imgblur);
    cudaDeviceSynchronize();

    sobelFilter<<< bloques, unBloque >>>(imgblur, width, height, imggrad, imggraddir);
    cudaDeviceSynchronize();

    cudaMemcpy(image_pointer, imggrad, sizeof(int) * (width * height), cudaMemcpyDeviceToHost);

    cudaFree(gpu_img);
    cudaFree(imgblur);
    cudaFree(imggrad);
    cudaFree(imggraddir);
}
9e2f5f7ea1e75fb71ae5d0e97ebcbe53a79456fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2019 Xilinx Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#include <mutex> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/quantize.hpp" #include <float.h> namespace caffe { template <typename Dtype> __global__ void gpu_fix_kernel1(const int n, const Dtype *x, Dtype *y, Dtype step, Dtype lb, Dtype ub) { CUDA_KERNEL_LOOP(i, n) { y[i] = min(max(round(x[i] / step) * step, lb), ub); } } // sigmoid kernel: y = sigmoid(x) template <typename Dtype> __global__ void gpu_sigmoid_kernel(const int n, const Dtype *x, Dtype *y) { CUDA_KERNEL_LOOP(i, n) { y[i] = 1. / (1. + exp(-x[i])); } } template <typename Dtype> __global__ void gpu_fix_kernel2(const int n, const Dtype *x, Dtype *y, Dtype step, Dtype lb, Dtype ub) { CUDA_KERNEL_LOOP(i, n) { Dtype tmp = x[i] / step; // simulate DPU where to save hardware resource if ( tmp < 0 && ( tmp - floor( tmp ) ) == 0.5 ) tmp = ceil( tmp ); else tmp = round( tmp ); y[i] = min(max(tmp * step, lb), ub); } } template <typename Dtype> void caffe_gpu_fix(const int n, const Dtype *x, Dtype *y, const int bit_width, const int p) { Dtype step = ::pow(Dtype(2), -p); Dtype lower_bound = -::pow(Dtype(2), bit_width - 1) * step; Dtype upper_bound = ::pow(Dtype(2), bit_width - 1) * step - step; hipLaunchKernelGGL(( gpu_fix_kernel1<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, step, lower_bound, upper_bound); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void caffe_gpu_top_fix(const int n, const Dtype *x, Dtype *y, const int bit_width, const int p) { Dtype step = ::pow(Dtype(2), -p); Dtype lower_bound = -::pow(Dtype(2), bit_width - 1) * step; Dtype upper_bound = ::pow(Dtype(2), bit_width - 1) * step - step; hipLaunchKernelGGL(( gpu_fix_kernel2<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, step, lower_bound, upper_bound); CUDA_POST_KERNEL_CHECK; } template void caffe_gpu_fix<float>(const int n, const float *x, float *y, const int bit_width, const int p); template void caffe_gpu_fix<double>(const int n, const double *x, double *y, const int bit_width, const int p); template void caffe_gpu_top_fix<float>(const int n, const float *x, float *y, const int bit_width, const int p); template void caffe_gpu_top_fix<double>(const int n, const double *x, double *y, const int bit_width, const int p); // Overflow: minimize fix pos in terms of all weights and data do not overflow template <typename Dtype> Dtype caffe_gpu_fix_pos_overflow(const int n, const Dtype *x, const int bit_width) { // Use half of step as a guard Dtype fix_lb = -::pow(2, bit_width - 1) - 0.5; Dtype fix_ub = ::pow(2, bit_width - 1) - 0.5; // Dynamic range [min, max] // Find min and max value in GPU auto min_max = thrust::minmax_element(thrust::device, x, x + n); // Copy to Host Dtype x_min, x_max; hipMemcpy(&x_min, 
min_max.first, sizeof(Dtype), hipMemcpyDeviceToHost); hipMemcpy(&x_max, min_max.second, sizeof(Dtype), hipMemcpyDeviceToHost); Dtype step = ::max(x_min / fix_lb, x_max / fix_ub); if (step == 0) { return SHRT_MAX; } else if(isnan(step)) { return SHRT_MIN; } return std::log2(1 / step); } template float caffe_gpu_fix_pos_overflow<float>(const int n, const float *x, const int bit_width); template double caffe_gpu_fix_pos_overflow<double>(const int n, const double *x, const int bit_width); // Diff_S: minimize L2 norm of fixed weights/activation and float weights/activation template <typename Dtype> Dtype caffe_gpu_fix_pos_diffs(const int n, const Dtype *x, const int bit_width, const int range) { // Calc search range for scale int max_scale; Dtype fix_lb = -::pow(2, bit_width - 1) - 0.5; Dtype fix_ub = ::pow(2, bit_width - 1) - 0.5; auto min_max = thrust::minmax_element(thrust::device, x, x + n); // Copy to Host Dtype x_min, x_max; hipMemcpy(&x_min, min_max.first, sizeof(Dtype), hipMemcpyDeviceToHost); hipMemcpy(&x_max, min_max.second, sizeof(Dtype), hipMemcpyDeviceToHost); // Find max_scale Dtype step = ::max(x_min / fix_lb, x_max / fix_ub); if (step == 0) { return SHRT_MAX; } else if(isnan(step)) { return SHRT_MIN; } else { max_scale = ::floor(std::log2(1 / step)); } // Find fix pos in range [max_scale + range , max_scale] Dtype final_scale; final_scale = max_scale; Dtype fixed_diff_min = FLT_MAX; Dtype *buffer; CUDA_CHECK(hipMalloc((void **)&buffer, n * sizeof(Dtype))); /* CHECK_NOTNULL(buffer); */ for (int scale = max_scale; scale < max_scale + range; scale++) { caffe_gpu_fix<Dtype>(n, x, buffer, bit_width, scale); caffe_gpu_sub<Dtype>(n, x, buffer, buffer); caffe_gpu_powx<Dtype>(n, buffer, 2, buffer); Dtype fixed_diff; caffe_gpu_asum(n, buffer, &fixed_diff); if (fixed_diff < fixed_diff_min) { final_scale = scale; fixed_diff_min = fixed_diff; } } CUDA_CHECK(hipFree(buffer)); return final_scale; } template float caffe_gpu_fix_pos_diffs<float>(const int n, const float *x, const int bit_width, const int range); template double caffe_gpu_fix_pos_diffs<double>(const int n, const double *x, const int bit_width, const int range); // Diff_S_Sigmoid: minimize L2 norm of sigmoid(weights/activation) between fixed and float template <typename Dtype> Dtype caffe_gpu_fix_pos_diffs_sigmoid(const int n, const Dtype *x, const int bit_width, const int range) { // Calc search range for scale int max_scale; Dtype fix_lb = -::pow(2, bit_width - 1) - 0.5; Dtype fix_ub = ::pow(2, bit_width - 1) - 0.5; auto min_max = thrust::minmax_element(thrust::device, x, x + n); // Copy to Host Dtype x_min, x_max; hipMemcpy(&x_min, min_max.first, sizeof(Dtype), hipMemcpyDeviceToHost); hipMemcpy(&x_max, min_max.second, sizeof(Dtype), hipMemcpyDeviceToHost); // Find max_scale Dtype step = ::max(x_min / fix_lb, x_max / fix_ub); if (step == 0) max_scale = 0; else max_scale = ::floor(std::log2(1 / step)); // Find fix pos in range [max_scale + range , max_scale] Dtype final_scale; final_scale = max_scale; Dtype fixed_diff_min = FLT_MAX; Dtype *sigmoid_x, *buffer; CUDA_CHECK(hipMalloc((void **)&sigmoid_x, n * sizeof(Dtype))); CUDA_CHECK(hipMalloc((void **)&buffer, n * sizeof(Dtype))); hipLaunchKernelGGL(( gpu_sigmoid_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, sigmoid_x); CUDA_POST_KERNEL_CHECK; LOG(INFO) << "calib start"; for (int scale = max_scale; scale < max_scale + range; scale++) { caffe_gpu_fix<Dtype>(n, x, buffer, bit_width, scale); hipLaunchKernelGGL(( gpu_sigmoid_kernel<Dtype>), 
dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, buffer, buffer); CUDA_POST_KERNEL_CHECK; caffe_gpu_sub<Dtype>(n, sigmoid_x, buffer, buffer); caffe_gpu_powx<Dtype>(n, buffer, 2, buffer); Dtype fixed_diff; caffe_gpu_asum(n, buffer, &fixed_diff); if (fixed_diff < fixed_diff_min) { final_scale = scale; fixed_diff_min = fixed_diff; } } CUDA_CHECK(hipFree(sigmoid_x)); CUDA_CHECK(hipFree(buffer)); return final_scale; } template float caffe_gpu_fix_pos_diffs_sigmoid<float>(const int n, const float *x, const int bit_width, const int range); template double caffe_gpu_fix_pos_diffs_sigmoid<double>(const int n, const double *x, const int bit_width, const int range); /* template <typename Dtype> static __global__ void overflow_kernel(const int n, Dtype upper_bound, Dtype lower_bound, const Dtype* x, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index]=(x[index]<=upper_bound && x[index]>=lower_bound)?Dtype(0):Dtype(1); } } template <typename Dtype> static bool test_overflow(const int n, Dtype upper_bound, Dtype lower_bound, const Dtype* data, Dtype* buffer) { overflow_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, upper_bound, lower_bound, data, buffer); CUDA_POST_KERNEL_CHECK; Dtype asum; caffe_gpu_asum(n, buffer, &asum); return asum>Dtype(0.5); } template <typename Dtype> void caffe_gpu_fix_overflow(const int n, const Dtype* x, Dtype* y, const int bit_level, const int max_scale, const int min_scale, int& final_scale) { final_scale=::max(::min(final_scale, max_scale), min_scale); int search_length=max_scale-min_scale+1; if(search_length<2) { final_scale=min_scale; } else { Dtype* buffer=y; if(x==y) { buffer=static_cast<Dtype*>(Caffe::GpuBuffer(n*sizeof(Dtype))); CHECK_NOTNULL(buffer); } vector<Dtype> upper_bound(search_length); vector<Dtype> lower_bound(search_length); for(int i=0; i<search_length; i++) { upper_bound[i]=::pow(Dtype(2), i+min_scale); lower_bound[i]=-upper_bound[i]-::pow(Dtype(2), i+min_scale-bit_level); } vector<bool> overflow(search_length); vector<bool> tested(search_length, false); bool found=false; overflow[final_scale-min_scale]=test_overflow(n, upper_bound[final_scale-min_scale], lower_bound[final_scale-min_scale], x, buffer); tested[final_scale-min_scale]=true; if(!overflow[final_scale-min_scale]) { if(final_scale==min_scale) { found=true; } else { overflow[final_scale-min_scale-1]=test_overflow(n, upper_bound[final_scale-min_scale-1], lower_bound[final_scale-min_scale-1], x, buffer); tested[final_scale-min_scale-1]=true; if(overflow[final_scale-min_scale-1]) { found=true; } } } if(!found) { overflow[0]=true; tested[0]=true; overflow[search_length-1]=false; tested[search_length-1]=true; int left=0; int right=search_length-1; for(;;) { int middle=(left+right)/2; if(!tested[middle]) { overflow[middle]=test_overflow(n, upper_bound[middle], lower_bound[middle], x, buffer); tested[middle]=true; } if(!tested[middle+1]) { overflow[middle+1]=test_overflow(n, upper_bound[middle+1], lower_bound[middle+1], x, buffer); tested[middle+1]=true; } if(overflow[middle] && !overflow[middle+1]) { final_scale=min_scale+middle+1; break; } else if(!overflow[middle]) { right=middle; } else { left=middle+1; } } } } caffe_gpu_fix(n, x, y, bit_level, final_scale); } template void caffe_gpu_fix_overflow<float>(const int n, const float* x, float* y, const int bit_level, const int max_scale, const int min_scale, int& final_scale); template void caffe_gpu_fix_overflow<double>(const int n, const double* x, double* y, const int bit_level, const int max_scale, const int 
min_scale, int& final_scale); */ template <typename Dtype> __global__ void gpu_scale_kernel(const int n, const Dtype *x, Dtype *y, Dtype step ) { CUDA_KERNEL_LOOP(i, n) { y[i] = x[i] * step; } } template <typename Dtype> void caffe_gpu_scale(const int n, const Dtype *x, Dtype *y, const int p) { Dtype step; if (p == SHRT_MAX) { step = 1; } else { step = ::pow(Dtype(2), p); } hipLaunchKernelGGL(( gpu_scale_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, step); CUDA_POST_KERNEL_CHECK; } template void caffe_gpu_scale<float>(const int n, const float *x, float *y, const int p); template void caffe_gpu_scale<double>(const int n, const double *x, double *y, const int p); template <typename Dtype> __global__ void gpu_trunc_kernel(const int n, const Dtype *x, Dtype *y, Dtype scale) { CUDA_KERNEL_LOOP(i, n) { y[i] = ( (int)(x[i] / scale) ) * scale; } } template <typename Dtype> void caffe_gpu_trunc(const int n, const Dtype *x, Dtype *y, const int p) { Dtype scale = ::pow(Dtype(2), -p); hipLaunchKernelGGL(( gpu_trunc_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, scale); CUDA_POST_KERNEL_CHECK; } template void caffe_gpu_trunc<float>(const int n, const float *x, float *y, const int p); template void caffe_gpu_trunc<double>(const int n, const double *x, double *y, const int p); template <typename Dtype> void caffe_pooling_scale(const int n, const Dtype *x, Dtype *y, float scale) { hipLaunchKernelGGL(( gpu_scale_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, scale); CUDA_POST_KERNEL_CHECK; } template void caffe_pooling_scale<float>(const int n, const float *x, float *y, float scale); template void caffe_pooling_scale<double>(const int n, const double *x, double *y, float scale); } // namespace caffe
9e2f5f7ea1e75fb71ae5d0e97ebcbe53a79456fe.cu
/* * Copyright 2019 Xilinx Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#include <mutex> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/quantize.hpp" #include <float.h> namespace caffe { template <typename Dtype> __global__ void gpu_fix_kernel1(const int n, const Dtype *x, Dtype *y, Dtype step, Dtype lb, Dtype ub) { CUDA_KERNEL_LOOP(i, n) { y[i] = min(max(round(x[i] / step) * step, lb), ub); } } // sigmoid kernel: y = sigmoid(x) template <typename Dtype> __global__ void gpu_sigmoid_kernel(const int n, const Dtype *x, Dtype *y) { CUDA_KERNEL_LOOP(i, n) { y[i] = 1. / (1. + exp(-x[i])); } } template <typename Dtype> __global__ void gpu_fix_kernel2(const int n, const Dtype *x, Dtype *y, Dtype step, Dtype lb, Dtype ub) { CUDA_KERNEL_LOOP(i, n) { Dtype tmp = x[i] / step; // simulate DPU where to save hardware resource if ( tmp < 0 && ( tmp - floor( tmp ) ) == 0.5 ) tmp = ceil( tmp ); else tmp = round( tmp ); y[i] = min(max(tmp * step, lb), ub); } } template <typename Dtype> void caffe_gpu_fix(const int n, const Dtype *x, Dtype *y, const int bit_width, const int p) { Dtype step = std::pow(Dtype(2), -p); Dtype lower_bound = -std::pow(Dtype(2), bit_width - 1) * step; Dtype upper_bound = std::pow(Dtype(2), bit_width - 1) * step - step; gpu_fix_kernel1<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, step, lower_bound, upper_bound); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void caffe_gpu_top_fix(const int n, const Dtype *x, Dtype *y, const int bit_width, const int p) { Dtype step = std::pow(Dtype(2), -p); Dtype lower_bound = -std::pow(Dtype(2), bit_width - 1) * step; Dtype upper_bound = std::pow(Dtype(2), bit_width - 1) * step - step; gpu_fix_kernel2<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, step, lower_bound, upper_bound); CUDA_POST_KERNEL_CHECK; } template void caffe_gpu_fix<float>(const int n, const float *x, float *y, const int bit_width, const int p); template void caffe_gpu_fix<double>(const int n, const double *x, double *y, const int bit_width, const int p); template void caffe_gpu_top_fix<float>(const int n, const float *x, float *y, const int bit_width, const int p); template void caffe_gpu_top_fix<double>(const int n, const double *x, double *y, const int bit_width, const int p); // Overflow: minimize fix pos in terms of all weights and data do not overflow template <typename Dtype> Dtype caffe_gpu_fix_pos_overflow(const int n, const Dtype *x, const int bit_width) { // Use half of step as a guard Dtype fix_lb = -std::pow(2, bit_width - 1) - 0.5; Dtype fix_ub = std::pow(2, bit_width - 1) - 0.5; // Dynamic range [min, max] // Find min and max value in GPU auto min_max = thrust::minmax_element(thrust::device, x, x + n); // Copy to Host Dtype x_min, x_max; cudaMemcpy(&x_min, min_max.first, sizeof(Dtype), cudaMemcpyDeviceToHost); cudaMemcpy(&x_max, min_max.second, sizeof(Dtype), cudaMemcpyDeviceToHost); Dtype step 
= std::max(x_min / fix_lb, x_max / fix_ub); if (step == 0) { return SHRT_MAX; } else if(isnan(step)) { return SHRT_MIN; } return std::log2(1 / step); } template float caffe_gpu_fix_pos_overflow<float>(const int n, const float *x, const int bit_width); template double caffe_gpu_fix_pos_overflow<double>(const int n, const double *x, const int bit_width); // Diff_S: minimize L2 norm of fixed weights/activation and float weights/activation template <typename Dtype> Dtype caffe_gpu_fix_pos_diffs(const int n, const Dtype *x, const int bit_width, const int range) { // Calc search range for scale int max_scale; Dtype fix_lb = -std::pow(2, bit_width - 1) - 0.5; Dtype fix_ub = std::pow(2, bit_width - 1) - 0.5; auto min_max = thrust::minmax_element(thrust::device, x, x + n); // Copy to Host Dtype x_min, x_max; cudaMemcpy(&x_min, min_max.first, sizeof(Dtype), cudaMemcpyDeviceToHost); cudaMemcpy(&x_max, min_max.second, sizeof(Dtype), cudaMemcpyDeviceToHost); // Find max_scale Dtype step = std::max(x_min / fix_lb, x_max / fix_ub); if (step == 0) { return SHRT_MAX; } else if(isnan(step)) { return SHRT_MIN; } else { max_scale = std::floor(std::log2(1 / step)); } // Find fix pos in range [max_scale + range , max_scale] Dtype final_scale; final_scale = max_scale; Dtype fixed_diff_min = FLT_MAX; Dtype *buffer; CUDA_CHECK(cudaMalloc((void **)&buffer, n * sizeof(Dtype))); /* CHECK_NOTNULL(buffer); */ for (int scale = max_scale; scale < max_scale + range; scale++) { caffe_gpu_fix<Dtype>(n, x, buffer, bit_width, scale); caffe_gpu_sub<Dtype>(n, x, buffer, buffer); caffe_gpu_powx<Dtype>(n, buffer, 2, buffer); Dtype fixed_diff; caffe_gpu_asum(n, buffer, &fixed_diff); if (fixed_diff < fixed_diff_min) { final_scale = scale; fixed_diff_min = fixed_diff; } } CUDA_CHECK(cudaFree(buffer)); return final_scale; } template float caffe_gpu_fix_pos_diffs<float>(const int n, const float *x, const int bit_width, const int range); template double caffe_gpu_fix_pos_diffs<double>(const int n, const double *x, const int bit_width, const int range); // Diff_S_Sigmoid: minimize L2 norm of sigmoid(weights/activation) between fixed and float template <typename Dtype> Dtype caffe_gpu_fix_pos_diffs_sigmoid(const int n, const Dtype *x, const int bit_width, const int range) { // Calc search range for scale int max_scale; Dtype fix_lb = -std::pow(2, bit_width - 1) - 0.5; Dtype fix_ub = std::pow(2, bit_width - 1) - 0.5; auto min_max = thrust::minmax_element(thrust::device, x, x + n); // Copy to Host Dtype x_min, x_max; cudaMemcpy(&x_min, min_max.first, sizeof(Dtype), cudaMemcpyDeviceToHost); cudaMemcpy(&x_max, min_max.second, sizeof(Dtype), cudaMemcpyDeviceToHost); // Find max_scale Dtype step = std::max(x_min / fix_lb, x_max / fix_ub); if (step == 0) max_scale = 0; else max_scale = std::floor(std::log2(1 / step)); // Find fix pos in range [max_scale + range , max_scale] Dtype final_scale; final_scale = max_scale; Dtype fixed_diff_min = FLT_MAX; Dtype *sigmoid_x, *buffer; CUDA_CHECK(cudaMalloc((void **)&sigmoid_x, n * sizeof(Dtype))); CUDA_CHECK(cudaMalloc((void **)&buffer, n * sizeof(Dtype))); gpu_sigmoid_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, sigmoid_x); CUDA_POST_KERNEL_CHECK; LOG(INFO) << "calib start"; for (int scale = max_scale; scale < max_scale + range; scale++) { caffe_gpu_fix<Dtype>(n, x, buffer, bit_width, scale); gpu_sigmoid_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, buffer, buffer); CUDA_POST_KERNEL_CHECK; caffe_gpu_sub<Dtype>(n, sigmoid_x, buffer, buffer); 
caffe_gpu_powx<Dtype>(n, buffer, 2, buffer); Dtype fixed_diff; caffe_gpu_asum(n, buffer, &fixed_diff); if (fixed_diff < fixed_diff_min) { final_scale = scale; fixed_diff_min = fixed_diff; } } CUDA_CHECK(cudaFree(sigmoid_x)); CUDA_CHECK(cudaFree(buffer)); return final_scale; } template float caffe_gpu_fix_pos_diffs_sigmoid<float>(const int n, const float *x, const int bit_width, const int range); template double caffe_gpu_fix_pos_diffs_sigmoid<double>(const int n, const double *x, const int bit_width, const int range); /* template <typename Dtype> static __global__ void overflow_kernel(const int n, Dtype upper_bound, Dtype lower_bound, const Dtype* x, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index]=(x[index]<=upper_bound && x[index]>=lower_bound)?Dtype(0):Dtype(1); } } template <typename Dtype> static bool test_overflow(const int n, Dtype upper_bound, Dtype lower_bound, const Dtype* data, Dtype* buffer) { overflow_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n, upper_bound, lower_bound, data, buffer); CUDA_POST_KERNEL_CHECK; Dtype asum; caffe_gpu_asum(n, buffer, &asum); return asum>Dtype(0.5); } template <typename Dtype> void caffe_gpu_fix_overflow(const int n, const Dtype* x, Dtype* y, const int bit_level, const int max_scale, const int min_scale, int& final_scale) { final_scale=std::max(std::min(final_scale, max_scale), min_scale); int search_length=max_scale-min_scale+1; if(search_length<2) { final_scale=min_scale; } else { Dtype* buffer=y; if(x==y) { buffer=static_cast<Dtype*>(Caffe::GpuBuffer(n*sizeof(Dtype))); CHECK_NOTNULL(buffer); } vector<Dtype> upper_bound(search_length); vector<Dtype> lower_bound(search_length); for(int i=0; i<search_length; i++) { upper_bound[i]=std::pow(Dtype(2), i+min_scale); lower_bound[i]=-upper_bound[i]-std::pow(Dtype(2), i+min_scale-bit_level); } vector<bool> overflow(search_length); vector<bool> tested(search_length, false); bool found=false; overflow[final_scale-min_scale]=test_overflow(n, upper_bound[final_scale-min_scale], lower_bound[final_scale-min_scale], x, buffer); tested[final_scale-min_scale]=true; if(!overflow[final_scale-min_scale]) { if(final_scale==min_scale) { found=true; } else { overflow[final_scale-min_scale-1]=test_overflow(n, upper_bound[final_scale-min_scale-1], lower_bound[final_scale-min_scale-1], x, buffer); tested[final_scale-min_scale-1]=true; if(overflow[final_scale-min_scale-1]) { found=true; } } } if(!found) { overflow[0]=true; tested[0]=true; overflow[search_length-1]=false; tested[search_length-1]=true; int left=0; int right=search_length-1; for(;;) { int middle=(left+right)/2; if(!tested[middle]) { overflow[middle]=test_overflow(n, upper_bound[middle], lower_bound[middle], x, buffer); tested[middle]=true; } if(!tested[middle+1]) { overflow[middle+1]=test_overflow(n, upper_bound[middle+1], lower_bound[middle+1], x, buffer); tested[middle+1]=true; } if(overflow[middle] && !overflow[middle+1]) { final_scale=min_scale+middle+1; break; } else if(!overflow[middle]) { right=middle; } else { left=middle+1; } } } } caffe_gpu_fix(n, x, y, bit_level, final_scale); } template void caffe_gpu_fix_overflow<float>(const int n, const float* x, float* y, const int bit_level, const int max_scale, const int min_scale, int& final_scale); template void caffe_gpu_fix_overflow<double>(const int n, const double* x, double* y, const int bit_level, const int max_scale, const int min_scale, int& final_scale); */ template <typename Dtype> __global__ void gpu_scale_kernel(const int n, const Dtype *x, Dtype *y, Dtype step ) { 
CUDA_KERNEL_LOOP(i, n) { y[i] = x[i] * step; } } template <typename Dtype> void caffe_gpu_scale(const int n, const Dtype *x, Dtype *y, const int p) { Dtype step; if (p == SHRT_MAX) { step = 1; } else { step = std::pow(Dtype(2), p); } gpu_scale_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, step); CUDA_POST_KERNEL_CHECK; } template void caffe_gpu_scale<float>(const int n, const float *x, float *y, const int p); template void caffe_gpu_scale<double>(const int n, const double *x, double *y, const int p); template <typename Dtype> __global__ void gpu_trunc_kernel(const int n, const Dtype *x, Dtype *y, Dtype scale) { CUDA_KERNEL_LOOP(i, n) { y[i] = ( (int)(x[i] / scale) ) * scale; } } template <typename Dtype> void caffe_gpu_trunc(const int n, const Dtype *x, Dtype *y, const int p) { Dtype scale = std::pow(Dtype(2), -p); gpu_trunc_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, scale); CUDA_POST_KERNEL_CHECK; } template void caffe_gpu_trunc<float>(const int n, const float *x, float *y, const int p); template void caffe_gpu_trunc<double>(const int n, const double *x, double *y, const int p); template <typename Dtype> void caffe_pooling_scale(const int n, const Dtype *x, Dtype *y, float scale) { gpu_scale_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, scale); CUDA_POST_KERNEL_CHECK; } template void caffe_pooling_scale<float>(const int n, const float *x, float *y, float scale); template void caffe_pooling_scale<double>(const int n, const double *x, double *y, float scale); } // namespace caffe
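The fix routines above all reduce to one rounding-and-clamping rule: with step = 2^-p, a value x maps to min(max(round(x / step) * step, lb), ub), where lb = -2^(bit_width-1) * step and ub = 2^(bit_width-1) * step - step. The host-side snippet below restates that rule outside the Caffe kernel wrappers; it is an illustrative sketch only, and the helper name fix_point is made up for the example rather than taken from the quantize sources.

#include <algorithm>
#include <cmath>

// Hypothetical host-side restatement of gpu_fix_kernel1's rounding rule (not part of the Caffe sources).
static float fix_point(float x, int bit_width, int p) {
  const float step = std::pow(2.0f, -p);                            // quantization step 2^-p
  const float lb   = -std::pow(2.0f, bit_width - 1) * step;         // lower bound of the fixed range
  const float ub   =  std::pow(2.0f, bit_width - 1) * step - step;  // upper bound of the fixed range
  return std::min(std::max(std::round(x / step) * step, lb), ub);
}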
c48a2c58afb874a057f273b2d418ccfc3bf02110.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Matrix_getRow_FloatId_naive.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int Acount = 1; int Acols = 1; float *out0 = NULL; hipMalloc(&out0, XSIZE*YSIZE); int out0count = 1; int out0cols = 1; float row_id = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Matrix_getRow_FloatId_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,Acount,Acols,out0,out0count,out0cols,row_id); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Matrix_getRow_FloatId_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,Acount,Acols,out0,out0count,out0cols,row_id); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Matrix_getRow_FloatId_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,Acount,Acols,out0,out0count,out0cols,row_id); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c48a2c58afb874a057f273b2d418ccfc3bf02110.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Matrix_getRow_FloatId_naive.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int Acount = 1; int Acols = 1; float *out0 = NULL; cudaMalloc(&out0, XSIZE*YSIZE); int out0count = 1; int out0cols = 1; float row_id = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Matrix_getRow_FloatId_naive<<<gridBlock,threadBlock>>>(A,Acount,Acols,out0,out0count,out0cols,row_id); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Matrix_getRow_FloatId_naive<<<gridBlock,threadBlock>>>(A,Acount,Acols,out0,out0count,out0cols,row_id); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Matrix_getRow_FloatId_naive<<<gridBlock,threadBlock>>>(A,Acount,Acols,out0,out0count,out0cols,row_id); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
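The .hip/.cu pair above differs only in the mechanical renames applied by hipify: cudaSetDevice/cudaMalloc/cudaFree/cudaDeviceSynchronize become their hip* counterparts, and the triple-chevron launch kernel<<<grid, block>>>(args...) becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...). A minimal sketch of that mapping follows, using a made-up kernel name scale_by_two purely for illustration:

#include <hip/hip_runtime.h>

// 'scale_by_two' is a hypothetical kernel used only to illustrate the launch-syntax mapping.
__global__ void scale_by_two(float* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) { data[i] *= 2.0f; }
}

// CUDA spelling:  scale_by_two<<<grid, block>>>(d_data, n);
// HIP spelling emitted by hipify (dynamic shared-memory bytes and stream become explicit arguments):
void launch_scale_by_two(float* d_data, int n) {
  dim3 grid((n + 255) / 256), block(256);
  hipLaunchKernelGGL(scale_by_two, grid, block, 0, 0, d_data, n);
  hipDeviceSynchronize();
}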
34966c1dbca28f3357213f0f6fbd3202ecd34ed3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include <doctest.h> #include <heteroflow/heteroflow.hpp> // ---------------------------------------------------------------------------- // Parameters // ---------------------------------------------------------------------------- const size_t C = ::min(16u, std::thread::hardware_concurrency()); const size_t G = ::min(4u, hf::cuda::num_devices()); // ---------------------------------------------------------------------------- // Kernel // ---------------------------------------------------------------------------- template <typename T> __global__ void k_set(T* ptr, size_t N, T value) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) { ptr[i] = value; } } template <typename T> __global__ void k_add(T* ptr, size_t N, T value) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) { ptr[i] += value; } } template <typename T> __global__ void k_single_add(T* ptr, size_t N, int idx, T value) { ptr[idx] += value; } // -------------------------------------------------------- // Testcase: static // -------------------------------------------------------- TEST_CASE("static" * doctest::timeout(300)) { hf::Executor executor(C, G); REQUIRE(executor.num_cpu_workers() == C); REQUIRE(executor.num_gpu_workers() == G); REQUIRE(executor.num_workers() == C + G); hf::Heteroflow hf; REQUIRE(hf.empty() == true); REQUIRE(hf.size() == 0); hf::HostTask host; hf::SpanTask span; hf::KernelTask kernel; hf::CopyTask copy; REQUIRE(host.empty() == true); REQUIRE(span.empty() == true); REQUIRE(kernel.empty() == true); REQUIRE(copy.empty() == true); auto host2 = hf.placeholder<hf::HostTask>(); auto span2 = hf.placeholder<hf::SpanTask>(); auto copy2 = hf.placeholder<hf::CopyTask>(); auto kernel2 = hf.placeholder<hf::KernelTask>(); REQUIRE(host2.empty() == false); REQUIRE(span2.empty() == false); REQUIRE(copy2.empty() == false); REQUIRE(kernel2.empty() == false); REQUIRE(hf.size() == 4); REQUIRE(hf.empty() == false); host = host2; copy = copy2; kernel = kernel2; span = span2; REQUIRE((host == host2 && host.empty() == false)); REQUIRE((copy == copy2 && copy.empty() == false)); REQUIRE((span == span2 && span.empty() == false)); REQUIRE((kernel == kernel2 && kernel.empty() == false)); REQUIRE(hf.size() == 4); REQUIRE(hf.empty() == false); hf::HostTask host3(host2); hf::SpanTask span3(span2); hf::CopyTask copy3(copy2); hf::KernelTask kernel3(kernel2); REQUIRE((host3 == host && host2 == host)); REQUIRE((span3 == span && span2 == span)); REQUIRE((copy3 == copy && copy2 == copy)); REQUIRE((kernel3 == kernel && kernel2 == kernel)); REQUIRE(hf.size() == 4); REQUIRE(hf.empty() == false); } // -------------------------------------------------------- // Testcase: host-tasks // -------------------------------------------------------- TEST_CASE("host-tasks" * doctest::timeout(300)) { const size_t num_tasks = 100; SUBCASE("Empty") { for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; REQUIRE(heteroflow.size() == 0); REQUIRE(heteroflow.empty() == true); executor.run(heteroflow).wait(); } } SUBCASE("Placeholder") { for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> hosts; for(size_t i=0; i<num_tasks; ++i) { hosts.emplace_back( heteroflow.placeholder<hf::HostTask>().name(std::to_string(i)) ); } for(size_t i=0; i<num_tasks; ++i) { REQUIRE(hosts[i].name() == std::to_string(i)); 
REQUIRE(hosts[i].num_dependents() == 0); REQUIRE(hosts[i].num_successors() == 0); } for(auto& host : hosts) { host.host([&counter](){ counter++; }); } executor.run(heteroflow).get(); REQUIRE(counter == num_tasks); } } SUBCASE("EmbarrassinglyParallel"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; for(size_t i=0;i<num_tasks;i++) { tasks.emplace_back(heteroflow.host([&counter]() {counter += 1;})); } REQUIRE(heteroflow.size() == num_tasks); executor.run(heteroflow).get(); REQUIRE(counter == num_tasks); REQUIRE(heteroflow.size() == 100); counter = 0; for(size_t i=0;i<num_tasks;i++){ tasks.emplace_back(heteroflow.host([&counter]() {counter += 1;})); } REQUIRE(heteroflow.size() == num_tasks * 2); executor.run(heteroflow).get(); REQUIRE(counter == num_tasks * 2); REQUIRE(heteroflow.size() == 200); } } SUBCASE("ParallelFor") { for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); // Range for for(size_t i=0; i<num_tasks; i++) { hf::Heteroflow heteroflow; std::atomic<int> counter{0}; auto N = ::rand() % 4098 + 1; std::vector<int> vec(N, 20); heteroflow.parallel_for(vec.begin(), vec.end(), [&](int i){ counter += i; }); executor.run(heteroflow).wait(); auto res = std::accumulate(vec.begin(), vec.end(), 0, std::plus<int>()); REQUIRE(counter == res); } // Index for for(size_t i=0; i<num_tasks; i++) { std::atomic<int> counter{0}; hf::Heteroflow heteroflow; auto N = ::rand() % 4098 + 1; auto S = ::min(::rand()%10, N) + 1; heteroflow.parallel_for(0, N, S, [&](int){ ++counter; }); executor.run(heteroflow).wait(); auto res = 0; for(auto i=0; i<N; i+=S) { ++res; } REQUIRE(counter == res); } } } SUBCASE("BinarySequence"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; for(size_t i=0;i<num_tasks;i++){ if(i%2 == 0){ tasks.emplace_back(heteroflow.host( [&counter]() { REQUIRE(counter == 0); counter += 1;} )); } else{ tasks.emplace_back(heteroflow.host( [&counter]() { REQUIRE(counter == 1); counter -= 1;} )); } if(i>0){ tasks[i-1].precede(tasks[i]); } if(i==0) { REQUIRE(tasks[i].num_dependents() == 0); } else { REQUIRE(tasks[i].num_dependents() == 1); } } executor.run(heteroflow).get(); } } SUBCASE("LinearCounter"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; for(size_t i=0;i<num_tasks;i++){ tasks.emplace_back( heteroflow.host([&counter, i]() { REQUIRE(counter == i); counter += 1;} ) ); if(i>0){ tasks[i-1].precede(tasks[i]); } } executor.run(heteroflow).get(); REQUIRE(counter == num_tasks); REQUIRE(heteroflow.size() == num_tasks); } } SUBCASE("Broadcast"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; auto src = heteroflow.host([&counter]() {counter -= 1;}); for(size_t i=1; i<num_tasks; i++){ auto tgt = heteroflow.host([&counter]() {REQUIRE(counter == -1);}); src.precede(tgt); } executor.run(heteroflow).get(); REQUIRE(counter == - 1); REQUIRE(heteroflow.size() == num_tasks); } } SUBCASE("Succeed"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; auto dst = heteroflow.host([&]() { REQUIRE(counter == num_tasks - 1);}); for(size_t i=1;i<num_tasks;i++){ auto src = heteroflow.host([&counter]() {counter += 1;}); dst.succeed(src); } 
executor.run(heteroflow).get(); REQUIRE(counter == num_tasks - 1); REQUIRE(heteroflow.size() == num_tasks); } } } // -------------------------------------------------------- // Testcase: span // -------------------------------------------------------- TEST_CASE("span" * doctest::timeout(300)) { const size_t num_tasks = 4096; for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto bytes = ::rand()% 1024; heteroflow.span(bytes); } executor.run(heteroflow).wait(); } } } // -------------------------------------------------------- // Testcase: memset // -------------------------------------------------------- TEST_CASE("memset" * doctest::timeout(300)) { const size_t num_tasks = 100; SUBCASE("span-fill") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ptr = new char[ndata]; auto span = heteroflow.span(ndata); auto fill = heteroflow.fill(span, ndata, 'z'); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata; j++) { REQUIRE(ptr[j] == 'z'); } delete [] ptr; }); fill.succeed(span).precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("span-fill-offset") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata = ::rand()%4096 + 1; auto offset = ::rand()%ndata; auto ptr = new char[ndata]; auto span = heteroflow.span(ndata); auto fill1 = heteroflow.fill(span, offset, ndata-offset, 'z'); auto fill2 = heteroflow.fill(span, offset, 'a'); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<offset; j++) { REQUIRE(ptr[j] == 'a'); } for(auto j=offset; j<ndata; j++) { REQUIRE(ptr[j] == 'z'); } delete [] ptr; }); fill1.succeed(span).precede(push); fill2.succeed(span).precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("kernel") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ptr = new char[ndata]; auto span = heteroflow.span(ndata); auto mset = heteroflow.kernel( (ndata+255)/256, 256, 0, k_set<char>, span, ndata, 'z' ); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata; j++) { REQUIRE(ptr[j] == 'z'); } delete [] ptr; }); span.precede(mset); mset.precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("span-fill-kernel") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ptr = new char[ndata]; auto span = heteroflow.span(ndata); auto fill = heteroflow.fill(span, ndata, 'a'); auto mset = heteroflow.kernel( (ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1 ); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata; j++) { REQUIRE(ptr[j] == 'b'); } delete [] ptr; }); span.precede(fill); fill.precede(mset); mset.precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("pull-kernel-push") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow 
heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ofset= ::rand()%ndata; auto ptr = new char[ndata]; std::fill_n(ptr, ndata, 'z'); auto span = heteroflow.span(ndata); auto fill = heteroflow.fill(span, ndata, 'a'); auto mset = heteroflow.kernel( (ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1 ); auto push = heteroflow.copy(ptr, span, ofset, ndata-ofset); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata-ofset; j++) { REQUIRE(ptr[j] == 'b'); } for(auto j=ndata-ofset; j<ndata; j++) { REQUIRE(ptr[j] == 'z'); } delete [] ptr; }); span.precede(fill); fill.precede(mset); mset.precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("from-host") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ptr = new char[ndata]; std::fill_n(ptr, ndata, 'a'); auto span = heteroflow.span(ptr, ndata); auto madd = heteroflow.kernel( (ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1 ); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata; j++) { REQUIRE(ptr[j] == 'b'); } delete [] ptr; }); span.precede(madd); madd.precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } } // -------------------------------------------------------- // Testcase: h2d // -------------------------------------------------------- TEST_CASE("h2d" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; std::vector<std::vector<char>> res(S); for(auto& v : res) { v.resize(N); } std::vector<char> vec(N); for(size_t i=0; i<N; ++i) { vec[i] = ::rand()%40; } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { std::fill_n(res[s].begin(), N, 'z'); auto span = hf.span(vec.size()); auto back = hf.copy(res[s].data(), span, N); for(size_t i=0; i<vec.size(); ++i) { auto copy = hf.copy(span, i, &(vec[i]), 1); copy.succeed(span) .precede(back); } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { REQUIRE(vec == res[s]); } } } } // -------------------------------------------------------- // Testcase: d2h // -------------------------------------------------------- TEST_CASE("d2h" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; std::vector<std::vector<char>> res(S); for(auto& v : res) { v.resize(N); } std::vector<char> vec(N); for(size_t i=0; i<N; ++i) { vec[i] = ::rand()%40; } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { std::fill_n(res[s].begin(), N, 'z'); auto span = hf.span(vec.data(), N); for(size_t i=0; i<N; ++i) { hf.copy(&(res[s][i]), span, i, 1) .succeed(span); } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { REQUIRE(vec == res[s]); } } } } // -------------------------------------------------------- // Testcase: h2d2h // -------------------------------------------------------- TEST_CASE("h2d2h" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; std::vector<std::vector<char>> res(S); for(auto& v : res) { v.resize(N); } std::vector<char> vec(N); for(size_t i=0; i<N; ++i) { vec[i] = ::rand()%40; } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { std::fill_n(res[s].begin(), N, 'z'); auto span = hf.span(vec.size()); for(size_t i=0; i<vec.size(); ++i) { 
auto h2d = hf.copy(span, i, &(vec[i]), 1); auto d2h = hf.copy(&(res[s][i]), span, i, 1); h2d.precede(d2h).succeed(span); } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { REQUIRE(vec == res[s]); } } } } // -------------------------------------------------------- // Testcase: d2d // -------------------------------------------------------- TEST_CASE("d2d" * doctest::timeout(300)) { SUBCASE("without-offset") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<100; ++i) { auto ndata = ::rand()%4096 + 1; auto data = new char[ndata]; auto span1 = heteroflow.span(ndata); auto span2 = heteroflow.span(ndata); auto fill1 = heteroflow.fill(span1, ndata, 'a'); auto fill2 = heteroflow.fill(span2, ndata, 'b'); auto kadd1 = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span1, ndata, 1 ); auto kadd2 = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span2, ndata, 1 ); auto trans = heteroflow.copy( span1, span2, ndata ); auto push1 = heteroflow.copy(data, span1, ndata); auto test1 = heteroflow.host([data, ndata](){ for(int i=0; i<ndata; ++i) { REQUIRE(data[i] == 'c'); } delete [] data; }); span1.precede(fill1); span2.precede(fill2); fill1.precede(kadd1); fill2.precede(kadd2); trans.succeed(kadd1, kadd2) .precede(push1); push1.precede(test1); } executor.run(heteroflow).wait(); } } } SUBCASE("with-offset") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<1024; ++i) { auto ndata = ::rand()%4096 + 1; auto offs1 = ::rand()%ndata; auto offs2 = ::rand()%ndata; auto togo = ::min(ndata-offs1, ndata-offs2); auto data = new char[ndata]; auto span1 = heteroflow.span(ndata); auto span2 = heteroflow.span(ndata); auto fill1 = heteroflow.fill(span1, ndata, 'a'); auto fill2 = heteroflow.fill(span2, ndata, 'b'); auto kadd1 = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span1, ndata, 1 ); auto kadd2 = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span2, ndata, 1 ); auto trans = heteroflow.copy( span1, offs1, span2, offs2, togo ); auto push1 = heteroflow.copy(data, span1, ndata); auto test1 = heteroflow.host([=](){ for(int i=0; i<offs1; ++i) { REQUIRE(data[i] == 'b'); } for(int i=offs1; i<offs1+togo; ++i) { REQUIRE(data[i] == 'c'); } for(int i=offs1+togo; i<ndata; ++i) { REQUIRE(data[i] == 'b'); } delete [] data; }); span1.precede(fill1); span2.precede(fill2); fill1.precede(kadd1); fill2.precede(kadd2); trans.succeed(kadd1, kadd2) .precede(push1); push1.precede(test1); } executor.run(heteroflow).wait(); } } } } // -------------------------------------------------------- // Testcase: h2d2d2h // -------------------------------------------------------- TEST_CASE("h2d2d2h" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; std::vector<std::vector<char>> res(S); for(auto& v : res) { v.resize(N); } std::vector<char> vec(N); for(size_t i=0; i<N; ++i) { vec[i] = ::rand()%40; } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { std::fill_n(res[s].begin(), N, 'z'); auto span1 = hf.span(vec.size()); auto span2 = hf.span(vec.size()); for(size_t i=0; i<vec.size(); ++i) { auto h2d = hf.copy(span1, i, &(vec[i]), 1); auto d2d = hf.copy(span2, i, span1, i, 1); auto d2h = hf.copy(&(res[s][i]), span2, i, 1); span1.precede(h2d); span2.precede(d2d); h2d.precede(d2d); d2d.precede(d2h); } } executor.run(hf).wait(); for(size_t 
s=0; s<S; ++s) { REQUIRE(vec == res[s]); } } } } // -------------------------------------------------------- // Testcase: dependent-copies // -------------------------------------------------------- TEST_CASE("dependent-copies" * doctest::timeout(300)) { using namespace std::literals::string_literals; const size_t N = 1<<10; const size_t S = 32; std::vector<std::vector<char>> in(S); std::vector<std::vector<char>> out(S); std::vector<hf::CopyTask> h2d(N); std::vector<hf::CopyTask> d2d(N); std::vector<hf::CopyTask> d2h(N); // randomize the in/out data for(size_t s=0; s<S; ++s) { in[s].resize(N); out[s].resize(N); for(size_t i=0; i<N; ++i) { in[s][i] = ::rand()%26 + 'a'; out[s][i] = ::rand()%26 + 'a'; } } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { auto span1 = hf.span(N).name("span1"); auto span2 = hf.span(N).name("span2"); // inter-tree dependency for(size_t i=1; i<N; i++) { h2d[i] = hf.copy(span1, i, &(in[s][i]), 1) .name("h2d["s + std::to_string(i) + ']'); d2d[i] = hf.copy(span2, i, span1, i, 1) .name("d2d["s + std::to_string(i) + ']'); d2h[i] = hf.copy(&(out[s][i]), span2, i, 1) .name("d2h["s + std::to_string(i) + ']'); h2d[i].precede(d2d[i]); d2d[i].precede(d2h[i]); } // tree dependency span1.precede(h2d[1]); span2.precede(h2d[1]); for(size_t i=1; i<N; ++i) { size_t l = i*2; size_t r = i*2 + 1; if(l < N) { h2d[i].precede(h2d[l]); d2d[i].precede(d2d[l]); d2h[i].precede(d2h[l]); } if(r < N) { h2d[i].precede(h2d[r]); d2d[i].precede(d2d[r]); d2h[i].precede(d2h[r]); } } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { for(size_t i=1; i<N; ++i) { REQUIRE(in[s][i] == out[s][i]); } } } } } // -------------------------------------------------------- // Testcase: chained-kernels // -------------------------------------------------------- TEST_CASE("chained-kernels" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; const size_t L = 1000; std::vector<int> vec(N, 0); std::vector<std::vector<int>> res(S); for(auto& v : res) { v.resize(N); } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { auto span = hf.span(vec.data(), N*sizeof(int)); auto copy = hf.copy(res[s].data(), span, N*sizeof(int)).name("copy"); hf::KernelTask prev, curr; for(size_t x=0; x<L; ++x) { curr = hf.kernel((N+16-1)/16, 16, 0, k_add<int>, span, N, 1) .name(std::to_string(x) + "-kernel"); if(x==0) { span.precede(curr); } else { prev.precede(curr); } prev = curr; } curr.precede(copy); auto test = hf.host([&vec=res[s]](){ for(auto item : vec) { REQUIRE(item == L); } }).name("test"); copy.precede(test); } executor.run(hf).wait(); } } } // -------------------------------------------------------- // Testcase: dependent-kernels // -------------------------------------------------------- TEST_CASE("dependent-kernels" * doctest::timeout(300)) { using namespace std::literals::string_literals; const size_t N = 1<<2; const size_t S = 1; std::vector<std::vector<char>> in(S); std::vector<std::vector<char>> out(S); std::vector<hf::CopyTask> h2d(N); std::vector<hf::CopyTask> d2d(N); std::vector<hf::CopyTask> d2h(N); // randomize the in/out data for(size_t s=0; s<S; ++s) { in[s].resize(N); out[s].resize(N); for(size_t i=0; i<N; ++i) { in[s][i] = ::rand()%26 + 'a'; out[s][i] = ::rand()%26 + 'a'; } } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { auto span1 = 
hf.span(N).name("span1"); auto span2 = hf.span(N).name("span2"); // inter-tree dependency for(size_t i=1; i<N; i++) { h2d[i] = hf.copy(span1, i, &(in[s][i]), 1) .name("h2d["s + std::to_string(i) + ']'); d2d[i] = hf.copy(span2, i, span1, i, 1) .name("d2d["s + std::to_string(i) + ']'); d2h[i] = hf.copy(&(out[s][i]), span2, i, 1) .name("d2h["s + std::to_string(i) + ']'); auto k1 = hf.kernel(1, 1, 0, k_single_add<char>, span1, N, i, 1 ).name("k1["s + std::to_string(i) + ']'); auto k2 = hf.kernel(1, 1, 0, k_single_add<char>, span2, N, i, 1 ).name("k2["s + std::to_string(i) + ']'); h2d[i].precede(k1); k1.precede(d2d[i]); d2d[i].precede(k2); k2.precede(d2h[i]); } // tree dependency span1.precede(h2d[1]); span2.precede(h2d[1]); for(size_t i=1; i<N; ++i) { size_t l = i*2; size_t r = i*2 + 1; if(l < N) { h2d[i].precede(h2d[l]); d2d[i].precede(d2d[l]); d2h[i].precede(d2h[l]); } if(r < N) { h2d[i].precede(h2d[r]); d2d[i].precede(d2d[r]); d2h[i].precede(d2h[r]); } } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { for(size_t i=1; i<N; ++i) { REQUIRE(in[s][i] + 2 == out[s][i]); } } } } } // -------------------------------------------------------- // Testcase: state-transition // -------------------------------------------------------- TEST_CASE("statefulness" * doctest::timeout(300)) { SUBCASE("linear-chain") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; std::vector<char> vec; size_t size = 0; char* data = nullptr; dim3 grid, block; auto host = heteroflow.host([&](){ size = 1234567; vec.resize(size, 'a'); data = vec.data(); grid = (size+255)/256; block = 256; }); auto span = heteroflow.span(std::ref(data), std::ref(size)); auto kadd = heteroflow.kernel( std::ref(grid), std::ref(block), 0, k_add<char>, span, std::ref(size), 1 ); auto push = heteroflow.copy(std::ref(data), span, std::ref(size)); auto test = heteroflow.host([&](){ REQUIRE(size == vec.size()); REQUIRE(data == vec.data()); REQUIRE(grid.x == (size+255)/256); REQUIRE(block.x == 256); for(auto i : vec) { REQUIRE(i == 'b'); } }); host.precede(span); span.precede(kadd); kadd.precede(push); push.precede(test); executor.run(heteroflow).wait(); } } } } // -------------------------------------------------------- // Testcase: run_n // -------------------------------------------------------- TEST_CASE("run_n" * doctest::timeout(300)) { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { std::atomic<size_t> counter{0}; hf::Executor executor(c, g); hf::Heteroflow heteroflow; const size_t ndata = 5000; for(size_t n=0; n<2*G; ++n) { std::vector<char> vec(ndata); auto data = vec.data(); auto host = heteroflow.host([vec=std::move(vec)]() mutable { for(auto& c : vec) c = 0; }); auto span = heteroflow.span(data, ndata); auto kadd = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span, ndata, 1 ); auto push = heteroflow.copy(data, span, ndata); auto combine = heteroflow.host([&counter, data, ndata] () { for(size_t i=0; i<ndata; ++i) { counter += data[i]; } }); host.precede(span); span.precede(kadd); kadd.precede(push); push.precede(combine); } auto res = 0; for(size_t s=0; s<25; ++s){ auto r = ::rand() % 5; res += r; executor.run_n(heteroflow, r).wait(); REQUIRE(counter == res*ndata*2*G); } } } }
34966c1dbca28f3357213f0f6fbd3202ecd34ed3.cu
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include <doctest.h> #include <heteroflow/heteroflow.hpp> // ---------------------------------------------------------------------------- // Parameters // ---------------------------------------------------------------------------- const size_t C = std::min(16u, std::thread::hardware_concurrency()); const size_t G = std::min(4u, hf::cuda::num_devices()); // ---------------------------------------------------------------------------- // Kernel // ---------------------------------------------------------------------------- template <typename T> __global__ void k_set(T* ptr, size_t N, T value) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) { ptr[i] = value; } } template <typename T> __global__ void k_add(T* ptr, size_t N, T value) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) { ptr[i] += value; } } template <typename T> __global__ void k_single_add(T* ptr, size_t N, int idx, T value) { ptr[idx] += value; } // -------------------------------------------------------- // Testcase: static // -------------------------------------------------------- TEST_CASE("static" * doctest::timeout(300)) { hf::Executor executor(C, G); REQUIRE(executor.num_cpu_workers() == C); REQUIRE(executor.num_gpu_workers() == G); REQUIRE(executor.num_workers() == C + G); hf::Heteroflow hf; REQUIRE(hf.empty() == true); REQUIRE(hf.size() == 0); hf::HostTask host; hf::SpanTask span; hf::KernelTask kernel; hf::CopyTask copy; REQUIRE(host.empty() == true); REQUIRE(span.empty() == true); REQUIRE(kernel.empty() == true); REQUIRE(copy.empty() == true); auto host2 = hf.placeholder<hf::HostTask>(); auto span2 = hf.placeholder<hf::SpanTask>(); auto copy2 = hf.placeholder<hf::CopyTask>(); auto kernel2 = hf.placeholder<hf::KernelTask>(); REQUIRE(host2.empty() == false); REQUIRE(span2.empty() == false); REQUIRE(copy2.empty() == false); REQUIRE(kernel2.empty() == false); REQUIRE(hf.size() == 4); REQUIRE(hf.empty() == false); host = host2; copy = copy2; kernel = kernel2; span = span2; REQUIRE((host == host2 && host.empty() == false)); REQUIRE((copy == copy2 && copy.empty() == false)); REQUIRE((span == span2 && span.empty() == false)); REQUIRE((kernel == kernel2 && kernel.empty() == false)); REQUIRE(hf.size() == 4); REQUIRE(hf.empty() == false); hf::HostTask host3(host2); hf::SpanTask span3(span2); hf::CopyTask copy3(copy2); hf::KernelTask kernel3(kernel2); REQUIRE((host3 == host && host2 == host)); REQUIRE((span3 == span && span2 == span)); REQUIRE((copy3 == copy && copy2 == copy)); REQUIRE((kernel3 == kernel && kernel2 == kernel)); REQUIRE(hf.size() == 4); REQUIRE(hf.empty() == false); } // -------------------------------------------------------- // Testcase: host-tasks // -------------------------------------------------------- TEST_CASE("host-tasks" * doctest::timeout(300)) { const size_t num_tasks = 100; SUBCASE("Empty") { for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; REQUIRE(heteroflow.size() == 0); REQUIRE(heteroflow.empty() == true); executor.run(heteroflow).wait(); } } SUBCASE("Placeholder") { for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> hosts; for(size_t i=0; i<num_tasks; ++i) { hosts.emplace_back( heteroflow.placeholder<hf::HostTask>().name(std::to_string(i)) ); } for(size_t i=0; i<num_tasks; ++i) { REQUIRE(hosts[i].name() == std::to_string(i)); REQUIRE(hosts[i].num_dependents() == 0); REQUIRE(hosts[i].num_successors() == 0); } for(auto& 
host : hosts) { host.host([&counter](){ counter++; }); } executor.run(heteroflow).get(); REQUIRE(counter == num_tasks); } } SUBCASE("EmbarrassinglyParallel"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; for(size_t i=0;i<num_tasks;i++) { tasks.emplace_back(heteroflow.host([&counter]() {counter += 1;})); } REQUIRE(heteroflow.size() == num_tasks); executor.run(heteroflow).get(); REQUIRE(counter == num_tasks); REQUIRE(heteroflow.size() == 100); counter = 0; for(size_t i=0;i<num_tasks;i++){ tasks.emplace_back(heteroflow.host([&counter]() {counter += 1;})); } REQUIRE(heteroflow.size() == num_tasks * 2); executor.run(heteroflow).get(); REQUIRE(counter == num_tasks * 2); REQUIRE(heteroflow.size() == 200); } } SUBCASE("ParallelFor") { for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); // Range for for(size_t i=0; i<num_tasks; i++) { hf::Heteroflow heteroflow; std::atomic<int> counter{0}; auto N = ::rand() % 4098 + 1; std::vector<int> vec(N, 20); heteroflow.parallel_for(vec.begin(), vec.end(), [&](int i){ counter += i; }); executor.run(heteroflow).wait(); auto res = std::accumulate(vec.begin(), vec.end(), 0, std::plus<int>()); REQUIRE(counter == res); } // Index for for(size_t i=0; i<num_tasks; i++) { std::atomic<int> counter{0}; hf::Heteroflow heteroflow; auto N = ::rand() % 4098 + 1; auto S = std::min(::rand()%10, N) + 1; heteroflow.parallel_for(0, N, S, [&](int){ ++counter; }); executor.run(heteroflow).wait(); auto res = 0; for(auto i=0; i<N; i+=S) { ++res; } REQUIRE(counter == res); } } } SUBCASE("BinarySequence"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; for(size_t i=0;i<num_tasks;i++){ if(i%2 == 0){ tasks.emplace_back(heteroflow.host( [&counter]() { REQUIRE(counter == 0); counter += 1;} )); } else{ tasks.emplace_back(heteroflow.host( [&counter]() { REQUIRE(counter == 1); counter -= 1;} )); } if(i>0){ tasks[i-1].precede(tasks[i]); } if(i==0) { REQUIRE(tasks[i].num_dependents() == 0); } else { REQUIRE(tasks[i].num_dependents() == 1); } } executor.run(heteroflow).get(); } } SUBCASE("LinearCounter"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; for(size_t i=0;i<num_tasks;i++){ tasks.emplace_back( heteroflow.host([&counter, i]() { REQUIRE(counter == i); counter += 1;} ) ); if(i>0){ tasks[i-1].precede(tasks[i]); } } executor.run(heteroflow).get(); REQUIRE(counter == num_tasks); REQUIRE(heteroflow.size() == num_tasks); } } SUBCASE("Broadcast"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; auto src = heteroflow.host([&counter]() {counter -= 1;}); for(size_t i=1; i<num_tasks; i++){ auto tgt = heteroflow.host([&counter]() {REQUIRE(counter == -1);}); src.precede(tgt); } executor.run(heteroflow).get(); REQUIRE(counter == - 1); REQUIRE(heteroflow.size() == num_tasks); } } SUBCASE("Succeed"){ for(size_t W=1; W<=C; ++W) { hf::Executor executor(W); hf::Heteroflow heteroflow; std::atomic<int> counter {0}; std::vector<hf::HostTask> tasks; auto dst = heteroflow.host([&]() { REQUIRE(counter == num_tasks - 1);}); for(size_t i=1;i<num_tasks;i++){ auto src = heteroflow.host([&counter]() {counter += 1;}); dst.succeed(src); } executor.run(heteroflow).get(); REQUIRE(counter == num_tasks - 1); REQUIRE(heteroflow.size() 
== num_tasks); } } } // -------------------------------------------------------- // Testcase: span // -------------------------------------------------------- TEST_CASE("span" * doctest::timeout(300)) { const size_t num_tasks = 4096; for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto bytes = ::rand()% 1024; heteroflow.span(bytes); } executor.run(heteroflow).wait(); } } } // -------------------------------------------------------- // Testcase: memset // -------------------------------------------------------- TEST_CASE("memset" * doctest::timeout(300)) { const size_t num_tasks = 100; SUBCASE("span-fill") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ptr = new char[ndata]; auto span = heteroflow.span(ndata); auto fill = heteroflow.fill(span, ndata, 'z'); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata; j++) { REQUIRE(ptr[j] == 'z'); } delete [] ptr; }); fill.succeed(span).precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("span-fill-offset") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata = ::rand()%4096 + 1; auto offset = ::rand()%ndata; auto ptr = new char[ndata]; auto span = heteroflow.span(ndata); auto fill1 = heteroflow.fill(span, offset, ndata-offset, 'z'); auto fill2 = heteroflow.fill(span, offset, 'a'); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<offset; j++) { REQUIRE(ptr[j] == 'a'); } for(auto j=offset; j<ndata; j++) { REQUIRE(ptr[j] == 'z'); } delete [] ptr; }); fill1.succeed(span).precede(push); fill2.succeed(span).precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("kernel") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ptr = new char[ndata]; auto span = heteroflow.span(ndata); auto mset = heteroflow.kernel( (ndata+255)/256, 256, 0, k_set<char>, span, ndata, 'z' ); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata; j++) { REQUIRE(ptr[j] == 'z'); } delete [] ptr; }); span.precede(mset); mset.precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("span-fill-kernel") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ptr = new char[ndata]; auto span = heteroflow.span(ndata); auto fill = heteroflow.fill(span, ndata, 'a'); auto mset = heteroflow.kernel( (ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1 ); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata; j++) { REQUIRE(ptr[j] == 'b'); } delete [] ptr; }); span.precede(fill); fill.precede(mset); mset.precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("pull-kernel-push") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ofset= 
::rand()%ndata; auto ptr = new char[ndata]; std::fill_n(ptr, ndata, 'z'); auto span = heteroflow.span(ndata); auto fill = heteroflow.fill(span, ndata, 'a'); auto mset = heteroflow.kernel( (ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1 ); auto push = heteroflow.copy(ptr, span, ofset, ndata-ofset); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata-ofset; j++) { REQUIRE(ptr[j] == 'b'); } for(auto j=ndata-ofset; j<ndata; j++) { REQUIRE(ptr[j] == 'z'); } delete [] ptr; }); span.precede(fill); fill.precede(mset); mset.precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } SUBCASE("from-host") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<num_tasks; ++i) { auto ndata= ::rand()%4096 + 1; auto ptr = new char[ndata]; std::fill_n(ptr, ndata, 'a'); auto span = heteroflow.span(ptr, ndata); auto madd = heteroflow.kernel( (ndata+255)/256, 256, 0, k_add<char>, span, ndata, 1 ); auto push = heteroflow.copy(ptr, span, ndata); auto host = heteroflow.host([=](){ for(auto j=0; j<ndata; j++) { REQUIRE(ptr[j] == 'b'); } delete [] ptr; }); span.precede(madd); madd.precede(push); push.precede(host); } executor.run(heteroflow).wait(); } } } } // -------------------------------------------------------- // Testcase: h2d // -------------------------------------------------------- TEST_CASE("h2d" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; std::vector<std::vector<char>> res(S); for(auto& v : res) { v.resize(N); } std::vector<char> vec(N); for(size_t i=0; i<N; ++i) { vec[i] = ::rand()%40; } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { std::fill_n(res[s].begin(), N, 'z'); auto span = hf.span(vec.size()); auto back = hf.copy(res[s].data(), span, N); for(size_t i=0; i<vec.size(); ++i) { auto copy = hf.copy(span, i, &(vec[i]), 1); copy.succeed(span) .precede(back); } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { REQUIRE(vec == res[s]); } } } } // -------------------------------------------------------- // Testcase: d2h // -------------------------------------------------------- TEST_CASE("d2h" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; std::vector<std::vector<char>> res(S); for(auto& v : res) { v.resize(N); } std::vector<char> vec(N); for(size_t i=0; i<N; ++i) { vec[i] = ::rand()%40; } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { std::fill_n(res[s].begin(), N, 'z'); auto span = hf.span(vec.data(), N); for(size_t i=0; i<N; ++i) { hf.copy(&(res[s][i]), span, i, 1) .succeed(span); } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { REQUIRE(vec == res[s]); } } } } // -------------------------------------------------------- // Testcase: h2d2h // -------------------------------------------------------- TEST_CASE("h2d2h" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; std::vector<std::vector<char>> res(S); for(auto& v : res) { v.resize(N); } std::vector<char> vec(N); for(size_t i=0; i<N; ++i) { vec[i] = ::rand()%40; } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { std::fill_n(res[s].begin(), N, 'z'); auto span = hf.span(vec.size()); for(size_t i=0; i<vec.size(); ++i) { auto h2d = hf.copy(span, i, &(vec[i]), 1); auto d2h = hf.copy(&(res[s][i]), span, i, 1); 
h2d.precede(d2h).succeed(span); } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { REQUIRE(vec == res[s]); } } } } // -------------------------------------------------------- // Testcase: d2d // -------------------------------------------------------- TEST_CASE("d2d" * doctest::timeout(300)) { SUBCASE("without-offset") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<100; ++i) { auto ndata = ::rand()%4096 + 1; auto data = new char[ndata]; auto span1 = heteroflow.span(ndata); auto span2 = heteroflow.span(ndata); auto fill1 = heteroflow.fill(span1, ndata, 'a'); auto fill2 = heteroflow.fill(span2, ndata, 'b'); auto kadd1 = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span1, ndata, 1 ); auto kadd2 = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span2, ndata, 1 ); auto trans = heteroflow.copy( span1, span2, ndata ); auto push1 = heteroflow.copy(data, span1, ndata); auto test1 = heteroflow.host([data, ndata](){ for(int i=0; i<ndata; ++i) { REQUIRE(data[i] == 'c'); } delete [] data; }); span1.precede(fill1); span2.precede(fill2); fill1.precede(kadd1); fill2.precede(kadd2); trans.succeed(kadd1, kadd2) .precede(push1); push1.precede(test1); } executor.run(heteroflow).wait(); } } } SUBCASE("with-offset") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; for(size_t i=0; i<1024; ++i) { auto ndata = ::rand()%4096 + 1; auto offs1 = ::rand()%ndata; auto offs2 = ::rand()%ndata; auto togo = std::min(ndata-offs1, ndata-offs2); auto data = new char[ndata]; auto span1 = heteroflow.span(ndata); auto span2 = heteroflow.span(ndata); auto fill1 = heteroflow.fill(span1, ndata, 'a'); auto fill2 = heteroflow.fill(span2, ndata, 'b'); auto kadd1 = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span1, ndata, 1 ); auto kadd2 = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span2, ndata, 1 ); auto trans = heteroflow.copy( span1, offs1, span2, offs2, togo ); auto push1 = heteroflow.copy(data, span1, ndata); auto test1 = heteroflow.host([=](){ for(int i=0; i<offs1; ++i) { REQUIRE(data[i] == 'b'); } for(int i=offs1; i<offs1+togo; ++i) { REQUIRE(data[i] == 'c'); } for(int i=offs1+togo; i<ndata; ++i) { REQUIRE(data[i] == 'b'); } delete [] data; }); span1.precede(fill1); span2.precede(fill2); fill1.precede(kadd1); fill2.precede(kadd2); trans.succeed(kadd1, kadd2) .precede(push1); push1.precede(test1); } executor.run(heteroflow).wait(); } } } } // -------------------------------------------------------- // Testcase: h2d2d2h // -------------------------------------------------------- TEST_CASE("h2d2d2h" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; std::vector<std::vector<char>> res(S); for(auto& v : res) { v.resize(N); } std::vector<char> vec(N); for(size_t i=0; i<N; ++i) { vec[i] = ::rand()%40; } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { std::fill_n(res[s].begin(), N, 'z'); auto span1 = hf.span(vec.size()); auto span2 = hf.span(vec.size()); for(size_t i=0; i<vec.size(); ++i) { auto h2d = hf.copy(span1, i, &(vec[i]), 1); auto d2d = hf.copy(span2, i, span1, i, 1); auto d2h = hf.copy(&(res[s][i]), span2, i, 1); span1.precede(h2d); span2.precede(d2d); h2d.precede(d2d); d2d.precede(d2h); } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { REQUIRE(vec == res[s]); } } } } // 
-------------------------------------------------------- // Testcase: dependent-copies // -------------------------------------------------------- TEST_CASE("dependent-copies" * doctest::timeout(300)) { using namespace std::literals::string_literals; const size_t N = 1<<10; const size_t S = 32; std::vector<std::vector<char>> in(S); std::vector<std::vector<char>> out(S); std::vector<hf::CopyTask> h2d(N); std::vector<hf::CopyTask> d2d(N); std::vector<hf::CopyTask> d2h(N); // randomize the in/out data for(size_t s=0; s<S; ++s) { in[s].resize(N); out[s].resize(N); for(size_t i=0; i<N; ++i) { in[s][i] = ::rand()%26 + 'a'; out[s][i] = ::rand()%26 + 'a'; } } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { auto span1 = hf.span(N).name("span1"); auto span2 = hf.span(N).name("span2"); // inter-tree dependency for(size_t i=1; i<N; i++) { h2d[i] = hf.copy(span1, i, &(in[s][i]), 1) .name("h2d["s + std::to_string(i) + ']'); d2d[i] = hf.copy(span2, i, span1, i, 1) .name("d2d["s + std::to_string(i) + ']'); d2h[i] = hf.copy(&(out[s][i]), span2, i, 1) .name("d2h["s + std::to_string(i) + ']'); h2d[i].precede(d2d[i]); d2d[i].precede(d2h[i]); } // tree dependency span1.precede(h2d[1]); span2.precede(h2d[1]); for(size_t i=1; i<N; ++i) { size_t l = i*2; size_t r = i*2 + 1; if(l < N) { h2d[i].precede(h2d[l]); d2d[i].precede(d2d[l]); d2h[i].precede(d2h[l]); } if(r < N) { h2d[i].precede(h2d[r]); d2d[i].precede(d2d[r]); d2h[i].precede(d2h[r]); } } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { for(size_t i=1; i<N; ++i) { REQUIRE(in[s][i] == out[s][i]); } } } } } // -------------------------------------------------------- // Testcase: chained-kernels // -------------------------------------------------------- TEST_CASE("chained-kernels" * doctest::timeout(300)) { const size_t N = 1000; const size_t S = 64; const size_t L = 1000; std::vector<int> vec(N, 0); std::vector<std::vector<int>> res(S); for(auto& v : res) { v.resize(N); } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { auto span = hf.span(vec.data(), N*sizeof(int)); auto copy = hf.copy(res[s].data(), span, N*sizeof(int)).name("copy"); hf::KernelTask prev, curr; for(size_t x=0; x<L; ++x) { curr = hf.kernel((N+16-1)/16, 16, 0, k_add<int>, span, N, 1) .name(std::to_string(x) + "-kernel"); if(x==0) { span.precede(curr); } else { prev.precede(curr); } prev = curr; } curr.precede(copy); auto test = hf.host([&vec=res[s]](){ for(auto item : vec) { REQUIRE(item == L); } }).name("test"); copy.precede(test); } executor.run(hf).wait(); } } } // -------------------------------------------------------- // Testcase: dependent-kernels // -------------------------------------------------------- TEST_CASE("dependent-kernels" * doctest::timeout(300)) { using namespace std::literals::string_literals; const size_t N = 1<<2; const size_t S = 1; std::vector<std::vector<char>> in(S); std::vector<std::vector<char>> out(S); std::vector<hf::CopyTask> h2d(N); std::vector<hf::CopyTask> d2d(N); std::vector<hf::CopyTask> d2h(N); // randomize the in/out data for(size_t s=0; s<S; ++s) { in[s].resize(N); out[s].resize(N); for(size_t i=0; i<N; ++i) { in[s][i] = ::rand()%26 + 'a'; out[s][i] = ::rand()%26 + 'a'; } } for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow hf; for(size_t s=0; s<S; ++s) { auto span1 = hf.span(N).name("span1"); auto span2 = 
hf.span(N).name("span2"); // inter-tree dependency for(size_t i=1; i<N; i++) { h2d[i] = hf.copy(span1, i, &(in[s][i]), 1) .name("h2d["s + std::to_string(i) + ']'); d2d[i] = hf.copy(span2, i, span1, i, 1) .name("d2d["s + std::to_string(i) + ']'); d2h[i] = hf.copy(&(out[s][i]), span2, i, 1) .name("d2h["s + std::to_string(i) + ']'); auto k1 = hf.kernel(1, 1, 0, k_single_add<char>, span1, N, i, 1 ).name("k1["s + std::to_string(i) + ']'); auto k2 = hf.kernel(1, 1, 0, k_single_add<char>, span2, N, i, 1 ).name("k2["s + std::to_string(i) + ']'); h2d[i].precede(k1); k1.precede(d2d[i]); d2d[i].precede(k2); k2.precede(d2h[i]); } // tree dependency span1.precede(h2d[1]); span2.precede(h2d[1]); for(size_t i=1; i<N; ++i) { size_t l = i*2; size_t r = i*2 + 1; if(l < N) { h2d[i].precede(h2d[l]); d2d[i].precede(d2d[l]); d2h[i].precede(d2h[l]); } if(r < N) { h2d[i].precede(h2d[r]); d2d[i].precede(d2d[r]); d2h[i].precede(d2h[r]); } } } executor.run(hf).wait(); for(size_t s=0; s<S; ++s) { for(size_t i=1; i<N; ++i) { REQUIRE(in[s][i] + 2 == out[s][i]); } } } } } // -------------------------------------------------------- // Testcase: state-transition // -------------------------------------------------------- TEST_CASE("statefulness" * doctest::timeout(300)) { SUBCASE("linear-chain") { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { hf::Executor executor(c, g); hf::Heteroflow heteroflow; std::vector<char> vec; size_t size = 0; char* data = nullptr; dim3 grid, block; auto host = heteroflow.host([&](){ size = 1234567; vec.resize(size, 'a'); data = vec.data(); grid = (size+255)/256; block = 256; }); auto span = heteroflow.span(std::ref(data), std::ref(size)); auto kadd = heteroflow.kernel( std::ref(grid), std::ref(block), 0, k_add<char>, span, std::ref(size), 1 ); auto push = heteroflow.copy(std::ref(data), span, std::ref(size)); auto test = heteroflow.host([&](){ REQUIRE(size == vec.size()); REQUIRE(data == vec.data()); REQUIRE(grid.x == (size+255)/256); REQUIRE(block.x == 256); for(auto i : vec) { REQUIRE(i == 'b'); } }); host.precede(span); span.precede(kadd); kadd.precede(push); push.precede(test); executor.run(heteroflow).wait(); } } } } // -------------------------------------------------------- // Testcase: run_n // -------------------------------------------------------- TEST_CASE("run_n" * doctest::timeout(300)) { for(size_t c=1; c<=C; ++c) { for(size_t g=1; g<=G; ++g) { std::atomic<size_t> counter{0}; hf::Executor executor(c, g); hf::Heteroflow heteroflow; const size_t ndata = 5000; for(size_t n=0; n<2*G; ++n) { std::vector<char> vec(ndata); auto data = vec.data(); auto host = heteroflow.host([vec=std::move(vec)]() mutable { for(auto& c : vec) c = 0; }); auto span = heteroflow.span(data, ndata); auto kadd = heteroflow.kernel( (ndata + 255)/256, 256, 0, k_add<char>, span, ndata, 1 ); auto push = heteroflow.copy(data, span, ndata); auto combine = heteroflow.host([&counter, data, ndata] () { for(size_t i=0; i<ndata; ++i) { counter += data[i]; } }); host.precede(span); span.precede(kadd); kadd.precede(push); push.precede(combine); } auto res = 0; for(size_t s=0; s<25; ++s){ auto r = ::rand() % 5; res += r; executor.run_n(heteroflow, r).wait(); REQUIRE(counter == res*ndata*2*G); } } } }
d72ef79354ca93943db1d4a157616c955ad96f23.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/cublas_wrappers.h> #include <test_utils.h> #include <cuml/common/logger.hpp> #include <cuml/decomposition/pca_mg.hpp> #include <opg/linalg/gemm.hpp> #include <opg/matrix/matrix_utils.hpp> #include <raft/cuda_utils.cuh> #include <raft/matrix/matrix.cuh> #include "test_opg_utils.h" #include <raft/comms/mpi_comms.hpp> namespace MLCommon { namespace Test { namespace opg { struct PCAOpgParams { int M; int N; int N_components; ML::mg_solver algorithm; std::vector<int> partSizes; std::vector<int> ranksOwners; Matrix::Layout layout; unsigned long long int seed; }; template <typename T> class PCAOpgTest : public testing::TestWithParam<PCAOpgParams> { public: void SetUp() { params = GetParam(); raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD); // Prepare resource const raft::comms::comms_t& comm = handle.get_comms(); stream = handle.get_stream(); hipblasHandle_t cublasHandle = handle.get_cublas_handle(); myRank = comm.get_rank(); totalRanks = comm.get_size(); raft::random::Rng r(params.seed + myRank); CUBLAS_CHECK(hipblasSetStream(cublasHandle, stream)); if (myRank == 0) { std::cout << "Testing PCA of " << params.M << " x " << params.N << " matrix" << std::endl; } // Prepare X matrix std::vector<Matrix::RankSizePair*> totalPartsToRanks; for (int i = 0; i < params.partSizes.size(); i++) { Matrix::RankSizePair* rspt = new Matrix::RankSizePair(params.ranksOwners[i] % totalRanks, params.partSizes[i]); totalPartsToRanks.push_back(rspt); } Matrix::PartDescriptor desc( params.M, params.N, totalPartsToRanks, comm.get_rank(), params.layout); std::vector<Matrix::Data<T>*> inParts; Matrix::opg::allocate(handle, inParts, desc, myRank, stream); Matrix::opg::randomize(handle, r, inParts, desc, myRank, stream, T(10.0), T(20.0)); handle.wait_on_user_stream(); prmsPCA.n_rows = params.M; prmsPCA.n_cols = params.N; prmsPCA.n_components = params.N_components; prmsPCA.whiten = false; prmsPCA.n_iterations = 100; prmsPCA.tol = 0.01; prmsPCA.algorithm = params.algorithm; rmm::device_uvector<T> components(prmsPCA.n_components * prmsPCA.n_cols, stream); rmm::device_uvector<T> explained_var(prmsPCA.n_components, stream); rmm::device_uvector<T> explained_var_ratio(prmsPCA.n_components, stream); rmm::device_uvector<T> singular_vals(prmsPCA.n_components, stream); rmm::device_uvector<T> mu(prmsPCA.n_cols, stream); rmm::device_uvector<T> noise_vars(prmsPCA.n_components, stream); ML::PCA::opg::fit(handle, inParts, desc, components.data(), explained_var.data(), explained_var_ratio.data(), singular_vals.data(), mu.data(), noise_vars.data(), prmsPCA, false); CUML_LOG_DEBUG( raft::arr2Str(singular_vals.data(), params.N_components, "Singular Vals", stream).c_str()); CUML_LOG_DEBUG( raft::arr2Str(explained_var.data(), params.N_components, "Explained Variance", stream) .c_str()); CUML_LOG_DEBUG( raft::arr2Str( 
explained_var_ratio.data(), params.N_components, "Explained Variance Ratio", stream) .c_str()); CUML_LOG_DEBUG( raft::arr2Str(components.data(), params.N_components * params.N, "Components", stream) .c_str()); Matrix::opg::deallocate(handle, inParts, desc, myRank, stream); } protected: PCAOpgParams params; raft::handle_t handle; hipStream_t stream = 0; int myRank; int totalRanks; ML::paramsPCAMG prmsPCA; }; const std::vector<PCAOpgParams> inputs = { {20, 4, 2, ML::mg_solver::COV_EIG_JACOBI, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}, {20, 4, 2, ML::mg_solver::COV_EIG_DQ, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}, {20, 4, 2, ML::mg_solver::QR, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}}; typedef PCAOpgTest<float> PCAOpgTestF; TEST_P(PCAOpgTestF, Result) { if (myRank == 0) { // We should be inverse transforming and checking against the original // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474 ASSERT_TRUE(true); } } INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestF, ::testing::ValuesIn(inputs)); typedef PCAOpgTest<double> PCAOpgTestD; TEST_P(PCAOpgTestD, Result) { if (myRank == 0) { // We should be inverse transforming and checking against the original // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474 ASSERT_TRUE(true); } } INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestD, ::testing::ValuesIn(inputs)); } // end namespace opg } // end namespace Test } // end namespace MLCommon
d72ef79354ca93943db1d4a157616c955ad96f23.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/cublas_wrappers.h> #include <test_utils.h> #include <cuml/common/logger.hpp> #include <cuml/decomposition/pca_mg.hpp> #include <opg/linalg/gemm.hpp> #include <opg/matrix/matrix_utils.hpp> #include <raft/cuda_utils.cuh> #include <raft/matrix/matrix.cuh> #include "test_opg_utils.h" #include <raft/comms/mpi_comms.hpp> namespace MLCommon { namespace Test { namespace opg { struct PCAOpgParams { int M; int N; int N_components; ML::mg_solver algorithm; std::vector<int> partSizes; std::vector<int> ranksOwners; Matrix::Layout layout; unsigned long long int seed; }; template <typename T> class PCAOpgTest : public testing::TestWithParam<PCAOpgParams> { public: void SetUp() { params = GetParam(); raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD); // Prepare resource const raft::comms::comms_t& comm = handle.get_comms(); stream = handle.get_stream(); cublasHandle_t cublasHandle = handle.get_cublas_handle(); myRank = comm.get_rank(); totalRanks = comm.get_size(); raft::random::Rng r(params.seed + myRank); CUBLAS_CHECK(cublasSetStream(cublasHandle, stream)); if (myRank == 0) { std::cout << "Testing PCA of " << params.M << " x " << params.N << " matrix" << std::endl; } // Prepare X matrix std::vector<Matrix::RankSizePair*> totalPartsToRanks; for (int i = 0; i < params.partSizes.size(); i++) { Matrix::RankSizePair* rspt = new Matrix::RankSizePair(params.ranksOwners[i] % totalRanks, params.partSizes[i]); totalPartsToRanks.push_back(rspt); } Matrix::PartDescriptor desc( params.M, params.N, totalPartsToRanks, comm.get_rank(), params.layout); std::vector<Matrix::Data<T>*> inParts; Matrix::opg::allocate(handle, inParts, desc, myRank, stream); Matrix::opg::randomize(handle, r, inParts, desc, myRank, stream, T(10.0), T(20.0)); handle.wait_on_user_stream(); prmsPCA.n_rows = params.M; prmsPCA.n_cols = params.N; prmsPCA.n_components = params.N_components; prmsPCA.whiten = false; prmsPCA.n_iterations = 100; prmsPCA.tol = 0.01; prmsPCA.algorithm = params.algorithm; rmm::device_uvector<T> components(prmsPCA.n_components * prmsPCA.n_cols, stream); rmm::device_uvector<T> explained_var(prmsPCA.n_components, stream); rmm::device_uvector<T> explained_var_ratio(prmsPCA.n_components, stream); rmm::device_uvector<T> singular_vals(prmsPCA.n_components, stream); rmm::device_uvector<T> mu(prmsPCA.n_cols, stream); rmm::device_uvector<T> noise_vars(prmsPCA.n_components, stream); ML::PCA::opg::fit(handle, inParts, desc, components.data(), explained_var.data(), explained_var_ratio.data(), singular_vals.data(), mu.data(), noise_vars.data(), prmsPCA, false); CUML_LOG_DEBUG( raft::arr2Str(singular_vals.data(), params.N_components, "Singular Vals", stream).c_str()); CUML_LOG_DEBUG( raft::arr2Str(explained_var.data(), params.N_components, "Explained Variance", stream) .c_str()); CUML_LOG_DEBUG( raft::arr2Str( explained_var_ratio.data(), params.N_components, "Explained 
Variance Ratio", stream) .c_str()); CUML_LOG_DEBUG( raft::arr2Str(components.data(), params.N_components * params.N, "Components", stream) .c_str()); Matrix::opg::deallocate(handle, inParts, desc, myRank, stream); } protected: PCAOpgParams params; raft::handle_t handle; cudaStream_t stream = 0; int myRank; int totalRanks; ML::paramsPCAMG prmsPCA; }; const std::vector<PCAOpgParams> inputs = { {20, 4, 2, ML::mg_solver::COV_EIG_JACOBI, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}, {20, 4, 2, ML::mg_solver::COV_EIG_DQ, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}, {20, 4, 2, ML::mg_solver::QR, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}}; typedef PCAOpgTest<float> PCAOpgTestF; TEST_P(PCAOpgTestF, Result) { if (myRank == 0) { // We should be inverse transforming and checking against the original // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474 ASSERT_TRUE(true); } } INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestF, ::testing::ValuesIn(inputs)); typedef PCAOpgTest<double> PCAOpgTestD; TEST_P(PCAOpgTestD, Result) { if (myRank == 0) { // We should be inverse transforming and checking against the original // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474 ASSERT_TRUE(true); } } INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestD, ::testing::ValuesIn(inputs)); } // end namespace opg } // end namespace Test } // end namespace MLCommon
44ddd5d51c3443070f7396fb1aa952dc2221dfb8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> s d c */ #include "common_magma.h" #include "commonblas_z.h" #define PRECISION_z // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /* --------------------------------------------------------------------------- */ /** Purpose ------- ZLAQPS computes a step of QR factorization with column pivoting of a complex M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] NB INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] dA COMPLEX*16 array, dimension (LDDA,N) On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] dtau COMPLEX*16 array, dimension (KB) The scalar factors of the elementary reflectors. @param[in,out] dvn1 DOUBLE PRECISION array, dimension (N) The vector with the partial column norms. @param[in,out] dvn2 DOUBLE PRECISION array, dimension (N) The vector with the exact column norms. @param[in,out] dauxv COMPLEX*16 array, dimension (NB) Auxiliar vector. @param[in,out] dF COMPLEX*16 array, dimension (LDDF,NB) Matrix F**H = L * Y**H * A. @param[in] lddf INTEGER The leading dimension of the array F. LDDF >= max(1,N). 
@ingroup magma_zgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_zlaqps2_gpu( magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t *jpvt, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dvn1, magmaDouble_ptr dvn2, magmaDoubleComplex_ptr dauxv, magmaDoubleComplex_ptr dF, magma_int_t lddf) { #define dA(i_, j_) (dA + (i_) + (j_)*(ldda)) #define dF(i_, j_) (dF + (i_) + (j_)*(lddf)) magmaDoubleComplex c_zero = MAGMA_Z_MAKE( 0.,0.); magmaDoubleComplex c_one = MAGMA_Z_MAKE( 1.,0.); magmaDoubleComplex c_neg_one = MAGMA_Z_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; magma_int_t k, rk; magmaDoubleComplex tauk; magma_int_t pvt, itemp; double tol3z; magmaDoubleComplex_ptr dAkk = dauxv; dauxv += nb; double lsticc, *lsticcs; magma_dmalloc( &lsticcs, 1+256*(n+255)/256 ); tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon")); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione ); if (pvt != k) { magmablas_zswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; #if (defined(PRECISION_d) || defined(PRECISION_z)) //magma_dswap( 1, &dvn1[pvt], 1, &dvn1[k], 1 ); //magma_dswap( 1, &dvn2[pvt], 1, &dvn2[k], 1 ); magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset); #else //magma_sswap( 1, &dvn1[pvt], 1, &dvn1[k], 1 ); //magma_sswap( 1, &dvn2[pvt], 1, &dvn2[k], 1 ); magma_sswap(2, &dvn1[pvt], n+offset, &dvn1[k], n+offset); #endif magmablas_zswap( m, dA(0,pvt), ione, dA(0, k), ione ); } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. Optimization: multiply with beta=0; wait for vector and subtract */ if (k > 0) { magmablas_zgemv_conjv( m-rk, k, c_neg_one, dA(rk, 0), ldda, dF(k, 0), lddf, c_one, dA(rk, k), ione ); } /* Generate elementary reflector H(k). */ magma_zlarfg_gpu(m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k]); magma_zsetvector( 1, &c_one, 1, dA(rk, k), 1 ); /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1 || k > 0 ) magma_zgetvector( 1, &dtau[k], 1, &tauk, 1 ); if (k < n-1) { magma_zgemv( MagmaConjTrans, m-rk, n-k-1, tauk, dA( rk, k+1 ), ldda, dA( rk, k ), 1, c_zero, dF( k+1, k ), 1 ); } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K) := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K) so, F is (updated A)*V */ if (k > 0) { /*z__1 = MAGMA_Z_NEGATE( tauk ); magma_zgemv( MagmaConjTrans, m-rk, k, z__1, dA(rk, 0), ldda, dA(rk, k), ione, c_zero, dauxv, ione );*/ hipLaunchKernelGGL(( magma_zgemv_kernel3), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m-rk, dA(rk, 0), ldda, dA(rk, k), dauxv, dtau+k); /* I think we only need stricly lower-triangular part */ magma_zgemv( MagmaNoTrans, n-k-1, k, c_one, dF(k+1,0), lddf, dauxv, ione, c_one, dF(k+1,k), ione ); } /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. 
*/ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; /* left-looking update of rows, * * since F=A**H v with original A, so no right-looking */ magma_zgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2, c_neg_one, dA(rk, 0 ), ldda, dF(k+1,0 ), lddf, c_one, dA(rk, k+1), ldda ); } /* Update partial column norms. */ if (rk < min(m, n+offset)-1){ magmablas_dznrm2_row_check_adjust(n-k-1, tol3z, &dvn1[k+1], &dvn2[k+1], dA(rk,k+1), ldda, lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #else magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #endif } //*dA(rk, k) = Akk; //magma_zsetvector( 1, &Akk, 1, dA(rk, k), 1 ); //magmablas_zlacpy(MagmaUpperLower, 1, 1, dAkk, 1, dA(rk, k), 1); ++k; } // restore the diagonals magma_zcopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1 ); // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)) { i__1 = m - rk - 1; i__2 = n - *kb; magma_zgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, dA(rk+1, 0 ), ldda, dF(*kb, 0 ), lddf, c_one, dA(rk+1, *kb), ldda ); } /* Recomputation of difficult columns. */ if( lsticc > 0 ) { // printf( " -- recompute dnorms --\n" ); magmablas_dznrm2_check(m-rk-1, n-*kb, dA(rk+1,*kb), ldda, &dvn1[*kb], lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_dcopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n); #else magma_scopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n); #endif } magma_free(lsticcs); return MAGMA_SUCCESS; } /* magma_zlaqps */
44ddd5d51c3443070f7396fb1aa952dc2221dfb8.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> s d c */ #include "common_magma.h" #include "commonblas_z.h" #define PRECISION_z // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 /* --------------------------------------------------------------------------- */ /** Purpose ------- ZLAQPS computes a step of QR factorization with column pivoting of a complex M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] NB INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] dA COMPLEX*16 array, dimension (LDDA,N) On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] dtau COMPLEX*16 array, dimension (KB) The scalar factors of the elementary reflectors. @param[in,out] dvn1 DOUBLE PRECISION array, dimension (N) The vector with the partial column norms. @param[in,out] dvn2 DOUBLE PRECISION array, dimension (N) The vector with the exact column norms. @param[in,out] dauxv COMPLEX*16 array, dimension (NB) Auxiliar vector. @param[in,out] dF COMPLEX*16 array, dimension (LDDF,NB) Matrix F**H = L * Y**H * A. @param[in] lddf INTEGER The leading dimension of the array F. LDDF >= max(1,N). 
@ingroup magma_zgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_zlaqps2_gpu( magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t *jpvt, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dvn1, magmaDouble_ptr dvn2, magmaDoubleComplex_ptr dauxv, magmaDoubleComplex_ptr dF, magma_int_t lddf) { #define dA(i_, j_) (dA + (i_) + (j_)*(ldda)) #define dF(i_, j_) (dF + (i_) + (j_)*(lddf)) magmaDoubleComplex c_zero = MAGMA_Z_MAKE( 0.,0.); magmaDoubleComplex c_one = MAGMA_Z_MAKE( 1.,0.); magmaDoubleComplex c_neg_one = MAGMA_Z_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; magma_int_t k, rk; magmaDoubleComplex tauk; magma_int_t pvt, itemp; double tol3z; magmaDoubleComplex_ptr dAkk = dauxv; dauxv += nb; double lsticc, *lsticcs; magma_dmalloc( &lsticcs, 1+256*(n+255)/256 ); tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon")); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione ); if (pvt != k) { magmablas_zswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; #if (defined(PRECISION_d) || defined(PRECISION_z)) //magma_dswap( 1, &dvn1[pvt], 1, &dvn1[k], 1 ); //magma_dswap( 1, &dvn2[pvt], 1, &dvn2[k], 1 ); magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset); #else //magma_sswap( 1, &dvn1[pvt], 1, &dvn1[k], 1 ); //magma_sswap( 1, &dvn2[pvt], 1, &dvn2[k], 1 ); magma_sswap(2, &dvn1[pvt], n+offset, &dvn1[k], n+offset); #endif magmablas_zswap( m, dA(0,pvt), ione, dA(0, k), ione ); } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. Optimization: multiply with beta=0; wait for vector and subtract */ if (k > 0) { magmablas_zgemv_conjv( m-rk, k, c_neg_one, dA(rk, 0), ldda, dF(k, 0), lddf, c_one, dA(rk, k), ione ); } /* Generate elementary reflector H(k). */ magma_zlarfg_gpu(m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k]); magma_zsetvector( 1, &c_one, 1, dA(rk, k), 1 ); /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1 || k > 0 ) magma_zgetvector( 1, &dtau[k], 1, &tauk, 1 ); if (k < n-1) { magma_zgemv( MagmaConjTrans, m-rk, n-k-1, tauk, dA( rk, k+1 ), ldda, dA( rk, k ), 1, c_zero, dF( k+1, k ), 1 ); } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K) := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K) so, F is (updated A)*V */ if (k > 0) { /*z__1 = MAGMA_Z_NEGATE( tauk ); magma_zgemv( MagmaConjTrans, m-rk, k, z__1, dA(rk, 0), ldda, dA(rk, k), ione, c_zero, dauxv, ione );*/ magma_zgemv_kernel3<<< k, BLOCK_SIZE, 0, magma_stream >>>(m-rk, dA(rk, 0), ldda, dA(rk, k), dauxv, dtau+k); /* I think we only need stricly lower-triangular part */ magma_zgemv( MagmaNoTrans, n-k-1, k, c_one, dF(k+1,0), lddf, dauxv, ione, c_one, dF(k+1,k), ione ); } /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. 
*/ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; /* left-looking update of rows, * * since F=A**H v with original A, so no right-looking */ magma_zgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2, c_neg_one, dA(rk, 0 ), ldda, dF(k+1,0 ), lddf, c_one, dA(rk, k+1), ldda ); } /* Update partial column norms. */ if (rk < min(m, n+offset)-1){ magmablas_dznrm2_row_check_adjust(n-k-1, tol3z, &dvn1[k+1], &dvn2[k+1], dA(rk,k+1), ldda, lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #else magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 ); #endif } //*dA(rk, k) = Akk; //magma_zsetvector( 1, &Akk, 1, dA(rk, k), 1 ); //magmablas_zlacpy(MagmaUpperLower, 1, 1, dAkk, 1, dA(rk, k), 1); ++k; } // restore the diagonals magma_zcopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1 ); // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)) { i__1 = m - rk - 1; i__2 = n - *kb; magma_zgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, dA(rk+1, 0 ), ldda, dF(*kb, 0 ), lddf, c_one, dA(rk+1, *kb), ldda ); } /* Recomputation of difficult columns. */ if( lsticc > 0 ) { // printf( " -- recompute dnorms --\n" ); magmablas_dznrm2_check(m-rk-1, n-*kb, dA(rk+1,*kb), ldda, &dvn1[*kb], lsticcs); #if defined(PRECISION_d) || defined(PRECISION_z) magma_dcopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n); #else magma_scopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n); #endif } magma_free(lsticcs); return MAGMA_SUCCESS; } /* magma_zlaqps */
20b772f140511214790a447a69a7c2686760cef8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void thresholdSIMD(unsigned int *data, unsigned int threshold) {
    int thread = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x + threadIdx.x;
    unsigned int *ptr = data + thread;
    *ptr = __vcmpgeu4(*ptr, threshold);
}
20b772f140511214790a447a69a7c2686760cef8.cu
#include "includes.h" __global__ void thresholdSIMD(unsigned int *data, unsigned int threshold) { int thread = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x + threadIdx.x; unsigned int *ptr = data + thread; *ptr = __vcmpgeu4(*ptr, threshold); }
17a839a889df9d6d6fd51cad8c287bccb0996cc9.hip
// !!! This is a file automatically generated by hipify!!! // // Created by root on 2020/11/19. // #include "stdio.h" #include <hip/hip_runtime.h> #define DIM 128 __global__ void reduceGmem(int *g_idata, int *g_odata, int n) { int tid = threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= n) { return; } // if current thread id is less than half of block dim, reduce in place if (blockDim.x >= 1024 && tid < 512) { idata[tid] += idata[tid + 512]; } if (blockDim.x >= 512 && tid < 256) { idata[tid] += idata[tid + 256]; } if (blockDim.x >= 256 && tid < 128) { idata[tid] += idata[tid + 128]; } if (blockDim.x >= 128 && tid < 64) { idata[tid] += idata[tid + 64]; } __syncthreads(); // unrolling warp into the first thread of this warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; // I only applied block dim = 128, so tid + 64 has been reduced, but tid + 32 not vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result stored in thread 0 into output if (tid == 0) { g_odata[blockIdx.x] = idata[0]; } } __global__ void reduceGmemUnrolling4(int *g_idata, int *g_odata, int n) { int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // one thread per block processes 4 data int *idata = g_idata + blockDim.x * blockIdx.x * 4; if (idx >= n) { return; } // process 4 data per thread int a = 0, b = 0, c = 0, d = 0; a = g_idata[idx]; if (idx + blockDim.x < n) { b = g_idata[idx + blockDim.x]; } if (idx + 2 * blockDim.x < n) { c = g_idata[idx + blockDim.x * 2]; } if (idx + 3 * blockDim.x < n) { d = g_idata[idx + blockDim.x * 3]; } g_idata[idx] = a + b + c + d; __syncthreads(); if (blockDim.x >= 1024 && tid < 512) { idata[tid] += idata[tid + 512]; } if (blockDim.x >= 512 && tid < 256) { idata[tid] += idata[tid + 256]; } if (blockDim.x >= 256 && tid < 128) { idata[tid] += idata[tid + 128]; } if (blockDim.x >= 128 && tid < 64) { idata[tid] += idata[tid + 64]; } __syncthreads(); if (tid < 32) { volatile int *s_vmem = idata; s_vmem[tid] += s_vmem[tid + 32]; s_vmem[tid] += s_vmem[tid + 16]; s_vmem[tid] += s_vmem[tid + 8]; s_vmem[tid] += s_vmem[tid + 4]; s_vmem[tid] += s_vmem[tid + 2]; s_vmem[tid] += s_vmem[tid + 1]; } if (tid == 0) { g_odata[blockIdx.x] = idata[0]; } } __global__ void reduceSMemUnrolling4(int *g_idata, int *g_odata, int n) { int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x * 4 + tid; if (idx >= n) { return; } extern __shared__ int smem[]; // I use dynamic shared memory to reduce data int a = 0, b = 0, c = 0, d = 0; // Each thread still processes 4 data a = g_idata[idx]; if (idx + blockDim.x < n) { b = g_idata[idx + blockDim.x]; } if (idx + 2 * blockDim.x < n) { c = g_idata[idx + 2 * blockDim.x]; } if (idx + 3 * blockDim.x < n) { d = g_idata[idx + 3 * blockDim.x]; } smem[tid] = a + b + c + d; // Save result of 4 data into shared memory __syncthreads(); // Reduce data for block using shared memory if (blockDim.x >= 1024 && tid < 512) { smem[tid] += smem[tid + 512]; } if (blockDim.x >= 512 && tid < 256) { smem[tid] += smem[tid + 256]; } if (blockDim.x >= 256 && tid < 128) { smem[tid] += smem[tid + 128]; } if (blockDim.x >= 128 && tid < 64) { smem[tid] += smem[tid + 64]; } __syncthreads(); if (tid < 32) { volatile int* s_vmem = smem; s_vmem[tid] += s_vmem[tid + 32]; s_vmem[tid] += s_vmem[tid + 16]; s_vmem[tid] += s_vmem[tid + 8]; s_vmem[tid] += s_vmem[tid + 4]; s_vmem[tid] += 
s_vmem[tid + 2]; s_vmem[tid] += s_vmem[tid + 1]; } if (tid == 0) { g_odata[blockIdx.x] = smem[0]; } } void test(int size) { int sum = 0; for (int i = 0; i < size; i++) { sum += i; } printf("Target is %d\n", sum); } int main() { int size = 1 << 22; int blockSize = DIM; test(size); // verify the result dim3 blockDim(blockSize); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); printf("grid:(%d), block:(%d)\n", gridDim.x, blockDim.x); int nBytes = size * sizeof(int); int *h_idata = (int *) malloc(nBytes); int *h_odata = (int *) malloc(gridDim.x * sizeof(int)); // Valid output per block is stored in the first thread of each block. // So the number of output to be added is equal to the grid dim int *d_odata; int *d_idata; hipMalloc(&d_idata, nBytes); hipMalloc(&d_odata, gridDim.x * sizeof(int)); for (int i = 0; i < size; i++) { h_idata[i] = i; } hipMemcpy(d_idata, h_idata, nBytes, hipMemcpyHostToDevice); hipLaunchKernelGGL(( reduceGmem), dim3(gridDim), dim3(blockDim), 0, 0, d_idata, d_odata, size); hipDeviceSynchronize(); hipMemcpy(h_odata, d_odata, gridDim.x * sizeof(int), hipMemcpyDeviceToHost); int sum = 0; for (int i = 0; i < gridDim.x; i++) { sum += h_odata[i]; } printf("\n=========\n"); printf("sum = %d\n", sum); memset(h_odata, 0, gridDim.x * sizeof(int)); hipMemset(d_odata, 0, gridDim.x * sizeof(int)); hipMemcpy(d_idata, h_idata, nBytes, hipMemcpyHostToDevice); dim3 gridDim_(gridDim.x / 4); hipLaunchKernelGGL(( reduceGmemUnrolling4), dim3(gridDim_), dim3(blockDim), 0, 0, d_idata, d_odata, size); hipDeviceSynchronize(); hipMemcpy(h_odata, d_odata, gridDim_.x * sizeof(int), hipMemcpyDeviceToHost); sum = 0; for (int i = 0; i < gridDim_.x; i++) { sum += h_odata[i]; } printf("\n=========\n"); printf("sum = %d\n", sum); memset(h_odata, 0, gridDim.x * sizeof(int)); hipMemset(d_odata, 0, gridDim.x * sizeof(int)); hipMemcpy(d_idata, h_idata, nBytes, hipMemcpyHostToDevice); hipLaunchKernelGGL(( reduceSMemUnrolling4), dim3(gridDim_), dim3(blockDim), DIM * sizeof(int ), 0, d_idata, d_odata, size); hipDeviceSynchronize(); hipMemcpy(h_odata, d_odata, gridDim_.x * sizeof(int), hipMemcpyDeviceToHost); sum = 0; for (int i = 0; i < gridDim_.x; i++) { sum += h_odata[i]; } printf("\n=========\n"); printf("sum = %d\n", sum); hipFree(d_idata); hipFree(d_odata); free(h_idata); free(h_odata); return 0; }
17a839a889df9d6d6fd51cad8c287bccb0996cc9.cu
// // Created by root on 2020/11/19. // #include "stdio.h" #include <cuda_runtime.h> #define DIM 128 __global__ void reduceGmem(int *g_idata, int *g_odata, int n) { int tid = threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= n) { return; } // if current thread id is less than half of block dim, reduce in place if (blockDim.x >= 1024 && tid < 512) { idata[tid] += idata[tid + 512]; } if (blockDim.x >= 512 && tid < 256) { idata[tid] += idata[tid + 256]; } if (blockDim.x >= 256 && tid < 128) { idata[tid] += idata[tid + 128]; } if (blockDim.x >= 128 && tid < 64) { idata[tid] += idata[tid + 64]; } __syncthreads(); // unrolling warp into the first thread of this warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; // I only applied block dim = 128, so tid + 64 has been reduced, but tid + 32 not vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result stored in thread 0 into output if (tid == 0) { g_odata[blockIdx.x] = idata[0]; } } __global__ void reduceGmemUnrolling4(int *g_idata, int *g_odata, int n) { int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // one thread per block processes 4 data int *idata = g_idata + blockDim.x * blockIdx.x * 4; if (idx >= n) { return; } // process 4 data per thread int a = 0, b = 0, c = 0, d = 0; a = g_idata[idx]; if (idx + blockDim.x < n) { b = g_idata[idx + blockDim.x]; } if (idx + 2 * blockDim.x < n) { c = g_idata[idx + blockDim.x * 2]; } if (idx + 3 * blockDim.x < n) { d = g_idata[idx + blockDim.x * 3]; } g_idata[idx] = a + b + c + d; __syncthreads(); if (blockDim.x >= 1024 && tid < 512) { idata[tid] += idata[tid + 512]; } if (blockDim.x >= 512 && tid < 256) { idata[tid] += idata[tid + 256]; } if (blockDim.x >= 256 && tid < 128) { idata[tid] += idata[tid + 128]; } if (blockDim.x >= 128 && tid < 64) { idata[tid] += idata[tid + 64]; } __syncthreads(); if (tid < 32) { volatile int *s_vmem = idata; s_vmem[tid] += s_vmem[tid + 32]; s_vmem[tid] += s_vmem[tid + 16]; s_vmem[tid] += s_vmem[tid + 8]; s_vmem[tid] += s_vmem[tid + 4]; s_vmem[tid] += s_vmem[tid + 2]; s_vmem[tid] += s_vmem[tid + 1]; } if (tid == 0) { g_odata[blockIdx.x] = idata[0]; } } __global__ void reduceSMemUnrolling4(int *g_idata, int *g_odata, int n) { int tid = threadIdx.x; int idx = blockIdx.x * blockDim.x * 4 + tid; if (idx >= n) { return; } extern __shared__ int smem[]; // I use dynamic shared memory to reduce data int a = 0, b = 0, c = 0, d = 0; // Each thread still processes 4 data a = g_idata[idx]; if (idx + blockDim.x < n) { b = g_idata[idx + blockDim.x]; } if (idx + 2 * blockDim.x < n) { c = g_idata[idx + 2 * blockDim.x]; } if (idx + 3 * blockDim.x < n) { d = g_idata[idx + 3 * blockDim.x]; } smem[tid] = a + b + c + d; // Save result of 4 data into shared memory __syncthreads(); // Reduce data for block using shared memory if (blockDim.x >= 1024 && tid < 512) { smem[tid] += smem[tid + 512]; } if (blockDim.x >= 512 && tid < 256) { smem[tid] += smem[tid + 256]; } if (blockDim.x >= 256 && tid < 128) { smem[tid] += smem[tid + 128]; } if (blockDim.x >= 128 && tid < 64) { smem[tid] += smem[tid + 64]; } __syncthreads(); if (tid < 32) { volatile int* s_vmem = smem; s_vmem[tid] += s_vmem[tid + 32]; s_vmem[tid] += s_vmem[tid + 16]; s_vmem[tid] += s_vmem[tid + 8]; s_vmem[tid] += s_vmem[tid + 4]; s_vmem[tid] += s_vmem[tid + 2]; s_vmem[tid] += s_vmem[tid + 1]; } if (tid == 
0) { g_odata[blockIdx.x] = smem[0]; } } void test(int size) { int sum = 0; for (int i = 0; i < size; i++) { sum += i; } printf("Target is %d\n", sum); } int main() { int size = 1 << 22; int blockSize = DIM; test(size); // verify the result dim3 blockDim(blockSize); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); printf("grid:(%d), block:(%d)\n", gridDim.x, blockDim.x); int nBytes = size * sizeof(int); int *h_idata = (int *) malloc(nBytes); int *h_odata = (int *) malloc(gridDim.x * sizeof(int)); // Valid output per block is stored in the first thread of each block. // So the number of output to be added is equal to the grid dim int *d_odata; int *d_idata; cudaMalloc(&d_idata, nBytes); cudaMalloc(&d_odata, gridDim.x * sizeof(int)); for (int i = 0; i < size; i++) { h_idata[i] = i; } cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice); reduceGmem<<<gridDim, blockDim>>>(d_idata, d_odata, size); cudaDeviceSynchronize(); cudaMemcpy(h_odata, d_odata, gridDim.x * sizeof(int), cudaMemcpyDeviceToHost); int sum = 0; for (int i = 0; i < gridDim.x; i++) { sum += h_odata[i]; } printf("\n=========\n"); printf("sum = %d\n", sum); memset(h_odata, 0, gridDim.x * sizeof(int)); cudaMemset(d_odata, 0, gridDim.x * sizeof(int)); cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice); dim3 gridDim_(gridDim.x / 4); reduceGmemUnrolling4<<<gridDim_, blockDim>>>(d_idata, d_odata, size); cudaDeviceSynchronize(); cudaMemcpy(h_odata, d_odata, gridDim_.x * sizeof(int), cudaMemcpyDeviceToHost); sum = 0; for (int i = 0; i < gridDim_.x; i++) { sum += h_odata[i]; } printf("\n=========\n"); printf("sum = %d\n", sum); memset(h_odata, 0, gridDim.x * sizeof(int)); cudaMemset(d_odata, 0, gridDim.x * sizeof(int)); cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice); reduceSMemUnrolling4<<<gridDim_, blockDim, DIM * sizeof(int )>>>(d_idata, d_odata, size); cudaDeviceSynchronize(); cudaMemcpy(h_odata, d_odata, gridDim_.x * sizeof(int), cudaMemcpyDeviceToHost); sum = 0; for (int i = 0; i < gridDim_.x; i++) { sum += h_odata[i]; } printf("\n=========\n"); printf("sum = %d\n", sum); cudaFree(d_idata); cudaFree(d_odata); free(h_idata); free(h_odata); return 0; }
edb573150d9e5afd1e5562ae0a27905de5ceef6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// Copyright (c) 2013, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: Stencil /// /// PURPOSE: This program tests the efficiency with which a space-invariant, /// linear, symmetric filter (stencil) can be applied to a square /// grid or image. /// /// USAGE: The program takes as input the linear /// dimension of the grid, and the number of iterations on the grid /// /// <progname> <iterations> <grid size> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than standard C functions, the following functions are used in /// this program: /// wtime() /// /// HISTORY: - Written by Rob Van der Wijngaart, February 2009. /// - RvdW: Removed unrolling pragmas for clarity; /// added constant to array "in" at end of each iteration to force /// refreshing of neighbor data in parallel versions; August 2013 /// C++11-ification by Jeff Hammond, May 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" #include "stencil_cuda.hpp" __global__ void nothing(const int n, const prk_float * in, prk_float * out) { //printf("You are trying to use a stencil that does not exist.\n"); //printf("Please generate the new stencil using the code generator.\n"); // n will never be zero - this is to silence compiler warnings. 
//if (n==0) printf("in=%p out=%p\n", in, out); //abort(); } __global__ void add(const int n, prk_float * in) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<n) && (j<n)) { in[i*n+j] += (prk_float)1; } } int main(int argc, char* argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUDA Stencil execution on 2D grid" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// // Process and test input parameters ////////////////////////////////////////////////////////////////////// int iterations, n, radius, tile_size; bool star = true; try { if (argc < 3) { throw "Usage: <# iterations> <array dimension> [<tile_size> <star/grid> <radius>]"; } // number of times to run the algorithm iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } // linear grid dimension n = std::atoi(argv[2]); if (n < 1) { throw "ERROR: grid dimension must be positive"; } else if (n > prk::get_max_matrix_size()) { throw "ERROR: grid dimension too large - overflow risk"; } // default tile size for tiling of local transpose tile_size = 32; if (argc > 3) { tile_size = std::atoi(argv[3]); if (tile_size <= 0) tile_size = n; if (tile_size > n) tile_size = n; if (tile_size > 32) { std::cout << "Warning: tile_size > 32 may lead to incorrect results (observed for CUDA 9.0 on GV100).\n"; } } // stencil pattern if (argc > 4) { auto stencil = std::string(argv[4]); auto grid = std::string("grid"); star = (stencil == grid) ? false : true; } // stencil radius radius = 2; if (argc > 5) { radius = std::atoi(argv[5]); } if ( (radius < 1) || (2*radius+1 > n) ) { throw "ERROR: Stencil radius negative or too large"; } } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Grid size = " << n << std::endl; std::cout << "Tile size = " << tile_size << std::endl; std::cout << "Type of stencil = " << (star ? 
"star" : "grid") << std::endl; std::cout << "Radius of stencil = " << radius << std::endl; auto stencil = nothing; if (star) { switch (radius) { case 1: stencil = star1; break; case 2: stencil = star2; break; case 3: stencil = star3; break; case 4: stencil = star4; break; case 5: stencil = star5; break; } } else { switch (radius) { case 1: stencil = grid1; break; case 2: stencil = grid2; break; case 3: stencil = grid3; break; case 4: stencil = grid4; break; case 5: stencil = grid5; break; } } dim3 dimGrid(prk::divceil(n,tile_size),prk::divceil(n,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// double stencil_time{0}; const size_t nelems = (size_t)n * (size_t)n; const size_t bytes = nelems * sizeof(prk_float); prk_float * h_in; prk_float * h_out; #ifndef __CORIANDERCC__ prk::CUDA::check( hipHostMalloc((void**)&h_in, bytes) ); prk::CUDA::check( hipHostMalloc((void**)&h_out, bytes) ); #else h_in = new prk_float[nelems]; h_out = new prk_float[nelems]; #endif for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { h_in[i*n+j] = static_cast<prk_float>(i+j); h_out[i*n+j] = static_cast<prk_float>(0); } } // copy input from host to device prk_float * d_in; prk_float * d_out; prk::CUDA::check( hipMalloc((void**)&d_in, bytes) ); prk::CUDA::check( hipMalloc((void**)&d_out, bytes) ); prk::CUDA::check( hipMemcpy(d_in, &(h_in[0]), bytes, hipMemcpyHostToDevice) ); prk::CUDA::check( hipMemcpy(d_out, &(h_out[0]), bytes, hipMemcpyHostToDevice) ); for (int iter = 0; iter<=iterations; iter++) { if (iter==1) stencil_time = prk::wtime(); // Apply the stencil operator hipLaunchKernelGGL(( stencil), dim3(dimGrid), dim3(dimBlock), 0, 0, n, d_in, d_out); // Add constant to solution to force refresh of neighbor data, if any hipLaunchKernelGGL(( add), dim3(dimGrid), dim3(dimBlock), 0, 0, n, d_in); #ifndef __CORIANDERCC__ // silence "ignoring hipDeviceSynchronize for now" warning prk::CUDA::check( hipDeviceSynchronize() ); #endif } stencil_time = prk::wtime() - stencil_time; // copy output back to host prk::CUDA::check( hipMemcpy(&(h_out[0]), d_out, bytes, hipMemcpyDeviceToHost) ); #ifdef VERBOSE // copy input back to host - debug only prk::CUDA::check( hipMemcpy(&(h_in[0]), d_in, bytes, hipMemcpyDeviceToHost) ); #endif prk::CUDA::check( hipFree(d_out) ); prk::CUDA::check( hipFree(d_in) ); ////////////////////////////////////////////////////////////////////// // Analyze and output results. ////////////////////////////////////////////////////////////////////// // interior of grid with respect to stencil size_t active_points = static_cast<size_t>(n-2*radius)*static_cast<size_t>(n-2*radius); double norm = 0.0; for (int i=radius; i<n-radius; i++) { for (int j=radius; j<n-radius; j++) { norm += prk::abs(h_out[i*n+j]); } } norm /= active_points; // verify correctness const double epsilon = 1.0e-8; double reference_norm = 2.*(iterations+1.); if (prk::abs(norm-reference_norm) > epsilon) { std::cout << "ERROR: L1 norm = " << norm << " Reference L1 norm = " << reference_norm << std::endl; return 1; } else { std::cout << "Solution validates" << std::endl; #ifdef VERBOSE std::cout << "L1 norm = " << norm << " Reference L1 norm = " << reference_norm << std::endl; #endif const int stencil_size = star ? 
4*radius+1 : (2*radius+1)*(2*radius+1); size_t flops = (2L*(size_t)stencil_size+1L) * active_points; auto avgtime = stencil_time/iterations; std::cout << "Rate (MFlops/s): " << 1.0e-6 * static_cast<double>(flops)/avgtime << " Avg time (s): " << avgtime << std::endl; } return 0; }
edb573150d9e5afd1e5562ae0a27905de5ceef6f.cu
/// /// Copyright (c) 2013, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: Stencil /// /// PURPOSE: This program tests the efficiency with which a space-invariant, /// linear, symmetric filter (stencil) can be applied to a square /// grid or image. /// /// USAGE: The program takes as input the linear /// dimension of the grid, and the number of iterations on the grid /// /// <progname> <iterations> <grid size> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// FUNCTIONS CALLED: /// /// Other than standard C functions, the following functions are used in /// this program: /// wtime() /// /// HISTORY: - Written by Rob Van der Wijngaart, February 2009. /// - RvdW: Removed unrolling pragmas for clarity; /// added constant to array "in" at end of each iteration to force /// refreshing of neighbor data in parallel versions; August 2013 /// C++11-ification by Jeff Hammond, May 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" #include "stencil_cuda.hpp" __global__ void nothing(const int n, const prk_float * in, prk_float * out) { //printf("You are trying to use a stencil that does not exist.\n"); //printf("Please generate the new stencil using the code generator.\n"); // n will never be zero - this is to silence compiler warnings. 
//if (n==0) printf("in=%p out=%p\n", in, out); //abort(); } __global__ void add(const int n, prk_float * in) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<n) && (j<n)) { in[i*n+j] += (prk_float)1; } } int main(int argc, char* argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUDA Stencil execution on 2D grid" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// // Process and test input parameters ////////////////////////////////////////////////////////////////////// int iterations, n, radius, tile_size; bool star = true; try { if (argc < 3) { throw "Usage: <# iterations> <array dimension> [<tile_size> <star/grid> <radius>]"; } // number of times to run the algorithm iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } // linear grid dimension n = std::atoi(argv[2]); if (n < 1) { throw "ERROR: grid dimension must be positive"; } else if (n > prk::get_max_matrix_size()) { throw "ERROR: grid dimension too large - overflow risk"; } // default tile size for tiling of local transpose tile_size = 32; if (argc > 3) { tile_size = std::atoi(argv[3]); if (tile_size <= 0) tile_size = n; if (tile_size > n) tile_size = n; if (tile_size > 32) { std::cout << "Warning: tile_size > 32 may lead to incorrect results (observed for CUDA 9.0 on GV100).\n"; } } // stencil pattern if (argc > 4) { auto stencil = std::string(argv[4]); auto grid = std::string("grid"); star = (stencil == grid) ? false : true; } // stencil radius radius = 2; if (argc > 5) { radius = std::atoi(argv[5]); } if ( (radius < 1) || (2*radius+1 > n) ) { throw "ERROR: Stencil radius negative or too large"; } } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Grid size = " << n << std::endl; std::cout << "Tile size = " << tile_size << std::endl; std::cout << "Type of stencil = " << (star ? 
"star" : "grid") << std::endl; std::cout << "Radius of stencil = " << radius << std::endl; auto stencil = nothing; if (star) { switch (radius) { case 1: stencil = star1; break; case 2: stencil = star2; break; case 3: stencil = star3; break; case 4: stencil = star4; break; case 5: stencil = star5; break; } } else { switch (radius) { case 1: stencil = grid1; break; case 2: stencil = grid2; break; case 3: stencil = grid3; break; case 4: stencil = grid4; break; case 5: stencil = grid5; break; } } dim3 dimGrid(prk::divceil(n,tile_size),prk::divceil(n,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// double stencil_time{0}; const size_t nelems = (size_t)n * (size_t)n; const size_t bytes = nelems * sizeof(prk_float); prk_float * h_in; prk_float * h_out; #ifndef __CORIANDERCC__ prk::CUDA::check( cudaMallocHost((void**)&h_in, bytes) ); prk::CUDA::check( cudaMallocHost((void**)&h_out, bytes) ); #else h_in = new prk_float[nelems]; h_out = new prk_float[nelems]; #endif for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { h_in[i*n+j] = static_cast<prk_float>(i+j); h_out[i*n+j] = static_cast<prk_float>(0); } } // copy input from host to device prk_float * d_in; prk_float * d_out; prk::CUDA::check( cudaMalloc((void**)&d_in, bytes) ); prk::CUDA::check( cudaMalloc((void**)&d_out, bytes) ); prk::CUDA::check( cudaMemcpy(d_in, &(h_in[0]), bytes, cudaMemcpyHostToDevice) ); prk::CUDA::check( cudaMemcpy(d_out, &(h_out[0]), bytes, cudaMemcpyHostToDevice) ); for (int iter = 0; iter<=iterations; iter++) { if (iter==1) stencil_time = prk::wtime(); // Apply the stencil operator stencil<<<dimGrid, dimBlock>>>(n, d_in, d_out); // Add constant to solution to force refresh of neighbor data, if any add<<<dimGrid, dimBlock>>>(n, d_in); #ifndef __CORIANDERCC__ // silence "ignoring cudaDeviceSynchronize for now" warning prk::CUDA::check( cudaDeviceSynchronize() ); #endif } stencil_time = prk::wtime() - stencil_time; // copy output back to host prk::CUDA::check( cudaMemcpy(&(h_out[0]), d_out, bytes, cudaMemcpyDeviceToHost) ); #ifdef VERBOSE // copy input back to host - debug only prk::CUDA::check( cudaMemcpy(&(h_in[0]), d_in, bytes, cudaMemcpyDeviceToHost) ); #endif prk::CUDA::check( cudaFree(d_out) ); prk::CUDA::check( cudaFree(d_in) ); ////////////////////////////////////////////////////////////////////// // Analyze and output results. ////////////////////////////////////////////////////////////////////// // interior of grid with respect to stencil size_t active_points = static_cast<size_t>(n-2*radius)*static_cast<size_t>(n-2*radius); double norm = 0.0; for (int i=radius; i<n-radius; i++) { for (int j=radius; j<n-radius; j++) { norm += prk::abs(h_out[i*n+j]); } } norm /= active_points; // verify correctness const double epsilon = 1.0e-8; double reference_norm = 2.*(iterations+1.); if (prk::abs(norm-reference_norm) > epsilon) { std::cout << "ERROR: L1 norm = " << norm << " Reference L1 norm = " << reference_norm << std::endl; return 1; } else { std::cout << "Solution validates" << std::endl; #ifdef VERBOSE std::cout << "L1 norm = " << norm << " Reference L1 norm = " << reference_norm << std::endl; #endif const int stencil_size = star ? 
4*radius+1 : (2*radius+1)*(2*radius+1); size_t flops = (2L*(size_t)stencil_size+1L) * active_points; auto avgtime = stencil_time/iterations; std::cout << "Rate (MFlops/s): " << 1.0e-6 * static_cast<double>(flops)/avgtime << " Avg time (s): " << avgtime << std::endl; } return 0; }
9bfee8acd69cf25ca38585d9d81622d3d70a5144.hip
// !!! This is a file automatically generated by hipify!!!
#include "cuElectron.h"
#include "Electron.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Constants.h"

void /*cuElectron::*/cuMalloc(void **cuElectrons, int memSize)
{
    hipMalloc(cuElectrons, memSize);
}

extern "C" __global__ void pole_cuda(TPL_Point *cuElectrons, int b1, int e1, float QK, float MK)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if (i<e1-b1)
    {
        i+=b1;
        float v;
        cuElectrons[i].ax=-1*QK*cuElectrons[i].vy*B_const/MK;
        //Y
        if ( std::abs(cuElectrons[i].z- Z1) <= std::abs(cuElectrons[i].z- Z2) )
        {
            v=cos(1.57*std::abs(cuElectrons[i].z-Z1)/(Z2-((Z1+Z2)/2)));
        }
        else
        {
            v=cos(1.57*std::abs(cuElectrons[i].z-Z2)/(Z2-((Z1+Z2)/2)));
        }
        cuElectrons[i].ay=(QK*cuElectrons[i].vx*B_const/MK)-(QK*E_const/MK)+(QK*E_const/MK)*std::abs(v*v*v*v*v*v*v*v*v*v*v*v*v*v*v);
        //Z
        if(std::abs(cuElectrons[i].z-Z1)<=std::abs(cuElectrons[i].z-Z2))
        {
            v=cos(1.57*std::abs(cuElectrons[i].z-Z1)/(Z2-((Z1+Z2)/2)));
            cuElectrons[i].az=std::abs((QK*E_const/MK)*v*v*v*v*v*v*v*v*v*v*v*v*v*v*v);
        }
        else
        {
            v=cos(1.57*std::abs(cuElectrons[i].z-Z2)/(Z2-((Z1+Z2)/2)));
            cuElectrons[i].az=(-1)*std::abs((QK*E_const/MK)*v*v*v*v*v*v*v*v*v*v*v*v*v*v*v);
        }
    }
}

extern "C" __global__ void raschet(int *cuElectrons_number, TPL_Point *cuElectrons, int b1,int e1, int b2,int e2, int Count, float MK, float QK, int final)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    i*=32;
    if (i<e1-b1)
    {
        i+=b1;
        float dx,dy,dz,powe;
        if (i<final)
            for(int j=b2;j<e2;++j)
                if (j<Count)
                {
                    for (int t=0;t<32;t++)
                        if(((cuElectrons_number[i+t]-Kx)<=cuElectrons_number[j])&&((cuElectrons_number[i+t]+Kx)>=cuElectrons_number[j]))
                            if((i+t!=j)&&(i+t<final))
                            {
                                dx=cuElectrons[i+t].x-cuElectrons[j].x;
                                dy=cuElectrons[i+t].y-cuElectrons[j].y;
                                dz=cuElectrons[i+t].z-cuElectrons[j].z;
                                powe=(float)pow((float)(dx*dx+dy*dy+dz*dz),(float)0.5)*num_k/MK*QK*QK/(dx*dx+dy*dy+dz*dz);
                                cuElectrons[i+t].ax+=dx/powe;
                                cuElectrons[i+t].ay+=dy/powe;
                                cuElectrons[i+t].az+=dz/powe;
                            }
                    /*if(((cuElectrons_number[i]-Kx)<=cuElectrons_number[j])&&((cuElectrons_number[i]+Kx)>=cuElectrons_number[j]))
                        if(i!=j)
                        {
                            dx=cuElectrons[i].x-cuElectrons[j].x;
                            dy=cuElectrons[i].y-cuElectrons[j].y;
                            dz=cuElectrons[i].z-cuElectrons[j].z;
                            powe=(float)pow((float)(dx*dx+dy*dy+dz*dz),(float)0.5)*k/MK*QK*QK/(dx*dx+dy*dy+dz*dz);
                            cuElectrons[i].ax+=dx/powe;
                            cuElectrons[i].ay+=dy/powe;
                            cuElectrons[i].az+=dz/powe;
                        }*/
                }
    }
}

extern "C" __global__ void pereschet(int *cuElectrons_number, TPL_Point *cuElectrons, int b1, int e1, float step)
{
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if (i<e1-b1)
    {
        i+=b1;
        cuElectrons[i].x=cuElectrons[i].x+cuElectrons[i].vx*step+(cuElectrons[i].ax*step*step/2);
        cuElectrons[i].vx+=cuElectrons[i].ax*step;
        cuElectrons[i].y=cuElectrons[i].y+cuElectrons[i].vy*step+(cuElectrons[i].ay*step*step/2);
        cuElectrons[i].vy+=cuElectrons[i].ay*step;
        cuElectrons[i].z=cuElectrons[i].z+cuElectrons[i].vz*step+(cuElectrons[i].az*step*step/2);
        cuElectrons[i].vz+=cuElectrons[i].az*step;
        // calculating the electron's position
    }
}
9bfee8acd69cf25ca38585d9d81622d3d70a5144.cu
#include "cuElectron.h" #include "Electron.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "Constants.h" void /*cuElectron::*/cuMalloc(void **cuElectrons, int memSize) { cudaMalloc(cuElectrons, memSize); } extern "C" __global__ void pole_cuda(TPL_Point *cuElectrons, int b1, int e1, float QK, float MK) { int i = blockIdx.x*blockDim.x+threadIdx.x; if (i<e1-b1) { i+=b1; float v; cuElectrons[i].ax=-1*QK*cuElectrons[i].vy*B_const/MK; //Y if ( std::abs(cuElectrons[i].z- Z1) <= std::abs(cuElectrons[i].z- Z2) ) { v=cos(1.57*std::abs(cuElectrons[i].z-Z1)/(Z2-((Z1+Z2)/2))); } else { v=cos(1.57*std::abs(cuElectrons[i].z-Z2)/(Z2-((Z1+Z2)/2))); } cuElectrons[i].ay=(QK*cuElectrons[i].vx*B_const/MK)-(QK*E_const/MK)+(QK*E_const/MK)*std::abs(v*v*v*v*v*v*v*v*v*v*v*v*v*v*v); //Z if(std::abs(cuElectrons[i].z-Z1)<=std::abs(cuElectrons[i].z-Z2)) { v=cos(1.57*std::abs(cuElectrons[i].z-Z1)/(Z2-((Z1+Z2)/2))); cuElectrons[i].az=std::abs((QK*E_const/MK)*v*v*v*v*v*v*v*v*v*v*v*v*v*v*v); } else { v=cos(1.57*std::abs(cuElectrons[i].z-Z2)/(Z2-((Z1+Z2)/2))); cuElectrons[i].az=(-1)*std::abs((QK*E_const/MK)*v*v*v*v*v*v*v*v*v*v*v*v*v*v*v); } } } extern "C" __global__ void raschet(int *cuElectrons_number, TPL_Point *cuElectrons, int b1,int e1, int b2,int e2, int Count, float MK, float QK, int final) { int i = blockIdx.x*blockDim.x+threadIdx.x; i*=32; if (i<e1-b1) { i+=b1; float dx,dy,dz,powe; if (i<final) for(int j=b2;j<e2;++j) if (j<Count) { for (int t=0;t<32;t++) if(((cuElectrons_number[i+t]-Kx)<=cuElectrons_number[j])&&((cuElectrons_number[i+t]+Kx)>=cuElectrons_number[j])) if((i+t!=j)&&(i+t<final)) { dx=cuElectrons[i+t].x-cuElectrons[j].x; dy=cuElectrons[i+t].y-cuElectrons[j].y; dz=cuElectrons[i+t].z-cuElectrons[j].z; powe=(float)pow((float)(dx*dx+dy*dy+dz*dz),(float)0.5)*num_k/MK*QK*QK/(dx*dx+dy*dy+dz*dz); cuElectrons[i+t].ax+=dx/powe; cuElectrons[i+t].ay+=dy/powe; cuElectrons[i+t].az+=dz/powe; } /*if(((cuElectrons_number[i]-Kx)<=cuElectrons_number[j])&&((cuElectrons_number[i]+Kx)>=cuElectrons_number[j])) if(i!=j) { dx=cuElectrons[i].x-cuElectrons[j].x; dy=cuElectrons[i].y-cuElectrons[j].y; dz=cuElectrons[i].z-cuElectrons[j].z; powe=(float)pow((float)(dx*dx+dy*dy+dz*dz),(float)0.5)*k/MK*QK*QK/(dx*dx+dy*dy+dz*dz); cuElectrons[i].ax+=dx/powe; cuElectrons[i].ay+=dy/powe; cuElectrons[i].az+=dz/powe; }*/ } } } extern "C" __global__ void pereschet(int *cuElectrons_number, TPL_Point *cuElectrons, int b1, int e1, float step) { int i = blockIdx.x*blockDim.x+threadIdx.x; if (i<e1-b1) { i+=b1; cuElectrons[i].x=cuElectrons[i].x+cuElectrons[i].vx*step+(cuElectrons[i].ax*step*step/2); cuElectrons[i].vx+=cuElectrons[i].ax*step; cuElectrons[i].y=cuElectrons[i].y+cuElectrons[i].vy*step+(cuElectrons[i].ay*step*step/2); cuElectrons[i].vy+=cuElectrons[i].ay*step; cuElectrons[i].z=cuElectrons[i].z+cuElectrons[i].vz*step+(cuElectrons[i].az*step*step/2); cuElectrons[i].vz+=cuElectrons[i].az*step; //рассчет места электрона } }
284ecd09a0be83cdfa34658735438a7130fddd27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Rashid Hafez */ #include "ISplit.h" dim3 GRID; dim3 BLOCK; static hipDeviceProp_t PROPS; /****************************** Increment Kernel *******************************/ __global__ void Incr(float * vec, float* cc, unsigned long n, unsigned long long it){ long long x = (long long)threadIdx.x + (long long)blockIdx.x * (long long)blockDim.x; long long y = (long long)threadIdx.y + (long long)blockIdx.y * (long long)blockDim.y; long long offset = x + y * (long long)blockDim.x * (long long)gridDim.x; //works for any size and anything if(offset<=n){ cc[offset] = vec[offset] * 3.3f; } } void ISplit(float * & arr, unsigned long sz, hipDeviceProp_t* prop){ } void setProp(int d){ gpuErrchk(hipSetDevice(d)); gpuErrchk(hipGetDeviceProperties(&PROPS,d)); } hipDeviceProp_t getProp(){ return(PROPS); }
284ecd09a0be83cdfa34658735438a7130fddd27.cu
/* Rashid Hafez */
#include "ISplit.h"

dim3 GRID;
dim3 BLOCK;
static cudaDeviceProp PROPS;

/****************************** Increment Kernel *******************************/
__global__ void Incr(float * vec, float* cc, unsigned long n, unsigned long long it){
    long long x = (long long)threadIdx.x + (long long)blockIdx.x * (long long)blockDim.x;
    long long y = (long long)threadIdx.y + (long long)blockIdx.y * (long long)blockDim.y;
    long long offset = x + y * (long long)blockDim.x * (long long)gridDim.x; //works for any size and anything

    if(offset<=n){
        cc[offset] = vec[offset] * 3.3f;
    }
}

void ISplit(float * & arr, unsigned long sz, cudaDeviceProp* prop){
}

void setProp(int d){
    gpuErrchk(cudaSetDevice(d));
    gpuErrchk(cudaGetDeviceProperties(&PROPS,d));
}

cudaDeviceProp getProp(){
    return(PROPS);
}
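ISplit is an empty stub in both versions of this pair, so nothing here actually launches the Incr kernel. The host-side sketch below shows one way Incr could be invoked over an n-element buffer; the 16x16 block shape and the use of the gpuErrchk macro around runtime calls are assumptions for illustration, since only the kernel itself is given above.

// Hypothetical launcher for Incr; grid/block choices are illustrative only.
void launchIncr(float *d_vec, float *d_cc, unsigned long n)
{
    dim3 block(16, 16);                               // 256 threads per block
    unsigned long blocks = (n + 255UL) / 256UL;       // ceiling division over the flat offset range
    dim3 grid((unsigned int)blocks, 1);               // 1D grid of 2D blocks matches the kernel's offset math

    Incr<<<grid, block>>>(d_vec, d_cc, n, 0ULL);      // 'it' is not used by the kernel body
    gpuErrchk(cudaGetLastError());
    gpuErrchk(cudaDeviceSynchronize());
}

Note that the kernel guards with offset <= n rather than offset < n, so if n is the element count the thread at offset == n also writes; a caller following this sketch would need buffers sized accordingly, or treat n as the last valid index.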
254c976cdeae24fe84cae3e781b25937d1ae0a55.hip
// !!! This is a file automatically generated by hipify!!! // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Winter Semester 2013/2014, March 3 - April 4 // ### // ### // ### Evgeny Strekalovskiy, Maria Klodt, Jan Stuehmer, Mohamed Souiai // ### // ### // ### // ### // ### // ### TODO: For every student of your group, please provide here: // ### // ### name, email, login username (for example p123) // ### // ### #include <aux.h> #include <iostream> using namespace std; // uncomment to use the camera //#define CAMERA #include "stereo_projection.h" int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate hipDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image - left and right string imageLeft = "", imageRight = ""; bool ret = getParam("i_left", imageLeft, argc, argv) && getParam("i_right", imageRight, argc, argv); if (!ret) cerr << "ERROR: one or more image(s) not specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i_left <image> -i_right <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // suppress all outputs by program except file writes and errors bool suppressOut = false; getParam("suppress_out", suppressOut, argc, argv); cout << "Suppress Out: " << suppressOut << endl; // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); // ### Define your own parameters here as needed // get MU float MU; bool retVal = getParam("mu", MU, argc, argv); if (!retVal) { cerr << "ERROR: no MU specified" << endl; cout << "Usage: " << argv[0] << " -mu <value> " << endl; return 1; } // get sigma float SIGMA; retVal = getParam("sigma", SIGMA, argc, argv); if (!retVal) { cerr << "ERROR: no SIGMA specified" << endl; cout << "Usage: " << argv[0] << " -sigma <value>" << endl; return 1; } // get TAU float TAU; retVal = getParam("tau", TAU, argc, argv); if (!retVal) { cerr << "ERROR: no TAU specified" << endl; cout << "Usage: " << argv[0] << " -tau <value>" << endl; return 1; } // get discretization uint32_t nt; retVal = getParam("nt", nt, argc, argv); if (!retVal) { cerr << "ERROR: no discretization specified" << endl; cout << "Usage: " << argv[0] << " -nt <value>" << endl; return 1; } // get steps uint32_t steps; retVal = getParam("steps", steps, argc, argv); if (!retVal) { cerr << "ERROR: no step specified" << endl; cout << "Usage: " << argv[0] << " -steps <value>" << endl; return 1; } // get implementation type int impl; retVal = getParam("impl", impl, argc, argv); if (!retVal) { cerr << "ERROR: no implementation specified" << endl; cout << "Usage: " << argv[0] << " -impl <value> {0:'Global Memory', 1:'Texture', 2:'Pitch', 3:'Shared Memory', 4:'2D Grid'}" << endl; return 1; } if(!suppressOut) { // output 
parameters cout << "repeats: " << repeats << endl; cout << "gray: " << gray << endl; cout << "MU: " << MU << endl; cout << "SIGMA: " << SIGMA << endl; cout << "TAU: " << TAU << endl; cout << "nt: " << nt << endl; cout << "Steps: " << steps << endl; cout << "Impl: " << impl << endl; } // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mInLeft; camera >> mInLeft; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mInLeft = cv::imread(imageLeft.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1)); // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mInRight = cv::imread(imageRight.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if both images are loaded if (mInLeft.data == NULL || mInRight.data == NULL) { cerr << "ERROR: Could not load one or more image(s) specified" << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mInLeft.convertTo(mInLeft, CV_32F); mInRight.convertTo(mInRight, CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mInLeft /= 255.f; mInRight /= 255.f; // get image dimensions and both must be same if(mInLeft.cols != mInRight.cols || mInLeft.rows != mInRight.rows) {cerr << "ERROR: Image dimensions don't match!" << endl; return 1; } int w = mInLeft.cols; // width int h = mInLeft.rows; // height int nc = mInLeft.channels(); // number of channels if (!suppressOut) cout << "image dimensions: " << w << " x " << h << endl; //cv::Mat mOut(h,w,mInLeft.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers // ### Define your own output images here as needed cv::Mat mOutDepth(h, w, CV_32FC1); // mOut will be a color image, 1 layers // Allocate arrays // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgInLeft = new float[(size_t)w * h * nc]; float *imgInRight = new float[(size_t)w * h * nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOutDepth = new float[(size_t)w * h * mOutDepth.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mInLeft; // convert to float representation (opencv loads image values as single bytes by default) mInLeft.convertTo(mInLeft,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mInLeft /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgInLeft, mInLeft); convert_mat_to_layered (imgInRight, mInRight); Timer timer; timer.start(); // GPU version based on implementation type selected switch(impl) { case 0: stereo_projection_PD(imgInLeft, imgInRight, imgOutDepth, dim3(w, h, 0), nc, dim3(w, h, nt), steps, MU, SIGMA, TAU); break; case 1: stereo_projection_PD_tex(imgInLeft, imgInRight, imgOutDepth, dim3(w, h, 0), nc, dim3(w, h, nt), steps, MU, SIGMA, TAU); break; case 2: stereo_projection_PD_pitch(imgInLeft, imgInRight, imgOutDepth, dim3(w, h, 0), nc, dim3(w, h, nt), steps, MU, SIGMA, TAU); break; case 3: stereo_projection_PD_sm(imgInLeft, imgInRight, imgOutDepth, dim3(w, h, 0), nc, dim3(w, h, nt), steps, MU, SIGMA, TAU); break; default: break; } timer.end(); float t = timer.get(); // elapsed time in seconds if(!suppressOut) cout << "time: " << t*1000 << " ms" << endl; // for output calculation convert back to interleaved open cv and normalize convert_layered_to_mat(mOutDepth, imgOutDepth); double minVal, maxVal; minMaxLoc(mOutDepth, &minVal, &maxVal); mOutDepth /= maxVal; // show input image // show at position (x_from_left=100,y_from_above=100) if(!suppressOut) showImage("Input Left", mInLeft, 100, 100); // show at position (x_from_left=100,y_from_above=100) if(!suppressOut) showImage("Input Right", mInRight, 100+w+40, 100); // ### Display your own output images here as needed if(!suppressOut) showImage("Depth Mapping", mOutDepth, 100+2*w+40, 100); #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("images/out/depth_map.png", mOutDepth * 255.f); // free allocated arrays delete[] imgInLeft; delete[] imgInRight; delete[] imgOutDepth; // close all opencv windows cvDestroyAllWindows(); return 0; }
254c976cdeae24fe84cae3e781b25937d1ae0a55.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Winter Semester 2013/2014, March 3 - April 4 // ### // ### // ### Evgeny Strekalovskiy, Maria Klodt, Jan Stuehmer, Mohamed Souiai // ### // ### // ### // ### // ### // ### TODO: For every student of your group, please provide here: // ### // ### name, email, login username (for example p123) // ### // ### #include <aux.h> #include <iostream> using namespace std; // uncomment to use the camera //#define CAMERA #include "stereo_projection.h" int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate cudaDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image - left and right string imageLeft = "", imageRight = ""; bool ret = getParam("i_left", imageLeft, argc, argv) && getParam("i_right", imageRight, argc, argv); if (!ret) cerr << "ERROR: one or more image(s) not specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i_left <image> -i_right <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // suppress all outputs by program except file writes and errors bool suppressOut = false; getParam("suppress_out", suppressOut, argc, argv); cout << "Suppress Out: " << suppressOut << endl; // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); // ### Define your own parameters here as needed // get MU float MU; bool retVal = getParam("mu", MU, argc, argv); if (!retVal) { cerr << "ERROR: no MU specified" << endl; cout << "Usage: " << argv[0] << " -mu <value> " << endl; return 1; } // get sigma float SIGMA; retVal = getParam("sigma", SIGMA, argc, argv); if (!retVal) { cerr << "ERROR: no SIGMA specified" << endl; cout << "Usage: " << argv[0] << " -sigma <value>" << endl; return 1; } // get TAU float TAU; retVal = getParam("tau", TAU, argc, argv); if (!retVal) { cerr << "ERROR: no TAU specified" << endl; cout << "Usage: " << argv[0] << " -tau <value>" << endl; return 1; } // get discretization uint32_t nt; retVal = getParam("nt", nt, argc, argv); if (!retVal) { cerr << "ERROR: no discretization specified" << endl; cout << "Usage: " << argv[0] << " -nt <value>" << endl; return 1; } // get steps uint32_t steps; retVal = getParam("steps", steps, argc, argv); if (!retVal) { cerr << "ERROR: no step specified" << endl; cout << "Usage: " << argv[0] << " -steps <value>" << endl; return 1; } // get implementation type int impl; retVal = getParam("impl", impl, argc, argv); if (!retVal) { cerr << "ERROR: no implementation specified" << endl; cout << "Usage: " << argv[0] << " -impl <value> {0:'Global Memory', 1:'Texture', 2:'Pitch', 3:'Shared Memory', 4:'2D Grid'}" << endl; return 1; } if(!suppressOut) { // output parameters cout << "repeats: " << repeats << endl; cout << 
"gray: " << gray << endl; cout << "MU: " << MU << endl; cout << "SIGMA: " << SIGMA << endl; cout << "TAU: " << TAU << endl; cout << "nt: " << nt << endl; cout << "Steps: " << steps << endl; cout << "Impl: " << impl << endl; } // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mInLeft; camera >> mInLeft; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mInLeft = cv::imread(imageLeft.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1)); // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mInRight = cv::imread(imageRight.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if both images are loaded if (mInLeft.data == NULL || mInRight.data == NULL) { cerr << "ERROR: Could not load one or more image(s) specified" << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mInLeft.convertTo(mInLeft, CV_32F); mInRight.convertTo(mInRight, CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mInLeft /= 255.f; mInRight /= 255.f; // get image dimensions and both must be same if(mInLeft.cols != mInRight.cols || mInLeft.rows != mInRight.rows) {cerr << "ERROR: Image dimensions don't match!" << endl; return 1; } int w = mInLeft.cols; // width int h = mInLeft.rows; // height int nc = mInLeft.channels(); // number of channels if (!suppressOut) cout << "image dimensions: " << w << " x " << h << endl; //cv::Mat mOut(h,w,mInLeft.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers // ### Define your own output images here as needed cv::Mat mOutDepth(h, w, CV_32FC1); // mOut will be a color image, 1 layers // Allocate arrays // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgInLeft = new float[(size_t)w * h * nc]; float *imgInRight = new float[(size_t)w * h * nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOutDepth = new float[(size_t)w * h * mOutDepth.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mInLeft; // convert to float representation (opencv loads image values as single bytes by default) mInLeft.convertTo(mInLeft,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mInLeft /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgInLeft, mInLeft); convert_mat_to_layered (imgInRight, mInRight); Timer timer; timer.start(); // GPU version based on implementation type selected switch(impl) { case 0: stereo_projection_PD(imgInLeft, imgInRight, imgOutDepth, dim3(w, h, 0), nc, dim3(w, h, nt), steps, MU, SIGMA, TAU); break; case 1: stereo_projection_PD_tex(imgInLeft, imgInRight, imgOutDepth, dim3(w, h, 0), nc, dim3(w, h, nt), steps, MU, SIGMA, TAU); break; case 2: stereo_projection_PD_pitch(imgInLeft, imgInRight, imgOutDepth, dim3(w, h, 0), nc, dim3(w, h, nt), steps, MU, SIGMA, TAU); break; case 3: stereo_projection_PD_sm(imgInLeft, imgInRight, imgOutDepth, dim3(w, h, 0), nc, dim3(w, h, nt), steps, MU, SIGMA, TAU); break; default: break; } timer.end(); float t = timer.get(); // elapsed time in seconds if(!suppressOut) cout << "time: " << t*1000 << " ms" << endl; // for output calculation convert back to interleaved open cv and normalize convert_layered_to_mat(mOutDepth, imgOutDepth); double minVal, maxVal; minMaxLoc(mOutDepth, &minVal, &maxVal); mOutDepth /= maxVal; // show input image // show at position (x_from_left=100,y_from_above=100) if(!suppressOut) showImage("Input Left", mInLeft, 100, 100); // show at position (x_from_left=100,y_from_above=100) if(!suppressOut) showImage("Input Right", mInRight, 100+w+40, 100); // ### Display your own output images here as needed if(!suppressOut) showImage("Depth Mapping", mOutDepth, 100+2*w+40, 100); #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("images/out/depth_map.png", mOutDepth * 255.f); // free allocated arrays delete[] imgInLeft; delete[] imgInRight; delete[] imgOutDepth; // close all opencv windows cvDestroyAllWindows(); return 0; }
625a773e5283b529c895b4339ff0f7d58d35b667.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Julian Gutierrez * Northeastern University * High Performance Computing * * Sobel Algorithm Implementation * */ #include "sobel.h" /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); exit(-1); } #endif return result; } using namespace std; void modThreshold (unsigned int value){ threshold = value; } /* * Sobel Kernel */ __global__ void sobelAlgorithm(unsigned char *intensity, unsigned char *result, unsigned int threshold){ int tx = threadIdx.x; int ty = threadIdx.y; // Shared memory __shared__ unsigned char tile[TILE_SIZE+2][TILE_SIZE+2]; // With reference to the total image int totalx = blockIdx.x*TILE_SIZE+tx; int totaly = blockIdx.y*TILE_SIZE+ty; // Size of a row and column int xsize = TILE_SIZE*gridDim.x; int ysize = TILE_SIZE*gridDim.y; int location = totaly*xsize + totalx; if (((totalx > 1) && (totalx < xsize)) && ((totaly > 1) && (totaly < ysize))) { int tileX = (totalx%TILE_SIZE) + 1; int tileY = (totaly%TILE_SIZE) + 1; tile[ tileX - 1 ][ tileY - 1 ] = intensity[ location - 1 -xsize ]; tile[ tileX + 1 ][ tileY - 1 ] = intensity[ location + 1 -xsize ]; tile[ tileX - 1 ][ tileY + 1 ] = intensity[ location - 1 +xsize ]; tile[ tileX + 1 ][ tileY + 1 ] = intensity[ location + 1 +xsize ]; /* int maxTile = TILE_SIZE + 1; int maxThread = TILE_SIZE - 1; if(tx == 0) { if (ty == 0) tile[0][0] = intensity[ location - 1 -xsize ]; if (ty == maxThread) tile[0][maxTile] = intensity[ location - 1 + xsize ]; tile[0][ty+1] = intensity[ location - 1]; } if(tx == maxThread) { if (ty == 0) tile[maxTile][0] = intensity[ location + 1 - xsize ]; if (ty == maxThread) tile[maxTile][maxTile] = intensity[ location + 1 + xsize ]; tile[maxTile][ty+1] = intensity[ location + 1]; } if(ty == 0) tile[tx+1][0] = intensity[ location - xsize]; if(ty == maxThread) tile[tx+1][maxTile] = intensity[ location + xsize]; ... */ __syncthreads(); tx = tx + 1; ty = ty + 1; int left = tx-1; int right = tx+1; int above = ty-1; int below = ty+1; // Horizontal int sum1 = tile[ left ][ below ] + 2 * tile[ tx ][ below ] + tile[ right ][ below ] - tile[ left ][ above ] - 2 * tile[ tx ][ above ] - tile[ right ] [ above ]; // Vertical int sum2 = tile[ left ][ above ] + 2 * tile[ left ][ ty ] + tile[ left ][ below ] - tile[ right ][ above ] - 2 * tile[ right ][ ty ] - tile[ right ][ below ]; int magnitude = sum1*sum1 + sum2*sum2; if (magnitude > threshold) result[location] = 255; else result[location] = 0; } } unsigned char *sobel(unsigned char *intensity, unsigned int height, unsigned int width){ #if defined(DEBUG) printf("Printing input data\n"); printf("Height: %d\n", height); printf("Width: %d\n", width); #endif int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). 
gpu.size = XSize*YSize; // Allocate arrays in GPU memory #if defined(VERBOSE) printf ("Allocating arrays in GPU memory.\n"); #endif #if defined(CUDA_TIMING) float Ttime; TIMER_CREATE(Ttime); TIMER_START(Ttime); #endif checkCuda(hipMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(hipMalloc((void**)&gpu.result , gpu.size*sizeof(char))); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(hipMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif #if defined(VERBOSE) printf("Running algorithm on GPU.\n"); #endif dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmenation hipLaunchKernelGGL(( sobelAlgorithm), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu.intensity, gpu.result, threshold); checkCuda(hipDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(hipMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(gpu.intensity)); checkCuda(hipFree(gpu.result)); #if defined(CUDA_TIMING) TIMER_END(Ttime); printf("Total GPU Execution Time: %f ms\n", Ttime); #endif return(gpu.resultOnCPU); } unsigned char *sobelWarmup(unsigned char *intensity, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). gpu.size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(hipMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(hipMalloc((void**)&gpu.result , gpu.size*sizeof(char))); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(hipMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmenation hipLaunchKernelGGL(( sobelAlgorithm), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu.intensity, gpu.result, threshold); checkCuda(hipDeviceSynchronize()); // Retrieve results from the GPU checkCuda(hipMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(gpu.intensity)); checkCuda(hipFree(gpu.result)); return(gpu.resultOnCPU); }
625a773e5283b529c895b4339ff0f7d58d35b667.cu
/* Julian Gutierrez * Northeastern University * High Performance Computing * * Sobel Algorithm Implementation * */ #include "sobel.h" /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); exit(-1); } #endif return result; } using namespace std; void modThreshold (unsigned int value){ threshold = value; } /* * Sobel Kernel */ __global__ void sobelAlgorithm(unsigned char *intensity, unsigned char *result, unsigned int threshold){ int tx = threadIdx.x; int ty = threadIdx.y; // Shared memory __shared__ unsigned char tile[TILE_SIZE+2][TILE_SIZE+2]; // With reference to the total image int totalx = blockIdx.x*TILE_SIZE+tx; int totaly = blockIdx.y*TILE_SIZE+ty; // Size of a row and column int xsize = TILE_SIZE*gridDim.x; int ysize = TILE_SIZE*gridDim.y; int location = totaly*xsize + totalx; if (((totalx > 1) && (totalx < xsize)) && ((totaly > 1) && (totaly < ysize))) { int tileX = (totalx%TILE_SIZE) + 1; int tileY = (totaly%TILE_SIZE) + 1; tile[ tileX - 1 ][ tileY - 1 ] = intensity[ location - 1 -xsize ]; tile[ tileX + 1 ][ tileY - 1 ] = intensity[ location + 1 -xsize ]; tile[ tileX - 1 ][ tileY + 1 ] = intensity[ location - 1 +xsize ]; tile[ tileX + 1 ][ tileY + 1 ] = intensity[ location + 1 +xsize ]; /* int maxTile = TILE_SIZE + 1; int maxThread = TILE_SIZE - 1; if(tx == 0) { if (ty == 0) tile[0][0] = intensity[ location - 1 -xsize ]; if (ty == maxThread) tile[0][maxTile] = intensity[ location - 1 + xsize ]; tile[0][ty+1] = intensity[ location - 1]; } if(tx == maxThread) { if (ty == 0) tile[maxTile][0] = intensity[ location + 1 - xsize ]; if (ty == maxThread) tile[maxTile][maxTile] = intensity[ location + 1 + xsize ]; tile[maxTile][ty+1] = intensity[ location + 1]; } if(ty == 0) tile[tx+1][0] = intensity[ location - xsize]; if(ty == maxThread) tile[tx+1][maxTile] = intensity[ location + xsize]; ... */ __syncthreads(); tx = tx + 1; ty = ty + 1; int left = tx-1; int right = tx+1; int above = ty-1; int below = ty+1; // Horizontal int sum1 = tile[ left ][ below ] + 2 * tile[ tx ][ below ] + tile[ right ][ below ] - tile[ left ][ above ] - 2 * tile[ tx ][ above ] - tile[ right ] [ above ]; // Vertical int sum2 = tile[ left ][ above ] + 2 * tile[ left ][ ty ] + tile[ left ][ below ] - tile[ right ][ above ] - 2 * tile[ right ][ ty ] - tile[ right ][ below ]; int magnitude = sum1*sum1 + sum2*sum2; if (magnitude > threshold) result[location] = 255; else result[location] = 0; } } unsigned char *sobel(unsigned char *intensity, unsigned int height, unsigned int width){ #if defined(DEBUG) printf("Printing input data\n"); printf("Height: %d\n", height); printf("Width: %d\n", width); #endif int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). 
gpu.size = XSize*YSize; // Allocate arrays in GPU memory #if defined(VERBOSE) printf ("Allocating arrays in GPU memory.\n"); #endif #if defined(CUDA_TIMING) float Ttime; TIMER_CREATE(Ttime); TIMER_START(Ttime); #endif checkCuda(cudaMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(cudaMalloc((void**)&gpu.result , gpu.size*sizeof(char))); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(cudaMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif #if defined(VERBOSE) printf("Running algorithm on GPU.\n"); #endif dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmenation sobelAlgorithm<<<dimGrid, dimBlock>>>(gpu.intensity, gpu.result, threshold); checkCuda(cudaDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(cudaMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(gpu.intensity)); checkCuda(cudaFree(gpu.result)); #if defined(CUDA_TIMING) TIMER_END(Ttime); printf("Total GPU Execution Time: %f ms\n", Ttime); #endif return(gpu.resultOnCPU); } unsigned char *sobelWarmup(unsigned char *intensity, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). gpu.size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(cudaMalloc((void**)&gpu.result , gpu.size*sizeof(char))); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(cudaMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmenation sobelAlgorithm<<<dimGrid, dimBlock>>>(gpu.intensity, gpu.result, threshold); checkCuda(cudaDeviceSynchronize()); // Retrieve results from the GPU checkCuda(cudaMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(gpu.intensity)); checkCuda(cudaFree(gpu.result)); return(gpu.resultOnCPU); }
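The per-pixel work inside sobelAlgorithm boils down to two weighted sums over a 3x3 neighborhood followed by a squared-magnitude threshold test. The plain CPU function below mirrors that arithmetic for a single interior pixel; it is a hypothetical cross-check only and is not part of the original file.

// CPU reference for one interior pixel (x, y) of a row-major 8-bit intensity image;
// returns 255/0 using the same sums and threshold test as the kernel.
unsigned char sobelPixel(const unsigned char *img, int width, int x, int y, unsigned int threshold)
{
    int p = y * width + x;
    // Horizontal gradient: row below minus row above, center column weighted by 2.
    int sum1 = img[p + width - 1] + 2 * img[p + width] + img[p + width + 1]
             - img[p - width - 1] - 2 * img[p - width] - img[p - width + 1];
    // Vertical gradient: column to the left minus column to the right, center row weighted by 2.
    int sum2 = img[p - width - 1] + 2 * img[p - 1] + img[p + width - 1]
             - img[p - width + 1] - 2 * img[p + 1] - img[p + width + 1];
    int magnitude = sum1 * sum1 + sum2 * sum2;
    return (magnitude > (int)threshold) ? (unsigned char)255 : (unsigned char)0;
}

Because the comparison is against the squared gradient magnitude, the value passed to modThreshold should be chosen on that squared scale.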
4bde7a0af5ed0c0a31fcda016f9d326e1a21d29f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); hipMalloc(&d_x, N*sizeof(float)); hipMalloc(&d_y, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice); // Perform SAXPY on 1M elements hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y); hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = max(maxError, abs(y[i]-4.0f)); printf("Max error: %f\n", maxError); hipFree(d_x); hipFree(d_y); free(x); free(y); }
4bde7a0af5ed0c0a31fcda016f9d326e1a21d29f.cu
#include <stdio.h> #include <cuda_runtime.h> __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } int main(void) { int N = 1<<20; float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); cudaMalloc(&d_x, N*sizeof(float)); cudaMalloc(&d_y, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice); // Perform SAXPY on 1M elements saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y); cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = max(maxError, abs(y[i]-4.0f)); printf("Max error: %f\n", maxError); cudaFree(d_x); cudaFree(d_y); free(x); free(y); }
61e26596c7b9f329b8375e4cea5adba211c4e31e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/split_channel_layer.hpp" namespace caffe { template <typename Dtype> __global__ void split_data_forward_gpu_kernel(const int num, const Dtype* const bottom, Dtype * const top, const int channel_out, const int channel, const int inner_shape) { CUDA_KERNEL_LOOP(i, num) { int ps = i % inner_shape; int pc = (i / inner_shape) % channel_out; int pn = i / inner_shape / channel_out; int pidx = (pn*channel + pc)*inner_shape + ps; top[i] = bottom[pidx]; } } template <typename Dtype> __global__ void split_data_backward_gpu_kernel(const int num, Dtype* const bottom, const Dtype * const top, const int channel_out, const int channel, const int inner_shape) { CUDA_KERNEL_LOOP(i, num) { int ps = i % inner_shape; int pc = (i / inner_shape) % channel_out; int pn = i / inner_shape / channel_out; int pidx = (pn*channel + pc)*inner_shape + ps; bottom[pidx] = top[i]; } } template <typename Dtype> void SplitChannelLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype * const top_data = top[0]->mutable_gpu_data(); const Dtype * const bottom_data = bottom[0]->gpu_data(); int count = top[0]->count(); split_data_forward_gpu_kernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (count, bottom_data, top_data, channel_out_, bottom[0]->channels(), bottom[0]->count(2)); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void SplitChannelLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype * const bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype * const top_diff = top[0]->gpu_diff(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), bottom_diff); split_data_backward_gpu_kernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (count, bottom_diff, top_diff, channel_out_, bottom[0]->channels(), bottom[0]->count(2)); } INSTANTIATE_LAYER_GPU_FUNCS(SplitChannelLayer); } // namespace caffe
61e26596c7b9f329b8375e4cea5adba211c4e31e.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/split_channel_layer.hpp" namespace caffe { template <typename Dtype> __global__ void split_data_forward_gpu_kernel(const int num, const Dtype* const bottom, Dtype * const top, const int channel_out, const int channel, const int inner_shape) { CUDA_KERNEL_LOOP(i, num) { int ps = i % inner_shape; int pc = (i / inner_shape) % channel_out; int pn = i / inner_shape / channel_out; int pidx = (pn*channel + pc)*inner_shape + ps; top[i] = bottom[pidx]; } } template <typename Dtype> __global__ void split_data_backward_gpu_kernel(const int num, Dtype* const bottom, const Dtype * const top, const int channel_out, const int channel, const int inner_shape) { CUDA_KERNEL_LOOP(i, num) { int ps = i % inner_shape; int pc = (i / inner_shape) % channel_out; int pn = i / inner_shape / channel_out; int pidx = (pn*channel + pc)*inner_shape + ps; bottom[pidx] = top[i]; } } template <typename Dtype> void SplitChannelLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype * const top_data = top[0]->mutable_gpu_data(); const Dtype * const bottom_data = bottom[0]->gpu_data(); int count = top[0]->count(); split_data_forward_gpu_kernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (count, bottom_data, top_data, channel_out_, bottom[0]->channels(), bottom[0]->count(2)); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void SplitChannelLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype * const bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype * const top_diff = top[0]->gpu_diff(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), bottom_diff); split_data_backward_gpu_kernel<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (count, bottom_diff, top_diff, channel_out_, bottom[0]->channels(), bottom[0]->count(2)); } INSTANTIATE_LAYER_GPU_FUNCS(SplitChannelLayer); } // namespace caffe
719e01cf6497587bb53107405c5db58eb0995407.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/transpose.h> #include <test_utils.h> #include <cuml/datasets/make_blobs.hpp> #include <cuml/ensemble/randomforest.hpp> #include <raft/cuda_utils.cuh> namespace ML { using namespace MLCommon; struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float max_samples; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_samples_leaf; int min_samples_split; float min_impurity_decrease; int n_streams; CRITERION split_criterion; float min_expected_acc; }; template <typename T> class RFBatchedClsTest : public ::testing::TestWithParam<RfInputs> { protected: void basicTest() { params = ::testing::TestWithParam<RfInputs>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false, true); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, -1, params.n_streams, tree_params); CUDA_CHECK(hipStreamCreate(&stream)); handle.reset(new raft::handle_t(rf_params.n_streams)); handle->set_stream(stream); auto allocator = handle->get_device_allocator(); int data_len = params.n_rows * params.n_cols; data = (T*)allocator->allocate(data_len * sizeof(T), stream); labels = (int*)allocator->allocate(params.n_rows * sizeof(int), stream); predicted_labels = (int*)allocator->allocate(params.n_rows * sizeof(int), stream); Datasets::make_blobs(*handle, data, labels, params.n_rows, params.n_cols, 5, false, nullptr, nullptr, T(0.1), false, T(-0.5), T(0.5), 3536699ULL); labels_h.resize(params.n_rows); raft::update_host(labels_h.data(), labels, params.n_rows, stream); preprocess_labels(params.n_rows, labels_h, labels_map); raft::update_device(labels, labels_h.data(), params.n_rows, stream); // Training part forest = new typename ML::RandomForestMetaData<T, int>; null_trees_ptr(forest); fit(*handle, forest, data, params.n_rows, params.n_cols, labels, labels_map.size(), rf_params); // predict function expects row major lay out of data, so we need to // transpose the data first T* data_row_major; data_row_major = (T*)allocator->allocate(data_len * sizeof(T), stream); hipblasHandle_t cublas_h = handle->get_cublas_handle(); raft::linalg::transpose(*handle, data, data_row_major, params.n_rows, params.n_cols, stream); predict(*handle, forest, data_row_major, params.n_rows, params.n_cols, predicted_labels); raft::update_host(labels_h.data(), predicted_labels, params.n_rows, stream); RF_metrics tmp = score(*handle, forest, labels, params.n_rows, predicted_labels); CUDA_CHECK(hipStreamSynchronize(stream)); 
CUDA_CHECK(hipStreamDestroy(stream)); accuracy = tmp.accuracy; allocator->deallocate(data_row_major, data_len * sizeof(T), stream); } void SetUp() override { basicTest(); } void TearDown() override { auto allocator = handle->get_device_allocator(); accuracy = -1.0f; postprocess_labels(params.n_rows, labels_h, labels_map); labels_h.clear(); labels_map.clear(); allocator->deallocate(labels, params.n_rows * sizeof(int), stream); allocator->deallocate(predicted_labels, params.n_rows * sizeof(int), stream); allocator->deallocate(data, params.n_rows * params.n_cols * sizeof(T), stream); delete forest; handle.reset(); } protected: std::shared_ptr<raft::handle_t> handle; hipStream_t stream; RfInputs params; T* data; int* labels; std::vector<int> labels_h; std::map<int, int> labels_map; //unique map of labels to int vals starting from 0 RandomForestMetaData<T, int>* forest; float accuracy = -1.0f; // overriden in each test SetUp and TearDown int* predicted_labels; }; //------------------------------------------------------------------------------------------------------------------------------------- const std::vector<RfInputs> inputsf2_clf = { // Simple non-crash tests with small datasets {100, 59, 1, 1.0f, 0.4f, 16, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, {101, 59, 2, 1.0f, 0.4f, 10, -1, true, false, 13, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, {100, 1, 2, 1.0f, 0.4f, 10, -1, true, false, 15, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, // Simple accuracy tests {20000, 10, 25, 1.0f, 0.4f, 16, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI}, {20000, 10, 5, 1.0f, 0.4f, 14, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}}; typedef RFBatchedClsTest<float> RFBatchedClsTestF; TEST_P(RFBatchedClsTestF, Fit) { ASSERT_TRUE(accuracy >= params.min_expected_acc); } INSTANTIATE_TEST_CASE_P(RFBatchedClsTests, RFBatchedClsTestF, ::testing::ValuesIn(inputsf2_clf)); typedef RFBatchedClsTest<double> RFBatchedClsTestD; TEST_P(RFBatchedClsTestD, Fit) { ASSERT_TRUE(accuracy >= params.min_expected_acc); } INSTANTIATE_TEST_CASE_P(RFBatchedClsTests, RFBatchedClsTestD, ::testing::ValuesIn(inputsf2_clf)); } // end namespace ML
719e01cf6497587bb53107405c5db58eb0995407.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/transpose.h> #include <test_utils.h> #include <cuml/datasets/make_blobs.hpp> #include <cuml/ensemble/randomforest.hpp> #include <raft/cuda_utils.cuh> namespace ML { using namespace MLCommon; struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float max_samples; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_samples_leaf; int min_samples_split; float min_impurity_decrease; int n_streams; CRITERION split_criterion; float min_expected_acc; }; template <typename T> class RFBatchedClsTest : public ::testing::TestWithParam<RfInputs> { protected: void basicTest() { params = ::testing::TestWithParam<RfInputs>::GetParam(); DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false, true); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, -1, params.n_streams, tree_params); CUDA_CHECK(cudaStreamCreate(&stream)); handle.reset(new raft::handle_t(rf_params.n_streams)); handle->set_stream(stream); auto allocator = handle->get_device_allocator(); int data_len = params.n_rows * params.n_cols; data = (T*)allocator->allocate(data_len * sizeof(T), stream); labels = (int*)allocator->allocate(params.n_rows * sizeof(int), stream); predicted_labels = (int*)allocator->allocate(params.n_rows * sizeof(int), stream); Datasets::make_blobs(*handle, data, labels, params.n_rows, params.n_cols, 5, false, nullptr, nullptr, T(0.1), false, T(-0.5), T(0.5), 3536699ULL); labels_h.resize(params.n_rows); raft::update_host(labels_h.data(), labels, params.n_rows, stream); preprocess_labels(params.n_rows, labels_h, labels_map); raft::update_device(labels, labels_h.data(), params.n_rows, stream); // Training part forest = new typename ML::RandomForestMetaData<T, int>; null_trees_ptr(forest); fit(*handle, forest, data, params.n_rows, params.n_cols, labels, labels_map.size(), rf_params); // predict function expects row major lay out of data, so we need to // transpose the data first T* data_row_major; data_row_major = (T*)allocator->allocate(data_len * sizeof(T), stream); cublasHandle_t cublas_h = handle->get_cublas_handle(); raft::linalg::transpose(*handle, data, data_row_major, params.n_rows, params.n_cols, stream); predict(*handle, forest, data_row_major, params.n_rows, params.n_cols, predicted_labels); raft::update_host(labels_h.data(), predicted_labels, params.n_rows, stream); RF_metrics tmp = score(*handle, forest, labels, params.n_rows, predicted_labels); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); accuracy = tmp.accuracy; 
allocator->deallocate(data_row_major, data_len * sizeof(T), stream); } void SetUp() override { basicTest(); } void TearDown() override { auto allocator = handle->get_device_allocator(); accuracy = -1.0f; postprocess_labels(params.n_rows, labels_h, labels_map); labels_h.clear(); labels_map.clear(); allocator->deallocate(labels, params.n_rows * sizeof(int), stream); allocator->deallocate(predicted_labels, params.n_rows * sizeof(int), stream); allocator->deallocate(data, params.n_rows * params.n_cols * sizeof(T), stream); delete forest; handle.reset(); } protected: std::shared_ptr<raft::handle_t> handle; cudaStream_t stream; RfInputs params; T* data; int* labels; std::vector<int> labels_h; std::map<int, int> labels_map; //unique map of labels to int vals starting from 0 RandomForestMetaData<T, int>* forest; float accuracy = -1.0f; // overriden in each test SetUp and TearDown int* predicted_labels; }; //------------------------------------------------------------------------------------------------------------------------------------- const std::vector<RfInputs> inputsf2_clf = { // Simple non-crash tests with small datasets {100, 59, 1, 1.0f, 0.4f, 16, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, {101, 59, 2, 1.0f, 0.4f, 10, -1, true, false, 13, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, {100, 1, 2, 1.0f, 0.4f, 10, -1, true, false, 15, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI, 0.0f}, // Simple accuracy tests {20000, 10, 25, 1.0f, 0.4f, 16, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::GINI}, {20000, 10, 5, 1.0f, 0.4f, 14, -1, true, false, 10, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}}; typedef RFBatchedClsTest<float> RFBatchedClsTestF; TEST_P(RFBatchedClsTestF, Fit) { ASSERT_TRUE(accuracy >= params.min_expected_acc); } INSTANTIATE_TEST_CASE_P(RFBatchedClsTests, RFBatchedClsTestF, ::testing::ValuesIn(inputsf2_clf)); typedef RFBatchedClsTest<double> RFBatchedClsTestD; TEST_P(RFBatchedClsTestD, Fit) { ASSERT_TRUE(accuracy >= params.min_expected_acc); } INSTANTIATE_TEST_CASE_P(RFBatchedClsTests, RFBatchedClsTestD, ::testing::ValuesIn(inputsf2_clf)); } // end namespace ML
6870785f74022002551157d2fa8b201096f66d6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zgerbt_func_batched.cu, normal z -> c, Mon Jun 25 18:24:14 2018 @author Adrien Remy @author Azzam Haidar */ #include "magma_internal.h" #include "cgerbt.h" #define block_height 32 #define block_width 4 #define block_length 256 #define NB 64 /***************************************************************************//** Purpose ------- CPRBT_MVT compute B = UTB to randomize B Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in] du COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in,out] db COMPLEX array, dimension (n) The n vector db computed by CGESV_NOPIV_GPU On exit db = du*db @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_mtv_batched( magma_int_t n, magmaFloatComplex *du, magmaFloatComplex **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount); hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n, db_array, 0); hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n+n/2, db_array, n/2); threads = block_length; grid = magma_ceildiv( n, 2*block_length ); hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, du, 0, db_array, 0); } /***************************************************************************//** Purpose ------- CPRBT_MV compute B = VB to obtain the non randomized solution Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in,out] db COMPLEX array, dimension (n) The n vector db computed by CGESV_NOPIV_GPU On exit db = dv*db @param[in] dv COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_mv_batched( magma_int_t n, magmaFloatComplex *dv, magmaFloatComplex **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount); hipLaunchKernelGGL(( magmablas_capply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dv, 0, db_array, 0); threads = block_length; grid = magma_ceildiv( n, 4*block_length ); hipLaunchKernelGGL(( magmablas_capply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n, db_array, 0); hipLaunchKernelGGL(( magmablas_capply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n+n/2, db_array, n/2); } /***************************************************************************//** Purpose ------- CPRBT randomize a square general matrix using partial randomized transformation Arguments --------- @param[in] n INTEGER The number of columns and rows of the matrix dA. n >= 0. 
@param[in,out] dA COMPLEX array, dimension (n,ldda) The n-by-n matrix dA On exit dA = duT*dA*d_V @param[in] ldda INTEGER The leading dimension of the array dA. LDA >= max(1,n). @param[in] du COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix U @param[in] dv COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_batched( magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex *du, magmaFloatComplex *dv, magma_int_t batchCount, magma_queue_t queue) { du += ldda; dv += ldda; dim3 threads(block_height, block_width); dim3 grid( magma_ceildiv( n, 4*block_height ), magma_ceildiv( n, 4*block_width ), batchCount ); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, 0, ldda, du, 0, dv, 0); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, n/2, ldda, du, n/2, dv, 0); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2); dim3 threads2(block_height, block_width); dim3 grid2( magma_ceildiv( n, 2*block_height ), magma_ceildiv( n, 2*block_width ), batchCount ); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid2), dim3(threads2), 0, queue->cuda_stream() , n, dA_array, 0, ldda, du, -ldda, dv, -ldda); }
6870785f74022002551157d2fa8b201096f66d6e.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/zgerbt_func_batched.cu, normal z -> c, Mon Jun 25 18:24:14 2018 @author Adrien Remy @author Azzam Haidar */ #include "magma_internal.h" #include "cgerbt.h" #define block_height 32 #define block_width 4 #define block_length 256 #define NB 64 /***************************************************************************//** Purpose ------- CPRBT_MVT compute B = UTB to randomize B Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in] du COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in,out] db COMPLEX array, dimension (n) The n vector db computed by CGESV_NOPIV_GPU On exit db = du*db @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_mtv_batched( magma_int_t n, magmaFloatComplex *du, magmaFloatComplex **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount); magmablas_capply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n, db_array, 0); magmablas_capply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n+n/2, db_array, n/2); threads = block_length; grid = magma_ceildiv( n, 2*block_length ); magmablas_capply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, du, 0, db_array, 0); } /***************************************************************************//** Purpose ------- CPRBT_MV compute B = VB to obtain the non randomized solution Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in,out] db COMPLEX array, dimension (n) The n vector db computed by CGESV_NOPIV_GPU On exit db = dv*db @param[in] dv COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_mv_batched( magma_int_t n, magmaFloatComplex *dv, magmaFloatComplex **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount); magmablas_capply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, dv, 0, db_array, 0); threads = block_length; grid = magma_ceildiv( n, 4*block_length ); magmablas_capply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n, db_array, 0); magmablas_capply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n+n/2, db_array, n/2); } /***************************************************************************//** Purpose ------- CPRBT randomize a square general matrix using partial randomized transformation Arguments --------- @param[in] n INTEGER The number of columns and rows of the matrix dA. n >= 0. @param[in,out] dA COMPLEX array, dimension (n,ldda) The n-by-n matrix dA On exit dA = duT*dA*d_V @param[in] ldda INTEGER The leading dimension of the array dA. LDA >= max(1,n). 
@param[in] du COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix U @param[in] dv COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_batched( magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex *du, magmaFloatComplex *dv, magma_int_t batchCount, magma_queue_t queue) { du += ldda; dv += ldda; dim3 threads(block_height, block_width); dim3 grid( magma_ceildiv( n, 4*block_height ), magma_ceildiv( n, 4*block_width ), batchCount ); magmablas_celementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, 0, ldda, du, 0, dv, 0); magmablas_celementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2); magmablas_celementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, n/2, ldda, du, n/2, dv, 0); magmablas_celementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2); dim3 threads2(block_height, block_width); dim3 grid2( magma_ceildiv( n, 2*block_height ), magma_ceildiv( n, 2*block_width ), batchCount ); magmablas_celementary_multiplication_kernel_batched<<< grid2, threads2, 0, queue->cuda_stream() >>>(n, dA_array, 0, ldda, du, -ldda, dv, -ldda); }
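The pair above shows the core rewrite hipify performs on this MAGMA source: each CUDA triple-chevron launch kernel<<<grid, threads, shmem, stream>>>(args...) becomes hipLaunchKernelGGL(kernel, grid, threads, shmem, stream, args...), with the grid math, stream handle, and kernel arguments left untouched. A minimal sketch of the same mapping on a toy kernel; the kernel name scale and its arguments are illustrative and not taken from the files above.

__global__ void scale(float *x, float a, int n) {
    // element-wise multiply, launched over ceil(n / blockDim.x) blocks
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

// CUDA launch syntax, as in the .cu file above:
//     scale<<< grid, threads, 0, stream >>>(d_x, 2.0f, n);
// HIP launch emitted by hipify, as in the .hip file above:
//     hipLaunchKernelGGL(scale, dim3(grid), dim3(threads), 0, stream, d_x, 2.0f, n);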
3280c6c99b708607c258ff1dfc05bce4f843dcc2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "prova3.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( prova3), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( prova3), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( prova3), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3280c6c99b708607c258ff1dfc05bce4f843dcc2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "prova3.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); prova3<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { prova3<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { prova3<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
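The harness above times 1000 back-to-back launches with std::chrono; because launches are asynchronous and the timed loop is not followed by a synchronize, the host-side timer stops without waiting for the kernels to finish. A common alternative is event-based timing, sketched below as a self-contained toy program; the empty kernel noop is a stand-in, not the prova3 kernel from the files above, and the HIP equivalents are hipEvent_t, hipEventCreate, hipEventRecord, hipEventSynchronize, and hipEventElapsedTime.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void noop() {}   // stand-in for the benchmarked kernel

int main() {
    cudaEvent_t t0, t1;
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);
    noop<<<1, 64>>>();                 // warm-up launch
    cudaEventRecord(t0);
    for (int i = 0; i < 1000; ++i) noop<<<1, 64>>>();
    cudaEventRecord(t1);
    cudaEventSynchronize(t1);          // wait until all 1000 launches have finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, t0, t1); // elapsed GPU time in milliseconds
    printf("1000 launches: %f ms\n", ms);
    cudaEventDestroy(t0);
    cudaEventDestroy(t1);
    return 0;
}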
52decd200daf0dc6ee60ad09d6c0321b947f3f9a.hip
// !!! This is a file automatically generated by hipify!!! //blackScholesAnalyticEngine.cu //Scott Grauer-Gray //Functions for running black scholes using the analytic engine (from Quantlib) on the GPU //needed for optionInputStruct #include "blackScholesAnalyticEngineStructs.cuh" //needed for the kernel(s) to run on the GPU #include "blackScholesAnalyticEngineKernels.cu" #include "blackScholesAnalyticEngineKernelsCpu.cu" #include <stdio.h> #include <math.h> #include <sys/time.h> #include <time.h> #include <hip/hip_runtime.h> #define NUM_DIFF_SETTINGS 37 //function to run the black scholes analytic engine on the gpu void runBlackScholesAnalyticEngine() { int numberOfSamples = 50000000; { int numVals = numberOfSamples;//nSamplesArray[numTime]; optionInputStruct* values = new optionInputStruct[numVals]; for (int numOption = 0; numOption < numVals; numOption++) { if ((numOption % NUM_DIFF_SETTINGS) == 0) { optionInputStruct currVal = { CALL, 40.00, 42.00, 0.08, 0.04, 0.75, 0.35, 5.0975, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 1) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.10, 0.15, 0.0205, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 2) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.10, 0.15, 1.8734, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 3) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.10, 0.15, 9.9413, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 4) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.10, 0.25, 0.3150, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 5) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.10, 0.25, 3.1217, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 6) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.10, 0.25, 10.3556, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 7) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.10, 0.35, 0.9474, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 8) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.10, 0.35, 4.3693, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 9) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.10, 0.35, 11.1381, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 10) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.50, 0.15, 0.8069, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 11) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.50, 0.15, 4.0232, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 12) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.50, 0.15, 10.5769, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 13) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.50, 0.25, 2.7026, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 14) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.50, 0.25, 6.6997, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 15) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.50, 0.25, 12.7857, 1.0e-4}; values[numOption] = 
currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 16) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.50, 0.35, 4.9329, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 17) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.50, 0.35, 9.3679, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 18) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.50, 0.35, 15.3086, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 19) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.10, 0.15, 9.9210, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 20) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.10, 0.15, 1.8734, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 21) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.10, 0.15, 0.0408, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 22) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.10, 0.25, 10.2155, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 23) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.10, 0.25, 3.1217, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 24) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.10, 0.25, 0.4551, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 25) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.10, 0.35, 10.8479, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 26) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.10, 0.35, 4.3693, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 27) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.10, 0.35, 1.2376, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 28) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.50, 0.15, 10.3192, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 29) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.50, 0.15, 4.0232, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 30) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.50, 0.15, 1.0646, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 31) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.50, 0.25, 12.2149, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 32) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.50, 0.25, 6.6997, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 33) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.50, 0.25, 3.2734, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 34) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.50, 0.35, 14.4452, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 35) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.50, 0.35, 9.3679, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 36) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.50, 0.35, 5.7963, 1.0e-4}; values[numOption] = currVal; } } // Run GPU 
code //initialize the arrays //declare and allocate the input and output data on the CPU float* outputVals = (float*)malloc(numVals * sizeof(float)); printf("Number of options: %d\n\n", numVals); long seconds, useconds; float mtimeCpu, mtimeGpu; struct timeval start; gettimeofday(&start, NULL); //declare the data on the GPU optionInputStruct* optionsGpu; float* outputValsGpu; //allocate space for data on GPU hipMalloc((void**)&optionsGpu, numVals * sizeof(optionInputStruct)); hipMalloc((void**)&outputValsGpu, numVals * sizeof(float)); //copy the data from the CPU to the GPU hipMemcpy(optionsGpu, values, numVals * sizeof(optionInputStruct), hipMemcpyHostToDevice); // setup execution parameters dim3 grid( (numVals + THREAD_BLOCK_SIZE - 1)/THREAD_BLOCK_SIZE, 1, 1); dim3 threads( THREAD_BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( getOutValOption) , dim3(dim3(grid)), dim3(dim3(threads)) , 0, 0, optionsGpu, outputValsGpu, numVals); //copy the resulting option values back to the CPU hipMemcpy(outputVals, outputValsGpu, numVals * sizeof(float), hipMemcpyDeviceToHost); hipFree(optionsGpu); hipFree(outputValsGpu); struct timeval end; gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtimeGpu = ((seconds) * 1000 + ((float)useconds)/1000.0) + 0.5f; printf("Run on GPU\n"); printf("Processing time on GPU: %f (ms)\n", mtimeGpu); float totResult = 0.0f; for (int i=0; i<numVals; i++) { totResult += outputVals[i]; } printf("Summation of output prices on GPU: %f\n", totResult); printf("Output price at index %d on GPU: %f\n\n", numVals/2, outputVals[numVals/2]); //run on CPU gettimeofday(&start, NULL); for (size_t numOption=0; numOption < numVals; numOption++) { getOutValOptionCpu(values, outputVals, numOption, numVals); } gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtimeCpu = ((seconds) * 1000 + ((float)useconds)/1000.0) + 0.5f; printf("Run on CPU\n"); printf("Processing time on CPU: %f (ms)\n", mtimeCpu); totResult = 0.0f; for (int i=0; i<numVals; i++) { totResult += outputVals[i]; } printf("Summation of output prices on CPU: %f\n", totResult); printf("Output price at index %d on CPU:: %f\n\n", numVals/2, outputVals[numVals/2]); printf("Speedup on GPU: %f\n", mtimeCpu / mtimeGpu); delete [] values; free(outputVals); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runBlackScholesAnalyticEngine(); return 0; }
52decd200daf0dc6ee60ad09d6c0321b947f3f9a.cu
//blackScholesAnalyticEngine.cu //Scott Grauer-Gray //Functions for running black scholes using the analytic engine (from Quantlib) on the GPU //needed for optionInputStruct #include "blackScholesAnalyticEngineStructs.cuh" //needed for the kernel(s) to run on the GPU #include "blackScholesAnalyticEngineKernels.cu" #include "blackScholesAnalyticEngineKernelsCpu.cu" #include <stdio.h> #include <math.h> #include <sys/time.h> #include <time.h> #include <cuda.h> #define NUM_DIFF_SETTINGS 37 //function to run the black scholes analytic engine on the gpu void runBlackScholesAnalyticEngine() { int numberOfSamples = 50000000; { int numVals = numberOfSamples;//nSamplesArray[numTime]; optionInputStruct* values = new optionInputStruct[numVals]; for (int numOption = 0; numOption < numVals; numOption++) { if ((numOption % NUM_DIFF_SETTINGS) == 0) { optionInputStruct currVal = { CALL, 40.00, 42.00, 0.08, 0.04, 0.75, 0.35, 5.0975, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 1) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.10, 0.15, 0.0205, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 2) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.10, 0.15, 1.8734, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 3) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.10, 0.15, 9.9413, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 4) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.10, 0.25, 0.3150, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 5) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.10, 0.25, 3.1217, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 6) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.10, 0.25, 10.3556, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 7) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.10, 0.35, 0.9474, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 8) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.10, 0.35, 4.3693, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 9) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.10, 0.35, 11.1381, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 10) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.50, 0.15, 0.8069, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 11) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.50, 0.15, 4.0232, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 12) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.50, 0.15, 10.5769, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 13) { optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.50, 0.25, 2.7026, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 14) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.50, 0.25, 6.6997, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 15) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.50, 0.25, 12.7857, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 16) { 
optionInputStruct currVal = { CALL, 100.00, 90.00, 0.10, 0.10, 0.50, 0.35, 4.9329, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 17) { optionInputStruct currVal = { CALL, 100.00, 100.00, 0.10, 0.10, 0.50, 0.35, 9.3679, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 18) { optionInputStruct currVal = { CALL, 100.00, 110.00, 0.10, 0.10, 0.50, 0.35, 15.3086, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 19) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.10, 0.15, 9.9210, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 20) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.10, 0.15, 1.8734, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 21) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.10, 0.15, 0.0408, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 22) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.10, 0.25, 10.2155, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 23) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.10, 0.25, 3.1217, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 24) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.10, 0.25, 0.4551, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 25) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.10, 0.35, 10.8479, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 26) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.10, 0.35, 4.3693, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 27) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.10, 0.35, 1.2376, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 28) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.50, 0.15, 10.3192, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 29) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.50, 0.15, 4.0232, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 30) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.50, 0.15, 1.0646, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 31) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.50, 0.25, 12.2149, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 32) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.50, 0.25, 6.6997, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 33) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.50, 0.25, 3.2734, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 34) { optionInputStruct currVal = { PUT, 100.00, 90.00, 0.10, 0.10, 0.50, 0.35, 14.4452, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 35) { optionInputStruct currVal = { PUT, 100.00, 100.00, 0.10, 0.10, 0.50, 0.35, 9.3679, 1.0e-4}; values[numOption] = currVal; } if ((numOption % NUM_DIFF_SETTINGS) == 36) { optionInputStruct currVal = { PUT, 100.00, 110.00, 0.10, 0.10, 0.50, 0.35, 5.7963, 1.0e-4}; values[numOption] = currVal; } } // Run GPU code //initialize the arrays //declare and allocate the 
input and output data on the CPU float* outputVals = (float*)malloc(numVals * sizeof(float)); printf("Number of options: %d\n\n", numVals); long seconds, useconds; float mtimeCpu, mtimeGpu; struct timeval start; gettimeofday(&start, NULL); //declare the data on the GPU optionInputStruct* optionsGpu; float* outputValsGpu; //allocate space for data on GPU cudaMalloc((void**)&optionsGpu, numVals * sizeof(optionInputStruct)); cudaMalloc((void**)&outputValsGpu, numVals * sizeof(float)); //copy the data from the CPU to the GPU cudaMemcpy(optionsGpu, values, numVals * sizeof(optionInputStruct), cudaMemcpyHostToDevice); // setup execution parameters dim3 grid( (numVals + THREAD_BLOCK_SIZE - 1)/THREAD_BLOCK_SIZE, 1, 1); dim3 threads( THREAD_BLOCK_SIZE, 1, 1); getOutValOption <<< dim3(grid), dim3(threads) >>> (optionsGpu, outputValsGpu, numVals); //copy the resulting option values back to the CPU cudaMemcpy(outputVals, outputValsGpu, numVals * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(optionsGpu); cudaFree(outputValsGpu); struct timeval end; gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtimeGpu = ((seconds) * 1000 + ((float)useconds)/1000.0) + 0.5f; printf("Run on GPU\n"); printf("Processing time on GPU: %f (ms)\n", mtimeGpu); float totResult = 0.0f; for (int i=0; i<numVals; i++) { totResult += outputVals[i]; } printf("Summation of output prices on GPU: %f\n", totResult); printf("Output price at index %d on GPU: %f\n\n", numVals/2, outputVals[numVals/2]); //run on CPU gettimeofday(&start, NULL); for (size_t numOption=0; numOption < numVals; numOption++) { getOutValOptionCpu(values, outputVals, numOption, numVals); } gettimeofday(&end, NULL); seconds = end.tv_sec - start.tv_sec; useconds = end.tv_usec - start.tv_usec; mtimeCpu = ((seconds) * 1000 + ((float)useconds)/1000.0) + 0.5f; printf("Run on CPU\n"); printf("Processing time on CPU: %f (ms)\n", mtimeCpu); totResult = 0.0f; for (int i=0; i<numVals; i++) { totResult += outputVals[i]; } printf("Summation of output prices on CPU: %f\n", totResult); printf("Output price at index %d on CPU:: %f\n\n", numVals/2, outputVals[numVals/2]); printf("Speedup on GPU: %f\n", mtimeCpu / mtimeGpu); delete [] values; free(outputVals); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runBlackScholesAnalyticEngine(); return 0; }
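Both versions above follow the same host-side round trip: allocate device buffers, copy the option inputs host-to-device, launch with a grid of ceil(numVals / THREAD_BLOCK_SIZE) blocks, copy the results back, and free. A minimal self-contained sketch of that pattern on a toy array; the kernel square and the sizes are illustrative and not taken from the pricing code, and the HIP side simply swaps in hipMalloc, hipMemcpy, hipFree, and hipLaunchKernelGGL.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void square(float *v, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= v[i];
}

int main() {
    const int n = 1024, block = 256;
    float h[1024];
    for (int i = 0; i < n; ++i) h[i] = (float)i;

    float *d = NULL;
    cudaMalloc((void**)&d, n * sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);

    dim3 grid((n + block - 1) / block);        // ceil(n / block), as in the option-pricing code above
    square<<<grid, block>>>(d, n);

    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d);
    printf("h[10] = %f\n", h[10]);             // expect 100.0
    return 0;
}

The ceil-division (n + block - 1) / block guarantees every element is covered, which is why the kernel also needs the i < n bounds check.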
6e70bf3924a1b061d4986fbbe275f94c8f7aa806.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************** * HISTORY * 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University * Prepared for 15-681, Fall 1994. * Modified by Shuai Che ****************************************************************** */ #include <stdio.h> #include <stdlib.h> #include "backprop.h" #include <math.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) #define TILE_SIZE 8 /*** Allocate 1d array of floats ***/ float *alloc_1d_dbl(int n) { float *new_var; new_var = (float *) malloc ((unsigned) (n * sizeof (float))); if (new_var == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return (new_var); } /*** Allocate 2d array of floats ***/ float *alloc_2d_dbl(int m, int n) { float *new_var; new_var = (float *) malloc ((unsigned) (m * n * sizeof (float))); if (new_var == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } return (new_var); } void bpnn_randomize_weights(float *w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i * (n+1) + j] = (float) rand()/RAND_MAX; } } } void bpnn_randomize_row(float *w, int m) { int i; for (i = 0; i <= m; i++) { w[i] = 0.1; } } void bpnn_zero_weights(float *w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i * (n+1) + j] = 0.0; } } } void bpnn_initialize(int seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = (BPNN *) malloc (sizeof (BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } void bpnn_free(BPNN *net) { free((char *) net->input_units); free((char *) net->hidden_units); free((char *) net->output_units); free((char *) net->hidden_delta); free((char *) net->output_delta); free((char *) net->target); free((char *) net->input_weights); free((char *) net->input_prev_weights); free((char *) net->hidden_weights); free((char *) net->hidden_prev_weights); free((char *) net); } /*** Creates a new fully-connected network from scratch, with the given numbers of input, hidden, and output units. Threshold units are automatically included. All weights are randomly initialized. Space is also allocated for temporary storage (momentum weights, error computations, etc). 
***/ BPNN *bpnn_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } __global__ void layerforward(float *l1, float *l2, float *conn, int n1, int n2) { int j = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; l1[0] = 1.0; if (j < n2 && j != 0) { for (int k = 0 ; k < n1 ; k++) sum += conn[k * (n2) + j] * l1[k]; l2[j] = (1.0 / (1.0 + exp(-sum))); } } void launch_layerforward(float *l1, float *l2, float *conn, int n1, int n2) { const unsigned int BLOCK_SIZE = TILE_SIZE; dim3 DimGrid((int) ((n2 - 1) / BLOCK_SIZE + 1), 1, 1); dim3 DimBlock((int) (BLOCK_SIZE), 1, 1); hipLaunchKernelGGL(( layerforward), dim3(DimGrid), dim3(DimBlock), 0, 0, l1, l2, conn, n1, n2); } __global__ void output_error(float *delta, float *target, float *output, int nj, float *err) { int j = blockIdx.x * blockDim.x + threadIdx.x; if(j < nj && j != 0) { float o = output[j]; delta[j] = o * (1.0 - o) * (target[j] - o); atomicAdd(err, ABS(delta[j])); } } void launch_output_error(float *delta, float *target, float *output, int nj, float *err) { const unsigned int BLOCK_SIZE = TILE_SIZE; dim3 DimGrid((int) ((nj - 1)/BLOCK_SIZE + 1), 1, 1); dim3 DimBlock((int)(BLOCK_SIZE), 1, 1); // TODO this is all redundant if iteration is 1, since err is never used again *err = 0; float *err_cuda; hipMalloc((void **) &(err_cuda), sizeof(float)); hipMemcpy(err_cuda, err, sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( output_error), dim3(DimGrid), dim3(DimBlock), 0, 0, delta, target, output, nj, err_cuda); hipMemcpy(err, err_cuda, sizeof(float), hipMemcpyDeviceToHost); hipFree(err_cuda); } __global__ void hidden_error(float *delta_h, int nh, float *delta_o, int no, float *who, float *hidden, float *err) { int j = blockIdx.x * blockDim.x + threadIdx.x; float h; float sum = 0; if (j < nh && j != 0) { h = hidden[j]; for (int k = 1 ; k < no ; k++) { sum += delta_o[k] * who[j * no + k]; } delta_h[j] = h * (1.0 - h) * sum; atomicAdd(err, ABS(delta_h[j])); } } void launch_hidden_error(float *delta_h, int nh, float *delta_o, int no, float *who, float *hidden, float *err) { const unsigned int BLOCK_SIZE = TILE_SIZE; dim3 DimGrid((int) ((nh - 1)/BLOCK_SIZE + 1), 1, 1); dim3 DimBlock((int)(BLOCK_SIZE), 1, 1); // TODO this is all redundant if iteration is 1, since err is never used again *err = 0; float *err_cuda; hipMalloc((void **) &(err_cuda), sizeof(float)); hipMemcpy(err_cuda, err, sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( hidden_error), dim3(DimGrid), dim3(DimBlock), 0, 0, delta_h, nh, delta_o, no, who, hidden, err_cuda); hipMemcpy(err, err_cuda, sizeof(float), hipMemcpyDeviceToHost); hipFree(err_cuda); } __global__ void adjust_weights(float *delta, int ndelta, float *ly, int nly,float *w, float *oldw) { int j = blockIdx.x * blockDim.x + threadIdx.x; float new_dw; if (j < ndelta && j != 0) { for (int k = 0 ; k < nly ; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k * (ndelta) + j])); w[k * (ndelta) + j] += new_dw; oldw[k * (ndelta) + j] = new_dw; } } } void launch_adjust_weights(float *delta, int ndelta, float *ly, int nly,float *w, float *oldw) { 
const unsigned int BLOCK_SIZE = TILE_SIZE; dim3 DimGrid((int) ((ndelta - 1) / BLOCK_SIZE + 1), 1, 1); dim3 DimBlock((int) (BLOCK_SIZE), 1, 1); hipLaunchKernelGGL(( adjust_weights), dim3(DimGrid), dim3(DimBlock), 0, 0, delta, ndelta, ly, nly, w, oldw); } BPNN *createNetDevice(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = (BPNN *) malloc (sizeof (BPNN)); newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; hipMalloc((void **) &(newnet->input_units), sizeof(float) * (n_in + 1)); hipMalloc((void **) &(newnet->hidden_units), sizeof(float) * (n_hidden + 1)); hipMalloc((void **) &(newnet->output_units), sizeof(float) * (n_out + 1)); hipMalloc((void **) &(newnet->hidden_delta), sizeof(float) * (n_hidden + 1)); hipMalloc((void **) &(newnet->output_delta), sizeof(float) * (n_out + 1)); hipMalloc((void **) &(newnet->target), sizeof(float) * (n_out + 1)); hipMalloc((void **) &(newnet->input_weights), sizeof(float) * (n_in + 1) * (n_hidden + 1)); hipMalloc((void **) &(newnet->hidden_weights), sizeof(float) * (n_hidden + 1) * (n_out + 1)); hipMalloc((void **) &(newnet->input_prev_weights), sizeof(float) * (n_in + 1) * (n_hidden + 1)); hipMalloc((void **) &(newnet->hidden_prev_weights), sizeof(float) * (n_hidden + 1) * (n_out + 1)); return (newnet); } void copyNetToDevice(BPNN *net, BPNN *cudanet, int n_in, int n_hidden, int n_out) { hipMemcpy(cudanet->input_units, net->input_units, sizeof(float)*(n_in + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->hidden_units, net->hidden_units, sizeof(float)*(n_hidden + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->output_units, net->output_units, sizeof(float)*(n_out + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->hidden_delta, net->hidden_delta, sizeof(float)*(n_hidden + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->output_delta, net->output_delta, sizeof(float)*(n_out + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->target, net->target, sizeof(float)*(n_out + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->input_weights, net->input_weights, sizeof(float)*(n_in + 1) * (n_hidden + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->hidden_weights, net->hidden_weights, sizeof(float)*(n_hidden + 1) * (n_out + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->input_prev_weights, net->input_prev_weights, sizeof(float)*(n_in + 1) * (n_hidden + 1), hipMemcpyHostToDevice); hipMemcpy(cudanet->hidden_prev_weights, net->hidden_prev_weights, sizeof(float)*(n_hidden + 1) * (n_out + 1), hipMemcpyHostToDevice); } void copyNetFromDevice(BPNN *net, BPNN *cudanet, int n_in, int n_hidden, int n_out) { hipMemcpy(net->input_units, cudanet->input_units, sizeof(float)*(n_in + 1), hipMemcpyDeviceToHost); hipMemcpy(net->hidden_units, cudanet->hidden_units, sizeof(float)*(n_hidden + 1), hipMemcpyDeviceToHost); hipMemcpy(net->output_units, cudanet->output_units, sizeof(float)*(n_out + 1), hipMemcpyDeviceToHost); hipMemcpy(net->hidden_delta, cudanet->hidden_delta, sizeof(float)*(n_hidden + 1), hipMemcpyDeviceToHost); hipMemcpy(net->output_delta, cudanet->output_delta, sizeof(float)*(n_out + 1), hipMemcpyDeviceToHost); hipMemcpy(net->target, cudanet->target, sizeof(float)*(n_out + 1), hipMemcpyDeviceToHost); hipMemcpy(net->input_weights, cudanet->input_weights, sizeof(float)*(n_in + 1) * (n_hidden + 1), hipMemcpyDeviceToHost); hipMemcpy(net->hidden_weights, cudanet->hidden_weights, sizeof(float)*(n_hidden + 1) * (n_out + 1), hipMemcpyDeviceToHost); hipMemcpy(net->input_prev_weights, cudanet->input_prev_weights, sizeof(float)*(n_in + 1) * (n_hidden + 1), 
hipMemcpyDeviceToHost); hipMemcpy(net->hidden_prev_weights, cudanet->hidden_prev_weights, sizeof(float)*(n_hidden + 1) * (n_out + 1), hipMemcpyDeviceToHost); } void freeDeviceNet(BPNN *net) { hipFree(net->input_units); hipFree(net->hidden_units); hipFree(net->output_units); hipFree(net->hidden_delta); hipFree(net->output_delta); hipFree(net->target); hipFree(net->input_weights); hipFree(net->input_prev_weights); hipFree(net->hidden_weights); hipFree(net->hidden_prev_weights); free(net); }
6e70bf3924a1b061d4986fbbe275f94c8f7aa806.cu
/* ****************************************************************** * HISTORY * 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University * Prepared for 15-681, Fall 1994. * Modified by Shuai Che ****************************************************************** */ #include <stdio.h> #include <stdlib.h> #include "backprop.h" #include <math.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) #define TILE_SIZE 8 /*** Allocate 1d array of floats ***/ float *alloc_1d_dbl(int n) { float *new_var; new_var = (float *) malloc ((unsigned) (n * sizeof (float))); if (new_var == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return (new_var); } /*** Allocate 2d array of floats ***/ float *alloc_2d_dbl(int m, int n) { float *new_var; new_var = (float *) malloc ((unsigned) (m * n * sizeof (float))); if (new_var == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } return (new_var); } void bpnn_randomize_weights(float *w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i * (n+1) + j] = (float) rand()/RAND_MAX; } } } void bpnn_randomize_row(float *w, int m) { int i; for (i = 0; i <= m; i++) { w[i] = 0.1; } } void bpnn_zero_weights(float *w, int m, int n) { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i * (n+1) + j] = 0.0; } } } void bpnn_initialize(int seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = (BPNN *) malloc (sizeof (BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } void bpnn_free(BPNN *net) { free((char *) net->input_units); free((char *) net->hidden_units); free((char *) net->output_units); free((char *) net->hidden_delta); free((char *) net->output_delta); free((char *) net->target); free((char *) net->input_weights); free((char *) net->input_prev_weights); free((char *) net->hidden_weights); free((char *) net->hidden_prev_weights); free((char *) net); } /*** Creates a new fully-connected network from scratch, with the given numbers of input, hidden, and output units. Threshold units are automatically included. All weights are randomly initialized. Space is also allocated for temporary storage (momentum weights, error computations, etc). 
***/ BPNN *bpnn_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } __global__ void layerforward(float *l1, float *l2, float *conn, int n1, int n2) { int j = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; l1[0] = 1.0; if (j < n2 && j != 0) { for (int k = 0 ; k < n1 ; k++) sum += conn[k * (n2) + j] * l1[k]; l2[j] = (1.0 / (1.0 + exp(-sum))); } } void launch_layerforward(float *l1, float *l2, float *conn, int n1, int n2) { const unsigned int BLOCK_SIZE = TILE_SIZE; dim3 DimGrid((int) ((n2 - 1) / BLOCK_SIZE + 1), 1, 1); dim3 DimBlock((int) (BLOCK_SIZE), 1, 1); layerforward<<<DimGrid, DimBlock>>>(l1, l2, conn, n1, n2); } __global__ void output_error(float *delta, float *target, float *output, int nj, float *err) { int j = blockIdx.x * blockDim.x + threadIdx.x; if(j < nj && j != 0) { float o = output[j]; delta[j] = o * (1.0 - o) * (target[j] - o); atomicAdd(err, ABS(delta[j])); } } void launch_output_error(float *delta, float *target, float *output, int nj, float *err) { const unsigned int BLOCK_SIZE = TILE_SIZE; dim3 DimGrid((int) ((nj - 1)/BLOCK_SIZE + 1), 1, 1); dim3 DimBlock((int)(BLOCK_SIZE), 1, 1); // TODO this is all redundant if iteration is 1, since err is never used again *err = 0; float *err_cuda; cudaMalloc((void **) &(err_cuda), sizeof(float)); cudaMemcpy(err_cuda, err, sizeof(float), cudaMemcpyHostToDevice); output_error<<<DimGrid, DimBlock>>>(delta, target, output, nj, err_cuda); cudaMemcpy(err, err_cuda, sizeof(float), cudaMemcpyDeviceToHost); cudaFree(err_cuda); } __global__ void hidden_error(float *delta_h, int nh, float *delta_o, int no, float *who, float *hidden, float *err) { int j = blockIdx.x * blockDim.x + threadIdx.x; float h; float sum = 0; if (j < nh && j != 0) { h = hidden[j]; for (int k = 1 ; k < no ; k++) { sum += delta_o[k] * who[j * no + k]; } delta_h[j] = h * (1.0 - h) * sum; atomicAdd(err, ABS(delta_h[j])); } } void launch_hidden_error(float *delta_h, int nh, float *delta_o, int no, float *who, float *hidden, float *err) { const unsigned int BLOCK_SIZE = TILE_SIZE; dim3 DimGrid((int) ((nh - 1)/BLOCK_SIZE + 1), 1, 1); dim3 DimBlock((int)(BLOCK_SIZE), 1, 1); // TODO this is all redundant if iteration is 1, since err is never used again *err = 0; float *err_cuda; cudaMalloc((void **) &(err_cuda), sizeof(float)); cudaMemcpy(err_cuda, err, sizeof(float), cudaMemcpyHostToDevice); hidden_error<<<DimGrid, DimBlock>>>(delta_h, nh, delta_o, no, who, hidden, err_cuda); cudaMemcpy(err, err_cuda, sizeof(float), cudaMemcpyDeviceToHost); cudaFree(err_cuda); } __global__ void adjust_weights(float *delta, int ndelta, float *ly, int nly,float *w, float *oldw) { int j = blockIdx.x * blockDim.x + threadIdx.x; float new_dw; if (j < ndelta && j != 0) { for (int k = 0 ; k < nly ; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k * (ndelta) + j])); w[k * (ndelta) + j] += new_dw; oldw[k * (ndelta) + j] = new_dw; } } } void launch_adjust_weights(float *delta, int ndelta, float *ly, int nly,float *w, float *oldw) { const unsigned int BLOCK_SIZE = TILE_SIZE; dim3 DimGrid((int) ((ndelta - 1) / BLOCK_SIZE + 1), 1, 
1); dim3 DimBlock((int) (BLOCK_SIZE), 1, 1); adjust_weights<<<DimGrid, DimBlock>>>(delta, ndelta, ly, nly, w, oldw); } BPNN *createNetDevice(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = (BPNN *) malloc (sizeof (BPNN)); newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; cudaMalloc((void **) &(newnet->input_units), sizeof(float) * (n_in + 1)); cudaMalloc((void **) &(newnet->hidden_units), sizeof(float) * (n_hidden + 1)); cudaMalloc((void **) &(newnet->output_units), sizeof(float) * (n_out + 1)); cudaMalloc((void **) &(newnet->hidden_delta), sizeof(float) * (n_hidden + 1)); cudaMalloc((void **) &(newnet->output_delta), sizeof(float) * (n_out + 1)); cudaMalloc((void **) &(newnet->target), sizeof(float) * (n_out + 1)); cudaMalloc((void **) &(newnet->input_weights), sizeof(float) * (n_in + 1) * (n_hidden + 1)); cudaMalloc((void **) &(newnet->hidden_weights), sizeof(float) * (n_hidden + 1) * (n_out + 1)); cudaMalloc((void **) &(newnet->input_prev_weights), sizeof(float) * (n_in + 1) * (n_hidden + 1)); cudaMalloc((void **) &(newnet->hidden_prev_weights), sizeof(float) * (n_hidden + 1) * (n_out + 1)); return (newnet); } void copyNetToDevice(BPNN *net, BPNN *cudanet, int n_in, int n_hidden, int n_out) { cudaMemcpy(cudanet->input_units, net->input_units, sizeof(float)*(n_in + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->hidden_units, net->hidden_units, sizeof(float)*(n_hidden + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->output_units, net->output_units, sizeof(float)*(n_out + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->hidden_delta, net->hidden_delta, sizeof(float)*(n_hidden + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->output_delta, net->output_delta, sizeof(float)*(n_out + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->target, net->target, sizeof(float)*(n_out + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->input_weights, net->input_weights, sizeof(float)*(n_in + 1) * (n_hidden + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->hidden_weights, net->hidden_weights, sizeof(float)*(n_hidden + 1) * (n_out + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->input_prev_weights, net->input_prev_weights, sizeof(float)*(n_in + 1) * (n_hidden + 1), cudaMemcpyHostToDevice); cudaMemcpy(cudanet->hidden_prev_weights, net->hidden_prev_weights, sizeof(float)*(n_hidden + 1) * (n_out + 1), cudaMemcpyHostToDevice); } void copyNetFromDevice(BPNN *net, BPNN *cudanet, int n_in, int n_hidden, int n_out) { cudaMemcpy(net->input_units, cudanet->input_units, sizeof(float)*(n_in + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->hidden_units, cudanet->hidden_units, sizeof(float)*(n_hidden + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->output_units, cudanet->output_units, sizeof(float)*(n_out + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->hidden_delta, cudanet->hidden_delta, sizeof(float)*(n_hidden + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->output_delta, cudanet->output_delta, sizeof(float)*(n_out + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->target, cudanet->target, sizeof(float)*(n_out + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->input_weights, cudanet->input_weights, sizeof(float)*(n_in + 1) * (n_hidden + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->hidden_weights, cudanet->hidden_weights, sizeof(float)*(n_hidden + 1) * (n_out + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->input_prev_weights, cudanet->input_prev_weights, sizeof(float)*(n_in + 1) * (n_hidden + 1), cudaMemcpyDeviceToHost); cudaMemcpy(net->hidden_prev_weights, cudanet->hidden_prev_weights, 
sizeof(float)*(n_hidden + 1) * (n_out + 1), cudaMemcpyDeviceToHost); } void freeDeviceNet(BPNN *net) { cudaFree(net->input_units); cudaFree(net->hidden_units); cudaFree(net->output_units); cudaFree(net->hidden_delta); cudaFree(net->output_delta); cudaFree(net->target); cudaFree(net->input_weights); cudaFree(net->input_prev_weights); cudaFree(net->hidden_weights); cudaFree(net->hidden_prev_weights); free(net); }
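In the backprop code above, output_error and hidden_error accumulate the total error by having every thread atomicAdd its contribution into a single device float that is then copied back to the host. The toy program below isolates that pattern; the kernel sum_abs and its data are illustrative and not part of the backprop sources. A single global accumulator serializes under heavy contention, so larger reductions usually combine per-block shared-memory sums with one atomic per block.

#include <cstdio>
#include <cuda_runtime.h>

// Each thread adds its own contribution into one global accumulator,
// mirroring how output_error/hidden_error above accumulate *err.
__global__ void sum_abs(const float *v, int n, float *acc) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicAdd(acc, fabsf(v[i]));   // float atomicAdd requires compute capability 2.0+
}

int main() {
    const int n = 1000;
    float h[1000];
    for (int i = 0; i < n; ++i) h[i] = (i % 2) ? 1.0f : -1.0f;

    float *d_v = NULL, *d_acc = NULL, zero = 0.0f, result = 0.0f;
    cudaMalloc((void**)&d_v, n * sizeof(float));
    cudaMalloc((void**)&d_acc, sizeof(float));
    cudaMemcpy(d_v, h, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_acc, &zero, sizeof(float), cudaMemcpyHostToDevice);

    sum_abs<<<(n + 127) / 128, 128>>>(d_v, n, d_acc);

    cudaMemcpy(&result, d_acc, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum |v| = %f\n", result);          // expect 1000.0
    cudaFree(d_v);
    cudaFree(d_acc);
    return 0;
}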
41c92e558edccc1d0962027032161aace0f18733.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <Windows.h> #include <iostream> using namespace std; int main() { hipDeviceProp_t prop; hipError_t cudaStatus; int count; cudaStatus = hipGetDeviceCount( &count ); if (cudaStatus != hipSuccess) { cout << "hipMalloc failed!" << endl; } for (int i=0; i< count; i++) { cudaStatus = hipGetDeviceProperties( &prop, i ); if (cudaStatus != hipSuccess) { cout << "hipGetDeviceProperties failed!" << endl; } cout << " --- Device " << i << " info ---" << "\n" << endl; cout << "GPU name: " << prop.name << "\n" << endl; cout << "Compute capability: " << prop.major << "." << prop.minor << "\n" << endl; cout << "Clock rate: " << prop.clockRate << "\n" << endl; if (prop.deviceOverlap) { cout << "Can overlap cudaMemory() calls with kernel execution: yes!" << "\n" << endl; } else { cout << "Can overlap cudaMemory() calls with kernel execution: no!" << "\n" << endl; } if (prop.kernelExecTimeoutEnabled) { cout << "Kernel run-time limit enabled on this device: yes!" << "\n" << endl; } else { cout << "Kernel run-time limit enabled on this device: no!" << "\n" << endl; } cout << "\n" << " --- Device memory info ---" << "\n" << endl; cout << "Total global memory (bytes): " << prop.totalGlobalMem << "\n" << endl; cout << "Total constant memory (bytes): " << prop.totalConstMem << "\n" << endl; cout << "Maximum pitch for memory copies (bytes): " << prop.memPitch << "\n" << endl; cout << "Texture alignment requirement: " << prop.textureAlignment << "\n" << endl; cout << "\n" << " --- Multiprocessor info ---" << "\n" << endl; cout << "Number of multiprocessors: " << prop.multiProcessorCount << "\n" << endl; cout << "Maximum shared memory per block (bytes): " << prop.sharedMemPerBlock << "\n" << endl; cout << "32-bit registers per block: " << prop.regsPerBlock << "\n" << endl; cout << "Threads per warp: " << prop.warpSize << "\n" << endl; cout << "Maximum threads per block: " << prop.maxThreadsPerBlock << "\n" << endl; cout << "Thread block (Block) dimensions: " << "(" << prop.maxThreadsDim[0] << "," << prop.maxThreadsDim[1] << "," << prop.maxThreadsDim[2] << ")" << "\n" << endl; cout << "Thread grid (Grid) dimensions: " << "(" << prop.maxGridSize[0] << "," << prop.maxGridSize[1] << "," << prop.maxGridSize[2] << ")" << "\n" << endl; } Sleep(200000); return 0; }
41c92e558edccc1d0962027032161aace0f18733.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <Windows.h> #include <iostream> using namespace std; int main() { cudaDeviceProp prop; cudaError_t cudaStatus; int count; cudaStatus = cudaGetDeviceCount( &count ); if (cudaStatus != cudaSuccess) { cout << "cudaMalloc failed!" << endl; } for (int i=0; i< count; i++) { cudaStatus = cudaGetDeviceProperties( &prop, i ); if (cudaStatus != cudaSuccess) { cout << "cudaGetDeviceProperties failed!" << endl; } cout << " --- 第" << i << "个设备信息 ---" << "\n" << endl; cout << "显卡名字:" << prop.name << "\n" << endl; cout << "计算能力:" << prop.major << "." << prop.minor << "\n" << endl; cout << "时钟频率:" << prop.clockRate << "\n" << endl; if (prop.deviceOverlap) { cout << "是否可以同时执行cudaMemory()调用和一个核函数调用:是!" << "\n" << endl; } else { cout << "是否可以同时执行cudaMemory()调用和一个核函数调用:否!" << "\n" << endl; } if (prop.kernelExecTimeoutEnabled) { cout << "设备上执行的核函数是否存在运行时限制:是!" << "\n" << endl; } else { cout << "设备上执行的核函数是否存在运行时限制:否!" << "\n" << endl; } cout << "\n" << " ---设备内存信息---" << "\n" << endl; cout << "全局内存总量(字节):" << prop.totalGlobalMem << "\n" << endl; cout << "常量内存总量(字节):" << prop.totalConstMem << "\n" << endl; cout << "内存复制中的最大修正值(字节):" << prop.memPitch << "\n" << endl; cout << "设备的纹理对齐要求:" << prop.textureAlignment << "\n" << endl; cout << "\n" << " ---设备的多处理器信息---" << "\n" << endl; cout << "流处理器数量:" << prop.multiProcessorCount << "\n" << endl; cout << "一个线程块可使用的最大共享内存数量(字节):" << prop.sharedMemPerBlock << "\n" << endl; cout << "一个线程块可使用的32位寄存器数量:" << prop.regsPerBlock << "\n" << endl; cout << "一个线程束中包含的线程个数:" << prop.warpSize << "\n" << endl; cout << "一个线程块中包含的最大线程数量:" << prop.maxThreadsPerBlock << "\n" << endl; cout << "线程块(Block)维数:" << "(" << prop.maxThreadsDim[0] << "," << prop.maxThreadsDim[1] << "," << prop.maxThreadsDim[2] << ")" << "\n" << endl; cout << "线程格(Grid)维数:" << "(" << prop.maxGridSize[0] << "," << prop.maxGridSize[1] << "," << prop.maxGridSize[2] << ")" << "\n" << endl; } Sleep(200000); return 0; }
cd5926d276cec53a3fa2642312b0c572b051786d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "../CPU/linear.h" #include "../CPU/relu.h" #include "../CPU/train.h" #include "../GPU/linear.h" #include "../GPU/relu.h" #include "../GPU/train.h" #include "../utils/utils.h" int main(){ int bs, n_in, n_hidden, n_epochs; int sz_inp, sz_weights1, sz_hidden; float *inp_cpu, *out_cpu, *inp_gpu, *out_gpu; for (int i=0; i<8; i++){ std::cout << "Iteration " << i+1 << std::endl; bs = random_int(8, 64); n_in = random_int(16, 32); n_epochs = random_int(1, 4); n_hidden = n_in/2; sz_inp = bs*n_in; sz_weights1 = n_in*n_hidden; sz_hidden = bs*n_hidden; inp_cpu = new float[sz_inp]; hipMallocManaged(&inp_gpu, sz_inp*sizeof(float)); out_cpu = new float[bs]; hipMallocManaged(&out_gpu, bs*sizeof(float)); fill_array(inp_cpu, sz_inp); set_eq(inp_gpu, inp_cpu, sz_inp); fill_array(out_cpu, bs); set_eq(out_gpu, out_cpu, bs); Linear_CPU* lin1_cpu = new Linear_CPU(bs, n_in, n_hidden); Linear_GPU* lin1_gpu = new Linear_GPU(bs, n_in, n_hidden); set_eq(lin1_gpu->weights, lin1_cpu->weights, sz_weights1); ReLU_CPU* relu1_cpu = new ReLU_CPU(sz_hidden); ReLU_GPU* relu1_gpu = new ReLU_GPU(sz_hidden); Linear_CPU* lin2_cpu = new Linear_CPU(bs, n_hidden, 1); Linear_GPU* lin2_gpu = new Linear_GPU(bs, n_hidden, 1); set_eq(lin2_gpu->weights, lin2_cpu->weights, n_hidden); std::vector<Module*> layers_cpu = {lin1_cpu, relu1_cpu, lin2_cpu}; std::vector<Module*> layers_gpu = {lin1_gpu, relu1_gpu, lin2_gpu}; Sequential_CPU seq_cpu(layers_cpu); Sequential_GPU seq_gpu(layers_gpu); std::cout << "Result of train" << std::endl; std::cout << "CPU" << std::endl; train_cpu(seq_cpu, inp_cpu, out_cpu, bs, n_in, n_epochs); std::cout << "GPU" << std::endl; train_gpu(seq_gpu, inp_gpu, out_gpu, bs, n_in, n_epochs); std::cout << "*********" << std::endl; } delete[] inp_cpu; hipFree(inp_gpu); delete[] out_cpu; hipFree(out_gpu); delete lin1_cpu; delete lin1_gpu; delete lin2_cpu; delete lin2_gpu; delete relu1_cpu; delete relu1_gpu; return 0; }
cd5926d276cec53a3fa2642312b0c572b051786d.cu
#include <iostream> #include "../CPU/linear.h" #include "../CPU/relu.h" #include "../CPU/train.h" #include "../GPU/linear.h" #include "../GPU/relu.h" #include "../GPU/train.h" #include "../utils/utils.h" int main(){ int bs, n_in, n_hidden, n_epochs; int sz_inp, sz_weights1, sz_hidden; float *inp_cpu, *out_cpu, *inp_gpu, *out_gpu; for (int i=0; i<8; i++){ std::cout << "Iteration " << i+1 << std::endl; bs = random_int(8, 64); n_in = random_int(16, 32); n_epochs = random_int(1, 4); n_hidden = n_in/2; sz_inp = bs*n_in; sz_weights1 = n_in*n_hidden; sz_hidden = bs*n_hidden; inp_cpu = new float[sz_inp]; cudaMallocManaged(&inp_gpu, sz_inp*sizeof(float)); out_cpu = new float[bs]; cudaMallocManaged(&out_gpu, bs*sizeof(float)); fill_array(inp_cpu, sz_inp); set_eq(inp_gpu, inp_cpu, sz_inp); fill_array(out_cpu, bs); set_eq(out_gpu, out_cpu, bs); Linear_CPU* lin1_cpu = new Linear_CPU(bs, n_in, n_hidden); Linear_GPU* lin1_gpu = new Linear_GPU(bs, n_in, n_hidden); set_eq(lin1_gpu->weights, lin1_cpu->weights, sz_weights1); ReLU_CPU* relu1_cpu = new ReLU_CPU(sz_hidden); ReLU_GPU* relu1_gpu = new ReLU_GPU(sz_hidden); Linear_CPU* lin2_cpu = new Linear_CPU(bs, n_hidden, 1); Linear_GPU* lin2_gpu = new Linear_GPU(bs, n_hidden, 1); set_eq(lin2_gpu->weights, lin2_cpu->weights, n_hidden); std::vector<Module*> layers_cpu = {lin1_cpu, relu1_cpu, lin2_cpu}; std::vector<Module*> layers_gpu = {lin1_gpu, relu1_gpu, lin2_gpu}; Sequential_CPU seq_cpu(layers_cpu); Sequential_GPU seq_gpu(layers_gpu); std::cout << "Result of train" << std::endl; std::cout << "CPU" << std::endl; train_cpu(seq_cpu, inp_cpu, out_cpu, bs, n_in, n_epochs); std::cout << "GPU" << std::endl; train_gpu(seq_gpu, inp_gpu, out_gpu, bs, n_in, n_epochs); std::cout << "*********" << std::endl; } delete[] inp_cpu; cudaFree(inp_gpu); delete[] out_cpu; cudaFree(out_gpu); delete lin1_cpu; delete lin1_gpu; delete lin2_cpu; delete lin2_gpu; delete relu1_cpu; delete relu1_gpu; return 0; }
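The test harness above allocates its CPU buffers with new[] and its GPU buffers with cudaMallocManaged/hipMallocManaged, which is presumably why host-side helpers such as fill_array and set_eq can write into the GPU buffers directly instead of going through explicit memcpy calls. The sketch below isolates that managed-memory pattern in a self-contained toy program; the kernel add_one is illustrative and not part of the files above. The important detail is the cudaDeviceSynchronize (hipDeviceSynchronize in HIP) before the host dereferences the managed pointer again.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void add_one(float *v, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] += 1.0f;
}

int main() {
    const int n = 256;
    float *v = NULL;
    cudaMallocManaged(&v, n * sizeof(float));     // single pointer visible to host and device
    for (int i = 0; i < n; ++i) v[i] = (float)i;  // host initialization, no explicit memcpy

    add_one<<<(n + 127) / 128, 128>>>(v, n);
    cudaDeviceSynchronize();                      // required before the host touches managed data again

    printf("v[0] = %f, v[255] = %f\n", v[0], v[255]);   // expect 1.0 and 256.0
    cudaFree(v);
    return 0;
}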
06bbcd81c1b435562efe3331589f02486e7c23a0.hip
// !!! This is a file automatically generated by hipify!!! /** Genetic algorithm for finding function aproximation. GPU accelerated version Given data points {x, f(x)+noise} generated by noisy polynomial function f(x) = c4*x^3 + c3*x^2 + c2*x + c1, find unknown parameters c1, c2, c3 and c4. Inputs: The set of points on a surface (5001000); The size of population P (10002000); E_m , D_m mean and variance for Mutation to generate the random number of mutated genes; maxIter - the maximum number of generations, maxConstIter - the maximum number of generations with constant value of the best fitness. Outputs: The time of processing on GPU; The set of coefficients of the polynomial that approximates the given set of points; The best fitness value; The last generation number (number of evaluated iterations). */ #include <iostream> #include <cstdlib> #include <cstdio> #include <cmath> #include <time.h> #include <algorithm> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include "config.h" using namespace std; #define THREAD 128 #define BLOCK (POPULATION_SIZE / THREAD) // Reads input file with noisy points. Points will be approximated by // polynomial function using GA. static float *readData(const char *name, const int POINTS_CNT); // Gets last error and prints message when error is present static void check_cuda_error(const char *message); /** An individual fitness function is the difference between measured f(x) and approximated polynomial g(x), built using individual's coeficients, evaluated on input data points. Smaller value means bigger fitness */ __global__ void fitness_evaluate(float *individuals, float *points, float *fitness) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= POPULATION_SIZE) return; float sumError = 0.0f; //for every given data point for (int pt = 0; pt < N_POINTS; pt++) { float f_approx = 0.0f; //for every polynomial parameter: Ci * x^(order) for (int order = 0; order < INDIVIDUAL_LEN; order++) { f_approx += individuals[idx * INDIVIDUAL_LEN + order] * pow(points[pt], order); } sumError += pow(f_approx - points[N_POINTS + pt], 2); } //The lower value of fitness is, the better individual fits the model fitness[idx] = sumError; } /** Individual is set of coeficients c1-c4. 
For example: parent1 == [0 0 0 0] parent2 == [1 1 1 1] crosspoint(random between 1 and 3) = 2 then child1 = [0 0 1 1] child2 = [1 1 0 0] */ __global__ void crossover(float *population_dev, hiprandState_t *state) { int idx = blockDim.x * blockIdx.x + threadIdx.x; //Replace only second half of the population by new individuals //created by crossover from the first half of the population if ((idx >= POPULATION_SIZE) || (idx <= POPULATION_SIZE / 2)) return; //randomly select two fit parents for mating from the fittest half of the population hiprandState_t localState = state[idx]; int parent1_i = (hiprand(&localState) % (POPULATION_SIZE / 2)) * INDIVIDUAL_LEN; int parent2_i = (hiprand(&localState) % (POPULATION_SIZE / 2)) * INDIVIDUAL_LEN; //select crosspoint, do not select beginning and end of individual as crosspoint int crosspoint = hiprand(&localState) % (INDIVIDUAL_LEN - 2) + 1; state[idx] = localState; //move index to beginning of given individual in population matrix idx *= INDIVIDUAL_LEN; //do actual crossover for (int j = 0; j < crosspoint; j++) { population_dev[idx + j] = population_dev[parent1_i + j]; } for (int j = crosspoint; j < INDIVIDUAL_LEN; j++) { population_dev[idx + j] = population_dev[parent2_i + j]; } } /** Generates probabilities for mutation of individuals and their genes into arrays in device global memory */ static void generateMutProbab(float** mutIndivid, float **mutGene, hiprandGenerator_t generator) { //mutation rate of individuals hiprandGenerateNormal(generator, *mutIndivid, POPULATION_SIZE, mu_individuals, sigma_individuals); check_cuda_error("Error in normalGenerating 1"); //mutation rate of each gene hiprandGenerateNormal(generator, *mutGene, POPULATION_SIZE*INDIVIDUAL_LEN, mu_genes, sigma_genes); check_cuda_error("Error in normalGenerating 2"); } /** Mutation is addition of noise to genes, given mean and stddev. 
probabilities of mutating individuals and their genes is computed before calling this kernel @mutGene @mutIndivid For example(binary representation of genes): individual == [1 1 1 1] mutNumber = 2 loop 2 times: 1st: num_of_bit_to_mutate = 2 inverse individuals[2] -> [1 1 0 1] 2nd: num_of_bit_to_mutate = 0 inverse individuals[0] -> [0 1 0 1] return mutated individual [0 1 0 1] */ __global__ void mutation(float *individuals, hiprandState_t *state, float* mutIndivid, float* mutGene) { int idx = blockDim.x * blockIdx.x + threadIdx.x; //first individual is not mutated to keep the best solution unchanged if ((idx >= POPULATION_SIZE) || (idx < 1)) return; hiprandState_t localState = state[idx]; float mutationRate = mutIndivid[idx]; for (int j = 0; j < INDIVIDUAL_LEN; j++) { int flip_idx = idx * INDIVIDUAL_LEN + j; //probability of mutating gene if (mutGene[flip_idx] < mutationRate) { individuals[flip_idx] += 0.01f * (2 * hiprand_uniform(&localState) - 1); } } state[idx] = localState; } /** Sets up indexes for thrust::sort_by_key */ __global__ void setIndexes(int *indexes) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= POPULATION_SIZE) return; indexes[idx] = idx; } /* population - sorted individuals according to their fitness individuals with small (good) fitness value are put to the beginning individuals with large (bad) fitness value are placed at the end; */ __global__ void selection(float *population, float *newPopulation, int* indexes) { int idx = blockDim.x * blockIdx.x + threadIdx.x; //only first half needs to be placed in sorted manner //second half will be overwritten anyway if (idx > POPULATION_SIZE / 2) return; //reorder population so that fittest individuals are first for (int j = 0; j < INDIVIDUAL_LEN; j++) { newPopulation[idx * INDIVIDUAL_LEN + j] = population[indexes[idx] * INDIVIDUAL_LEN + j]; } } /** Initializes seed for CUDA random generator */ __global__ void initCurand(hiprandState_t *state) { int idx = blockDim.x * blockIdx.x + threadIdx.x; hiprand_init(1337, idx, 0, &state[idx]); } /** Initializes initial population by random values. Use range <-50.0, 50.0> Must provide greater state space (random num. interval), otherwise solution is found in first few steps (i.e. <-5,5> is found in very first iter.) 
*/ __global__ void initPopulation(float *population, hiprandState_t *state) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= POPULATION_SIZE) return; hiprandState_t localState = state[idx]; for (int i = 0; i < INDIVIDUAL_LEN; i++) population[idx * INDIVIDUAL_LEN + i] = 10 * hiprand_uniform(&localState) - 5; state[idx] = localState; } //------------------------------------------------------------------------------ /* ------------------------ | Main body of the GA | ------------------------ */ int main(int argc, char **argv) { if (argc != 2) { cout << "Usage: " << argv[0] << " inputFile" << endl; return -1; } //read input data //points are the data to approximate by a polynomial float *points = readData(argv[1], N_POINTS); /** Allocations of memory */ //device memory for holding input points float *points_dev; hipMalloc(&points_dev, 2*N_POINTS*sizeof(float)); // [x, f(x)+err] check_cuda_error("Error allocating device memory"); hipMemcpy(points_dev, points, 2*N_POINTS*sizeof(float), hipMemcpyHostToDevice); check_cuda_error("Error copying data"); //arrays to hold old and new population float *population_dev; hipMalloc(&population_dev, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float)); check_cuda_error("Error allocating device memory"); float *newPopulation_dev; hipMalloc(&newPopulation_dev, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float)); hipMemset(newPopulation_dev, 0, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float)); check_cuda_error("Error allocating device memory"); //arrays that keeps fitness of individuals withing current population float *fitness_dev; hipMalloc(&fitness_dev, POPULATION_SIZE*sizeof(float)); check_cuda_error("Error allocating device memory"); //key value for sorting int *indexes_dev; hipMalloc(&indexes_dev, POPULATION_SIZE*sizeof(int)); check_cuda_error("Error allocating device memory"); hiprandState_t *state_random; hipMalloc((void **)&state_random,POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(hiprandState_t)); check_cuda_error("Allocating memory for hiprandState_t"); //mutation probabilities float* mutIndivid_d; hipMalloc((void **) &mutIndivid_d,POPULATION_SIZE*sizeof(float)); check_cuda_error("Allocating memory in mutIndivid_d"); float* mutGene_d; hipMalloc((void **)&mutGene_d,POPULATION_SIZE*INDIVIDUAL_LEN*sizeof(float)); check_cuda_error("Allocating memory in mutGene_d"); //create PRNG for generating mutation probabilities hiprandGenerator_t generator; hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); check_cuda_error("Error in hiprandCreateGenerator"); hiprandSetPseudoRandomGeneratorSeed(generator, 0); check_cuda_error("Error in curandSeed"); //recast device pointers into thrust copatible pointers thrust::device_ptr<int> indexes_thrust = thrust::device_pointer_cast(indexes_dev); thrust::device_ptr<float> fitnesses_thrust = thrust::device_pointer_cast(fitness_dev); //Initialize first population (with zeros or some random values) hipLaunchKernelGGL(( initCurand), dim3(BLOCK), dim3(THREAD), 0, 0, state_random); hipLaunchKernelGGL(( initPopulation), dim3(BLOCK), dim3(THREAD), 0, 0, population_dev, state_random); //<-5, 5> /** Main GA loop */ int t1 = clock(); //start timer int generationNumber = 0; int noChangeIter = 0; float bestFitness = INFINITY; float previousBestFitness = INFINITY; while ( (generationNumber < maxGenerationNumber) /*&& (bestFitness > targetErr) && (noChangeIter < maxConstIter) */) { generationNumber++; /** crossover first half of the population and create new population */ hipLaunchKernelGGL(( crossover), dim3(BLOCK), 
dim3(THREAD), 0, 0, population_dev, state_random); hipDeviceSynchronize(); /** mutate population and childrens in the whole population*/ generateMutProbab(&mutIndivid_d, &mutGene_d, generator); hipDeviceSynchronize(); hipLaunchKernelGGL(( mutation), dim3(BLOCK), dim3(THREAD), 0, 0, population_dev, state_random, mutIndivid_d, mutGene_d); hipDeviceSynchronize(); /** evaluate fitness of individuals in population */ hipLaunchKernelGGL(( fitness_evaluate), dim3(BLOCK), dim3(THREAD), 0, 0, population_dev, points_dev, fitness_dev); hipDeviceSynchronize(); /** select individuals for mating to create the next generation, i.e. sort population according to its fitness and keep fittest individuals first in population */ hipLaunchKernelGGL(( setIndexes), dim3(BLOCK), dim3(THREAD), 0, 0, indexes_dev); hipDeviceSynchronize(); thrust::stable_sort_by_key(fitnesses_thrust, fitnesses_thrust + POPULATION_SIZE, indexes_thrust); hipLaunchKernelGGL(( selection), dim3(BLOCK), dim3(THREAD), 0, 0, population_dev, newPopulation_dev, indexes_dev); hipDeviceSynchronize(); //swap populations float *tmp = population_dev; population_dev = newPopulation_dev; newPopulation_dev = tmp; /** time step evaluation - convergence criterion check */ //get BEST FITNESS to host hipMemcpy(&bestFitness, fitness_dev, sizeof(float), hipMemcpyDeviceToHost); check_cuda_error("Coping fitnesses_dev[0] to host"); //check if the fitness is decreasing or if we are stuck at local minima if(fabs(bestFitness - previousBestFitness) < 0.001f) noChangeIter++; else noChangeIter = 0; previousBestFitness = bestFitness; #if defined(DEBUG) //log message cout << "#" << generationNumber<< " Fitness: " << bestFitness << \ " Iterations without change: " << noChangeIter << endl; #endif } int t2 = clock(); //stop timer cout << "------------------------------------------------------------" << endl; cout << "Finished! Found Solution:" << endl; //get solution from device to host float *solution = new float[INDIVIDUAL_LEN]; hipMemcpy(solution, population_dev, INDIVIDUAL_LEN*sizeof(float), hipMemcpyDeviceToHost); check_cuda_error("Coping solution to host"); //solution is first individual of population with the best params of a polynomial cout << "\tc0 = " << solution[0] << endl << "\tc1 = " << solution[1] << endl << "\tc2 = " << solution[2] << endl << "\tc3 = " << solution[3] << endl << "Best fitness: " << bestFitness << endl << "Generations: " << generationNumber << endl; cout << "Time for GPU calculation equals \033[35m" << (t2-t1)/(double)CLOCKS_PER_SEC << " seconds\033[0m" << endl; delete [] points; delete [] solution; hipFree(points_dev);//input points hipFree(fitness_dev);//fitness array hipFree(indexes_dev);//key for sorting hipFree(population_dev); hipFree(newPopulation_dev); hipFree(state_random);//state hiprand hipFree(mutIndivid_d);//mutation probability hipFree(mutGene_d);//mutation probability hiprandDestroyGenerator(generator); return 0; } //------------------------------------------------------------------------------ static float *readData(const char *name, const int POINTS_CNT) { FILE *file = fopen(name, "r"); float *points = new float[2 * POINTS_CNT]; if (file) { //x, f(x) for (int k = 0; k < POINTS_CNT; k++) { if (fscanf(file, "%f %f", &points[k], &points[POINTS_CNT + k]) == EOF) { cerr << "Unexpected end of input data" << endl; exit(1); } } fclose(file); cout << "Reading file - success!" << endl; } else { cerr << "Error while opening the file " << name << "!!!" 
<< endl; delete [] points; exit(1); } return points; } static void check_cuda_error(const char *message) { hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("\033[31mERROR: %s: %s\n\033[0m", message, hipGetErrorString(err)); exit(1); } }
06bbcd81c1b435562efe3331589f02486e7c23a0.cu
/** Genetic algorithm for finding function aproximation. GPU accelerated version Given data points {x, f(x)+noise} generated by noisy polynomial function f(x) = c4*x^3 + c3*x^2 + c2*x + c1, find unknown parameters c1, c2, c3 and c4. Inputs: • The set of points on a surface (500–1000); • The size of population P (1000–2000); • E_m , D_m – mean and variance for Mutation to generate the random number of mutated genes; • maxIter - the maximum number of generations, maxConstIter - the maximum number of generations with constant value of the best fitness. Outputs: • The time of processing on GPU; • The set of coefficients of the polynomial that approximates the given set of points; • The best fitness value; • The last generation number (number of evaluated iterations). */ #include <iostream> #include <cstdlib> #include <cstdio> #include <cmath> #include <time.h> #include <algorithm> #include <cuda.h> #include <curand.h> #include <curand_kernel.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include "config.h" using namespace std; #define THREAD 128 #define BLOCK (POPULATION_SIZE / THREAD) // Reads input file with noisy points. Points will be approximated by // polynomial function using GA. static float *readData(const char *name, const int POINTS_CNT); // Gets last error and prints message when error is present static void check_cuda_error(const char *message); /** An individual fitness function is the difference between measured f(x) and approximated polynomial g(x), built using individual's coeficients, evaluated on input data points. Smaller value means bigger fitness */ __global__ void fitness_evaluate(float *individuals, float *points, float *fitness) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= POPULATION_SIZE) return; float sumError = 0.0f; //for every given data point for (int pt = 0; pt < N_POINTS; pt++) { float f_approx = 0.0f; //for every polynomial parameter: Ci * x^(order) for (int order = 0; order < INDIVIDUAL_LEN; order++) { f_approx += individuals[idx * INDIVIDUAL_LEN + order] * pow(points[pt], order); } sumError += pow(f_approx - points[N_POINTS + pt], 2); } //The lower value of fitness is, the better individual fits the model fitness[idx] = sumError; } /** Individual is set of coeficients c1-c4. 
For example: parent1 == [0 0 0 0] parent2 == [1 1 1 1] crosspoint(random between 1 and 3) = 2 then child1 = [0 0 1 1] child2 = [1 1 0 0] */ __global__ void crossover(float *population_dev, curandState *state) { int idx = blockDim.x * blockIdx.x + threadIdx.x; //Replace only second half of the population by new individuals //created by crossover from the first half of the population if ((idx >= POPULATION_SIZE) || (idx <= POPULATION_SIZE / 2)) return; //randomly select two fit parents for mating from the fittest half of the population curandState localState = state[idx]; int parent1_i = (curand(&localState) % (POPULATION_SIZE / 2)) * INDIVIDUAL_LEN; int parent2_i = (curand(&localState) % (POPULATION_SIZE / 2)) * INDIVIDUAL_LEN; //select crosspoint, do not select beginning and end of individual as crosspoint int crosspoint = curand(&localState) % (INDIVIDUAL_LEN - 2) + 1; state[idx] = localState; //move index to beginning of given individual in population matrix idx *= INDIVIDUAL_LEN; //do actual crossover for (int j = 0; j < crosspoint; j++) { population_dev[idx + j] = population_dev[parent1_i + j]; } for (int j = crosspoint; j < INDIVIDUAL_LEN; j++) { population_dev[idx + j] = population_dev[parent2_i + j]; } } /** Generates probabilities for mutation of individuals and their genes into arrays in device global memory */ static void generateMutProbab(float** mutIndivid, float **mutGene, curandGenerator_t generator) { //mutation rate of individuals curandGenerateNormal(generator, *mutIndivid, POPULATION_SIZE, mu_individuals, sigma_individuals); check_cuda_error("Error in normalGenerating 1"); //mutation rate of each gene curandGenerateNormal(generator, *mutGene, POPULATION_SIZE*INDIVIDUAL_LEN, mu_genes, sigma_genes); check_cuda_error("Error in normalGenerating 2"); } /** Mutation is addition of noise to genes, given mean and stddev. 
probabilities of mutating individuals and their genes is computed before calling this kernel @mutGene @mutIndivid For example(binary representation of genes): individual == [1 1 1 1] mutNumber = 2 loop 2 times: 1st: num_of_bit_to_mutate = 2 inverse individuals[2] -> [1 1 0 1] 2nd: num_of_bit_to_mutate = 0 inverse individuals[0] -> [0 1 0 1] return mutated individual [0 1 0 1] */ __global__ void mutation(float *individuals, curandState *state, float* mutIndivid, float* mutGene) { int idx = blockDim.x * blockIdx.x + threadIdx.x; //first individual is not mutated to keep the best solution unchanged if ((idx >= POPULATION_SIZE) || (idx < 1)) return; curandState localState = state[idx]; float mutationRate = mutIndivid[idx]; for (int j = 0; j < INDIVIDUAL_LEN; j++) { int flip_idx = idx * INDIVIDUAL_LEN + j; //probability of mutating gene if (mutGene[flip_idx] < mutationRate) { individuals[flip_idx] += 0.01f * (2 * curand_uniform(&localState) - 1); } } state[idx] = localState; } /** Sets up indexes for thrust::sort_by_key */ __global__ void setIndexes(int *indexes) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= POPULATION_SIZE) return; indexes[idx] = idx; } /* population - sorted individuals according to their fitness individuals with small (good) fitness value are put to the beginning individuals with large (bad) fitness value are placed at the end; */ __global__ void selection(float *population, float *newPopulation, int* indexes) { int idx = blockDim.x * blockIdx.x + threadIdx.x; //only first half needs to be placed in sorted manner //second half will be overwritten anyway if (idx > POPULATION_SIZE / 2) return; //reorder population so that fittest individuals are first for (int j = 0; j < INDIVIDUAL_LEN; j++) { newPopulation[idx * INDIVIDUAL_LEN + j] = population[indexes[idx] * INDIVIDUAL_LEN + j]; } } /** Initializes seed for CUDA random generator */ __global__ void initCurand(curandState *state) { int idx = blockDim.x * blockIdx.x + threadIdx.x; curand_init(1337, idx, 0, &state[idx]); } /** Initializes initial population by random values. Use range <-50.0, 50.0> Must provide greater state space (random num. interval), otherwise solution is found in first few steps (i.e. <-5,5> is found in very first iter.) 
*/ __global__ void initPopulation(float *population, curandState *state) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= POPULATION_SIZE) return; curandState localState = state[idx]; for (int i = 0; i < INDIVIDUAL_LEN; i++) population[idx * INDIVIDUAL_LEN + i] = 10 * curand_uniform(&localState) - 5; state[idx] = localState; } //------------------------------------------------------------------------------ /* ------------------------ | Main body of the GA | ------------------------ */ int main(int argc, char **argv) { if (argc != 2) { cout << "Usage: " << argv[0] << " inputFile" << endl; return -1; } //read input data //points are the data to approximate by a polynomial float *points = readData(argv[1], N_POINTS); /** Allocations of memory */ //device memory for holding input points float *points_dev; cudaMalloc(&points_dev, 2*N_POINTS*sizeof(float)); // [x, f(x)+err] check_cuda_error("Error allocating device memory"); cudaMemcpy(points_dev, points, 2*N_POINTS*sizeof(float), cudaMemcpyHostToDevice); check_cuda_error("Error copying data"); //arrays to hold old and new population float *population_dev; cudaMalloc(&population_dev, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float)); check_cuda_error("Error allocating device memory"); float *newPopulation_dev; cudaMalloc(&newPopulation_dev, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float)); cudaMemset(newPopulation_dev, 0, POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(float)); check_cuda_error("Error allocating device memory"); //arrays that keeps fitness of individuals withing current population float *fitness_dev; cudaMalloc(&fitness_dev, POPULATION_SIZE*sizeof(float)); check_cuda_error("Error allocating device memory"); //key value for sorting int *indexes_dev; cudaMalloc(&indexes_dev, POPULATION_SIZE*sizeof(int)); check_cuda_error("Error allocating device memory"); curandState *state_random; cudaMalloc((void **)&state_random,POPULATION_SIZE * INDIVIDUAL_LEN * sizeof(curandState)); check_cuda_error("Allocating memory for curandState"); //mutation probabilities float* mutIndivid_d; cudaMalloc((void **) &mutIndivid_d,POPULATION_SIZE*sizeof(float)); check_cuda_error("Allocating memory in mutIndivid_d"); float* mutGene_d; cudaMalloc((void **)&mutGene_d,POPULATION_SIZE*INDIVIDUAL_LEN*sizeof(float)); check_cuda_error("Allocating memory in mutGene_d"); //create PRNG for generating mutation probabilities curandGenerator_t generator; curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); check_cuda_error("Error in curandCreateGenerator"); curandSetPseudoRandomGeneratorSeed(generator, 0); check_cuda_error("Error in curandSeed"); //recast device pointers into thrust copatible pointers thrust::device_ptr<int> indexes_thrust = thrust::device_pointer_cast(indexes_dev); thrust::device_ptr<float> fitnesses_thrust = thrust::device_pointer_cast(fitness_dev); //Initialize first population (with zeros or some random values) initCurand<<<BLOCK, THREAD>>>(state_random); initPopulation<<<BLOCK, THREAD>>>(population_dev, state_random); //<-5, 5> /** Main GA loop */ int t1 = clock(); //start timer int generationNumber = 0; int noChangeIter = 0; float bestFitness = INFINITY; float previousBestFitness = INFINITY; while ( (generationNumber < maxGenerationNumber) /*&& (bestFitness > targetErr) && (noChangeIter < maxConstIter) */) { generationNumber++; /** crossover first half of the population and create new population */ crossover<<<BLOCK, THREAD>>>(population_dev, state_random); cudaDeviceSynchronize(); /** mutate population and childrens in the whole 
population*/ generateMutProbab(&mutIndivid_d, &mutGene_d, generator); cudaDeviceSynchronize(); mutation<<<BLOCK, THREAD>>>(population_dev, state_random, mutIndivid_d, mutGene_d); cudaDeviceSynchronize(); /** evaluate fitness of individuals in population */ fitness_evaluate<<<BLOCK, THREAD>>>(population_dev, points_dev, fitness_dev); cudaDeviceSynchronize(); /** select individuals for mating to create the next generation, i.e. sort population according to its fitness and keep fittest individuals first in population */ setIndexes<<<BLOCK, THREAD>>>(indexes_dev); cudaDeviceSynchronize(); thrust::stable_sort_by_key(fitnesses_thrust, fitnesses_thrust + POPULATION_SIZE, indexes_thrust); selection<<<BLOCK, THREAD>>>(population_dev, newPopulation_dev, indexes_dev); cudaDeviceSynchronize(); //swap populations float *tmp = population_dev; population_dev = newPopulation_dev; newPopulation_dev = tmp; /** time step evaluation - convergence criterion check */ //get BEST FITNESS to host cudaMemcpy(&bestFitness, fitness_dev, sizeof(float), cudaMemcpyDeviceToHost); check_cuda_error("Coping fitnesses_dev[0] to host"); //check if the fitness is decreasing or if we are stuck at local minima if(fabs(bestFitness - previousBestFitness) < 0.001f) noChangeIter++; else noChangeIter = 0; previousBestFitness = bestFitness; #if defined(DEBUG) //log message cout << "#" << generationNumber<< " Fitness: " << bestFitness << \ " Iterations without change: " << noChangeIter << endl; #endif } int t2 = clock(); //stop timer cout << "------------------------------------------------------------" << endl; cout << "Finished! Found Solution:" << endl; //get solution from device to host float *solution = new float[INDIVIDUAL_LEN]; cudaMemcpy(solution, population_dev, INDIVIDUAL_LEN*sizeof(float), cudaMemcpyDeviceToHost); check_cuda_error("Coping solution to host"); //solution is first individual of population with the best params of a polynomial cout << "\tc0 = " << solution[0] << endl << "\tc1 = " << solution[1] << endl << "\tc2 = " << solution[2] << endl << "\tc3 = " << solution[3] << endl << "Best fitness: " << bestFitness << endl << "Generations: " << generationNumber << endl; cout << "Time for GPU calculation equals \033[35m" << (t2-t1)/(double)CLOCKS_PER_SEC << " seconds\033[0m" << endl; delete [] points; delete [] solution; cudaFree(points_dev);//input points cudaFree(fitness_dev);//fitness array cudaFree(indexes_dev);//key for sorting cudaFree(population_dev); cudaFree(newPopulation_dev); cudaFree(state_random);//state curand cudaFree(mutIndivid_d);//mutation probability cudaFree(mutGene_d);//mutation probability curandDestroyGenerator(generator); return 0; } //------------------------------------------------------------------------------ static float *readData(const char *name, const int POINTS_CNT) { FILE *file = fopen(name, "r"); float *points = new float[2 * POINTS_CNT]; if (file) { //x, f(x) for (int k = 0; k < POINTS_CNT; k++) { if (fscanf(file, "%f %f", &points[k], &points[POINTS_CNT + k]) == EOF) { cerr << "Unexpected end of input data" << endl; exit(1); } } fclose(file); cout << "Reading file - success!" << endl; } else { cerr << "Error while opening the file " << name << "!!!" << endl; delete [] points; exit(1); } return points; } static void check_cuda_error(const char *message) { cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("\033[31mERROR: %s: %s\n\033[0m", message, cudaGetErrorString(err)); exit(1); } }
e7366b9f28b558a0f96062cdeae7a40fa2b69f5a.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>

#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <list>

using namespace std;

hipError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size);

__global__ void addKernel(int* c, const int* a, const int* b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// __global__ functions, or "kernels", execute on the device
__global__ void hello_kernel(void)
{
    printf("Hello, prime number will be generated from the gpu device!\n");
    // placeholders for a prime-number loop that is not implemented yet
    int low = 0, high = 10, i;
    bool isPrime = true;
}

int main(void)
{
    // bounds left over from the prime-number sketch above (currently unused)
    const int low = 0;
    const int high = 10;

    // greet from the host
    printf("Hello, starting the host!\n");

    // launch a kernel with a single thread to greet from the device
    hello_kernel << <1, 1 >> > ();

    // wait for the device to finish so that we see the message
    hipDeviceSynchronize();

    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size)
{
    int* dev_a = 0;
    int* dev_b = 0;
    int* dev_c = 0;
    hipError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel << <1, size >> > (dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }

    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

Error:
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);

    return cudaStatus;
}
e7366b9f28b558a0f96062cdeae7a40fa2b69f5a.cu
#include <stdio.h>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <list>

using namespace std;

cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size);

__global__ void addKernel(int* c, const int* a, const int* b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// __global__ functions, or "kernels", execute on the device
__global__ void hello_kernel(void)
{
    printf("Hello, prime number will be generated from the gpu device!\n");
    // placeholders for a prime-number loop that is not implemented yet
    int low = 0, high = 10, i;
    bool isPrime = true;
}

int main(void)
{
    // bounds left over from the prime-number sketch above (currently unused)
    const int low = 0;
    const int high = 10;

    // greet from the host
    printf("Hello, starting the host!\n");

    // launch a kernel with a single thread to greet from the device
    hello_kernel << <1, 1 >> > ();

    // wait for the device to finish so that we see the message
    cudaDeviceSynchronize();

    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size)
{
    int* dev_a = 0;
    int* dev_b = 0;
    int* dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel << <1, size >> > (dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);

    return cudaStatus;
}
abb5c891649a893dadf77f31d9e23adfdc9673a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "ParticleData.cuh" /*! \file ParticleData.cu \brief ImplementsGPU kernel code and data structure functions used by ParticleData */ #ifdef ENABLE_MPI #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #include <hipcub/hipcub.hpp> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/scatter.h> #pragma GCC diagnostic pop namespace hoomd { namespace kernel { //! Kernel to partition particle data __global__ void gpu_scatter_particle_data_kernel(const unsigned int nwork, const Scalar4* d_pos, const Scalar4* d_vel, const Scalar3* d_accel, const Scalar* d_charge, const Scalar* d_diameter, const int3* d_image, const unsigned int* d_body, const Scalar4* d_orientation, const Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_force, const Scalar4* d_net_torque, const Scalar* d_net_virial, unsigned int net_virial_pitch, const unsigned int* d_tag, unsigned int* d_rtag, Scalar4* d_pos_alt, Scalar4* d_vel_alt, Scalar3* d_accel_alt, Scalar* d_charge_alt, Scalar* d_diameter_alt, int3* d_image_alt, unsigned int* d_body_alt, Scalar4* d_orientation_alt, Scalar4* d_angmom_alt, Scalar3* d_inertia_alt, Scalar4* d_net_force_alt, Scalar4* d_net_torque_alt, Scalar* d_net_virial_alt, unsigned int* d_tag_alt, detail::pdata_element* d_out, unsigned int* d_comm_flags, unsigned int* d_comm_flags_out, const unsigned int* d_scan, const unsigned int offset) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= nwork) return; idx += offset; bool remove = d_comm_flags[idx]; unsigned int scan_remove = d_scan[idx]; unsigned int scan_keep = idx - scan_remove; if (remove) { detail::pdata_element p; p.pos = d_pos[idx]; p.vel = d_vel[idx]; p.accel = d_accel[idx]; p.charge = d_charge[idx]; p.diameter = d_diameter[idx]; p.image = d_image[idx]; p.body = d_body[idx]; p.orientation = d_orientation[idx]; p.angmom = d_angmom[idx]; p.inertia = d_inertia[idx]; p.net_force = d_net_force[idx]; p.net_torque = d_net_torque[idx]; for (unsigned int j = 0; j < 6; ++j) p.net_virial[j] = d_net_virial[j * net_virial_pitch + idx]; p.tag = d_tag[idx]; d_out[scan_remove] = p; d_comm_flags_out[scan_remove] = d_comm_flags[idx]; // reset communication flags d_comm_flags[idx] = 0; // reset rtag d_rtag[p.tag] = NOT_LOCAL; } else { d_pos_alt[scan_keep] = d_pos[idx]; d_vel_alt[scan_keep] = d_vel[idx]; d_accel_alt[scan_keep] = d_accel[idx]; d_charge_alt[scan_keep] = d_charge[idx]; d_diameter_alt[scan_keep] = d_diameter[idx]; d_image_alt[scan_keep] = d_image[idx]; d_body_alt[scan_keep] = d_body[idx]; d_orientation_alt[scan_keep] = d_orientation[idx]; d_angmom_alt[scan_keep] = d_angmom[idx]; d_inertia_alt[scan_keep] = d_inertia[idx]; d_net_force_alt[scan_keep] = d_net_force[idx]; d_net_torque_alt[scan_keep] = d_net_torque[idx]; for (unsigned int j = 0; j < 6; ++j) d_net_virial_alt[j * net_virial_pitch + scan_keep] = d_net_virial[j * net_virial_pitch + idx]; unsigned int tag = d_tag[idx]; d_tag_alt[scan_keep] = tag; // update rtag d_rtag[tag] = scan_keep; } } __global__ void gpu_select_sent_particles(unsigned int N, unsigned int* d_comm_flags, unsigned int* d_tmp) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; d_tmp[idx] = d_comm_flags[idx] ? 1 : 0; } /*! 
\param N Number of local particles \param d_pos Device array of particle positions \param d_vel Device array of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_net_force Net force \param d_net_torque Net torque \param d_net_virial Net virial \param net_virial_pitch Pitch of net virial array \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_pos_alt Device array of particle positions (output) \param d_vel_alt Device array of particle velocities (output) \param d_accel_alt Device array of particle accelerations (output) \param d_charge_alt Device array of particle charges (output) \param d_diameter_alt Device array of particle diameters (output) \param d_image_alt Device array of particle images (output) \param d_body_alt Device array of particle body tags (output) \param d_orientation_alt Device array of particle orientations (output) \param d_angmom_alt Device array of particle angular momenta (output) \param d_inertia Device array of particle moments of inertia (output) \param d_net_force Net force (output) \param d_net_torque Net torque (output) \param d_net_virial Net virial (output) \param d_out Output array for packed particle data \param max_n_out Maximum number of elements to write to output array \returns Number of elements marked for removal */ unsigned int gpu_pdata_remove(const unsigned int N, const Scalar4* d_pos, const Scalar4* d_vel, const Scalar3* d_accel, const Scalar* d_charge, const Scalar* d_diameter, const int3* d_image, const unsigned int* d_body, const Scalar4* d_orientation, const Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_force, const Scalar4* d_net_torque, const Scalar* d_net_virial, unsigned int net_virial_pitch, const unsigned int* d_tag, unsigned int* d_rtag, Scalar4* d_pos_alt, Scalar4* d_vel_alt, Scalar3* d_accel_alt, Scalar* d_charge_alt, Scalar* d_diameter_alt, int3* d_image_alt, unsigned int* d_body_alt, Scalar4* d_orientation_alt, Scalar4* d_angmom_alt, Scalar3* d_inertia_alt, Scalar4* d_net_force_alt, Scalar4* d_net_torque_alt, Scalar* d_net_virial_alt, unsigned int* d_tag_alt, detail::pdata_element* d_out, unsigned int* d_comm_flags, unsigned int* d_comm_flags_out, unsigned int max_n_out, unsigned int* d_tmp, CachedAllocator& alloc, GPUPartition& gpu_partition) { if (!N) return 0; assert(d_pos); assert(d_vel); assert(d_accel); assert(d_charge); assert(d_diameter); assert(d_image); assert(d_body); assert(d_orientation); assert(d_angmom); assert(d_inertia); assert(d_net_force); assert(d_net_torque); assert(d_net_virial); assert(d_tag); assert(d_rtag); assert(d_pos_alt); assert(d_vel_alt); assert(d_accel_alt); assert(d_charge_alt); assert(d_diameter_alt); assert(d_image_alt); assert(d_body_alt); assert(d_orientation_alt); assert(d_angmom_alt); assert(d_inertia_alt); assert(d_net_force_alt); assert(d_net_torque_alt); assert(d_net_virial_alt); assert(d_tag_alt); assert(d_out); assert(d_comm_flags); assert(d_comm_flags_out); assert(d_tmp); unsigned int n_out; // partition particle data into local and removed particles unsigned int block_size = 256; unsigned int n_blocks = N / 
block_size + 1; // select nonzero communication flags hipLaunchKernelGGL(gpu_select_sent_particles, dim3(n_blocks), dim3(block_size), 0, 0, N, d_comm_flags, d_tmp); // perform a scan over the array of ones and zeroes void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; // determine size of temporary storage unsigned int* d_scan = alloc.getTemporaryBuffer<unsigned int>(N); assert(d_scan); hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_tmp, d_scan, N); d_temp_storage = alloc.getTemporaryBuffer<char>(temp_storage_bytes); hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_tmp, d_scan, N); alloc.deallocate((char*)d_temp_storage); // determine total number of sent particles d_temp_storage = NULL; temp_storage_bytes = 0; unsigned int* d_n_out = (unsigned int*)alloc.getTemporaryBuffer<unsigned int>(1); assert(d_n_out); hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp, d_n_out, N); d_temp_storage = alloc.allocate(temp_storage_bytes); hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp, d_n_out, N); alloc.deallocate((char*)d_temp_storage); hipMemcpy(&n_out, d_n_out, sizeof(unsigned int), hipMemcpyDeviceToHost); alloc.deallocate((char*)d_n_out); // Don't write past end of buffer if (n_out <= max_n_out) { // partition particle data into local and removed particles for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; unsigned int offset = range.first; unsigned int block_size = 256; unsigned int n_blocks = nwork / block_size + 1; hipLaunchKernelGGL(gpu_scatter_particle_data_kernel, dim3(n_blocks), dim3(block_size), 0, 0, nwork, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_net_force, d_net_torque, d_net_virial, net_virial_pitch, d_tag, d_rtag, d_pos_alt, d_vel_alt, d_accel_alt, d_charge_alt, d_diameter_alt, d_image_alt, d_body_alt, d_orientation_alt, d_angmom_alt, d_inertia_alt, d_net_force_alt, d_net_torque_alt, d_net_virial_alt, d_tag_alt, d_out, d_comm_flags, d_comm_flags_out, d_scan, offset); } } // free temp buf alloc.deallocate((char*)d_scan); // return elements written to output stream return n_out; } __global__ void gpu_pdata_add_particles_kernel(unsigned int old_nparticles, unsigned int num_add_ptls, Scalar4* d_pos, Scalar4* d_vel, Scalar3* d_accel, Scalar* d_charge, Scalar* d_diameter, int3* d_image, unsigned int* d_body, Scalar4* d_orientation, Scalar4* d_angmom, Scalar3* d_inertia, Scalar4* d_net_force, Scalar4* d_net_torque, Scalar* d_net_virial, unsigned int net_virial_pitch, unsigned int* d_tag, unsigned int* d_rtag, const detail::pdata_element* d_in, unsigned int* d_comm_flags) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= num_add_ptls) return; detail::pdata_element p = d_in[idx]; unsigned int add_idx = old_nparticles + idx; d_pos[add_idx] = p.pos; d_vel[add_idx] = p.vel; d_accel[add_idx] = p.accel; d_charge[add_idx] = p.charge; d_diameter[add_idx] = p.diameter; d_image[add_idx] = p.image; d_body[add_idx] = p.body; d_orientation[add_idx] = p.orientation; d_angmom[add_idx] = p.angmom; d_inertia[add_idx] = p.inertia; d_net_force[add_idx] = p.net_force; d_net_torque[add_idx] = p.net_torque; for (unsigned int j = 0; j < 6; ++j) d_net_virial[j * net_virial_pitch + add_idx] = p.net_virial[j]; d_tag[add_idx] = p.tag; d_rtag[p.tag] = add_idx; d_comm_flags[add_idx] = 0; } /*! 
\param old_nparticles old local particle count \param num_add_ptls Number of particles in input array \param d_pos Device array of particle positions \param d_vel Device iarray of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_net_force Net force \param d_net_torque Net torque \param d_net_virial Net virial \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_in Device array of packed input particle data \param d_comm_flags Device array of communication flags (pdata) */ void gpu_pdata_add_particles(const unsigned int old_nparticles, const unsigned int num_add_ptls, Scalar4* d_pos, Scalar4* d_vel, Scalar3* d_accel, Scalar* d_charge, Scalar* d_diameter, int3* d_image, unsigned int* d_body, Scalar4* d_orientation, Scalar4* d_angmom, Scalar3* d_inertia, Scalar4* d_net_force, Scalar4* d_net_torque, Scalar* d_net_virial, unsigned int net_virial_pitch, unsigned int* d_tag, unsigned int* d_rtag, const detail::pdata_element* d_in, unsigned int* d_comm_flags) { assert(d_pos); assert(d_vel); assert(d_accel); assert(d_charge); assert(d_diameter); assert(d_image); assert(d_body); assert(d_orientation); assert(d_angmom); assert(d_inertia); assert(d_net_force); assert(d_net_torque); assert(d_net_virial); assert(d_tag); assert(d_rtag); assert(d_in); unsigned int block_size = 256; unsigned int n_blocks = num_add_ptls / block_size + 1; hipLaunchKernelGGL(gpu_pdata_add_particles_kernel, dim3(n_blocks), dim3(block_size), 0, 0, old_nparticles, num_add_ptls, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_net_force, d_net_torque, d_net_virial, net_virial_pitch, d_tag, d_rtag, d_in, d_comm_flags); } } // end namespace kernel } // end namespace hoomd #endif // ENABLE_MPI
abb5c891649a893dadf77f31d9e23adfdc9673a0.cu
// Copyright (c) 2009-2023 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "ParticleData.cuh" /*! \file ParticleData.cu \brief ImplementsGPU kernel code and data structure functions used by ParticleData */ #ifdef ENABLE_MPI #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #include <hipcub/hipcub.hpp> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/scatter.h> #pragma GCC diagnostic pop namespace hoomd { namespace kernel { //! Kernel to partition particle data __global__ void gpu_scatter_particle_data_kernel(const unsigned int nwork, const Scalar4* d_pos, const Scalar4* d_vel, const Scalar3* d_accel, const Scalar* d_charge, const Scalar* d_diameter, const int3* d_image, const unsigned int* d_body, const Scalar4* d_orientation, const Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_force, const Scalar4* d_net_torque, const Scalar* d_net_virial, unsigned int net_virial_pitch, const unsigned int* d_tag, unsigned int* d_rtag, Scalar4* d_pos_alt, Scalar4* d_vel_alt, Scalar3* d_accel_alt, Scalar* d_charge_alt, Scalar* d_diameter_alt, int3* d_image_alt, unsigned int* d_body_alt, Scalar4* d_orientation_alt, Scalar4* d_angmom_alt, Scalar3* d_inertia_alt, Scalar4* d_net_force_alt, Scalar4* d_net_torque_alt, Scalar* d_net_virial_alt, unsigned int* d_tag_alt, detail::pdata_element* d_out, unsigned int* d_comm_flags, unsigned int* d_comm_flags_out, const unsigned int* d_scan, const unsigned int offset) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= nwork) return; idx += offset; bool remove = d_comm_flags[idx]; unsigned int scan_remove = d_scan[idx]; unsigned int scan_keep = idx - scan_remove; if (remove) { detail::pdata_element p; p.pos = d_pos[idx]; p.vel = d_vel[idx]; p.accel = d_accel[idx]; p.charge = d_charge[idx]; p.diameter = d_diameter[idx]; p.image = d_image[idx]; p.body = d_body[idx]; p.orientation = d_orientation[idx]; p.angmom = d_angmom[idx]; p.inertia = d_inertia[idx]; p.net_force = d_net_force[idx]; p.net_torque = d_net_torque[idx]; for (unsigned int j = 0; j < 6; ++j) p.net_virial[j] = d_net_virial[j * net_virial_pitch + idx]; p.tag = d_tag[idx]; d_out[scan_remove] = p; d_comm_flags_out[scan_remove] = d_comm_flags[idx]; // reset communication flags d_comm_flags[idx] = 0; // reset rtag d_rtag[p.tag] = NOT_LOCAL; } else { d_pos_alt[scan_keep] = d_pos[idx]; d_vel_alt[scan_keep] = d_vel[idx]; d_accel_alt[scan_keep] = d_accel[idx]; d_charge_alt[scan_keep] = d_charge[idx]; d_diameter_alt[scan_keep] = d_diameter[idx]; d_image_alt[scan_keep] = d_image[idx]; d_body_alt[scan_keep] = d_body[idx]; d_orientation_alt[scan_keep] = d_orientation[idx]; d_angmom_alt[scan_keep] = d_angmom[idx]; d_inertia_alt[scan_keep] = d_inertia[idx]; d_net_force_alt[scan_keep] = d_net_force[idx]; d_net_torque_alt[scan_keep] = d_net_torque[idx]; for (unsigned int j = 0; j < 6; ++j) d_net_virial_alt[j * net_virial_pitch + scan_keep] = d_net_virial[j * net_virial_pitch + idx]; unsigned int tag = d_tag[idx]; d_tag_alt[scan_keep] = tag; // update rtag d_rtag[tag] = scan_keep; } } __global__ void gpu_select_sent_particles(unsigned int N, unsigned int* d_comm_flags, unsigned int* d_tmp) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; d_tmp[idx] = d_comm_flags[idx] ? 1 : 0; } /*! 
\param N Number of local particles \param d_pos Device array of particle positions \param d_vel Device array of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_net_force Net force \param d_net_torque Net torque \param d_net_virial Net virial \param net_virial_pitch Pitch of net virial array \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_pos_alt Device array of particle positions (output) \param d_vel_alt Device array of particle velocities (output) \param d_accel_alt Device array of particle accelerations (output) \param d_charge_alt Device array of particle charges (output) \param d_diameter_alt Device array of particle diameters (output) \param d_image_alt Device array of particle images (output) \param d_body_alt Device array of particle body tags (output) \param d_orientation_alt Device array of particle orientations (output) \param d_angmom_alt Device array of particle angular momenta (output) \param d_inertia Device array of particle moments of inertia (output) \param d_net_force Net force (output) \param d_net_torque Net torque (output) \param d_net_virial Net virial (output) \param d_out Output array for packed particle data \param max_n_out Maximum number of elements to write to output array \returns Number of elements marked for removal */ unsigned int gpu_pdata_remove(const unsigned int N, const Scalar4* d_pos, const Scalar4* d_vel, const Scalar3* d_accel, const Scalar* d_charge, const Scalar* d_diameter, const int3* d_image, const unsigned int* d_body, const Scalar4* d_orientation, const Scalar4* d_angmom, const Scalar3* d_inertia, const Scalar4* d_net_force, const Scalar4* d_net_torque, const Scalar* d_net_virial, unsigned int net_virial_pitch, const unsigned int* d_tag, unsigned int* d_rtag, Scalar4* d_pos_alt, Scalar4* d_vel_alt, Scalar3* d_accel_alt, Scalar* d_charge_alt, Scalar* d_diameter_alt, int3* d_image_alt, unsigned int* d_body_alt, Scalar4* d_orientation_alt, Scalar4* d_angmom_alt, Scalar3* d_inertia_alt, Scalar4* d_net_force_alt, Scalar4* d_net_torque_alt, Scalar* d_net_virial_alt, unsigned int* d_tag_alt, detail::pdata_element* d_out, unsigned int* d_comm_flags, unsigned int* d_comm_flags_out, unsigned int max_n_out, unsigned int* d_tmp, CachedAllocator& alloc, GPUPartition& gpu_partition) { if (!N) return 0; assert(d_pos); assert(d_vel); assert(d_accel); assert(d_charge); assert(d_diameter); assert(d_image); assert(d_body); assert(d_orientation); assert(d_angmom); assert(d_inertia); assert(d_net_force); assert(d_net_torque); assert(d_net_virial); assert(d_tag); assert(d_rtag); assert(d_pos_alt); assert(d_vel_alt); assert(d_accel_alt); assert(d_charge_alt); assert(d_diameter_alt); assert(d_image_alt); assert(d_body_alt); assert(d_orientation_alt); assert(d_angmom_alt); assert(d_inertia_alt); assert(d_net_force_alt); assert(d_net_torque_alt); assert(d_net_virial_alt); assert(d_tag_alt); assert(d_out); assert(d_comm_flags); assert(d_comm_flags_out); assert(d_tmp); unsigned int n_out; // partition particle data into local and removed particles unsigned int block_size = 256; unsigned int n_blocks = N / 
block_size + 1; // select nonzero communication flags hipLaunchKernelGGL(gpu_select_sent_particles, dim3(n_blocks), dim3(block_size), 0, 0, N, d_comm_flags, d_tmp); // perform a scan over the array of ones and zeroes void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; // determine size of temporary storage unsigned int* d_scan = alloc.getTemporaryBuffer<unsigned int>(N); assert(d_scan); hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_tmp, d_scan, N); d_temp_storage = alloc.getTemporaryBuffer<char>(temp_storage_bytes); hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_tmp, d_scan, N); alloc.deallocate((char*)d_temp_storage); // determine total number of sent particles d_temp_storage = NULL; temp_storage_bytes = 0; unsigned int* d_n_out = (unsigned int*)alloc.getTemporaryBuffer<unsigned int>(1); assert(d_n_out); hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp, d_n_out, N); d_temp_storage = alloc.allocate(temp_storage_bytes); hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp, d_n_out, N); alloc.deallocate((char*)d_temp_storage); hipMemcpy(&n_out, d_n_out, sizeof(unsigned int), hipMemcpyDeviceToHost); alloc.deallocate((char*)d_n_out); // Don't write past end of buffer if (n_out <= max_n_out) { // partition particle data into local and removed particles for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; unsigned int offset = range.first; unsigned int block_size = 256; unsigned int n_blocks = nwork / block_size + 1; hipLaunchKernelGGL(gpu_scatter_particle_data_kernel, dim3(n_blocks), dim3(block_size), 0, 0, nwork, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_net_force, d_net_torque, d_net_virial, net_virial_pitch, d_tag, d_rtag, d_pos_alt, d_vel_alt, d_accel_alt, d_charge_alt, d_diameter_alt, d_image_alt, d_body_alt, d_orientation_alt, d_angmom_alt, d_inertia_alt, d_net_force_alt, d_net_torque_alt, d_net_virial_alt, d_tag_alt, d_out, d_comm_flags, d_comm_flags_out, d_scan, offset); } } // free temp buf alloc.deallocate((char*)d_scan); // return elements written to output stream return n_out; } __global__ void gpu_pdata_add_particles_kernel(unsigned int old_nparticles, unsigned int num_add_ptls, Scalar4* d_pos, Scalar4* d_vel, Scalar3* d_accel, Scalar* d_charge, Scalar* d_diameter, int3* d_image, unsigned int* d_body, Scalar4* d_orientation, Scalar4* d_angmom, Scalar3* d_inertia, Scalar4* d_net_force, Scalar4* d_net_torque, Scalar* d_net_virial, unsigned int net_virial_pitch, unsigned int* d_tag, unsigned int* d_rtag, const detail::pdata_element* d_in, unsigned int* d_comm_flags) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= num_add_ptls) return; detail::pdata_element p = d_in[idx]; unsigned int add_idx = old_nparticles + idx; d_pos[add_idx] = p.pos; d_vel[add_idx] = p.vel; d_accel[add_idx] = p.accel; d_charge[add_idx] = p.charge; d_diameter[add_idx] = p.diameter; d_image[add_idx] = p.image; d_body[add_idx] = p.body; d_orientation[add_idx] = p.orientation; d_angmom[add_idx] = p.angmom; d_inertia[add_idx] = p.inertia; d_net_force[add_idx] = p.net_force; d_net_torque[add_idx] = p.net_torque; for (unsigned int j = 0; j < 6; ++j) d_net_virial[j * net_virial_pitch + add_idx] = p.net_virial[j]; d_tag[add_idx] = p.tag; d_rtag[p.tag] = add_idx; d_comm_flags[add_idx] = 0; } /*! 
\param old_nparticles old local particle count \param num_add_ptls Number of particles in input array \param d_pos Device array of particle positions \param d_vel Device iarray of particle velocities \param d_accel Device array of particle accelerations \param d_charge Device array of particle charges \param d_diameter Device array of particle diameters \param d_image Device array of particle images \param d_body Device array of particle body tags \param d_orientation Device array of particle orientations \param d_angmom Device array of particle angular momenta \param d_inertia Device array of particle moments of inertia \param d_net_force Net force \param d_net_torque Net torque \param d_net_virial Net virial \param d_tag Device array of particle tags \param d_rtag Device array for reverse-lookup table \param d_in Device array of packed input particle data \param d_comm_flags Device array of communication flags (pdata) */ void gpu_pdata_add_particles(const unsigned int old_nparticles, const unsigned int num_add_ptls, Scalar4* d_pos, Scalar4* d_vel, Scalar3* d_accel, Scalar* d_charge, Scalar* d_diameter, int3* d_image, unsigned int* d_body, Scalar4* d_orientation, Scalar4* d_angmom, Scalar3* d_inertia, Scalar4* d_net_force, Scalar4* d_net_torque, Scalar* d_net_virial, unsigned int net_virial_pitch, unsigned int* d_tag, unsigned int* d_rtag, const detail::pdata_element* d_in, unsigned int* d_comm_flags) { assert(d_pos); assert(d_vel); assert(d_accel); assert(d_charge); assert(d_diameter); assert(d_image); assert(d_body); assert(d_orientation); assert(d_angmom); assert(d_inertia); assert(d_net_force); assert(d_net_torque); assert(d_net_virial); assert(d_tag); assert(d_rtag); assert(d_in); unsigned int block_size = 256; unsigned int n_blocks = num_add_ptls / block_size + 1; hipLaunchKernelGGL(gpu_pdata_add_particles_kernel, dim3(n_blocks), dim3(block_size), 0, 0, old_nparticles, num_add_ptls, d_pos, d_vel, d_accel, d_charge, d_diameter, d_image, d_body, d_orientation, d_angmom, d_inertia, d_net_force, d_net_torque, d_net_virial, net_virial_pitch, d_tag, d_rtag, d_in, d_comm_flags); } } // end namespace kernel } // end namespace hoomd #endif // ENABLE_MPI
9d12e0a9ffc3132afb28aacfd662597f3444cde9.hip
// !!! This is a file automatically generated by hipify!!! #include <nvgraph_gdf.h> #include <thrust/device_vector.h> #include <ctime> #include "utilities/error_utils.h" //RMM: // #include <rmm_utils.h> template<typename T> using Vector = thrust::device_vector<T, rmm_allocator<T>>; gdf_error nvgraph2gdf_error(nvgraphStatus_t nvg_stat) { switch (nvg_stat) { case NVGRAPH_STATUS_SUCCESS: return GDF_SUCCESS; case NVGRAPH_STATUS_NOT_INITIALIZED: return GDF_INVALID_API_CALL; case NVGRAPH_STATUS_INVALID_VALUE: return GDF_INVALID_API_CALL; case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED: return GDF_UNSUPPORTED_DTYPE; case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED: return GDF_INVALID_API_CALL; default: return GDF_CUDA_ERROR; } } gdf_error nvgraph2gdf_error_verbose(nvgraphStatus_t nvg_stat) { switch (nvg_stat) { case NVGRAPH_STATUS_NOT_INITIALIZED: std::cerr << "nvGRAPH not initialized"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_ALLOC_FAILED: std::cerr << "nvGRAPH alloc failed"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_INVALID_VALUE: std::cerr << "nvGRAPH invalid value"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_ARCH_MISMATCH: std::cerr << "nvGRAPH arch mismatch"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_MAPPING_ERROR: std::cerr << "nvGRAPH mapping error"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_EXECUTION_FAILED: std::cerr << "nvGRAPH execution failed"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_INTERNAL_ERROR: std::cerr << "nvGRAPH internal error"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED: std::cerr << "nvGRAPH type not supported"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_NOT_CONVERGED: std::cerr << "nvGRAPH algorithm failed to converge"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED: std::cerr << "nvGRAPH graph type not supported"; return GDF_CUDA_ERROR; default: std::cerr << "Unknown nvGRAPH Status"; return GDF_CUDA_ERROR; } } #ifdef VERBOSE #define NVG_TRY(call) \ { \ if ((call)!=NVGRAPH_STATUS_SUCCESS) \ return nvgraph2gdf_error_verbose((call)); \ } #else #define NVG_TRY(call) \ { \ if ((call)!=NVGRAPH_STATUS_SUCCESS) \ return nvgraph2gdf_error((call)); \ } #endif gdf_error gdf_createGraph_nvgraph(nvgraphHandle_t nvg_handle, gdf_graph* gdf_G, nvgraphGraphDescr_t* nvgraph_G, bool use_transposed) { // check input GDF_REQUIRE(!((gdf_G->edgeList == nullptr) && (gdf_G->adjList == nullptr) && (gdf_G->transposedAdjList == nullptr)), GDF_INVALID_API_CALL); nvgraphTopologyType_t TT; ; hipDataType settype; // create an nvgraph graph handle NVG_TRY(nvgraphCreateGraphDescr(nvg_handle, nvgraph_G)); // setup nvgraph variables if (use_transposed) { // convert edgeList to transposedAdjList if (gdf_G->transposedAdjList == nullptr) { GDF_TRY(gdf_add_transpose(gdf_G)); } // using exiting transposedAdjList if it exisits and if adjList is missing TT = NVGRAPH_CSC_32; nvgraphCSCTopology32I_st topoData; topoData.nvertices = gdf_G->transposedAdjList->offsets->size -1; topoData.nedges = gdf_G->transposedAdjList->indices->size; topoData.destination_offsets = (int *) gdf_G->transposedAdjList->offsets->data; topoData.source_indices = (int *) gdf_G->transposedAdjList->indices->data; // attach the transposed adj list NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void *)&topoData, TT)); //attach edge values if (gdf_G->transposedAdjList->edge_data) { switch (gdf_G->transposedAdjList->edge_data->dtype) { case GDF_FLOAT32: settype = HIP_R_32F; NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (float *) gdf_G->transposedAdjList->edge_data->data)); case 
GDF_FLOAT64: settype = HIP_R_64F; NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (double *) gdf_G->transposedAdjList->edge_data->data)); default: return GDF_UNSUPPORTED_DTYPE; } } } else { // convert edgeList to adjList if (gdf_G->adjList == nullptr) { GDF_TRY(gdf_add_adj_list(gdf_G)); } TT = NVGRAPH_CSR_32; nvgraphCSRTopology32I_st topoData; topoData.nvertices = gdf_G->adjList->offsets->size -1; topoData.nedges = gdf_G->adjList->indices->size; topoData.source_offsets = (int *) gdf_G->adjList->offsets->data; topoData.destination_indices = (int *) gdf_G->adjList->indices->data; // attach adj list NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void *)&topoData, TT)); //attach edge values if (gdf_G->adjList->edge_data) { switch (gdf_G->adjList->edge_data->dtype) { case GDF_FLOAT32: settype = HIP_R_32F; NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (float *) gdf_G->adjList->edge_data->data)); case GDF_FLOAT64: settype = HIP_R_64F; NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (double *) gdf_G->adjList->edge_data->data)); default: return GDF_UNSUPPORTED_DTYPE; } } } return GDF_SUCCESS; } gdf_error gdf_sssp_nvgraph(gdf_graph *gdf_G, const int *source_vert, gdf_column *sssp_distances) { std::clock_t start; GDF_REQUIRE( gdf_G != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( *source_vert >= 0 , GDF_INVALID_API_CALL ); GDF_REQUIRE( *source_vert < sssp_distances->size , GDF_INVALID_API_CALL ); GDF_REQUIRE( sssp_distances != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( sssp_distances->data != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( !sssp_distances->valid , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( sssp_distances->size > 0 , GDF_INVALID_API_CALL ); // init nvgraph // TODO : time this call nvgraphHandle_t nvg_handle = 0; nvgraphGraphDescr_t nvgraph_G = 0; hipDataType settype; start = std::clock(); NVG_TRY(nvgraphCreate(&nvg_handle)); GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, true)); std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms int sssp_index = 0; int weight_index = 0; Vector<float> d_val; //RMM: // hipStream_t stream{nullptr}; rmm_temp_allocator allocator(stream); start = std::clock(); if (gdf_G->transposedAdjList->edge_data == nullptr) { // use a fp32 vector [1,...,1] settype = HIP_R_32F; d_val.resize(gdf_G->transposedAdjList->indices->size); thrust::fill(thrust::hip::par(allocator).on(stream), d_val.begin(), d_val.end(), 1.0); NVG_TRY(nvgraphAttachEdgeData(nvg_handle, nvgraph_G, weight_index, settype, (void *) thrust::raw_pointer_cast(d_val.data()))); } else { switch (gdf_G->transposedAdjList->edge_data->dtype) { case GDF_FLOAT32: settype = HIP_R_32F; case GDF_FLOAT64: settype = HIP_R_64F; default: return GDF_UNSUPPORTED_DTYPE; } } NVG_TRY(nvgraphAttachVertexData(nvg_handle, nvgraph_G, 0, settype, sssp_distances->data )); std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms start = std::clock(); NVG_TRY(nvgraphSssp(nvg_handle, nvgraph_G, weight_index, source_vert, sssp_index)); std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms start = std::clock(); NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G)); NVG_TRY(nvgraphDestroy(nvg_handle)); std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) <<std::endl; // in ms return GDF_SUCCESS; }
9d12e0a9ffc3132afb28aacfd662597f3444cde9.cu
#include <nvgraph_gdf.h> #include <thrust/device_vector.h> #include <ctime> #include "utilities/error_utils.h" //RMM: // #include <rmm_utils.h> template<typename T> using Vector = thrust::device_vector<T, rmm_allocator<T>>; gdf_error nvgraph2gdf_error(nvgraphStatus_t nvg_stat) { switch (nvg_stat) { case NVGRAPH_STATUS_SUCCESS: return GDF_SUCCESS; case NVGRAPH_STATUS_NOT_INITIALIZED: return GDF_INVALID_API_CALL; case NVGRAPH_STATUS_INVALID_VALUE: return GDF_INVALID_API_CALL; case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED: return GDF_UNSUPPORTED_DTYPE; case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED: return GDF_INVALID_API_CALL; default: return GDF_CUDA_ERROR; } } gdf_error nvgraph2gdf_error_verbose(nvgraphStatus_t nvg_stat) { switch (nvg_stat) { case NVGRAPH_STATUS_NOT_INITIALIZED: std::cerr << "nvGRAPH not initialized"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_ALLOC_FAILED: std::cerr << "nvGRAPH alloc failed"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_INVALID_VALUE: std::cerr << "nvGRAPH invalid value"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_ARCH_MISMATCH: std::cerr << "nvGRAPH arch mismatch"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_MAPPING_ERROR: std::cerr << "nvGRAPH mapping error"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_EXECUTION_FAILED: std::cerr << "nvGRAPH execution failed"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_INTERNAL_ERROR: std::cerr << "nvGRAPH internal error"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED: std::cerr << "nvGRAPH type not supported"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_NOT_CONVERGED: std::cerr << "nvGRAPH algorithm failed to converge"; return GDF_CUDA_ERROR; case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED: std::cerr << "nvGRAPH graph type not supported"; return GDF_CUDA_ERROR; default: std::cerr << "Unknown nvGRAPH Status"; return GDF_CUDA_ERROR; } } #ifdef VERBOSE #define NVG_TRY(call) \ { \ if ((call)!=NVGRAPH_STATUS_SUCCESS) \ return nvgraph2gdf_error_verbose((call)); \ } #else #define NVG_TRY(call) \ { \ if ((call)!=NVGRAPH_STATUS_SUCCESS) \ return nvgraph2gdf_error((call)); \ } #endif gdf_error gdf_createGraph_nvgraph(nvgraphHandle_t nvg_handle, gdf_graph* gdf_G, nvgraphGraphDescr_t* nvgraph_G, bool use_transposed) { // check input GDF_REQUIRE(!((gdf_G->edgeList == nullptr) && (gdf_G->adjList == nullptr) && (gdf_G->transposedAdjList == nullptr)), GDF_INVALID_API_CALL); nvgraphTopologyType_t TT; ; cudaDataType_t settype; // create an nvgraph graph handle NVG_TRY(nvgraphCreateGraphDescr(nvg_handle, nvgraph_G)); // setup nvgraph variables if (use_transposed) { // convert edgeList to transposedAdjList if (gdf_G->transposedAdjList == nullptr) { GDF_TRY(gdf_add_transpose(gdf_G)); } // using exiting transposedAdjList if it exisits and if adjList is missing TT = NVGRAPH_CSC_32; nvgraphCSCTopology32I_st topoData; topoData.nvertices = gdf_G->transposedAdjList->offsets->size -1; topoData.nedges = gdf_G->transposedAdjList->indices->size; topoData.destination_offsets = (int *) gdf_G->transposedAdjList->offsets->data; topoData.source_indices = (int *) gdf_G->transposedAdjList->indices->data; // attach the transposed adj list NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void *)&topoData, TT)); //attach edge values if (gdf_G->transposedAdjList->edge_data) { switch (gdf_G->transposedAdjList->edge_data->dtype) { case GDF_FLOAT32: settype = CUDA_R_32F; NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (float *) gdf_G->transposedAdjList->edge_data->data)); case GDF_FLOAT64: settype = CUDA_R_64F; 
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (double *) gdf_G->transposedAdjList->edge_data->data)); default: return GDF_UNSUPPORTED_DTYPE; } } } else { // convert edgeList to adjList if (gdf_G->adjList == nullptr) { GDF_TRY(gdf_add_adj_list(gdf_G)); } TT = NVGRAPH_CSR_32; nvgraphCSRTopology32I_st topoData; topoData.nvertices = gdf_G->adjList->offsets->size -1; topoData.nedges = gdf_G->adjList->indices->size; topoData.source_offsets = (int *) gdf_G->adjList->offsets->data; topoData.destination_indices = (int *) gdf_G->adjList->indices->data; // attach adj list NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void *)&topoData, TT)); //attach edge values if (gdf_G->adjList->edge_data) { switch (gdf_G->adjList->edge_data->dtype) { case GDF_FLOAT32: settype = CUDA_R_32F; NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (float *) gdf_G->adjList->edge_data->data)); case GDF_FLOAT64: settype = CUDA_R_64F; NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (double *) gdf_G->adjList->edge_data->data)); default: return GDF_UNSUPPORTED_DTYPE; } } } return GDF_SUCCESS; } gdf_error gdf_sssp_nvgraph(gdf_graph *gdf_G, const int *source_vert, gdf_column *sssp_distances) { std::clock_t start; GDF_REQUIRE( gdf_G != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( *source_vert >= 0 , GDF_INVALID_API_CALL ); GDF_REQUIRE( *source_vert < sssp_distances->size , GDF_INVALID_API_CALL ); GDF_REQUIRE( sssp_distances != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( sssp_distances->data != nullptr , GDF_INVALID_API_CALL ); GDF_REQUIRE( !sssp_distances->valid , GDF_VALIDITY_UNSUPPORTED ); GDF_REQUIRE( sssp_distances->size > 0 , GDF_INVALID_API_CALL ); // init nvgraph // TODO : time this call nvgraphHandle_t nvg_handle = 0; nvgraphGraphDescr_t nvgraph_G = 0; cudaDataType_t settype; start = std::clock(); NVG_TRY(nvgraphCreate(&nvg_handle)); GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, true)); std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms int sssp_index = 0; int weight_index = 0; Vector<float> d_val; //RMM: // cudaStream_t stream{nullptr}; rmm_temp_allocator allocator(stream); start = std::clock(); if (gdf_G->transposedAdjList->edge_data == nullptr) { // use a fp32 vector [1,...,1] settype = CUDA_R_32F; d_val.resize(gdf_G->transposedAdjList->indices->size); thrust::fill(thrust::cuda::par(allocator).on(stream), d_val.begin(), d_val.end(), 1.0); NVG_TRY(nvgraphAttachEdgeData(nvg_handle, nvgraph_G, weight_index, settype, (void *) thrust::raw_pointer_cast(d_val.data()))); } else { switch (gdf_G->transposedAdjList->edge_data->dtype) { case GDF_FLOAT32: settype = CUDA_R_32F; case GDF_FLOAT64: settype = CUDA_R_64F; default: return GDF_UNSUPPORTED_DTYPE; } } NVG_TRY(nvgraphAttachVertexData(nvg_handle, nvgraph_G, 0, settype, sssp_distances->data )); std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms start = std::clock(); NVG_TRY(nvgraphSssp(nvg_handle, nvgraph_G, weight_index, source_vert, sssp_index)); std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms start = std::clock(); NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G)); NVG_TRY(nvgraphDestroy(nvg_handle)); std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) <<std::endl; // in ms return GDF_SUCCESS; }
bad6509492c94f72602997b4841bc03179bbb8a3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void im2col_gpu_kernel(const int n, const float* data_im,
        const int height, const int width, const int ksize,
        const int pad, const int stride,
        const int height_col, const int width_col,
        float *data_col)
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    for(; index < n; index += blockDim.x*gridDim.x){
        int w_out = index % width_col;
        int h_index = index / width_col;
        int h_out = h_index % height_col;
        int channel_in = h_index / height_col;
        int channel_out = channel_in * ksize * ksize;
        int h_in = h_out * stride - pad;
        int w_in = w_out * stride - pad;
        float* data_col_ptr = data_col;
        data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
        const float* data_im_ptr = data_im;
        data_im_ptr += (channel_in * height + h_in) * width + w_in;
        for (int i = 0; i < ksize; ++i) {
            for (int j = 0; j < ksize; ++j) {
                int h = h_in + i;
                int w = w_in + j;
                *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
                    data_im_ptr[i * width + j] : 0;
                //data_im[(channel_in * height + h_in) * width + w_in + i * width + j];
                //*data_col_ptr = data_im_ptr[ii * width + jj];
                data_col_ptr += height_col * width_col;
            }
        }
    }
}
bad6509492c94f72602997b4841bc03179bbb8a3.cu
#include "includes.h" __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ksize; ++i) { for (int j = 0; j < ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; //data_im[(channel_in * height + h_in) * width + w_in + i * width + j]; //*data_col_ptr = data_im_ptr[ii * width + jj]; data_col_ptr += height_col * width_col; } } } }
fd201c90359d3f5b4f98ef5bb4f302ce96c12ed8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ void sigmoid(float *activation, unsigned int length)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += blockDim.x * gridDim.x)
    {
        activation[i]=1.0f/(1.0f+__expf(-activation[i]));
        //activation[i]=1.0f/(1.0f+expf(-activation[i]));
        //activation[i]=activation[i]/(0.5f+0.5f*fabsf(activation[i]))+0.5f;
    }
}
fd201c90359d3f5b4f98ef5bb4f302ce96c12ed8.cu
extern "C" __global__ void sigmoid(float *activation, unsigned int length) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += blockDim.x * gridDim.x) { activation[i]=1.0f/(1.0f+__expf(-activation[i])); //activation[i]=1.0f/(1.0f+expf(-activation[i])); //activation[i]=activation[i]/(0.5f+0.5f*fabsf(activation[i]))+0.5f; } }
ca2a9bf18167297a3f261f619afeac5a01098633.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "external/petscvector/petscvector.cuh" #include "external/petscvector/common/fem2D.h" namespace pascinference { namespace common { __global__ void kernel_fem2D_reduce_data(double *data1, double *data2, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff); __global__ void kernel_fem2D_prolongate_data(double *data1, double *data2, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff); void Fem2D<PetscVector>::ExternalContent::cuda_occupancy(){ LOG_FUNC_BEGIN /* compute optimal kernel calls */ gpuErrchk( hipOccupancyMaxPotentialBlockSize( &minGridSize_reduce, &blockSize_reduce, kernel_fem2D_reduce_data, 0, 0) ); gpuErrchk( hipOccupancyMaxPotentialBlockSize( &minGridSize_prolongate, &blockSize_prolongate, kernel_fem2D_prolongate_data, 0, 0) ); LOG_FUNC_END } void Fem2D<PetscVector>::ExternalContent::cuda_reduce_data(double *data1_arr, double *data2_arr, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff){ LOG_FUNC_BEGIN hipLaunchKernelGGL(( kernel_fem2D_reduce_data), dim3(gridSize_reduce), dim3(blockSize_reduce), 0, 0, data1_arr, data2_arr, T1, T2, Tbegin1, Tbegin2, T1local, T2local, left_t1_idx, left_t2_idx, diff); gpuErrchk( hipDeviceSynchronize() ); LOG_FUNC_END } void Fem2D<PetscVector>::ExternalContent::cuda_prolongate_data(double *data1_arr, double *data2_arr, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff){ LOG_FUNC_BEGIN hipLaunchKernelGGL(( kernel_fem2D_prolongate_data), dim3(gridSize_prolongate), dim3(blockSize_prolongate), 0, 0, data1_arr, data2_arr, T1, T2, Tbegin1, Tbegin2, T1local, T2local, left_t1_idx, left_t2_idx, diff); gpuErrchk( hipDeviceSynchronize() ); LOG_FUNC_END } __global__ void kernel_fem2D_reduce_data(double *data1, double *data2, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff) { int t2 = blockIdx.x*blockDim.x + threadIdx.x; if(t2 < T2local){ double center_t1 = (Tbegin2+t2)*diff; double left_t1 = (Tbegin2+t2-1)*diff; double right_t1 = (Tbegin2+t2+1)*diff; int id_counter = floor(left_t1) - left_t1_idx; /* first index in provided local t1 array */ double phi_value; /* value of basis function */ /* left part of hat function */ double mysum = 0.0; int t1 = floor(left_t1); /* compute linear combination with coefficients given by basis functions */ while(t1 <= center_t1){ phi_value = (t1 - left_t1)/(center_t1 - left_t1); mysum += phi_value*data1[id_counter]; t1 += 1; id_counter += 1; } /* right part of hat function */ while(t1 < right_t1){ phi_value = (t1 - right_t1)/(center_t1 - right_t1); mysum += phi_value*data1[id_counter]; t1 += 1; id_counter += 1; } data2[t2] = mysum; } } __global__ void kernel_fem2D_prolongate_data(double *data1, double *data2, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff) { int t1 = blockIdx.x*blockDim.x + threadIdx.x; if(t1 < T1local){ int t2_left_id_orig = floor((t1 + Tbegin1)/diff); int t2_right_id_orig = floor((t1 + Tbegin1)/diff) + 1; double t1_left = t2_left_id_orig*diff; double t1_right = t2_right_id_orig*diff; int t2_left_id = t2_left_id_orig - left_t2_idx; int t2_right_id = t2_right_id_orig - left_t2_idx; /* value of basis functions */ double t1_value = 0.0; 
double phi_value_left = (t1 + Tbegin1 - t1_left)/(t1_right - t1_left); t1_value += phi_value_left*data2[t2_right_id]; double phi_value_right = (t1 + Tbegin1 - t1_right)/(t1_left - t1_right); t1_value += phi_value_right*data2[t2_left_id]; data1[t1] = t1_value; } } } } /* end of namespace */
ca2a9bf18167297a3f261f619afeac5a01098633.cu
#include "external/petscvector/petscvector.cuh" #include "external/petscvector/common/fem2D.h" namespace pascinference { namespace common { __global__ void kernel_fem2D_reduce_data(double *data1, double *data2, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff); __global__ void kernel_fem2D_prolongate_data(double *data1, double *data2, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff); void Fem2D<PetscVector>::ExternalContent::cuda_occupancy(){ LOG_FUNC_BEGIN /* compute optimal kernel calls */ gpuErrchk( cudaOccupancyMaxPotentialBlockSize( &minGridSize_reduce, &blockSize_reduce, kernel_fem2D_reduce_data, 0, 0) ); gpuErrchk( cudaOccupancyMaxPotentialBlockSize( &minGridSize_prolongate, &blockSize_prolongate, kernel_fem2D_prolongate_data, 0, 0) ); LOG_FUNC_END } void Fem2D<PetscVector>::ExternalContent::cuda_reduce_data(double *data1_arr, double *data2_arr, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff){ LOG_FUNC_BEGIN kernel_fem2D_reduce_data<<<gridSize_reduce, blockSize_reduce>>>(data1_arr, data2_arr, T1, T2, Tbegin1, Tbegin2, T1local, T2local, left_t1_idx, left_t2_idx, diff); gpuErrchk( cudaDeviceSynchronize() ); LOG_FUNC_END } void Fem2D<PetscVector>::ExternalContent::cuda_prolongate_data(double *data1_arr, double *data2_arr, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff){ LOG_FUNC_BEGIN kernel_fem2D_prolongate_data<<<gridSize_prolongate, blockSize_prolongate>>>(data1_arr, data2_arr, T1, T2, Tbegin1, Tbegin2, T1local, T2local, left_t1_idx, left_t2_idx, diff); gpuErrchk( cudaDeviceSynchronize() ); LOG_FUNC_END } __global__ void kernel_fem2D_reduce_data(double *data1, double *data2, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff) { int t2 = blockIdx.x*blockDim.x + threadIdx.x; if(t2 < T2local){ double center_t1 = (Tbegin2+t2)*diff; double left_t1 = (Tbegin2+t2-1)*diff; double right_t1 = (Tbegin2+t2+1)*diff; int id_counter = floor(left_t1) - left_t1_idx; /* first index in provided local t1 array */ double phi_value; /* value of basis function */ /* left part of hat function */ double mysum = 0.0; int t1 = floor(left_t1); /* compute linear combination with coefficients given by basis functions */ while(t1 <= center_t1){ phi_value = (t1 - left_t1)/(center_t1 - left_t1); mysum += phi_value*data1[id_counter]; t1 += 1; id_counter += 1; } /* right part of hat function */ while(t1 < right_t1){ phi_value = (t1 - right_t1)/(center_t1 - right_t1); mysum += phi_value*data1[id_counter]; t1 += 1; id_counter += 1; } data2[t2] = mysum; } } __global__ void kernel_fem2D_prolongate_data(double *data1, double *data2, int T1, int T2, int Tbegin1, int Tbegin2, int T1local, int T2local, int left_t1_idx, int left_t2_idx, double diff) { int t1 = blockIdx.x*blockDim.x + threadIdx.x; if(t1 < T1local){ int t2_left_id_orig = floor((t1 + Tbegin1)/diff); int t2_right_id_orig = floor((t1 + Tbegin1)/diff) + 1; double t1_left = t2_left_id_orig*diff; double t1_right = t2_right_id_orig*diff; int t2_left_id = t2_left_id_orig - left_t2_idx; int t2_right_id = t2_right_id_orig - left_t2_idx; /* value of basis functions */ double t1_value = 0.0; double phi_value_left = (t1 + Tbegin1 - t1_left)/(t1_right - t1_left); t1_value += phi_value_left*data2[t2_right_id]; double phi_value_right = (t1 + Tbegin1 - 
t1_right)/(t1_left - t1_right); t1_value += phi_value_right*data2[t2_left_id]; data1[t1] = t1_value; } } } } /* end of namespace */
45fd796e6a15e7f5bcea14f44d89bced90daa391.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_divScalarFloat.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_divScalarFloat), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_divScalarFloat), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_divScalarFloat), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
45fd796e6a15e7f5bcea14f44d89bced90daa391.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_divScalarFloat.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float y = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_divScalarFloat<<<gridBlock,threadBlock>>>(n,result,x,y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_divScalarFloat<<<gridBlock,threadBlock>>>(n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_divScalarFloat<<<gridBlock,threadBlock>>>(n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
03a6a867151034ff7177b1e6257b7af15bda2fb2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void vector_dot_product_cu(int *d_c, int *d_a, int *d_b){
    __shared__ int tmp[4];
    int i = threadIdx.x;
    tmp[i] = d_a[i] * d_b[i];
    __syncthreads();
    int sum = 0;
    for (int j = 0; j < 4; j++) {
        sum = sum + tmp[j];
    }
    *d_c = sum;
}

int main(void)
{
    const int N = 4;   // must be a constant expression so the arrays below can be initialized
    int a[N] = { 22, 13, 16, 5 };
    int b[N] = { 5, 22, 17, 37 };
    int c;
    int *d_a, *d_b, *d_c;

    hipMalloc((void**)&d_a, sizeof(int)*N);
    hipMalloc((void**)&d_b, sizeof(int)*N);
    hipMalloc((void**)&d_c, sizeof(int));

    hipMemcpy(d_a, a, sizeof(int)*N, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, sizeof(int)*N, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( vector_dot_product_cu), dim3(1),dim3(N), 0, 0, d_c, d_a, d_b);

    hipMemcpy(&c, d_c, sizeof(int), hipMemcpyDeviceToHost);

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);   // release the result buffer as well

    printf("A ");
    for (int i = 0; i < N; i++) {
        printf("%d ", a[i]);
    }
    printf("\n");
    printf("B ");
    for (int i = 0; i < N; i++) {
        printf("%d ", b[i]);
    }
    printf("\n");
    printf("Answer = %d\n", c);
    return 0;
}
03a6a867151034ff7177b1e6257b7af15bda2fb2.cu
#include <stdio.h>

__global__ void vector_dot_product_cu(int *d_c, int *d_a, int *d_b){
    __shared__ int tmp[4];
    int i = threadIdx.x;
    tmp[i] = d_a[i] * d_b[i];
    __syncthreads();
    int sum = 0;
    for (int j = 0; j < 4; j++) {
        sum = sum + tmp[j];
    }
    *d_c = sum;
}

int main(void)
{
    const int N = 4;   // must be a constant expression so the arrays below can be initialized
    int a[N] = { 22, 13, 16, 5 };
    int b[N] = { 5, 22, 17, 37 };
    int c;
    int *d_a, *d_b, *d_c;

    cudaMalloc((void**)&d_a, sizeof(int)*N);
    cudaMalloc((void**)&d_b, sizeof(int)*N);
    cudaMalloc((void**)&d_c, sizeof(int));

    cudaMemcpy(d_a, a, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(int)*N, cudaMemcpyHostToDevice);

    vector_dot_product_cu<<<1,N>>>(d_c, d_a, d_b);

    cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);   // release the result buffer as well

    printf("A ");
    for (int i = 0; i < N; i++) {
        printf("%d ", a[i]);
    }
    printf("\n");
    printf("B ");
    for (int i = 0; i < N; i++) {
        printf("%d ", b[i]);
    }
    printf("\n");
    printf("Answer = %d\n", c);
    return 0;
}
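
// A sketch of the same dot product with the final accumulation restricted to a
// single thread: in vector_dot_product_cu every thread loops over all of tmp[]
// and stores the same value into *d_c, which is correct but redundant. This
// variant is an illustrative alternative, not part of the original program; the
// tree-reduction patterns usual for larger N are out of scope here.
__global__ void vector_dot_product_single_writer(int *d_c, int *d_a, int *d_b)
{
    __shared__ int tmp[4];
    int i = threadIdx.x;
    tmp[i] = d_a[i] * d_b[i];   // one product per thread
    __syncthreads();            // make all products visible to thread 0
    if (i == 0) {
        int sum = 0;
        for (int j = 0; j < 4; j++)
            sum += tmp[j];
        *d_c = sum;             // single writer avoids the redundant stores
    }
}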
6a202c85f01e5cf8ff3b5bba0db1bd3e0e292a1d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

int main()
{
    int nDevices;
    hipGetDeviceCount(&nDevices);

    for (int i = 0; i < nDevices; i++) {
        hipDeviceProp_t prop;
        hipGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf(" pciBusID %d\n", prop.pciBusID);
        printf(" pciDeviceID %d\n", prop.pciDeviceID);
        printf(" Compute Capability: %d.%d\n", prop.major, prop.minor);
        printf(" totalGlobalMem:%zu\n", prop.totalGlobalMem);
        printf(" warpSize:%d\n", prop.warpSize);
        printf(" regsPerBlock:%d\n", prop.regsPerBlock);
        printf(" sharedMemPerBlock:%zu\n", prop.sharedMemPerBlock);   // size_t, so %zu
        printf("\n");
    }
}
6a202c85f01e5cf8ff3b5bba0db1bd3e0e292a1d.cu
#include <stdio.h>

int main()
{
    int nDevices;
    cudaGetDeviceCount(&nDevices);

    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf(" pciBusID %d\n", prop.pciBusID);
        printf(" pciDeviceID %d\n", prop.pciDeviceID);
        printf(" Compute Capability: %d.%d\n", prop.major, prop.minor);
        printf(" totalGlobalMem:%zu\n", prop.totalGlobalMem);
        printf(" warpSize:%d\n", prop.warpSize);
        printf(" regsPerBlock:%d\n", prop.regsPerBlock);
        printf(" sharedMemPerBlock:%zu\n", prop.sharedMemPerBlock);   // size_t, so %zu
        printf("\n");
    }
}
397ff2ff04d2109625d7d3380cab668d59127452.hip
// !!! This is a file automatically generated by hipify!!! #include <cudatbx/cuda_base.cuh> #include <simtbx/gpu/structure_factors.h> namespace simtbx { namespace gpu { gpu_energy_channels::gpu_energy_channels(int const& deviceId){ h_deviceID = deviceId; hipSetDevice(deviceId); } void gpu_energy_channels::structure_factors_to_GPU_detail(af::shared<double> linear_amplitudes){ double * raw_ptr = linear_amplitudes.begin(); CUDAREAL * cu_Fhkl = NULL; cudaSafeCall(hipMalloc((void ** )&cu_Fhkl, sizeof(*cu_Fhkl) * linear_amplitudes.size())); cudaSafeCall(hipMemcpy(cu_Fhkl, raw_ptr, sizeof(*cu_Fhkl) * linear_amplitudes.size(), hipMemcpyHostToDevice)); d_channel_Fhkl.push_back(cu_Fhkl); if (d_channel_Fhkl.size()==1) { //first time through send ranges to device hklParams FhklParams = { h_range * k_range * l_range, h_min, h_max, h_range, k_min, k_max, k_range, l_min, l_max, l_range }; cudaSafeCall(hipMalloc((void ** )&cu_FhklParams, sizeof(*cu_FhklParams))); cudaSafeCall(hipMemcpy(cu_FhklParams, &FhklParams, sizeof(*cu_FhklParams), hipMemcpyHostToDevice)); } } void gpu_energy_channels::free_detail(){ cudaSafeCall(hipSetDevice(h_deviceID)); for (int i_cu_ptr=0; i_cu_ptr < d_channel_Fhkl.size(); ++i_cu_ptr){ cudaSafeCall(hipFree(d_channel_Fhkl[i_cu_ptr])); } cudaSafeCall(hipFree(cu_FhklParams)); } } // gpu } // simtbx
397ff2ff04d2109625d7d3380cab668d59127452.cu
#include <cudatbx/cuda_base.cuh> #include <simtbx/gpu/structure_factors.h> namespace simtbx { namespace gpu { gpu_energy_channels::gpu_energy_channels(int const& deviceId){ h_deviceID = deviceId; cudaSetDevice(deviceId); } void gpu_energy_channels::structure_factors_to_GPU_detail(af::shared<double> linear_amplitudes){ double * raw_ptr = linear_amplitudes.begin(); CUDAREAL * cu_Fhkl = NULL; cudaSafeCall(cudaMalloc((void ** )&cu_Fhkl, sizeof(*cu_Fhkl) * linear_amplitudes.size())); cudaSafeCall(cudaMemcpy(cu_Fhkl, raw_ptr, sizeof(*cu_Fhkl) * linear_amplitudes.size(), cudaMemcpyHostToDevice)); d_channel_Fhkl.push_back(cu_Fhkl); if (d_channel_Fhkl.size()==1) { //first time through send ranges to device hklParams FhklParams = { h_range * k_range * l_range, h_min, h_max, h_range, k_min, k_max, k_range, l_min, l_max, l_range }; cudaSafeCall(cudaMalloc((void ** )&cu_FhklParams, sizeof(*cu_FhklParams))); cudaSafeCall(cudaMemcpy(cu_FhklParams, &FhklParams, sizeof(*cu_FhklParams), cudaMemcpyHostToDevice)); } } void gpu_energy_channels::free_detail(){ cudaSafeCall(cudaSetDevice(h_deviceID)); for (int i_cu_ptr=0; i_cu_ptr < d_channel_Fhkl.size(); ++i_cu_ptr){ cudaSafeCall(cudaFree(d_channel_Fhkl[i_cu_ptr])); } cudaSafeCall(cudaFree(cu_FhklParams)); } } // gpu } // simtbx
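
// A sketch of how this class appears intended to be driven, based only on the
// member functions visible in this translation unit; the amplitude source
// amplitudes_for_channel and the count n_channels are illustrative assumptions.
//
//   simtbx::gpu::gpu_energy_channels channels(0);           // pick GPU device 0
//   for (std::size_t i = 0; i < n_channels; ++i)
//       channels.structure_factors_to_GPU_detail(amplitudes_for_channel(i));
//   // ... run the per-channel simulation kernels ...
//   channels.free_detail();                                  // release device buffers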
245df5157cc07445972284dae21a64314476773e.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Functions void CleanupResources(void); void RandomInit_int(unsigned*, int); void RandomInit_fp(float*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(unsigned *A, unsigned *B, int N, int iterations) { int id = blockDim.x * blockIdx.x + threadIdx.x; int cta_id=blockDim.x * blockIdx.x; int offset=THREADS_PER_BLOCK/2; unsigned sum=0; if(id < N){ for(unsigned i=0; i<iterations; ++i){ A[id] = A[id] + B[id] + id; //for(unsigned j=0; j<iterations/4; ++j){ sum += A[id]; sum += A[id+1]; sum += A[id+2]; if(id%2==0){ sum += A[id+5]; A[id+6]=sum; sum += A[id+7]; sum += A[id+8]; sum += A[id+9]; A[id+3]=sum; sum += A[id+4]; sum += A[id+10]; A[id+11]=sum; sum += A[id+12]; sum += A[id+13]; A[id+14]=sum; A[id+15]=sum; } A[id] = sum+A[id]+B[id]; } } } __global__ void PowerKernalEmpty(unsigned* C, int N, int iterations) { unsigned id = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation __syncthreads(); // Excessive Mod/Div Operations for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) { //Value1=(I1)+k; //Value2=(I2)+k; //Value3=(Value2)+k; //Value2=(Value1)+k; /* __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); */ } C[id]=id; __syncthreads(); } // Host code unsigned *h_A1, *h_A2, *h_A3; unsigned *d_A1, *d_A2, *d_A3; int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2; // Allocate input vectors h_A and h_B in host memory size_t size1 = N * sizeof(unsigned); 
h_A1 = (unsigned*)malloc(size1); if (h_A1 == 0) CleanupResources(); h_A2 = (unsigned*)malloc(size1); if (h_A2 == 0) CleanupResources(); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); // Initialize input vectors RandomInit_int(h_A1, N); RandomInit_int(h_A2, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A1, size1) ); checkCudaErrors( hipMalloc((void**)&d_A2, size1) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); hipDeviceSynchronize(); //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N, iterations); checkCudaErrors(hipEventRecord(stop)); //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A1) hipFree(d_A1); if (d_A2) hipFree(d_A2); if (d_A3) hipFree(d_A3); // Free host memory if (h_A1) free(h_A1); if (h_A2) free(h_A2); if (h_A3) free(h_A3); } // Allocates an array with random float entries. void RandomInit_int(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } } void RandomInit_fp(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
245df5157cc07445972284dae21a64314476773e.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Functions void CleanupResources(void); void RandomInit_int(unsigned*, int); void RandomInit_fp(float*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal1(unsigned *A, unsigned *B, int N, int iterations) { int id = blockDim.x * blockIdx.x + threadIdx.x; int cta_id=blockDim.x * blockIdx.x; int offset=THREADS_PER_BLOCK/2; unsigned sum=0; if(id < N){ for(unsigned i=0; i<iterations; ++i){ A[id] = A[id] + B[id] + id; //for(unsigned j=0; j<iterations/4; ++j){ sum += A[id]; sum += A[id+1]; sum += A[id+2]; if(id%2==0){ sum += A[id+5]; A[id+6]=sum; sum += A[id+7]; sum += A[id+8]; sum += A[id+9]; A[id+3]=sum; sum += A[id+4]; sum += A[id+10]; A[id+11]=sum; sum += A[id+12]; sum += A[id+13]; A[id+14]=sum; A[id+15]=sum; } A[id] = sum+A[id]+B[id]; } } } __global__ void PowerKernalEmpty(unsigned* C, int N, int iterations) { unsigned id = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation __syncthreads(); // Excessive Mod/Div Operations for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) { //Value1=(I1)+k; //Value2=(I2)+k; //Value3=(Value2)+k; //Value2=(Value1)+k; /* __asm volatile ( "B0: bra.uni B1;\n\t" "B1: bra.uni B2;\n\t" "B2: bra.uni B3;\n\t" "B3: bra.uni B4;\n\t" "B4: bra.uni B5;\n\t" "B5: bra.uni B6;\n\t" "B6: bra.uni B7;\n\t" "B7: bra.uni B8;\n\t" "B8: bra.uni B9;\n\t" "B9: bra.uni B10;\n\t" "B10: bra.uni B11;\n\t" "B11: bra.uni B12;\n\t" "B12: bra.uni B13;\n\t" "B13: bra.uni B14;\n\t" "B14: bra.uni B15;\n\t" "B15: bra.uni B16;\n\t" "B16: bra.uni B17;\n\t" "B17: bra.uni B18;\n\t" "B18: bra.uni B19;\n\t" "B19: bra.uni B20;\n\t" "B20: bra.uni B21;\n\t" "B21: bra.uni B22;\n\t" "B22: bra.uni B23;\n\t" "B23: bra.uni B24;\n\t" "B24: bra.uni B25;\n\t" "B25: bra.uni B26;\n\t" "B26: bra.uni B27;\n\t" "B27: bra.uni B28;\n\t" "B28: bra.uni B29;\n\t" "B29: bra.uni B30;\n\t" "B30: bra.uni B31;\n\t" "B31: bra.uni LOOP;\n\t" "LOOP:" ); */ } C[id]=id; __syncthreads(); } // Host code unsigned *h_A1, *h_A2, *h_A3; unsigned *d_A1, *d_A2, *d_A3; int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2; // Allocate input vectors h_A and h_B in host memory size_t size1 = N * sizeof(unsigned); h_A1 = (unsigned*)malloc(size1); if (h_A1 == 0) 
CleanupResources(); h_A2 = (unsigned*)malloc(size1); if (h_A2 == 0) CleanupResources(); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); // Initialize input vectors RandomInit_int(h_A1, N); RandomInit_int(h_A2, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A1, size1) ); checkCudaErrors( cudaMalloc((void**)&d_A2, size1) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); cudaThreadSynchronize(); //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N); checkCudaErrors(cudaEventRecord(start)); PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N, iterations); checkCudaErrors(cudaEventRecord(stop)); //PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A1) cudaFree(d_A1); if (d_A2) cudaFree(d_A2); if (d_A3) cudaFree(d_A3); // Free host memory if (h_A1) free(h_A1); if (h_A2) free(h_A2); if (h_A3) free(h_A3); } // Allocates an array with random float entries. void RandomInit_int(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } } void RandomInit_fp(float* data, int n) { for (int i = 0; i < n; ++i){ data[i] = rand() / RAND_MAX; } }
b52da1bb199e79899808bfaf97547ea011aae23e.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cstdio> #define THREADS_PER_WARP 32 #define WARPS_PER_CTA 32 #define DEFAULT_CTAS 10 #define DEFAULT_NREPS 10 typedef unsigned long long int uint64_t; typedef int uint32_t; __managed__ uint64_t global_sum; /////////////////////////////////////////////////////////////////////////////// // The is the core function of this program. /////////////////////////////////////////////////////////////////////////////// __global__ void simple_add(int nreps) { int local_sum = 0; for (int i=0; i<nreps; i++) { local_sum += 1; } atomicAdd(&global_sum, local_sum); } /////////////////////////////////////////////////////////////////////////////// // This is a wrapper to call the simple_add. 
/////////////////////////////////////////////////////////////////////////////// void simple_add_wrapper(int ctas, int nreps) { dim3 block(WARPS_PER_CTA * THREADS_PER_WARP, 1); dim3 grid(ctas, 1); hipLaunchKernelGGL(( simple_add), dim3(grid),dim3(block),0, 0, nreps); hipError_t error = hipDeviceSynchronize(); if (error != hipSuccess) { printf("Error: kernel failed %s\n", hipGetErrorString(error)); } } int main(int argc, char *argv[]) { setbuf(stdout, NULL); // Disable stdout buffering //Set the device int device = 0; hipSetDevice(device); hipDeviceProp_t cudaDevicePropForChoosing; hipGetDeviceProperties(&cudaDevicePropForChoosing, device); printf("Device %d (%s) is being used\n", device, cudaDevicePropForChoosing.name); printf("memory: %.4f GB %s %d SMs x%d\n", cudaDevicePropForChoosing.totalGlobalMem/(1024.f*1024.f*1024.f), (cudaDevicePropForChoosing.ECCEnabled)?"ECC on":"ECC off", cudaDevicePropForChoosing.multiProcessorCount, cudaDevicePropForChoosing.clockRate ); int nreps = DEFAULT_NREPS; int ctas = DEFAULT_CTAS; printf("#CTAs=%d, nreps=%d, threads/CTA=%d\n", ctas, nreps, THREADS_PER_WARP*WARPS_PER_CTA); global_sum = 0; // initialize the sum to 0 // Call the main function now simple_add_wrapper(ctas, nreps); printf("global sum = %lld \n", global_sum); hipDeviceReset(); return 0; }
b52da1bb199e79899808bfaf97547ea011aae23e.cu
/* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <cuda_runtime.h> #include <cuda.h> #include <cstdio> #define THREADS_PER_WARP 32 #define WARPS_PER_CTA 32 #define DEFAULT_CTAS 10 #define DEFAULT_NREPS 10 typedef unsigned long long int uint64_t; typedef int uint32_t; __managed__ uint64_t global_sum; /////////////////////////////////////////////////////////////////////////////// // The is the core function of this program. /////////////////////////////////////////////////////////////////////////////// __global__ void simple_add(int nreps) { int local_sum = 0; for (int i=0; i<nreps; i++) { local_sum += 1; } atomicAdd(&global_sum, local_sum); } /////////////////////////////////////////////////////////////////////////////// // This is a wrapper to call the simple_add. /////////////////////////////////////////////////////////////////////////////// void simple_add_wrapper(int ctas, int nreps) { dim3 block(WARPS_PER_CTA * THREADS_PER_WARP, 1); dim3 grid(ctas, 1); simple_add<<<grid,block,0>>>(nreps); cudaError_t error = cudaThreadSynchronize(); if (error != cudaSuccess) { printf("Error: kernel failed %s\n", cudaGetErrorString(error)); } } int main(int argc, char *argv[]) { setbuf(stdout, NULL); // Disable stdout buffering //Set the device int device = 0; cudaSetDevice(device); cudaDeviceProp cudaDevicePropForChoosing; cudaGetDeviceProperties(&cudaDevicePropForChoosing, device); printf("Device %d (%s) is being used\n", device, cudaDevicePropForChoosing.name); printf("memory: %.4f GB %s %d SMs x%d\n", cudaDevicePropForChoosing.totalGlobalMem/(1024.f*1024.f*1024.f), (cudaDevicePropForChoosing.ECCEnabled)?"ECC on":"ECC off", cudaDevicePropForChoosing.multiProcessorCount, cudaDevicePropForChoosing.clockRate ); int nreps = DEFAULT_NREPS; int ctas = DEFAULT_CTAS; printf("#CTAs=%d, nreps=%d, threads/CTA=%d\n", ctas, nreps, THREADS_PER_WARP*WARPS_PER_CTA); global_sum = 0; // initialize the sum to 0 // Call the main function now simple_add_wrapper(ctas, nreps); printf("global sum = %lld \n", global_sum); cudaThreadExit(); return 0; }
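
// Every launched thread adds nreps to the managed counter, so the result has a
// closed form against which the demo could be validated. A minimal check sketch;
// the helper name is an assumption, and it relies only on the macros and the
// uint64_t typedef defined above.
static bool check_simple_add_result(uint64_t observed, int ctas, int nreps)
{
    // total threads = ctas * WARPS_PER_CTA * THREADS_PER_WARP, each contributing nreps
    uint64_t expected = (uint64_t)ctas * WARPS_PER_CTA * THREADS_PER_WARP * (uint64_t)nreps;
    return observed == expected;
}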
80e07843f21b3cdc731f98771552fbd89991aa0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include <paddle/fluid/memory/allocation/allocator.h> #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/for_range.h" #include "paddle/phi/kernels/funcs/gather.cu.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 64; static constexpr int kNumMaxinumNumBlocks = 4096; const int kBBoxSize = 4; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } static __global__ void GetLengthLoD(const int nthreads, const int* batch_ids, int* length_lod) { CUDA_KERNEL_LOOP(i, nthreads) { platform::CudaAtomicAdd(length_lod + batch_ids[i], 1); } } template <typename DeviceContext, typename T> class GPUCollectFpnProposalsOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto roi_ins = ctx.MultiInput<LoDTensor>("MultiLevelRois"); const auto score_ins = ctx.MultiInput<LoDTensor>("MultiLevelScores"); auto fpn_rois = ctx.Output<LoDTensor>("FpnRois"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); const int post_nms_topN = ctx.Attr<int>("post_nms_topN"); // concat inputs along axis = 0 int roi_offset = 0; int score_offset = 0; int total_roi_num = 0; for (size_t i = 0; i < roi_ins.size(); ++i) { total_roi_num += roi_ins[i]->dims()[0]; } int real_post_num = min(post_nms_topN, total_roi_num); fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor concat_rois; Tensor concat_scores; T* concat_rois_data = concat_rois.mutable_data<T>( {total_roi_num, kBBoxSize}, dev_ctx.GetPlace()); T* concat_scores_data = concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace()); Tensor roi_batch_id_list; roi_batch_id_list.Resize({total_roi_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); int index = 0; int lod_size; auto place = dev_ctx.GetPlace(); auto multi_rois_num = ctx.MultiInput<Tensor>("MultiLevelRoIsNum"); for (size_t i = 0; i < roi_ins.size(); ++i) { auto roi_in = roi_ins[i]; auto score_in = score_ins[i]; if (multi_rois_num.size() > 0) { framework::Tensor temp; paddle::framework::TensorCopySync( *multi_rois_num[i], platform::CPUPlace(), &temp); const int* length_in = temp.data<int>(); 
lod_size = multi_rois_num[i]->numel(); for (size_t n = 0; n < lod_size; ++n) { for (size_t j = 0; j < length_in[n]; ++j) { roi_batch_id_data[index++] = n; } } } else { auto length_in = roi_in->lod().back(); lod_size = length_in.size() - 1; for (size_t n = 0; n < lod_size; ++n) { for (size_t j = length_in[n]; j < length_in[n + 1]; ++j) { roi_batch_id_data[index++] = n; } } } memory::Copy(place, concat_rois_data + roi_offset, place, roi_in->data<T>(), roi_in->numel() * sizeof(T), dev_ctx.stream()); memory::Copy(place, concat_scores_data + score_offset, place, score_in->data<T>(), score_in->numel() * sizeof(T), dev_ctx.stream()); roi_offset += roi_in->numel(); score_offset += score_in->numel(); } // copy batch id list to GPU Tensor roi_batch_id_list_gpu; framework::TensorCopy( roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu); Tensor index_in_t; int* idx_in = index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); platform::ForRange<phi::GPUContext> for_range_total(dev_ctx, total_roi_num); for_range_total(RangeInitFunctor{0, 1, idx_in}); Tensor keys_out_t; T* keys_out = keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace()); Tensor index_out_t; int* idx_out = index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairsDescending<T, int>(nullptr, temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num, 0, sizeof(T) * 8, dev_ctx.stream()); // Allocate temporary storage auto d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort score to get corresponding index hipcub::DeviceRadixSort::SortPairsDescending<T, int>(d_temp_storage->ptr(), temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num, 0, sizeof(T) * 8, dev_ctx.stream()); index_out_t.Resize({real_post_num}); Tensor sorted_rois; sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor sorted_batch_id; sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); phi::funcs::GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois); phi::funcs::GPUGather<int>( dev_ctx, roi_batch_id_list_gpu, index_out_t, &sorted_batch_id); Tensor batch_index_t; int* batch_idx_in = batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); platform::ForRange<phi::GPUContext> for_range_post(dev_ctx, real_post_num); for_range_post(RangeInitFunctor{0, 1, batch_idx_in}); Tensor out_id_t; int* out_id_data = out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs<int, int>(nullptr, temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num, 0, sizeof(int) * 8, dev_ctx.stream()); // Allocate temporary storage d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort batch_id to get corresponding index hipcub::DeviceRadixSort::SortPairs<int, int>(d_temp_storage->ptr(), temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num, 0, sizeof(int) * 8, dev_ctx.stream()); phi::funcs::GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois); Tensor length_lod; int* length_lod_data = length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace()); phi::funcs::SetConstant<phi::GPUContext, int> set_zero; 
set_zero(dev_ctx, &length_lod, static_cast<int>(0)); int blocks = NumBlocks(real_post_num); int threads = kNumCUDAThreads; // get length-based lod by batch ids hipLaunchKernelGGL(( GetLengthLoD), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), real_post_num, out_id_data, length_lod_data); std::vector<int> length_lod_cpu(lod_size); memory::Copy(platform::CPUPlace(), length_lod_cpu.data(), place, length_lod_data, sizeof(int) * lod_size, dev_ctx.stream()); dev_ctx.Wait(); std::vector<size_t> offset(1, 0); for (int i = 0; i < lod_size; ++i) { offset.emplace_back(offset.back() + length_lod_cpu[i]); } if (ctx.HasOutput("RoisNum")) { auto* rois_num = ctx.Output<Tensor>("RoisNum"); int* rois_num_data = rois_num->mutable_data<int>({lod_size}, place); memory::Copy(place, rois_num_data, place, length_lod_data, lod_size * sizeof(int), dev_ctx.stream()); } framework::LoD lod; lod.emplace_back(offset); fpn_rois->set_lod(lod); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( collect_fpn_proposals, ops::GPUCollectFpnProposalsOpKernel<phi::GPUContext, float>, ops::GPUCollectFpnProposalsOpKernel<phi::GPUContext, double>);
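The GetLengthLoD kernel above builds a per-batch histogram of the selected RoIs with atomic adds; the host loop then turns those lengths into an offset-based LoD by a running sum. A minimal standalone sketch of that host-side conversion (just the arithmetic the loop performs, not Paddle API):

#include <vector>
#include <cstddef>

// lengths per batch -> cumulative offsets, e.g. {2, 1, 3} -> {0, 2, 3, 6}
std::vector<size_t> lengths_to_offsets(const std::vector<int>& lengths) {
  std::vector<size_t> offsets(1, 0);
  for (int len : lengths) {
    offsets.push_back(offsets.back() + len);
  }
  return offsets;
}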
80e07843f21b3cdc731f98771552fbd89991aa0b.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include <paddle/fluid/memory/allocation/allocator.h> #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/for_range.h" #include "paddle/phi/kernels/funcs/gather.cu.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 64; static constexpr int kNumMaxinumNumBlocks = 4096; const int kBBoxSize = 4; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } static __global__ void GetLengthLoD(const int nthreads, const int* batch_ids, int* length_lod) { CUDA_KERNEL_LOOP(i, nthreads) { platform::CudaAtomicAdd(length_lod + batch_ids[i], 1); } } template <typename DeviceContext, typename T> class GPUCollectFpnProposalsOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto roi_ins = ctx.MultiInput<LoDTensor>("MultiLevelRois"); const auto score_ins = ctx.MultiInput<LoDTensor>("MultiLevelScores"); auto fpn_rois = ctx.Output<LoDTensor>("FpnRois"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); const int post_nms_topN = ctx.Attr<int>("post_nms_topN"); // concat inputs along axis = 0 int roi_offset = 0; int score_offset = 0; int total_roi_num = 0; for (size_t i = 0; i < roi_ins.size(); ++i) { total_roi_num += roi_ins[i]->dims()[0]; } int real_post_num = min(post_nms_topN, total_roi_num); fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor concat_rois; Tensor concat_scores; T* concat_rois_data = concat_rois.mutable_data<T>( {total_roi_num, kBBoxSize}, dev_ctx.GetPlace()); T* concat_scores_data = concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace()); Tensor roi_batch_id_list; roi_batch_id_list.Resize({total_roi_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); int index = 0; int lod_size; auto place = dev_ctx.GetPlace(); auto multi_rois_num = ctx.MultiInput<Tensor>("MultiLevelRoIsNum"); for (size_t i = 0; i < roi_ins.size(); ++i) { auto roi_in = roi_ins[i]; auto score_in = score_ins[i]; if (multi_rois_num.size() > 0) { framework::Tensor temp; paddle::framework::TensorCopySync( *multi_rois_num[i], platform::CPUPlace(), &temp); const int* length_in = temp.data<int>(); lod_size = multi_rois_num[i]->numel(); for (size_t n = 0; n < lod_size; ++n) { for (size_t j = 
0; j < length_in[n]; ++j) { roi_batch_id_data[index++] = n; } } } else { auto length_in = roi_in->lod().back(); lod_size = length_in.size() - 1; for (size_t n = 0; n < lod_size; ++n) { for (size_t j = length_in[n]; j < length_in[n + 1]; ++j) { roi_batch_id_data[index++] = n; } } } memory::Copy(place, concat_rois_data + roi_offset, place, roi_in->data<T>(), roi_in->numel() * sizeof(T), dev_ctx.stream()); memory::Copy(place, concat_scores_data + score_offset, place, score_in->data<T>(), score_in->numel() * sizeof(T), dev_ctx.stream()); roi_offset += roi_in->numel(); score_offset += score_in->numel(); } // copy batch id list to GPU Tensor roi_batch_id_list_gpu; framework::TensorCopy( roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu); Tensor index_in_t; int* idx_in = index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); platform::ForRange<phi::GPUContext> for_range_total(dev_ctx, total_roi_num); for_range_total(RangeInitFunctor{0, 1, idx_in}); Tensor keys_out_t; T* keys_out = keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace()); Tensor index_out_t; int* idx_out = index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairsDescending<T, int>(nullptr, temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num, 0, sizeof(T) * 8, dev_ctx.stream()); // Allocate temporary storage auto d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort score to get corresponding index cub::DeviceRadixSort::SortPairsDescending<T, int>(d_temp_storage->ptr(), temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num, 0, sizeof(T) * 8, dev_ctx.stream()); index_out_t.Resize({real_post_num}); Tensor sorted_rois; sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor sorted_batch_id; sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); phi::funcs::GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois); phi::funcs::GPUGather<int>( dev_ctx, roi_batch_id_list_gpu, index_out_t, &sorted_batch_id); Tensor batch_index_t; int* batch_idx_in = batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); platform::ForRange<phi::GPUContext> for_range_post(dev_ctx, real_post_num); for_range_post(RangeInitFunctor{0, 1, batch_idx_in}); Tensor out_id_t; int* out_id_data = out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs<int, int>(nullptr, temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num, 0, sizeof(int) * 8, dev_ctx.stream()); // Allocate temporary storage d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort batch_id to get corresponding index cub::DeviceRadixSort::SortPairs<int, int>(d_temp_storage->ptr(), temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num, 0, sizeof(int) * 8, dev_ctx.stream()); phi::funcs::GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois); Tensor length_lod; int* length_lod_data = length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace()); phi::funcs::SetConstant<phi::GPUContext, int> set_zero; set_zero(dev_ctx, &length_lod, static_cast<int>(0)); int blocks = NumBlocks(real_post_num); int threads = 
kNumCUDAThreads; // get length-based lod by batch ids GetLengthLoD<<<blocks, threads, 0, dev_ctx.stream()>>>( real_post_num, out_id_data, length_lod_data); std::vector<int> length_lod_cpu(lod_size); memory::Copy(platform::CPUPlace(), length_lod_cpu.data(), place, length_lod_data, sizeof(int) * lod_size, dev_ctx.stream()); dev_ctx.Wait(); std::vector<size_t> offset(1, 0); for (int i = 0; i < lod_size; ++i) { offset.emplace_back(offset.back() + length_lod_cpu[i]); } if (ctx.HasOutput("RoisNum")) { auto* rois_num = ctx.Output<Tensor>("RoisNum"); int* rois_num_data = rois_num->mutable_data<int>({lod_size}, place); memory::Copy(place, rois_num_data, place, length_lod_data, lod_size * sizeof(int), dev_ctx.stream()); } framework::LoD lod; lod.emplace_back(offset); fpn_rois->set_lod(lod); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( collect_fpn_proposals, ops::GPUCollectFpnProposalsOpKernel<phi::GPUContext, float>, ops::GPUCollectFpnProposalsOpKernel<phi::GPUContext, double>);
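Both files above call the device radix sort (cub in the .cu, hipcub in the .hip) with the usual two-phase protocol: a first call with a null temporary-storage pointer only reports the required byte count, and a second call with the allocated buffer does the sort. A minimal sketch of that pattern outside the Paddle framework, using plain cudaMalloc instead of memory::Alloc (function and variable names here are illustrative):

#include <cub/cub.cuh>
#include <cuda_runtime.h>

// Sort float keys descending, carrying int values (e.g. indices) along.
void sort_pairs_descending(const float* d_keys_in, float* d_keys_out,
                           const int* d_vals_in, int* d_vals_out,
                           int n, cudaStream_t stream) {
  void* d_temp = nullptr;
  size_t temp_bytes = 0;
  // First call: d_temp == nullptr, only temp_bytes is written.
  cub::DeviceRadixSort::SortPairsDescending(d_temp, temp_bytes,
      d_keys_in, d_keys_out, d_vals_in, d_vals_out, n,
      0, sizeof(float) * 8, stream);
  cudaMalloc(&d_temp, temp_bytes);
  // Second call: same arguments, now performs the sort on the stream.
  cub::DeviceRadixSort::SortPairsDescending(d_temp, temp_bytes,
      d_keys_in, d_keys_out, d_vals_in, d_vals_out, n,
      0, sizeof(float) * 8, stream);
  cudaFree(d_temp);
}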
3c900669ea4fd4f7c921e8cf4f97ceafcdc049be.hip
// !!! This is a file automatically generated by hipify!!! #include "params.hpp" #include "indices.cuh" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_vector_types.h> // Nearest lower power of 2 __device__ __inline__ uint flp2 (uint x) { return (0x80000000u >> __clz(x)); } //Computes the squared difference between two numbers template<typename T> __device__ __inline__ T L2p2(const T i1, const T i2) { T diff = i1 - i2; return diff*diff; } /* Adds new patch to patch stack (only N most similar are kept) Note: Stack is just an array, not FIFO */ __device__ void add_to_matched_image( uint *stack, //IN/OUT: Stack of N patches matched to current reference patch uchar *num_patches_in_stack,//IN/OUT: Number of patches in stack const uint value, //IN: [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..] const Params & params //IN: Denoising parameters ) { //stack[*num_patches_in_stack-1] is most similar (lowest number) int k; uchar num = (*num_patches_in_stack); if (num < params.N) //add new value { k = num++; while(k > 0 && value > stack[k-1]) { stack[k] = stack[k-1]; --k; } stack[k] = value; *num_patches_in_stack = num; } else if (value >= stack[0]) return; else //delete highest value and add new { k = 1; while (k < params.N && value < stack[k]) { stack[k-1] = stack[k]; k++; } stack[k-1] = value; } } /* Block-matching algorithm For each processed reference patch it finds maximaly N similar patches that pass the distance threshold and stores them to the g_stacks array. It also returns the number of them for each reference patch in g_num_patches_in_stack. Used denoising parameters: n,k,N,T,p Division: Kernel handles gridDim.y lines starting with the line passed in argument. Each block handles warpSize reference patches in line. Each thread process one reference patch. All the warps of a block process the same reference patches. */ __global__ void block_matching( const uchar* __restrict image, //IN: Original image ushort* g_stacks, //OUT: For each reference patch contains addresses of similar patches (patch is adressed by top left corner) [..LOC_Y(sbyte)..|..LOC_X(sbyte)..] 
uint* g_num_patches_in_stack, //OUT: For each reference patch contains number of similar patches const uint2 image_dim, //IN: Image dimensions const uint2 stacks_dim, //IN: Size of area, where reference patches could be located const Params params, //IN: Denoising parameters const uint2 start_point) //IN: Address of the top-left reference patch of a batch { //One block is processing warpSize patches (because each warp is computing distance of same warpSize patches from different displaced patches) int tid = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; int num_warps = blockDim.x/warpSize; //p_block denotes reference rectangle on which current cuda block is computing uint p_rectangle_width = ((warpSize-1) * params.p) + params.k; uint p_rectangle_start = start_point.x + blockIdx.x * warpSize * params.p; //Shared arrays extern __shared__ uint s_data[]; uint *s_diff = (uint*)&s_data; //SIZE: p_rectangle_width*num_warps uint *s_stacks = (uint*)&s_data[p_rectangle_width*num_warps]; //SIZE: params.N*num_warps*warpSize uchar *s_patches_in_stack = (uchar*)&s_data[num_warps*(p_rectangle_width + params.N*warpSize)]; //SIZE: num_warps*warpSize uchar *s_image_p = (uchar*)&s_patches_in_stack[num_warps*warpSize]; //SIZE: p_rectangle_width*params.k s_diff += idx2(0, wid, p_rectangle_width); //Initialize s_patches_in_stack to zero s_patches_in_stack[ idx2(tid, wid, warpSize) ] = 0; int2 p; //Address of reference patch int2 q; //Address of patch against which the difference is computed p.x = p_rectangle_start + (tid*params.p); p.y = start_point.y + (blockIdx.y*params.p); //Ensure, that the bottom most patches will be taken as reference patches regardless the p parameter. if (p.y >= stacks_dim.y && p.y < stacks_dim.y + params.p - 1) p.y = stacks_dim.y - 1; else if (p.y >= stacks_dim.y) return; //Ensure, that the right most patches will be taken as reference patches regardless the p parameter. uint inner_p_x = tid*params.p; if (p.x >= stacks_dim.x && p.x < stacks_dim.x + params.p - 1) { inner_p_x -= (p.x - (stacks_dim.x - 1)); p.x = stacks_dim.x - 1; } //Load reference patches needed by actual block to shared memory for(int i = threadIdx.x; i < p_rectangle_width*params.k; i+=blockDim.x) { int sx = i % p_rectangle_width; int sy = i / p_rectangle_width; if (p_rectangle_start+sx >= image_dim.x) continue; s_image_p[i] = image[idx2(p_rectangle_start+sx,p.y+sy,image_dim.x)]; } __syncthreads(); //scale difference so that it can fit ushort uint shift = (__clz(params.Tn) < 16u) ? 16u - (uint)__clz(params.Tn) : 0; //Ensure that displaced patch coordinates (q) will be positive int2 from; from.y = (p.y - (int)params.n < 0) ? -p.y : -(int)params.n; from.x = (((int)p_rectangle_start) - (int)params.n < 0) ? -((int)p_rectangle_start) : -(int)params.n; from.x += wid; //For each displacement (x,y) in n neighbourhood for(int y = from.y; y <= (int)params.n; ++y) { q.y = p.y + y; if (q.y >= stacks_dim.y) break; for(int x = from.x; x <= (int)params.n; x += num_warps) { //Reference patch is always the most similar to itself (there is no need to copute it) if (x == 0 && y == 0) continue; //Each warp is computing the same patch with slightly different displacement. 
//Compute distance of reference patch p from current patch q which is dispaced by (x+tid,y) //q_block denotes displaced rectangle which is processed by the current warp uint q_rectangle_start = p_rectangle_start + x; q.x = q_rectangle_start + inner_p_x; //Compute distance for each column of reference patch for(uint i = tid; i < p_rectangle_width && p_rectangle_start+i < image_dim.x && q_rectangle_start+i < image_dim.x; i+=warpSize) { uint dist = 0; for(uint iy = 0; iy < params.k; ++iy) { dist += L2p2((int)s_image_p[ idx2(i, iy, p_rectangle_width) ], (int)image[ idx2(q_rectangle_start+i, q.y+iy, image_dim.x) ]); } s_diff[i] = dist; } if (p.x >= stacks_dim.x || q.x >= stacks_dim.x) continue; //Sum column distances to obtain patch distance uint diff = 0; for (uint i = 0; i < params.k; ++i) diff += s_diff[inner_p_x + i]; //Distance threshold if(diff < params.Tn) { uint loc_y = (uint)((q.y - p.y) & 0xFF); //relative location y (-127 to 127) uint loc_x = (uint)((q.x - p.x) & 0xFF); //relative location x (-127 to 127) diff >>= shift; diff <<= 16u; // [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..] diff |= (loc_y << 8u); diff |= loc_x; //Add current patch to s_stacks add_to_matched_image( &s_stacks[ params.N * idx2(tid, wid, warpSize) ], &s_patches_in_stack[ idx2(tid, wid, warpSize) ], diff, params ); } } } __syncthreads(); uint batch_size = gridDim.x*warpSize; uint block_address_x = blockIdx.x*warpSize+tid; if (wid > 0) return; //Select N most similar patches for each reference patch from stacks in shared memory and save them to global memory //Each thread represents one reference patch //Each thread will find N most similar blocks in num_warps stacks (which were computed by different warps) and save them into global memory //In shared memory the most similar patch is at the end, in global memory the order does not matter //DEV: performance impact cca 8% if (p.x >= stacks_dim.x) return; int j; for (j = 0; j < params.N; ++j) { uint count = 0; uint minIdx = 0; uint minVal = 0xFFFFFFFF; //INF //Finds patch with minimal value of remaining for (int i = minIdx; i < num_warps; ++i) { count = (uint)s_patches_in_stack[ idx2(tid, i, warpSize) ]; if (count == 0) continue; uint newMinVal = s_stacks[ idx3(count-1,tid,i,params.N,warpSize) ]; if (newMinVal < minVal) { minVal = newMinVal; minIdx = i; } } if (minVal == 0xFFFFFFFF) break; //All stacks are empty //Remove patch from shared stack s_patches_in_stack[ idx2(tid, minIdx, warpSize) ]--; //Adds patch to stack in global memory g_stacks[idx3(j, block_address_x, blockIdx.y, params.N, batch_size)] = (ushort)(minVal & 0xFFFF); } //Save to the global memory the number of similar patches rounded to the nearest lower power of two g_num_patches_in_stack[ idx2(block_address_x ,blockIdx.y, batch_size) ] = flp2((uint)j+1)-1; } extern "C" void run_block_matching( const uchar* __restrict image, //Original image ushort* stacks, //For each reference patch contains addresses of similar patches (patch is adressed by top left corner) uint* num_patches_in_stack, //For each reference patch contains number of similar patches const uint2 image_dim, //Image dimensions const uint2 stacks_dim, //size of area where reference patches could be located const Params params, //Denoising parameters const uint2 start_point, //Address of the top-left reference patch of a batch const dim3 num_threads, const dim3 num_blocks, const uint shared_memory_size ) { hipLaunchKernelGGL(( block_matching), dim3(num_blocks), dim3(num_threads),shared_memory_size, 0, image, stacks, 
num_patches_in_stack, image_dim, stacks_dim, params, start_point ); }
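run_block_matching above receives the dynamic shared-memory size from the caller; inside the kernel, four arrays are carved out of the single extern __shared__ buffer (s_diff, s_stacks, s_patches_in_stack, s_image_p) with sizes given in the kernel's comments. The sketch below computes that byte count from the same formulas; the warp size of 32 is an assumption (the kernel uses the built-in warpSize), and the original project may size or align this differently:

#include "params.hpp"  // assumed to provide Params with fields p, k, N

unsigned int block_matching_shared_bytes(const Params& params, unsigned int num_warps) {
  const unsigned int warp_size = 32;  // assumption; query the device in practice
  unsigned int p_rectangle_width = (warp_size - 1) * params.p + params.k;
  unsigned int bytes = 0;
  bytes += p_rectangle_width * num_warps * sizeof(unsigned int);     // s_diff
  bytes += params.N * num_warps * warp_size * sizeof(unsigned int);  // s_stacks
  bytes += num_warps * warp_size * sizeof(unsigned char);            // s_patches_in_stack
  bytes += p_rectangle_width * params.k * sizeof(unsigned char);     // s_image_p
  return bytes;
}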
3c900669ea4fd4f7c921e8cf4f97ceafcdc049be.cu
#include "params.hpp" #include "indices.cuh" #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <vector_types.h> // Nearest lower power of 2 __device__ __inline__ uint flp2 (uint x) { return (0x80000000u >> __clz(x)); } //Computes the squared difference between two numbers template<typename T> __device__ __inline__ T L2p2(const T i1, const T i2) { T diff = i1 - i2; return diff*diff; } /* Adds new patch to patch stack (only N most similar are kept) Note: Stack is just an array, not FIFO */ __device__ void add_to_matched_image( uint *stack, //IN/OUT: Stack of N patches matched to current reference patch uchar *num_patches_in_stack,//IN/OUT: Number of patches in stack const uint value, //IN: [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..] const Params & params //IN: Denoising parameters ) { //stack[*num_patches_in_stack-1] is most similar (lowest number) int k; uchar num = (*num_patches_in_stack); if (num < params.N) //add new value { k = num++; while(k > 0 && value > stack[k-1]) { stack[k] = stack[k-1]; --k; } stack[k] = value; *num_patches_in_stack = num; } else if (value >= stack[0]) return; else //delete highest value and add new { k = 1; while (k < params.N && value < stack[k]) { stack[k-1] = stack[k]; k++; } stack[k-1] = value; } } /* Block-matching algorithm For each processed reference patch it finds maximaly N similar patches that pass the distance threshold and stores them to the g_stacks array. It also returns the number of them for each reference patch in g_num_patches_in_stack. Used denoising parameters: n,k,N,T,p Division: Kernel handles gridDim.y lines starting with the line passed in argument. Each block handles warpSize reference patches in line. Each thread process one reference patch. All the warps of a block process the same reference patches. */ __global__ void block_matching( const uchar* __restrict image, //IN: Original image ushort* g_stacks, //OUT: For each reference patch contains addresses of similar patches (patch is adressed by top left corner) [..LOC_Y(sbyte)..|..LOC_X(sbyte)..] 
uint* g_num_patches_in_stack, //OUT: For each reference patch contains number of similar patches const uint2 image_dim, //IN: Image dimensions const uint2 stacks_dim, //IN: Size of area, where reference patches could be located const Params params, //IN: Denoising parameters const uint2 start_point) //IN: Address of the top-left reference patch of a batch { //One block is processing warpSize patches (because each warp is computing distance of same warpSize patches from different displaced patches) int tid = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; int num_warps = blockDim.x/warpSize; //p_block denotes reference rectangle on which current cuda block is computing uint p_rectangle_width = ((warpSize-1) * params.p) + params.k; uint p_rectangle_start = start_point.x + blockIdx.x * warpSize * params.p; //Shared arrays extern __shared__ uint s_data[]; uint *s_diff = (uint*)&s_data; //SIZE: p_rectangle_width*num_warps uint *s_stacks = (uint*)&s_data[p_rectangle_width*num_warps]; //SIZE: params.N*num_warps*warpSize uchar *s_patches_in_stack = (uchar*)&s_data[num_warps*(p_rectangle_width + params.N*warpSize)]; //SIZE: num_warps*warpSize uchar *s_image_p = (uchar*)&s_patches_in_stack[num_warps*warpSize]; //SIZE: p_rectangle_width*params.k s_diff += idx2(0, wid, p_rectangle_width); //Initialize s_patches_in_stack to zero s_patches_in_stack[ idx2(tid, wid, warpSize) ] = 0; int2 p; //Address of reference patch int2 q; //Address of patch against which the difference is computed p.x = p_rectangle_start + (tid*params.p); p.y = start_point.y + (blockIdx.y*params.p); //Ensure, that the bottom most patches will be taken as reference patches regardless the p parameter. if (p.y >= stacks_dim.y && p.y < stacks_dim.y + params.p - 1) p.y = stacks_dim.y - 1; else if (p.y >= stacks_dim.y) return; //Ensure, that the right most patches will be taken as reference patches regardless the p parameter. uint inner_p_x = tid*params.p; if (p.x >= stacks_dim.x && p.x < stacks_dim.x + params.p - 1) { inner_p_x -= (p.x - (stacks_dim.x - 1)); p.x = stacks_dim.x - 1; } //Load reference patches needed by actual block to shared memory for(int i = threadIdx.x; i < p_rectangle_width*params.k; i+=blockDim.x) { int sx = i % p_rectangle_width; int sy = i / p_rectangle_width; if (p_rectangle_start+sx >= image_dim.x) continue; s_image_p[i] = image[idx2(p_rectangle_start+sx,p.y+sy,image_dim.x)]; } __syncthreads(); //scale difference so that it can fit ushort uint shift = (__clz(params.Tn) < 16u) ? 16u - (uint)__clz(params.Tn) : 0; //Ensure that displaced patch coordinates (q) will be positive int2 from; from.y = (p.y - (int)params.n < 0) ? -p.y : -(int)params.n; from.x = (((int)p_rectangle_start) - (int)params.n < 0) ? -((int)p_rectangle_start) : -(int)params.n; from.x += wid; //For each displacement (x,y) in n neighbourhood for(int y = from.y; y <= (int)params.n; ++y) { q.y = p.y + y; if (q.y >= stacks_dim.y) break; for(int x = from.x; x <= (int)params.n; x += num_warps) { //Reference patch is always the most similar to itself (there is no need to copute it) if (x == 0 && y == 0) continue; //Each warp is computing the same patch with slightly different displacement. 
//Compute distance of reference patch p from current patch q which is dispaced by (x+tid,y) //q_block denotes displaced rectangle which is processed by the current warp uint q_rectangle_start = p_rectangle_start + x; q.x = q_rectangle_start + inner_p_x; //Compute distance for each column of reference patch for(uint i = tid; i < p_rectangle_width && p_rectangle_start+i < image_dim.x && q_rectangle_start+i < image_dim.x; i+=warpSize) { uint dist = 0; for(uint iy = 0; iy < params.k; ++iy) { dist += L2p2((int)s_image_p[ idx2(i, iy, p_rectangle_width) ], (int)image[ idx2(q_rectangle_start+i, q.y+iy, image_dim.x) ]); } s_diff[i] = dist; } if (p.x >= stacks_dim.x || q.x >= stacks_dim.x) continue; //Sum column distances to obtain patch distance uint diff = 0; for (uint i = 0; i < params.k; ++i) diff += s_diff[inner_p_x + i]; //Distance threshold if(diff < params.Tn) { uint loc_y = (uint)((q.y - p.y) & 0xFF); //relative location y (-127 to 127) uint loc_x = (uint)((q.x - p.x) & 0xFF); //relative location x (-127 to 127) diff >>= shift; diff <<= 16u; // [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..] diff |= (loc_y << 8u); diff |= loc_x; //Add current patch to s_stacks add_to_matched_image( &s_stacks[ params.N * idx2(tid, wid, warpSize) ], &s_patches_in_stack[ idx2(tid, wid, warpSize) ], diff, params ); } } } __syncthreads(); uint batch_size = gridDim.x*warpSize; uint block_address_x = blockIdx.x*warpSize+tid; if (wid > 0) return; //Select N most similar patches for each reference patch from stacks in shared memory and save them to global memory //Each thread represents one reference patch //Each thread will find N most similar blocks in num_warps stacks (which were computed by different warps) and save them into global memory //In shared memory the most similar patch is at the end, in global memory the order does not matter //DEV: performance impact cca 8% if (p.x >= stacks_dim.x) return; int j; for (j = 0; j < params.N; ++j) { uint count = 0; uint minIdx = 0; uint minVal = 0xFFFFFFFF; //INF //Finds patch with minimal value of remaining for (int i = minIdx; i < num_warps; ++i) { count = (uint)s_patches_in_stack[ idx2(tid, i, warpSize) ]; if (count == 0) continue; uint newMinVal = s_stacks[ idx3(count-1,tid,i,params.N,warpSize) ]; if (newMinVal < minVal) { minVal = newMinVal; minIdx = i; } } if (minVal == 0xFFFFFFFF) break; //All stacks are empty //Remove patch from shared stack s_patches_in_stack[ idx2(tid, minIdx, warpSize) ]--; //Adds patch to stack in global memory g_stacks[idx3(j, block_address_x, blockIdx.y, params.N, batch_size)] = (ushort)(minVal & 0xFFFF); } //Save to the global memory the number of similar patches rounded to the nearest lower power of two g_num_patches_in_stack[ idx2(block_address_x ,blockIdx.y, batch_size) ] = flp2((uint)j+1)-1; } extern "C" void run_block_matching( const uchar* __restrict image, //Original image ushort* stacks, //For each reference patch contains addresses of similar patches (patch is adressed by top left corner) uint* num_patches_in_stack, //For each reference patch contains number of similar patches const uint2 image_dim, //Image dimensions const uint2 stacks_dim, //size of area where reference patches could be located const Params params, //Denoising parameters const uint2 start_point, //Address of the top-left reference patch of a batch const dim3 num_threads, const dim3 num_blocks, const uint shared_memory_size ) { block_matching<<<num_blocks, num_threads,shared_memory_size>>>( image, stacks, num_patches_in_stack, image_dim, stacks_dim, params, 
start_point ); }
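add_to_matched_image above orders the match stack on a single 32-bit key with the layout [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..]: the already-shifted distance sits in the high 16 bits and the signed y/x displacements of the matched patch sit in the low two bytes, so comparing keys compares distances first while the location stays recoverable. A small host-side sketch of that pack/unpack (illustrative helpers, not part of the original source):

#include <cstdint>

// Pack a shifted distance (assumed to fit in 16 bits) with displacements in [-127, 127].
inline uint32_t pack_match(uint32_t shifted_diff, int dy, int dx) {
  return (shifted_diff << 16) | ((uint32_t)(dy & 0xFF) << 8) | (uint32_t)(dx & 0xFF);
}

inline void unpack_match(uint32_t key, uint32_t& shifted_diff, int& dy, int& dx) {
  shifted_diff = key >> 16;
  dy = (int)(int8_t)((key >> 8) & 0xFF);  // sign-extend the stored byte
  dx = (int)(int8_t)(key & 0xFF);
}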
bf16e049ecfe1ecc2acf0ae9df1106f8ce04f3fe.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by chengjin on 2020-06-30.
//
#include "cu_utils.h"
#include "embedding_kernel.h"

namespace quake {
namespace framework {
namespace ops_lib {

template<typename T,typename Tidx>
__global__ static void _gather(int idx_num,int row,int col,
    const T* data,const Tidx* index,T* output)
{
  int c = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int y = blockIdx.y * blockDim.y + threadIdx.y; // row index*batchsize
  int id=y%idx_num;
  int b=y/idx_num;
  //assign numbers
  if(id<idx_num){
    int row_idx=int(index[b*idx_num+id]);
    if(row_idx<row){
      output[b*idx_num*col+id*col+c]=data[row_idx*col+c];
    }
  }
}

//implements
template<typename T,typename Tidx>
void embedding_forward_gpu(hipStream_t stream,const Tidx* input,
    const T* weight,T* output,int batchsize,int idx_num,int emb_num,int emb_dim)
{
  dim3 Bl(CU2DBLOCK,CU2DBLOCK);
  dim3 Gr(n_blocks(emb_dim,CU2DBLOCK),n_blocks(idx_num*batchsize,CU2DBLOCK));
  hipLaunchKernelGGL(( _gather), dim3(Gr),dim3(Bl),0,stream, idx_num,emb_num,emb_dim,weight,input,output);
}

template void embedding_forward_gpu<float,float>(hipStream_t stream,const float* input,
    const float* weight,float* output,int batchsize,int idx_num,int emb_num,int emb_dim);

template void embedding_forward_gpu<__half,__half>(hipStream_t stream,const __half* input,
    const __half* weight,__half* output,int batchsize,int idx_num,int emb_num,int emb_dim);

template void embedding_forward_gpu<float,int>(hipStream_t stream,const int* input,
    const float* weight,float* output,int batchsize,int idx_num,int emb_num,int emb_dim);

} // namespace ops_lib
} // namespace framework
} // namespace quake
bf16e049ecfe1ecc2acf0ae9df1106f8ce04f3fe.cu
//
// Created by chengjin on 2020-06-30.
//
#include "cu_utils.h"
#include "embedding_kernel.h"

namespace quake {
namespace framework {
namespace ops_lib {

template<typename T,typename Tidx>
__global__ static void _gather(int idx_num,int row,int col,
    const T* data,const Tidx* index,T* output)
{
  int c = blockIdx.x * blockDim.x + threadIdx.x; // col index
  int y = blockIdx.y * blockDim.y + threadIdx.y; // row index*batchsize
  int id=y%idx_num;
  int b=y/idx_num;
  //assign numbers
  if(id<idx_num){
    int row_idx=int(index[b*idx_num+id]);
    if(row_idx<row){
      output[b*idx_num*col+id*col+c]=data[row_idx*col+c];
    }
  }
}

//implements
template<typename T,typename Tidx>
void embedding_forward_gpu(cudaStream_t stream,const Tidx* input,
    const T* weight,T* output,int batchsize,int idx_num,int emb_num,int emb_dim)
{
  dim3 Bl(CU2DBLOCK,CU2DBLOCK);
  dim3 Gr(n_blocks(emb_dim,CU2DBLOCK),n_blocks(idx_num*batchsize,CU2DBLOCK));
  _gather<<<Gr,Bl,0,stream>>>(idx_num,emb_num,emb_dim,weight,input,output);
}

template void embedding_forward_gpu<float,float>(cudaStream_t stream,const float* input,
    const float* weight,float* output,int batchsize,int idx_num,int emb_num,int emb_dim);

template void embedding_forward_gpu<__half,__half>(cudaStream_t stream,const __half* input,
    const __half* weight,__half* output,int batchsize,int idx_num,int emb_num,int emb_dim);

template void embedding_forward_gpu<float,int>(cudaStream_t stream,const int* input,
    const float* weight,float* output,int batchsize,int idx_num,int emb_num,int emb_dim);

} // namespace ops_lib
} // namespace framework
} // namespace quake
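A hypothetical caller of embedding_forward_gpu<float, int> above; the wrapper and its names are illustrative, and it assumes embedding_kernel.h declares the templated launcher. The kernel indexes the inputs as indices [batchsize x idx_num], weight table [emb_num x emb_dim], and output [batchsize x idx_num x emb_dim]; note that it bounds-checks the row index (id < idx_num) but not the column index c, so callers appear to rely on emb_dim dividing evenly into CU2DBLOCK-wide blocks.

#include <cuda_runtime.h>
#include "embedding_kernel.h"  // assumed to declare quake::framework::ops_lib::embedding_forward_gpu

// d_indices: [batchsize x idx_num] row ids into the table
// d_weight : [emb_num x emb_dim] embedding table
// d_output : [batchsize x idx_num x emb_dim] gathered rows
void lookup_embeddings(const int* d_indices, const float* d_weight, float* d_output,
                       int batchsize, int idx_num, int emb_num, int emb_dim,
                       cudaStream_t stream) {
  quake::framework::ops_lib::embedding_forward_gpu<float, int>(
      stream, d_indices, d_weight, d_output, batchsize, idx_num, emb_num, emb_dim);
}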
81c15d8ed0d5bd82e6cc84bf021ecd3055c7804c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This file is adapted from TetGen #include "CudaMesh.h" #include "CudaPredicates.h" #include <thrust/device_ptr.h> #include <stdio.h> #include <assert.h> #include <vector> /////////////////////////////////////////////////////////////////////////////// // // // Variables // // // /////////////////////////////////////////////////////////////////////////////// /* Kernel constants */ __constant__ REAL raw_kernelconstants[2]; REAL host_kernelconstants[2]; /* Helpers */ __device__ uint64 cudamesh_encodeUInt64Priority(int priority, int index) { return (((uint64)priority) << 32) + index; } __device__ int cudamesh_getUInt64PriorityIndex(uint64 priority) { return (priority & 0xFFFFFFFF); } __device__ int cudamesh_getUInt64Priority(uint64 priority) { return (priority >> 32); } __device__ bool cudamesh_isNearZero(double val) { if (val > -EPSILON && val < EPSILON) return true; else return false; } __device__ bool cudamesh_isInvalid(double val) { if (val > 10000000 || val < -10000000) return true; else return false; } /* Initialize fast lookup tables for mesh maniplulation primitives. */ __constant__ int raw_bondtbl[144]; __constant__ int raw_fsymtbl[144]; __constant__ int raw_enexttbl[12]; __constant__ int raw_eprevtbl[12]; __constant__ int raw_enextesymtbl[12]; __constant__ int raw_eprevesymtbl[12]; __constant__ int raw_eorgoppotbl[12]; __constant__ int raw_edestoppotbl[12]; __constant__ int raw_facepivot1[12]; __constant__ int raw_facepivot2[144]; __constant__ int raw_tsbondtbl[72]; __constant__ int raw_stbondtbl[72]; __constant__ int raw_tspivottbl[72]; __constant__ int raw_stpivottbl[72]; int host_bondtbl[144] = { 0, }; int host_fsymtbl[144] = { 0, }; int host_enexttbl[12] = { 0, }; int host_eprevtbl[12] = { 0, }; int host_enextesymtbl[12] = { 0, }; int host_eprevesymtbl[12] = { 0, }; int host_eorgoppotbl[12] = { 0, }; int host_edestoppotbl[12] = { 0, }; int host_facepivot1[12] = { 0, }; int host_facepivot2[144] = { 0, }; int host_tsbondtbl[72] = { 0, }; int host_stbondtbl[72] = { 0, }; int host_tspivottbl[72] = { 0, }; int host_stpivottbl[72] = { 0, }; // Table 'esymtbl' takes an directed edge (version) as input, returns the // inversed edge (version) of it. __constant__ int raw_esymtbl[12]; int host_esymtbl[12] = { 9, 6, 11, 4, 3, 7, 1, 5, 10, 0, 8, 2 }; // The following four tables give the 12 permutations of the set {0,1,2,3}. __constant__ int raw_orgpivot[12]; __constant__ int raw_destpivot[12]; __constant__ int raw_apexpivot[12]; __constant__ int raw_oppopivot[12]; int host_orgpivot[12] = { 3, 3, 1, 1, 2, 0, 0, 2, 1, 2, 3, 0 }; int host_destpivot[12] = { 2, 0, 0, 2, 1, 2, 3, 0, 3, 3, 1, 1 }; int host_apexpivot[12] = { 1, 2, 3, 0, 3, 3, 1, 1, 2, 0, 0, 2 }; int host_oppopivot[12] = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 }; // The twelve versions correspond to six undirected edges. The following two // tables map a version to an undirected edge and vice versa. __constant__ int raw_ver2edge[12]; __constant__ int raw_edge2ver[6]; int host_ver2edge[12] = { 0, 1, 2, 3, 3, 5, 1, 5, 4, 0, 4, 2 }; int host_edge2ver[6] = { 0, 1, 2, 3, 8, 5 }; // Edge versions whose apex or opposite may be dummypoint. __constant__ int raw_epivot[12]; int host_epivot[12] = { 4, 5, 2, 11, 4, 5, 2, 11, 4, 5, 2, 11 }; // Table 'snextpivot' takes an edge version as input, returns the next edge // version in the same edge ring. 
__constant__ int raw_snextpivot[6]; int host_snextpivot[6] = { 2, 5, 4, 1, 0, 3 }; // The following three tables give the 6 permutations of the set {0,1,2}. // An offset 3 is added to each element for a direct access of the points // in the triangle data structure. __constant__ int raw_sorgpivot[6]; __constant__ int raw_sdestpivot[6]; __constant__ int raw_sapexpivot[6]; int host_sorgpivot[6] = { 0, 1, 1, 2, 2, 0 }; int host_sdestpivot[6] = { 1, 0, 2, 1, 0, 2 }; int host_sapexpivot[6] = { 2, 2, 0, 0, 1, 1 }; /* Initialize Geometric Predicates arrays*/ REAL host_constData[17]; int host_constOptions[2]; /////////////////////////////////////////////////////////////////////////////// // // // Geometric helpers // // // /////////////////////////////////////////////////////////////////////////////// __device__ bool cudamesh_lu_decmp(REAL lu[4][4], int n, int* ps, REAL* d, int N) { REAL scales[4]; REAL pivot, biggest, mult, tempf; int pivotindex = 0; int i, j, k; *d = 1.0; // No row interchanges yet. for (i = N; i < n + N; i++) { // For each row. // Find the largest element in each row for row equilibration biggest = 0.0; for (j = N; j < n + N; j++) if (biggest < (tempf = fabs(lu[i][j]))) biggest = tempf; if (biggest != 0.0) scales[i] = 1.0 / biggest; else { scales[i] = 0.0; return false; // Zero row: singular matrix. } ps[i] = i; // Initialize pivot sequence. } for (k = N; k < n + N - 1; k++) { // For each column. // Find the largest element in each column to pivot around. biggest = 0.0; for (i = k; i < n + N; i++) { if (biggest < (tempf = fabs(lu[ps[i]][k]) * scales[ps[i]])) { biggest = tempf; pivotindex = i; } } if (biggest == 0.0) { return false; // Zero column: singular matrix. } if (pivotindex != k) { // Update pivot sequence. j = ps[k]; ps[k] = ps[pivotindex]; ps[pivotindex] = j; *d = -(*d); // ...and change the parity of d. } // Pivot, eliminating an extra variable each time pivot = lu[ps[k]][k]; for (i = k + 1; i < n + N; i++) { lu[ps[i]][k] = mult = lu[ps[i]][k] / pivot; if (mult != 0.0) { for (j = k + 1; j < n + N; j++) lu[ps[i]][j] -= mult * lu[ps[k]][j]; } } } // (lu[ps[n + N - 1]][n + N - 1] == 0.0) ==> A is singular. return lu[ps[n + N - 1]][n + N - 1] != 0.0; } __device__ void cudamesh_lu_solve(REAL lu[4][4], int n, int* ps, REAL* b, int N) { int i, j; REAL X[4], dot; for (i = N; i < n + N; i++) X[i] = 0.0; // Vector reduction using U triangular matrix. for (i = N; i < n + N; i++) { dot = 0.0; for (j = N; j < i + N; j++) dot += lu[ps[i]][j] * X[j]; X[i] = b[ps[i]] - dot; } // Back substitution, in L triangular matrix. for (i = n + N - 1; i >= N; i--) { dot = 0.0; for (j = i + 1; j < n + N; j++) dot += lu[ps[i]][j] * X[j]; X[i] = (X[i] - dot) / lu[ps[i]][i]; } for (i = N; i < n + N; i++) b[i] = X[i]; } __device__ bool cudamesh_circumsphere(REAL* pa, REAL* pb, REAL* pc, REAL* pd, REAL* cent, REAL* radius) { REAL A[4][4], rhs[4], D; int indx[4]; // Compute the coefficient matrix A (3x3). A[0][0] = pb[0] - pa[0]; A[0][1] = pb[1] - pa[1]; A[0][2] = pb[2] - pa[2]; A[1][0] = pc[0] - pa[0]; A[1][1] = pc[1] - pa[1]; A[1][2] = pc[2] - pa[2]; if (pd != NULL) { A[2][0] = pd[0] - pa[0]; A[2][1] = pd[1] - pa[1]; A[2][2] = pd[2] - pa[2]; } else { cudamesh_cross(A[0], A[1], A[2]); } // Compute the right hand side vector b (3x1). 
rhs[0] = 0.5 * cudamesh_dot(A[0], A[0]); rhs[1] = 0.5 * cudamesh_dot(A[1], A[1]); if (pd != NULL) { rhs[2] = 0.5 * cudamesh_dot(A[2], A[2]); } else { rhs[2] = 0.0; } // Solve the 3 by 3 equations use LU decomposition with partial pivoting // and backward and forward substitute.. if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { if (radius != (REAL *)NULL) *radius = 0.0; return false; } cudamesh_lu_solve(A, 3, indx, rhs, 0); if (cent != (REAL *)NULL) { cent[0] = pa[0] + rhs[0]; cent[1] = pa[1] + rhs[1]; cent[2] = pa[2] + rhs[2]; } if (radius != (REAL *)NULL) { *radius = sqrt(rhs[0] * rhs[0] + rhs[1] * rhs[1] + rhs[2] * rhs[2]); } return true; } __device__ void cudamesh_facenormal(REAL* pa, REAL* pb, REAL* pc, REAL *n, int pivot, REAL* lav) { REAL v1[3], v2[3], v3[3], *pv1, *pv2; REAL L1, L2, L3; v1[0] = pb[0] - pa[0]; // edge vector v1: a->b v1[1] = pb[1] - pa[1]; v1[2] = pb[2] - pa[2]; v2[0] = pa[0] - pc[0]; // edge vector v2: c->a v2[1] = pa[1] - pc[1]; v2[2] = pa[2] - pc[2]; // Default, normal is calculated by: v1 x (-v2) (see Fig. fnormal). if (pivot > 0) { // Choose edge vectors by Burdakov's algorithm. v3[0] = pc[0] - pb[0]; // edge vector v3: b->c v3[1] = pc[1] - pb[1]; v3[2] = pc[2] - pb[2]; L1 = cudamesh_dot(v1, v1); L2 = cudamesh_dot(v2, v2); L3 = cudamesh_dot(v3, v3); // Sort the three edge lengths. if (L1 < L2) { if (L2 < L3) { pv1 = v1; pv2 = v2; // n = v1 x (-v2). } else { pv1 = v3; pv2 = v1; // n = v3 x (-v1). } } else { if (L1 < L3) { pv1 = v1; pv2 = v2; // n = v1 x (-v2). } else { pv1 = v2; pv2 = v3; // n = v2 x (-v3). } } if (lav) { // return the average edge length. *lav = (sqrt(L1) + sqrt(L2) + sqrt(L3)) / 3.0; } } else { pv1 = v1; pv2 = v2; // n = v1 x (-v2). } // Calculate the face normal. cudamesh_cross(pv1, pv2, n); // Inverse the direction; n[0] = -n[0]; n[1] = -n[1]; n[2] = -n[2]; } __device__ void cudamesh_calculateabovepoint4(REAL* pa, REAL* pb, REAL* pc, REAL* pd, REAL* abovept) { REAL n1[3], n2[3], *norm; REAL len, len1, len2; // Select a base. 
cudamesh_facenormal(pa, pb, pc, n1, 1, NULL); len1 = sqrt(cudamesh_dot(n1, n1)); cudamesh_facenormal(pa, pb, pd, n2, 1, NULL); len2 = sqrt(cudamesh_dot(n2, n2)); if (len1 > len2) { norm = n1; len = len1; } else { norm = n2; len = len2; } assert(len > 0); norm[0] /= len; norm[1] /= len; norm[2] /= len; len = cudamesh_distance(pa, pb); abovept[0] = pa[0] + len * norm[0]; abovept[1] = pa[1] + len * norm[1]; abovept[2] = pa[2] + len * norm[2]; } __device__ int cudamesh_segsegadjacent( int seg1, int seg2, int* d_seg2parentidxlist, int* d_segparentendpointidxlist ) { int segidx1 = d_seg2parentidxlist[seg1]; int segidx2 = d_seg2parentidxlist[seg2]; if (segidx1 == segidx2) return 0; int pa1 = d_segparentendpointidxlist[segidx1 * 2]; int pb1 = d_segparentendpointidxlist[segidx1 * 2 + 1]; int pa2 = d_segparentendpointidxlist[segidx2 * 2]; int pb2 = d_segparentendpointidxlist[segidx2 * 2 + 1]; if ((pa1 == pa2) || (pa1 == pb2) || (pb1 == pa2) || (pb1 == pb2)) return 1; return 0; } __device__ int cudamesh_segfacetadjacent( int subseg, int subsh, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist ) { int segidx = d_seg2parentidxlist[subseg]; int pa = d_segparentendpointidxlist[segidx * 2]; int pb = d_segparentendpointidxlist[segidx * 2 + 1]; int fidx = d_tri2parentidxlist[subsh]; int count = 0, i; int p; for (i = d_triid2parentoffsetlist[fidx]; i < d_triid2parentoffsetlist[fidx + 1]; i++) { p = d_triparentendpointidxlist[i]; if (p == pa || p == pb) count++; } return count == 1; } __device__ int cudamesh_facetfacetadjacent( int subsh1, int subsh2, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist ) { int count = 0; int fidx1 = d_tri2parentidxlist[subsh1]; int fidx2 = d_tri2parentidxlist[subsh2]; if (fidx1 == fidx2) return 0; int p1, p2; for (int i = d_triid2parentoffsetlist[fidx1]; i < d_triid2parentoffsetlist[fidx1 + 1]; i++) { p1 = d_triparentendpointidxlist[i]; for (int j = d_triid2parentoffsetlist[fidx2]; j < d_triid2parentoffsetlist[fidx2 + 1]; j++) { p2 = d_triparentendpointidxlist[j]; if (p1 == p2) { count++; break; } } } return count > 0; } __device__ REAL cudamesh_tetrahedronvolume( int tetid, REAL* d_pointlist, int* d_tetlist ) { REAL vda[3], vdb[3], vdc[3]; REAL vab[3], vbc[3], vca[3]; REAL elen[6]; int ipa, ipb, ipc, ipd; REAL *pa, *pb, *pc, *pd; int i; ipa = d_tetlist[4 * tetid + 0]; ipb = d_tetlist[4 * tetid + 1]; ipc = d_tetlist[4 * tetid + 2]; ipd = d_tetlist[4 * tetid + 3]; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); pc = cudamesh_id2pointlist(ipc, d_pointlist); pd = cudamesh_id2pointlist(ipd, d_pointlist); // Get the edge vectors vda: d->a, vdb: d->b, vdc: d->c. for (i = 0; i < 3; i++) vda[i] = pa[i] - pd[i]; for (i = 0; i < 3; i++) vdb[i] = pb[i] - pd[i]; for (i = 0; i < 3; i++) vdc[i] = pc[i] - pd[i]; // Get the other edge vectors. 
for (i = 0; i < 3; i++) vab[i] = pb[i] - pa[i]; for (i = 0; i < 3; i++) vbc[i] = pc[i] - pb[i]; for (i = 0; i < 3; i++) vca[i] = pa[i] - pc[i]; elen[0] = cudamesh_dot(vda, vda); elen[1] = cudamesh_dot(vdb, vdb); elen[2] = cudamesh_dot(vdc, vdc); elen[3] = cudamesh_dot(vab, vab); elen[4] = cudamesh_dot(vbc, vbc); elen[5] = cudamesh_dot(vca, vca); // Use heron-type formula to compute the volume of a tetrahedron // https://en.wikipedia.org/wiki/Heron%27s_formula REAL U, V, W, u, v, w; // first three form a triangle; u opposite to U and so on REAL X, x, Y, y, Z, z; REAL a, b, c, d; U = sqrt(elen[3]); //ab V = sqrt(elen[4]); //bc W = sqrt(elen[5]); //ca u = sqrt(elen[2]); //dc v = sqrt(elen[0]); //da w = sqrt(elen[1]); //db X = (w - U + v)*(U + v + w); x = (U - v + w)*(v - w + U); Y = (u - V + w)*(V + w + u); y = (V - w + u)*(w - u + V); Z = (v - W + u)*(W + u + v); z = (W - u + v)*(u - v + W); a = sqrt(x*Y*Z); b = sqrt(y*Z*X); c = sqrt(z*X*Y); d = sqrt(x*y*z); REAL vol = sqrt((-a + b + c + d)*(a - b + c + d)*(a + b - c + d)*(a + b + c - d)) / (192 * u*v*w); return vol; } /////////////////////////////////////////////////////////////////////////////// // // // Geometric predicates with symbolic perturbation // // // /////////////////////////////////////////////////////////////////////////////// __device__ REAL cudamesh_insphere_s(REAL* pa, REAL* pb, REAL* pc, REAL* pd, REAL* pe, int ia, int ib, int ic, int id, int ie) { REAL sign; // Using fast version means using inexact method. // This may cause robustness issues. // Need to handle later on. sign = cuda_inspherefast(pa, pb, pc, pd, pe); //if (fabs(sign) < EPSILON) // sign = cuda_insphereexact(pa, pb, pc, pd, pe); if (sign != 0.0) { return sign; } // Symbolic perturbation. REAL* pt[5], *swappt; int idx[5], swapidx; REAL oriA, oriB; int swaps, count; int n, i; pt[0] = pa; pt[1] = pb; pt[2] = pc; pt[3] = pd; pt[4] = pe; idx[0] = ia; idx[1] = ib; idx[2] = ic; idx[3] = id; idx[4] = ie; // Sort the five points such that their indices are in the increasing // order. An optimized bubble sort algorithm is used, i.e., it has // the worst case O(n^2) runtime, but it is usually much faster. swaps = 0; // Record the total number of swaps. n = 5; do { count = 0; n = n - 1; for (i = 0; i < n; i++) { if (idx[i] > idx[i + 1]) { swappt = pt[i]; pt[i] = pt[i + 1]; pt[i + 1] = swappt; swapidx = idx[i]; idx[i] = idx[i + 1]; idx[i + 1] = swapidx; count++; } } swaps += count; break; } while (count > 0); // Continue if some points are swapped. oriA = cuda_orient3d(pt[1], pt[2], pt[3], pt[4]); if (oriA != 0.0) { // Flip the sign if there are odd number of swaps. if ((swaps % 2) != 0) oriA = -oriA; return oriA; } oriB = -cuda_orient3d(pt[0], pt[2], pt[3], pt[4]); assert(oriB != 0.0); // SELF_CHECK // Flip the sign if there are odd number of swaps. if ((swaps % 2) != 0) oriB = -oriB; return oriB; } __device__ REAL cudamesh_incircle3d(REAL* pa, REAL* pb, REAL* pc, REAL* pd) { REAL area2[2], n1[3], n2[3], c[3]; REAL sign, r, d; // Calculate the areas of the two triangles [a, b, c] and [b, a, d]. cudamesh_facenormal(pa, pb, pc, n1, 1, NULL); area2[0] = cudamesh_dot(n1, n1); cudamesh_facenormal(pb, pa, pd, n2, 1, NULL); area2[1] = cudamesh_dot(n2, n2); if (area2[0] > area2[1]) { // Choose [a, b, c] as the base triangle. cudamesh_circumsphere(pa, pb, pc, NULL, c, &r); d = cudamesh_distance(c, pd); } else { // Choose [b, a, d] as the base triangle. 
if (area2[1] > 0) { cudamesh_circumsphere(pb, pa, pd, NULL, c, &r); d = cudamesh_distance(c, pc); } else { // The four points are collinear. This case only happens on the boundary. return 0; // Return "not inside". } } sign = d - r; if (fabs(sign) / r < EPSILON) { sign = 0; } return sign; } /////////////////////////////////////////////////////////////////////////////// // // // Mesh manipulation primitives // // // /////////////////////////////////////////////////////////////////////////////// /* Initialize tables */ void cudamesh_inittables() { // init arrays int i, j; hipMemcpyToSymbol(raw_esymtbl, host_esymtbl, 12 * sizeof(int)); hipMemcpyToSymbol(raw_orgpivot, host_orgpivot, 12 * sizeof(int)); hipMemcpyToSymbol(raw_destpivot, host_destpivot, 12 * sizeof(int)); hipMemcpyToSymbol(raw_apexpivot, host_apexpivot, 12 * sizeof(int)); hipMemcpyToSymbol(raw_oppopivot, host_oppopivot, 12 * sizeof(int)); hipMemcpyToSymbol(raw_ver2edge, host_ver2edge, 12 * sizeof(int)); hipMemcpyToSymbol(raw_edge2ver, host_edge2ver, 6 * sizeof(int)); hipMemcpyToSymbol(raw_epivot, host_epivot, 12 * sizeof(int)); hipMemcpyToSymbol(raw_snextpivot, host_snextpivot, 6 * sizeof(int)); hipMemcpyToSymbol(raw_sorgpivot, host_sorgpivot, 6 * sizeof(int)); hipMemcpyToSymbol(raw_sdestpivot, host_sdestpivot, 6 * sizeof(int)); hipMemcpyToSymbol(raw_sapexpivot, host_sapexpivot, 6 * sizeof(int)); // i = t1.ver; j = t2.ver; for (i = 0; i < 12; i++) { for (j = 0; j < 12; j++) { host_bondtbl[12* i + j] = (j & 3) + (((i & 12) + (j & 12)) % 12); } } hipMemcpyToSymbol(raw_bondtbl, host_bondtbl, 144 * sizeof(int)); // i = t1.ver; j = t2.ver for (i = 0; i < 12; i++) { for (j = 0; j < 12; j++) { host_fsymtbl[12 * i + j] = (j + 12 - (i & 12)) % 12; } } hipMemcpyToSymbol(raw_fsymtbl, host_fsymtbl, 144 * sizeof(int)); for (i = 0; i < 12; i++) { host_facepivot1[i] = (host_esymtbl[i] & 3); } hipMemcpyToSymbol(raw_facepivot1, host_facepivot1, 12 * sizeof(int)); for (i = 0; i < 12; i++) { for (j = 0; j < 12; j++) { host_facepivot2[12 * i + j] = host_fsymtbl[12 * host_esymtbl[i] + j]; } } hipMemcpyToSymbol(raw_facepivot2, host_facepivot2, 144 * sizeof(int)); for (i = 0; i < 12; i++) { host_enexttbl[i] = (i + 4) % 12; host_eprevtbl[i] = (i + 8) % 12; } hipMemcpyToSymbol(raw_enexttbl, host_enexttbl, 12 * sizeof(int)); hipMemcpyToSymbol(raw_eprevtbl, host_eprevtbl, 12 * sizeof(int)); for (i = 0; i < 12; i++) { host_enextesymtbl[i] = host_esymtbl[host_enexttbl[i]]; host_eprevesymtbl[i] = host_esymtbl[host_eprevtbl[i]]; } hipMemcpyToSymbol(raw_enextesymtbl, host_enextesymtbl, 12 * sizeof(int)); hipMemcpyToSymbol(raw_eprevesymtbl, host_eprevesymtbl, 12 * sizeof(int)); for (i = 0; i < 12; i++) { host_eorgoppotbl[i] = host_eprevtbl[host_esymtbl[host_enexttbl[i]]]; host_edestoppotbl[i] = host_enexttbl[host_esymtbl[host_eprevtbl[i]]]; } hipMemcpyToSymbol(raw_eorgoppotbl, host_eorgoppotbl, 12 * sizeof(int)); hipMemcpyToSymbol(raw_edestoppotbl, host_edestoppotbl, 12 * sizeof(int)); int soffset, toffset; // i = t.ver, j = s.shver for (i = 0; i < 12; i++) { for (j = 0; j < 6; j++) { if ((j & 1) == 0) { soffset = (6 - ((i & 12) >> 1)) % 6; toffset = (12 - ((j & 6) << 1)) % 12; } else { soffset = (i & 12) >> 1; toffset = (j & 6) << 1; } host_tsbondtbl[6 * i + j] = (j & 1) + (((j & 6) + soffset) % 6); host_stbondtbl[6 * i + j] = (i & 3) + (((i & 12) + toffset) % 12); } } hipMemcpyToSymbol(raw_tsbondtbl, host_tsbondtbl, 72 * sizeof(int)); hipMemcpyToSymbol(raw_stbondtbl, host_stbondtbl, 72 * sizeof(int)); // i = t.ver, j = s.shver for (i = 0; i < 12; i++) { for (j 
= 0; j < 6; j++) { if ((j & 1) == 0) { soffset = (i & 12) >> 1; toffset = (j & 6) << 1; } else { soffset = (6 - ((i & 12) >> 1)) % 6; toffset = (12 - ((j & 6) << 1)) % 12; } host_tspivottbl[6 * i + j] = (j & 1) + (((j & 6) + soffset) % 6); host_stpivottbl[6 * i + j] = (i & 3) + (((i & 12) + toffset) % 12); } } hipMemcpyToSymbol(raw_tspivottbl, host_tspivottbl, 72 * sizeof(int)); hipMemcpyToSymbol(raw_stpivottbl, host_stpivottbl, 72 * sizeof(int)); } /* Init bounding box*/ void cudamesh_initbbox( int numofpoints, double* pointlist, int& xmax, int& xmin, int& ymax, int& ymin, int& zmax, int& zmin) { int i; double x, y, z; for (i = 0; i < numofpoints; i++) { x = pointlist[3 * i]; y = pointlist[3 * i + 1]; z = pointlist[3 * i + 2]; if (i == 0) { xmin = xmax = x; ymin = ymax = y; zmin = zmax = z; } else { xmin = (x < xmin) ? x : xmin; xmax = (x > xmax) ? x : xmax; ymin = (y < ymin) ? y : ymin; ymax = (y > ymax) ? y : ymax; zmin = (z < zmin) ? z : zmin; zmax = (z > zmax) ? z : zmax; } } } /* Initialize Geometric primitives */ void cudamesh_exactinit(int verbose, int noexact, int nofilter, REAL maxx, REAL maxy, REAL maxz) { REAL half; REAL check, lastcheck; int every_other; every_other = 1; half = 0.5; host_constData[1] /*epsilon*/ = 1.0; host_constData[0] /*splitter*/ = 1.0; check = 1.0; /* Repeatedly divide `epsilon' by two until it is too small to add to */ /* one without causing roundoff. (Also check if the sum is equal to */ /* the previous sum, for machines that round up instead of using exact */ /* rounding. Not that this library will work on such machines anyway. */ do { lastcheck = check; host_constData[1] /*epsilon*/ *= half; if (every_other) { host_constData[0] /*splitter*/ *= 2.0; } every_other = !every_other; check = 1.0 + host_constData[1] /*epsilon*/; } while ((check != 1.0) && (check != lastcheck)); host_constData[0] /*splitter*/ += 1.0; /* Error bounds for orientation and incircle tests. 
*/ host_constData[2] /*resulterrbound*/ = (3.0 + 8.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[3] /*ccwerrboundA*/ = (3.0 + 16.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[4] /*ccwerrboundB*/ = (2.0 + 12.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[5] /*ccwerrboundC*/ = (9.0 + 64.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/ * host_constData[1] /*epsilon*/; host_constData[6] /*o3derrboundA*/ = (7.0 + 56.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[7] /*o3derrboundB*/ = (3.0 + 28.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[8] /*o3derrboundC*/ = (26.0 + 288.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/ * host_constData[1] /*epsilon*/; host_constData[9] /*iccerrboundA*/ = (10.0 + 96.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[10] /*iccerrboundB*/ = (4.0 + 48.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[11] /*iccerrboundC*/ = (44.0 + 576.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/ * host_constData[1] /*epsilon*/; host_constData[12] /*isperrboundA*/ = (16.0 + 224.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[13] /*isperrboundB*/ = (5.0 + 72.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[14] /*isperrboundC*/ = (71.0 + 1408.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/ * host_constData[1] /*epsilon*/; // Set TetGen options. Added by H. Si, 2012-08-23. host_constOptions[0] /*_use_inexact_arith*/ = noexact; host_constOptions[1] /*_use_static_filter*/ = !nofilter; // Calculate the two static filters for orient3d() and insphere() tests. // Added by H. Si, 2012-08-23. // Sort maxx < maxy < maxz. Re-use 'half' for swapping. assert(maxx > 0); assert(maxy > 0); assert(maxz > 0); if (maxx > maxz) { half = maxx; maxx = maxz; maxz = half; } if (maxy > maxz) { half = maxy; maxy = maxz; maxz = half; } else if (maxy < maxx) { half = maxy; maxy = maxx; maxx = half; } host_constData[15] /*o3dstaticfilter*/ = 5.1107127829973299e-15 * maxx * maxy * maxz; host_constData[16] /*ispstaticfilter*/ = 1.2466136531027298e-13 * maxx * maxy * maxz * (maxz * maxz); // Copy to const memory hipMemcpyToSymbol(raw_constData, host_constData, 17 * sizeof(REAL)); hipMemcpyToSymbol(raw_constOptions, host_constOptions, 2 * sizeof(int)); //for (int i = 0; i<17; i++) // printf("host_constData[%d] = %g\n", i, host_constData[i]); //for (int i = 0; i < 2; i++) // printf("host_constOptions[%d] = %d\n", i, host_constOptions[i]); } /* Init Kernel constants */ void cudamesh_initkernelconstants(REAL maxx, REAL maxy, REAL maxz) { REAL longest = sqrt(maxx*maxx + maxy*maxy + maxz*maxz); REAL minedgelength = longest*EPSILON; host_kernelconstants[0] = minedgelength; hipMemcpyToSymbol(raw_kernelconstants, host_kernelconstants, sizeof(REAL)); } /* Primitives for points */ // Convert point index to pointer to pointlist __device__ double* cudamesh_id2pointlist(int index, double* pointlist) { return (pointlist + 3 * index); } /* Primitives for tetrahedron */ // The following primtives get or set the origin, destination, face apex, // or face opposite of an ordered tetrahedron. 
__device__ int cudamesh_org(tethandle t, int* tetlist) { return tetlist[4 * t.id + raw_orgpivot[t.ver]]; } __device__ int cudamesh_dest(tethandle t, int* tetlist) { return tetlist[4 * t.id + raw_destpivot[t.ver]]; } __device__ int cudamesh_apex(tethandle t, int* tetlist) { return tetlist[4 * t.id + raw_apexpivot[t.ver]]; } __device__ int cudamesh_oppo(tethandle t, int* tetlist) { return tetlist[4 * t.id + raw_oppopivot[t.ver]]; } __device__ void cudamesh_setorg(tethandle t, int p, int* tetlist) { tetlist[4 * t.id + raw_orgpivot[t.ver]] = p; } __device__ void cudamesh_setdest(tethandle t, int p, int* tetlist) { tetlist[4 * t.id + raw_destpivot[t.ver]] = p; } __device__ void cudamesh_setapex(tethandle t, int p, int* tetlist) { tetlist[4 * t.id + raw_apexpivot[t.ver]] = p; } __device__ void cudamesh_setoppo(tethandle t, int p, int* tetlist) { tetlist[4 * t.id + raw_oppopivot[t.ver]] = p; } // bond() connects two tetrahedra together. (t1,v1) and (t2,v2) must // refer to the same face and the same edge. __device__ void cudamesh_bond(tethandle t1, tethandle t2, tethandle* neighborlist) { neighborlist[4 * t1.id + (t1.ver & 3)] = tethandle(t2.id, raw_bondtbl[12 * t1.ver + t2.ver]); neighborlist[4 * t2.id + (t2.ver & 3)] = tethandle(t1.id, raw_bondtbl[12 * t2.ver + t1.ver]); } // dissolve() a bond (from one side). __device__ void cudamesh_dissolve(tethandle t, tethandle* neighborlist) { neighborlist[4 * t.id + (t.ver & 3)] = tethandle(-1, 11); // empty handle } // esym() finds the reversed edge. It is in the other face of the // same tetrahedron. __device__ void cudamesh_esym(tethandle& t1, tethandle& t2) { (t2).id = (t1).id; (t2).ver = raw_esymtbl[(t1).ver]; } __device__ void cudamesh_esymself(tethandle& t) { (t).ver = raw_esymtbl[(t).ver]; } // enext() finds the next edge (counterclockwise) in the same face. __device__ void cudamesh_enext(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_enexttbl[t1.ver]; } __device__ void cudamesh_enextself(tethandle& t) { t.ver = raw_enexttbl[t.ver]; } // eprev() finds the next edge (clockwise) in the same face. __device__ void cudamesh_eprev(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_eprevtbl[t1.ver]; } __device__ void cudamesh_eprevself(tethandle& t) { t.ver = raw_eprevtbl[t.ver]; } // enextesym() finds the reversed edge of the next edge. It is in the other // face of the same tetrahedron. It is the combination esym() * enext(). __device__ void cudamesh_enextesym(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_enextesymtbl[t1.ver]; } __device__ void cudamesh_enextesymself(tethandle& t) { t.ver = raw_enextesymtbl[t.ver]; } // eprevesym() finds the reversed edge of the previous edge. __device__ void cudamesh_eprevesym(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_eprevesymtbl[t1.ver]; } __device__ void cudamesh_eprevesymself(tethandle& t) { t.ver = raw_eprevesymtbl[t.ver]; } // eorgoppo() Finds the opposite face of the origin of the current edge. // Return the opposite edge of the current edge. __device__ void cudamesh_eorgoppo(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_eorgoppotbl[t1.ver]; } __device__ void cudamesh_eorgoppoself(tethandle& t) { t.ver = raw_eorgoppotbl[t.ver]; } // edestoppo() Finds the opposite face of the destination of the current // edge. Return the opposite edge of the current edge. 
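// A small illustration of the edge primitives above (hypothetical helper, not
// from the original source): enext()/eprev() cycle through the three directed
// edges of the current face, and esym() switches to the reversed edge, so the
// origin and destination swap under esym().
//
//	__device__ void example_edge_ring(tethandle t, int* tetlist)
//	{
//		for (int k = 0; k < 3; k++) {
//			int vo = cudamesh_org(t, tetlist);
//			int vd = cudamesh_dest(t, tetlist);
//			tethandle r;
//			cudamesh_esym(t, r);      // reversed edge: org(r) == vd, dest(r) == vo
//			cudamesh_enextself(t);    // next edge (CCW) in the same face
//		}
//		// After three enext() steps, t.ver is back to its starting value.
//	}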
__device__ void cudamesh_edestoppo(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_edestoppotbl[t1.ver]; } __device__ void cudamesh_edestoppoself(tethandle& t) { t.ver = raw_edestoppotbl[t.ver]; } // fsym() finds the adjacent tetrahedron at the same face and the same edge. __device__ void cudamesh_fsym(tethandle& t1, tethandle& t2, tethandle* neighborlist) { t2 = neighborlist[4 * t1.id + (t1.ver & 3)]; t2.ver = raw_fsymtbl[12 * t1.ver + t2.ver]; } __device__ void cudamesh_fsymself(tethandle& t, tethandle* neighborlist) { char t1ver = t.ver; t = neighborlist[4 * t.id + (t.ver & 3)]; t.ver = raw_fsymtbl[12 * t1ver + t.ver]; } // fnext() finds the next face while rotating about an edge according to // a right-hand rule. The face is in the adjacent tetrahedron. It is // the combination: fsym() * esym(). __device__ void cudamesh_fnext(tethandle& t1, tethandle& t2, tethandle* neighborlist) { t2 = neighborlist[4 * t1.id + raw_facepivot1[t1.ver]]; t2.ver = raw_facepivot2[12 * t1.ver + t2.ver]; } __device__ void cudamesh_fnextself(tethandle& t, tethandle* neighborlist) { char t1ver = t.ver; t = neighborlist[4 * t.id + raw_facepivot1[t.ver]]; t.ver = raw_facepivot2[12 * t1ver + t.ver]; } // ishulltet() tests if t is a hull tetrahedron. __device__ bool cudamesh_ishulltet(tethandle t, int* tetlist) { return tetlist[4 * t.id + 3] == -1; } // isdeadtet() tests if t is a tetrahedron is dead. __device__ bool cudamesh_isdeadtet(tethandle t) { return (t.id == -1); } /* Primitives for subfaces and subsegments. */ // spivot() finds the adjacent subface (s2) for a given subface (s1). // s1 and s2 share at the same edge. __device__ void cudamesh_spivot(trihandle& s1, trihandle& s2, trihandle* tri2trilist) { s2 = tri2trilist[3 * s1.id + (s1.shver >> 1)]; } __device__ void cudamesh_spivotself(trihandle& s, trihandle* tri2trilist) { s = tri2trilist[3 * s.id + (s.shver >> 1)]; } // sbond() bonds two subfaces (s1) and (s2) together. s1 and s2 must refer // to the same edge. No requirement is needed on their orientations. __device__ void cudamesh_sbond(trihandle& s1, trihandle& s2, trihandle* tri2trilist) { tri2trilist[3 * s1.id + (s1.shver >> 1)] = s2; tri2trilist[3 * s2.id + (s2.shver >> 1)] = s1; } // sbond1() bonds s1 <== s2, i.e., after bonding, s1 is pointing to s2, // but s2 is not pointing to s1. s1 and s2 must refer to the same edge. // No requirement is needed on their orientations. __device__ void cudamesh_sbond1(trihandle& s1, trihandle& s2, trihandle* tri2trilist) { tri2trilist[3 * s1.id + (s1.shver >> 1)] = s2; } // Dissolve a subface bond (from one side). Note that the other subface // will still think it's connected to this subface. __device__ void cudamesh_sdissolve(trihandle& s, trihandle* tri2trilist) { tri2trilist[3 * s.id + (s.shver >> 1)] = trihandle(-1, 0); } // These primitives determine or set the origin, destination, or apex // of a subface with respect to the edge version. 
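// Usage sketch (hypothetical helper, mirroring the ring-walking pattern used by
// the cavity kernels further below): the subfaces sharing one edge form a ring
// that is walked with spivot(); sesymself() and sorg() (defined just below) keep
// every visited handle aligned with the same edge origin. The walk ends when the
// ring closes or when an open fan reaches a boundary (spivot() returns id == -1).
//
//	__device__ int example_count_faces_at_edge(trihandle start, int* trilist,
//		trihandle* tri2trilist)
//	{
//		int pa = cudamesh_sorg(start, trilist);
//		trihandle s = start;
//		int n = 0;
//		while (1) {
//			if (cudamesh_sorg(s, trilist) != pa) cudamesh_sesymself(s);
//			n++;
//			cudamesh_spivotself(s, tri2trilist);
//			if (s.id == start.id || s.id == -1) break;
//		}
//		return n;
//	}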
__device__ int cudamesh_sorg(trihandle& s, int* trilist)
{
	return trilist[3 * s.id + raw_sorgpivot[s.shver]];
}

__device__ int cudamesh_sdest(trihandle& s, int* trilist)
{
	return trilist[3 * s.id + raw_sdestpivot[s.shver]];
}

__device__ int cudamesh_sapex(trihandle& s, int* trilist)
{
	return trilist[3 * s.id + raw_sapexpivot[s.shver]];
}

__device__ void cudamesh_setsorg(trihandle& s, int p, int* trilist)
{
	trilist[3 * s.id + raw_sorgpivot[s.shver]] = p;
}

__device__ void cudamesh_setsdest(trihandle& s, int p, int* trilist)
{
	trilist[3 * s.id + raw_sdestpivot[s.shver]] = p;
}

__device__ void cudamesh_setsapex(trihandle& s, int p, int* trilist)
{
	trilist[3 * s.id + raw_sapexpivot[s.shver]] = p;
}

// sesym() reverses the direction of the lead edge.
__device__ void cudamesh_sesym(trihandle& s1, trihandle& s2)
{
	s2.id = s1.id;
	s2.shver = (s1.shver ^ 1);  // Invert the last bit.
}

__device__ void cudamesh_sesymself(trihandle& s)
{
	s.shver ^= 1;
}

// senext() finds the next edge (counterclockwise) in the same orientation
// of this face.
__device__ void cudamesh_senext(trihandle& s1, trihandle& s2)
{
	s2.id = s1.id;
	s2.shver = raw_snextpivot[s1.shver];
}

__device__ void cudamesh_senextself(trihandle& s)
{
	s.shver = raw_snextpivot[s.shver];
}

__device__ void cudamesh_senext2(trihandle& s1, trihandle& s2)
{
	s2.id = s1.id;
	s2.shver = raw_snextpivot[raw_snextpivot[s1.shver]];
}

__device__ void cudamesh_senext2self(trihandle& s)
{
	s.shver = raw_snextpivot[raw_snextpivot[s.shver]];
}

/* Primitives for interacting tetrahedra and subfaces. */

// tsbond() bonds a tetrahedron (t) and a subface (s) together.
// Note that t and s must be the same face and the same edge. Moreover,
// t and s have the same orientation.
// Since the edge number in t and in s can be any number in {0,1,2}, we bond
// the edge in s which corresponds to t's 0th edge, and vice versa.
__device__ void cudamesh_tsbond(tethandle& t, trihandle& s, trihandle* tet2trilist, tethandle* tri2tetlist)
{
	// Bond t <== s.
	tet2trilist[4 * t.id + (t.ver & 3)] = trihandle(s.id, raw_tsbondtbl[6 * t.ver + s.shver]);
	// Bond s <== t.
	tri2tetlist[2 * s.id + (s.shver & 1)] = tethandle(t.id, raw_stbondtbl[6 * t.ver + s.shver]);
}

// tspivot() finds a subface (s) abutting the given tetrahedron (t).
// Return s.id = -1 if there is no subface at t. Otherwise, return
// the subface s; s and t must be at the same edge with the same
// orientation.
__device__ void cudamesh_tspivot(tethandle& t, trihandle& s, trihandle* tet2trilist)
{
	// Get the attached subface s.
	s = tet2trilist[4 * t.id + (t.ver & 3)];
	if (s.id == -1)
		return;
	(s).shver = raw_tspivottbl[6 * t.ver + s.shver];
}

// stpivot() finds a tetrahedron (t) abutting a given subface (s).
// Return the t (if it exists) with the same edge and the same
// orientation of s.
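// Sketch of the intended round trip (hypothetical helper; assumes t and s already
// refer to the same face with the same orientation): after tsbond(), tspivot() on
// t recovers s, and stpivot() (defined right below) on s recovers t, with matching
// edge and orientation thanks to the tsbond/stbond/tspivot/stpivot tables.
//
//	__device__ void example_bond_roundtrip(tethandle t, trihandle s,
//		trihandle* tet2trilist, tethandle* tri2tetlist)
//	{
//		cudamesh_tsbond(t, s, tet2trilist, tri2tetlist);
//		trihandle s2;
//		cudamesh_tspivot(t, s2, tet2trilist);  // s2.id == s.id
//		tethandle t2;
//		cudamesh_stpivot(s, t2, tri2tetlist);  // t2.id == t.id
//	}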
__device__ void cudamesh_stpivot(trihandle& s, tethandle& t, tethandle* tri2tetlist) { t = tri2tetlist[2 * s.id + (s.shver & 1)]; if (t.id == -1) { return; } (t).ver = raw_stpivottbl[6 * t.ver + s.shver]; } /* Primitives for interacting between tetrahedra and segments */ __device__ void cudamesh_tsspivot1(tethandle& t, trihandle& seg, trihandle* tet2seglist) { seg = tet2seglist[6 * t.id + raw_ver2edge[t.ver]]; } __device__ void cudamesh_tssbond1(tethandle& t, trihandle& seg, trihandle* tet2seglist) { tet2seglist[6 * t.id + raw_ver2edge[t.ver]] = seg; } __device__ void cudamesh_sstbond1(trihandle& s, tethandle& t, tethandle* seg2tetlist) { seg2tetlist[s.id + 0] = t; } __device__ void cudamesh_sstpivot1(trihandle& s, tethandle& t, tethandle* seg2tetlist) { t = seg2tetlist[s.id]; } /* Primitives for interacting between subfaces and segments */ __device__ void cudamesh_ssbond(trihandle& s, trihandle& edge, trihandle* tri2seglist, trihandle* seg2trilist) { tri2seglist[3 * s.id + (s.shver >> 1)] = edge; seg2trilist[3 * edge.id + 0] = s; } __device__ void cudamesh_ssbond1(trihandle& s, trihandle& edge, trihandle* tri2seglist) { tri2seglist[3 * s.id + (s.shver >> 1)] = edge; } __device__ void cudamesh_sspivot(trihandle& s, trihandle& edge, trihandle* tri2seglist) { edge = tri2seglist[3 * s.id + (s.shver >> 1)]; } __device__ bool cudamesh_isshsubseg(trihandle&s, trihandle* tri2seglist) { return (tri2seglist[3 * s.id + (s.shver >> 1)].id != -1); } /* Advanced primitives. */ __device__ void cudamesh_point2tetorg(int pa, tethandle& searchtet, tethandle* point2tetlist, int* tetlist) { searchtet = point2tetlist[pa]; if (tetlist[4 * searchtet.id + 0] == pa) { searchtet.ver = 11; } else if (tetlist[4 * searchtet.id + 1] == pa) { searchtet.ver = 3; } else if (tetlist[4 * searchtet.id + 2] == pa) { searchtet.ver = 7; } else { assert(tetlist[4 * searchtet.id + 3] == pa); // SELF_CHECK searchtet.ver = 0; } } /* Geometric calculations (non-robust) */ // dot() returns the dot product: v1 dot v2. __device__ REAL cudamesh_dot(REAL* v1, REAL* v2) { return v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]; } // distance() computes the Euclidean distance between two points. __device__ REAL cudamesh_distance(REAL* p1, REAL* p2) { //printf("%lf %lf %lf - %lf %lf %lf\n", // p1[0], p1[1], p1[2], p2[0], p2[1], p2[2]); return sqrt((p2[0] - p1[0]) * (p2[0] - p1[0]) + (p2[1] - p1[1]) * (p2[1] - p1[1]) + (p2[2] - p1[2]) * (p2[2] - p1[2])); } // cross() computes the cross product: n = v1 cross v2. __device__ void cudamesh_cross(REAL* v1, REAL* v2, REAL* n) { n[0] = v1[1] * v2[2] - v2[1] * v1[2]; n[1] = -(v1[0] * v2[2] - v2[0] * v1[2]); n[2] = v1[0] * v2[1] - v2[0] * v1[1]; } /* Helpers */ __device__ unsigned long cudamesh_randomnation(unsigned long * randomseed, unsigned int choices) { unsigned long newrandom; if (choices >= 714025l) { newrandom = (*randomseed * 1366l + 150889l) % 714025l; *randomseed = (newrandom * 1366l + 150889l) % 714025l; newrandom = newrandom * (choices / 714025l) + *randomseed; if (newrandom >= choices) { return newrandom - choices; } else { return newrandom; } } else { *randomseed = (*randomseed * 1366l + 150889l) % 714025l; return *randomseed % choices; } } /////////////////////////////////////////////////////////////////////////////// // // // finddirection() Find the tet on the path from one point to another. // // // // The path starts from 'searchtet''s origin and ends at 'endpt'. On finish, // // 'searchtet' contains a tet on the path, its origin does not change. 
// // // // The return value indicates one of the following cases (let 'searchtet' be // // abcd, a is the origin of the path): // // - ACROSSVERT, edge ab is collinear with the path; // // - ACROSSEDGE, edge bc intersects with the path; // // - ACROSSFACE, face bcd intersects with the path. // // // // WARNING: This routine is designed for convex triangulations, and will not // // generally work after the holes and concavities have been carved. // // // /////////////////////////////////////////////////////////////////////////////// __device__ enum interresult cudamesh_finddirection(tethandle* searchtet, int endpt, double* pointlist, int* tetlist, tethandle* neighborlist, unsigned long* randomseed) { tethandle neightet; int pa, pb, pc, pd; enum { HMOVE, RMOVE, LMOVE } nextmove; REAL hori, rori, lori; int t1ver; int s; // The origin is fixed. pa = cudamesh_org(*searchtet, tetlist); if (tetlist[4 * searchtet->id + 3] == -1) { // A hull tet. Choose the neighbor of its base face. *searchtet = neighborlist[4 * searchtet->id + 3]; // Reset the origin to be pa. if (tetlist[4 * searchtet->id + 0] == pa) { searchtet->ver = 11; } else if (tetlist[4 * searchtet->id + 1] == pa) { searchtet->ver = 3; } else if (tetlist[4 * searchtet->id + 2] == pa) { searchtet->ver = 7; } else { assert(tetlist[4 * searchtet->id + 3] == pa); searchtet->ver = 0; } } pb = cudamesh_dest(*searchtet, tetlist); // Check whether the destination or apex is 'endpt'. if (pb == endpt) { // pa->pb is the search edge. return ACROSSVERT; } pc = cudamesh_apex(*searchtet, tetlist); if (pc == endpt) { // pa->pc is the search edge. cudamesh_eprevesymself(*searchtet); return ACROSSVERT; } double *p[5]; // Walk through tets around pa until the right one is found. while (1) { pd = cudamesh_oppo(*searchtet, tetlist); // Check whether the opposite vertex is 'endpt'. if (pd == endpt) { // pa->pd is the search edge. cudamesh_esymself(*searchtet); cudamesh_enextself(*searchtet); return ACROSSVERT; } // Check if we have entered outside of the domain. if (pd == -1) { // This is possible when the mesh is non-convex. return ACROSSSUB; // Hit a boundary. } // Now assume that the base face abc coincides with the horizon plane, // and d lies above the horizon. The search point 'endpt' may lie // above or below the horizon. We test the orientations of 'endpt' // with respect to three planes: abc (horizon), bad (right plane), // and acd (left plane). p[0] = cudamesh_id2pointlist(pa, pointlist); p[1] = cudamesh_id2pointlist(pb, pointlist); p[2] = cudamesh_id2pointlist(pc, pointlist); p[3] = cudamesh_id2pointlist(pd, pointlist); p[4] = cudamesh_id2pointlist(endpt, pointlist); hori = cuda_orient3d(p[0], p[1], p[2], p[4]); rori = cuda_orient3d(p[1], p[0], p[3], p[4]); lori = cuda_orient3d(p[0], p[2], p[3], p[4]); // Now decide the tet to move. It is possible there are more than one // tets are viable moves. Is so, randomly choose one. if (hori > 0) { if (rori > 0) { if (lori > 0) { // Any of the three neighbors is a viable move. s = cudamesh_randomnation(randomseed, 3); if (s == 0) { nextmove = HMOVE; } else if (s == 1) { nextmove = RMOVE; } else { nextmove = LMOVE; } } else { // Two tets, below horizon and below right, are viable. //s = randomnation(2); if (cudamesh_randomnation(randomseed, 2)) { nextmove = HMOVE; } else { nextmove = RMOVE; } } } else { if (lori > 0) { // Two tets, below horizon and below left, are viable. 
//s = randomnation(2); if (cudamesh_randomnation(randomseed, 2)) { nextmove = HMOVE; } else { nextmove = LMOVE; } } else { // The tet below horizon is chosen. nextmove = HMOVE; } } } else { if (rori > 0) { if (lori > 0) { // Two tets, below right and below left, are viable. //s = randomnation(2); if (cudamesh_randomnation(randomseed, 2)) { nextmove = RMOVE; } else { nextmove = LMOVE; } } else { // The tet below right is chosen. nextmove = RMOVE; } } else { if (lori > 0) { // The tet below left is chosen. nextmove = LMOVE; } else { // 'endpt' lies either on the plane(s) or across face bcd. if (hori == 0) { if (rori == 0) { // pa->'endpt' is COLLINEAR with pa->pb. return ACROSSVERT; } if (lori == 0) { // pa->'endpt' is COLLINEAR with pa->pc. cudamesh_eprevesymself(*searchtet); // // [a,c,d] return ACROSSVERT; } // pa->'endpt' crosses the edge pb->pc. return ACROSSEDGE; } if (rori == 0) { if (lori == 0) { // pa->'endpt' is COLLINEAR with pa->pd. cudamesh_esymself(*searchtet); // face bad. cudamesh_enextself(*searchtet); // face [a,d,b] return ACROSSVERT; } // pa->'endpt' crosses the edge pb->pd. cudamesh_esymself(*searchtet); // face bad. cudamesh_enextself(*searchtet); // face adb return ACROSSEDGE; } if (lori == 0) { // pa->'endpt' crosses the edge pc->pd. cudamesh_eprevesymself(*searchtet); // [a,c,d] return ACROSSEDGE; } // pa->'endpt' crosses the face bcd. return ACROSSFACE; } } } // Move to the next tet, fix pa as its origin. if (nextmove == RMOVE) { cudamesh_fnextself(*searchtet, neighborlist); } else if (nextmove == LMOVE) { cudamesh_eprevself(*searchtet); cudamesh_fnextself(*searchtet, neighborlist); cudamesh_enextself(*searchtet); } else { // HMOVE cudamesh_fsymself(*searchtet, neighborlist); cudamesh_enextself(*searchtet); } assert(cudamesh_org(*searchtet, tetlist) == pa); pb = cudamesh_dest(*searchtet, tetlist); pc = cudamesh_apex(*searchtet, tetlist); } // while (1) } ///////////////////////////////////////////////////////////////////////////////// //// // //// getedge() Get a tetrahedron having the two endpoints. // //// // //// The method here is to search the second vertex in the link faces of the // //// first vertex. The global array 'cavetetlist' is re-used for searching. // //// // //// This function is used for the case when the mesh is non-convex. Otherwise,// //// the function finddirection() should be faster than this. // //// // ///////////////////////////////////////////////////////////////////////////////// // ////int getedge(int e1, int e2, tethandle *tedge, tethandle* point2tet, double* pointlist, int* tetlist, tethandle* neighborlist, int* markerlist) ////{ //// tethandle searchtet, neightet, parytet; //// int pt; //// int done; //// int i, j; //// //// // Quickly check if 'tedge' is just this edge. //// if (!isdeadtet(*tedge)) { //// if (org(*tedge, tetlist) == e1) { //// if (dest(*tedge, tetlist) == e2) { //// return 1; //// } //// } //// else if (org(*tedge, tetlist) == e2) { //// if (dest(*tedge, tetlist) == e1) { //// esymself(*tedge); //// return 1; //// } //// } //// } //// //// // Search for the edge [e1, e2]. //// point2tetorg(e1, *tedge, point2tet, tetlist); //// finddirection(tedge, e2, pointlist, tetlist, neighborlist); //// if (dest(*tedge, tetlist) == e2) //// { //// return 1; //// } //// else //// { //// // Search for the edge [e2, e1]. 
//// point2tetorg(e2, *tedge, point2tet, tetlist); //// finddirection(tedge, e1, pointlist, tetlist, neighborlist); //// if (dest(*tedge, tetlist) == e1) { //// esymself(*tedge); //// return 1; //// } //// } //// //// // Go to the link face of e1. //// point2tetorg(e1, searchtet, point2tet, tetlist); //// enextesymself(searchtet); //// //// std::vector<tethandle> recordtetlist; // recorded tet list //// //// // Search e2. //// for (i = 0; i < 3; i++) { //// pt = apex(searchtet, tetlist); //// if (pt == e2) { //// // Found. 'searchtet' is [#,#,e2,e1]. //// eorgoppo(searchtet, *tedge); // [e1,e2,#,#]. //// return 1; //// } //// enextself(searchtet); //// } //// //// // Get the adjacent link face at 'searchtet'. //// fnext(searchtet, neightet, neighborlist); //// esymself(neightet); //// // assert(oppo(neightet) == e1); //// pt = apex(neightet, tetlist); //// if (pt == e2) { //// // Found. 'neightet' is [#,#,e2,e1]. //// eorgoppo(neightet, *tedge); // [e1,e2,#,#]. //// return 1; //// } //// //// // Continue searching in the link face of e1. //// markerlist[searchtet.id] = 1; // initial value of markerlist must be 0 //// recordtetlist.push_back(searchtet); //// markerlist[neightet.id] = 1; //// recordtetlist.push_back(neightet); //// //// done = 0; //// //// for (i = 0; (i < recordtetlist.size()) && !done; i++) { //// parytet = recordtetlist[i]; //// searchtet = parytet; //// for (j = 0; (j < 2) && !done; j++) { //// enextself(searchtet); //// fnext(searchtet, neightet, neighborlist); //// if (!markerlist[neightet.id]) { //// esymself(neightet); //// pt = apex(neightet, tetlist); //// if (pt == e2) { //// // Found. 'neightet' is [#,#,e2,e1]. //// eorgoppo(neightet, *tedge); //// done = 1; //// } //// else { //// markerlist[neightet.id] = 1; //// recordtetlist.push_back(neightet); //// } //// } //// } // j //// } // i //// //// // Uninfect the list of visited tets. 
//// for (i = 0; i < recordtetlist.size(); i++) { //// parytet = recordtetlist[i]; //// markerlist[parytet.id] = 0; //// } //// //// return done; ////} /* Refinement */ // Insert point __global__ void kernelCheckAbortiveElements( int* d_insertidxlist, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, int* d_threadmarker, int insertiontype, int numofinsertpt ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofinsertpt) return; int insertid = d_insertidxlist[pos]; bool flag; if (insertiontype == 0) flag = d_segstatus[insertid].isAbortive(); else if (insertiontype == 1) flag = d_tristatus[insertid].isAbortive(); else if (insertiontype == 2) flag = d_tetstatus[insertid].isAbortive(); if (flag) d_threadmarker[pos] = -1; } __global__ void kernelCheckInsertRadius_Seg( int* d_segidlist, REAL* d_pointlist, REAL* d_pointradius, int* d_seglist, tristatus* d_segstatus, int* d_segencmarker, int* d_threadmarker, int numofseg ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofseg) return; int segId = d_segidlist[pos]; if (d_segstatus[segId].isAbortive()) { d_threadmarker[pos] = -1; return; } int encptidx = d_segencmarker[pos]; if (encptidx != MAXINT) // not encroached by splitting segment and subface routines return; trihandle splitseg(segId, 0); int ipa, ipb; ipa = cudamesh_sorg(splitseg, d_seglist); ipb = cudamesh_sdest(splitseg, d_seglist); REAL *pa, *pb; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); REAL len = cudamesh_distance(pa, pb); REAL smrrv = d_pointradius[ipa]; REAL rrv = d_pointradius[ipb]; if (rrv > 0) { if (smrrv > 0) { if (rrv < smrrv) { smrrv = rrv; } } else { smrrv = rrv; } } if (smrrv > 0) { if ((fabs(smrrv - len) / len) < EPSILON) smrrv = len; if (len < smrrv) { d_segstatus[segId].setAbortive(true); d_threadmarker[pos] = -1; return; } } } __global__ void kernelComputePriority_Seg( int* d_segidlist, int* d_threadlist, int* d_seglist, REAL* d_pointlist, int* d_priority, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int segId = d_segidlist[threadId]; trihandle splitseg(segId, 0); int ipa, ipb; ipa = cudamesh_sorg(splitseg, d_seglist); ipb = cudamesh_sdest(splitseg, d_seglist); REAL *pa, *pb; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); REAL len = cudamesh_distance(pa, pb); d_priority[threadId] = __float_as_int((float)(1/len)); } __global__ void kernelInitSearchTet_Seg( int* d_segidlist, int* d_threadlist, tethandle* d_seg2tetlist, tethandle* d_searchtetlist, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int segId = d_segidlist[threadId]; trihandle splitseg(segId, 0); tethandle searchtet; cudamesh_sstpivot1(splitseg, searchtet, d_seg2tetlist); d_searchtetlist[threadId] = searchtet; } __global__ void kernelCheckInsertRadius_Subface( int* d_subfaceidlist, REAL* d_insertptlist, REAL* d_pointlist, trihandle* d_point2trilist, verttype* d_pointtypelist, REAL* d_pointradius, int* d_threadmarker, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, tristatus* d_tristatus, int* d_subfaceencmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int subfaceid = d_subfaceidlist[pos]; if (d_tristatus[subfaceid].isAbortive()) { 
d_threadmarker[pos] = -1; return; } int encptidx = d_subfaceencmarker[subfaceid]; if (encptidx == MAXINT) // Mark as encroached when trying to split a tet return; trihandle parentseg, parentsh; trihandle splitfac(subfaceid, 0); REAL rv, rp; REAL* newpt = d_insertptlist + 3 * pos; REAL* encpt = cudamesh_id2pointlist(encptidx, d_pointlist); rv = cudamesh_distance(newpt, encpt); if (d_pointtypelist[encptidx] == FREESEGVERTEX) { parentseg = d_point2trilist[encptidx]; if (cudamesh_segfacetadjacent(parentseg.id, splitfac.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { //printf("Adjacent: Seg #%d, Subface #%d\n", // d_seg2parentidxlist[parentseg.id], d_tri2parentidxlist[splitfac.id]); rp = d_pointradius[encptidx]; if (rv < (sqrt(2.0) * rp)) { // This insertion may cause no termination. d_threadmarker[pos] = -1; // Reject the insertion of newpt. d_tristatus[subfaceid].setAbortive(true); } } } else if (d_pointtypelist[encptidx] == FREEFACETVERTEX) { parentsh = d_point2trilist[encptidx]; if (cudamesh_facetfacetadjacent(parentsh.id, splitfac.id, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { //printf("Adjacent: Subface #%d, Subface #%d\n", // d_tri2parentidxlist[parentsh.id], d_tri2parentidxlist[splitfac.id]); rp = d_pointradius[encptidx]; if (rv < rp) { d_threadmarker[pos] = -1; // Reject the insertion of newpt. d_tristatus[subfaceid].setAbortive(true); } } } } __global__ void kernelInitSearchshList( int* d_subfaceidlist, int* d_threadlist, trihandle* d_searchsh, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int subfaceid = d_subfaceidlist[threadId]; d_searchsh[threadId] = trihandle(subfaceid, 0); } __global__ void kernelSurfacePointLocation( int* d_subfaceidlist, trihandle* d_searchsh, tethandle* d_searchtetlist, locateresult* d_pointlocation, REAL* d_insertptlist, REAL* d_pointlist, int* d_threadlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, unsigned long* d_randomseed, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; trihandle neighsh; trihandle *searchsh = d_searchsh + threadId; REAL *searchpt = d_insertptlist + 3 * threadId; REAL *pa, *pb, *pc; unsigned long *randomseed = d_randomseed + pos; REAL abvpt[3]; enum locateresult loc; enum { MOVE_BC, MOVE_CA } nextmove; REAL ori, ori_bc, ori_ca; int i; pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); // Calculate an above point for this facet. cudamesh_calculateabovepoint4(searchpt, pa, pb, pc, abvpt); // 'abvpt' is given. Make sure it is above [a,b,c] ori = cuda_orient3d(pa, pb, pc, abvpt); assert(ori != 0); // SELF_CHECK if (ori > 0) { cudamesh_sesymself(*searchsh); // Reverse the face orientation. } // Find an edge of the face s.t. p lies on its right-hand side (CCW). 
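	// Note: 'abvpt' lifts this 2D walk on the facet into 3D. With the subface
	// oriented so that abvpt lies above it, cuda_orient3d(pa, pb, abvpt, searchpt)
	// behaves like a 2D orientation test of searchpt against the directed edge
	// pa->pb inside the facet plane, which is what the edge search below and the
	// walk that follows rely on.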
for (i = 0; i < 3; i++) { pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); ori = cuda_orient3d(pa, pb, abvpt, searchpt); if (ori > 0) break; cudamesh_senextself(*searchsh); } assert(i < 3); // SELF_CHECK pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); if (pc[0] == searchpt[0] && pc[1] == searchpt[1] && pc[2] == searchpt[2]) { cudamesh_senext2self(*searchsh); loc = ONVERTEX; } else { while (1) { ori_bc = cuda_orient3d(pb, pc, abvpt, searchpt); ori_ca = cuda_orient3d(pc, pa, abvpt, searchpt); if (ori_bc < 0) { if (ori_ca < 0) { // (--) // Any of the edges is a viable move. if (cudamesh_randomnation(randomseed,2)) { nextmove = MOVE_CA; } else { nextmove = MOVE_BC; } } else { // (-#) // Edge [b, c] is viable. nextmove = MOVE_BC; } } else { if (ori_ca < 0) { // (#-) // Edge [c, a] is viable. nextmove = MOVE_CA; } else { if (ori_bc > 0) { if (ori_ca > 0) { // (++) loc = ONFACE; // Inside [a, b, c]. break; } else { // (+0) cudamesh_senext2self(*searchsh); // On edge [c, a]. loc = ONEDGE; break; } } else { // ori_bc == 0 if (ori_ca > 0) { // (0+) cudamesh_senextself(*searchsh); // On edge [b, c]. loc = ONEDGE; break; } else { // (00) // p is coincident with vertex c. cudamesh_senext2self(*searchsh); loc = ONVERTEX; break; } } } } // Move to the next face. if (nextmove == MOVE_BC) { cudamesh_senextself(*searchsh); } else { cudamesh_senext2self(*searchsh); } // NON-convex case. Check if we will cross a boundary. if (cudamesh_isshsubseg(*searchsh, d_tri2seglist)) { loc = ENCSEGMENT; break; } cudamesh_spivot(*searchsh, neighsh, d_tri2trilist); if (neighsh.id == -1) { loc = OUTSIDE; // A hull edge. break; } // Adjust the edge orientation. if (cudamesh_sorg(neighsh, d_trifacelist) != cudamesh_sdest(*searchsh, d_trifacelist)) { cudamesh_sesymself(neighsh); } assert(cudamesh_sorg(neighsh, d_trifacelist) == cudamesh_sdest(*searchsh, d_trifacelist)); // SELF_CHECK // Update the newly discovered face and its endpoints. 
*searchsh = neighsh; pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); if (pc == searchpt) { cudamesh_senext2self(*searchsh); loc = ONVERTEX; break; } } // while (1) } d_pointlocation[threadId] = loc; if (!(loc == ONFACE || loc == ONEDGE)) { int subfaceid = d_subfaceidlist[threadId]; d_tristatus[subfaceid].setAbortive(true); // mark the encroached subface rather than the located one d_threadmarker[threadId] = -1; return; } tethandle searchtet; cudamesh_stpivot(*searchsh, searchtet, d_tri2tetlist); d_searchtetlist[threadId] = searchtet; } __global__ void kernelComputePriority_Subface( int* d_insertidxlist, int* d_threadlist, int* d_trifacelist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, REAL* d_pointlist, int* d_priority, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int subfaceid = d_insertidxlist[threadId]; trihandle splitsh = trihandle(subfaceid, 0); int ipa, ipb, ipc; ipa = cudamesh_sorg(splitsh, d_trifacelist); ipb = cudamesh_sdest(splitsh, d_trifacelist); ipc = cudamesh_sapex(splitsh, d_trifacelist); REAL *pa, *pb, *pc; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); pc = cudamesh_id2pointlist(ipc, d_pointlist); // Compute the area of this 3D triangle REAL AB[3], AC[3]; int i; for (i = 0; i < 3; i++) { AB[i] = pb[i] - pa[i]; AC[i] = pc[i] - pa[i]; } REAL area = sqrt((AB[1] * AC[2] - AB[2] * AC[1])*(AB[1] * AC[2] - AB[2] * AC[1]) + (AB[2] * AC[0] - AB[0] * AC[2])*(AB[2] * AC[0] - AB[0] * AC[2]) + (AB[0] * AC[1] - AB[1] * AC[0])*(AB[0] * AC[1] - AB[1] * AC[0])) / 2; d_priority[threadId] = __float_as_int((float)(1/ area)); //int offsetid = d_tri2parentidxlist[subfaceid]; //REAL* pt[4]; //for (int i = d_triid2parentoffsetlist[offsetid]; i < d_triid2parentoffsetlist[offsetid]; i++) //{ // //} } __global__ void kernelCheckInsertRadius_Tet( int* d_tetidlist, REAL* d_pointlist, REAL* d_pointradius, int* d_tetlist, tetstatus* d_tetstatus, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int tetid = d_tetidlist[pos]; if (d_tetstatus[tetid].isAbortive()) { d_threadmarker[pos] = -1; return; } tethandle chktet(tetid, 11), checkedge; int ie1, ie2; int i, j; REAL *e1, *e2; REAL smlen = 0; REAL rrv, smrrv; REAL elen[6]; // Get the shortest edge of this tet. checkedge.id = chktet.id; for (i = 0; i < 6; i++) { checkedge.ver = raw_edge2ver[i]; ie1 = cudamesh_org(checkedge, d_tetlist); ie2 = cudamesh_dest(checkedge, d_tetlist); e1 = cudamesh_id2pointlist(ie1, d_pointlist); e2 = cudamesh_id2pointlist(ie2, d_pointlist); elen[i] = cudamesh_distance(e1, e2); if (i == 0) { smlen = elen[i]; j = 0; } else { if (elen[i] < smlen) { smlen = elen[i]; j = i; } } } // Check if the edge is too short. checkedge.ver = raw_edge2ver[j]; // Get the smallest rrv of e1 and e2. // Note: if rrv of e1 and e2 is zero. Do not use it. ie1 = cudamesh_org(checkedge, d_tetlist); smrrv = d_pointradius[ie1]; ie2 = cudamesh_dest(checkedge, d_tetlist); rrv = d_pointradius[ie2]; if (rrv > 0) { if (smrrv > 0) { if (rrv < smrrv) { smrrv = rrv; } } else { smrrv = rrv; } } if (smrrv > 0) { // To avoid rounding error, round smrrv before doing comparison. 
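		// In other words: if the relative difference |smrrv - smlen| / smlen is
		// below EPSILON, the two lengths are treated as equal and smrrv is snapped
		// to smlen, so the rejection test "smrrv > smlen" below is not triggered by
		// floating-point noise alone.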
if ((fabs(smrrv - smlen) / smlen) <EPSILON) { smrrv = smlen; } if (smrrv > smlen) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[pos] = -1; return; } } } __global__ void kernelPointLocation( int* d_tetidlist, REAL* d_insertptlist, locateresult* d_pointlocation, tethandle* d_searchtetlist, int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, tetstatus* d_tetstatus, int* d_priority, unsigned long* d_randomseed, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; if (d_threadmarker[threadId] == -1) return; int tetid = d_tetidlist[threadId]; tethandle* searchtet = d_searchtetlist + threadId; REAL* searchpt = d_insertptlist + 3 * threadId; unsigned long* randomseed = d_randomseed + pos; REAL *torg, *tdest, *tapex, *toppo; enum { ORGMOVE, DESTMOVE, APEXMOVE } nextmove; REAL ori, oriorg, oridest, oriapex; enum locateresult loc = OUTSIDE; int t1ver; int s; int step = 1; // Init searchtet searchtet->id = tetid; searchtet->ver = 11; // Check if we are in the outside of the convex hull. if (cudamesh_ishulltet(*searchtet, d_tetlist)) { // Get its adjacent tet (inside the hull). searchtet->ver = 3; cudamesh_fsymself(*searchtet, d_neighborlist); } // Let searchtet be the face such that 'searchpt' lies above to it. for (searchtet->ver = 0; searchtet->ver < 4; searchtet->ver++) { torg = cudamesh_id2pointlist(cudamesh_org(*searchtet, d_tetlist), d_pointlist); tdest = cudamesh_id2pointlist(cudamesh_dest(*searchtet, d_tetlist), d_pointlist); tapex = cudamesh_id2pointlist(cudamesh_apex(*searchtet, d_tetlist), d_pointlist); ori = cuda_orient3d(torg, tdest, tapex, searchpt); if (ori < 0.0) break; } assert(searchtet->ver != 4); // Walk through tetrahedra to locate the point. while (true) { toppo = cudamesh_id2pointlist(cudamesh_oppo(*searchtet, d_tetlist), d_pointlist); // Check if the vertex is we seek. if (toppo[0] == searchpt[0] && toppo[1] == searchpt[1] && toppo[2] == searchpt[2]) { // Adjust the origin of searchtet to be searchpt. cudamesh_esymself(*searchtet); cudamesh_eprevself(*searchtet); loc = ONVERTEX; // return ONVERTEX; break; } // We enter from one of serarchtet's faces, which face do we exit? oriorg = cuda_orient3d(tdest, tapex, toppo, searchpt); oridest = cuda_orient3d(tapex, torg, toppo, searchpt); oriapex = cuda_orient3d(torg, tdest, toppo, searchpt); // Now decide which face to move. It is possible there are more than one // faces are viable moves. If so, randomly choose one. if (oriorg < 0) { if (oridest < 0) { if (oriapex < 0) { // All three faces are possible. s = cudamesh_randomnation(randomseed, 3); // 's' is in {0,1,2}. if (s == 0) { nextmove = ORGMOVE; } else if (s == 1) { nextmove = DESTMOVE; } else { nextmove = APEXMOVE; } } else { // Two faces, opposite to origin and destination, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = ORGMOVE; } else { nextmove = DESTMOVE; } } } else { if (oriapex < 0) { // Two faces, opposite to origin and apex, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = ORGMOVE; } else { nextmove = APEXMOVE; } } else { // Only the face opposite to origin is viable. nextmove = ORGMOVE; } } } else { if (oridest < 0) { if (oriapex < 0) { // Two faces, opposite to destination and apex, are viable. //s = randomnation(2); // 's' is in {0,1}. 
if (cudamesh_randomnation(randomseed, 2)) { nextmove = DESTMOVE; } else { nextmove = APEXMOVE; } } else { // Only the face opposite to destination is viable. nextmove = DESTMOVE; } } else { if (oriapex < 0) { // Only the face opposite to apex is viable. nextmove = APEXMOVE; } else { // The point we seek must be on the boundary of or inside this // tetrahedron. Check for boundary cases. if (oriorg == 0) { // Go to the face opposite to origin. cudamesh_enextesymself(*searchtet); if (oridest == 0) { cudamesh_eprevself(*searchtet); // edge oppo->apex if (oriapex == 0) { // oppo is duplicated with p. loc = ONVERTEX; // return ONVERTEX; break; } loc = ONEDGE; // return ONEDGE; break; } if (oriapex == 0) { cudamesh_enextself(*searchtet); // edge dest->oppo loc = ONEDGE; // return ONEDGE; break; } loc = ONFACE; // return ONFACE; break; } if (oridest == 0) { // Go to the face opposite to destination. cudamesh_eprevesymself(*searchtet); if (oriapex == 0) { cudamesh_eprevself(*searchtet); // edge oppo->org loc = ONEDGE; // return ONEDGE; break; } loc = ONFACE; // return ONFACE; break; } if (oriapex == 0) { // Go to the face opposite to apex cudamesh_esymself(*searchtet); loc = ONFACE; // return ONFACE; break; } loc = INTETRAHEDRON; // return INTETRAHEDRON; break; } } } // Move to the selected face. if (nextmove == ORGMOVE) { cudamesh_enextesymself(*searchtet); } else if (nextmove == DESTMOVE) { cudamesh_eprevesymself(*searchtet); } else { cudamesh_esymself(*searchtet); } // Move to the adjacent tetrahedron (maybe a hull tetrahedron). cudamesh_fsymself(*searchtet, d_neighborlist); if (cudamesh_oppo(*searchtet, d_tetlist) == -1) { loc = OUTSIDE; // return OUTSIDE; break; } // Retreat the three vertices of the base face. torg = cudamesh_id2pointlist(cudamesh_org(*searchtet, d_tetlist), d_pointlist); tdest = cudamesh_id2pointlist(cudamesh_dest(*searchtet, d_tetlist), d_pointlist); tapex = cudamesh_id2pointlist(cudamesh_apex(*searchtet, d_tetlist), d_pointlist); step++; } // while (true) d_pointlocation[threadId] = loc; // set weighted priority //REAL vol = cudamesh_tetrahedronvolume(tetid, d_pointlist, d_tetlist); //REAL wp = 0.5*vol + 0.5*step; //d_priority[threadId] = __float_as_int((float)(1 / wp)); if (loc == ONVERTEX) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[threadId] = -1; } } __global__ void kernelMarkAndCountInitialCavity( int* d_insertidxlist, locateresult* d_pointlocation, int* d_threadlist, tethandle* d_searchtet, trihandle* d_searchsh, trihandle* d_seg2trilist, int* d_trifacelist, trihandle* d_tri2trilist, tethandle* d_neighborlist, int* d_priority, uint64* d_tetmarker, int* d_segmarker, uint64* d_trimarker, int* d_threadmarker, int* d_initialcavitysize, int* d_initialsubcavitysize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; tethandle searchtet = d_searchtet[threadId]; tethandle spintet, neightet; locateresult loc = d_pointlocation[threadId]; // initial cavity // mark all tets share at this edge int count = 0, i; int old; uint64 marker, oldmarker; marker = cudamesh_encodeUInt64Priority(d_priority[threadId], threadId); if (loc == ONEDGE) { spintet = searchtet; while (1) { // check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { count = 0; break; } // marking competition oldmarker = atomicMin(d_tetmarker + spintet.id, marker); if (marker < oldmarker) // winned { old = 
cudamesh_getUInt64PriorityIndex(oldmarker); if (old != MAXUINT) { d_threadmarker[old] = -1; atomicMin(d_initialcavitysize + old, 0); atomicMin(d_initialsubcavitysize + old, 0); } } else // lost { d_threadmarker[threadId] = -1; count = 0; break; } count++; cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == searchtet.id) break; } // while (1) } else if (loc == ONFACE) { // check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { count = 0; } else // mark two adjacent tets on the face { spintet = searchtet; for (i = 0; i < 2; i++) { // marking competition oldmarker = atomicMin(d_tetmarker + spintet.id, marker); if (marker < oldmarker) // winned { old = cudamesh_getUInt64PriorityIndex(oldmarker); if (old != MAXUINT) { d_threadmarker[old] = -1; atomicMin(d_initialcavitysize + old, 0); atomicMin(d_initialsubcavitysize + old, 0); } } else // lost { d_threadmarker[threadId] = -1; count = 0; break; } count++; spintet = d_neighborlist[4 * searchtet.id + (searchtet.ver & 3)]; } } } else if (loc == INTETRAHEDRON || loc == OUTSIDE) { // check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { count = 0; } else // mark four adjecent tets { // marking competition oldmarker = atomicMin(d_tetmarker + searchtet.id, marker); if (marker < oldmarker) // winned { count = 1; old = cudamesh_getUInt64PriorityIndex(oldmarker); if (old != MAXUINT) { d_threadmarker[old] = -1; atomicMin(d_initialcavitysize + old, 0); atomicMin(d_initialsubcavitysize + old, 0); } } else // lost { d_threadmarker[threadId] = -1; count = 0; } } } atomicMin(d_initialcavitysize + threadId, count); // Initial subcavity // Count all subfaces share at this edge. int scount = 0; if (count == 0) scount = 0; else { trihandle splitsh; if (loc == ONEDGE) { if (threadmarker == 0) { int segId = d_insertidxlist[threadId]; trihandle splitseg(segId, 0); atomicMin(d_segmarker + splitseg.id, threadId); cudamesh_spivot(splitseg, splitsh, d_seg2trilist); } else if (threadmarker == 1) { splitsh = d_searchsh[threadId]; } if (splitsh.id != -1) { int pa = cudamesh_sorg(splitsh, d_trifacelist); trihandle neighsh = splitsh; while (1) { // Check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { scount = 0; break; } // Adjust the origin of its edge to be 'pa'. if (cudamesh_sorg(neighsh, d_trifacelist) != pa) { cudamesh_sesymself(neighsh); } // Mark this face atomicMin(d_trimarker + neighsh.id, marker); // count this face scount++; // Go to the next face at the edge. cudamesh_spivotself(neighsh, d_tri2trilist); // Stop if all faces at the edge have been visited. 
if (neighsh.id == splitsh.id) break; if (neighsh.id == -1) break; } // while (1) } } else if (loc == ONFACE) { if (threadmarker == 1) { // Check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { scount = 0; } else { splitsh = d_searchsh[threadId]; // Mark this face atomicMin(d_trimarker + splitsh.id, marker); // count this face scount++; } } } } atomicMin(d_initialsubcavitysize + threadId, scount); } __global__ void kernelInitCavityLinklist( int* d_insertidxlist, locateresult* d_pointlocation, int* d_threadlist, tethandle* d_searchtet, trihandle* d_searchsh, trihandle* d_seg2trilist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, int* d_tetlist, tethandle* d_neighborlist, int* d_initialcavityindices, tethandle* d_caveoldtetlist, int* d_caveoldtetprev, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_caveoldtettail, tethandle* d_cavetetlist, int* d_cavetetprev, int* d_cavetetnext, int* d_cavetethead, int* d_cavetettail, int* d_initialsubcavityindices, int* d_initialsubcavitysize, int* d_cavethreadidx, trihandle* d_caveshlist, int* d_caveshprev, int* d_caveshnext, int* d_caveshhead, int* d_caveshtail, trihandle* d_cavesegshlist, int* d_cavesegshprev, int* d_cavesegshnext, int* d_cavesegshhead, int* d_cavesegshtail, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; tethandle searchtet = d_searchtet[threadId]; tethandle spintet, neightet; locateresult loc = d_pointlocation[threadId]; // cavities int icsindex = d_initialcavityindices[threadId]; int count = 0; int prev = -1; int icavityIdx; int tetidxfactor = 4; if (loc == ONEDGE) { spintet = searchtet; while (1) { // initial cavity index icavityIdx = icsindex + count; // add to tet list cudamesh_eorgoppo(spintet, neightet); neightet = d_neighborlist[4 * neightet.id + (neightet.ver & 3)]; neightet.ver = raw_epivot[neightet.ver]; d_cavetetlist[tetidxfactor * icavityIdx] = neightet; d_cavetetprev[tetidxfactor * icavityIdx] = (prev == -1) ? 
-1 : tetidxfactor * prev + 1; d_cavetetnext[tetidxfactor * icavityIdx] = tetidxfactor * icavityIdx + 1; if (prev != -1) d_cavetetnext[tetidxfactor * prev + 1] = tetidxfactor * icavityIdx; d_cavethreadidx[tetidxfactor * icavityIdx] = threadId; cudamesh_edestoppo(spintet, neightet); neightet = d_neighborlist[4 * neightet.id + (neightet.ver & 3)]; neightet.ver = raw_epivot[neightet.ver]; d_cavetetlist[tetidxfactor * icavityIdx + 1] = neightet; d_cavetetprev[tetidxfactor * icavityIdx + 1] = tetidxfactor * icavityIdx; d_cavetetnext[tetidxfactor * icavityIdx + 1] = -1; d_cavethreadidx[tetidxfactor * icavityIdx + 1] = threadId; // add to old tet list d_caveoldtetlist[icavityIdx] = spintet; // current tet d_caveoldtetprev[icavityIdx] = prev; // previous d_caveoldtetnext[icavityIdx] = -1; // next, set to -1 first if (prev != -1) d_caveoldtetnext[prev] = icavityIdx; // previous next, set to me if (count == 0) { d_caveoldtethead[threadId] = icavityIdx; d_cavetethead[threadId] = tetidxfactor * icavityIdx; } // next iteration prev = icavityIdx; cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == searchtet.id) { d_caveoldtettail[threadId] = icavityIdx; d_cavetettail[threadId] = tetidxfactor * icavityIdx + 1; break; } count++; } // while (1) } else if (loc == ONFACE) { int i, j; // initial cavity index icavityIdx = icsindex; // add to tet and old tet list j = (searchtet.ver & 3); for (i = 1; i < 4; i++) { neightet = d_neighborlist[4 * searchtet.id + (j + i) % 4]; d_cavetetlist[tetidxfactor * icavityIdx + i - 1] = neightet; d_cavetetprev[tetidxfactor * icavityIdx + i - 1] = (i == 1) ? -1 : tetidxfactor * icavityIdx + i - 2; d_cavetetnext[tetidxfactor * icavityIdx + i - 1] = tetidxfactor * icavityIdx + i; d_cavethreadidx[tetidxfactor * icavityIdx + i - 1] = threadId; } d_cavetethead[threadId] = tetidxfactor * icavityIdx; d_caveoldtetlist[icavityIdx] = searchtet; d_caveoldtetprev[icavityIdx] = -1; d_caveoldtetnext[icavityIdx] = icavityIdx + 1; d_caveoldtethead[threadId] = icavityIdx; icavityIdx++; spintet = d_neighborlist[4 * searchtet.id + j]; j = (spintet.ver & 3); for (i = 1; i < 4; i++) { neightet = d_neighborlist[4 * spintet.id + (j + i) % 4]; d_cavetetlist[tetidxfactor * icavityIdx + i - 1] = neightet; d_cavetetprev[tetidxfactor * icavityIdx + i - 1] = tetidxfactor * icavityIdx + i - 2; d_cavetetnext[tetidxfactor * icavityIdx + i - 1] = (i == 3) ? -1 : tetidxfactor * icavityIdx + i; d_cavethreadidx[tetidxfactor * icavityIdx + i - 1] = threadId; } d_cavetettail[threadId] = tetidxfactor * icavityIdx + 2; d_caveoldtetlist[icavityIdx] = spintet; d_caveoldtetprev[icavityIdx] = icavityIdx -1; d_caveoldtetnext[icavityIdx] = -1; d_caveoldtettail[threadId] = icavityIdx; } else if (loc == INTETRAHEDRON || loc == OUTSIDE) { int i; // initial cavity index icavityIdx = icsindex; // add to tet and old tet list for (i = 0; i < 4; i++) { neightet = d_neighborlist[4 * searchtet.id + i]; d_cavetetlist[tetidxfactor * icavityIdx + i] = neightet; d_cavetetprev[tetidxfactor * icavityIdx + i] = (i == 0) ? -1 : tetidxfactor * icavityIdx + i - 1; d_cavetetnext[tetidxfactor * icavityIdx + i] = (i == 3) ? 
-1 : tetidxfactor * icavityIdx + i + 1; d_cavethreadidx[tetidxfactor * icavityIdx + i] = threadId; } d_cavetethead[threadId] = tetidxfactor * icavityIdx; d_cavetettail[threadId] = tetidxfactor * icavityIdx + 3; d_caveoldtetlist[icavityIdx] = searchtet; d_caveoldtetprev[icavityIdx] = -1; d_caveoldtetnext[icavityIdx] = -1; d_caveoldtethead[threadId] = icavityIdx; d_caveoldtettail[threadId] = icavityIdx; } // subcavities if (d_initialsubcavitysize[threadId] != 0) // when splitseg is dangling segment, this equals to 0 { int iscsindex = d_initialsubcavityindices[threadId]; int scount = 0; int sprev = -1; int iscavityIdx; trihandle splitsh; if (loc == ONEDGE) { if (threadmarker == 0) { int segId = d_insertidxlist[threadId]; trihandle splitseg(segId, 0); cudamesh_spivot(splitseg, splitsh, d_seg2trilist); } else if (threadmarker == 1) { splitsh = d_searchsh[threadId]; } // Collect all subfaces share at this edge. if (splitsh.id != -1) { int pa = cudamesh_sorg(splitsh, d_trifacelist); trihandle neighsh = splitsh; while (1) { // Initial subcavity index iscavityIdx = iscsindex + scount; // Adjust the origin of its edge to be 'pa'. if (cudamesh_sorg(neighsh, d_trifacelist) != pa) { cudamesh_sesymself(neighsh); } // add to cavesh and cavesegsh list d_caveshlist[iscavityIdx] = neighsh; // current tet d_caveshprev[iscavityIdx] = sprev; // previous d_caveshnext[iscavityIdx] = -1; // next, set to -1 first d_cavesegshlist[iscavityIdx] = neighsh; // current triface d_cavesegshprev[iscavityIdx] = sprev; // previous d_cavesegshnext[iscavityIdx] = -1; // next, set to -1 first if (sprev != -1) { d_caveshnext[sprev] = iscavityIdx; // previous next, set to me d_cavesegshnext[sprev] = iscavityIdx; // previous next, set to me } if (scount == 0) { d_caveshhead[threadId] = iscavityIdx; d_cavesegshhead[threadId] = iscavityIdx; } // next iteration sprev = iscavityIdx; // count this face scount++; // Go to the next face at the edge. cudamesh_spivotself(neighsh, d_tri2trilist); // Stop if all faces at the edge have been visited. 
if (neighsh.id == splitsh.id || neighsh.id == -1) { d_caveshtail[threadId] = iscavityIdx; d_cavesegshtail[threadId] = iscavityIdx; break; } } // while (1) } } else if (loc == ONFACE) { if (threadmarker == 1) { iscavityIdx = iscsindex; splitsh = d_searchsh[threadId]; d_caveshlist[iscavityIdx] = splitsh; d_caveshprev[iscavityIdx] = -1; d_caveshnext[iscavityIdx] = -1; d_caveshhead[threadId] = iscavityIdx; d_caveshtail[threadId] = iscavityIdx; } } } } __global__ void kernelInitLinklistCurPointer( int* d_threadlist, int* d_linklisthead, int* d_linklistcur, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; d_linklistcur[threadId] = d_linklisthead[threadId]; } __global__ void kernelCavityRatioControl( int* d_cavethreadidx, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId != -1) // owners of larger cavities d_threadmarker[threadId] = -1; } __global__ void kernelLargeCavityCheck( int* d_insertidxlist, REAL* d_insertptlist, int* d_cavethreadidx, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId != -1) // owners of large cavities { int threadmarker = d_threadmarker[threadId]; if (threadmarker != -1) { int eleidx = d_insertidxlist[threadId]; if (threadmarker == 0) d_segstatus[eleidx].setAbortive(true); else if (threadmarker == 1) d_tristatus[eleidx].setAbortive(true); else if (threadmarker == 2) d_tetstatus[eleidx].setAbortive(true); d_threadmarker[threadId] = -1; } } } __global__ void kernelCavityExpandingCheck( int* d_cavethreadidx, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, REAL* d_insertptlist, tethandle* d_cavetetlist, int* d_cavetetprev, int* d_cavetetnext, int* d_cavetethead, int* d_cavetettail, int* d_cavetetexpandsize, tethandle* d_caveoldtetlist, int* d_caveoldtetprev, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_caveoldtettail, int* d_caveoldtetexpandsize, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_threadmarker, int* d_priority, uint64* d_tetmarker, int cavetetcurstartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int tetexpandsize = 0; int oldtetexpandsize = 0; int bdryexpandsize = 0; int threadId = d_cavethreadidx[pos]; if (threadId != -1) // threadId is -1 in the unused slot { REAL* insertpt = d_insertptlist + 3 * threadId; int cur = cavetetcurstartindex + pos; tethandle cavetet = d_cavetetlist[cur]; if (d_threadmarker[threadId] != -1) // avoid to expand loser { uint64 marker = cudamesh_encodeUInt64Priority(d_priority[threadId], threadId); if (d_tetmarker[cavetet.id] != marker) // need to check { bool enqflag = false; double sign; // Get four endpoints of cavetet REAL *pts[4]; int idx[4]; for (int i = 0; i < 4; i++) { idx[i] = d_tetlist[4 * cavetet.id + i]; if (idx[i] != -1) pts[i] = cudamesh_id2pointlist(idx[i], d_pointlist); else pts[i] = NULL; } // Test if cavetet is included in the (enlarged) cavity if (idx[3] != -1) { sign = cudamesh_insphere_s(pts[0], pts[1], pts[2], pts[3], insertpt, idx[0], idx[1], idx[2], idx[3], MAXINT); enqflag = (sign < 0.0); } else // A hull face (must be a subface). 
Test its neighbor. { // We FIRST finclude it in the initial cavity if its adjacent tet is // not Delaunay wrt p. Will validate it later on. tethandle neineitet = d_neighborlist[4 * cavetet.id + 3]; if (d_tetmarker[neineitet.id] != marker) // need to check { // Get four endpoints of neineitet for (int i = 0; i < 4; i++) { idx[i] = d_tetlist[4 * neineitet.id + i]; if (idx[i] != -1) pts[i] = cudamesh_id2pointlist(idx[i], d_pointlist); else pts[i] = NULL; } assert(idx[3] != -1); sign = cudamesh_insphere_s(pts[0], pts[1], pts[2], pts[3], insertpt, idx[0], idx[1], idx[2], idx[3], MAXINT); enqflag = (sign < 0.0); } else { enqflag = true; } } // Count size if (enqflag) { uint64 oldmarker = atomicMin(d_tetmarker + cavetet.id, marker); if (marker < oldmarker) // I winned { tetexpandsize = 3; oldtetexpandsize = 1; int old = cudamesh_getUInt64PriorityIndex(oldmarker); if (old != MAXUINT) { d_threadmarker[old] = -1; } } else if (marker > oldmarker) // I lost { d_threadmarker[threadId] = -1; } } else { bdryexpandsize = 1; } } } } d_cavetetexpandsize[pos] = tetexpandsize; d_caveoldtetexpandsize[pos] = oldtetexpandsize; d_cavebdryexpandsize[pos] = bdryexpandsize; } __global__ void kernelCorrectExpandingSize( int* d_cavethreadidx, int* d_cavetetexpandsize, int* d_caveoldtetexpandsize, int* d_cavebdryexpandsize, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId != -1 && d_threadmarker[threadId] == -1) { d_cavetetexpandsize[pos] = 0; d_caveoldtetexpandsize[pos] = 0; d_cavebdryexpandsize[pos] = 0; } } __global__ void kernelCavityExpandingSetThreadidx( int* d_cavethreadidx, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int* d_cavetetthreadidx, int* d_caveoldtetexpandsize, int* d_caveoldtetexpandindices, int* d_caveoldtetthreadidx, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_cavebdrythreadidx, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId == -1) return; int eindex; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; for (int j = 0; j < 3; j++) { d_cavetetthreadidx[eindex + j] = threadId; } } if (d_caveoldtetexpandsize[pos] != 0) { eindex = d_caveoldtetexpandindices[pos]; d_caveoldtetthreadidx[eindex] = threadId; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; d_cavebdrythreadidx[eindex] = threadId; } } __global__ void kernelCavityExpandingMarkAndAppend( int* d_cavethreadidx, tethandle* d_neighborlist, tethandle* d_cavetetlist, int* d_cavetetprev, int* d_cavetetnext, int* d_cavetethead, int* d_cavetettail, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int* d_cavetetthreadidx, int cavetetstartindex, int cavetetexpandsize, tethandle* d_caveoldtetlist, int* d_caveoldtetprev, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_caveoldtettail, int* d_caveoldtetexpandsize, int* d_caveoldtetexpandindices, int* d_caveoldtetthreadidx, int caveoldtetstartindex, int caveoldtetexpandsize, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_cavebdrythreadidx, int cavebdrystartindex, int cavebdryexpandsize, int* d_threadmarker, int* d_priority, uint64* d_tetmarker, int cavetetcurstartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) 
return; int threadId = d_cavethreadidx[pos]; if (threadId == -1) return; if (d_threadmarker[threadId] == -1) return; int cur = cavetetcurstartindex + pos; tethandle cavetet = d_cavetetlist[cur]; int sindex, eindex, prev; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; sindex = cavetetstartindex + eindex; // Append cavetetlist and mark current tet int k = (cavetet.ver & 3); // The current face number tethandle neightet; int newid; if (eindex == 0 || d_cavetetthreadidx[eindex - 1] != threadId) { prev = d_cavetettail[threadId]; d_cavetetnext[prev] = sindex; // prev must not be -1 } else prev = sindex - 1; for (int j = 1; j < 4; j++) { neightet = d_neighborlist[4 * cavetet.id + (j + k) % 4]; newid = sindex + j - 1; d_cavetetlist[newid] = neightet; d_cavetetprev[newid] = prev; d_cavetetnext[newid] = newid + 1; // set to next one first prev = newid; } if (eindex + 2 == cavetetexpandsize - 1 || d_cavetetthreadidx[eindex + 3] != threadId) d_cavetetnext[newid] = -1; } if (d_caveoldtetexpandsize[pos] != 0) { eindex = d_caveoldtetexpandindices[pos]; sindex = caveoldtetstartindex + eindex; if (eindex == 0 || d_caveoldtetthreadidx[eindex - 1] != threadId) { prev = d_caveoldtettail[threadId]; d_caveoldtetnext[prev] = sindex; // prev must not be -1 } else prev = sindex - 1; d_caveoldtetlist[sindex] = cavetet; d_caveoldtetprev[sindex] = prev; d_caveoldtetnext[sindex] = sindex + 1; if (eindex == caveoldtetexpandsize - 1 || d_caveoldtetthreadidx[eindex + 1] != threadId) d_caveoldtetnext[sindex] = -1; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; sindex = cavebdrystartindex + eindex; if (eindex == 0 || d_cavebdrythreadidx[eindex - 1] != threadId) { prev = d_cavebdrytail[threadId]; if (prev != -1) d_cavebdrynext[prev] = sindex; // prev must not be -1 if (d_cavebdryhead[threadId] == -1) // initialize cavebdry list header d_cavebdryhead[threadId] = sindex; } else prev = sindex - 1; cavetet.ver = raw_epivot[cavetet.ver]; d_cavebdrylist[sindex] = cavetet; d_cavebdryprev[sindex] = prev; d_cavebdrynext[sindex] = sindex + 1; if (eindex == cavebdryexpandsize - 1 || d_cavebdrythreadidx[eindex + 1] != threadId) d_cavebdrynext[sindex] = -1; } } __global__ void kernelCavityExpandingUpdateListTails( int* d_cavethreadidx, int* d_cavetetnext, int* d_cavetettail, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int cavetetstartindex, int* d_caveoldtetnext, int* d_caveoldtettail, int* d_caveoldtetexpandsize, int* d_caveoldtetexpandindices, int caveoldtetstartindex, int* d_cavebdrynext, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_threadmarker, int cavebdrystartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId == -1) return; if (d_threadmarker[threadId] == -1) return; int sindex, eindex, prev; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; sindex = cavetetstartindex + eindex + 2; if (d_cavetetnext[sindex] == -1) d_cavetettail[threadId] = sindex; } if (d_caveoldtetexpandsize[pos] != 0) { eindex = d_caveoldtetexpandindices[pos]; sindex = caveoldtetstartindex + eindex; if (d_caveoldtetnext[sindex] == -1) d_caveoldtettail[threadId] = sindex; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; sindex = cavebdrystartindex + eindex; if (d_cavebdrynext[sindex] == -1) d_cavebdrytail[threadId] = sindex; } } __global__ void kernelMarkCavityAdjacentSubsegs( int* 
d_threadlist, trihandle* d_tet2seglist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_segmarker, int* d_threadmarker, int numofthreads, uint64* d_tetmarker ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int i = d_caveoldtethead[threadId]; int old; tethandle cavetet; trihandle checkseg; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 6; j++) { checkseg = d_tet2seglist[6 * cavetet.id + j]; if (checkseg.id != -1) { old = atomicMin(d_segmarker + checkseg.id, threadId); if (old < threadId) d_threadmarker[threadId] = -1; else if (old > threadId && old != MAXINT) d_threadmarker[old] = -1; } } i = d_caveoldtetnext[i]; } } __global__ void kernelCountCavitySubsegs( int* d_threadlist, trihandle* d_tet2seglist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_cavetetsegsize, int* d_segmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int cavetetsegsize = 0; if (d_threadmarker[threadId] != -1) { int i = d_caveoldtethead[threadId]; tethandle cavetet; trihandle checkseg; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 6; j++) { checkseg = d_tet2seglist[6 * cavetet.id + j]; if (checkseg.id != -1) { if (d_segmarker[checkseg.id] == threadId) { cavetetsegsize++; d_segmarker[checkseg.id] = MAXINT; // Mark as counted } } } i = d_caveoldtetnext[i]; } } d_cavetetsegsize[pos] = cavetetsegsize; } __global__ void kernelAppendCavitySubsegs( int* d_threadlist, trihandle* d_tet2seglist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, trihandle* d_cavetetseglist, int* d_cavetetsegprev, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_cavetetsegtail, int* d_cavetetsegsize, int* d_cavetetsegindices, int* d_segmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; if (d_cavetetsegsize[pos] == 0) return; int sindex = d_cavetetsegindices[pos]; d_cavetetseghead[threadId] = sindex; int i = d_caveoldtethead[threadId]; tethandle cavetet; trihandle checkseg; int index, count = 0, prev = -1; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 6; j++) { checkseg = d_tet2seglist[6 * cavetet.id + j]; if (checkseg.id != -1) { if (d_segmarker[checkseg.id] == MAXINT) { d_segmarker[checkseg.id] = -2; // Mark as appended index = sindex + count; d_cavetetseglist[index] = checkseg; d_cavetetsegprev[index] = prev; d_cavetetsegnext[index] = -1; if (prev != -1) d_cavetetsegnext[prev] = index; count++; prev = index; } } } i = d_caveoldtetnext[i]; if (i == -1) // reached the end { d_cavetetsegtail[threadId] = index; } } } __global__ void kernelCheckSegmentEncroachment( int* d_insertidxlist, REAL* d_insertptlist, int* d_threadlist, REAL* d_pointlist, int* d_seglist, int* d_segencmarker, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker <= 0) // loser or subsegment return; REAL *insertpt = d_insertptlist + 3 * threadId; int ipa, ipb, encpt; REAL *pa, *pb; trihandle paryseg; bool flag = false; int i = 
d_cavetetseghead[threadId]; while (i != -1) { paryseg = d_cavetetseglist[i]; ipa = d_seglist[3 * paryseg.id + 0]; ipb = d_seglist[3 * paryseg.id + 1]; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); if (checkseg4encroach(pa, pb, insertpt)) // encroached { flag = true; if (!d_segstatus[paryseg.id].isAbortive()) { d_segencmarker[paryseg.id] = MAXINT; d_threadmarker[threadId] = -1; break; } } i = d_cavetetsegnext[i]; } if (flag && d_threadmarker[threadId] != -1) // segments encroached are all abortive { int eleidx = d_insertidxlist[threadId]; if (threadmarker == 1) { d_tristatus[eleidx].setAbortive(true); } else if (threadmarker == 2) { d_tetstatus[eleidx].setAbortive(true); } d_threadmarker[threadId] = -1; } } __global__ void kernelMarkCavityAdjacentSubfaces( int* d_threadlist, trihandle* d_tet2trilist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int i = d_caveoldtethead[threadId]; int old; tethandle cavetet; trihandle checksh; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 4; j++) { checksh = d_tet2trilist[4 * cavetet.id + j]; if (checksh.id != -1) { old = atomicMin(d_trimarker + checksh.id, threadId); if (old < threadId) d_threadmarker[threadId] = -1; else if (old > threadId && old != MAXINT) d_threadmarker[old] = -1; } } i = d_caveoldtetnext[i]; } } __global__ void kernelCountCavitySubfaces( int* d_threadlist, trihandle* d_tet2trilist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_cavetetshsize, int* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int cavetetshsize = 0; if (d_threadmarker[threadId] != -1) { int i = d_caveoldtethead[threadId]; tethandle cavetet; trihandle checksh; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 4; j++) { checksh = d_tet2trilist[4 * cavetet.id + j]; if (checksh.id != -1) { if (d_trimarker[checksh.id] == threadId) { cavetetshsize++; d_trimarker[checksh.id] = MAXINT; } } } i = d_caveoldtetnext[i]; } } d_cavetetshsize[pos] = cavetetshsize; } __global__ void kernelAppendCavitySubfaces( int* d_threadlist, trihandle* d_tet2trilist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, trihandle* d_cavetetshlist, int* d_cavetetshprev, int* d_cavetetshnext, int* d_cavetetshhead, int* d_cavetetshtail, int* d_cavetetshsize, int* d_cavetetshindices, int* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; if (d_cavetetshsize[pos] == 0) return; int sindex = d_cavetetshindices[pos]; d_cavetetshhead[threadId] = sindex; int i = d_caveoldtethead[threadId]; tethandle cavetet; trihandle checksh; int index, count = 0, prev = -1; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 4; j++) { checksh = d_tet2trilist[4 * cavetet.id + j]; if (checksh.id != -1) { if (d_trimarker[checksh.id] == MAXINT) { d_trimarker[checksh.id] = -2; // Mark as appended index = sindex + count; d_cavetetshlist[index] = checksh; d_cavetetshprev[index] = prev; d_cavetetshnext[index] = -1; if (prev != -1) d_cavetetshnext[prev] = index; count++; prev = index; } } } i = d_caveoldtetnext[i]; if (i == -1) // reached the end { 
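// Record the slot of the last appended subface as this thread's list tail;
// together with d_cavetetshhead set above, the subfaces adjacent to the old
// cavity tets now form a per-thread linked list in the preallocated range
// starting at sindex.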
d_cavetetshtail[threadId] = index; } } } __global__ void kernelCheckSubfaceEncroachment( int* d_insertidxlist, REAL* d_insertptlist, locateresult* d_pointlocation, int* d_threadlist, REAL* d_pointlist, int* d_trifacelist, int* d_subfaceencmarker, tristatus* d_tristatus, tetstatus* d_tetstatus, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 2) // not a tetrahedron return; locateresult loc = d_pointlocation[threadId]; REAL *insertpt = d_insertptlist + 3 * threadId; REAL *pa, *pb, *pc; trihandle parysh; bool flag = false; int i = d_cavetetshhead[threadId]; while (i != -1) { parysh = d_cavetetshlist[i]; pa = cudamesh_id2pointlist(d_trifacelist[3 * parysh.id + 0], d_pointlist); pb = cudamesh_id2pointlist(d_trifacelist[3 * parysh.id + 1], d_pointlist); pc = cudamesh_id2pointlist(d_trifacelist[3 * parysh.id + 2], d_pointlist); if (checkface4encroach(pa, pb, pc, insertpt)) // encroached { flag = true; if (!d_tristatus[parysh.id].isAbortive()) { d_subfaceencmarker[parysh.id] = MAXINT; d_threadmarker[threadId] = -1; break; } } i = d_cavetetshnext[i]; } if (loc == OUTSIDE || (flag && d_threadmarker[threadId] != -1)) { // subfaces encroached are all abortive or points are outside the domain int insertidx = d_insertidxlist[threadId]; d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; } } __global__ void kernelSubCavityExpandingCheck( int* d_threadlist, REAL* d_pointlist, tethandle* d_neighborlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, REAL* d_insertptlist, trihandle* d_caveshlist, int* d_caveshcur, int* d_caveshexpandsize, int* d_caveshexpandflag, int* d_priority, uint64* d_tetmarker, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; REAL* insertpt = d_insertptlist + 3 * threadId; int cur = d_caveshcur[threadId]; if (cur == -1) // this means that caveshlist is empty return; trihandle checksh = d_caveshlist[cur]; trihandle neighsh; tethandle neightet; REAL sign; REAL* pt[3]; int flag[3] = {0, 0, 0}; int shexpandsize = 0; //assert(d_trimarker[checksh.id] == threadId); uint64 marker = cudamesh_encodeUInt64Priority(d_priority[threadId], threadId); for (int j = 0; j < 3; j++) { if (!cudamesh_isshsubseg(checksh, d_tri2seglist)) { cudamesh_spivot(checksh, neighsh, d_tri2trilist); //assert(neighsh.id != -1); if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) { cudamesh_stpivot(neighsh, neightet, d_tri2tetlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { cudamesh_fsymself(neightet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { pt[0] = cudamesh_id2pointlist(cudamesh_sorg(neighsh, d_trifacelist), d_pointlist); pt[1] = cudamesh_id2pointlist(cudamesh_sdest(neighsh, d_trifacelist), d_pointlist); pt[2] = cudamesh_id2pointlist(cudamesh_sapex(neighsh, d_trifacelist), d_pointlist); sign = cudamesh_incircle3d(pt[0], pt[1], pt[2], insertpt); if (sign < 0) { atomicMin(d_trimarker + neighsh.id, marker); shexpandsize++; flag[j] = 1; } } } } } cudamesh_senextself(checksh); } d_caveshexpandsize[pos] = shexpandsize; if (shexpandsize > 0) { for (int j = 0; j < 3; j++) { if ((flag[j] == 1 && 
shexpandsize == 1) || (flag[j] == 0 && shexpandsize == 2)) { d_caveshexpandflag[pos] = j; break; } } } } __global__ void kernelSubCavityExpandingAppend( int* d_threadlist, trihandle* d_tri2trilist, trihandle* d_caveshlist, int* d_caveshprev, int* d_caveshnext, int* d_caveshhead, int* d_caveshtail, int* d_caveshcur, int* d_caveshexpandsize, int* d_caveshexpandindices, int* d_caveshexpandflag, int caveshstartindex, int* d_threadfinishmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int cur = d_caveshcur[threadId]; if (cur == -1) { d_threadfinishmarker[threadId] = -1; return; } trihandle checksh = d_caveshlist[cur]; trihandle neighsh; int sindex; int caveshexpandsize = d_caveshexpandsize[pos]; int caveshexpandflag = d_caveshexpandflag[pos]; if (caveshexpandsize != 0) { sindex = caveshstartindex + d_caveshexpandindices[pos]; int prev = d_caveshtail[threadId]; int newid = sindex; for (int j = 0; j < 3; j++) { if ((caveshexpandsize == 1 && j == caveshexpandflag) || (caveshexpandsize == 2 && j != caveshexpandflag) || caveshexpandsize == 3) { cudamesh_spivot(checksh, neighsh, d_tri2trilist); d_caveshlist[newid] = neighsh; d_caveshprev[newid] = prev; d_caveshnext[newid] = -1; if (prev != -1) d_caveshnext[prev] = newid; prev = newid++; } cudamesh_senextself(checksh); } d_caveshtail[threadId] = newid - 1; // Update current linklist pointer to next one d_caveshcur[threadId] = d_caveshnext[cur]; } else { if (cur == d_caveshtail[threadId]) d_threadfinishmarker[threadId] = -1; else d_caveshcur[threadId] = d_caveshnext[cur]; } } __global__ void kernelCavityBoundarySubfacesCheck( int* d_insertidxlist, int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* d_tet2seglist, tethandle* d_tri2tetlist, REAL* d_insertptlist, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, int* d_cavetetshmarker, tethandle* d_cavetetshflag, int* d_cavebdryexpandsize, int* d_cutcount, uint64* d_tetmarker, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; REAL* insertpt = d_insertptlist + 3 * threadId; trihandle parysh; tethandle neightet; int cavebdryexpandsize = 0; int cutcount = 0; double ori; int i = d_cavetetshhead[threadId]; while (i != -1) { parysh = d_cavetetshlist[i]; cudamesh_stpivot(parysh, neightet, d_tri2tetlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { cudamesh_fsymself(neightet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { // Found a subface inside subcavity if (cudamesh_getUInt64PriorityIndex(d_trimarker[parysh.id]) != threadId) { if (cudamesh_oppo(neightet, d_tetlist) != -1) { cudamesh_fsymself(neightet, d_neighborlist); } if (cudamesh_oppo(neightet, d_tetlist) != -1) { int idx[3]; REAL* pt[3]; idx[0] = cudamesh_org(neightet, d_tetlist); idx[1] = cudamesh_dest(neightet, d_tetlist); idx[2] = cudamesh_apex(neightet, d_tetlist); for (int j = 0; j < 3; j++) { pt[j] = cudamesh_id2pointlist(idx[j], d_pointlist); } ori = cuda_orient3d(pt[0], pt[1], pt[2], insertpt); if (ori < 0) { cudamesh_fsymself(neightet, d_neighborlist); ori = -ori; } } else { ori = 1; } // unmark and record this tet if it is either invisible by or coplanar with p if (ori >= 0) { d_tetmarker[neightet.id] = MAXULL; // unmark this tet d_cavetetshmarker[i] = 0; // mark this subface 
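// Remember which tet was unmarked for this subface; kernelCavityBoundarySubfacesAppend
// later reads d_cavetetshflag and pushes the four faces of that tet onto the
// cavity boundary list, which is why cavebdryexpandsize grows by 4 below.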
d_cavetetshflag[i] = neightet; cutcount++; cavebdryexpandsize += 4; } } } } i = d_cavetetshnext[i]; } d_cavebdryexpandsize[pos] = cavebdryexpandsize; d_cutcount[threadId] = cutcount; } __global__ void kernelCavityBoundarySubfacesAppend( int* d_threadlist, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, int* d_cavetetshmarker, tethandle* d_cavetetshflag, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int cavebdrystartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_cavebdryexpandsize[pos] == 0) return; int threadId = d_threadlist[pos]; tethandle neightet, neineitet; int sindex = d_cavebdryexpandindices[pos]; int prev = d_cavebdrytail[threadId]; int newid = cavebdrystartindex + sindex; int i = d_cavetetshhead[threadId]; while (i != -1) { if (d_cavetetshmarker[i] == 0) // Need to append { neightet = d_cavetetshflag[i]; neightet.ver = raw_epivot[neightet.ver]; d_cavebdrylist[newid] = neightet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = -1; // set to -1 first if (prev != -1) d_cavebdrynext[prev] = newid; prev = newid; newid++; for (int j = 0; j < 3; j++) { cudamesh_esym(neightet, neineitet); neineitet.ver = raw_epivot[neineitet.ver]; d_cavebdrylist[newid] = neineitet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = -1; // set to -1 first if (prev != -1) d_cavebdrynext[prev] = newid; prev = newid; newid++; cudamesh_enextself(neightet); } } i = d_cavetetshnext[i]; if (i == -1) { d_cavebdrytail[threadId] = newid - 1; } } } __global__ void kernelCavityBoundarySubsegsCheck( int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, int* d_seglist, tethandle* d_seg2tetlist, REAL* d_insertptlist, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_cavetetsegmarker, tethandle* d_cavetetsegflag, int* d_cavebdryexpandsize, int* d_cutcount, uint64* d_tetmarker, int* d_segmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; REAL* insertpt = d_insertptlist + 3 * threadId; REAL *pa, *pb, *pc; int ia, ib, ic; trihandle paryseg; tethandle neightet, spintet, neineitet; int cavebdryexpandsize = 0; int cutcount = 0; double ori; int i = d_cavetetseghead[threadId]; int j; while (i != -1) { paryseg = d_cavetetseglist[i]; if (d_segmarker[paryseg.id] != threadId) // not a splitting segment { cudamesh_sstpivot1(paryseg, neightet, d_seg2tetlist); { int pa, pb, pc, pd; pa = cudamesh_sorg(paryseg, d_seglist); pb = cudamesh_sdest(paryseg, d_seglist); pc = cudamesh_org(neightet, d_tetlist); pd = cudamesh_dest(neightet, d_tetlist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } } spintet = neightet; while (1) { if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) != threadId) break; cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) == threadId) // This segment is inside cavity { // Find an adjacent tet at this segment such that both faces // at this segment are not visible by p. 
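// Spin around the segment with cudamesh_fnextself and, for each incident tet,
// test the two faces sharing the segment with cuda_orient3d. The first tet for
// which both faces are invisible from the insertion point (ori >= 0) is
// unmarked below, so the segment ends up on the cavity boundary instead of
// strictly inside the cavity.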
ia = cudamesh_org(neightet, d_tetlist); ib = cudamesh_dest(neightet, d_tetlist); pa = cudamesh_id2pointlist(ia, d_pointlist); pb = cudamesh_id2pointlist(ib, d_pointlist); spintet = neightet; j = 0; while (1) { ic = cudamesh_apex(spintet, d_tetlist); if (ic != -1) { pc = cudamesh_id2pointlist(ic, d_pointlist); ori = cuda_orient3d(pa, pb, pc, insertpt); if (ori >= 0) { // Not visible. Check another face in this tet. cudamesh_esym(spintet, neineitet); ic = cudamesh_apex(neineitet, d_tetlist); if (ic != -1) { pc = cudamesh_id2pointlist(ic, d_pointlist); ori = cuda_orient3d(pb, pa, pc, insertpt); if (ori >= 0) { // Not visible. Found this face. j = 1; // Flag that it is found. break; } } } } cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } if (j == 0) { //printf("threadId #%d: Subseg check error - Couldn't find the tet to be unmarked!\n", threadId); } neightet = spintet; d_tetmarker[neightet.id] = MAXULL; // unmark this tet d_cavetetsegmarker[i] = 0; // mark this subseg d_cavetetsegflag[i] = neightet; cutcount++; cavebdryexpandsize += 4; } } i = d_cavetetsegnext[i]; } d_cavebdryexpandsize[pos] = cavebdryexpandsize; d_cutcount[threadId] += cutcount; } __global__ void kernelCavityBoundarySubsegsAppend( int* d_threadlist, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_cavetetsegmarker, tethandle* d_cavetetsegflag, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int cavebdrystartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_cavebdryexpandsize[pos] == 0) return; int threadId = d_threadlist[pos]; tethandle neightet, neineitet; int sindex = d_cavebdryexpandindices[pos]; int prev = d_cavebdrytail[threadId]; int newid = cavebdrystartindex + sindex; int i = d_cavetetseghead[threadId]; while (i != -1) { if (d_cavetetsegmarker[i] == 0) // Need to append { neightet = d_cavetetsegflag[i]; neightet.ver = raw_epivot[neightet.ver]; d_cavebdrylist[newid] = neightet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = -1; // set to -1 first if (prev != -1) d_cavebdrynext[prev] = newid; prev = newid; newid++; for (int j = 0; j < 3; j++) { cudamesh_esym(neightet, neineitet); neineitet.ver = raw_epivot[neineitet.ver]; d_cavebdrylist[newid] = neineitet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = -1; // set to -1 first if (prev != -1) d_cavebdrynext[prev] = newid; prev = newid; newid++; cudamesh_enextself(neightet); } } i = d_cavetetsegnext[i]; if (i == -1) { d_cavebdrytail[threadId] = newid - 1; } } } __global__ void kernelUpdateCavity2StarShapedSortOutBoundaryListCount( int* d_threadlist, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavecount, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int count = 0; int threadId = d_threadlist[pos]; int i = d_cavebdryhead[threadId]; while (i != -1) { count += 1; i = d_cavebdrynext[i]; } d_cavecount[pos] = count; } __global__ void kernelUpdateCavity2StarShapedSortOutBoundaryListAppend( int* d_threadlist, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, tethandle* d_cavelist, int* d_caveprev, int* d_cavenext, int* d_expandindices, int* d_cavethreadidx, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int 
sindex = d_expandindices[pos]; int prev = -1; int i = d_cavebdryhead[threadId]; d_cavebdryhead[threadId] = sindex; while (i != -1) { d_cavelist[sindex] = d_cavebdrylist[i]; d_caveprev[sindex] = prev; d_cavenext[sindex] = -1; if (prev != -1) d_cavenext[prev] = sindex; d_cavethreadidx[sindex] = threadId; prev = sindex; sindex++; i = d_cavebdrynext[i]; } d_cavebdrytail[threadId] = sindex - 1; } __global__ void kernelUpdateCavity2StarShapedCheck( int* d_insertidxlist, int* d_cavethreadidx, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2seglist, REAL* d_insertptlist, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavetetexpandsize, int* d_cavebdryexpandsize, int* d_cutcount, uint64* d_tetmarker, int cavebdrycurstartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int tetexpandsize = 0; int bdryexpandsize = 0; int threadId = d_cavethreadidx[pos]; REAL* insertpt = d_insertptlist + 3 * threadId; int cur = cavebdrycurstartindex + pos; tethandle cavetet = d_cavebdrylist[cur]; tethandle neightet; cudamesh_fsym(cavetet, neightet, d_neighborlist); bool enqflag; REAL ori; if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { if (cudamesh_apex(cavetet, d_tetlist) != -1) { if (cudamesh_oppo(neightet, d_tetlist) != -1) { REAL *pts[3]; int idx[3]; idx[0] = cudamesh_org(cavetet, d_tetlist); idx[1] = cudamesh_dest(cavetet, d_tetlist); idx[2] = cudamesh_apex(cavetet, d_tetlist); for (int i = 0; i < 3; i++) { pts[i] = cudamesh_id2pointlist(idx[i], d_pointlist); } ori = cuda_orient3d(pts[0], pts[1], pts[2], insertpt); enqflag = (ori > 0); } else { // It is a hull face. And its adjacent tet (at inside of the // domain) has been cut from the cavity. Cut it as well. 
enqflag = false; } } else { enqflag = true; // A hull edge } if (enqflag) { tetexpandsize = 1; } else { d_tetmarker[neightet.id] = MAXULL; d_cutcount[threadId] += 1; // This may cause a wrong value but it doesn't affect the result bdryexpandsize = 3; } } d_cavetetexpandsize[pos] = tetexpandsize; d_cavebdryexpandsize[pos] = bdryexpandsize; } __global__ void kernelUpdateCavity2StarShapedSetThreadidx( int* d_cavethreadidx, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int* d_cavetetthreadidx, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_cavebdrythreadidx, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; int eindex; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; d_cavetetthreadidx[eindex] = threadId; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; for (int j = 0; j < 3; j++) { d_cavebdrythreadidx[eindex + j] = threadId; } } } __global__ void kernelUpdateCavity2StarShapedAppend( int* d_cavethreadidx, tethandle* d_neighborlist, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_cavebdrythreadidx, int cavebdrystartindex, int cavebdryexpandsize, tethandle* d_cavetetlist, int* d_cavetetprev, int* d_cavetetnext, int* d_cavetethead, int* d_cavetettail, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int* d_cavetetthreadidx, int cavetetstartindex, int cavetetexpandsize, int cavebdrycurstartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; int cur = cavebdrycurstartindex + pos; tethandle cavetet = d_cavebdrylist[cur]; int eindex, sindex, prev; if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; sindex = cavebdrystartindex + eindex; tethandle neightet, neineitet; cudamesh_fsym(cavetet, neightet, d_neighborlist); int newid; if (eindex == 0 || d_cavebdrythreadidx[eindex - 1] != threadId) { prev = d_cavebdrytail[threadId]; d_cavebdrynext[prev] = sindex; // prev must not be -1 } else prev = sindex - 1; // Add three new faces to find new boundaries. 
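// The three side faces of the cut tet (reached via esym/enextself on
// neightet) become new boundary candidates; they are linked into this
// thread's cavebdry list and, presumably, re-tested by
// kernelUpdateCavity2StarShapedCheck in a later pass of the host-side loop
// (not shown here).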
for (int j = 0; j < 3; j++) { newid = sindex + j; cudamesh_esym(neightet, neineitet); neineitet.ver = raw_epivot[neineitet.ver]; d_cavebdrylist[newid] = neineitet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = newid + 1; // set to next one first prev = newid; cudamesh_enextself(neightet); } if (eindex + 2 == cavebdryexpandsize - 1 || d_cavebdrythreadidx[eindex + 3] != threadId) { //if (threadId == 153) // printf("threadId = %d, cavebdryexpandsize = %d, eindex + 2 = %d, d_cavebdrythreadidx[eindex + 3] = %d\n", // threadId, cavebdryexpandsize, eindex + 2, d_cavebdrythreadidx[eindex + 3]); d_cavebdrynext[newid] = -1; } } if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; sindex = cavetetstartindex + eindex; if (eindex == 0 || d_cavetetthreadidx[eindex - 1] != threadId) { prev = d_cavetettail[threadId]; if (prev != -1) d_cavetetnext[prev] = sindex; if (d_cavetethead[threadId] == -1) // initialize cavebdry list header d_cavetethead[threadId] = sindex; } else prev = sindex - 1; d_cavetetlist[sindex] = cavetet; d_cavetetprev[sindex] = prev; d_cavetetnext[sindex] = sindex + 1; if (eindex == cavetetexpandsize - 1 || d_cavetetthreadidx[eindex + 1] != threadId) d_cavetetnext[sindex] = -1; } } __global__ void kernelUpdateCavity2StarShapedUpdateListTails( int* d_cavethreadidx, int* d_cavetetnext, int* d_cavetettail, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int cavetetstartindex, int* d_cavebdrynext, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int cavebdrystartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; int sindex, eindex, prev; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; sindex = cavetetstartindex + eindex; if (d_cavetetnext[sindex] == -1) d_cavetettail[threadId] = sindex; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; sindex = cavebdrystartindex + eindex + 2; if (d_cavebdrynext[sindex] == -1) { d_cavebdrytail[threadId] = sindex; } } } __global__ void kernelUpdateBoundaryFaces( int* d_threadlist, tethandle* d_neighborlist, tethandle* d_cavetetlist, int* d_cavetetnext, int* d_cavetethead, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cutcount, uint64* d_tetmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; tethandle cavetet, neightet, prevtet; if (d_cutcount[threadId] > 0) { // Reuse old space int cur = d_cavebdryhead[threadId]; int prev = -1; int i = d_cavetethead[threadId]; while (i != -1) { cavetet = d_cavetetlist[i]; cudamesh_fsym(cavetet, neightet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { d_cavebdrylist[cur] = cavetet; prev = cur; cur = d_cavebdrynext[cur]; } i = d_cavetetnext[i]; if (i == -1) // reach the end of new boundary faces { if (prev != -1) { d_cavebdrynext[prev] = -1; d_cavebdrytail[threadId] = prev; } else { // this should not happen } } } } } __global__ void kernelUpdateOldTets( int* d_insertidxlist, int* d_threadlist, tethandle* d_neighborlist, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, tethandle* d_caveoldtetlist, int* d_caveoldtetprev, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_caveoldtettail, int* d_cutcount, uint64* d_tetmarker, int* d_threadmarker, int numofthreads ) { int pos = 
blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; tethandle cavetet, neightet; if (d_cutcount[threadId] > 0) { // Reuse old space int prev = -1; int i = d_caveoldtethead[threadId]; while (i != -1) { cavetet = d_caveoldtetlist[i]; if (cudamesh_getUInt64PriorityIndex(d_tetmarker[cavetet.id]) == threadId) { if (prev != -1) d_caveoldtetnext[prev] = i; else d_caveoldtethead[threadId] = i; d_caveoldtetprev[i] = prev; prev = i; } i = d_caveoldtetnext[i]; if (i == -1) // reach the end of new boundary faces { if (prev != -1) { d_caveoldtetnext[prev] = -1; d_caveoldtettail[threadId] = prev; } else { // The cavity should contain at least one tet // Usually this would not happen int eleidx = d_insertidxlist[threadId]; if (threadmarker == 0) d_segstatus[eleidx].setAbortive(true); else if (threadmarker == 1) d_tristatus[eleidx].setAbortive(true); else if (threadmarker == 2) d_tetstatus[eleidx].setAbortive(true); d_threadmarker[threadId] = -1; } } } } } __global__ void kernelAdjacentCavitiesCheck( int* d_threadlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, int* d_priority, tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead, uint64* d_tetmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; if (d_threadmarker[threadId] == -1) return; int neighborId; tethandle cavetet; int i = d_cavebdryhead[threadId]; while (i != -1) { cavetet = d_cavebdrylist[i]; neighborId = cudamesh_getUInt64PriorityIndex(d_tetmarker[cavetet.id]); if (neighborId != MAXUINT && neighborId != threadId) // neighbor also marked { if (d_threadmarker[neighborId] != -1) // neighbor is alive also { if(threadId > neighborId) { d_threadmarker[threadId] = -1; return; } } } i = d_cavebdrynext[i]; } } __global__ void kernelUpdateSubcavities( int* d_threadlist, tethandle* d_neighborlist, tethandle* d_tri2tetlist, trihandle* d_caveshlist, int* d_caveshprev, int* d_caveshnext, int* d_caveshhead, int* d_caveshtail, int* d_cutshcount, uint64* d_tetmarker, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; trihandle parysh; tethandle neightet; // Reuse old space bool enqflag; int cutshcount = 0; int prev = -1; int i = d_caveshhead[threadId]; // for dangling segment, this is -1 while (i != -1) { parysh = d_caveshlist[i]; if (cudamesh_getUInt64PriorityIndex(d_trimarker[parysh.id]) == threadId) { enqflag = false; cudamesh_stpivot(parysh, neightet, d_tri2tetlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { cudamesh_fsymself(neightet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) enqflag = true; } if (enqflag) { if (prev != -1) d_caveshnext[prev] = i; else d_caveshhead[threadId] = i; d_caveshprev[i] = prev; prev = i; } else { d_trimarker[parysh.id] = MAXULL; cutshcount++; } } i = d_caveshnext[i]; if (i == -1) // reach the end of subcavity faces { if (prev != -1) { d_caveshnext[prev] = -1; d_caveshtail[threadId] = prev; } } } d_cutshcount[pos] = cutshcount; } __global__ void kernelValidateSubcavities( int* d_insertidxlist, locateresult* d_pointlocation, trihandle* d_searchsh, int* d_threadlist, trihandle* d_seg2trilist, tristatus* d_segstatus, int* d_trifacelist, trihandle* d_tri2trilist, tristatus* d_tristatus, int* d_segmarker, uint64* 
d_trimarker, int* d_cutshcount, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int cutshcount = d_cutshcount[pos]; if (cutshcount == 0) return; int threadmarker = d_threadmarker[threadId]; locateresult loc = d_pointlocation[threadId]; int i = 0; trihandle splitsh, neighsh; if (loc == ONFACE) { if (threadmarker == 1) { splitsh = d_searchsh[threadId]; if (cudamesh_getUInt64PriorityIndex(d_trimarker[splitsh.id]) != threadId) { printf("threadId #%d - Invalid trimarker #%d - %d\n", threadId, splitsh.id, cudamesh_getUInt64PriorityIndex(d_trimarker[splitsh.id])); i++; } } } else if (loc == ONEDGE) { if (threadmarker == 0) { int segId = d_insertidxlist[threadId]; trihandle splitseg(segId, 0); if (d_segmarker[segId] != threadId) { printf("threadId #%d - Invalid segmarker %d\n", threadId, d_segmarker[segId]); i++; } cudamesh_spivot(splitseg, splitsh, d_seg2trilist); } else if (threadmarker == 1) { splitsh = d_searchsh[threadId]; } if (splitsh.id != -1) { // All subfaces at this edge should be in subcavity int pa = cudamesh_sorg(splitsh, d_trifacelist); neighsh = splitsh; while (1) { if (cudamesh_sorg(neighsh, d_trifacelist) != pa) { cudamesh_sesymself(neighsh); } if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) { printf("threadId #%d - Invalid trimarker #%d - %d\n", threadId, neighsh.id, cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id])); i++; } cudamesh_spivotself(neighsh, d_tri2trilist); if (neighsh.id == splitsh.id) break; if (neighsh.id == -1) break; } } } if (i > 0) { int eleidx = d_insertidxlist[threadId]; if (threadmarker == 0) { d_segstatus[eleidx].setAbortive(true); } else if (threadmarker == 1) { d_tristatus[eleidx].setAbortive(true); } d_threadmarker[threadId] = -1; } } __global__ void kernelValidateRefinementElements( int* d_insertidxlist, trihandle* d_searchsh, tethandle* d_searchtet, tethandle* d_neighborlist, int* d_threadlist, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, uint64* d_trimarker, uint64* d_tetmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; int insertidx = d_insertidxlist[threadId]; if (threadmarker == 0) { tethandle spintet; tethandle searchtet = d_searchtet[threadId]; spintet = searchtet; while (1) { if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) != threadId) { d_segstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; break; } cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == searchtet.id) break; } } else if (threadmarker == 1) { int elementid = d_searchsh[threadId].id; if (cudamesh_getUInt64PriorityIndex(d_trimarker[elementid]) != threadId) { d_tristatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; } } else if (threadmarker == 2) { if (cudamesh_getUInt64PriorityIndex(d_tetmarker[insertidx]) != threadId) { d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; } } } __global__ void kernelCheckDistances2ClosePoints( int* d_insertidxlist, REAL* d_insertptlist, locateresult* d_pointlocation, tethandle* d_searchtet, int* d_threadlist, REAL* d_pointlist, tethandle* d_neighborlist, int* d_tetlist, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= 
numofthreads) return; int threadId = d_threadlist[pos]; int insertiontype = d_threadmarker[threadId]; int insertidx = d_insertidxlist[threadId]; REAL* insertpt = d_insertptlist + 3 * threadId; tethandle searchtet, spintet; searchtet = d_searchtet[threadId]; int ptidx, i; REAL* pt, rd; REAL minedgelength = raw_kernelconstants[0]; locateresult loc = d_pointlocation[threadId]; if (loc == ONEDGE) { spintet = searchtet; ptidx = cudamesh_org(spintet, d_tetlist); pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 0) d_segstatus[insertidx].setAbortive(true); else if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } ptidx = cudamesh_dest(spintet, d_tetlist); pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 0) d_segstatus[insertidx].setAbortive(true); else if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } while (1) { ptidx = cudamesh_apex(spintet, d_tetlist); if (ptidx != -1) { pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 0) d_segstatus[insertidx].setAbortive(true); else if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == searchtet.id) break; } } else if (loc == ONFACE) { for (i = 0; i < 3; i++) { ptidx = d_tetlist[4 * searchtet.id + i]; pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } ptidx = d_tetlist[4 * searchtet.id + 3]; if (ptidx != -1) { pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } cudamesh_fsym(searchtet, spintet, d_neighborlist); ptidx = cudamesh_oppo(spintet, d_tetlist); if (ptidx != -1) { pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } } else if (loc == INTETRAHEDRON) { for (i = 0; i < 4; i++) { ptidx = d_tetlist[4 * searchtet.id + i]; pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } } } __global__ void kernelComputeShortestEdgeLength( int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, REAL* d_insertptlist, REAL* d_smlen, int* d_parentpt, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= 
numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) // a tetrahedron return; REAL* insertpt = d_insertptlist + 3 * threadId; tethandle cavetet; int ptidx, parentpt; REAL *pts, smlen = -1.0, len; int i = d_caveoldtethead[threadId], j; cavetet = d_caveoldtetlist[i]; ptidx = d_tetlist[4 * cavetet.id + 0]; pts = cudamesh_id2pointlist(ptidx, d_pointlist); smlen = cudamesh_distance(pts, insertpt); parentpt = ptidx; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (j = 0; j < 4; j++) { ptidx = d_tetlist[4 * cavetet.id + j]; if (ptidx == -1) continue; pts = cudamesh_id2pointlist(ptidx, d_pointlist); len = cudamesh_distance(pts, insertpt); if(len < smlen) { smlen = len; parentpt = ptidx; } } i = d_caveoldtetnext[i]; } d_smlen[threadId] = smlen; d_parentpt[threadId] = parentpt; } __global__ void kernelUpdateCavitySubsegs( int* d_threadlist, int* d_tetlist, tethandle* d_neighborlist, int* d_seglist, tethandle* d_seg2tetlist, trihandle* d_cavetetseglist, int* d_cavetetsegprev, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_cavetetsegtail, uint64* d_tetmarker, int* d_segmarker, int* d_segmarker2, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; tethandle spintet, neightet, neineitet; trihandle paryseg, checkseg; // Reuse old space int j, k, markeridx; int prev = -1; int i = d_cavetetseghead[threadId]; while (i != -1) { paryseg = d_cavetetseglist[i]; if (d_segmarker[paryseg.id] != threadId) // not a splitting segment { // Check if the segment is inside the cavity. // 'j' counts the num of adjacent tets of this seg. // 'k' counts the num of adjacent tets which are 'infected'. 
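// Classification used below:
//   k == 0    : no incident tet lies in the cavity, the segment is dropped
//               from the list;
//   0 < k < j : the segment is on the cavity boundary; it is re-bonded
//               (cudamesh_sstbond1) to a recorded outside tet and kept;
//   k == j    : the segment would be entirely inside the cavity, which is
//               treated as an error (assert(0)).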
j = k = 0; cudamesh_sstpivot1(paryseg, neightet, d_seg2tetlist); spintet = neightet; while (1) { j++; markeridx = cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]); if (markeridx != threadId) // outside cavity { // Remember it only when it is not inside other cavities // (possible when cavities share edges/segments) if (markeridx == MAXUINT || (markeridx != MAXUINT && d_threadmarker[markeridx] == -1)) // a unmarked tet or a tet belongs to loser neineitet = spintet; } else { k++; } cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } if (k == 0) // should be removed { } else if (k < j) // on the boundary { assert(neineitet.id != -1); // there must be a tet that is not included in any cavities // connect it to the recorded outer tet cudamesh_sstbond1(paryseg, neineitet, d_seg2tetlist); // update cavetetseg list if (prev != -1) d_cavetetsegnext[prev] = i; else d_cavetetseghead[threadId] = i; d_cavetetsegprev[i] = prev; prev = i; } else // impossible { assert(0); printf("Error: Segment #%d is inside the cavity!\n", paryseg.id); } } i = d_cavetetsegnext[i]; if (i == -1) // reach the end of cavetetseg { if (prev != -1) // when there is at least one boundary segment { d_cavetetsegnext[prev] = -1; d_cavetetsegtail[threadId] = prev; } else // no boundary segment { d_cavetetseghead[threadId] = -1; d_cavetetsegtail[threadId] = -1; } } } } __global__ void kernelUpdateCavitySubfaces( int* d_threadlist, tethandle* d_neighborlist, tethandle* d_tri2tetlist, trihandle* d_cavetetshlist, int* d_cavetetshprev, int* d_cavetetshnext, int* d_cavetetshhead, int* d_cavetetshtail, uint64* d_tetmarker, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; tethandle neightet; trihandle parysh, checksh; // Reuse old space int j, k; int prev = -1; int i = d_cavetetshhead[threadId]; while (i != -1) { parysh = d_cavetetshlist[i]; if (cudamesh_getUInt64PriorityIndex(d_trimarker[parysh.id]) != threadId) // not inside subcavity { // Check if this subface is inside the cavity. 
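// Both sides of the subface are visited (cudamesh_sesymself flips the side);
// k counts the sides whose adjacent tet is inside this thread's cavity.
// k == 1 means the subface lies on the cavity boundary: it is kept, stored
// with the side (checksh) that faces the outside tet.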
k = 0; for (j = 0; j < 2; j++) { cudamesh_stpivot(parysh, neightet, d_tri2tetlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) != threadId) { checksh = parysh; // remember this side } else { k++; } cudamesh_sesymself(parysh); } if (k == 0) // should be removed { } else if (k == 1) // on the boundary { parysh = checksh; // update cavetetsh list if (prev != -1) d_cavetetshnext[prev] = i; else d_cavetetshhead[threadId] = i; d_cavetetshprev[i] = prev; d_cavetetshlist[i] = parysh; prev = i; } else // impossible { assert(0); printf("Error: Subface #%d is inside the cavity!\n", parysh.id); } } i = d_cavetetshnext[i]; if (i == -1) // reach the end of cavetetsh { if (prev != -1) // when there is at least one boundary subface { d_cavetetshnext[prev] = -1; d_cavetetshtail[threadId] = prev; } else // no boundary subface { d_cavetetshhead[threadId] = -1; d_cavetetshtail[threadId] = -1; } } } } __global__ void kernelInsertNewPoints( int* d_threadlist, REAL* d_pointlist, verttype* d_pointtypelist, REAL* d_insertptlist, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; int newidx = oldpointsize + pos; if (threadmarker == 0) d_pointtypelist[newidx] = FREESEGVERTEX; else if(threadmarker == 1) d_pointtypelist[newidx] = FREEFACETVERTEX; else d_pointtypelist[newidx] = FREEVOLVERTEX; newidx *= 3; REAL* insertpt = d_insertptlist + 3 * threadId; d_pointlist[newidx++] = insertpt[0]; d_pointlist[newidx++] = insertpt[1]; d_pointlist[newidx++] = insertpt[2]; } __global__ void kernelCountNewTets( int* d_threadlist, tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead, int* d_tetexpandsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int expandsize = 0; int i = d_cavebdryhead[threadId]; while (i != -1) { expandsize++; i = d_cavebdrynext[i]; } d_tetexpandsize[pos] = expandsize; } __global__ void kernelInsertNewTets( int* d_threadlist, tethandle* d_point2tetlist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* d_tet2seglist, tetstatus* d_tetstatus, tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead, int* d_tetexpandindices, int* d_emptytetindices, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int startidx = d_tetexpandindices[pos], newtetidx; int newptidx = oldpointsize + pos; tethandle neightet, oldtet, newtet; int i = d_cavebdryhead[threadId]; while (i != -1) { newtetidx = d_emptytetindices[startidx++]; neightet = d_cavebdrylist[i]; cudamesh_fsym(neightet, oldtet, d_neighborlist); // Get the oldtet (inside the cavity). // There might be duplicate elements in cavebdrylist. // In that case, oldtet will be newtet. 
// Check to avoid this case.
if (!d_tetstatus[oldtet.id].isEmpty())
{
    if (cudamesh_apex(neightet, d_tetlist) != -1)
    {
        // Create a new tet in the cavity
        newtet.id = newtetidx;
        newtet.ver = 11;
        cudamesh_setorg(newtet, cudamesh_dest(neightet, d_tetlist), d_tetlist);
        cudamesh_setdest(newtet, cudamesh_org(neightet, d_tetlist), d_tetlist);
        cudamesh_setapex(newtet, cudamesh_apex(neightet, d_tetlist), d_tetlist);
        cudamesh_setoppo(newtet, newptidx, d_tetlist);
    }
    else
    {
        // Create a new hull tet
        newtet.id = newtetidx;
        newtet.ver = 11;
        cudamesh_setorg(newtet, cudamesh_org(neightet, d_tetlist), d_tetlist);
        cudamesh_setdest(newtet, cudamesh_dest(neightet, d_tetlist), d_tetlist);
        cudamesh_setapex(newtet, newptidx, d_tetlist);
        cudamesh_setoppo(newtet, -1, d_tetlist); // It must be opposite to face 3.
        // Adjust back to the cavity boundary face.
        cudamesh_esymself(newtet);
    }
    // Connect newtet <==> neightet, this also disconnects the old bond.
    cudamesh_bond(newtet, neightet, d_neighborlist);
    // Oldtet still connects to neightet
    d_cavebdrylist[i] = oldtet;
}
else // duplicate elements cause fake oldtet
{
    d_cavebdrylist[i] = tethandle(-1, 11);
}
i = d_cavebdrynext[i];
}
d_point2tetlist[newptidx] = newtet;
}

__global__ void kernelConnectNewTetNeighbors(
    int* d_threadlist,
    tethandle* d_point2tetlist,
    int* d_tetlist,
    tethandle* d_neighborlist,
    tetstatus* d_tetstatus,
    tethandle* d_cavebdrylist,
    int* d_cavebdrynext,
    int* d_cavebdryhead,
    uint64* d_tetmarker,
    int numofthreads
)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos >= numofthreads)
        return;
    int threadId = d_threadlist[pos];
    tethandle oldtet, neightet, newtet, newneitet, spintet;
    int orgidx;
    int i = d_cavebdryhead[threadId], j;
    while (i != -1)
    {
        // Get the newtet and oldtet at the same face.
        oldtet = d_cavebdrylist[i];
        if (oldtet.id != -1) // not fake one
        {
            cudamesh_fsym(oldtet, neightet, d_neighborlist);
            cudamesh_fsym(neightet, newtet, d_neighborlist);
            // Comment: oldtet and newtet must be at the same directed edge.
            // Connect the three other faces of this newtet.
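// For each of the three faces: if it is not bonded yet, spin around the
// corresponding edge of oldtet (cudamesh_fnextself) until a tet outside this
// cavity is reached; the new tet mirrored across that face (cudamesh_fsym +
// cudamesh_esymself) is the neighbor to bond with.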
for (j = 0; j < 3; j++) { cudamesh_esym(newtet, neightet); // Go to the face // Do not have neighbor yet if (d_neighborlist[4 * neightet.id + (neightet.ver & 3)].id == -1) { // Find the adjacent face of this new tet spintet = oldtet; while (1) { cudamesh_fnextself(spintet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) != threadId) break; } cudamesh_fsym(spintet, newneitet, d_neighborlist); cudamesh_esymself(newneitet); cudamesh_bond(neightet, newneitet, d_neighborlist); } orgidx = cudamesh_org(newtet, d_tetlist); if(orgidx != -1) d_point2tetlist[orgidx] = newtet; cudamesh_enextself(newtet); cudamesh_enextself(oldtet); } d_cavebdrylist[i] = newtet; // Save the new tet // Update tetstatus d_tetstatus[oldtet.id].clear(); d_tetstatus[newtet.id].setEmpty(false); } i = d_cavebdrynext[i]; } // Check neighbor //i = d_cavebdryhead[threadId]; //while (i != -1) //{ // newtet = d_cavebdrylist[i]; // if (newtet.id != -1) // { // for (j = 0; j < 4; j++) // { // newtet.ver = j; // neightet = d_neighborlist[4 * newtet.id + (newtet.ver & 3)]; // if (d_neighborlist[4 * neightet.id + (neightet.ver & 3)].id != newtet.id) // printf("Wrong neighbor(%d): Tet#%d - %d, %d, %d, %d, Tet#%d - %d, %d, %d, %d\n", // threadId, // newtet.id, // d_neighborlist[4 * newtet.id + 0].id, d_neighborlist[4 * newtet.id + 1].id, // d_neighborlist[4 * newtet.id + 2].id, d_neighborlist[4 * newtet.id + 3].id, // neightet.id, // d_neighborlist[4 * neightet.id + 0].id, d_neighborlist[4 * neightet.id + 1].id, // d_neighborlist[4 * neightet.id + 2].id, d_neighborlist[4 * neightet.id + 3].id); // } // } // i = d_cavebdrynext[i]; //} } __global__ void kernelConnectBoundarySubfaces2NewTets( int* d_threadlist, tethandle* d_tri2tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; trihandle parysh; tethandle neightet, newtet; int i = d_cavetetshhead[threadId]; while (i != -1) { parysh = d_cavetetshlist[i]; // this is connect to a outside tet // Connect it if it is a boundary subface if (cudamesh_getUInt64PriorityIndex(d_trimarker[parysh.id]) != threadId) { cudamesh_stpivot(parysh, neightet, d_tri2tetlist); cudamesh_fsym(neightet, newtet, d_neighborlist); cudamesh_sesymself(parysh); cudamesh_tsbond(newtet, parysh, d_tet2trilist, d_tri2tetlist); } i = d_cavetetshnext[i]; } } __global__ void kernelConnectBoundarySubsegs2NewTets( int* d_threadlist, tethandle* d_seg2tetlist, tethandle* d_neighborlist, trihandle* d_tet2seglist, tetstatus* d_tetstatus, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_segmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; trihandle paryseg; tethandle neightet, spintet; int i = d_cavetetseghead[threadId]; while (i != -1) { paryseg = d_cavetetseglist[i]; // Connect it if it is a boundary subseg if (d_segmarker[paryseg.id] != threadId) { cudamesh_sstpivot1(paryseg, neightet, d_seg2tetlist); spintet = neightet; while (1) { cudamesh_tssbond1(spintet, paryseg, d_tet2seglist); cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } } else { // This may happen when there is only one splitting segment } i = d_cavetetsegnext[i]; } } __global__ void kernelSubCavityBoundaryEdgeCheck( int* d_threadlist, 
trihandle* d_tri2seglist, trihandle* d_tri2trilist, trihandle* d_caveshlist, int* d_caveshnext, int* d_caveshhead, int* d_caveshbdsize, uint64* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) { d_caveshbdsize[pos] = 0; return; } trihandle cavesh, neighsh; REAL sign; int caveshbdsize = 0; int i = d_caveshhead[threadId], j; while (i != -1) { cavesh = d_caveshlist[i]; for (j = 0; j < 3; j++) { if (!cudamesh_isshsubseg(cavesh, d_tri2seglist)) { cudamesh_spivot(cavesh, neighsh, d_tri2trilist); if (neighsh.id != -1) { if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) { // A boundary edge sign = 1; } else { // Internal edge sign = -1; } } else { // A boundary edge sign = 1; } } else { // A segment. It is a boundary edge sign = 1; } if (sign >= 0) { caveshbdsize++; } cudamesh_senextself(cavesh); } i = d_caveshnext[i]; } d_caveshbdsize[pos] = caveshbdsize; } __global__ void kernelSubCavityBoundaryEdgeAppend( int* d_threadlist, trihandle* d_tri2seglist, trihandle* d_tri2trilist, trihandle* d_caveshlist, int* d_caveshnext, int* d_caveshhead, trihandle* d_caveshbdlist, int* d_caveshbdprev, int* d_caveshbdnext, int* d_caveshbdhead, int* d_caveshbdtail, int* d_caveshbdsize, int* d_caveshbdindices, uint64* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; trihandle cavesh, neighsh; REAL sign; int caveshbdsize = d_caveshbdsize[pos]; if (caveshbdsize == 0) return; int prev = -1, newid = d_caveshbdindices[pos]; int i = d_caveshhead[threadId], j; while (i != -1) { cavesh = d_caveshlist[i]; for (j = 0; j < 3; j++) { if (!cudamesh_isshsubseg(cavesh, d_tri2seglist)) { cudamesh_spivot(cavesh, neighsh, d_tri2trilist); if (neighsh.id != -1) { if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) { // A boundary edge sign = 1; } else { // Internal edge sign = -1; } } else { // A boundary edge sign = 1; } } else { // A segment. 
It is a boundary edge sign = 1; } if (sign >= 0) { d_caveshbdlist[newid] = cavesh; d_caveshbdprev[newid] = prev; d_caveshbdnext[newid] = -1; if (prev != -1) d_caveshbdnext[prev] = newid; else d_caveshbdhead[threadId] = newid; prev = newid; newid++; } cudamesh_senextself(cavesh); } i = d_caveshnext[i]; if (i == -1) // reach the end of list { d_caveshbdtail[threadId] = prev; } } } __global__ void kernelInsertNewSubfaces( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, trihandle* d_seg2trilist, int* d_trifacelist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, int* d_tri2parentidxlist, tristatus* d_tristatus, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, int* d_caveshbdindices, int* d_emptytriindices, trihandle* d_casout, trihandle* d_casin, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; int startidx = d_caveshbdindices[pos], newtriidx; int newptidx = oldpointsize + pos; trihandle parysh, checkseg, newsh, casin, casout, neighsh; int pa, pb; int i = d_caveshbdhead[threadId]; while (i != -1) { parysh = d_caveshbdlist[i]; cudamesh_sspivot(parysh, checkseg, d_tri2seglist); if ((parysh.shver & 01) != 0) cudamesh_sesymself(parysh); pa = cudamesh_sorg(parysh, d_trifacelist); pb = cudamesh_sdest(parysh, d_trifacelist); // Create a new subface newtriidx = d_emptytriindices[startidx++]; newsh.id = newtriidx; newsh.shver = 0; cudamesh_setsorg(newsh, pa, d_trifacelist); cudamesh_setsdest(newsh, pb, d_trifacelist); cudamesh_setsapex(newsh, newptidx, d_trifacelist); d_tri2parentidxlist[newtriidx] = d_tri2parentidxlist[parysh.id]; if (d_pointtypelist[pa] == FREEFACETVERTEX) { d_point2trilist[pa] = newsh; } if (d_pointtypelist[pb] == FREEFACETVERTEX) { d_point2trilist[pb] = newsh; } // Save the outer subfaces first cudamesh_spivot(parysh, casout, d_tri2trilist); d_casout[i] = casout; if (casout.id != -1) { casin = casout; if (checkseg.id != -1) { // Make sure that newsh has the right ori at this segment. checkseg.shver = 0; if (cudamesh_sorg(newsh, d_trifacelist) != cudamesh_sorg(checkseg, d_seglist)) { cudamesh_sesymself(newsh); cudamesh_sesymself(parysh); // This side should also be inverse. 
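// Note: the spivot loop that follows (after this orientation fix) walks the
// subface ring around the current edge, starting from casout, until it comes
// back to parysh; casin then holds the subface that precedes parysh in the
// ring, so the new subface can later be spliced in between casin and casout.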
} cudamesh_spivot(casin, neighsh, d_tri2trilist); while (neighsh.id != parysh.id) { casin = neighsh; cudamesh_spivot(casin, neighsh, d_tri2trilist); } } d_casin[i] = casin; } i = d_caveshbdnext[i]; } } __global__ void kernelConnectNewSubface2OuterSubface_Phase1( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, trihandle* d_seg2trilist, int* d_trifacelist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, int* d_tri2parentidxlist, tristatus* d_tristatus, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, int* d_caveshbdindices, int* d_emptytriindices, trihandle* d_casout, trihandle* d_casin, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; int startidx = d_caveshbdindices[pos], newtriidx; int newptidx = oldpointsize + pos; trihandle parysh, checkseg, newsh, casin, casout, neighsh; int pa, pb; int i = d_caveshbdhead[threadId]; while (i != -1) { parysh = d_caveshbdlist[i]; cudamesh_sspivot(parysh, checkseg, d_tri2seglist); if ((parysh.shver & 01) != 0) cudamesh_sesymself(parysh); // Create a new subface newtriidx = d_emptytriindices[startidx++]; newsh.id = newtriidx; newsh.shver = 0; // Connect newsh to outer old subfaces (Phase 1). casout = d_casout[i]; if (casout.id != -1) { //casin = casout; if (checkseg.id != -1) { // Make sure that newsh has the right ori at this segment. checkseg.shver = 0; if (cudamesh_sorg(newsh, d_trifacelist) != cudamesh_sorg(checkseg, d_seglist)) { cudamesh_sesymself(newsh); cudamesh_sesymself(parysh); // This side should also be inverse. } } casin = d_casin[i]; cudamesh_sbond1(newsh, casout, d_tri2trilist); cudamesh_sbond1(casin, newsh, d_tri2trilist); } i = d_caveshbdnext[i]; } } __global__ void kernelConnectNewSubface2OuterSubface_Phase2( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, trihandle* d_seg2trilist, int* d_trifacelist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, int* d_tri2parentidxlist, tristatus* d_tristatus, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, int* d_caveshbdindices, int* d_emptytriindices, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; int startidx = d_caveshbdindices[pos], newtriidx; int newptidx = oldpointsize + pos; trihandle parysh, checkseg, newsh, casout; int i = d_caveshbdhead[threadId]; while (i != -1) { parysh = d_caveshbdlist[i]; cudamesh_sspivot(parysh, checkseg, d_tri2seglist); if ((parysh.shver & 01) != 0) { cudamesh_sesymself(parysh); d_caveshbdlist[i] = parysh; // Update the element in the list } // Create a new subface newtriidx = d_emptytriindices[startidx++]; newsh.id = newtriidx; newsh.shver = 0; // Connect newsh to outer subfaces (Phase 2). // Check if old subface is connected to new one, // if so, fix it cudamesh_spivot(parysh, casout, d_tri2trilist); if (casout.id != -1) { if (checkseg.id != -1) { // Make sure that newsh has the right ori at this segment. checkseg.shver = 0; if (cudamesh_sorg(newsh, d_trifacelist) != cudamesh_sorg(checkseg, d_seglist)) { cudamesh_sesymself(newsh); cudamesh_sesymself(parysh); // This side should also be inverse. 
d_caveshbdlist[i] = parysh; // Update the element in the list } } if (d_tristatus[casout.id].isEmpty()) // old subface is connected to new one { cudamesh_sbond1(newsh, casout, d_tri2trilist); } } if (checkseg.id != -1) { cudamesh_ssbond(newsh, checkseg, d_tri2seglist, d_seg2trilist); } // Connect oldsh <== newsh (for connecting adjacent new subfaces). // parysh and newsh point to the same edge and the same ori. cudamesh_sbond1(parysh, newsh, d_tri2trilist); i = d_caveshbdnext[i]; } if (d_pointtypelist[newptidx] == FREEFACETVERTEX) d_point2trilist[newptidx] = newsh; } __global__ void kernelConnectNewSubfaceNeighbors( int* d_threadlist, int* d_trifacelist, trihandle* d_tri2trilist, tristatus* d_tristatus, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, uint64* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; trihandle parysh, newsh, neighsh; int pa, pb; int i = d_caveshbdhead[threadId]; while (i != -1) { // Get an old subface at edge [a, b]. parysh = d_caveshbdlist[i]; cudamesh_spivot(parysh, newsh, d_tri2trilist); // The new subface [a, b, p]. cudamesh_senextself(newsh); // At edge [b, p]. cudamesh_spivot(newsh, neighsh, d_tri2trilist); if (neighsh.id == -1) // No neighbor yet { // Find the adjacent new subface at edge [b, p]. pb = cudamesh_sdest(parysh, d_trifacelist); neighsh = parysh; while (1) { cudamesh_senextself(neighsh); cudamesh_spivotself(neighsh, d_tri2trilist); if (neighsh.id == -1) break; if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) break; if (cudamesh_sdest(neighsh, d_trifacelist) != pb) cudamesh_sesymself(neighsh); } if (neighsh.id != -1) { // Now 'neighsh' is a new subface at edge [b, #]. if (cudamesh_sorg(neighsh, d_trifacelist) != pb) cudamesh_sesymself(neighsh); cudamesh_senext2self(neighsh); // Go to the open edge [p, b]. cudamesh_sbond(newsh, neighsh, d_tri2trilist); } else { assert(false); } } cudamesh_spivot(parysh, newsh, d_tri2trilist); // The new subface [a, b, p]. cudamesh_senext2self(newsh); // At edge [p, a]. cudamesh_spivot(newsh, neighsh, d_tri2trilist); if (neighsh.id == -1) // No neighbor yet { // Find the adjacent new subface at edge [p, a]. pa = cudamesh_sorg(parysh, d_trifacelist); neighsh = parysh; while (1) { cudamesh_senext2self(neighsh); cudamesh_spivotself(neighsh, d_tri2trilist); if (neighsh.id == -1) break; if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) break; if (cudamesh_sorg(neighsh, d_trifacelist) != pa) cudamesh_sesymself(neighsh); } if (neighsh.id != -1) { // Now 'neighsh' is a new subface at edge [#, a]. if (cudamesh_sdest(neighsh, d_trifacelist) != pa) cudamesh_sesymself(neighsh); cudamesh_senextself(neighsh); // Go to the open edge [a, p]. 
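// At this point neighsh is the adjacent new subface positioned at its open
// edge [a, p]; newsh (currently at edge [p, a]) is bonded to it next, so the
// two new subfaces share this edge.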
cudamesh_sbond(newsh, neighsh, d_tri2trilist); } else { assert(false); } } // Update tristatus d_tristatus[parysh.id].clear(); d_tristatus[newsh.id].setEmpty(false); i = d_caveshbdnext[i]; } } __global__ void kernelRemoveDegeneratedNewSubfaces( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_trifacelist, trihandle* d_tri2trilist, tristatus* d_tristatus, trihandle* d_cavesegshlist, int* d_cavesegshnext, int* d_cavesegshhead, int* d_cavesegshtail, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; trihandle parysh, cavesh, newsh, neighsh, casout; int newptidx = oldpointsize + pos; int i, j, head, next; i = head = d_cavesegshhead[threadId]; bool onesubface = (head == d_cavesegshtail[threadId]); while (i != -1) { // Get the next old subface. next = d_cavesegshnext[i]; // Get the saved old subface. parysh = d_cavesegshlist[i]; // Get a possible new degenerated subface. cudamesh_spivot(parysh, cavesh, d_tri2trilist); if (cudamesh_sapex(cavesh, d_trifacelist) == newptidx) // a new degenerated subface { if (onesubface) // only one degenerated subface { for (j = 0; j < 2; j++) { cudamesh_senextself(cavesh); cudamesh_spivot(cavesh, newsh, d_tri2trilist); cudamesh_sdissolve(newsh, d_tri2trilist); } } else // more than one degenerated subface share at this segment { if (next == -1) parysh = d_cavesegshlist[head]; else parysh = d_cavesegshlist[next]; cudamesh_spivot(parysh, neighsh, d_tri2trilist); // Adjust cavesh and neighsh both at edge a->b, and has p as apex. if (cudamesh_sorg(neighsh, d_trifacelist) != cudamesh_sorg(cavesh, d_trifacelist)) { cudamesh_sesymself(neighsh); assert(cudamesh_sorg(neighsh, d_trifacelist) == cudamesh_sorg(cavesh, d_trifacelist)); } assert(cudamesh_sapex(neighsh, d_trifacelist) == newptidx); // Connect adjacent faces at two other edges of cavesh and neighsh. // As a result, the two degenerated new faces are squeezed from the // new triangulation of the cavity. Note that the squeezed faces // still hold the adjacent informations which will be used in // re-connecting subsegments (if they exist). for (j = 0; j < 2; j++) { cudamesh_senextself(cavesh); cudamesh_senextself(neighsh); cudamesh_spivot(cavesh, newsh, d_tri2trilist); cudamesh_spivot(neighsh, casout, d_tri2trilist); cudamesh_sbond1(newsh, casout, d_tri2trilist); } } // Update tristatus d_tristatus[cavesh.id].clear(); // delete this degenerated subface // Update the point-to-subface map. 
if (d_pointtypelist[newptidx] == FREEFACETVERTEX) d_point2trilist[newptidx] = newsh; } i = next; } } __global__ void kernelInsertNewSubsegs( int* d_segidlist, int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, trihandle* d_seg2trilist, int* d_seg2parentidxlist, tristatus* d_segstatus, int* d_trifacelist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, int* d_segencmarker, trihandle* d_cavesegshlist, int* d_cavesegshnext, int* d_cavesegshhead, int* d_emptysegindices, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0) return; int newptidx = oldpointsize + pos; trihandle splitseg, aseg, bseg, aoutseg, boutseg; int pa, pb; splitseg = trihandle(d_segidlist[threadId], 0); pa = cudamesh_sorg(splitseg, d_seglist); pb = cudamesh_sdest(splitseg, d_seglist); // Set new segments aseg.id = d_emptysegindices[2 * pos]; aseg.shver = 0; bseg.id = d_emptysegindices[2 * pos + 1]; bseg.shver = 0; cudamesh_setsorg(aseg, pa, d_seglist); cudamesh_setsdest(aseg, newptidx, d_seglist); cudamesh_setsapex(aseg, -1, d_seglist); cudamesh_setsorg(bseg, newptidx, d_seglist); cudamesh_setsdest(bseg, pb, d_seglist); cudamesh_setsapex(bseg, -1, d_seglist); d_seg2parentidxlist[aseg.id] = d_seg2parentidxlist[splitseg.id]; d_seg2parentidxlist[bseg.id] = d_seg2parentidxlist[splitseg.id]; // Update segstatus d_segstatus[splitseg.id].clear(); d_segstatus[aseg.id].setEmpty(false); d_segstatus[bseg.id].setEmpty(false); // Reset segment encroachement marker d_segencmarker[splitseg.id] = -1; // Connect [#, a]<->[a, p]. It is possible that [#, a] is an old segment to be removed cudamesh_senext2(splitseg, boutseg); // Temporarily use boutseg. cudamesh_spivotself(boutseg, d_seg2trilist); if (boutseg.id != -1) { cudamesh_senext2(aseg, aoutseg); cudamesh_sbond(boutseg, aoutseg, d_seg2trilist); } // Connect [p, b]<->[b, #]. It is possible that [b, #] is an old segment to be removed cudamesh_senext(splitseg, aoutseg); cudamesh_spivotself(aoutseg, d_seg2trilist); if (aoutseg.id != -1) { cudamesh_senext(bseg, boutseg); cudamesh_sbond(boutseg, aoutseg, d_seg2trilist); } // Connect [a, p] <-> [p, b]. cudamesh_senext(aseg, aoutseg); cudamesh_senext2(bseg, boutseg); cudamesh_sbond(aoutseg, boutseg, d_seg2trilist); // Connect subsegs [a, p] and [p, b] to adjacent new subfaces. // Although the degenerated new faces have been squeezed. They still // hold the connections to the actual new faces. trihandle parysh, neighsh, newsh; int i = d_cavesegshhead[threadId]; while (i != -1) { parysh = d_cavesegshlist[i]; cudamesh_spivot(parysh, neighsh, d_tri2trilist); // neighsh is a degenerated new face. 
if (cudamesh_sorg(neighsh, d_trifacelist) != pa) { cudamesh_sesymself(neighsh); } cudamesh_senext2(neighsh, newsh); cudamesh_spivotself(newsh, d_tri2trilist); // The edge [p, a] in newsh cudamesh_ssbond(newsh, aseg, d_tri2seglist, d_seg2trilist); cudamesh_senext(neighsh, newsh); cudamesh_spivotself(newsh, d_tri2trilist); // The edge [b, p] in newsh cudamesh_ssbond(newsh, bseg, d_tri2seglist, d_seg2trilist); i = d_cavesegshnext[i]; } if (d_pointtypelist[newptidx] == FREESEGVERTEX) d_point2trilist[newptidx] = aseg; if (d_pointtypelist[pa] == FREESEGVERTEX) d_point2trilist[pa] = aseg; if (d_pointtypelist[pb] == FREESEGVERTEX) d_point2trilist[pb] = bseg; } __global__ void kernelConnectNewSubseg2OuterSubseg( int* d_segidlist, int* d_threadlist, trihandle* d_seg2trilist, int* d_segmarker, trihandle* d_cavesegshlist, int* d_cavesegshprev, int* d_cavesegshnext, int* d_cavesegshhead, int* d_cavesegshtail, int* d_emptysegindices, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0) return; trihandle splitseg, aseg, bseg, aoutseg, boutseg; // Get old and new segments splitseg = trihandle(d_segidlist[threadId], 0); aseg.id = d_emptysegindices[2 * pos]; aseg.shver = 0; bseg.id = d_emptysegindices[2 * pos + 1]; bseg.shver = 0; // Connect [#, a]<->[a, p]. // If [a, b] is connected to a new segment [#, a], // then it is possible that [a, p] is connected to an old segment [*, a]. // Fix it. cudamesh_senext2(splitseg, boutseg); cudamesh_spivotself(boutseg, d_seg2trilist); if (boutseg.id != -1 && d_segmarker[boutseg.id] == MAXINT) { cudamesh_senext2(aseg, aoutseg); cudamesh_sbond(boutseg, aoutseg, d_seg2trilist); } // Connect [p, b]<->[b, #]. // if [a, b] is connected to a new segment [b, #], // then it is possible that [p, b] is connected to an old segment [b, *]. // Fix it. cudamesh_senext(splitseg, aoutseg); cudamesh_spivotself(aoutseg, d_seg2trilist); if (aoutseg.id != -1 && d_segmarker[aoutseg.id] == MAXINT) { cudamesh_senext(bseg, boutseg); cudamesh_sbond(boutseg, aoutseg, d_seg2trilist); } // Add new segments into list int newidx = 2 * pos; d_cavesegshhead[threadId] = newidx; d_cavesegshtail[threadId] = newidx + 1; d_cavesegshlist[newidx] = aseg; d_cavesegshprev[newidx] = -1; d_cavesegshnext[newidx] = newidx + 1; d_cavesegshlist[newidx + 1] = bseg; d_cavesegshprev[newidx + 1] = newidx; d_cavesegshnext[newidx + 1] = -1; } __global__ void kernelConnectNewSubfaces2NewTets( int* d_threadlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, uint64* d_tetmarker, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; int newptidx = oldpointsize + pos; trihandle parysh, checksh; tethandle neightet, spintet; int i = d_caveshbdhead[threadId], j; while (i != -1) { // Get an old subface at edge [a, b]. parysh = d_caveshbdlist[i]; cudamesh_spivot(parysh, checksh, d_tri2trilist); // The new subface [a, b, p]. // Do not recover a deleted new face (degenerated). 
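// The block below reuses the old subface's tet link, which still points into
// the old cavity: spin around edge [a, b] until the tet is no longer marked
// by this thread (i.e. it lies outside C(p)), cross that face to reach a new
// tet of the re-triangulated cavity, then keep spinning until the apex is the
// new point; that tet and its mirror are bonded to the new subface [a, b, p].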
if (!d_tristatus[checksh.id].isEmpty()) { // Note that the old subface still connects to adjacent old tets // of C(p), which still connect to the tets outside C(p). cudamesh_stpivot(parysh, neightet, d_tri2tetlist); //assert(d_tetmarker[neightet.id] == threadId); // Find the adjacent tet containing the edge [a,b] outside C(p). spintet = neightet; while (1) { cudamesh_fnextself(spintet, d_neighborlist); //printf("spintet %d\n", spintet.id); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) != threadId) break; assert(spintet.id != neightet.id); } // The adjacent tet connects to a new tet in C(p). cudamesh_fsym(spintet, neightet, d_neighborlist); //assert(d_tetmarker[neightet.id] != threadId); // Find the tet containing the face [a, b, p]. spintet = neightet; while (1) { cudamesh_fnextself(spintet, d_neighborlist); if (cudamesh_apex(spintet, d_tetlist) == newptidx) break; assert(spintet.id != neightet.id); } // Adjust the edge direction in spintet and checksh. if (cudamesh_sorg(checksh, d_trifacelist) != cudamesh_org(spintet, d_tetlist)) { cudamesh_sesymself(checksh); assert(cudamesh_sorg(checksh, d_trifacelist) == cudamesh_org(spintet, d_tetlist)); } assert(cudamesh_sdest(checksh, d_trifacelist) == cudamesh_dest(spintet, d_tetlist)); // Connect the subface to two adjacent tets. cudamesh_tsbond(spintet, checksh, d_tet2trilist, d_tri2tetlist); cudamesh_fsymself(spintet, d_neighborlist); cudamesh_sesymself(checksh); cudamesh_tsbond(spintet, checksh, d_tet2trilist, d_tri2tetlist); } else { // A deleted degenerated subface // Clear all neighbor information for (j = 0; j < 2; j++) { d_tri2tetlist[2 * checksh.id + j] = tethandle(-1, 11); } for (j = 0; j < 3; j++) { d_tri2trilist[3 * checksh.id + j] = trihandle(-1, 0); } for (j = 0; j < 3; j++) { d_tri2seglist[3 * checksh.id + j] = trihandle(-1, 0); } } i = d_caveshbdnext[i]; } } __global__ void kernelConnectNewSubsegs2NewTets( int* d_threadlist, REAL* d_pointlist, tethandle* d_point2tetlist, int* d_seglist, trihandle* d_seg2trilist, tethandle* d_seg2tetlist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2seglist, uint64* d_tetmarker, trihandle* d_cavesegshlist, int* d_cavesegshnext, int* d_cavesegshhead, unsigned long* d_randomseed, int* d_threadmarker, int* d_insertidx, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0) return; trihandle checkseg, checksh; tethandle neightet, spintet; int i = d_cavesegshhead[threadId]; while (i != -1) { checkseg = d_cavesegshlist[i]; // Get the adjacent new subface. checkseg.shver = 0; cudamesh_spivot(checkseg, checksh, d_seg2trilist);; if (checksh.id != -1) { // Get the adjacent new tetrahedron. cudamesh_stpivot(checksh, neightet, d_tri2tetlist); } else { // It's a dangling segment. 
cudamesh_point2tetorg(cudamesh_sorg(checkseg, d_seglist), neightet, d_point2tetlist, d_tetlist); cudamesh_finddirection(&neightet, cudamesh_sdest(checkseg, d_seglist), d_pointlist, d_tetlist, d_neighborlist, d_randomseed + pos); assert(cudamesh_dest(neightet, d_tetlist) == cudamesh_sdest(checkseg, d_seglist)); } //assert(d_tetmarker[neightet.id] != threadId); cudamesh_sstbond1(checkseg, neightet, d_seg2tetlist); spintet = neightet; while (1) { cudamesh_tssbond1(spintet, checkseg, d_tet2seglist); cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } i = d_cavesegshnext[i]; } } __global__ void kernelResetOldSubsegInfo( int* d_segidlist, int* d_threadlist, int* d_threadmarker, trihandle* d_seg2trilist, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0) return; int segid = d_segidlist[threadId]; for (int j = 0; j < 3; j++) { d_seg2trilist[3 * segid + j] = trihandle(-1, 0); } } __global__ void kernelResetOldSubfaceInfo( int* d_threadlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_subfaceencmarker, trihandle* d_caveshlist, int* d_caveshnext, int* d_caveshhead, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos], j; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0 && threadmarker != 1) return; trihandle checksh; int i = d_caveshhead[threadId]; while (i != -1) { checksh = d_caveshlist[i]; d_tristatus[checksh.id].clear(); d_subfaceencmarker[checksh.id] = -1; for (j = 0; j < 3; j++) { d_tri2trilist[3 * checksh.id + j] = trihandle(-1, 0); // reset neighbor to empty } for (j = 0; j < 3; j++) { d_tri2seglist[3 * checksh.id + j] = trihandle(-1, 0); } i = d_caveshnext[i]; } } __global__ void kernelResetOldTetInfo( int* d_threadlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* d_tet2seglist, tetstatus* d_tetstatus, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos], j; tethandle checktet; int i = d_caveoldtethead[threadId]; while (i != -1) { checktet = d_caveoldtetlist[i]; d_tetstatus[checktet.id].clear(); for (j = 0; j < 4; j++) { d_neighborlist[4 * checktet.id + j] = tethandle(-1, 11); // reset neighbor to empty } for (j = 0; j < 4; j++) { d_tet2trilist[4 * checktet.id + j] = trihandle(-1, 0); // reset subface to empty } for (j = 0; j < 6; j++) { d_tet2seglist[6 * checktet.id + j] = trihandle(-1, 0); // reset subseg to empty } i = d_caveoldtetnext[i]; } } __global__ void kernelUpdateSegencmarker( int* d_threadlist, REAL * d_pointlist, int* d_seglist, tethandle* d_seg2tetlist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, trihandle* d_cavesegshlist, int* d_cavesegshnext, int* d_cavesegshhead, int* d_segmarker, int* d_segencmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; trihandle checkseg; int i, encpt; // Check all segments outside cavity i = d_cavetetseghead[threadId]; while (i != -1) { checkseg = d_cavetetseglist[i]; if (d_segmarker[checkseg.id] != 
threadId) // Not a splitting segment { checkseg4split( &checkseg, encpt, d_pointlist, d_seglist, d_seg2tetlist, d_tetlist, d_neighborlist); d_segencmarker[checkseg.id] = encpt; } i = d_cavetetsegnext[i]; } // Check new segments when it is segment point insertion. // In this case, new segments are stored in cavesegshlist if (threadmarker == 0) { i = d_cavesegshhead[threadId]; while (i != -1) { checkseg = d_cavesegshlist[i]; checkseg4split( &checkseg, encpt, d_pointlist, d_seglist, d_seg2tetlist, d_tetlist, d_neighborlist); d_segencmarker[checkseg.id] = encpt; i = d_cavesegshnext[i]; } } } __global__ void kernelUpdateSubfaceencmarker( int* d_threadlist, REAL * d_pointlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, tristatus* d_tristatus, int* d_tetlist, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, uint64* d_trimarker, int* d_subfaceencmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; trihandle checkfac; int i, encpt; // Check all subfaces outside cavity i = d_cavetetshhead[threadId]; while (i != -1) { checkfac = d_cavetetshlist[i]; if (cudamesh_getUInt64PriorityIndex(d_trimarker[checkfac.id]) != threadId) // Not a splitting subface { checkface4split( &checkfac, encpt, d_pointlist, d_trifacelist, d_tri2tetlist, d_tetlist); d_subfaceencmarker[checkfac.id] = encpt; } i = d_cavetetshnext[i]; } // Check new subfaces when it is segment/subface point insertion. // In this case, new subfaces are connected to old subfaces in caveshbdlist if (threadmarker == 0 || threadmarker == 1) { trihandle parysh; i = d_caveshbdhead[threadId]; while (i != -1) { parysh = d_caveshbdlist[i]; cudamesh_spivot(parysh, checkfac, d_tri2trilist); if (!d_tristatus[checkfac.id].isEmpty()) { checkface4split( &checkfac, encpt, d_pointlist, d_trifacelist, d_tri2tetlist, d_tetlist); d_subfaceencmarker[checkfac.id] = encpt; } i = d_caveshbdnext[i]; } } } __global__ void kernelUpdateTetBadstatus( int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tetstatus* d_tetstatus, tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead, REAL minratio, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; tethandle cavetet; int i = d_cavebdryhead[threadId]; while (i != -1) { cavetet = d_cavebdrylist[i]; if (cavetet.id != -1) // cavetet.id may be -1 because of redundency { if (checktet4split(&cavetet, d_pointlist, d_tetlist, minratio)) d_tetstatus[cavetet.id].setBad(true); } i = d_cavebdrynext[i]; } } __global__ void kernelUpdateInsertRadius( int* d_threadlist, int* d_insertidxlist, REAL* d_pointlist, trihandle* d_point2trilist, verttype* d_pointtypelist, REAL* d_pointradius, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, int* d_tetlist, REAL* d_smlen, int* d_parentpt, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 0) { int newptidx = oldpointsize + pos; int parentptidx = d_parentpt[threadId]; REAL rv = d_smlen[threadId], rp; verttype parenttype = 
d_pointtypelist[parentptidx]; if (parenttype == FREESEGVERTEX) { trihandle parentseg1, parentseg2; parentseg1 = d_point2trilist[newptidx]; parentseg2 = d_point2trilist[parentptidx]; if (cudamesh_segsegadjacent(parentseg1.id, parentseg2.id, d_seg2parentidxlist, d_segparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } else if (parenttype == FREEFACETVERTEX) { trihandle parentseg, parentsh; parentseg = d_point2trilist[newptidx]; parentsh = d_point2trilist[parentptidx]; if (cudamesh_segfacetadjacent(parentseg.id, parentsh.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } d_pointradius[newptidx] = rv; } else if (threadmarker == 1) { int newptidx = oldpointsize + pos; int parentptidx = d_parentpt[threadId]; REAL rv = d_smlen[threadId], rp; verttype parenttype = d_pointtypelist[parentptidx]; if (parenttype == FREESEGVERTEX) { trihandle parentseg, parentsh; parentseg = d_point2trilist[parentptidx]; parentsh = d_point2trilist[newptidx]; if (cudamesh_segfacetadjacent(parentseg.id, parentsh.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < (sqrt(2.0) * rp)) rv = sqrt(2.0) * rp; // The relaxed insertion radius of new point } } else if (parenttype == FREEFACETVERTEX) { trihandle parentsh1, parentsh2; parentsh1 = d_point2trilist[parentptidx]; parentsh2 = d_point2trilist[newptidx]; if (cudamesh_facetfacetadjacent(parentsh1.id, parentsh2.id, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } d_pointradius[newptidx] = rv; } else { int splittetid = d_insertidxlist[threadId]; tethandle splittet(splittetid, 11); int newptidx = oldpointsize + pos; REAL *newpt = cudamesh_id2pointlist(newptidx, d_pointlist); int orgidx = cudamesh_org(splittet, d_tetlist); REAL *org = cudamesh_id2pointlist(orgidx, d_pointlist); REAL rv = cudamesh_distance(newpt, org); d_pointradius[newptidx] = rv; } } __global__ void kernelUpdateInsertRadius_Seg( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, REAL* d_pointradius, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, REAL* d_smlen, int* d_parentpt, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int newptidx = oldpointsize + pos; int parentptidx = d_parentpt[threadId]; REAL rv = d_smlen[threadId], rp; verttype parenttype = d_pointtypelist[parentptidx]; if (parenttype == FREESEGVERTEX) { trihandle parentseg1, parentseg2; parentseg1 = d_point2trilist[newptidx]; parentseg2 = d_point2trilist[parentptidx]; if (cudamesh_segsegadjacent(parentseg1.id, parentseg2.id, d_seg2parentidxlist, d_segparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } else if (parenttype == FREEFACETVERTEX) { trihandle parentseg, parentsh; parentseg = d_point2trilist[newptidx]; parentsh = d_point2trilist[parentptidx]; if (cudamesh_segfacetadjacent(parentseg.id, parentsh.id, d_seg2parentidxlist, 
d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } d_pointradius[newptidx] = rv; } __global__ void kernelUpdateInsertRadius_Subface( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, REAL* d_pointradius, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, REAL* d_smlen, int* d_parentpt, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int newptidx = oldpointsize + pos; int parentptidx = d_parentpt[threadId]; REAL rv = d_smlen[threadId], rp; verttype parenttype = d_pointtypelist[parentptidx]; if (parenttype == FREESEGVERTEX) { trihandle parentseg, parentsh; parentseg = d_point2trilist[parentptidx]; parentsh = d_point2trilist[newptidx]; if (cudamesh_segfacetadjacent(parentseg.id, parentsh.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < (sqrt(2.0) * rp)) rv = sqrt(2.0) * rp; // The relaxed insertion radius of new point } } else if (parenttype == FREEFACETVERTEX) { trihandle parentsh1, parentsh2; parentsh1 = d_point2trilist[parentptidx]; parentsh2 = d_point2trilist[newptidx]; if (cudamesh_facetfacetadjacent(parentsh1.id, parentsh2.id, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } d_pointradius[newptidx] = rv; } __global__ void kernelUpdateInsertRadius_Tet( int* d_insertidxlist, REAL* d_insertptlist, int* d_threadlist, REAL* d_pointlist, REAL* d_pointradius, int* d_tetlist, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int splittetid = d_insertidxlist[threadId]; tethandle splittet(splittetid, 11); int newptidx = oldpointsize + pos; REAL *newpt = cudamesh_id2pointlist(newptidx, d_pointlist); int orgidx = cudamesh_org(splittet, d_tetlist); REAL *org = cudamesh_id2pointlist(orgidx, d_pointlist); REAL rv = cudamesh_distance(newpt, org); d_pointradius[newptidx] = rv; } // Check mesh __global__ void kernelCheckPointNeighbors( trihandle* d_point2trilist, tethandle* d_point2tetlist, verttype* d_pointtypelist, int* d_seglist, tristatus* d_segstatus, int* d_trifacelist, tristatus* d_tristatus, int* d_tetlist, tetstatus* d_tetstatus, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int i, p; bool flag = false; trihandle neighseg, neighsh; tethandle neightet; verttype pointtype = d_pointtypelist[pos]; if (pointtype == FREESEGVERTEX) { neighseg = d_point2trilist[pos]; if (neighseg.id != -1) { if (d_segstatus[neighseg.id].isEmpty()) { printf("Point #%d: Empty subseg neighbor #%d\n", pos, neighseg.id); } else { for (i = 0; i < 3; i++) { p = d_seglist[3 * neighseg.id + i]; if (i == 2 && p != -1) { printf("Point #%d: Wrong point type (on subseg) or neighbor type (subseg) #%d - %d, %d, %d\n", pos, neighseg.id, d_seglist[3 * neighseg.id + 0], d_seglist[3 * neighseg.id + 1], d_seglist[3 * neighseg.id + 2]); } if (p == pos) { flag = true; break; } } if (!flag) printf("Point #%d: Wrong subface neighbor #%d - %d, %d, 
%d\n", pos, neighseg.id, d_seglist[3 * neighseg.id + 0], d_seglist[3 * neighseg.id + 1], d_seglist[3 * neighseg.id + 2]); } } else { printf("Point #%d: Missing segment neighbor\n", pos); } } else if (pointtype == FREEFACETVERTEX) { neighsh = d_point2trilist[pos]; if (neighsh.id != -1) { if (d_tristatus[neighsh.id].isEmpty()) { printf("Point #%d: Empty subface neighbor #%d\n", pos, neighsh.id); } else { for (i = 0; i < 3; i++) { p = d_trifacelist[3 * neighsh.id + i]; if (p == -1) { printf("Point #%d: Wrong point type (on subface) or neighbor type (subface) #%d - %d, %d, %d\n",pos, neighsh.id, d_trifacelist[3 * neighsh.id + 0], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]); } if (p == pos) { flag = true; break; } } if (!flag) printf("Point #%d: Wrong subface neighbor #%d - %d, %d, %d\n", pos, neighsh.id, d_trifacelist[3 * neighsh.id + 0], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]); } } else { printf("Point #%d: Missing subface neighbor\n", pos); } } neightet = d_point2tetlist[pos]; if (neightet.id != -1) { //printf("%d ", neightet.id); if (d_tetstatus[neightet.id].isEmpty()) { printf("Point #%d: Empty tet neighbor #%d\n", pos, neightet.id); } else { for (i = 0; i < 4; i++) { p = d_tetlist[4 * neightet.id + i]; if (p == pos) { flag = true; break; } } if (!flag) printf("Point #%d: Wrong tet neighbor #%d - %d, %d, %d, %d\n", pos, neightet.id, d_tetlist[4 * neightet.id + 0], d_tetlist[4 * neightet.id + 1], d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3]); } } } __global__ void kernelCheckSubsegNeighbors( int* d_seglist, trihandle* d_seg2trilist, tethandle* d_seg2tetlist, tristatus* d_segstatus, int* d_trifacelist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_tetlist, trihandle* d_tet2seglist, tetstatus* d_tetstatus, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_segstatus[pos].isEmpty()) return; trihandle checkseg(pos, 0), neighsh, neighseg, prevseg, nextseg; int pa, pb, pc, pd; cudamesh_spivot(checkseg, neighsh, d_seg2trilist); if (neighsh.id != -1) { if (d_tristatus[neighsh.id].isEmpty()) { printf("Subseg #%d: Empty subface neighbor #%d\n", checkseg.id, neighsh.id); } else { if (d_trifacelist[3 * neighsh.id + 2] == -1) { printf("Subseg #%d: Wrong neighbor type (Should be subface) #%d\n", checkseg.id, neighsh.id); } else { cudamesh_sspivot(neighsh, neighseg, d_tri2seglist); if (neighseg.id != checkseg.id) printf("Subseg #%d: Wrong subface neighbor #%d - %d, %d, %d\n", checkseg.id, neighsh.id, d_tri2seglist[3 * neighsh.id + 0].id, d_tri2seglist[3 * neighsh.id + 1].id, d_tri2seglist[3 * neighsh.id + 2].id); else { pa = cudamesh_sorg(checkseg, d_seglist); pb = cudamesh_sdest(checkseg, d_seglist); pc = cudamesh_sorg(neighsh, d_trifacelist); pd = cudamesh_sdest(neighsh, d_trifacelist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } else { printf("Subseg #%d - %d, %d: Wrong subface neighbor endpoints #%d - %d, %d, %d\n", checkseg.id, d_seglist[3 * checkseg.id], d_seglist[3 * checkseg.id + 1], neighsh.id, d_trifacelist[3 * neighsh.id], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]); } } } } } cudamesh_senextself(checkseg); cudamesh_spivot(checkseg, prevseg, d_seg2trilist); if (prevseg.id != -1) { if (d_segstatus[prevseg.id].isEmpty()) { printf("Subseg #%d: Empty subseg neighbor #%d\n", checkseg.id, prevseg.id); } else { if (d_seglist[3 * prevseg.id + 2] != -1) { printf("Subseg #%d: Wrong neighbor type (Should be subseg) #%d\n", checkseg.id, 
prevseg.id); } else { cudamesh_spivot(prevseg, neighseg, d_seg2trilist); if(neighseg.id != checkseg.id) printf("Subseg #%d: Wrong subseg neighbor #%d - %d, %d, %d\n", checkseg.id, prevseg.id, d_seg2trilist[3 * prevseg.id + 0].id, d_seg2trilist[3 * prevseg.id + 1].id, d_seg2trilist[3 * prevseg.id + 2].id); } } } cudamesh_senextself(checkseg); cudamesh_spivot(checkseg, nextseg, d_seg2trilist); if (nextseg.id != -1) { if (d_segstatus[nextseg.id].isEmpty()) { printf("Subseg #%d: Empty subseg neighbor #%d\n", checkseg.id, prevseg.id); } else { if (d_seglist[3 * nextseg.id + 2] != -1) { printf("Subseg #%d: Wrong neighbor type (Should be subseg) #%d\n", checkseg.id, nextseg.id); } else { cudamesh_spivot(nextseg, neighseg, d_seg2trilist); if (neighseg.id != checkseg.id) printf("Subseg #%d: Wrong subseg neighbor #%d - %d, %d, %d\n", checkseg.id, nextseg.id, d_seg2trilist[3 * nextseg.id + 0].id, d_seg2trilist[3 * nextseg.id + 1].id, d_seg2trilist[3 * nextseg.id + 2].id); } } } tethandle neightet; checkseg.shver = 0; cudamesh_sstpivot1(checkseg, neightet, d_seg2tetlist); if (neightet.id != -1) { if (d_tetstatus[neightet.id].isEmpty()) { printf("Subseg #%d: Empty tet neighbor #%d\n", checkseg.id, neightet.id); } else { cudamesh_tsspivot1(neightet, neighseg, d_tet2seglist); if (neighseg.id != checkseg.id) printf("Subseg #%d: Wrong tet neighbor #%d - %d, %d, %d, %d, %d, %d\n", checkseg.id, neightet.id, d_tet2seglist[6 * neightet.id + 0].id, d_tet2seglist[6 * neightet.id + 1].id, d_tet2seglist[6 * neightet.id + 2].id, d_tet2seglist[6 * neightet.id + 3].id, d_tet2seglist[6 * neightet.id + 4].id, d_tet2seglist[6 * neightet.id + 5].id); else { pa = cudamesh_sorg(checkseg, d_seglist); pb = cudamesh_sdest(checkseg, d_seglist); pc = cudamesh_org(neightet, d_tetlist); pd = cudamesh_dest(neightet, d_tetlist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } else { printf("Subseg #%d - %d, %d: Wrong tet neighbor endpoints #%d(%d) - %d, %d, %d, %d\n", checkseg.id, d_seglist[3 * checkseg.id], d_seglist[3 * checkseg.id + 1], neightet.id, neightet.ver, d_tetlist[4 * neightet.id], d_tetlist[4 * neightet.id + 1], d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3]); } } } } } __global__ void kernelCheckSubfaceNeighbors( int* d_seglist, trihandle* d_seg2trilist, tristatus* d_segstatus, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_tetlist, trihandle* d_tet2trilist, tetstatus* d_tetstatus, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_tristatus[pos].isEmpty()) return; trihandle checksh(pos, 0), neighseg, neighsh, neineighsh; tethandle neightet; int i, pa, pb, pc, pd, pe, pf; for (i = 0; i < 3; i++) { cudamesh_senextself(checksh); cudamesh_sspivot(checksh, neighseg, d_tri2seglist); if (neighseg.id != -1) { if (d_segstatus[neighseg.id].isEmpty()) printf("Subface #%d: Empty subseg neighbor #%d\n", checksh.id, neighseg.id); else { cudamesh_spivot(neighseg, neighsh, d_seg2trilist); if (neighsh.id == -1) { printf("Subface #%d: Wrong subseg neighbor, Subface #%d - %d, %d, %d, Subseg #%d - (-1)\n", checksh.id, d_tri2seglist[3 * checksh.id + 0].id, d_tri2seglist[3 * checksh.id + 1].id, d_tri2seglist[3 * checksh.id + 2].id, neighseg.id); } else { //printf("%d ", neighsh.id); bool found = false; cudamesh_spivot(neighsh, neineighsh, d_tri2trilist); if (neighsh.id == checksh.id) found = true; if (neineighsh.id == -1) // this only happen when neighsh is a single subface { 
if(checksh.id != neighsh.id) printf("Subface: Wrong single subface neighbor - Checksh #%d, Neighseg #%d, Neighsh #%d\n", checksh.id, neighseg.id, neighsh.id); } else { if (neighsh.id == neineighsh.id) { if (checksh.id != neighsh.id) printf("Subface: Wrong single subface neighbor - Checksh #%d, Neighsh #%d, neineighsh #%d\n", checksh.id, neighsh.id, neineighsh.id); } else { while (neineighsh.id != neighsh.id) { if (neineighsh.id == checksh.id) { found = true; break; } cudamesh_spivotself(neineighsh, d_tri2trilist); } } } if (!found) printf("Subface #%d: Wrong subseg neighbor #%d, missing in loop\n", checksh.id, neighseg.id); else { pa = cudamesh_sorg(checksh, d_trifacelist); pb = cudamesh_sdest(checksh, d_trifacelist); pc = cudamesh_sorg(neighseg, d_seglist); pd = cudamesh_sdest(neighseg, d_seglist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } else { printf("Subface #%d - %d, %d, %d: Wrong subseg neighbor endpoints #%d - %d, %d, %d\n", checksh.id, d_trifacelist[3 * checksh.id + 0], d_trifacelist[3 * checksh.id + 1], d_trifacelist[3 * checksh.id + 2], neighseg.id, d_seglist[3 * neighseg.id + 0], d_seglist[3 * neighseg.id + 1], d_seglist[3 * neighseg.id + 2]); } } } } } } for (i = 0; i < 3; i++) { cudamesh_senextself(checksh); cudamesh_spivot(checksh, neighsh, d_tri2trilist); if (neighsh.id != -1) { while (neighsh.id != checksh.id) { if (d_tristatus[neighsh.id].isEmpty()) { printf("Subface #%d - %d, %d, %d - %d, %d, %d: Empty subface neighbor #%d - %d, %d, %d - %d, %d, %d\n", checksh.id, d_tri2trilist[3 * checksh.id + 0].id, d_tri2trilist[3 * checksh.id + 1].id, d_tri2trilist[3 * checksh.id + 2].id, d_trifacelist[3 * checksh.id + 0], d_trifacelist[3 * checksh.id + 1], d_trifacelist[3 * checksh.id + 2], neighsh.id, d_tri2trilist[3 * neighsh.id + 0].id, d_tri2trilist[3 * neighsh.id + 1].id, d_tri2trilist[3 * neighsh.id + 2].id, d_trifacelist[3 * neighsh.id + 0], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]); break; } cudamesh_spivotself(neighsh, d_tri2trilist); } } } for (i = 0; i < 2; i++) { cudamesh_sesymself(checksh); cudamesh_stpivot(checksh, neightet, d_tri2tetlist); if (neightet.id != -1) { if (d_tetstatus[neightet.id].isEmpty()) { printf("Subface #%d: Empty tet neighbor #%d\n", checksh.id, neightet.id); } else { cudamesh_tspivot(neightet, neighsh, d_tet2trilist); if (neighsh.id != checksh.id) printf("Subface #%d: Wrong tet neighbor #%d - %d, %d, %d, %d\n", checksh.id, neightet.id, d_tet2trilist[4 * neightet.id + 0].id, d_tet2trilist[4 * neightet.id + 1].id, d_tet2trilist[4 * neightet.id + 2].id, d_tet2trilist[4 * neightet.id + 3].id); else { pa = cudamesh_sorg(checksh, d_trifacelist); pb = cudamesh_sdest(checksh, d_trifacelist); pc = cudamesh_sapex(checksh, d_trifacelist); pd = cudamesh_org(neightet, d_tetlist); pe = cudamesh_dest(neightet, d_tetlist); pf = cudamesh_apex(neightet, d_tetlist); if (pa == pd && pb == pe && pc == pf) { } else { printf("Subface #%d - %d, %d, %d: Wrong tet neighbor endpoints #%d - %d, %d, %d, %d\n", checksh.id, d_trifacelist[3 * checksh.id + 0], d_trifacelist[3 * checksh.id + 1], d_trifacelist[3 * checksh.id + 2], neightet.id, d_tetlist[4 * neightet.id + 0], d_tetlist[4 * neightet.id + 1], d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3]); } } } } } } __global__ void kernelCheckTetNeighbors( int* d_seglist, tethandle* d_seg2tetlist, tristatus* d_segstatus, int* d_trifacelist, tethandle* d_tri2tetlist, tristatus* d_tristatus, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* 
d_tet2seglist, tetstatus* d_tetstatus, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_tetstatus[pos].isEmpty()) return; tethandle neightet, neineightet; trihandle neighsh, neighseg; int i, pa, pb, pc, pd, pe, pf; for (i = 0; i < 4; i++) { neightet = d_neighborlist[4 * pos + i]; if (neightet.id != -1) { if (d_tetstatus[neightet.id].isEmpty()) { printf("Tet #%d - %d, %d, %d, %d: Empty tet neighbor #%d - %d, %d, %d, %d\n", pos, d_neighborlist[4 * pos].id, d_neighborlist[4 * pos + 1].id, d_neighborlist[4 * pos + 2].id, d_neighborlist[4 * pos + 3].id, neightet.id, d_neighborlist[4 * neightet.id].id, d_neighborlist[4 * neightet.id + 1].id, d_neighborlist[4 * neightet.id + 2].id, d_neighborlist[4 * neightet.id + 3].id); } else { cudamesh_fsym(neightet, neineightet, d_neighborlist); if (neineightet.id != pos) printf("Tet #%d: Wrong tet neighbor #%d - %d, %d, %d, %d\n", pos, neightet.id, d_neighborlist[4 * neightet.id + 0].id, d_neighborlist[4 * neightet.id + 1].id, d_neighborlist[4 * neightet.id + 2].id, d_neighborlist[4 * neightet.id + 3].id); else { pa = cudamesh_org(neightet, d_tetlist); pb = cudamesh_dest(neightet, d_tetlist); pc = cudamesh_apex(neightet, d_tetlist); pd = cudamesh_org(neineightet, d_tetlist); pe = cudamesh_dest(neineightet, d_tetlist); pf = cudamesh_apex(neineightet, d_tetlist); if (pa == pe && pb == pd && pc == pf) { } else { printf("Tet #%d - %d, %d, %d, %d: Wrong tet neighbor endpoints #%d - %d, %d, %d, %d\n", pos, d_tetlist[4 * pos], d_tetlist[4 * pos + 1], d_tetlist[4 * pos + 2], d_tetlist[4 * pos + 3], neightet.id, d_tetlist[4 * neightet.id + 0], d_tetlist[4 * neightet.id + 1], d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3]); } } } } else { printf("Tet #%d - %d, %d, %d, %d: Empty tet neighbor #%d - %d, %d, %d, %d\n", pos, d_neighborlist[4 * pos].id, d_neighborlist[4 * pos + 1].id, d_neighborlist[4 * pos + 2].id, d_neighborlist[4 * pos + 3].id, neightet.id, d_neighborlist[4 * neightet.id].id, d_neighborlist[4 * neightet.id + 1].id, d_neighborlist[4 * neightet.id + 2].id, d_neighborlist[4 * neightet.id + 3].id); } } for (i = 0; i < 4; i++) { neighsh = d_tet2trilist[4 * pos + i]; if (neighsh.id != -1) { if (d_tristatus[neighsh.id].isEmpty()) { printf("Tet #%d - %d, %d, %d, %d: Empty subface neighbor #%d - %d, %d\n", pos, d_tet2trilist[4 * pos].id, d_tet2trilist[4 * pos + 1].id, d_tet2trilist[4 * pos + 2].id, d_tet2trilist[4 * pos + 3].id, neighsh.id, d_tri2tetlist[2 * neightet.id].id, d_tri2tetlist[2 * neightet.id + 1].id); } else { cudamesh_stpivot(neighsh, neightet, d_tri2tetlist); if(neightet.id != pos) printf("Tet #%d: Wrong subface neighbor #%d - %d, %d\n", pos, neighsh.id, d_tri2tetlist[2 * neighsh.id + 0].id, d_tri2tetlist[2 * neighsh.id + 1].id); else { pa = cudamesh_sorg(neighsh, d_trifacelist); pb = cudamesh_sdest(neighsh, d_trifacelist); pc = cudamesh_sapex(neighsh, d_trifacelist); pd = cudamesh_org(neightet, d_tetlist); pe = cudamesh_dest(neightet, d_tetlist); pf = cudamesh_apex(neightet, d_tetlist); if(pa == pd && pb == pe && pc == pf) { } else { printf("Tet #%d - %d, %d, %d, %d: Wrong subface neighbor endpoints #%d - %d, %d, %d\n", pos, d_tetlist[4 * pos], d_tetlist[4 * pos + 1], d_tetlist[4 * pos + 2], d_tetlist[4 * pos + 3], neighsh.id, d_trifacelist[3 * neighsh.id + 0], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]); } } } } } for (i = 0; i < 6; i++) { neighseg = d_tet2seglist[6 * pos + i]; if (neighseg.id != -1) { if(d_segstatus[neighseg.id].isEmpty()) { 
printf("Tet #%d - %d, %d, %d, %d, %d, %d: Empty subseg neighbor #%d - %d\n", pos, d_tet2seglist[6 * pos].id, d_tet2seglist[6 * pos + 1].id, d_tet2seglist[6 * pos + 2].id, d_tet2seglist[6 * pos + 3].id, d_tet2seglist[6 * pos + 4].id, d_tet2seglist[6 * pos + 5].id, neighseg.id, d_seg2tetlist[neighseg.id].id); } else { cudamesh_sstpivot1(neighseg, neightet, d_seg2tetlist); if (neightet.id == -1) printf("Tet #%d - Incident Subseg #%d has empty tet neighbor\n", pos, neighseg.id); else { pa = cudamesh_sorg(neighseg, d_seglist); pb = cudamesh_sdest(neighseg, d_seglist); pc = cudamesh_org(neightet, d_tetlist); pd = cudamesh_dest(neightet, d_tetlist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } else { printf("pa = %d, pb = %d, pc = %d, pd = %d\n", pa, pb, pc, pd); printf("Tet #%d(%d) - %d, %d, %d, %d: Wrong subseg neighbor endpoints #%d - %d, %d, %d\n", neightet.id, neightet.ver, d_tetlist[4 * neightet.id + 0], d_tetlist[4 * neightet.id + 1], d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3], neighseg.id, d_seglist[3 * neighseg.id], d_seglist[3 * neighseg.id + 1], d_seglist[3 * neighseg.id + 2]); } } } } } } // Split bad elements __global__ void kernelCheckBadElementList( int* d_badeleidlist, int* d_threadmarker, int* d_segencmarker, int* d_subfaceencmarker, tetstatus* d_tetstatus, int numofencsegs, int numofencsubfaces, int numofbadtets, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (pos < numofencsegs) { if(d_threadmarker[pos] != 0) printf("threadId #%d - seg #%d - wrong thread marker %d\n", pos, d_badeleidlist[pos], d_threadmarker[pos]); else if(d_segencmarker[d_badeleidlist[pos]] < 0) printf("threadId #%d - seg #%d - wrong encroachement marker %d\n", pos, d_badeleidlist[pos], d_segencmarker[d_badeleidlist[pos]]); } else if (pos < numofencsubfaces + numofencsegs) { if (d_threadmarker[pos] != 1) printf("threadId #%d - subface #%d - wrong thread marker %d\n", pos, d_badeleidlist[pos], d_threadmarker[pos]); else if (d_subfaceencmarker[d_badeleidlist[pos]] < 0) printf("threadId #%d - subface #%d - wrong encroachement marker %d\n", pos, d_badeleidlist[pos], d_subfaceencmarker[d_badeleidlist[pos]]); } else { if (d_threadmarker[pos] != 2) printf("threadId #%d - tet #%d - wrong thread marker %d\n", pos, d_badeleidlist[pos], d_threadmarker[pos]); else if (!d_tetstatus[d_badeleidlist[pos]].isBad() || d_tetstatus[d_badeleidlist[pos]].isEmpty()) printf("threadId #%d - tet #%d - wrong tet status\n", pos, d_badeleidlist[pos]); } } __global__ void kernelComputeSteinerPointAndPriority( REAL* d_pointlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, int* d_seg2parentlist, int* d_segparentlist, int* d_segencmarker, int* d_trifacelist, tristatus* d_tristatus, int* d_tetlist, tetstatus* d_tetstatus, int* d_insertidxlist, int* d_threadmarker, REAL* d_steinerptlist, REAL* d_priority, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int eleidx = d_insertidxlist[pos]; int threadmarker = d_threadmarker[pos]; REAL* steinpt = cudamesh_id2pointlist(pos, d_steinerptlist); if (threadmarker == 0) // is a subsegment { trihandle seg(eleidx, 0); REAL* ei = cudamesh_id2pointlist(cudamesh_sorg(seg, d_seglist), d_pointlist); REAL* ej = cudamesh_id2pointlist(cudamesh_sdest(seg, d_seglist), d_pointlist); REAL len = cudamesh_distance(ei, ej); d_priority[pos] = 1 / len; int adjflag = 0, i; int refptidx = d_segencmarker[eleidx]; if (refptidx != MAXINT) { REAL* refpt = 
cudamesh_id2pointlist(refptidx, d_pointlist); REAL L, L1, t; if (d_pointtypelist[refptidx] == FREESEGVERTEX) { trihandle parentseg; parentseg = d_point2trilist[refptidx]; int sidx1 = d_seg2parentlist[parentseg.id]; int idx_pi = d_segparentlist[sidx1 * 2]; int idx_pj = d_segparentlist[sidx1 * 2 + 1]; REAL* far_pi = cudamesh_id2pointlist(idx_pi, d_pointlist); REAL* far_pj = cudamesh_id2pointlist(idx_pj, d_pointlist); int sidx2 = d_seg2parentlist[seg.id]; int idx_ei = d_segparentlist[sidx2 * 2]; int idx_ej = d_segparentlist[sidx2 * 2 + 1]; REAL* far_ei = cudamesh_id2pointlist(idx_ei, d_pointlist); REAL* far_ej = cudamesh_id2pointlist(idx_ej, d_pointlist); if ((idx_pi == idx_ei) || (idx_pj == idx_ei)) { // Create a Steiner point at the intersection of the segment // [far_ei, far_ej] and the sphere centered at far_ei with // radius |far_ei - refpt|. L = cudamesh_distance(far_ei, far_ej); L1 = cudamesh_distance(far_ei, refpt); t = L1 / L; for (i = 0; i < 3; i++) { steinpt[i] = far_ei[i] + t * (far_ej[i] - far_ei[i]); } adjflag = 1; } else if ((idx_pi == idx_ej) || (idx_pj == idx_ej)) { L = cudamesh_distance(far_ei, far_ej); L1 = cudamesh_distance(far_ej, refpt); t = L1 / L; for (i = 0; i < 3; i++) { steinpt[i] = far_ej[i] + t * (far_ei[i] - far_ej[i]); } adjflag = 1; } else { // Cut the segment by the projection point of refpt. projectpoint2edge(refpt, ei, ej, steinpt); } } else { // Cut the segment by the projection point of refpt. projectpoint2edge(refpt, ei, ej, steinpt); } // Make sure that steinpt is not too close to ei and ej. L = cudamesh_distance(ei, ej); L1 = cudamesh_distance(steinpt, ei); t = L1 / L; if ((t < 0.2) || (t > 0.8)) { // Split the point at the middle. for (i = 0; i < 3; i++) { steinpt[i] = ei[i] + 0.5 * (ej[i] - ei[i]); } } } else { // Split the point at the middle. for (i = 0; i < 3; i++) { steinpt[i] = ei[i] + 0.5 * (ej[i] - ei[i]); } } } else if (threadmarker == 1) // is a subface { REAL *pa, *pb, *pc; REAL area, rd, len; REAL A[4][4], rhs[4], D; int indx[4]; int i; trihandle chkfac(eleidx, 0); REAL* steinpt = cudamesh_id2pointlist(pos, d_steinerptlist); pa = cudamesh_id2pointlist(cudamesh_sorg(chkfac, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(chkfac, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(chkfac, d_trifacelist), d_pointlist); // Compute the coefficient matrix A (3x3). A[0][0] = pb[0] - pa[0]; A[0][1] = pb[1] - pa[1]; A[0][2] = pb[2] - pa[2]; // vector V1 (pa->pb) A[1][0] = pc[0] - pa[0]; A[1][1] = pc[1] - pa[1]; A[1][2] = pc[2] - pa[2]; // vector V2 (pa->pc) cudamesh_cross(A[0], A[1], A[2]); // vector V3 (V1 X V2) area = 0.5 * sqrt(cudamesh_dot(A[2], A[2])); // The area of [a,b,c]. d_priority[pos] = 1 / area; // Compute the right hand side vector b (3x1). rhs[0] = 0.5 * cudamesh_dot(A[0], A[0]); // edge [a,b] rhs[1] = 0.5 * cudamesh_dot(A[1], A[1]); // edge [a,c] rhs[2] = 0.0; // Solve the 3 by 3 equations use LU decomposition with partial // pivoting and backward and forward substitute. if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // A degenerate triangle. //printf("kernelComputeSteinerPointOnSubface: A degenerate subface. 
This should not happen!\n"); } cudamesh_lu_solve(A, 3, indx, rhs, 0); steinpt[0] = pa[0] + rhs[0]; steinpt[1] = pa[1] + rhs[1]; steinpt[2] = pa[2] + rhs[2]; } else if(threadmarker == 2) // is a tetrahedron { int tetid = eleidx; int ipa, ipb, ipc, ipd; REAL *pa, *pb, *pc, *pd; REAL vda[3], vdb[3], vdc[3]; REAL vab[3], vbc[3], vca[3]; REAL elen[6]; REAL smlen = 0, rd; REAL A[4][4], rhs[4], D; int indx[4]; int i; ipd = d_tetlist[4 * tetid + 3]; if (ipd == -1) { // This should not happend printf("Thread #%d - Error: Try to split a hull tet #%d!\n", pos, tetid); return; } ipa = d_tetlist[4 * tetid + 0]; ipb = d_tetlist[4 * tetid + 1]; ipc = d_tetlist[4 * tetid + 2]; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); pc = cudamesh_id2pointlist(ipc, d_pointlist); pd = cudamesh_id2pointlist(ipd, d_pointlist); // Get the edge vectors vda: d->a, vdb: d->b, vdc: d->c. // Set the matrix A = [vda, vdb, vdc]^T. for (i = 0; i < 3; i++) A[0][i] = vda[i] = pa[i] - pd[i]; for (i = 0; i < 3; i++) A[1][i] = vdb[i] = pb[i] - pd[i]; for (i = 0; i < 3; i++) A[2][i] = vdc[i] = pc[i] - pd[i]; // Get the other edge vectors. for (i = 0; i < 3; i++) vab[i] = pb[i] - pa[i]; for (i = 0; i < 3; i++) vbc[i] = pc[i] - pb[i]; for (i = 0; i < 3; i++) vca[i] = pa[i] - pc[i]; if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // This should not happend //printf("Thread #%d - Error: Try to split a degenerated tet #%d!\n", threadId, tetid); d_tetstatus[tetid].setAbortive(true); d_threadmarker[pos] = -1; return; } // Calculate the circumcenter and radius of this tet. rhs[0] = 0.5 * cudamesh_dot(vda, vda); rhs[1] = 0.5 * cudamesh_dot(vdb, vdb); rhs[2] = 0.5 * cudamesh_dot(vdc, vdc); cudamesh_lu_solve(A, 3, indx, rhs, 0); for (i = 0; i < 3; i++) { steinpt[i] = pd[i] + rhs[i]; } //Calculate the shortest edge length. 
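// Note: elen[] below stores squared edge lengths (dot products of the edge
// vectors with themselves); square roots are taken afterwards when the
// Heron-type volume formula is evaluated to set the splitting priority.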
elen[0] = cudamesh_dot(vda, vda); elen[1] = cudamesh_dot(vdb, vdb); elen[2] = cudamesh_dot(vdc, vdc); elen[3] = cudamesh_dot(vab, vab); elen[4] = cudamesh_dot(vbc, vbc); elen[5] = cudamesh_dot(vca, vca); // Use volume as priority // Use heron-type formula to compute the volume of a tetrahedron // https://en.wikipedia.org/wiki/Heron%27s_formula REAL U, V, W, u, v, w; // first three form a triangle; u opposite to U and so on REAL X, x, Y, y, Z, z; REAL a, b, c, d; U = sqrt(elen[3]); //ab V = sqrt(elen[4]); //bc W = sqrt(elen[5]); //ca u = sqrt(elen[2]); //dc v = sqrt(elen[0]); //da w = sqrt(elen[1]); //db X = (w - U + v)*(U + v + w); x = (U - v + w)*(v - w + U); Y = (u - V + w)*(V + w + u); y = (V - w + u)*(w - u + V); Z = (v - W + u)*(W + u + v); z = (W - u + v)*(u - v + W); a = sqrt(x*Y*Z); b = sqrt(y*Z*X); c = sqrt(z*X*Y); d = sqrt(x*y*z); REAL vol = sqrt((-a + b + c + d)*(a - b + c + d)*(a + b - c + d)*(a + b + c - d)) / (192 * u*v*w); d_priority[pos] = 1 / vol; } } __global__ void kernelModifyPriority( REAL* d_priorityreal, int* d_priorityint, REAL offset0, REAL offset1, REAL offset2, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadmarker = d_threadmarker[pos]; REAL offset; if (threadmarker == 0) offset = offset0; else if (threadmarker == 1) offset = offset1; else offset = offset2; REAL priority = d_priorityreal[pos] + offset; d_priorityreal[pos] = priority; d_priorityint[pos] = __float_as_int((float)priority); } __global__ void kernelCheckInsertRadius( REAL* d_pointlist, REAL* d_pointradius, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, tristatus* d_segstatus, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_segencmarker, int* d_trifacelist, tristatus* d_tristatus, int* d_tetlist, tetstatus* d_tetstatus, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, int* d_subfaceencmarker, int* d_insertidxlist, int* d_threadmarker, REAL* d_steinerptlist, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadmarker = d_threadmarker[pos]; int eleidx = d_insertidxlist[pos]; if (threadmarker == 0) { int segId = eleidx; if (d_segstatus[segId].isAbortive()) { d_threadmarker[pos] = -1; return; } int encptidx = d_segencmarker[pos]; if (encptidx != MAXINT) // not encroached by splitting segment and subface routines return; trihandle splitseg(segId, 0); int ipa, ipb; ipa = cudamesh_sorg(splitseg, d_seglist); ipb = cudamesh_sdest(splitseg, d_seglist); REAL *pa, *pb; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); REAL len = cudamesh_distance(pa, pb); REAL smrrv = d_pointradius[ipa]; REAL rrv = d_pointradius[ipb]; if (rrv > 0) { if (smrrv > 0) { if (rrv < smrrv) { smrrv = rrv; } } else { smrrv = rrv; } } if (smrrv > 0) { if ((fabs(smrrv - len) / len) < EPSILON) smrrv = len; if (len < smrrv) { d_segstatus[segId].setAbortive(true); d_threadmarker[pos] = -1; return; } } } else if (threadmarker == 1) { int subfaceid = eleidx; if (d_tristatus[subfaceid].isAbortive()) { d_threadmarker[pos] = -1; return; } int encptidx = d_subfaceencmarker[subfaceid]; if (encptidx == MAXINT) // Mark as encroached when trying to split a tet return; trihandle parentseg, parentsh; trihandle splitfac(subfaceid, 0); REAL rv, rp; REAL* newpt = d_steinerptlist + 3 * pos; REAL* encpt = cudamesh_id2pointlist(encptidx, d_pointlist); rv = cudamesh_distance(newpt, encpt); if 
(d_pointtypelist[encptidx] == FREESEGVERTEX) { parentseg = d_point2trilist[encptidx]; if (cudamesh_segfacetadjacent(parentseg.id, splitfac.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[encptidx]; if (rv < (sqrt(2.0) * rp)) { // This insertion may cause no termination. d_threadmarker[pos] = -1; // Reject the insertion of newpt. d_tristatus[subfaceid].setAbortive(true); } } } else if (d_pointtypelist[encptidx] == FREEFACETVERTEX) { parentsh = d_point2trilist[encptidx]; if (cudamesh_facetfacetadjacent(parentsh.id, splitfac.id, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[encptidx]; if (rv < rp) { d_threadmarker[pos] = -1; // Reject the insertion of newpt. d_tristatus[subfaceid].setAbortive(true); } } } } else { int tetid = eleidx; if (d_tetstatus[tetid].isAbortive()) { d_threadmarker[pos] = -1; return; } tethandle chktet(tetid, 11), checkedge; int ie1, ie2; int i, j; REAL *e1, *e2; REAL smlen = 0; REAL rrv, smrrv; REAL elen[6]; // Get the shortest edge of this tet. checkedge.id = chktet.id; for (i = 0; i < 6; i++) { checkedge.ver = raw_edge2ver[i]; ie1 = cudamesh_org(checkedge, d_tetlist); ie2 = cudamesh_dest(checkedge, d_tetlist); e1 = cudamesh_id2pointlist(ie1, d_pointlist); e2 = cudamesh_id2pointlist(ie2, d_pointlist); elen[i] = cudamesh_distance(e1, e2); if (i == 0) { smlen = elen[i]; j = 0; } else { if (elen[i] < smlen) { smlen = elen[i]; j = i; } } } // Check if the edge is too short. checkedge.ver = raw_edge2ver[j]; // Get the smallest rrv of e1 and e2. // Note: if rrv of e1 and e2 is zero. Do not use it. ie1 = cudamesh_org(checkedge, d_tetlist); smrrv = d_pointradius[ie1]; ie2 = cudamesh_dest(checkedge, d_tetlist); rrv = d_pointradius[ie2]; if (rrv > 0) { if (smrrv > 0) { if (rrv < smrrv) { smrrv = rrv; } } else { smrrv = rrv; } } if (smrrv > 0) { // To avoid rounding error, round smrrv before doing comparison. 
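// smrrv is the smaller of the insertion radii recorded at the two endpoints
// of the shortest edge; if that radius already exceeds the shortest edge
// length, splitting this tet could only create even shorter edges, so the
// tet is marked abortive and the thread withdrawn instead.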
if ((fabs(smrrv - smlen) / smlen) <EPSILON) { smrrv = smlen; } if (smrrv > smlen) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[pos] = -1; return; } } } } __global__ void kernelLocatePoint( REAL* d_pointlist, tethandle* d_seg2tetlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_tetlist, tethandle* d_neighborlist, tetstatus* d_tetstatus, int* d_priority, unsigned long* d_randomseed, locateresult* d_pointlocation, trihandle* d_searchsh, tethandle* d_searchtet, int* d_insertidxlist, int* d_threadmarker, int* d_threadlist, REAL* d_steinerptlist, int numofsplittablesubsegs, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; int eleidx = d_insertidxlist[threadId]; if (threadmarker == 0) { trihandle splitseg(eleidx, 0); tethandle searchtet; cudamesh_sstpivot1(splitseg, searchtet, d_seg2tetlist); d_searchtet[threadId] = searchtet; d_pointlocation[threadId] = ONEDGE; } else if (threadmarker == 1) { int step = 1; int subfaceid = eleidx; d_searchsh[threadId] = trihandle(subfaceid, 0); trihandle neighsh; trihandle *searchsh = d_searchsh + threadId; REAL *searchpt = d_steinerptlist + 3 * threadId; REAL *pa, *pb, *pc; unsigned long *randomseed = d_randomseed + pos; REAL abvpt[3]; // Check if coordinates are valid if (cudamesh_isInvalid(searchpt[0]) || cudamesh_isInvalid(searchpt[1]) || cudamesh_isInvalid(searchpt[2])) { d_tristatus[subfaceid].setAbortive(true); d_threadmarker[threadId] = -1; return; } enum locateresult loc; enum { MOVE_BC, MOVE_CA } nextmove; REAL ori, ori_bc, ori_ca; int i; pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); // Calculate an above point for this facet. cudamesh_calculateabovepoint4(searchpt, pa, pb, pc, abvpt); // 'abvpt' is given. Make sure it is above [a,b,c] ori = cuda_orient3d(pa, pb, pc, abvpt); assert(ori != 0); // SELF_CHECK if (ori > 0) { cudamesh_sesymself(*searchsh); // Reverse the face orientation. } // Find an edge of the face s.t. p lies on its right-hand side (CCW). for (i = 0; i < 3; i++) { pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); ori = cuda_orient3d(pa, pb, abvpt, searchpt); if (ori > 0) break; cudamesh_senextself(*searchsh); } assert(i < 3); // SELF_CHECK pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); if (pc[0] == searchpt[0] && pc[1] == searchpt[1] && pc[2] == searchpt[2]) { cudamesh_senext2self(*searchsh); loc = ONVERTEX; } else { while (1) { ori_bc = cuda_orient3d(pb, pc, abvpt, searchpt); ori_ca = cuda_orient3d(pc, pa, abvpt, searchpt); if (ori_bc < 0) { if (ori_ca < 0) { // (--) // Any of the edges is a viable move. if (cudamesh_randomnation(randomseed, 2)) { nextmove = MOVE_CA; } else { nextmove = MOVE_BC; } } else { // (-#) // Edge [b, c] is viable. nextmove = MOVE_BC; } } else { if (ori_ca < 0) { // (#-) // Edge [c, a] is viable. nextmove = MOVE_CA; } else { if (ori_bc > 0) { if (ori_ca > 0) { // (++) loc = ONFACE; // Inside [a, b, c]. break; } else { // (+0) cudamesh_senext2self(*searchsh); // On edge [c, a]. 
loc = ONEDGE; break; } } else { // ori_bc == 0 if (ori_ca > 0) { // (0+) cudamesh_senextself(*searchsh); // On edge [b, c]. loc = ONEDGE; break; } else { // (00) // p is coincident with vertex c. cudamesh_senext2self(*searchsh); loc = ONVERTEX; break; } } } } // Move to the next face. if (nextmove == MOVE_BC) { cudamesh_senextself(*searchsh); } else { cudamesh_senext2self(*searchsh); } // NON-convex case. Check if we will cross a boundary. if (cudamesh_isshsubseg(*searchsh, d_tri2seglist)) { loc = ENCSEGMENT; break; } cudamesh_spivot(*searchsh, neighsh, d_tri2trilist); if (neighsh.id == -1) { loc = OUTSIDE; // A hull edge. break; } // Adjust the edge orientation. if (cudamesh_sorg(neighsh, d_trifacelist) != cudamesh_sdest(*searchsh, d_trifacelist)) { cudamesh_sesymself(neighsh); } assert(cudamesh_sorg(neighsh, d_trifacelist) == cudamesh_sdest(*searchsh, d_trifacelist)); // SELF_CHECK // Update the newly discovered face and its endpoints. *searchsh = neighsh; pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); if (pc == searchpt) { cudamesh_senext2self(*searchsh); loc = ONVERTEX; break; } step++; //if (step > 1000) // invalid point coordinates //{ // printf("Subface %d, %d - %lf, %lf, %lf\n", eleidx, threadId, searchpt[0], searchpt[1], searchpt[2]); //} } // while (1) } d_pointlocation[threadId] = loc; if (!(loc == ONFACE || loc == ONEDGE)) { if(numofsplittablesubsegs == 0) d_tristatus[subfaceid].setAbortive(true); // mark the encroached subface rather than the located one d_threadmarker[threadId] = -1; return; } tethandle searchtet; cudamesh_stpivot(*searchsh, searchtet, d_tri2tetlist); d_searchtet[threadId] = searchtet; } else { int tetid = eleidx; tethandle* searchtet = d_searchtet + threadId; REAL* searchpt = d_steinerptlist + 3 * threadId; unsigned long* randomseed = d_randomseed + pos; // Check if coordinates are valid if (cudamesh_isInvalid(searchpt[0]) || cudamesh_isInvalid(searchpt[1]) || cudamesh_isInvalid(searchpt[2])) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[threadId] = -1; return; } REAL *torg, *tdest, *tapex, *toppo; enum { ORGMOVE, DESTMOVE, APEXMOVE } nextmove; REAL ori, oriorg, oridest, oriapex; enum locateresult loc = OUTSIDE; int t1ver; int s; int step = 1; // Init searchtet searchtet->id = tetid; searchtet->ver = 11; // Check if we are in the outside of the convex hull. if (cudamesh_ishulltet(*searchtet, d_tetlist)) { // Get its adjacent tet (inside the hull). searchtet->ver = 3; cudamesh_fsymself(*searchtet, d_neighborlist); } // Let searchtet be the face such that 'searchpt' lies above to it. for (searchtet->ver = 0; searchtet->ver < 4; searchtet->ver++) { torg = cudamesh_id2pointlist(cudamesh_org(*searchtet, d_tetlist), d_pointlist); tdest = cudamesh_id2pointlist(cudamesh_dest(*searchtet, d_tetlist), d_pointlist); tapex = cudamesh_id2pointlist(cudamesh_apex(*searchtet, d_tetlist), d_pointlist); ori = cuda_orient3d(torg, tdest, tapex, searchpt); if (ori < 0.0) break; } assert(searchtet->ver != 4); // Walk through tetrahedra to locate the point. while (true) { toppo = cudamesh_id2pointlist(cudamesh_oppo(*searchtet, d_tetlist), d_pointlist); // Check if the vertex is we seek. if (toppo[0] == searchpt[0] && toppo[1] == searchpt[1] && toppo[2] == searchpt[2]) { // Adjust the origin of searchtet to be searchpt. 
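// (esymself/eprevself below only change the handle's version bits,
// re-orienting the same tetrahedron so the matched vertex becomes its origin.)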
cudamesh_esymself(*searchtet); cudamesh_eprevself(*searchtet); loc = ONVERTEX; // return ONVERTEX; break; } // We enter from one of serarchtet's faces, which face do we exit? oriorg = cuda_orient3d(tdest, tapex, toppo, searchpt); oridest = cuda_orient3d(tapex, torg, toppo, searchpt); oriapex = cuda_orient3d(torg, tdest, toppo, searchpt); // Now decide which face to move. It is possible there are more than one // faces are viable moves. If so, randomly choose one. if (oriorg < 0) { if (oridest < 0) { if (oriapex < 0) { // All three faces are possible. s = cudamesh_randomnation(randomseed, 3); // 's' is in {0,1,2}. if (s == 0) { nextmove = ORGMOVE; } else if (s == 1) { nextmove = DESTMOVE; } else { nextmove = APEXMOVE; } } else { // Two faces, opposite to origin and destination, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = ORGMOVE; } else { nextmove = DESTMOVE; } } } else { if (oriapex < 0) { // Two faces, opposite to origin and apex, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = ORGMOVE; } else { nextmove = APEXMOVE; } } else { // Only the face opposite to origin is viable. nextmove = ORGMOVE; } } } else { if (oridest < 0) { if (oriapex < 0) { // Two faces, opposite to destination and apex, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = DESTMOVE; } else { nextmove = APEXMOVE; } } else { // Only the face opposite to destination is viable. nextmove = DESTMOVE; } } else { if (oriapex < 0) { // Only the face opposite to apex is viable. nextmove = APEXMOVE; } else { // The point we seek must be on the boundary of or inside this // tetrahedron. Check for boundary cases. if (oriorg == 0) { // Go to the face opposite to origin. cudamesh_enextesymself(*searchtet); if (oridest == 0) { cudamesh_eprevself(*searchtet); // edge oppo->apex if (oriapex == 0) { // oppo is duplicated with p. loc = ONVERTEX; // return ONVERTEX; break; } loc = ONEDGE; // return ONEDGE; break; } if (oriapex == 0) { cudamesh_enextself(*searchtet); // edge dest->oppo loc = ONEDGE; // return ONEDGE; break; } loc = ONFACE; // return ONFACE; break; } if (oridest == 0) { // Go to the face opposite to destination. cudamesh_eprevesymself(*searchtet); if (oriapex == 0) { cudamesh_eprevself(*searchtet); // edge oppo->org loc = ONEDGE; // return ONEDGE; break; } loc = ONFACE; // return ONFACE; break; } if (oriapex == 0) { // Go to the face opposite to apex cudamesh_esymself(*searchtet); loc = ONFACE; // return ONFACE; break; } loc = INTETRAHEDRON; // return INTETRAHEDRON; break; } } } // Move to the selected face. if (nextmove == ORGMOVE) { cudamesh_enextesymself(*searchtet); } else if (nextmove == DESTMOVE) { cudamesh_eprevesymself(*searchtet); } else { cudamesh_esymself(*searchtet); } // Move to the adjacent tetrahedron (maybe a hull tetrahedron). cudamesh_fsymself(*searchtet, d_neighborlist); if (cudamesh_oppo(*searchtet, d_tetlist) == -1) { loc = OUTSIDE; // return OUTSIDE; break; } // Retreat the three vertices of the base face. 
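// After stepping through the chosen face into the adjacent tetrahedron, the
// base-face vertices are re-read so that the next iteration's orient3d tests
// refer to the face that was just crossed.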
torg = cudamesh_id2pointlist(cudamesh_org(*searchtet, d_tetlist), d_pointlist); tdest = cudamesh_id2pointlist(cudamesh_dest(*searchtet, d_tetlist), d_pointlist); tapex = cudamesh_id2pointlist(cudamesh_apex(*searchtet, d_tetlist), d_pointlist); step++; //if (step > 1000) // Invalid point coordinates //{ // printf("Tet %d, %d - %lf, %lf, %lf\n", eleidx, threadId, searchpt[0], searchpt[1], searchpt[2]); //} } // while (true) d_pointlocation[threadId] = loc; if (loc == ONVERTEX) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[threadId] = -1; } } } // Split encroached segment __device__ int checkseg4split( trihandle *chkseg, int& encpt, REAL* pointlist, int* seglist, tethandle* seg2tetlist, int* tetlist, tethandle* neighborlist ) { REAL ccent[3], len, r; int i; REAL* forg = cudamesh_id2pointlist(cudamesh_sorg(*chkseg, seglist), pointlist); REAL* fdest = cudamesh_id2pointlist(cudamesh_sdest(*chkseg, seglist), pointlist); // Initialize the return values. encpt = -1; len = cudamesh_distance(forg, fdest); r = 0.5 * len; for (i = 0; i < 3; i++) { ccent[i] = 0.5 * (forg[i] + fdest[i]); } // Check if it is encroached. // Comment: There may exist more than one encroaching points of this segment. // The 'encpt' returns the one which is closet to it. tethandle searchtet, spintet; int eapex; REAL d, diff, smdist = 0; int t1ver; cudamesh_sstpivot1(*chkseg, searchtet, seg2tetlist); spintet = searchtet; while (1) { eapex = cudamesh_apex(spintet, tetlist); if (eapex != -1) { d = cudamesh_distance(ccent, cudamesh_id2pointlist(eapex, pointlist)); diff = d - r; if (fabs(diff) / r < EPSILON) diff = 0.0; // Rounding. if (diff < 0) { // This segment is encroached by eapex. if (encpt == -1) { encpt = eapex; smdist = d; } else { // Choose the closet encroaching point. if (d < smdist) { encpt = eapex; smdist = d; } } } } cudamesh_fnextself(spintet, neighborlist); if (spintet.id == searchtet.id) break; } // while (1) if (encpt != -1) { return 1; } return 0; // No need to split it. } __device__ int checkseg4encroach( REAL *pa, REAL* pb, REAL* checkpt ) { // Check if the point lies inside the diametrical sphere of this seg. 
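// A point lies strictly inside the diametral sphere of [pa, pb] exactly when
// the angle pa-checkpt-pb is obtuse, i.e. when
// dot(pa - checkpt, pb - checkpt) < 0, which is the test performed below.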
REAL v1[3], v2[3]; v1[0] = pa[0] - checkpt[0]; v1[1] = pa[1] - checkpt[1]; v1[2] = pa[2] - checkpt[2]; v2[0] = pb[0] - checkpt[0]; v2[1] = pb[1] - checkpt[1]; v2[2] = pb[2] - checkpt[2]; if (cudamesh_dot(v1, v2) < 0) return 1; return 0; } __global__ void kernelMarkAllEncsegs( REAL * d_pointlist, int* d_seglist, tethandle* d_seg2tetlist, int* d_segencmarker, int* d_tetlist, tethandle* d_neighborlist, int numofsubseg ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofsubseg) return; trihandle chkseg(pos, 0); int encpt; checkseg4split( &chkseg, encpt, d_pointlist, d_seglist, d_seg2tetlist, d_tetlist, d_neighborlist); d_segencmarker[pos] = encpt; } __device__ void projectpoint2edge( REAL* p, REAL* e1, REAL* e2, REAL* prj ) { REAL v1[3], v2[3]; REAL len, l_p; v1[0] = e2[0] - e1[0]; v1[1] = e2[1] - e1[1]; v1[2] = e2[2] - e1[2]; v2[0] = p[0] - e1[0]; v2[1] = p[1] - e1[1]; v2[2] = p[2] - e1[2]; len = sqrt(cudamesh_dot(v1, v1)); assert(len != 0.0); v1[0] /= len; v1[1] /= len; v1[2] /= len; l_p = cudamesh_dot(v1, v2); prj[0] = e1[0] + l_p * v1[0]; prj[1] = e1[1] + l_p * v1[1]; prj[2] = e1[2] + l_p * v1[2]; } __global__ void kernelComputeSteinerPoint_Seg( int* d_threadlist, REAL* d_pointlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, int* d_seg2parentlist, int* d_segparentlist, int* d_segencmarker, int* d_encseglist, REAL* d_steinerptlist, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int encsegidx = d_encseglist[threadId]; trihandle seg(encsegidx, 0); REAL* ei = cudamesh_id2pointlist(cudamesh_sorg(seg, d_seglist), d_pointlist); REAL* ej = cudamesh_id2pointlist(cudamesh_sdest(seg, d_seglist), d_pointlist); int adjflag = 0, i; REAL* steinpt = cudamesh_id2pointlist(threadId, d_steinerptlist); int refptidx = d_segencmarker[encsegidx]; assert(refptidx >= 0); if (refptidx != MAXINT) { REAL* refpt = cudamesh_id2pointlist(refptidx, d_pointlist); REAL L, L1, t; if (d_pointtypelist[refptidx] == FREESEGVERTEX) { trihandle parentseg; parentseg = d_point2trilist[refptidx]; int sidx1 = d_seg2parentlist[parentseg.id]; int idx_pi = d_segparentlist[sidx1 * 2]; int idx_pj = d_segparentlist[sidx1 * 2 + 1]; REAL* far_pi = cudamesh_id2pointlist(idx_pi, d_pointlist); REAL* far_pj = cudamesh_id2pointlist(idx_pj, d_pointlist); int sidx2 = d_seg2parentlist[seg.id]; int idx_ei = d_segparentlist[sidx2 * 2]; int idx_ej = d_segparentlist[sidx2 * 2 + 1]; REAL* far_ei = cudamesh_id2pointlist(idx_ei, d_pointlist); REAL* far_ej = cudamesh_id2pointlist(idx_ej, d_pointlist); if ((idx_pi == idx_ei) || (idx_pj == idx_ei)) { // Create a Steiner point at the intersection of the segment // [far_ei, far_ej] and the sphere centered at far_ei with // radius |far_ei - refpt|. L = cudamesh_distance(far_ei, far_ej); L1 = cudamesh_distance(far_ei, refpt); t = L1 / L; for (i = 0; i < 3; i++) { steinpt[i] = far_ei[i] + t * (far_ej[i] - far_ei[i]); } adjflag = 1; } else if ((idx_pi == idx_ej) || (idx_pj == idx_ej)) { L = cudamesh_distance(far_ei, far_ej); L1 = cudamesh_distance(far_ej, refpt); t = L1 / L; for (i = 0; i < 3; i++) { steinpt[i] = far_ej[i] + t * (far_ei[i] - far_ej[i]); } adjflag = 1; } else { // Cut the segment by the projection point of refpt. projectpoint2edge(refpt, ei, ej, steinpt); } } else { // Cut the segment by the projection point of refpt. projectpoint2edge(refpt, ei, ej, steinpt); } // Make sure that steinpt is not too close to ei and ej. 
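// t below is the relative position of steinpt along [ei, ej] (0 at ei,
// 1 at ej); if the computed split point falls within the outer 20% at either
// end, the segment is simply bisected at its midpoint instead.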
L = cudamesh_distance(ei, ej); L1 = cudamesh_distance(steinpt, ei); t = L1 / L; if ((t < 0.2) || (t > 0.8)) { // Split the point at the middle. for (i = 0; i < 3; i++) { steinpt[i] = ei[i] + 0.5 * (ej[i] - ei[i]); } } } else { // Split the point at the middle. for (i = 0; i < 3; i++) { steinpt[i] = ei[i] + 0.5 * (ej[i] - ei[i]); } } } // Split encroached subface __device__ int checkface4split( trihandle *chkfac, int& encpt, REAL* pointlist, int* trifacelist, tethandle* tri2tetlist, int* tetlist ) { REAL *pa, *pb, *pc; REAL area, rd, len; REAL A[4][4], rhs[4], cent[3], D; int indx[4]; int i; encpt = -1; pa = cudamesh_id2pointlist(cudamesh_sorg(*chkfac, trifacelist), pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*chkfac, trifacelist), pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*chkfac, trifacelist), pointlist); // Compute the coefficient matrix A (3x3). A[0][0] = pb[0] - pa[0]; A[0][1] = pb[1] - pa[1]; A[0][2] = pb[2] - pa[2]; // vector V1 (pa->pb) A[1][0] = pc[0] - pa[0]; A[1][1] = pc[1] - pa[1]; A[1][2] = pc[2] - pa[2]; // vector V2 (pa->pc) cudamesh_cross(A[0], A[1], A[2]); // vector V3 (V1 X V2) area = 0.5 * sqrt(cudamesh_dot(A[2], A[2])); // The area of [a,b,c]. // Compute the right hand side vector b (3x1). rhs[0] = 0.5 * cudamesh_dot(A[0], A[0]); // edge [a,b] rhs[1] = 0.5 * cudamesh_dot(A[1], A[1]); // edge [a,c] rhs[2] = 0.0; // Solve the 3 by 3 equations use LU decomposition with partial // pivoting and backward and forward substitute. if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // A degenerate triangle. //printf("checkface4split: A degenerate subface!\n"); encpt = -1; return -1; } cudamesh_lu_solve(A, 3, indx, rhs, 0); cent[0] = pa[0] + rhs[0]; cent[1] = pa[1] + rhs[1]; cent[2] = pa[2] + rhs[2]; rd = sqrt(rhs[0] * rhs[0] + rhs[1] * rhs[1] + rhs[2] * rhs[2]); tethandle searchtet; REAL smlen = 0; // Check if this subface is locally encroached. for (i = 0; i < 2; i++) { cudamesh_stpivot(*chkfac, searchtet, tri2tetlist); if (!cudamesh_ishulltet(searchtet, tetlist)) { len = cudamesh_distance( cudamesh_id2pointlist(cudamesh_oppo(searchtet, tetlist), pointlist), cent); if ((fabs(len - rd) / rd) < EPSILON) len = rd;// Rounding. if (len < rd) { if (smlen == 0) { smlen = len; encpt = cudamesh_oppo(searchtet, tetlist); } else { if (len < smlen) { smlen = len; encpt = cudamesh_oppo(searchtet, tetlist); } } } } cudamesh_sesymself(*chkfac); } return encpt != -1; } __device__ int checkface4encroach( REAL *pa, REAL *pb, REAL *pc, REAL *checkpt ) { REAL rd, len, cent[3]; cudamesh_circumsphere(pa, pb, pc, NULL, cent, &rd); assert(rd != 0); len = cudamesh_distance(cent, checkpt); if ((fabs(len - rd) / rd) < EPSILON) len = rd; // Rounding. if (len < rd) { // The point lies inside the circumsphere of this face. return 1; // Encroached. 
} return 0; } __global__ void kernelMarkAllEncsubfaces( REAL * d_pointlist, int* d_trifacelist, tethandle* d_tri2tetlist, int* d_subfaceencmarker, int* d_tetlist, int numofsubface ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofsubface) return; trihandle chkfac(pos, 0); int encpt; checkface4split( &chkfac, encpt, d_pointlist, d_trifacelist, d_tri2tetlist, d_tetlist); d_subfaceencmarker[pos] = encpt; } __global__ void kernelComputeSteinerPoint_Subface( REAL* d_pointlist, int* d_trifacelist, tristatus* d_tristatus, int* d_encsubfacelist, REAL* d_steinerptlist, int numofencsubface ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofencsubface) return; int encsubfaceidx = d_encsubfacelist[pos]; REAL *pa, *pb, *pc; REAL area, rd, len; REAL A[4][4], rhs[4], D; int indx[4]; int i; trihandle chkfac(encsubfaceidx, 0); REAL* steinpt = cudamesh_id2pointlist(pos, d_steinerptlist); pa = cudamesh_id2pointlist(cudamesh_sorg(chkfac, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(chkfac, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(chkfac, d_trifacelist), d_pointlist); // Compute the coefficient matrix A (3x3). A[0][0] = pb[0] - pa[0]; A[0][1] = pb[1] - pa[1]; A[0][2] = pb[2] - pa[2]; // vector V1 (pa->pb) A[1][0] = pc[0] - pa[0]; A[1][1] = pc[1] - pa[1]; A[1][2] = pc[2] - pa[2]; // vector V2 (pa->pc) cudamesh_cross(A[0], A[1], A[2]); // vector V3 (V1 X V2) area = 0.5 * sqrt(cudamesh_dot(A[2], A[2])); // The area of [a,b,c]. // Compute the right hand side vector b (3x1). rhs[0] = 0.5 * cudamesh_dot(A[0], A[0]); // edge [a,b] rhs[1] = 0.5 * cudamesh_dot(A[1], A[1]); // edge [a,c] rhs[2] = 0.0; // Solve the 3 by 3 equations use LU decomposition with partial // pivoting and backward and forward substitute. if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // A degenerate triangle. printf("kernelComputeSteinerPointOnSubface: A degenerate subface. This should not happen!\n"); } cudamesh_lu_solve(A, 3, indx, rhs, 0); steinpt[0] = pa[0] + rhs[0]; steinpt[1] = pa[1] + rhs[1]; steinpt[2] = pa[2] + rhs[2]; } // Split bad tets __device__ int checktet4split( tethandle* chktet, REAL* pointlist, int* tetlist, REAL minratio ) { int ipa, ipb, ipc, ipd; REAL *pa, *pb, *pc, *pd; REAL vda[3], vdb[3], vdc[3]; REAL vab[3], vbc[3], vca[3]; REAL elen[6]; REAL smlen = 0, rd; REAL A[4][4], rhs[4], D; int indx[4]; int i; ipd = tetlist[4*(*chktet).id + 3]; if (ipd == -1) { return 0; // Do not split a hull tet. } ipa = tetlist[4*(*chktet).id + 0]; ipb = tetlist[4*(*chktet).id + 1]; ipc = tetlist[4*(*chktet).id + 2]; pa = cudamesh_id2pointlist(ipa, pointlist); pb = cudamesh_id2pointlist(ipb, pointlist); pc = cudamesh_id2pointlist(ipc, pointlist); pd = cudamesh_id2pointlist(ipd, pointlist); // Get the edge vectors vda: d->a, vdb: d->b, vdc: d->c. // Set the matrix A = [vda, vdb, vdc]^T. for (i = 0; i < 3; i++) A[0][i] = vda[i] = pa[i] - pd[i]; for (i = 0; i < 3; i++) A[1][i] = vdb[i] = pb[i] - pd[i]; for (i = 0; i < 3; i++) A[2][i] = vdc[i] = pc[i] - pd[i]; // Get the other edge vectors. for (i = 0; i < 3; i++) vab[i] = pb[i] - pa[i]; for (i = 0; i < 3; i++) vbc[i] = pc[i] - pb[i]; for (i = 0; i < 3; i++) vca[i] = pa[i] - pc[i]; if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // A degenerated tet (vol = 0). // This is possible due to the use of exact arithmetic. We temporarily // leave this tet. It should be fixed by mesh optimization. return 0; } // Check the radius-edge ratio. Set by -q#. 
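// The quality measure used here is the circumradius-to-shortest-edge ratio
// rd / smlen; a tetrahedron whose ratio exceeds the user bound passed in as
// 'minratio' is reported as needing a split.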
if (minratio > 0) { // Calculate the circumcenter and radius of this tet. rhs[0] = 0.5 * cudamesh_dot(vda, vda); rhs[1] = 0.5 * cudamesh_dot(vdb, vdb); rhs[2] = 0.5 * cudamesh_dot(vdc, vdc); cudamesh_lu_solve(A, 3, indx, rhs, 0); rd = sqrt(cudamesh_dot(rhs, rhs)); // Calculate the shortest edge length. elen[0] = cudamesh_dot(vda, vda); elen[1] = cudamesh_dot(vdb, vdb); elen[2] = cudamesh_dot(vdc, vdc); elen[3] = cudamesh_dot(vab, vab); elen[4] = cudamesh_dot(vbc, vbc); elen[5] = cudamesh_dot(vca, vca); smlen = elen[0]; //sidx = 0; for (i = 1; i < 6; i++) { if (smlen > elen[i]) { smlen = elen[i]; //sidx = i; } } smlen = sqrt(smlen); D = rd / smlen; if (D > minratio) { // A bad radius-edge ratio. return 1; } } return 0; } __global__ void kernelMarkAllBadtets( REAL* d_pointlist, int* d_tetlist, tetstatus* d_tetstatus, REAL minratio, int numofbadtet ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofbadtet) return; tethandle chktet(pos, 11); if (checktet4split(&chktet, d_pointlist, d_tetlist, minratio)) { d_tetstatus[pos].setBad(true); } } __global__ void kernelComputeSteinerPoint_Tet( int* d_tetidlist, REAL* d_insertptlist, int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tetstatus* d_tetstatus, int* d_priority, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int tetid = d_tetidlist[threadId]; REAL* steinpt = d_insertptlist + 3 * threadId; int ipa, ipb, ipc, ipd; REAL *pa, *pb, *pc, *pd; REAL vda[3], vdb[3], vdc[3]; REAL vab[3], vbc[3], vca[3]; REAL elen[6]; REAL smlen = 0, rd; REAL A[4][4], rhs[4], D; int indx[4]; int i; ipd = d_tetlist[4 * tetid + 3]; if (ipd == -1) { // This should not happend printf("Thread #%d - Error: Try to split a hull tet #%d!\n", threadId, tetid); return; } ipa = d_tetlist[4 * tetid + 0]; ipb = d_tetlist[4 * tetid + 1]; ipc = d_tetlist[4 * tetid + 2]; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); pc = cudamesh_id2pointlist(ipc, d_pointlist); pd = cudamesh_id2pointlist(ipd, d_pointlist); // Get the edge vectors vda: d->a, vdb: d->b, vdc: d->c. // Set the matrix A = [vda, vdb, vdc]^T. for (i = 0; i < 3; i++) A[0][i] = vda[i] = pa[i] - pd[i]; for (i = 0; i < 3; i++) A[1][i] = vdb[i] = pb[i] - pd[i]; for (i = 0; i < 3; i++) A[2][i] = vdc[i] = pc[i] - pd[i]; // Get the other edge vectors. for (i = 0; i < 3; i++) vab[i] = pb[i] - pa[i]; for (i = 0; i < 3; i++) vbc[i] = pc[i] - pb[i]; for (i = 0; i < 3; i++) vca[i] = pa[i] - pc[i]; //if (cuda_orient3d(pa, pb, pc, pd) < 0.001 && cuda_orient3d(pa, pb, pc, pd) > -0.001) //{ // Nearly degenerated tet. // Set to abortive to avoid invalid point coordinate //d_tetstatus[tetid].setAbortive(true); //d_threadmarker[threadId] = -1; //return; //} if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // This should not happend //printf("Thread #%d - Error: Try to split a degenerated tet #%d!\n", threadId, tetid); d_tetstatus[tetid].setAbortive(true); d_threadmarker[threadId] = -1; return; } // Calculate the circumcenter and radius of this tet. rhs[0] = 0.5 * cudamesh_dot(vda, vda); rhs[1] = 0.5 * cudamesh_dot(vdb, vdb); rhs[2] = 0.5 * cudamesh_dot(vdc, vdc); cudamesh_lu_solve(A, 3, indx, rhs, 0); for (i = 0; i < 3; i++) { steinpt[i] = pd[i] + rhs[i]; } // set priority //rd = sqrt(cudamesh_dot(rhs, rhs)); //Calculate the shortest edge length. 
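// Note on the priority encoding below: for non-negative IEEE-754 floats the
// bit pattern is monotone in the value, so storing __float_as_int(1/vol)
// lets the per-tet priorities be compared as plain integers.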
elen[0] = cudamesh_dot(vda, vda); elen[1] = cudamesh_dot(vdb, vdb); elen[2] = cudamesh_dot(vdc, vdc); elen[3] = cudamesh_dot(vab, vab); elen[4] = cudamesh_dot(vbc, vbc); elen[5] = cudamesh_dot(vca, vca); //Use radius-to-shortest-edge radio as priority //smlen = elen[0]; //sidx = 0; //for (i = 1; i < 6; i++) { // if (smlen > elen[i]) { // smlen = elen[i]; //sidx = i; // } //} //smlen = sqrt(smlen); //d_priority[threadId] = __float_as_int((float)(smlen / rd)); // Use volume as priority // Use heron-type formula to compute the volume of a tetrahedron // https://en.wikipedia.org/wiki/Heron%27s_formula //if (cuda_orient3d(pa, pb, pc, pd) < 0.001 && cuda_orient3d(pa, pb, pc, pd) > -0.001) //{ // d_priority[threadId] = MAXINT; //} //else { REAL U, V, W, u, v, w; // first three form a triangle; u opposite to U and so on REAL X, x, Y, y, Z, z; REAL a, b, c, d; U = sqrt(elen[3]); //ab V = sqrt(elen[4]); //bc W = sqrt(elen[5]); //ca u = sqrt(elen[2]); //dc v = sqrt(elen[0]); //da w = sqrt(elen[1]); //db X = (w - U + v)*(U + v + w); x = (U - v + w)*(v - w + U); Y = (u - V + w)*(V + w + u); y = (V - w + u)*(w - u + V); Z = (v - W + u)*(W + u + v); z = (W - u + v)*(u - v + W); a = sqrt(x*Y*Z); b = sqrt(y*Z*X); c = sqrt(z*X*Y); d = sqrt(x*y*z); REAL vol = sqrt((-a + b + c + d)*(a - b + c + d)*(a + b - c + d)*(a + b + c - d)) / (192 * u*v*w); d_priority[threadId] = __float_as_int((float)(1 / vol)); //d_priority[threadId] = __float_as_int((float)(1 / rd)); } //if (cuda_orient3d(pa, pb, pc, pd) < 0.001 && cuda_orient3d(pa, pb, pc, pd) > -0.001) //{ // if(pos < 100) // printf("%d ", d_priority[threadId]); //} //if (pos < 100) // printf("Tet #%d: (%lf, %lf, %lf), (%lf, %lf, %lf), (%lf, %lf, %lf), (%lf, %lf, %lf) | (%lf, %lf, %lf) | %lf\n", // tetid, // pa[0], pa[1], pa[2], // pb[0], pb[1], pb[2], // pc[0], pc[1], pc[2], // pd[0], pd[1], pd[2], // steinpt[0], steinpt[1], steinpt[2], // cuda_orient3d(pa, pb, pc, pd)); //if (pos < 100) // printf("%d ", d_priority[threadId]); } __global__ void kernelCompactSeg( int* d_seglist, int* d_sizes, int* d_indices, int* d_list, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_sizes[pos] == 0) return; int index = d_indices[pos]; d_list[2 * index + 0] = d_seglist[3 * pos + 0]; d_list[2 * index + 1] = d_seglist[3 * pos + 1]; } __global__ void kernelCompactTriface( int* d_trifacelist, int* d_sizes, int* d_indices, int* d_list, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_sizes[pos] == 0) return; int index = d_indices[pos]; d_list[3 * index + 0] = d_trifacelist[3 * pos + 0]; d_list[3 * index + 1] = d_trifacelist[3 * pos + 1]; d_list[3 * index + 2] = d_trifacelist[3 * pos + 2]; } __global__ void kernelCompactTet_Phase1( int* d_tetlist, tetstatus* d_tetstatus, int* d_sizes, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_tetstatus[pos].isEmpty()) d_sizes[pos] = 0; if (d_tetlist[4 * pos + 3] == -1) d_sizes[pos] = 0; } __global__ void kernelCompactTet_Phase2( int* d_tetlist, int* d_sizes, int* d_indices, int* d_list, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_sizes[pos] == 0) return; int index = d_indices[pos]; d_list[4 * index + 0] = d_tetlist[4 * pos + 0]; d_list[4 * index + 1] = d_tetlist[4 * pos + 1]; d_list[4 * index + 2] = d_tetlist[4 * pos + 2]; d_list[4 * index + 3] = d_tetlist[4 * pos + 3]; }
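// A minimal host-side sketch (not part of the original file) of how the two
// compaction kernels above could be driven. It assumes d_sizes has been
// pre-filled with 1s, that <thrust/scan.h> is available, and that the block
// size of 256 and the function/parameter names are illustrative only.
/*
void compactTets(int* d_tetlist, tetstatus* d_tetstatus, int numoftet,
                 int* d_sizes, int* d_indices, int* d_outlist)
{
    int block = 256;
    int grid = (numoftet + block - 1) / block;
    // Zero the marks of empty and hull tets (live tets keep their 1s).
    kernelCompactTet_Phase1<<<grid, block>>>(d_tetlist, d_tetstatus, d_sizes, numoftet);
    // An exclusive prefix sum of the marks gives each surviving tet its output slot.
    thrust::exclusive_scan(thrust::device_ptr<int>(d_sizes),
                           thrust::device_ptr<int>(d_sizes) + numoftet,
                           thrust::device_ptr<int>(d_indices));
    // Scatter the surviving tets contiguously into d_outlist.
    kernelCompactTet_Phase2<<<grid, block>>>(d_tetlist, d_sizes, d_indices, d_outlist, numoftet);
}
*/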
81c15d8ed0d5bd82e6cc84bf021ecd3055c7804c.cu
// This file is adapted from TetGen #include "CudaMesh.h" #include "CudaPredicates.h" #include <thrust/device_ptr.h> #include <stdio.h> #include <assert.h> #include <vector> /////////////////////////////////////////////////////////////////////////////// // // // Variables // // // /////////////////////////////////////////////////////////////////////////////// /* Kernel constants */ __constant__ REAL raw_kernelconstants[2]; REAL host_kernelconstants[2]; /* Helpers */ __device__ uint64 cudamesh_encodeUInt64Priority(int priority, int index) { return (((uint64)priority) << 32) + index; } __device__ int cudamesh_getUInt64PriorityIndex(uint64 priority) { return (priority & 0xFFFFFFFF); } __device__ int cudamesh_getUInt64Priority(uint64 priority) { return (priority >> 32); } __device__ bool cudamesh_isNearZero(double val) { if (val > -EPSILON && val < EPSILON) return true; else return false; } __device__ bool cudamesh_isInvalid(double val) { if (val > 10000000 || val < -10000000) return true; else return false; } /* Initialize fast lookup tables for mesh maniplulation primitives. */ __constant__ int raw_bondtbl[144]; __constant__ int raw_fsymtbl[144]; __constant__ int raw_enexttbl[12]; __constant__ int raw_eprevtbl[12]; __constant__ int raw_enextesymtbl[12]; __constant__ int raw_eprevesymtbl[12]; __constant__ int raw_eorgoppotbl[12]; __constant__ int raw_edestoppotbl[12]; __constant__ int raw_facepivot1[12]; __constant__ int raw_facepivot2[144]; __constant__ int raw_tsbondtbl[72]; __constant__ int raw_stbondtbl[72]; __constant__ int raw_tspivottbl[72]; __constant__ int raw_stpivottbl[72]; int host_bondtbl[144] = { 0, }; int host_fsymtbl[144] = { 0, }; int host_enexttbl[12] = { 0, }; int host_eprevtbl[12] = { 0, }; int host_enextesymtbl[12] = { 0, }; int host_eprevesymtbl[12] = { 0, }; int host_eorgoppotbl[12] = { 0, }; int host_edestoppotbl[12] = { 0, }; int host_facepivot1[12] = { 0, }; int host_facepivot2[144] = { 0, }; int host_tsbondtbl[72] = { 0, }; int host_stbondtbl[72] = { 0, }; int host_tspivottbl[72] = { 0, }; int host_stpivottbl[72] = { 0, }; // Table 'esymtbl' takes an directed edge (version) as input, returns the // inversed edge (version) of it. __constant__ int raw_esymtbl[12]; int host_esymtbl[12] = { 9, 6, 11, 4, 3, 7, 1, 5, 10, 0, 8, 2 }; // The following four tables give the 12 permutations of the set {0,1,2,3}. __constant__ int raw_orgpivot[12]; __constant__ int raw_destpivot[12]; __constant__ int raw_apexpivot[12]; __constant__ int raw_oppopivot[12]; int host_orgpivot[12] = { 3, 3, 1, 1, 2, 0, 0, 2, 1, 2, 3, 0 }; int host_destpivot[12] = { 2, 0, 0, 2, 1, 2, 3, 0, 3, 3, 1, 1 }; int host_apexpivot[12] = { 1, 2, 3, 0, 3, 3, 1, 1, 2, 0, 0, 2 }; int host_oppopivot[12] = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 }; // The twelve versions correspond to six undirected edges. The following two // tables map a version to an undirected edge and vice versa. __constant__ int raw_ver2edge[12]; __constant__ int raw_edge2ver[6]; int host_ver2edge[12] = { 0, 1, 2, 3, 3, 5, 1, 5, 4, 0, 4, 2 }; int host_edge2ver[6] = { 0, 1, 2, 3, 8, 5 }; // Edge versions whose apex or opposite may be dummypoint. __constant__ int raw_epivot[12]; int host_epivot[12] = { 4, 5, 2, 11, 4, 5, 2, 11, 4, 5, 2, 11 }; // Table 'snextpivot' takes an edge version as input, returns the next edge // version in the same edge ring. __constant__ int raw_snextpivot[6]; int host_snextpivot[6] = { 2, 5, 4, 1, 0, 3 }; // The following three tables give the 6 permutations of the set {0,1,2}. 
// An offset 3 is added to each element for a direct access of the points // in the triangle data structure. __constant__ int raw_sorgpivot[6]; __constant__ int raw_sdestpivot[6]; __constant__ int raw_sapexpivot[6]; int host_sorgpivot[6] = { 0, 1, 1, 2, 2, 0 }; int host_sdestpivot[6] = { 1, 0, 2, 1, 0, 2 }; int host_sapexpivot[6] = { 2, 2, 0, 0, 1, 1 }; /* Initialize Geometric Predicates arrays*/ REAL host_constData[17]; int host_constOptions[2]; /////////////////////////////////////////////////////////////////////////////// // // // Geometric helpers // // // /////////////////////////////////////////////////////////////////////////////// __device__ bool cudamesh_lu_decmp(REAL lu[4][4], int n, int* ps, REAL* d, int N) { REAL scales[4]; REAL pivot, biggest, mult, tempf; int pivotindex = 0; int i, j, k; *d = 1.0; // No row interchanges yet. for (i = N; i < n + N; i++) { // For each row. // Find the largest element in each row for row equilibration biggest = 0.0; for (j = N; j < n + N; j++) if (biggest < (tempf = fabs(lu[i][j]))) biggest = tempf; if (biggest != 0.0) scales[i] = 1.0 / biggest; else { scales[i] = 0.0; return false; // Zero row: singular matrix. } ps[i] = i; // Initialize pivot sequence. } for (k = N; k < n + N - 1; k++) { // For each column. // Find the largest element in each column to pivot around. biggest = 0.0; for (i = k; i < n + N; i++) { if (biggest < (tempf = fabs(lu[ps[i]][k]) * scales[ps[i]])) { biggest = tempf; pivotindex = i; } } if (biggest == 0.0) { return false; // Zero column: singular matrix. } if (pivotindex != k) { // Update pivot sequence. j = ps[k]; ps[k] = ps[pivotindex]; ps[pivotindex] = j; *d = -(*d); // ...and change the parity of d. } // Pivot, eliminating an extra variable each time pivot = lu[ps[k]][k]; for (i = k + 1; i < n + N; i++) { lu[ps[i]][k] = mult = lu[ps[i]][k] / pivot; if (mult != 0.0) { for (j = k + 1; j < n + N; j++) lu[ps[i]][j] -= mult * lu[ps[k]][j]; } } } // (lu[ps[n + N - 1]][n + N - 1] == 0.0) ==> A is singular. return lu[ps[n + N - 1]][n + N - 1] != 0.0; } __device__ void cudamesh_lu_solve(REAL lu[4][4], int n, int* ps, REAL* b, int N) { int i, j; REAL X[4], dot; for (i = N; i < n + N; i++) X[i] = 0.0; // Vector reduction using U triangular matrix. for (i = N; i < n + N; i++) { dot = 0.0; for (j = N; j < i + N; j++) dot += lu[ps[i]][j] * X[j]; X[i] = b[ps[i]] - dot; } // Back substitution, in L triangular matrix. for (i = n + N - 1; i >= N; i--) { dot = 0.0; for (j = i + 1; j < n + N; j++) dot += lu[ps[i]][j] * X[j]; X[i] = (X[i] - dot) / lu[ps[i]][i]; } for (i = N; i < n + N; i++) b[i] = X[i]; } __device__ bool cudamesh_circumsphere(REAL* pa, REAL* pb, REAL* pc, REAL* pd, REAL* cent, REAL* radius) { REAL A[4][4], rhs[4], D; int indx[4]; // Compute the coefficient matrix A (3x3). A[0][0] = pb[0] - pa[0]; A[0][1] = pb[1] - pa[1]; A[0][2] = pb[2] - pa[2]; A[1][0] = pc[0] - pa[0]; A[1][1] = pc[1] - pa[1]; A[1][2] = pc[2] - pa[2]; if (pd != NULL) { A[2][0] = pd[0] - pa[0]; A[2][1] = pd[1] - pa[1]; A[2][2] = pd[2] - pa[2]; } else { cudamesh_cross(A[0], A[1], A[2]); } // Compute the right hand side vector b (3x1). rhs[0] = 0.5 * cudamesh_dot(A[0], A[0]); rhs[1] = 0.5 * cudamesh_dot(A[1], A[1]); if (pd != NULL) { rhs[2] = 0.5 * cudamesh_dot(A[2], A[2]); } else { rhs[2] = 0.0; } // Solve the 3 by 3 equations use LU decomposition with partial pivoting // and backward and forward substitute.. 
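// The solution of A x = rhs is the offset of the circumcenter from pa; when
// pd is NULL the third row of A is the face normal and rhs[2] = 0, which
// constrains the center to the plane of triangle [pa, pb, pc].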
if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { if (radius != (REAL *)NULL) *radius = 0.0; return false; } cudamesh_lu_solve(A, 3, indx, rhs, 0); if (cent != (REAL *)NULL) { cent[0] = pa[0] + rhs[0]; cent[1] = pa[1] + rhs[1]; cent[2] = pa[2] + rhs[2]; } if (radius != (REAL *)NULL) { *radius = sqrt(rhs[0] * rhs[0] + rhs[1] * rhs[1] + rhs[2] * rhs[2]); } return true; } __device__ void cudamesh_facenormal(REAL* pa, REAL* pb, REAL* pc, REAL *n, int pivot, REAL* lav) { REAL v1[3], v2[3], v3[3], *pv1, *pv2; REAL L1, L2, L3; v1[0] = pb[0] - pa[0]; // edge vector v1: a->b v1[1] = pb[1] - pa[1]; v1[2] = pb[2] - pa[2]; v2[0] = pa[0] - pc[0]; // edge vector v2: c->a v2[1] = pa[1] - pc[1]; v2[2] = pa[2] - pc[2]; // Default, normal is calculated by: v1 x (-v2) (see Fig. fnormal). if (pivot > 0) { // Choose edge vectors by Burdakov's algorithm. v3[0] = pc[0] - pb[0]; // edge vector v3: b->c v3[1] = pc[1] - pb[1]; v3[2] = pc[2] - pb[2]; L1 = cudamesh_dot(v1, v1); L2 = cudamesh_dot(v2, v2); L3 = cudamesh_dot(v3, v3); // Sort the three edge lengths. if (L1 < L2) { if (L2 < L3) { pv1 = v1; pv2 = v2; // n = v1 x (-v2). } else { pv1 = v3; pv2 = v1; // n = v3 x (-v1). } } else { if (L1 < L3) { pv1 = v1; pv2 = v2; // n = v1 x (-v2). } else { pv1 = v2; pv2 = v3; // n = v2 x (-v3). } } if (lav) { // return the average edge length. *lav = (sqrt(L1) + sqrt(L2) + sqrt(L3)) / 3.0; } } else { pv1 = v1; pv2 = v2; // n = v1 x (-v2). } // Calculate the face normal. cudamesh_cross(pv1, pv2, n); // Inverse the direction; n[0] = -n[0]; n[1] = -n[1]; n[2] = -n[2]; } __device__ void cudamesh_calculateabovepoint4(REAL* pa, REAL* pb, REAL* pc, REAL* pd, REAL* abovept) { REAL n1[3], n2[3], *norm; REAL len, len1, len2; // Select a base. cudamesh_facenormal(pa, pb, pc, n1, 1, NULL); len1 = sqrt(cudamesh_dot(n1, n1)); cudamesh_facenormal(pa, pb, pd, n2, 1, NULL); len2 = sqrt(cudamesh_dot(n2, n2)); if (len1 > len2) { norm = n1; len = len1; } else { norm = n2; len = len2; } assert(len > 0); norm[0] /= len; norm[1] /= len; norm[2] /= len; len = cudamesh_distance(pa, pb); abovept[0] = pa[0] + len * norm[0]; abovept[1] = pa[1] + len * norm[1]; abovept[2] = pa[2] + len * norm[2]; } __device__ int cudamesh_segsegadjacent( int seg1, int seg2, int* d_seg2parentidxlist, int* d_segparentendpointidxlist ) { int segidx1 = d_seg2parentidxlist[seg1]; int segidx2 = d_seg2parentidxlist[seg2]; if (segidx1 == segidx2) return 0; int pa1 = d_segparentendpointidxlist[segidx1 * 2]; int pb1 = d_segparentendpointidxlist[segidx1 * 2 + 1]; int pa2 = d_segparentendpointidxlist[segidx2 * 2]; int pb2 = d_segparentendpointidxlist[segidx2 * 2 + 1]; if ((pa1 == pa2) || (pa1 == pb2) || (pb1 == pa2) || (pb1 == pb2)) return 1; return 0; } __device__ int cudamesh_segfacetadjacent( int subseg, int subsh, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist ) { int segidx = d_seg2parentidxlist[subseg]; int pa = d_segparentendpointidxlist[segidx * 2]; int pb = d_segparentendpointidxlist[segidx * 2 + 1]; int fidx = d_tri2parentidxlist[subsh]; int count = 0, i; int p; for (i = d_triid2parentoffsetlist[fidx]; i < d_triid2parentoffsetlist[fidx + 1]; i++) { p = d_triparentendpointidxlist[i]; if (p == pa || p == pb) count++; } return count == 1; } __device__ int cudamesh_facetfacetadjacent( int subsh1, int subsh2, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist ) { int count = 0; int fidx1 = d_tri2parentidxlist[subsh1]; int fidx2 = 
d_tri2parentidxlist[subsh2]; if (fidx1 == fidx2) return 0; int p1, p2; for (int i = d_triid2parentoffsetlist[fidx1]; i < d_triid2parentoffsetlist[fidx1 + 1]; i++) { p1 = d_triparentendpointidxlist[i]; for (int j = d_triid2parentoffsetlist[fidx2]; j < d_triid2parentoffsetlist[fidx2 + 1]; j++) { p2 = d_triparentendpointidxlist[j]; if (p1 == p2) { count++; break; } } } return count > 0; } __device__ REAL cudamesh_tetrahedronvolume( int tetid, REAL* d_pointlist, int* d_tetlist ) { REAL vda[3], vdb[3], vdc[3]; REAL vab[3], vbc[3], vca[3]; REAL elen[6]; int ipa, ipb, ipc, ipd; REAL *pa, *pb, *pc, *pd; int i; ipa = d_tetlist[4 * tetid + 0]; ipb = d_tetlist[4 * tetid + 1]; ipc = d_tetlist[4 * tetid + 2]; ipd = d_tetlist[4 * tetid + 3]; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); pc = cudamesh_id2pointlist(ipc, d_pointlist); pd = cudamesh_id2pointlist(ipd, d_pointlist); // Get the edge vectors vda: d->a, vdb: d->b, vdc: d->c. for (i = 0; i < 3; i++) vda[i] = pa[i] - pd[i]; for (i = 0; i < 3; i++) vdb[i] = pb[i] - pd[i]; for (i = 0; i < 3; i++) vdc[i] = pc[i] - pd[i]; // Get the other edge vectors. for (i = 0; i < 3; i++) vab[i] = pb[i] - pa[i]; for (i = 0; i < 3; i++) vbc[i] = pc[i] - pb[i]; for (i = 0; i < 3; i++) vca[i] = pa[i] - pc[i]; elen[0] = cudamesh_dot(vda, vda); elen[1] = cudamesh_dot(vdb, vdb); elen[2] = cudamesh_dot(vdc, vdc); elen[3] = cudamesh_dot(vab, vab); elen[4] = cudamesh_dot(vbc, vbc); elen[5] = cudamesh_dot(vca, vca); // Use heron-type formula to compute the volume of a tetrahedron // https://en.wikipedia.org/wiki/Heron%27s_formula REAL U, V, W, u, v, w; // first three form a triangle; u opposite to U and so on REAL X, x, Y, y, Z, z; REAL a, b, c, d; U = sqrt(elen[3]); //ab V = sqrt(elen[4]); //bc W = sqrt(elen[5]); //ca u = sqrt(elen[2]); //dc v = sqrt(elen[0]); //da w = sqrt(elen[1]); //db X = (w - U + v)*(U + v + w); x = (U - v + w)*(v - w + U); Y = (u - V + w)*(V + w + u); y = (V - w + u)*(w - u + V); Z = (v - W + u)*(W + u + v); z = (W - u + v)*(u - v + W); a = sqrt(x*Y*Z); b = sqrt(y*Z*X); c = sqrt(z*X*Y); d = sqrt(x*y*z); REAL vol = sqrt((-a + b + c + d)*(a - b + c + d)*(a + b - c + d)*(a + b + c - d)) / (192 * u*v*w); return vol; } /////////////////////////////////////////////////////////////////////////////// // // // Geometric predicates with symbolic perturbation // // // /////////////////////////////////////////////////////////////////////////////// __device__ REAL cudamesh_insphere_s(REAL* pa, REAL* pb, REAL* pc, REAL* pd, REAL* pe, int ia, int ib, int ic, int id, int ie) { REAL sign; // Using fast version means using inexact method. // This may cause robustness issues. // Need to handle later on. sign = cuda_inspherefast(pa, pb, pc, pd, pe); //if (fabs(sign) < EPSILON) // sign = cuda_insphereexact(pa, pb, pc, pd, pe); if (sign != 0.0) { return sign; } // Symbolic perturbation. REAL* pt[5], *swappt; int idx[5], swapidx; REAL oriA, oriB; int swaps, count; int n, i; pt[0] = pa; pt[1] = pb; pt[2] = pc; pt[3] = pd; pt[4] = pe; idx[0] = ia; idx[1] = ib; idx[2] = ic; idx[3] = id; idx[4] = ie; // Sort the five points such that their indices are in the increasing // order. An optimized bubble sort algorithm is used, i.e., it has // the worst case O(n^2) runtime, but it is usually much faster. swaps = 0; // Record the total number of swaps. 
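// The exactly-degenerate (cospherical) case is resolved symbolically: the
// five points are ordered by vertex index, an orient3d test on a subset of
// them decides the sign, and the result is negated when the sort used an
// odd number of swaps.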
n = 5; do { count = 0; n = n - 1; for (i = 0; i < n; i++) { if (idx[i] > idx[i + 1]) { swappt = pt[i]; pt[i] = pt[i + 1]; pt[i + 1] = swappt; swapidx = idx[i]; idx[i] = idx[i + 1]; idx[i + 1] = swapidx; count++; } } swaps += count; break; } while (count > 0); // Continue if some points are swapped. oriA = cuda_orient3d(pt[1], pt[2], pt[3], pt[4]); if (oriA != 0.0) { // Flip the sign if there are odd number of swaps. if ((swaps % 2) != 0) oriA = -oriA; return oriA; } oriB = -cuda_orient3d(pt[0], pt[2], pt[3], pt[4]); assert(oriB != 0.0); // SELF_CHECK // Flip the sign if there are odd number of swaps. if ((swaps % 2) != 0) oriB = -oriB; return oriB; } __device__ REAL cudamesh_incircle3d(REAL* pa, REAL* pb, REAL* pc, REAL* pd) { REAL area2[2], n1[3], n2[3], c[3]; REAL sign, r, d; // Calculate the areas of the two triangles [a, b, c] and [b, a, d]. cudamesh_facenormal(pa, pb, pc, n1, 1, NULL); area2[0] = cudamesh_dot(n1, n1); cudamesh_facenormal(pb, pa, pd, n2, 1, NULL); area2[1] = cudamesh_dot(n2, n2); if (area2[0] > area2[1]) { // Choose [a, b, c] as the base triangle. cudamesh_circumsphere(pa, pb, pc, NULL, c, &r); d = cudamesh_distance(c, pd); } else { // Choose [b, a, d] as the base triangle. if (area2[1] > 0) { cudamesh_circumsphere(pb, pa, pd, NULL, c, &r); d = cudamesh_distance(c, pc); } else { // The four points are collinear. This case only happens on the boundary. return 0; // Return "not inside". } } sign = d - r; if (fabs(sign) / r < EPSILON) { sign = 0; } return sign; } /////////////////////////////////////////////////////////////////////////////// // // // Mesh manipulation primitives // // // /////////////////////////////////////////////////////////////////////////////// /* Initialize tables */ void cudamesh_inittables() { // init arrays int i, j; cudaMemcpyToSymbol(raw_esymtbl, host_esymtbl, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_orgpivot, host_orgpivot, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_destpivot, host_destpivot, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_apexpivot, host_apexpivot, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_oppopivot, host_oppopivot, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_ver2edge, host_ver2edge, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_edge2ver, host_edge2ver, 6 * sizeof(int)); cudaMemcpyToSymbol(raw_epivot, host_epivot, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_snextpivot, host_snextpivot, 6 * sizeof(int)); cudaMemcpyToSymbol(raw_sorgpivot, host_sorgpivot, 6 * sizeof(int)); cudaMemcpyToSymbol(raw_sdestpivot, host_sdestpivot, 6 * sizeof(int)); cudaMemcpyToSymbol(raw_sapexpivot, host_sapexpivot, 6 * sizeof(int)); // i = t1.ver; j = t2.ver; for (i = 0; i < 12; i++) { for (j = 0; j < 12; j++) { host_bondtbl[12* i + j] = (j & 3) + (((i & 12) + (j & 12)) % 12); } } cudaMemcpyToSymbol(raw_bondtbl, host_bondtbl, 144 * sizeof(int)); // i = t1.ver; j = t2.ver for (i = 0; i < 12; i++) { for (j = 0; j < 12; j++) { host_fsymtbl[12 * i + j] = (j + 12 - (i & 12)) % 12; } } cudaMemcpyToSymbol(raw_fsymtbl, host_fsymtbl, 144 * sizeof(int)); for (i = 0; i < 12; i++) { host_facepivot1[i] = (host_esymtbl[i] & 3); } cudaMemcpyToSymbol(raw_facepivot1, host_facepivot1, 12 * sizeof(int)); for (i = 0; i < 12; i++) { for (j = 0; j < 12; j++) { host_facepivot2[12 * i + j] = host_fsymtbl[12 * host_esymtbl[i] + j]; } } cudaMemcpyToSymbol(raw_facepivot2, host_facepivot2, 144 * sizeof(int)); for (i = 0; i < 12; i++) { host_enexttbl[i] = (i + 4) % 12; host_eprevtbl[i] = (i + 8) % 12; } cudaMemcpyToSymbol(raw_enexttbl, host_enexttbl, 12 * sizeof(int)); 
cudaMemcpyToSymbol(raw_eprevtbl, host_eprevtbl, 12 * sizeof(int)); for (i = 0; i < 12; i++) { host_enextesymtbl[i] = host_esymtbl[host_enexttbl[i]]; host_eprevesymtbl[i] = host_esymtbl[host_eprevtbl[i]]; } cudaMemcpyToSymbol(raw_enextesymtbl, host_enextesymtbl, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_eprevesymtbl, host_eprevesymtbl, 12 * sizeof(int)); for (i = 0; i < 12; i++) { host_eorgoppotbl[i] = host_eprevtbl[host_esymtbl[host_enexttbl[i]]]; host_edestoppotbl[i] = host_enexttbl[host_esymtbl[host_eprevtbl[i]]]; } cudaMemcpyToSymbol(raw_eorgoppotbl, host_eorgoppotbl, 12 * sizeof(int)); cudaMemcpyToSymbol(raw_edestoppotbl, host_edestoppotbl, 12 * sizeof(int)); int soffset, toffset; // i = t.ver, j = s.shver for (i = 0; i < 12; i++) { for (j = 0; j < 6; j++) { if ((j & 1) == 0) { soffset = (6 - ((i & 12) >> 1)) % 6; toffset = (12 - ((j & 6) << 1)) % 12; } else { soffset = (i & 12) >> 1; toffset = (j & 6) << 1; } host_tsbondtbl[6 * i + j] = (j & 1) + (((j & 6) + soffset) % 6); host_stbondtbl[6 * i + j] = (i & 3) + (((i & 12) + toffset) % 12); } } cudaMemcpyToSymbol(raw_tsbondtbl, host_tsbondtbl, 72 * sizeof(int)); cudaMemcpyToSymbol(raw_stbondtbl, host_stbondtbl, 72 * sizeof(int)); // i = t.ver, j = s.shver for (i = 0; i < 12; i++) { for (j = 0; j < 6; j++) { if ((j & 1) == 0) { soffset = (i & 12) >> 1; toffset = (j & 6) << 1; } else { soffset = (6 - ((i & 12) >> 1)) % 6; toffset = (12 - ((j & 6) << 1)) % 12; } host_tspivottbl[6 * i + j] = (j & 1) + (((j & 6) + soffset) % 6); host_stpivottbl[6 * i + j] = (i & 3) + (((i & 12) + toffset) % 12); } } cudaMemcpyToSymbol(raw_tspivottbl, host_tspivottbl, 72 * sizeof(int)); cudaMemcpyToSymbol(raw_stpivottbl, host_stpivottbl, 72 * sizeof(int)); } /* Init bounding box*/ void cudamesh_initbbox( int numofpoints, double* pointlist, int& xmax, int& xmin, int& ymax, int& ymin, int& zmax, int& zmin) { int i; double x, y, z; for (i = 0; i < numofpoints; i++) { x = pointlist[3 * i]; y = pointlist[3 * i + 1]; z = pointlist[3 * i + 2]; if (i == 0) { xmin = xmax = x; ymin = ymax = y; zmin = zmax = z; } else { xmin = (x < xmin) ? x : xmin; xmax = (x > xmax) ? x : xmax; ymin = (y < ymin) ? y : ymin; ymax = (y > ymax) ? y : ymax; zmin = (z < zmin) ? z : zmin; zmax = (z > zmax) ? z : zmax; } } } /* Initialize Geometric primitives */ void cudamesh_exactinit(int verbose, int noexact, int nofilter, REAL maxx, REAL maxy, REAL maxz) { REAL half; REAL check, lastcheck; int every_other; every_other = 1; half = 0.5; host_constData[1] /*epsilon*/ = 1.0; host_constData[0] /*splitter*/ = 1.0; check = 1.0; /* Repeatedly divide `epsilon' by two until it is too small to add to */ /* one without causing roundoff. (Also check if the sum is equal to */ /* the previous sum, for machines that round up instead of using exact */ /* rounding. Not that this library will work on such machines anyway. */ do { lastcheck = check; host_constData[1] /*epsilon*/ *= half; if (every_other) { host_constData[0] /*splitter*/ *= 2.0; } every_other = !every_other; check = 1.0 + host_constData[1] /*epsilon*/; } while ((check != 1.0) && (check != lastcheck)); host_constData[0] /*splitter*/ += 1.0; /* Error bounds for orientation and incircle tests. 
*/ host_constData[2] /*resulterrbound*/ = (3.0 + 8.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[3] /*ccwerrboundA*/ = (3.0 + 16.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[4] /*ccwerrboundB*/ = (2.0 + 12.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[5] /*ccwerrboundC*/ = (9.0 + 64.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/ * host_constData[1] /*epsilon*/; host_constData[6] /*o3derrboundA*/ = (7.0 + 56.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[7] /*o3derrboundB*/ = (3.0 + 28.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[8] /*o3derrboundC*/ = (26.0 + 288.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/ * host_constData[1] /*epsilon*/; host_constData[9] /*iccerrboundA*/ = (10.0 + 96.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[10] /*iccerrboundB*/ = (4.0 + 48.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[11] /*iccerrboundC*/ = (44.0 + 576.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/ * host_constData[1] /*epsilon*/; host_constData[12] /*isperrboundA*/ = (16.0 + 224.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[13] /*isperrboundB*/ = (5.0 + 72.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/; host_constData[14] /*isperrboundC*/ = (71.0 + 1408.0 * host_constData[1] /*epsilon*/) * host_constData[1] /*epsilon*/ * host_constData[1] /*epsilon*/; // Set TetGen options. Added by H. Si, 2012-08-23. host_constOptions[0] /*_use_inexact_arith*/ = noexact; host_constOptions[1] /*_use_static_filter*/ = !nofilter; // Calculate the two static filters for orient3d() and insphere() tests. // Added by H. Si, 2012-08-23. // Sort maxx < maxy < maxz. Re-use 'half' for swapping. assert(maxx > 0); assert(maxy > 0); assert(maxz > 0); if (maxx > maxz) { half = maxx; maxx = maxz; maxz = half; } if (maxy > maxz) { half = maxy; maxy = maxz; maxz = half; } else if (maxy < maxx) { half = maxy; maxy = maxx; maxx = half; } host_constData[15] /*o3dstaticfilter*/ = 5.1107127829973299e-15 * maxx * maxy * maxz; host_constData[16] /*ispstaticfilter*/ = 1.2466136531027298e-13 * maxx * maxy * maxz * (maxz * maxz); // Copy to const memory cudaMemcpyToSymbol(raw_constData, host_constData, 17 * sizeof(REAL)); cudaMemcpyToSymbol(raw_constOptions, host_constOptions, 2 * sizeof(int)); //for (int i = 0; i<17; i++) // printf("host_constData[%d] = %g\n", i, host_constData[i]); //for (int i = 0; i < 2; i++) // printf("host_constOptions[%d] = %d\n", i, host_constOptions[i]); } /* Init Kernel constants */ void cudamesh_initkernelconstants(REAL maxx, REAL maxy, REAL maxz) { REAL longest = sqrt(maxx*maxx + maxy*maxy + maxz*maxz); REAL minedgelength = longest*EPSILON; host_kernelconstants[0] = minedgelength; cudaMemcpyToSymbol(raw_kernelconstants, host_kernelconstants, sizeof(REAL)); } /* Primitives for points */ // Convert point index to pointer to pointlist __device__ double* cudamesh_id2pointlist(int index, double* pointlist) { return (pointlist + 3 * index); } /* Primitives for tetrahedron */ // The following primtives get or set the origin, destination, face apex, // or face opposite of an ordered tetrahedron. 
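// A tethandle is a pair (id, ver): 'id' indexes the tetrahedron and 'ver'
// (0..11) selects one of its twelve oriented edge/face versions; the pivot
// tables map a version to the slot of the requested corner inside
// tetlist[4 * id + 0..3].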
__device__ int cudamesh_org(tethandle t, int* tetlist) { return tetlist[4 * t.id + raw_orgpivot[t.ver]]; } __device__ int cudamesh_dest(tethandle t, int* tetlist) { return tetlist[4 * t.id + raw_destpivot[t.ver]]; } __device__ int cudamesh_apex(tethandle t, int* tetlist) { return tetlist[4 * t.id + raw_apexpivot[t.ver]]; } __device__ int cudamesh_oppo(tethandle t, int* tetlist) { return tetlist[4 * t.id + raw_oppopivot[t.ver]]; } __device__ void cudamesh_setorg(tethandle t, int p, int* tetlist) { tetlist[4 * t.id + raw_orgpivot[t.ver]] = p; } __device__ void cudamesh_setdest(tethandle t, int p, int* tetlist) { tetlist[4 * t.id + raw_destpivot[t.ver]] = p; } __device__ void cudamesh_setapex(tethandle t, int p, int* tetlist) { tetlist[4 * t.id + raw_apexpivot[t.ver]] = p; } __device__ void cudamesh_setoppo(tethandle t, int p, int* tetlist) { tetlist[4 * t.id + raw_oppopivot[t.ver]] = p; } // bond() connects two tetrahedra together. (t1,v1) and (t2,v2) must // refer to the same face and the same edge. __device__ void cudamesh_bond(tethandle t1, tethandle t2, tethandle* neighborlist) { neighborlist[4 * t1.id + (t1.ver & 3)] = tethandle(t2.id, raw_bondtbl[12 * t1.ver + t2.ver]); neighborlist[4 * t2.id + (t2.ver & 3)] = tethandle(t1.id, raw_bondtbl[12 * t2.ver + t1.ver]); } // dissolve() a bond (from one side). __device__ void cudamesh_dissolve(tethandle t, tethandle* neighborlist) { neighborlist[4 * t.id + (t.ver & 3)] = tethandle(-1, 11); // empty handle } // esym() finds the reversed edge. It is in the other face of the // same tetrahedron. __device__ void cudamesh_esym(tethandle& t1, tethandle& t2) { (t2).id = (t1).id; (t2).ver = raw_esymtbl[(t1).ver]; } __device__ void cudamesh_esymself(tethandle& t) { (t).ver = raw_esymtbl[(t).ver]; } // enext() finds the next edge (counterclockwise) in the same face. __device__ void cudamesh_enext(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_enexttbl[t1.ver]; } __device__ void cudamesh_enextself(tethandle& t) { t.ver = raw_enexttbl[t.ver]; } // eprev() finds the next edge (clockwise) in the same face. __device__ void cudamesh_eprev(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_eprevtbl[t1.ver]; } __device__ void cudamesh_eprevself(tethandle& t) { t.ver = raw_eprevtbl[t.ver]; } // enextesym() finds the reversed edge of the next edge. It is in the other // face of the same tetrahedron. It is the combination esym() * enext(). __device__ void cudamesh_enextesym(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_enextesymtbl[t1.ver]; } __device__ void cudamesh_enextesymself(tethandle& t) { t.ver = raw_enextesymtbl[t.ver]; } // eprevesym() finds the reversed edge of the previous edge. __device__ void cudamesh_eprevesym(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_eprevesymtbl[t1.ver]; } __device__ void cudamesh_eprevesymself(tethandle& t) { t.ver = raw_eprevesymtbl[t.ver]; } // eorgoppo() Finds the opposite face of the origin of the current edge. // Return the opposite edge of the current edge. __device__ void cudamesh_eorgoppo(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_eorgoppotbl[t1.ver]; } __device__ void cudamesh_eorgoppoself(tethandle& t) { t.ver = raw_eorgoppotbl[t.ver]; } // edestoppo() Finds the opposite face of the destination of the current // edge. Return the opposite edge of the current edge. 
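// Illustrative sketch (assumed helper name; not part of the original kernels).
// The esym()/enext()/eprev() tables used above satisfy two structural
// invariants implied by their documentation: enext() has period three (the
// three edges of a face form a cycle) and esym() is an involution. A
// device-side sanity check over all 12 edge versions could look like this:
__device__ bool cudamesh_edgetables_sanity_sketch()
{
	for (int ver = 0; ver < 12; ver++) {
		if (raw_enexttbl[raw_enexttbl[raw_enexttbl[ver]]] != ver) return false; // enext^3 == identity
		if (raw_esymtbl[raw_esymtbl[ver]] != ver) return false;                 // esym^2 == identity
	}
	return true;
}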
__device__ void cudamesh_edestoppo(tethandle& t1, tethandle& t2) { t2.id = t1.id; t2.ver = raw_edestoppotbl[t1.ver]; } __device__ void cudamesh_edestoppoself(tethandle& t) { t.ver = raw_edestoppotbl[t.ver]; } // fsym() finds the adjacent tetrahedron at the same face and the same edge. __device__ void cudamesh_fsym(tethandle& t1, tethandle& t2, tethandle* neighborlist) { t2 = neighborlist[4 * t1.id + (t1.ver & 3)]; t2.ver = raw_fsymtbl[12 * t1.ver + t2.ver]; } __device__ void cudamesh_fsymself(tethandle& t, tethandle* neighborlist) { char t1ver = t.ver; t = neighborlist[4 * t.id + (t.ver & 3)]; t.ver = raw_fsymtbl[12 * t1ver + t.ver]; } // fnext() finds the next face while rotating about an edge according to // a right-hand rule. The face is in the adjacent tetrahedron. It is // the combination: fsym() * esym(). __device__ void cudamesh_fnext(tethandle& t1, tethandle& t2, tethandle* neighborlist) { t2 = neighborlist[4 * t1.id + raw_facepivot1[t1.ver]]; t2.ver = raw_facepivot2[12 * t1.ver + t2.ver]; } __device__ void cudamesh_fnextself(tethandle& t, tethandle* neighborlist) { char t1ver = t.ver; t = neighborlist[4 * t.id + raw_facepivot1[t.ver]]; t.ver = raw_facepivot2[12 * t1ver + t.ver]; } // ishulltet() tests if t is a hull tetrahedron. __device__ bool cudamesh_ishulltet(tethandle t, int* tetlist) { return tetlist[4 * t.id + 3] == -1; } // isdeadtet() tests if t is a tetrahedron is dead. __device__ bool cudamesh_isdeadtet(tethandle t) { return (t.id == -1); } /* Primitives for subfaces and subsegments. */ // spivot() finds the adjacent subface (s2) for a given subface (s1). // s1 and s2 share at the same edge. __device__ void cudamesh_spivot(trihandle& s1, trihandle& s2, trihandle* tri2trilist) { s2 = tri2trilist[3 * s1.id + (s1.shver >> 1)]; } __device__ void cudamesh_spivotself(trihandle& s, trihandle* tri2trilist) { s = tri2trilist[3 * s.id + (s.shver >> 1)]; } // sbond() bonds two subfaces (s1) and (s2) together. s1 and s2 must refer // to the same edge. No requirement is needed on their orientations. __device__ void cudamesh_sbond(trihandle& s1, trihandle& s2, trihandle* tri2trilist) { tri2trilist[3 * s1.id + (s1.shver >> 1)] = s2; tri2trilist[3 * s2.id + (s2.shver >> 1)] = s1; } // sbond1() bonds s1 <== s2, i.e., after bonding, s1 is pointing to s2, // but s2 is not pointing to s1. s1 and s2 must refer to the same edge. // No requirement is needed on their orientations. __device__ void cudamesh_sbond1(trihandle& s1, trihandle& s2, trihandle* tri2trilist) { tri2trilist[3 * s1.id + (s1.shver >> 1)] = s2; } // Dissolve a subface bond (from one side). Note that the other subface // will still think it's connected to this subface. __device__ void cudamesh_sdissolve(trihandle& s, trihandle* tri2trilist) { tri2trilist[3 * s.id + (s.shver >> 1)] = trihandle(-1, 0); } // These primitives determine or set the origin, destination, or apex // of a subface with respect to the edge version. 
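// Illustrative sketch (assumed helper name; not part of the original kernels).
// Subfaces sharing an edge are linked into a ring through tri2trilist, and the
// insertion kernels further below walk this ring with spivotself() until they
// return to the starting subface or hit an open boundary (id == -1). The helper
// below only illustrates that traversal pattern.
__device__ int cudamesh_facering_count_sketch(trihandle start, trihandle* tri2trilist)
{
	int count = 0;
	trihandle spinsh = start;
	while (spinsh.id != -1) {
		count++;
		cudamesh_spivotself(spinsh, tri2trilist); // move to the next subface at this edge
		if (spinsh.id == start.id) break;         // the ring is closed
	}
	return count;
}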
__device__ int cudamesh_sorg(trihandle& s, int* trilist) { return trilist[3 * s.id + raw_sorgpivot[s.shver]]; } __device__ int cudamesh_sdest(trihandle& s, int* trilist) { return trilist[3 * s.id + raw_sdestpivot[s.shver]]; } __device__ int cudamesh_sapex(trihandle& s, int* trilist) { return trilist[3 * s.id + raw_sapexpivot[s.shver]]; } __device__ void cudamesh_setsorg(trihandle& s, int p, int* trilist) { trilist[3 * s.id + raw_sorgpivot[s.shver]] = p; } __device__ void cudamesh_setsdest(trihandle& s, int p, int* trilist) { trilist[3 * s.id + raw_sdestpivot[s.shver]] = p; } __device__ void cudamesh_setsapex(trihandle& s, int p, int* trilist) { trilist[3 * s.id + raw_sapexpivot[s.shver]] = p; } // sesym() reserves the direction of the lead edge. __device__ void cudamesh_sesym(trihandle& s1, trihandle& s2) { s2.id = s1.id; s2.shver = (s1.shver ^ 1); // Inverse the last bit. } __device__ void cudamesh_sesymself(trihandle& s) { s.shver ^= 1; } // senext() finds the next edge (counterclockwise) in the same orientation // of this face. __device__ void cudamesh_senext(trihandle& s1, trihandle& s2) { s2.id = s1.id; s2.shver = raw_snextpivot[s1.shver]; } __device__ void cudamesh_senextself(trihandle& s) { s.shver = raw_snextpivot[s.shver]; } __device__ void cudamesh_senext2(trihandle& s1, trihandle& s2) { s2.id = s1.id; s2.shver = raw_snextpivot[raw_snextpivot[s1.shver]]; } __device__ void cudamesh_senext2self(trihandle& s) { s.shver = raw_snextpivot[raw_snextpivot[s.shver]]; } /* Primitives for interacting tetrahedra and subfaces. */ // tsbond() bond a tetrahedron (t) and a subface (s) together. // Note that t and s must be the same face and the same edge. Moreover, // t and s have the same orientation. // Since the edge number in t and in s can be any number in {0,1,2}. We bond // the edge in s which corresponds to t's 0th edge, and vice versa. __device__ void cudamesh_tsbond(tethandle& t, trihandle& s, trihandle* tet2trilist, tethandle* tri2tetlist) { // Bond t <== s. tet2trilist[4 * t.id + (t.ver & 3)] = trihandle(s.id, raw_tsbondtbl[6 * t.ver + s.shver]); // Bond s <== t. tri2tetlist[2 * s.id + (s.shver & 1)] = tethandle(t.id, raw_stbondtbl[6 * t.ver + s.shver]); } // tspivot() finds a subface (s) abutting on the given tetrahdera (t). // Return s.id = -1 if there is no subface at t. Otherwise, return // the subface s, and s and t must be at the same edge wth the same // orientation. __device__ void cudamesh_tspivot(tethandle& t, trihandle& s, trihandle* tet2trilist) { // Get the attached subface s. s = tet2trilist[4 * t.id + (t.ver & 3)]; if (s.id == -1) return; (s).shver = raw_tspivottbl[6 * t.ver + s.shver]; } // stpivot() finds a tetrahedron (t) abutting a given subface (s). // Return the t (if it exists) with the same edge and the same // orientation of s. 
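// Illustrative sketch (assumed helper name; not part of the original kernels).
// tsbond() records the tetrahedron<->subface connection in both directions
// (tet2trilist and tri2tetlist), and tspivot() recovers the subface from the
// tet side again. After bonding, the recovered handle must refer to the same
// subface, as the purely illustrative round trip below shows.
__device__ bool cudamesh_tsbond_roundtrip_sketch(tethandle t, trihandle s,
	trihandle* tet2trilist, tethandle* tri2tetlist)
{
	cudamesh_tsbond(t, s, tet2trilist, tri2tetlist);
	trihandle check;
	cudamesh_tspivot(t, check, tet2trilist);
	return check.id == s.id;
}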
__device__ void cudamesh_stpivot(trihandle& s, tethandle& t, tethandle* tri2tetlist) { t = tri2tetlist[2 * s.id + (s.shver & 1)]; if (t.id == -1) { return; } (t).ver = raw_stpivottbl[6 * t.ver + s.shver]; } /* Primitives for interacting between tetrahedra and segments */ __device__ void cudamesh_tsspivot1(tethandle& t, trihandle& seg, trihandle* tet2seglist) { seg = tet2seglist[6 * t.id + raw_ver2edge[t.ver]]; } __device__ void cudamesh_tssbond1(tethandle& t, trihandle& seg, trihandle* tet2seglist) { tet2seglist[6 * t.id + raw_ver2edge[t.ver]] = seg; } __device__ void cudamesh_sstbond1(trihandle& s, tethandle& t, tethandle* seg2tetlist) { seg2tetlist[s.id + 0] = t; } __device__ void cudamesh_sstpivot1(trihandle& s, tethandle& t, tethandle* seg2tetlist) { t = seg2tetlist[s.id]; } /* Primitives for interacting between subfaces and segments */ __device__ void cudamesh_ssbond(trihandle& s, trihandle& edge, trihandle* tri2seglist, trihandle* seg2trilist) { tri2seglist[3 * s.id + (s.shver >> 1)] = edge; seg2trilist[3 * edge.id + 0] = s; } __device__ void cudamesh_ssbond1(trihandle& s, trihandle& edge, trihandle* tri2seglist) { tri2seglist[3 * s.id + (s.shver >> 1)] = edge; } __device__ void cudamesh_sspivot(trihandle& s, trihandle& edge, trihandle* tri2seglist) { edge = tri2seglist[3 * s.id + (s.shver >> 1)]; } __device__ bool cudamesh_isshsubseg(trihandle&s, trihandle* tri2seglist) { return (tri2seglist[3 * s.id + (s.shver >> 1)].id != -1); } /* Advanced primitives. */ __device__ void cudamesh_point2tetorg(int pa, tethandle& searchtet, tethandle* point2tetlist, int* tetlist) { searchtet = point2tetlist[pa]; if (tetlist[4 * searchtet.id + 0] == pa) { searchtet.ver = 11; } else if (tetlist[4 * searchtet.id + 1] == pa) { searchtet.ver = 3; } else if (tetlist[4 * searchtet.id + 2] == pa) { searchtet.ver = 7; } else { assert(tetlist[4 * searchtet.id + 3] == pa); // SELF_CHECK searchtet.ver = 0; } } /* Geometric calculations (non-robust) */ // dot() returns the dot product: v1 dot v2. __device__ REAL cudamesh_dot(REAL* v1, REAL* v2) { return v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]; } // distance() computes the Euclidean distance between two points. __device__ REAL cudamesh_distance(REAL* p1, REAL* p2) { //printf("%lf %lf %lf - %lf %lf %lf\n", // p1[0], p1[1], p1[2], p2[0], p2[1], p2[2]); return sqrt((p2[0] - p1[0]) * (p2[0] - p1[0]) + (p2[1] - p1[1]) * (p2[1] - p1[1]) + (p2[2] - p1[2]) * (p2[2] - p1[2])); } // cross() computes the cross product: n = v1 cross v2. __device__ void cudamesh_cross(REAL* v1, REAL* v2, REAL* n) { n[0] = v1[1] * v2[2] - v2[1] * v1[2]; n[1] = -(v1[0] * v2[2] - v2[0] * v1[2]); n[2] = v1[0] * v2[1] - v2[0] * v1[1]; } /* Helpers */ __device__ unsigned long cudamesh_randomnation(unsigned long * randomseed, unsigned int choices) { unsigned long newrandom; if (choices >= 714025l) { newrandom = (*randomseed * 1366l + 150889l) % 714025l; *randomseed = (newrandom * 1366l + 150889l) % 714025l; newrandom = newrandom * (choices / 714025l) + *randomseed; if (newrandom >= choices) { return newrandom - choices; } else { return newrandom; } } else { *randomseed = (*randomseed * 1366l + 150889l) % 714025l; return *randomseed % choices; } } /////////////////////////////////////////////////////////////////////////////// // // // finddirection() Find the tet on the path from one point to another. // // // // The path starts from 'searchtet''s origin and ends at 'endpt'. On finish, // // 'searchtet' contains a tet on the path, its origin does not change. 
// // // // The return value indicates one of the following cases (let 'searchtet' be // // abcd, a is the origin of the path): // // - ACROSSVERT, edge ab is collinear with the path; // // - ACROSSEDGE, edge bc intersects with the path; // // - ACROSSFACE, face bcd intersects with the path. // // // // WARNING: This routine is designed for convex triangulations, and will not // // generally work after the holes and concavities have been carved. // // // /////////////////////////////////////////////////////////////////////////////// __device__ enum interresult cudamesh_finddirection(tethandle* searchtet, int endpt, double* pointlist, int* tetlist, tethandle* neighborlist, unsigned long* randomseed) { tethandle neightet; int pa, pb, pc, pd; enum { HMOVE, RMOVE, LMOVE } nextmove; REAL hori, rori, lori; int t1ver; int s; // The origin is fixed. pa = cudamesh_org(*searchtet, tetlist); if (tetlist[4 * searchtet->id + 3] == -1) { // A hull tet. Choose the neighbor of its base face. *searchtet = neighborlist[4 * searchtet->id + 3]; // Reset the origin to be pa. if (tetlist[4 * searchtet->id + 0] == pa) { searchtet->ver = 11; } else if (tetlist[4 * searchtet->id + 1] == pa) { searchtet->ver = 3; } else if (tetlist[4 * searchtet->id + 2] == pa) { searchtet->ver = 7; } else { assert(tetlist[4 * searchtet->id + 3] == pa); searchtet->ver = 0; } } pb = cudamesh_dest(*searchtet, tetlist); // Check whether the destination or apex is 'endpt'. if (pb == endpt) { // pa->pb is the search edge. return ACROSSVERT; } pc = cudamesh_apex(*searchtet, tetlist); if (pc == endpt) { // pa->pc is the search edge. cudamesh_eprevesymself(*searchtet); return ACROSSVERT; } double *p[5]; // Walk through tets around pa until the right one is found. while (1) { pd = cudamesh_oppo(*searchtet, tetlist); // Check whether the opposite vertex is 'endpt'. if (pd == endpt) { // pa->pd is the search edge. cudamesh_esymself(*searchtet); cudamesh_enextself(*searchtet); return ACROSSVERT; } // Check if we have entered outside of the domain. if (pd == -1) { // This is possible when the mesh is non-convex. return ACROSSSUB; // Hit a boundary. } // Now assume that the base face abc coincides with the horizon plane, // and d lies above the horizon. The search point 'endpt' may lie // above or below the horizon. We test the orientations of 'endpt' // with respect to three planes: abc (horizon), bad (right plane), // and acd (left plane). p[0] = cudamesh_id2pointlist(pa, pointlist); p[1] = cudamesh_id2pointlist(pb, pointlist); p[2] = cudamesh_id2pointlist(pc, pointlist); p[3] = cudamesh_id2pointlist(pd, pointlist); p[4] = cudamesh_id2pointlist(endpt, pointlist); hori = cuda_orient3d(p[0], p[1], p[2], p[4]); rori = cuda_orient3d(p[1], p[0], p[3], p[4]); lori = cuda_orient3d(p[0], p[2], p[3], p[4]); // Now decide the tet to move. It is possible there are more than one // tets are viable moves. Is so, randomly choose one. if (hori > 0) { if (rori > 0) { if (lori > 0) { // Any of the three neighbors is a viable move. s = cudamesh_randomnation(randomseed, 3); if (s == 0) { nextmove = HMOVE; } else if (s == 1) { nextmove = RMOVE; } else { nextmove = LMOVE; } } else { // Two tets, below horizon and below right, are viable. //s = randomnation(2); if (cudamesh_randomnation(randomseed, 2)) { nextmove = HMOVE; } else { nextmove = RMOVE; } } } else { if (lori > 0) { // Two tets, below horizon and below left, are viable. 
//s = randomnation(2); if (cudamesh_randomnation(randomseed, 2)) { nextmove = HMOVE; } else { nextmove = LMOVE; } } else { // The tet below horizon is chosen. nextmove = HMOVE; } } } else { if (rori > 0) { if (lori > 0) { // Two tets, below right and below left, are viable. //s = randomnation(2); if (cudamesh_randomnation(randomseed, 2)) { nextmove = RMOVE; } else { nextmove = LMOVE; } } else { // The tet below right is chosen. nextmove = RMOVE; } } else { if (lori > 0) { // The tet below left is chosen. nextmove = LMOVE; } else { // 'endpt' lies either on the plane(s) or across face bcd. if (hori == 0) { if (rori == 0) { // pa->'endpt' is COLLINEAR with pa->pb. return ACROSSVERT; } if (lori == 0) { // pa->'endpt' is COLLINEAR with pa->pc. cudamesh_eprevesymself(*searchtet); // // [a,c,d] return ACROSSVERT; } // pa->'endpt' crosses the edge pb->pc. return ACROSSEDGE; } if (rori == 0) { if (lori == 0) { // pa->'endpt' is COLLINEAR with pa->pd. cudamesh_esymself(*searchtet); // face bad. cudamesh_enextself(*searchtet); // face [a,d,b] return ACROSSVERT; } // pa->'endpt' crosses the edge pb->pd. cudamesh_esymself(*searchtet); // face bad. cudamesh_enextself(*searchtet); // face adb return ACROSSEDGE; } if (lori == 0) { // pa->'endpt' crosses the edge pc->pd. cudamesh_eprevesymself(*searchtet); // [a,c,d] return ACROSSEDGE; } // pa->'endpt' crosses the face bcd. return ACROSSFACE; } } } // Move to the next tet, fix pa as its origin. if (nextmove == RMOVE) { cudamesh_fnextself(*searchtet, neighborlist); } else if (nextmove == LMOVE) { cudamesh_eprevself(*searchtet); cudamesh_fnextself(*searchtet, neighborlist); cudamesh_enextself(*searchtet); } else { // HMOVE cudamesh_fsymself(*searchtet, neighborlist); cudamesh_enextself(*searchtet); } assert(cudamesh_org(*searchtet, tetlist) == pa); pb = cudamesh_dest(*searchtet, tetlist); pc = cudamesh_apex(*searchtet, tetlist); } // while (1) } ///////////////////////////////////////////////////////////////////////////////// //// // //// getedge() Get a tetrahedron having the two endpoints. // //// // //// The method here is to search the second vertex in the link faces of the // //// first vertex. The global array 'cavetetlist' is re-used for searching. // //// // //// This function is used for the case when the mesh is non-convex. Otherwise,// //// the function finddirection() should be faster than this. // //// // ///////////////////////////////////////////////////////////////////////////////// // ////int getedge(int e1, int e2, tethandle *tedge, tethandle* point2tet, double* pointlist, int* tetlist, tethandle* neighborlist, int* markerlist) ////{ //// tethandle searchtet, neightet, parytet; //// int pt; //// int done; //// int i, j; //// //// // Quickly check if 'tedge' is just this edge. //// if (!isdeadtet(*tedge)) { //// if (org(*tedge, tetlist) == e1) { //// if (dest(*tedge, tetlist) == e2) { //// return 1; //// } //// } //// else if (org(*tedge, tetlist) == e2) { //// if (dest(*tedge, tetlist) == e1) { //// esymself(*tedge); //// return 1; //// } //// } //// } //// //// // Search for the edge [e1, e2]. //// point2tetorg(e1, *tedge, point2tet, tetlist); //// finddirection(tedge, e2, pointlist, tetlist, neighborlist); //// if (dest(*tedge, tetlist) == e2) //// { //// return 1; //// } //// else //// { //// // Search for the edge [e2, e1]. 
//// point2tetorg(e2, *tedge, point2tet, tetlist); //// finddirection(tedge, e1, pointlist, tetlist, neighborlist); //// if (dest(*tedge, tetlist) == e1) { //// esymself(*tedge); //// return 1; //// } //// } //// //// // Go to the link face of e1. //// point2tetorg(e1, searchtet, point2tet, tetlist); //// enextesymself(searchtet); //// //// std::vector<tethandle> recordtetlist; // recorded tet list //// //// // Search e2. //// for (i = 0; i < 3; i++) { //// pt = apex(searchtet, tetlist); //// if (pt == e2) { //// // Found. 'searchtet' is [#,#,e2,e1]. //// eorgoppo(searchtet, *tedge); // [e1,e2,#,#]. //// return 1; //// } //// enextself(searchtet); //// } //// //// // Get the adjacent link face at 'searchtet'. //// fnext(searchtet, neightet, neighborlist); //// esymself(neightet); //// // assert(oppo(neightet) == e1); //// pt = apex(neightet, tetlist); //// if (pt == e2) { //// // Found. 'neightet' is [#,#,e2,e1]. //// eorgoppo(neightet, *tedge); // [e1,e2,#,#]. //// return 1; //// } //// //// // Continue searching in the link face of e1. //// markerlist[searchtet.id] = 1; // initial value of markerlist must be 0 //// recordtetlist.push_back(searchtet); //// markerlist[neightet.id] = 1; //// recordtetlist.push_back(neightet); //// //// done = 0; //// //// for (i = 0; (i < recordtetlist.size()) && !done; i++) { //// parytet = recordtetlist[i]; //// searchtet = parytet; //// for (j = 0; (j < 2) && !done; j++) { //// enextself(searchtet); //// fnext(searchtet, neightet, neighborlist); //// if (!markerlist[neightet.id]) { //// esymself(neightet); //// pt = apex(neightet, tetlist); //// if (pt == e2) { //// // Found. 'neightet' is [#,#,e2,e1]. //// eorgoppo(neightet, *tedge); //// done = 1; //// } //// else { //// markerlist[neightet.id] = 1; //// recordtetlist.push_back(neightet); //// } //// } //// } // j //// } // i //// //// // Uninfect the list of visited tets. 
//// for (i = 0; i < recordtetlist.size(); i++) { //// parytet = recordtetlist[i]; //// markerlist[parytet.id] = 0; //// } //// //// return done; ////} /* Refinement */ // Insert point __global__ void kernelCheckAbortiveElements( int* d_insertidxlist, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, int* d_threadmarker, int insertiontype, int numofinsertpt ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofinsertpt) return; int insertid = d_insertidxlist[pos]; bool flag; if (insertiontype == 0) flag = d_segstatus[insertid].isAbortive(); else if (insertiontype == 1) flag = d_tristatus[insertid].isAbortive(); else if (insertiontype == 2) flag = d_tetstatus[insertid].isAbortive(); if (flag) d_threadmarker[pos] = -1; } __global__ void kernelCheckInsertRadius_Seg( int* d_segidlist, REAL* d_pointlist, REAL* d_pointradius, int* d_seglist, tristatus* d_segstatus, int* d_segencmarker, int* d_threadmarker, int numofseg ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofseg) return; int segId = d_segidlist[pos]; if (d_segstatus[segId].isAbortive()) { d_threadmarker[pos] = -1; return; } int encptidx = d_segencmarker[pos]; if (encptidx != MAXINT) // not encroached by splitting segment and subface routines return; trihandle splitseg(segId, 0); int ipa, ipb; ipa = cudamesh_sorg(splitseg, d_seglist); ipb = cudamesh_sdest(splitseg, d_seglist); REAL *pa, *pb; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); REAL len = cudamesh_distance(pa, pb); REAL smrrv = d_pointradius[ipa]; REAL rrv = d_pointradius[ipb]; if (rrv > 0) { if (smrrv > 0) { if (rrv < smrrv) { smrrv = rrv; } } else { smrrv = rrv; } } if (smrrv > 0) { if ((fabs(smrrv - len) / len) < EPSILON) smrrv = len; if (len < smrrv) { d_segstatus[segId].setAbortive(true); d_threadmarker[pos] = -1; return; } } } __global__ void kernelComputePriority_Seg( int* d_segidlist, int* d_threadlist, int* d_seglist, REAL* d_pointlist, int* d_priority, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int segId = d_segidlist[threadId]; trihandle splitseg(segId, 0); int ipa, ipb; ipa = cudamesh_sorg(splitseg, d_seglist); ipb = cudamesh_sdest(splitseg, d_seglist); REAL *pa, *pb; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); REAL len = cudamesh_distance(pa, pb); d_priority[threadId] = __float_as_int((float)(1/len)); } __global__ void kernelInitSearchTet_Seg( int* d_segidlist, int* d_threadlist, tethandle* d_seg2tetlist, tethandle* d_searchtetlist, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int segId = d_segidlist[threadId]; trihandle splitseg(segId, 0); tethandle searchtet; cudamesh_sstpivot1(splitseg, searchtet, d_seg2tetlist); d_searchtetlist[threadId] = searchtet; } __global__ void kernelCheckInsertRadius_Subface( int* d_subfaceidlist, REAL* d_insertptlist, REAL* d_pointlist, trihandle* d_point2trilist, verttype* d_pointtypelist, REAL* d_pointradius, int* d_threadmarker, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, tristatus* d_tristatus, int* d_subfaceencmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int subfaceid = d_subfaceidlist[pos]; if (d_tristatus[subfaceid].isAbortive()) { 
d_threadmarker[pos] = -1; return; } int encptidx = d_subfaceencmarker[subfaceid]; if (encptidx == MAXINT) // Mark as encroached when trying to split a tet return; trihandle parentseg, parentsh; trihandle splitfac(subfaceid, 0); REAL rv, rp; REAL* newpt = d_insertptlist + 3 * pos; REAL* encpt = cudamesh_id2pointlist(encptidx, d_pointlist); rv = cudamesh_distance(newpt, encpt); if (d_pointtypelist[encptidx] == FREESEGVERTEX) { parentseg = d_point2trilist[encptidx]; if (cudamesh_segfacetadjacent(parentseg.id, splitfac.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { //printf("Adjacent: Seg #%d, Subface #%d\n", // d_seg2parentidxlist[parentseg.id], d_tri2parentidxlist[splitfac.id]); rp = d_pointradius[encptidx]; if (rv < (sqrt(2.0) * rp)) { // This insertion may cause no termination. d_threadmarker[pos] = -1; // Reject the insertion of newpt. d_tristatus[subfaceid].setAbortive(true); } } } else if (d_pointtypelist[encptidx] == FREEFACETVERTEX) { parentsh = d_point2trilist[encptidx]; if (cudamesh_facetfacetadjacent(parentsh.id, splitfac.id, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { //printf("Adjacent: Subface #%d, Subface #%d\n", // d_tri2parentidxlist[parentsh.id], d_tri2parentidxlist[splitfac.id]); rp = d_pointradius[encptidx]; if (rv < rp) { d_threadmarker[pos] = -1; // Reject the insertion of newpt. d_tristatus[subfaceid].setAbortive(true); } } } } __global__ void kernelInitSearchshList( int* d_subfaceidlist, int* d_threadlist, trihandle* d_searchsh, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int subfaceid = d_subfaceidlist[threadId]; d_searchsh[threadId] = trihandle(subfaceid, 0); } __global__ void kernelSurfacePointLocation( int* d_subfaceidlist, trihandle* d_searchsh, tethandle* d_searchtetlist, locateresult* d_pointlocation, REAL* d_insertptlist, REAL* d_pointlist, int* d_threadlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, unsigned long* d_randomseed, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; trihandle neighsh; trihandle *searchsh = d_searchsh + threadId; REAL *searchpt = d_insertptlist + 3 * threadId; REAL *pa, *pb, *pc; unsigned long *randomseed = d_randomseed + pos; REAL abvpt[3]; enum locateresult loc; enum { MOVE_BC, MOVE_CA } nextmove; REAL ori, ori_bc, ori_ca; int i; pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); // Calculate an above point for this facet. cudamesh_calculateabovepoint4(searchpt, pa, pb, pc, abvpt); // 'abvpt' is given. Make sure it is above [a,b,c] ori = cuda_orient3d(pa, pb, pc, abvpt); assert(ori != 0); // SELF_CHECK if (ori > 0) { cudamesh_sesymself(*searchsh); // Reverse the face orientation. } // Find an edge of the face s.t. p lies on its right-hand side (CCW). 
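		// With 'abvpt' fixed above the facet, each cuda_orient3d(pa, pb, abvpt, searchpt)
		// call acts as a 2D orientation test of (pa, pb, searchpt) within the facet plane,
		// so the loop below rotates through the three edges until it finds one with the
		// query point on its counter-clockwise side, the starting edge for the walk.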
for (i = 0; i < 3; i++) { pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); ori = cuda_orient3d(pa, pb, abvpt, searchpt); if (ori > 0) break; cudamesh_senextself(*searchsh); } assert(i < 3); // SELF_CHECK pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); if (pc[0] == searchpt[0] && pc[1] == searchpt[1] && pc[2] == searchpt[2]) { cudamesh_senext2self(*searchsh); loc = ONVERTEX; } else { while (1) { ori_bc = cuda_orient3d(pb, pc, abvpt, searchpt); ori_ca = cuda_orient3d(pc, pa, abvpt, searchpt); if (ori_bc < 0) { if (ori_ca < 0) { // (--) // Any of the edges is a viable move. if (cudamesh_randomnation(randomseed,2)) { nextmove = MOVE_CA; } else { nextmove = MOVE_BC; } } else { // (-#) // Edge [b, c] is viable. nextmove = MOVE_BC; } } else { if (ori_ca < 0) { // (#-) // Edge [c, a] is viable. nextmove = MOVE_CA; } else { if (ori_bc > 0) { if (ori_ca > 0) { // (++) loc = ONFACE; // Inside [a, b, c]. break; } else { // (+0) cudamesh_senext2self(*searchsh); // On edge [c, a]. loc = ONEDGE; break; } } else { // ori_bc == 0 if (ori_ca > 0) { // (0+) cudamesh_senextself(*searchsh); // On edge [b, c]. loc = ONEDGE; break; } else { // (00) // p is coincident with vertex c. cudamesh_senext2self(*searchsh); loc = ONVERTEX; break; } } } } // Move to the next face. if (nextmove == MOVE_BC) { cudamesh_senextself(*searchsh); } else { cudamesh_senext2self(*searchsh); } // NON-convex case. Check if we will cross a boundary. if (cudamesh_isshsubseg(*searchsh, d_tri2seglist)) { loc = ENCSEGMENT; break; } cudamesh_spivot(*searchsh, neighsh, d_tri2trilist); if (neighsh.id == -1) { loc = OUTSIDE; // A hull edge. break; } // Adjust the edge orientation. if (cudamesh_sorg(neighsh, d_trifacelist) != cudamesh_sdest(*searchsh, d_trifacelist)) { cudamesh_sesymself(neighsh); } assert(cudamesh_sorg(neighsh, d_trifacelist) == cudamesh_sdest(*searchsh, d_trifacelist)); // SELF_CHECK // Update the newly discovered face and its endpoints. 
*searchsh = neighsh; pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); if (pc == searchpt) { cudamesh_senext2self(*searchsh); loc = ONVERTEX; break; } } // while (1) } d_pointlocation[threadId] = loc; if (!(loc == ONFACE || loc == ONEDGE)) { int subfaceid = d_subfaceidlist[threadId]; d_tristatus[subfaceid].setAbortive(true); // mark the encroached subface rather than the located one d_threadmarker[threadId] = -1; return; } tethandle searchtet; cudamesh_stpivot(*searchsh, searchtet, d_tri2tetlist); d_searchtetlist[threadId] = searchtet; } __global__ void kernelComputePriority_Subface( int* d_insertidxlist, int* d_threadlist, int* d_trifacelist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, REAL* d_pointlist, int* d_priority, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int subfaceid = d_insertidxlist[threadId]; trihandle splitsh = trihandle(subfaceid, 0); int ipa, ipb, ipc; ipa = cudamesh_sorg(splitsh, d_trifacelist); ipb = cudamesh_sdest(splitsh, d_trifacelist); ipc = cudamesh_sapex(splitsh, d_trifacelist); REAL *pa, *pb, *pc; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); pc = cudamesh_id2pointlist(ipc, d_pointlist); // Compute the area of this 3D triangle REAL AB[3], AC[3]; int i; for (i = 0; i < 3; i++) { AB[i] = pb[i] - pa[i]; AC[i] = pc[i] - pa[i]; } REAL area = sqrt((AB[1] * AC[2] - AB[2] * AC[1])*(AB[1] * AC[2] - AB[2] * AC[1]) + (AB[2] * AC[0] - AB[0] * AC[2])*(AB[2] * AC[0] - AB[0] * AC[2]) + (AB[0] * AC[1] - AB[1] * AC[0])*(AB[0] * AC[1] - AB[1] * AC[0])) / 2; d_priority[threadId] = __float_as_int((float)(1/ area)); //int offsetid = d_tri2parentidxlist[subfaceid]; //REAL* pt[4]; //for (int i = d_triid2parentoffsetlist[offsetid]; i < d_triid2parentoffsetlist[offsetid]; i++) //{ // //} } __global__ void kernelCheckInsertRadius_Tet( int* d_tetidlist, REAL* d_pointlist, REAL* d_pointradius, int* d_tetlist, tetstatus* d_tetstatus, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int tetid = d_tetidlist[pos]; if (d_tetstatus[tetid].isAbortive()) { d_threadmarker[pos] = -1; return; } tethandle chktet(tetid, 11), checkedge; int ie1, ie2; int i, j; REAL *e1, *e2; REAL smlen = 0; REAL rrv, smrrv; REAL elen[6]; // Get the shortest edge of this tet. checkedge.id = chktet.id; for (i = 0; i < 6; i++) { checkedge.ver = raw_edge2ver[i]; ie1 = cudamesh_org(checkedge, d_tetlist); ie2 = cudamesh_dest(checkedge, d_tetlist); e1 = cudamesh_id2pointlist(ie1, d_pointlist); e2 = cudamesh_id2pointlist(ie2, d_pointlist); elen[i] = cudamesh_distance(e1, e2); if (i == 0) { smlen = elen[i]; j = 0; } else { if (elen[i] < smlen) { smlen = elen[i]; j = i; } } } // Check if the edge is too short. checkedge.ver = raw_edge2ver[j]; // Get the smallest rrv of e1 and e2. // Note: if rrv of e1 and e2 is zero. Do not use it. ie1 = cudamesh_org(checkedge, d_tetlist); smrrv = d_pointradius[ie1]; ie2 = cudamesh_dest(checkedge, d_tetlist); rrv = d_pointradius[ie2]; if (rrv > 0) { if (smrrv > 0) { if (rrv < smrrv) { smrrv = rrv; } } else { smrrv = rrv; } } if (smrrv > 0) { // To avoid rounding error, round smrrv before doing comparison. 
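		// smrrv is the smaller recorded insertion radius of the shortest edge's endpoints.
		// If that radius already exceeds the shortest edge length, splitting this tet could
		// keep producing ever shorter edges, so the tet is marked abortive instead; the
		// relative-EPSILON test below first snaps smrrv to smlen so the comparison is not
		// decided by floating-point noise.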
if ((fabs(smrrv - smlen) / smlen) <EPSILON) { smrrv = smlen; } if (smrrv > smlen) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[pos] = -1; return; } } } __global__ void kernelPointLocation( int* d_tetidlist, REAL* d_insertptlist, locateresult* d_pointlocation, tethandle* d_searchtetlist, int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, tetstatus* d_tetstatus, int* d_priority, unsigned long* d_randomseed, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; if (d_threadmarker[threadId] == -1) return; int tetid = d_tetidlist[threadId]; tethandle* searchtet = d_searchtetlist + threadId; REAL* searchpt = d_insertptlist + 3 * threadId; unsigned long* randomseed = d_randomseed + pos; REAL *torg, *tdest, *tapex, *toppo; enum { ORGMOVE, DESTMOVE, APEXMOVE } nextmove; REAL ori, oriorg, oridest, oriapex; enum locateresult loc = OUTSIDE; int t1ver; int s; int step = 1; // Init searchtet searchtet->id = tetid; searchtet->ver = 11; // Check if we are in the outside of the convex hull. if (cudamesh_ishulltet(*searchtet, d_tetlist)) { // Get its adjacent tet (inside the hull). searchtet->ver = 3; cudamesh_fsymself(*searchtet, d_neighborlist); } // Let searchtet be the face such that 'searchpt' lies above to it. for (searchtet->ver = 0; searchtet->ver < 4; searchtet->ver++) { torg = cudamesh_id2pointlist(cudamesh_org(*searchtet, d_tetlist), d_pointlist); tdest = cudamesh_id2pointlist(cudamesh_dest(*searchtet, d_tetlist), d_pointlist); tapex = cudamesh_id2pointlist(cudamesh_apex(*searchtet, d_tetlist), d_pointlist); ori = cuda_orient3d(torg, tdest, tapex, searchpt); if (ori < 0.0) break; } assert(searchtet->ver != 4); // Walk through tetrahedra to locate the point. while (true) { toppo = cudamesh_id2pointlist(cudamesh_oppo(*searchtet, d_tetlist), d_pointlist); // Check if the vertex is we seek. if (toppo[0] == searchpt[0] && toppo[1] == searchpt[1] && toppo[2] == searchpt[2]) { // Adjust the origin of searchtet to be searchpt. cudamesh_esymself(*searchtet); cudamesh_eprevself(*searchtet); loc = ONVERTEX; // return ONVERTEX; break; } // We enter from one of serarchtet's faces, which face do we exit? oriorg = cuda_orient3d(tdest, tapex, toppo, searchpt); oridest = cuda_orient3d(tapex, torg, toppo, searchpt); oriapex = cuda_orient3d(torg, tdest, toppo, searchpt); // Now decide which face to move. It is possible there are more than one // faces are viable moves. If so, randomly choose one. if (oriorg < 0) { if (oridest < 0) { if (oriapex < 0) { // All three faces are possible. s = cudamesh_randomnation(randomseed, 3); // 's' is in {0,1,2}. if (s == 0) { nextmove = ORGMOVE; } else if (s == 1) { nextmove = DESTMOVE; } else { nextmove = APEXMOVE; } } else { // Two faces, opposite to origin and destination, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = ORGMOVE; } else { nextmove = DESTMOVE; } } } else { if (oriapex < 0) { // Two faces, opposite to origin and apex, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = ORGMOVE; } else { nextmove = APEXMOVE; } } else { // Only the face opposite to origin is viable. nextmove = ORGMOVE; } } } else { if (oridest < 0) { if (oriapex < 0) { // Two faces, opposite to destination and apex, are viable. //s = randomnation(2); // 's' is in {0,1}. 
if (cudamesh_randomnation(randomseed, 2)) { nextmove = DESTMOVE; } else { nextmove = APEXMOVE; } } else { // Only the face opposite to destination is viable. nextmove = DESTMOVE; } } else { if (oriapex < 0) { // Only the face opposite to apex is viable. nextmove = APEXMOVE; } else { // The point we seek must be on the boundary of or inside this // tetrahedron. Check for boundary cases. if (oriorg == 0) { // Go to the face opposite to origin. cudamesh_enextesymself(*searchtet); if (oridest == 0) { cudamesh_eprevself(*searchtet); // edge oppo->apex if (oriapex == 0) { // oppo is duplicated with p. loc = ONVERTEX; // return ONVERTEX; break; } loc = ONEDGE; // return ONEDGE; break; } if (oriapex == 0) { cudamesh_enextself(*searchtet); // edge dest->oppo loc = ONEDGE; // return ONEDGE; break; } loc = ONFACE; // return ONFACE; break; } if (oridest == 0) { // Go to the face opposite to destination. cudamesh_eprevesymself(*searchtet); if (oriapex == 0) { cudamesh_eprevself(*searchtet); // edge oppo->org loc = ONEDGE; // return ONEDGE; break; } loc = ONFACE; // return ONFACE; break; } if (oriapex == 0) { // Go to the face opposite to apex cudamesh_esymself(*searchtet); loc = ONFACE; // return ONFACE; break; } loc = INTETRAHEDRON; // return INTETRAHEDRON; break; } } } // Move to the selected face. if (nextmove == ORGMOVE) { cudamesh_enextesymself(*searchtet); } else if (nextmove == DESTMOVE) { cudamesh_eprevesymself(*searchtet); } else { cudamesh_esymself(*searchtet); } // Move to the adjacent tetrahedron (maybe a hull tetrahedron). cudamesh_fsymself(*searchtet, d_neighborlist); if (cudamesh_oppo(*searchtet, d_tetlist) == -1) { loc = OUTSIDE; // return OUTSIDE; break; } // Retreat the three vertices of the base face. torg = cudamesh_id2pointlist(cudamesh_org(*searchtet, d_tetlist), d_pointlist); tdest = cudamesh_id2pointlist(cudamesh_dest(*searchtet, d_tetlist), d_pointlist); tapex = cudamesh_id2pointlist(cudamesh_apex(*searchtet, d_tetlist), d_pointlist); step++; } // while (true) d_pointlocation[threadId] = loc; // set weighted priority //REAL vol = cudamesh_tetrahedronvolume(tetid, d_pointlist, d_tetlist); //REAL wp = 0.5*vol + 0.5*step; //d_priority[threadId] = __float_as_int((float)(1 / wp)); if (loc == ONVERTEX) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[threadId] = -1; } } __global__ void kernelMarkAndCountInitialCavity( int* d_insertidxlist, locateresult* d_pointlocation, int* d_threadlist, tethandle* d_searchtet, trihandle* d_searchsh, trihandle* d_seg2trilist, int* d_trifacelist, trihandle* d_tri2trilist, tethandle* d_neighborlist, int* d_priority, uint64* d_tetmarker, int* d_segmarker, uint64* d_trimarker, int* d_threadmarker, int* d_initialcavitysize, int* d_initialsubcavitysize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; tethandle searchtet = d_searchtet[threadId]; tethandle spintet, neightet; locateresult loc = d_pointlocation[threadId]; // initial cavity // mark all tets share at this edge int count = 0, i; int old; uint64 marker, oldmarker; marker = cudamesh_encodeUInt64Priority(d_priority[threadId], threadId); if (loc == ONEDGE) { spintet = searchtet; while (1) { // check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { count = 0; break; } // marking competition oldmarker = atomicMin(d_tetmarker + spintet.id, marker); if (marker < oldmarker) // winned { old = 
cudamesh_getUInt64PriorityIndex(oldmarker); if (old != MAXUINT) { d_threadmarker[old] = -1; atomicMin(d_initialcavitysize + old, 0); atomicMin(d_initialsubcavitysize + old, 0); } } else // lost { d_threadmarker[threadId] = -1; count = 0; break; } count++; cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == searchtet.id) break; } // while (1) } else if (loc == ONFACE) { // check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { count = 0; } else // mark two adjacent tets on the face { spintet = searchtet; for (i = 0; i < 2; i++) { // marking competition oldmarker = atomicMin(d_tetmarker + spintet.id, marker); if (marker < oldmarker) // winned { old = cudamesh_getUInt64PriorityIndex(oldmarker); if (old != MAXUINT) { d_threadmarker[old] = -1; atomicMin(d_initialcavitysize + old, 0); atomicMin(d_initialsubcavitysize + old, 0); } } else // lost { d_threadmarker[threadId] = -1; count = 0; break; } count++; spintet = d_neighborlist[4 * searchtet.id + (searchtet.ver & 3)]; } } } else if (loc == INTETRAHEDRON || loc == OUTSIDE) { // check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { count = 0; } else // mark four adjecent tets { // marking competition oldmarker = atomicMin(d_tetmarker + searchtet.id, marker); if (marker < oldmarker) // winned { count = 1; old = cudamesh_getUInt64PriorityIndex(oldmarker); if (old != MAXUINT) { d_threadmarker[old] = -1; atomicMin(d_initialcavitysize + old, 0); atomicMin(d_initialsubcavitysize + old, 0); } } else // lost { d_threadmarker[threadId] = -1; count = 0; } } } atomicMin(d_initialcavitysize + threadId, count); // Initial subcavity // Count all subfaces share at this edge. int scount = 0; if (count == 0) scount = 0; else { trihandle splitsh; if (loc == ONEDGE) { if (threadmarker == 0) { int segId = d_insertidxlist[threadId]; trihandle splitseg(segId, 0); atomicMin(d_segmarker + splitseg.id, threadId); cudamesh_spivot(splitseg, splitsh, d_seg2trilist); } else if (threadmarker == 1) { splitsh = d_searchsh[threadId]; } if (splitsh.id != -1) { int pa = cudamesh_sorg(splitsh, d_trifacelist); trihandle neighsh = splitsh; while (1) { // Check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { scount = 0; break; } // Adjust the origin of its edge to be 'pa'. if (cudamesh_sorg(neighsh, d_trifacelist) != pa) { cudamesh_sesymself(neighsh); } // Mark this face atomicMin(d_trimarker + neighsh.id, marker); // count this face scount++; // Go to the next face at the edge. cudamesh_spivotself(neighsh, d_tri2trilist); // Stop if all faces at the edge have been visited. 
if (neighsh.id == splitsh.id) break; if (neighsh.id == -1) break; } // while (1) } } else if (loc == ONFACE) { if (threadmarker == 1) { // Check if already lost if (d_threadmarker[threadId] == -1) // lost because of other threads { scount = 0; } else { splitsh = d_searchsh[threadId]; // Mark this face atomicMin(d_trimarker + splitsh.id, marker); // count this face scount++; } } } } atomicMin(d_initialsubcavitysize + threadId, scount); } __global__ void kernelInitCavityLinklist( int* d_insertidxlist, locateresult* d_pointlocation, int* d_threadlist, tethandle* d_searchtet, trihandle* d_searchsh, trihandle* d_seg2trilist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, int* d_tetlist, tethandle* d_neighborlist, int* d_initialcavityindices, tethandle* d_caveoldtetlist, int* d_caveoldtetprev, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_caveoldtettail, tethandle* d_cavetetlist, int* d_cavetetprev, int* d_cavetetnext, int* d_cavetethead, int* d_cavetettail, int* d_initialsubcavityindices, int* d_initialsubcavitysize, int* d_cavethreadidx, trihandle* d_caveshlist, int* d_caveshprev, int* d_caveshnext, int* d_caveshhead, int* d_caveshtail, trihandle* d_cavesegshlist, int* d_cavesegshprev, int* d_cavesegshnext, int* d_cavesegshhead, int* d_cavesegshtail, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; tethandle searchtet = d_searchtet[threadId]; tethandle spintet, neightet; locateresult loc = d_pointlocation[threadId]; // cavities int icsindex = d_initialcavityindices[threadId]; int count = 0; int prev = -1; int icavityIdx; int tetidxfactor = 4; if (loc == ONEDGE) { spintet = searchtet; while (1) { // initial cavity index icavityIdx = icsindex + count; // add to tet list cudamesh_eorgoppo(spintet, neightet); neightet = d_neighborlist[4 * neightet.id + (neightet.ver & 3)]; neightet.ver = raw_epivot[neightet.ver]; d_cavetetlist[tetidxfactor * icavityIdx] = neightet; d_cavetetprev[tetidxfactor * icavityIdx] = (prev == -1) ? 
-1 : tetidxfactor * prev + 1; d_cavetetnext[tetidxfactor * icavityIdx] = tetidxfactor * icavityIdx + 1; if (prev != -1) d_cavetetnext[tetidxfactor * prev + 1] = tetidxfactor * icavityIdx; d_cavethreadidx[tetidxfactor * icavityIdx] = threadId; cudamesh_edestoppo(spintet, neightet); neightet = d_neighborlist[4 * neightet.id + (neightet.ver & 3)]; neightet.ver = raw_epivot[neightet.ver]; d_cavetetlist[tetidxfactor * icavityIdx + 1] = neightet; d_cavetetprev[tetidxfactor * icavityIdx + 1] = tetidxfactor * icavityIdx; d_cavetetnext[tetidxfactor * icavityIdx + 1] = -1; d_cavethreadidx[tetidxfactor * icavityIdx + 1] = threadId; // add to old tet list d_caveoldtetlist[icavityIdx] = spintet; // current tet d_caveoldtetprev[icavityIdx] = prev; // previous d_caveoldtetnext[icavityIdx] = -1; // next, set to -1 first if (prev != -1) d_caveoldtetnext[prev] = icavityIdx; // previous next, set to me if (count == 0) { d_caveoldtethead[threadId] = icavityIdx; d_cavetethead[threadId] = tetidxfactor * icavityIdx; } // next iteration prev = icavityIdx; cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == searchtet.id) { d_caveoldtettail[threadId] = icavityIdx; d_cavetettail[threadId] = tetidxfactor * icavityIdx + 1; break; } count++; } // while (1) } else if (loc == ONFACE) { int i, j; // initial cavity index icavityIdx = icsindex; // add to tet and old tet list j = (searchtet.ver & 3); for (i = 1; i < 4; i++) { neightet = d_neighborlist[4 * searchtet.id + (j + i) % 4]; d_cavetetlist[tetidxfactor * icavityIdx + i - 1] = neightet; d_cavetetprev[tetidxfactor * icavityIdx + i - 1] = (i == 1) ? -1 : tetidxfactor * icavityIdx + i - 2; d_cavetetnext[tetidxfactor * icavityIdx + i - 1] = tetidxfactor * icavityIdx + i; d_cavethreadidx[tetidxfactor * icavityIdx + i - 1] = threadId; } d_cavetethead[threadId] = tetidxfactor * icavityIdx; d_caveoldtetlist[icavityIdx] = searchtet; d_caveoldtetprev[icavityIdx] = -1; d_caveoldtetnext[icavityIdx] = icavityIdx + 1; d_caveoldtethead[threadId] = icavityIdx; icavityIdx++; spintet = d_neighborlist[4 * searchtet.id + j]; j = (spintet.ver & 3); for (i = 1; i < 4; i++) { neightet = d_neighborlist[4 * spintet.id + (j + i) % 4]; d_cavetetlist[tetidxfactor * icavityIdx + i - 1] = neightet; d_cavetetprev[tetidxfactor * icavityIdx + i - 1] = tetidxfactor * icavityIdx + i - 2; d_cavetetnext[tetidxfactor * icavityIdx + i - 1] = (i == 3) ? -1 : tetidxfactor * icavityIdx + i; d_cavethreadidx[tetidxfactor * icavityIdx + i - 1] = threadId; } d_cavetettail[threadId] = tetidxfactor * icavityIdx + 2; d_caveoldtetlist[icavityIdx] = spintet; d_caveoldtetprev[icavityIdx] = icavityIdx -1; d_caveoldtetnext[icavityIdx] = -1; d_caveoldtettail[threadId] = icavityIdx; } else if (loc == INTETRAHEDRON || loc == OUTSIDE) { int i; // initial cavity index icavityIdx = icsindex; // add to tet and old tet list for (i = 0; i < 4; i++) { neightet = d_neighborlist[4 * searchtet.id + i]; d_cavetetlist[tetidxfactor * icavityIdx + i] = neightet; d_cavetetprev[tetidxfactor * icavityIdx + i] = (i == 0) ? -1 : tetidxfactor * icavityIdx + i - 1; d_cavetetnext[tetidxfactor * icavityIdx + i] = (i == 3) ? 
-1 : tetidxfactor * icavityIdx + i + 1; d_cavethreadidx[tetidxfactor * icavityIdx + i] = threadId; } d_cavetethead[threadId] = tetidxfactor * icavityIdx; d_cavetettail[threadId] = tetidxfactor * icavityIdx + 3; d_caveoldtetlist[icavityIdx] = searchtet; d_caveoldtetprev[icavityIdx] = -1; d_caveoldtetnext[icavityIdx] = -1; d_caveoldtethead[threadId] = icavityIdx; d_caveoldtettail[threadId] = icavityIdx; } // subcavities if (d_initialsubcavitysize[threadId] != 0) // when splitseg is dangling segment, this equals to 0 { int iscsindex = d_initialsubcavityindices[threadId]; int scount = 0; int sprev = -1; int iscavityIdx; trihandle splitsh; if (loc == ONEDGE) { if (threadmarker == 0) { int segId = d_insertidxlist[threadId]; trihandle splitseg(segId, 0); cudamesh_spivot(splitseg, splitsh, d_seg2trilist); } else if (threadmarker == 1) { splitsh = d_searchsh[threadId]; } // Collect all subfaces share at this edge. if (splitsh.id != -1) { int pa = cudamesh_sorg(splitsh, d_trifacelist); trihandle neighsh = splitsh; while (1) { // Initial subcavity index iscavityIdx = iscsindex + scount; // Adjust the origin of its edge to be 'pa'. if (cudamesh_sorg(neighsh, d_trifacelist) != pa) { cudamesh_sesymself(neighsh); } // add to cavesh and cavesegsh list d_caveshlist[iscavityIdx] = neighsh; // current tet d_caveshprev[iscavityIdx] = sprev; // previous d_caveshnext[iscavityIdx] = -1; // next, set to -1 first d_cavesegshlist[iscavityIdx] = neighsh; // current triface d_cavesegshprev[iscavityIdx] = sprev; // previous d_cavesegshnext[iscavityIdx] = -1; // next, set to -1 first if (sprev != -1) { d_caveshnext[sprev] = iscavityIdx; // previous next, set to me d_cavesegshnext[sprev] = iscavityIdx; // previous next, set to me } if (scount == 0) { d_caveshhead[threadId] = iscavityIdx; d_cavesegshhead[threadId] = iscavityIdx; } // next iteration sprev = iscavityIdx; // count this face scount++; // Go to the next face at the edge. cudamesh_spivotself(neighsh, d_tri2trilist); // Stop if all faces at the edge have been visited. 
if (neighsh.id == splitsh.id || neighsh.id == -1) { d_caveshtail[threadId] = iscavityIdx; d_cavesegshtail[threadId] = iscavityIdx; break; } } // while (1) } } else if (loc == ONFACE) { if (threadmarker == 1) { iscavityIdx = iscsindex; splitsh = d_searchsh[threadId]; d_caveshlist[iscavityIdx] = splitsh; d_caveshprev[iscavityIdx] = -1; d_caveshnext[iscavityIdx] = -1; d_caveshhead[threadId] = iscavityIdx; d_caveshtail[threadId] = iscavityIdx; } } } } __global__ void kernelInitLinklistCurPointer( int* d_threadlist, int* d_linklisthead, int* d_linklistcur, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; d_linklistcur[threadId] = d_linklisthead[threadId]; } __global__ void kernelCavityRatioControl( int* d_cavethreadidx, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId != -1) // owners of larger cavities d_threadmarker[threadId] = -1; } __global__ void kernelLargeCavityCheck( int* d_insertidxlist, REAL* d_insertptlist, int* d_cavethreadidx, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId != -1) // owners of large cavities { int threadmarker = d_threadmarker[threadId]; if (threadmarker != -1) { int eleidx = d_insertidxlist[threadId]; if (threadmarker == 0) d_segstatus[eleidx].setAbortive(true); else if (threadmarker == 1) d_tristatus[eleidx].setAbortive(true); else if (threadmarker == 2) d_tetstatus[eleidx].setAbortive(true); d_threadmarker[threadId] = -1; } } } __global__ void kernelCavityExpandingCheck( int* d_cavethreadidx, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, REAL* d_insertptlist, tethandle* d_cavetetlist, int* d_cavetetprev, int* d_cavetetnext, int* d_cavetethead, int* d_cavetettail, int* d_cavetetexpandsize, tethandle* d_caveoldtetlist, int* d_caveoldtetprev, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_caveoldtettail, int* d_caveoldtetexpandsize, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_threadmarker, int* d_priority, uint64* d_tetmarker, int cavetetcurstartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int tetexpandsize = 0; int oldtetexpandsize = 0; int bdryexpandsize = 0; int threadId = d_cavethreadidx[pos]; if (threadId != -1) // threadId is -1 in the unused slot { REAL* insertpt = d_insertptlist + 3 * threadId; int cur = cavetetcurstartindex + pos; tethandle cavetet = d_cavetetlist[cur]; if (d_threadmarker[threadId] != -1) // avoid to expand loser { uint64 marker = cudamesh_encodeUInt64Priority(d_priority[threadId], threadId); if (d_tetmarker[cavetet.id] != marker) // need to check { bool enqflag = false; double sign; // Get four endpoints of cavetet REAL *pts[4]; int idx[4]; for (int i = 0; i < 4; i++) { idx[i] = d_tetlist[4 * cavetet.id + i]; if (idx[i] != -1) pts[i] = cudamesh_id2pointlist(idx[i], d_pointlist); else pts[i] = NULL; } // Test if cavetet is included in the (enlarged) cavity if (idx[3] != -1) { sign = cudamesh_insphere_s(pts[0], pts[1], pts[2], pts[3], insertpt, idx[0], idx[1], idx[2], idx[3], MAXINT); enqflag = (sign < 0.0); } else // A hull face (must be a subface). 
Test its neighbor. { // We FIRST finclude it in the initial cavity if its adjacent tet is // not Delaunay wrt p. Will validate it later on. tethandle neineitet = d_neighborlist[4 * cavetet.id + 3]; if (d_tetmarker[neineitet.id] != marker) // need to check { // Get four endpoints of neineitet for (int i = 0; i < 4; i++) { idx[i] = d_tetlist[4 * neineitet.id + i]; if (idx[i] != -1) pts[i] = cudamesh_id2pointlist(idx[i], d_pointlist); else pts[i] = NULL; } assert(idx[3] != -1); sign = cudamesh_insphere_s(pts[0], pts[1], pts[2], pts[3], insertpt, idx[0], idx[1], idx[2], idx[3], MAXINT); enqflag = (sign < 0.0); } else { enqflag = true; } } // Count size if (enqflag) { uint64 oldmarker = atomicMin(d_tetmarker + cavetet.id, marker); if (marker < oldmarker) // I winned { tetexpandsize = 3; oldtetexpandsize = 1; int old = cudamesh_getUInt64PriorityIndex(oldmarker); if (old != MAXUINT) { d_threadmarker[old] = -1; } } else if (marker > oldmarker) // I lost { d_threadmarker[threadId] = -1; } } else { bdryexpandsize = 1; } } } } d_cavetetexpandsize[pos] = tetexpandsize; d_caveoldtetexpandsize[pos] = oldtetexpandsize; d_cavebdryexpandsize[pos] = bdryexpandsize; } __global__ void kernelCorrectExpandingSize( int* d_cavethreadidx, int* d_cavetetexpandsize, int* d_caveoldtetexpandsize, int* d_cavebdryexpandsize, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId != -1 && d_threadmarker[threadId] == -1) { d_cavetetexpandsize[pos] = 0; d_caveoldtetexpandsize[pos] = 0; d_cavebdryexpandsize[pos] = 0; } } __global__ void kernelCavityExpandingSetThreadidx( int* d_cavethreadidx, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int* d_cavetetthreadidx, int* d_caveoldtetexpandsize, int* d_caveoldtetexpandindices, int* d_caveoldtetthreadidx, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_cavebdrythreadidx, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId == -1) return; int eindex; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; for (int j = 0; j < 3; j++) { d_cavetetthreadidx[eindex + j] = threadId; } } if (d_caveoldtetexpandsize[pos] != 0) { eindex = d_caveoldtetexpandindices[pos]; d_caveoldtetthreadidx[eindex] = threadId; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; d_cavebdrythreadidx[eindex] = threadId; } } __global__ void kernelCavityExpandingMarkAndAppend( int* d_cavethreadidx, tethandle* d_neighborlist, tethandle* d_cavetetlist, int* d_cavetetprev, int* d_cavetetnext, int* d_cavetethead, int* d_cavetettail, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int* d_cavetetthreadidx, int cavetetstartindex, int cavetetexpandsize, tethandle* d_caveoldtetlist, int* d_caveoldtetprev, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_caveoldtettail, int* d_caveoldtetexpandsize, int* d_caveoldtetexpandindices, int* d_caveoldtetthreadidx, int caveoldtetstartindex, int caveoldtetexpandsize, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_cavebdrythreadidx, int cavebdrystartindex, int cavebdryexpandsize, int* d_threadmarker, int* d_priority, uint64* d_tetmarker, int cavetetcurstartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) 
return; int threadId = d_cavethreadidx[pos]; if (threadId == -1) return; if (d_threadmarker[threadId] == -1) return; int cur = cavetetcurstartindex + pos; tethandle cavetet = d_cavetetlist[cur]; int sindex, eindex, prev; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; sindex = cavetetstartindex + eindex; // Append cavetetlist and mark current tet int k = (cavetet.ver & 3); // The current face number tethandle neightet; int newid; if (eindex == 0 || d_cavetetthreadidx[eindex - 1] != threadId) { prev = d_cavetettail[threadId]; d_cavetetnext[prev] = sindex; // prev must not be -1 } else prev = sindex - 1; for (int j = 1; j < 4; j++) { neightet = d_neighborlist[4 * cavetet.id + (j + k) % 4]; newid = sindex + j - 1; d_cavetetlist[newid] = neightet; d_cavetetprev[newid] = prev; d_cavetetnext[newid] = newid + 1; // set to next one first prev = newid; } if (eindex + 2 == cavetetexpandsize - 1 || d_cavetetthreadidx[eindex + 3] != threadId) d_cavetetnext[newid] = -1; } if (d_caveoldtetexpandsize[pos] != 0) { eindex = d_caveoldtetexpandindices[pos]; sindex = caveoldtetstartindex + eindex; if (eindex == 0 || d_caveoldtetthreadidx[eindex - 1] != threadId) { prev = d_caveoldtettail[threadId]; d_caveoldtetnext[prev] = sindex; // prev must not be -1 } else prev = sindex - 1; d_caveoldtetlist[sindex] = cavetet; d_caveoldtetprev[sindex] = prev; d_caveoldtetnext[sindex] = sindex + 1; if (eindex == caveoldtetexpandsize - 1 || d_caveoldtetthreadidx[eindex + 1] != threadId) d_caveoldtetnext[sindex] = -1; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; sindex = cavebdrystartindex + eindex; if (eindex == 0 || d_cavebdrythreadidx[eindex - 1] != threadId) { prev = d_cavebdrytail[threadId]; if (prev != -1) d_cavebdrynext[prev] = sindex; // prev must not be -1 if (d_cavebdryhead[threadId] == -1) // initialize cavebdry list header d_cavebdryhead[threadId] = sindex; } else prev = sindex - 1; cavetet.ver = raw_epivot[cavetet.ver]; d_cavebdrylist[sindex] = cavetet; d_cavebdryprev[sindex] = prev; d_cavebdrynext[sindex] = sindex + 1; if (eindex == cavebdryexpandsize - 1 || d_cavebdrythreadidx[eindex + 1] != threadId) d_cavebdrynext[sindex] = -1; } } __global__ void kernelCavityExpandingUpdateListTails( int* d_cavethreadidx, int* d_cavetetnext, int* d_cavetettail, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int cavetetstartindex, int* d_caveoldtetnext, int* d_caveoldtettail, int* d_caveoldtetexpandsize, int* d_caveoldtetexpandindices, int caveoldtetstartindex, int* d_cavebdrynext, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_threadmarker, int cavebdrystartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; if (threadId == -1) return; if (d_threadmarker[threadId] == -1) return; int sindex, eindex, prev; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; sindex = cavetetstartindex + eindex + 2; if (d_cavetetnext[sindex] == -1) d_cavetettail[threadId] = sindex; } if (d_caveoldtetexpandsize[pos] != 0) { eindex = d_caveoldtetexpandindices[pos]; sindex = caveoldtetstartindex + eindex; if (d_caveoldtetnext[sindex] == -1) d_caveoldtettail[threadId] = sindex; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; sindex = cavebdrystartindex + eindex; if (d_cavebdrynext[sindex] == -1) d_cavebdrytail[threadId] = sindex; } } __global__ void kernelMarkCavityAdjacentSubsegs( int* 
d_threadlist, trihandle* d_tet2seglist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_segmarker, int* d_threadmarker, int numofthreads, uint64* d_tetmarker ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int i = d_caveoldtethead[threadId]; int old; tethandle cavetet; trihandle checkseg; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 6; j++) { checkseg = d_tet2seglist[6 * cavetet.id + j]; if (checkseg.id != -1) { old = atomicMin(d_segmarker + checkseg.id, threadId); if (old < threadId) d_threadmarker[threadId] = -1; else if (old > threadId && old != MAXINT) d_threadmarker[old] = -1; } } i = d_caveoldtetnext[i]; } } __global__ void kernelCountCavitySubsegs( int* d_threadlist, trihandle* d_tet2seglist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_cavetetsegsize, int* d_segmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int cavetetsegsize = 0; if (d_threadmarker[threadId] != -1) { int i = d_caveoldtethead[threadId]; tethandle cavetet; trihandle checkseg; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 6; j++) { checkseg = d_tet2seglist[6 * cavetet.id + j]; if (checkseg.id != -1) { if (d_segmarker[checkseg.id] == threadId) { cavetetsegsize++; d_segmarker[checkseg.id] = MAXINT; // Mark as counted } } } i = d_caveoldtetnext[i]; } } d_cavetetsegsize[pos] = cavetetsegsize; } __global__ void kernelAppendCavitySubsegs( int* d_threadlist, trihandle* d_tet2seglist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, trihandle* d_cavetetseglist, int* d_cavetetsegprev, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_cavetetsegtail, int* d_cavetetsegsize, int* d_cavetetsegindices, int* d_segmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; if (d_cavetetsegsize[pos] == 0) return; int sindex = d_cavetetsegindices[pos]; d_cavetetseghead[threadId] = sindex; int i = d_caveoldtethead[threadId]; tethandle cavetet; trihandle checkseg; int index, count = 0, prev = -1; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 6; j++) { checkseg = d_tet2seglist[6 * cavetet.id + j]; if (checkseg.id != -1) { if (d_segmarker[checkseg.id] == MAXINT) { d_segmarker[checkseg.id] = -2; // Mark as appended index = sindex + count; d_cavetetseglist[index] = checkseg; d_cavetetsegprev[index] = prev; d_cavetetsegnext[index] = -1; if (prev != -1) d_cavetetsegnext[prev] = index; count++; prev = index; } } } i = d_caveoldtetnext[i]; if (i == -1) // reached the end { d_cavetetsegtail[threadId] = index; } } } __global__ void kernelCheckSegmentEncroachment( int* d_insertidxlist, REAL* d_insertptlist, int* d_threadlist, REAL* d_pointlist, int* d_seglist, int* d_segencmarker, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker <= 0) // loser or subsegment return; REAL *insertpt = d_insertptlist + 3 * threadId; int ipa, ipb, encpt; REAL *pa, *pb; trihandle paryseg; bool flag = false; int i = 
d_cavetetseghead[threadId]; while (i != -1) { paryseg = d_cavetetseglist[i]; ipa = d_seglist[3 * paryseg.id + 0]; ipb = d_seglist[3 * paryseg.id + 1]; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); if (checkseg4encroach(pa, pb, insertpt)) // encroached { flag = true; if (!d_segstatus[paryseg.id].isAbortive()) { d_segencmarker[paryseg.id] = MAXINT; d_threadmarker[threadId] = -1; break; } } i = d_cavetetsegnext[i]; } if (flag && d_threadmarker[threadId] != -1) // segments encroached are all abortive { int eleidx = d_insertidxlist[threadId]; if (threadmarker == 1) { d_tristatus[eleidx].setAbortive(true); } else if (threadmarker == 2) { d_tetstatus[eleidx].setAbortive(true); } d_threadmarker[threadId] = -1; } } __global__ void kernelMarkCavityAdjacentSubfaces( int* d_threadlist, trihandle* d_tet2trilist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int i = d_caveoldtethead[threadId]; int old; tethandle cavetet; trihandle checksh; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 4; j++) { checksh = d_tet2trilist[4 * cavetet.id + j]; if (checksh.id != -1) { old = atomicMin(d_trimarker + checksh.id, threadId); if (old < threadId) d_threadmarker[threadId] = -1; else if (old > threadId && old != MAXINT) d_threadmarker[old] = -1; } } i = d_caveoldtetnext[i]; } } __global__ void kernelCountCavitySubfaces( int* d_threadlist, trihandle* d_tet2trilist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_cavetetshsize, int* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int cavetetshsize = 0; if (d_threadmarker[threadId] != -1) { int i = d_caveoldtethead[threadId]; tethandle cavetet; trihandle checksh; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 4; j++) { checksh = d_tet2trilist[4 * cavetet.id + j]; if (checksh.id != -1) { if (d_trimarker[checksh.id] == threadId) { cavetetshsize++; d_trimarker[checksh.id] = MAXINT; } } } i = d_caveoldtetnext[i]; } } d_cavetetshsize[pos] = cavetetshsize; } __global__ void kernelAppendCavitySubfaces( int* d_threadlist, trihandle* d_tet2trilist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, trihandle* d_cavetetshlist, int* d_cavetetshprev, int* d_cavetetshnext, int* d_cavetetshhead, int* d_cavetetshtail, int* d_cavetetshsize, int* d_cavetetshindices, int* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; if (d_cavetetshsize[pos] == 0) return; int sindex = d_cavetetshindices[pos]; d_cavetetshhead[threadId] = sindex; int i = d_caveoldtethead[threadId]; tethandle cavetet; trihandle checksh; int index, count = 0, prev = -1; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (int j = 0; j < 4; j++) { checksh = d_tet2trilist[4 * cavetet.id + j]; if (checksh.id != -1) { if (d_trimarker[checksh.id] == MAXINT) { d_trimarker[checksh.id] = -2; // Mark as appended index = sindex + count; d_cavetetshlist[index] = checksh; d_cavetetshprev[index] = prev; d_cavetetshnext[index] = -1; if (prev != -1) d_cavetetshnext[prev] = index; count++; prev = index; } } } i = d_caveoldtetnext[i]; if (i == -1) // reached the end { 
d_cavetetshtail[threadId] = index; } } } __global__ void kernelCheckSubfaceEncroachment( int* d_insertidxlist, REAL* d_insertptlist, locateresult* d_pointlocation, int* d_threadlist, REAL* d_pointlist, int* d_trifacelist, int* d_subfaceencmarker, tristatus* d_tristatus, tetstatus* d_tetstatus, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 2) // not a tetrahedron return; locateresult loc = d_pointlocation[threadId]; REAL *insertpt = d_insertptlist + 3 * threadId; REAL *pa, *pb, *pc; trihandle parysh; bool flag = false; int i = d_cavetetshhead[threadId]; while (i != -1) { parysh = d_cavetetshlist[i]; pa = cudamesh_id2pointlist(d_trifacelist[3 * parysh.id + 0], d_pointlist); pb = cudamesh_id2pointlist(d_trifacelist[3 * parysh.id + 1], d_pointlist); pc = cudamesh_id2pointlist(d_trifacelist[3 * parysh.id + 2], d_pointlist); if (checkface4encroach(pa, pb, pc, insertpt)) // encroached { flag = true; if (!d_tristatus[parysh.id].isAbortive()) { d_subfaceencmarker[parysh.id] = MAXINT; d_threadmarker[threadId] = -1; break; } } i = d_cavetetshnext[i]; } if (loc == OUTSIDE || (flag && d_threadmarker[threadId] != -1)) { // subfaces encroached are all abortive or points are outside the domain int insertidx = d_insertidxlist[threadId]; d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; } } __global__ void kernelSubCavityExpandingCheck( int* d_threadlist, REAL* d_pointlist, tethandle* d_neighborlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, REAL* d_insertptlist, trihandle* d_caveshlist, int* d_caveshcur, int* d_caveshexpandsize, int* d_caveshexpandflag, int* d_priority, uint64* d_tetmarker, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; REAL* insertpt = d_insertptlist + 3 * threadId; int cur = d_caveshcur[threadId]; if (cur == -1) // this means that caveshlist is empty return; trihandle checksh = d_caveshlist[cur]; trihandle neighsh; tethandle neightet; REAL sign; REAL* pt[3]; int flag[3] = {0, 0, 0}; int shexpandsize = 0; //assert(d_trimarker[checksh.id] == threadId); uint64 marker = cudamesh_encodeUInt64Priority(d_priority[threadId], threadId); for (int j = 0; j < 3; j++) { if (!cudamesh_isshsubseg(checksh, d_tri2seglist)) { cudamesh_spivot(checksh, neighsh, d_tri2trilist); //assert(neighsh.id != -1); if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) { cudamesh_stpivot(neighsh, neightet, d_tri2tetlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { cudamesh_fsymself(neightet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { pt[0] = cudamesh_id2pointlist(cudamesh_sorg(neighsh, d_trifacelist), d_pointlist); pt[1] = cudamesh_id2pointlist(cudamesh_sdest(neighsh, d_trifacelist), d_pointlist); pt[2] = cudamesh_id2pointlist(cudamesh_sapex(neighsh, d_trifacelist), d_pointlist); sign = cudamesh_incircle3d(pt[0], pt[1], pt[2], insertpt); if (sign < 0) { atomicMin(d_trimarker + neighsh.id, marker); shexpandsize++; flag[j] = 1; } } } } } cudamesh_senextself(checksh); } d_caveshexpandsize[pos] = shexpandsize; if (shexpandsize > 0) { for (int j = 0; j < 3; j++) { if ((flag[j] == 1 && 
shexpandsize == 1) || (flag[j] == 0 && shexpandsize == 2)) { d_caveshexpandflag[pos] = j; break; } } } } __global__ void kernelSubCavityExpandingAppend( int* d_threadlist, trihandle* d_tri2trilist, trihandle* d_caveshlist, int* d_caveshprev, int* d_caveshnext, int* d_caveshhead, int* d_caveshtail, int* d_caveshcur, int* d_caveshexpandsize, int* d_caveshexpandindices, int* d_caveshexpandflag, int caveshstartindex, int* d_threadfinishmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int cur = d_caveshcur[threadId]; if (cur == -1) { d_threadfinishmarker[threadId] = -1; return; } trihandle checksh = d_caveshlist[cur]; trihandle neighsh; int sindex; int caveshexpandsize = d_caveshexpandsize[pos]; int caveshexpandflag = d_caveshexpandflag[pos]; if (caveshexpandsize != 0) { sindex = caveshstartindex + d_caveshexpandindices[pos]; int prev = d_caveshtail[threadId]; int newid = sindex; for (int j = 0; j < 3; j++) { if ((caveshexpandsize == 1 && j == caveshexpandflag) || (caveshexpandsize == 2 && j != caveshexpandflag) || caveshexpandsize == 3) { cudamesh_spivot(checksh, neighsh, d_tri2trilist); d_caveshlist[newid] = neighsh; d_caveshprev[newid] = prev; d_caveshnext[newid] = -1; if (prev != -1) d_caveshnext[prev] = newid; prev = newid++; } cudamesh_senextself(checksh); } d_caveshtail[threadId] = newid - 1; // Update current linklist pointer to next one d_caveshcur[threadId] = d_caveshnext[cur]; } else { if (cur == d_caveshtail[threadId]) d_threadfinishmarker[threadId] = -1; else d_caveshcur[threadId] = d_caveshnext[cur]; } } __global__ void kernelCavityBoundarySubfacesCheck( int* d_insertidxlist, int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* d_tet2seglist, tethandle* d_tri2tetlist, REAL* d_insertptlist, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, int* d_cavetetshmarker, tethandle* d_cavetetshflag, int* d_cavebdryexpandsize, int* d_cutcount, uint64* d_tetmarker, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; REAL* insertpt = d_insertptlist + 3 * threadId; trihandle parysh; tethandle neightet; int cavebdryexpandsize = 0; int cutcount = 0; double ori; int i = d_cavetetshhead[threadId]; while (i != -1) { parysh = d_cavetetshlist[i]; cudamesh_stpivot(parysh, neightet, d_tri2tetlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { cudamesh_fsymself(neightet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { // Found a subface inside subcavity if (cudamesh_getUInt64PriorityIndex(d_trimarker[parysh.id]) != threadId) { if (cudamesh_oppo(neightet, d_tetlist) != -1) { cudamesh_fsymself(neightet, d_neighborlist); } if (cudamesh_oppo(neightet, d_tetlist) != -1) { int idx[3]; REAL* pt[3]; idx[0] = cudamesh_org(neightet, d_tetlist); idx[1] = cudamesh_dest(neightet, d_tetlist); idx[2] = cudamesh_apex(neightet, d_tetlist); for (int j = 0; j < 3; j++) { pt[j] = cudamesh_id2pointlist(idx[j], d_pointlist); } ori = cuda_orient3d(pt[0], pt[1], pt[2], insertpt); if (ori < 0) { cudamesh_fsymself(neightet, d_neighborlist); ori = -ori; } } else { ori = 1; } // unmark and record this tet if it is either invisible by or coplanar with p if (ori >= 0) { d_tetmarker[neightet.id] = MAXULL; // unmark this tet d_cavetetshmarker[i] = 0; // mark this subface 
d_cavetetshflag[i] = neightet; cutcount++; cavebdryexpandsize += 4; } } } } i = d_cavetetshnext[i]; } d_cavebdryexpandsize[pos] = cavebdryexpandsize; d_cutcount[threadId] = cutcount; } __global__ void kernelCavityBoundarySubfacesAppend( int* d_threadlist, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, int* d_cavetetshmarker, tethandle* d_cavetetshflag, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int cavebdrystartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_cavebdryexpandsize[pos] == 0) return; int threadId = d_threadlist[pos]; tethandle neightet, neineitet; int sindex = d_cavebdryexpandindices[pos]; int prev = d_cavebdrytail[threadId]; int newid = cavebdrystartindex + sindex; int i = d_cavetetshhead[threadId]; while (i != -1) { if (d_cavetetshmarker[i] == 0) // Need to append { neightet = d_cavetetshflag[i]; neightet.ver = raw_epivot[neightet.ver]; d_cavebdrylist[newid] = neightet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = -1; // set to -1 first if (prev != -1) d_cavebdrynext[prev] = newid; prev = newid; newid++; for (int j = 0; j < 3; j++) { cudamesh_esym(neightet, neineitet); neineitet.ver = raw_epivot[neineitet.ver]; d_cavebdrylist[newid] = neineitet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = -1; // set to -1 first if (prev != -1) d_cavebdrynext[prev] = newid; prev = newid; newid++; cudamesh_enextself(neightet); } } i = d_cavetetshnext[i]; if (i == -1) { d_cavebdrytail[threadId] = newid - 1; } } } __global__ void kernelCavityBoundarySubsegsCheck( int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, int* d_seglist, tethandle* d_seg2tetlist, REAL* d_insertptlist, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_cavetetsegmarker, tethandle* d_cavetetsegflag, int* d_cavebdryexpandsize, int* d_cutcount, uint64* d_tetmarker, int* d_segmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; REAL* insertpt = d_insertptlist + 3 * threadId; REAL *pa, *pb, *pc; int ia, ib, ic; trihandle paryseg; tethandle neightet, spintet, neineitet; int cavebdryexpandsize = 0; int cutcount = 0; double ori; int i = d_cavetetseghead[threadId]; int j; while (i != -1) { paryseg = d_cavetetseglist[i]; if (d_segmarker[paryseg.id] != threadId) // not a splitting segment { cudamesh_sstpivot1(paryseg, neightet, d_seg2tetlist); { int pa, pb, pc, pd; pa = cudamesh_sorg(paryseg, d_seglist); pb = cudamesh_sdest(paryseg, d_seglist); pc = cudamesh_org(neightet, d_tetlist); pd = cudamesh_dest(neightet, d_tetlist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } } spintet = neightet; while (1) { if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) != threadId) break; cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) == threadId) // This segment is inside cavity { // Find an adjacent tet at this segment such that both faces // at this segment are not visible by p. 
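				// Walk around the segment with fnextself and test, for each tet, the two
				// faces incident to the segment with orient3d: a tet qualifies as the new
				// outer tet only when the insertion point is not visible from either face
				// (both orientations >= 0). That tet is then unmarked below so the segment
				// stays on the cavity boundary; four slots are reserved because the cut
				// tet later contributes this face plus its three side faces to the
				// cavity-boundary list.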
ia = cudamesh_org(neightet, d_tetlist); ib = cudamesh_dest(neightet, d_tetlist); pa = cudamesh_id2pointlist(ia, d_pointlist); pb = cudamesh_id2pointlist(ib, d_pointlist); spintet = neightet; j = 0; while (1) { ic = cudamesh_apex(spintet, d_tetlist); if (ic != -1) { pc = cudamesh_id2pointlist(ic, d_pointlist); ori = cuda_orient3d(pa, pb, pc, insertpt); if (ori >= 0) { // Not visible. Check another face in this tet. cudamesh_esym(spintet, neineitet); ic = cudamesh_apex(neineitet, d_tetlist); if (ic != -1) { pc = cudamesh_id2pointlist(ic, d_pointlist); ori = cuda_orient3d(pb, pa, pc, insertpt); if (ori >= 0) { // Not visible. Found this face. j = 1; // Flag that it is found. break; } } } } cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } if (j == 0) { //printf("threadId #%d: Subseg check error - Couldn't find the tet to be unmarked!\n", threadId); } neightet = spintet; d_tetmarker[neightet.id] = MAXULL; // unmark this tet d_cavetetsegmarker[i] = 0; // mark this subseg d_cavetetsegflag[i] = neightet; cutcount++; cavebdryexpandsize += 4; } } i = d_cavetetsegnext[i]; } d_cavebdryexpandsize[pos] = cavebdryexpandsize; d_cutcount[threadId] += cutcount; } __global__ void kernelCavityBoundarySubsegsAppend( int* d_threadlist, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_cavetetsegmarker, tethandle* d_cavetetsegflag, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int cavebdrystartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_cavebdryexpandsize[pos] == 0) return; int threadId = d_threadlist[pos]; tethandle neightet, neineitet; int sindex = d_cavebdryexpandindices[pos]; int prev = d_cavebdrytail[threadId]; int newid = cavebdrystartindex + sindex; int i = d_cavetetseghead[threadId]; while (i != -1) { if (d_cavetetsegmarker[i] == 0) // Need to append { neightet = d_cavetetsegflag[i]; neightet.ver = raw_epivot[neightet.ver]; d_cavebdrylist[newid] = neightet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = -1; // set to -1 first if (prev != -1) d_cavebdrynext[prev] = newid; prev = newid; newid++; for (int j = 0; j < 3; j++) { cudamesh_esym(neightet, neineitet); neineitet.ver = raw_epivot[neineitet.ver]; d_cavebdrylist[newid] = neineitet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = -1; // set to -1 first if (prev != -1) d_cavebdrynext[prev] = newid; prev = newid; newid++; cudamesh_enextself(neightet); } } i = d_cavetetsegnext[i]; if (i == -1) { d_cavebdrytail[threadId] = newid - 1; } } } __global__ void kernelUpdateCavity2StarShapedSortOutBoundaryListCount( int* d_threadlist, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavecount, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int count = 0; int threadId = d_threadlist[pos]; int i = d_cavebdryhead[threadId]; while (i != -1) { count += 1; i = d_cavebdrynext[i]; } d_cavecount[pos] = count; } __global__ void kernelUpdateCavity2StarShapedSortOutBoundaryListAppend( int* d_threadlist, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, tethandle* d_cavelist, int* d_caveprev, int* d_cavenext, int* d_expandindices, int* d_cavethreadidx, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int 
sindex = d_expandindices[pos]; int prev = -1; int i = d_cavebdryhead[threadId]; d_cavebdryhead[threadId] = sindex; while (i != -1) { d_cavelist[sindex] = d_cavebdrylist[i]; d_caveprev[sindex] = prev; d_cavenext[sindex] = -1; if (prev != -1) d_cavenext[prev] = sindex; d_cavethreadidx[sindex] = threadId; prev = sindex; sindex++; i = d_cavebdrynext[i]; } d_cavebdrytail[threadId] = sindex - 1; } __global__ void kernelUpdateCavity2StarShapedCheck( int* d_insertidxlist, int* d_cavethreadidx, REAL* d_pointlist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2seglist, REAL* d_insertptlist, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavetetexpandsize, int* d_cavebdryexpandsize, int* d_cutcount, uint64* d_tetmarker, int cavebdrycurstartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int tetexpandsize = 0; int bdryexpandsize = 0; int threadId = d_cavethreadidx[pos]; REAL* insertpt = d_insertptlist + 3 * threadId; int cur = cavebdrycurstartindex + pos; tethandle cavetet = d_cavebdrylist[cur]; tethandle neightet; cudamesh_fsym(cavetet, neightet, d_neighborlist); bool enqflag; REAL ori; if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { if (cudamesh_apex(cavetet, d_tetlist) != -1) { if (cudamesh_oppo(neightet, d_tetlist) != -1) { REAL *pts[3]; int idx[3]; idx[0] = cudamesh_org(cavetet, d_tetlist); idx[1] = cudamesh_dest(cavetet, d_tetlist); idx[2] = cudamesh_apex(cavetet, d_tetlist); for (int i = 0; i < 3; i++) { pts[i] = cudamesh_id2pointlist(idx[i], d_pointlist); } ori = cuda_orient3d(pts[0], pts[1], pts[2], insertpt); enqflag = (ori > 0); } else { // It is a hull face. And its adjacent tet (at inside of the // domain) has been cut from the cavity. Cut it as well. 
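					// enqflag == false marks this face for cutting: the code below unmarks
					// neightet and reserves three slots so its other faces can be
					// re-examined as new candidate boundary faces.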
enqflag = false; } } else { enqflag = true; // A hull edge } if (enqflag) { tetexpandsize = 1; } else { d_tetmarker[neightet.id] = MAXULL; d_cutcount[threadId] += 1; // This may cause a wrong value but it doesn't affect the result bdryexpandsize = 3; } } d_cavetetexpandsize[pos] = tetexpandsize; d_cavebdryexpandsize[pos] = bdryexpandsize; } __global__ void kernelUpdateCavity2StarShapedSetThreadidx( int* d_cavethreadidx, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int* d_cavetetthreadidx, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_cavebdrythreadidx, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; int eindex; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; d_cavetetthreadidx[eindex] = threadId; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; for (int j = 0; j < 3; j++) { d_cavebdrythreadidx[eindex + j] = threadId; } } } __global__ void kernelUpdateCavity2StarShapedAppend( int* d_cavethreadidx, tethandle* d_neighborlist, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int* d_cavebdrythreadidx, int cavebdrystartindex, int cavebdryexpandsize, tethandle* d_cavetetlist, int* d_cavetetprev, int* d_cavetetnext, int* d_cavetethead, int* d_cavetettail, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int* d_cavetetthreadidx, int cavetetstartindex, int cavetetexpandsize, int cavebdrycurstartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; int cur = cavebdrycurstartindex + pos; tethandle cavetet = d_cavebdrylist[cur]; int eindex, sindex, prev; if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; sindex = cavebdrystartindex + eindex; tethandle neightet, neineitet; cudamesh_fsym(cavetet, neightet, d_neighborlist); int newid; if (eindex == 0 || d_cavebdrythreadidx[eindex - 1] != threadId) { prev = d_cavebdrytail[threadId]; d_cavebdrynext[prev] = sindex; // prev must not be -1 } else prev = sindex - 1; // Add three new faces to find new boundaries. 
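		// The three faces are obtained by stepping around neightet with enextself
		// and taking esym of each edge; each face is appended to this thread's
		// cavebdry linked list through explicit prev/next indices.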
for (int j = 0; j < 3; j++) { newid = sindex + j; cudamesh_esym(neightet, neineitet); neineitet.ver = raw_epivot[neineitet.ver]; d_cavebdrylist[newid] = neineitet; d_cavebdryprev[newid] = prev; d_cavebdrynext[newid] = newid + 1; // set to next one first prev = newid; cudamesh_enextself(neightet); } if (eindex + 2 == cavebdryexpandsize - 1 || d_cavebdrythreadidx[eindex + 3] != threadId) { //if (threadId == 153) // printf("threadId = %d, cavebdryexpandsize = %d, eindex + 2 = %d, d_cavebdrythreadidx[eindex + 3] = %d\n", // threadId, cavebdryexpandsize, eindex + 2, d_cavebdrythreadidx[eindex + 3]); d_cavebdrynext[newid] = -1; } } if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; sindex = cavetetstartindex + eindex; if (eindex == 0 || d_cavetetthreadidx[eindex - 1] != threadId) { prev = d_cavetettail[threadId]; if (prev != -1) d_cavetetnext[prev] = sindex; if (d_cavetethead[threadId] == -1) // initialize cavebdry list header d_cavetethead[threadId] = sindex; } else prev = sindex - 1; d_cavetetlist[sindex] = cavetet; d_cavetetprev[sindex] = prev; d_cavetetnext[sindex] = sindex + 1; if (eindex == cavetetexpandsize - 1 || d_cavetetthreadidx[eindex + 1] != threadId) d_cavetetnext[sindex] = -1; } } __global__ void kernelUpdateCavity2StarShapedUpdateListTails( int* d_cavethreadidx, int* d_cavetetnext, int* d_cavetettail, int* d_cavetetexpandsize, int* d_cavetetexpandindices, int cavetetstartindex, int* d_cavebdrynext, int* d_cavebdrytail, int* d_cavebdryexpandsize, int* d_cavebdryexpandindices, int cavebdrystartindex, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_cavethreadidx[pos]; int sindex, eindex, prev; if (d_cavetetexpandsize[pos] != 0) { eindex = d_cavetetexpandindices[pos]; sindex = cavetetstartindex + eindex; if (d_cavetetnext[sindex] == -1) d_cavetettail[threadId] = sindex; } if (d_cavebdryexpandsize[pos] != 0) { eindex = d_cavebdryexpandindices[pos]; sindex = cavebdrystartindex + eindex + 2; if (d_cavebdrynext[sindex] == -1) { d_cavebdrytail[threadId] = sindex; } } } __global__ void kernelUpdateBoundaryFaces( int* d_threadlist, tethandle* d_neighborlist, tethandle* d_cavetetlist, int* d_cavetetnext, int* d_cavetethead, tethandle* d_cavebdrylist, int* d_cavebdryprev, int* d_cavebdrynext, int* d_cavebdryhead, int* d_cavebdrytail, int* d_cutcount, uint64* d_tetmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; tethandle cavetet, neightet, prevtet; if (d_cutcount[threadId] > 0) { // Reuse old space int cur = d_cavebdryhead[threadId]; int prev = -1; int i = d_cavetethead[threadId]; while (i != -1) { cavetet = d_cavetetlist[i]; cudamesh_fsym(cavetet, neightet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { d_cavebdrylist[cur] = cavetet; prev = cur; cur = d_cavebdrynext[cur]; } i = d_cavetetnext[i]; if (i == -1) // reach the end of new boundary faces { if (prev != -1) { d_cavebdrynext[prev] = -1; d_cavebdrytail[threadId] = prev; } else { // this should not happen } } } } } __global__ void kernelUpdateOldTets( int* d_insertidxlist, int* d_threadlist, tethandle* d_neighborlist, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, tethandle* d_caveoldtetlist, int* d_caveoldtetprev, int* d_caveoldtetnext, int* d_caveoldtethead, int* d_caveoldtettail, int* d_cutcount, uint64* d_tetmarker, int* d_threadmarker, int numofthreads ) { int pos = 
blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; tethandle cavetet, neightet; if (d_cutcount[threadId] > 0) { // Reuse old space int prev = -1; int i = d_caveoldtethead[threadId]; while (i != -1) { cavetet = d_caveoldtetlist[i]; if (cudamesh_getUInt64PriorityIndex(d_tetmarker[cavetet.id]) == threadId) { if (prev != -1) d_caveoldtetnext[prev] = i; else d_caveoldtethead[threadId] = i; d_caveoldtetprev[i] = prev; prev = i; } i = d_caveoldtetnext[i]; if (i == -1) // reach the end of new boundary faces { if (prev != -1) { d_caveoldtetnext[prev] = -1; d_caveoldtettail[threadId] = prev; } else { // The cavity should contain at least one tet // Usually this would not happen int eleidx = d_insertidxlist[threadId]; if (threadmarker == 0) d_segstatus[eleidx].setAbortive(true); else if (threadmarker == 1) d_tristatus[eleidx].setAbortive(true); else if (threadmarker == 2) d_tetstatus[eleidx].setAbortive(true); d_threadmarker[threadId] = -1; } } } } } __global__ void kernelAdjacentCavitiesCheck( int* d_threadlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, int* d_priority, tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead, uint64* d_tetmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; if (d_threadmarker[threadId] == -1) return; int neighborId; tethandle cavetet; int i = d_cavebdryhead[threadId]; while (i != -1) { cavetet = d_cavebdrylist[i]; neighborId = cudamesh_getUInt64PriorityIndex(d_tetmarker[cavetet.id]); if (neighborId != MAXUINT && neighborId != threadId) // neighbor also marked { if (d_threadmarker[neighborId] != -1) // neighbor is alive also { if(threadId > neighborId) { d_threadmarker[threadId] = -1; return; } } } i = d_cavebdrynext[i]; } } __global__ void kernelUpdateSubcavities( int* d_threadlist, tethandle* d_neighborlist, tethandle* d_tri2tetlist, trihandle* d_caveshlist, int* d_caveshprev, int* d_caveshnext, int* d_caveshhead, int* d_caveshtail, int* d_cutshcount, uint64* d_tetmarker, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; trihandle parysh; tethandle neightet; // Reuse old space bool enqflag; int cutshcount = 0; int prev = -1; int i = d_caveshhead[threadId]; // for dangling segment, this is -1 while (i != -1) { parysh = d_caveshlist[i]; if (cudamesh_getUInt64PriorityIndex(d_trimarker[parysh.id]) == threadId) { enqflag = false; cudamesh_stpivot(parysh, neightet, d_tri2tetlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) { cudamesh_fsymself(neightet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) == threadId) enqflag = true; } if (enqflag) { if (prev != -1) d_caveshnext[prev] = i; else d_caveshhead[threadId] = i; d_caveshprev[i] = prev; prev = i; } else { d_trimarker[parysh.id] = MAXULL; cutshcount++; } } i = d_caveshnext[i]; if (i == -1) // reach the end of subcavity faces { if (prev != -1) { d_caveshnext[prev] = -1; d_caveshtail[threadId] = prev; } } } d_cutshcount[pos] = cutshcount; } __global__ void kernelValidateSubcavities( int* d_insertidxlist, locateresult* d_pointlocation, trihandle* d_searchsh, int* d_threadlist, trihandle* d_seg2trilist, tristatus* d_segstatus, int* d_trifacelist, trihandle* d_tri2trilist, tristatus* d_tristatus, int* d_segmarker, uint64* 
d_trimarker, int* d_cutshcount, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int cutshcount = d_cutshcount[pos]; if (cutshcount == 0) return; int threadmarker = d_threadmarker[threadId]; locateresult loc = d_pointlocation[threadId]; int i = 0; trihandle splitsh, neighsh; if (loc == ONFACE) { if (threadmarker == 1) { splitsh = d_searchsh[threadId]; if (cudamesh_getUInt64PriorityIndex(d_trimarker[splitsh.id]) != threadId) { printf("threadId #%d - Invalid trimarker #%d - %d\n", threadId, splitsh.id, cudamesh_getUInt64PriorityIndex(d_trimarker[splitsh.id])); i++; } } } else if (loc == ONEDGE) { if (threadmarker == 0) { int segId = d_insertidxlist[threadId]; trihandle splitseg(segId, 0); if (d_segmarker[segId] != threadId) { printf("threadId #%d - Invalid segmarker %d\n", threadId, d_segmarker[segId]); i++; } cudamesh_spivot(splitseg, splitsh, d_seg2trilist); } else if (threadmarker == 1) { splitsh = d_searchsh[threadId]; } if (splitsh.id != -1) { // All subfaces at this edge should be in subcavity int pa = cudamesh_sorg(splitsh, d_trifacelist); neighsh = splitsh; while (1) { if (cudamesh_sorg(neighsh, d_trifacelist) != pa) { cudamesh_sesymself(neighsh); } if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) { printf("threadId #%d - Invalid trimarker #%d - %d\n", threadId, neighsh.id, cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id])); i++; } cudamesh_spivotself(neighsh, d_tri2trilist); if (neighsh.id == splitsh.id) break; if (neighsh.id == -1) break; } } } if (i > 0) { int eleidx = d_insertidxlist[threadId]; if (threadmarker == 0) { d_segstatus[eleidx].setAbortive(true); } else if (threadmarker == 1) { d_tristatus[eleidx].setAbortive(true); } d_threadmarker[threadId] = -1; } } __global__ void kernelValidateRefinementElements( int* d_insertidxlist, trihandle* d_searchsh, tethandle* d_searchtet, tethandle* d_neighborlist, int* d_threadlist, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, uint64* d_trimarker, uint64* d_tetmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; int insertidx = d_insertidxlist[threadId]; if (threadmarker == 0) { tethandle spintet; tethandle searchtet = d_searchtet[threadId]; spintet = searchtet; while (1) { if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) != threadId) { d_segstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; break; } cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == searchtet.id) break; } } else if (threadmarker == 1) { int elementid = d_searchsh[threadId].id; if (cudamesh_getUInt64PriorityIndex(d_trimarker[elementid]) != threadId) { d_tristatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; } } else if (threadmarker == 2) { if (cudamesh_getUInt64PriorityIndex(d_tetmarker[insertidx]) != threadId) { d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; } } } __global__ void kernelCheckDistances2ClosePoints( int* d_insertidxlist, REAL* d_insertptlist, locateresult* d_pointlocation, tethandle* d_searchtet, int* d_threadlist, REAL* d_pointlist, tethandle* d_neighborlist, int* d_tetlist, tristatus* d_segstatus, tristatus* d_tristatus, tetstatus* d_tetstatus, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= 
numofthreads) return; int threadId = d_threadlist[pos]; int insertiontype = d_threadmarker[threadId]; int insertidx = d_insertidxlist[threadId]; REAL* insertpt = d_insertptlist + 3 * threadId; tethandle searchtet, spintet; searchtet = d_searchtet[threadId]; int ptidx, i; REAL* pt, rd; REAL minedgelength = raw_kernelconstants[0]; locateresult loc = d_pointlocation[threadId]; if (loc == ONEDGE) { spintet = searchtet; ptidx = cudamesh_org(spintet, d_tetlist); pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 0) d_segstatus[insertidx].setAbortive(true); else if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } ptidx = cudamesh_dest(spintet, d_tetlist); pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 0) d_segstatus[insertidx].setAbortive(true); else if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } while (1) { ptidx = cudamesh_apex(spintet, d_tetlist); if (ptidx != -1) { pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 0) d_segstatus[insertidx].setAbortive(true); else if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == searchtet.id) break; } } else if (loc == ONFACE) { for (i = 0; i < 3; i++) { ptidx = d_tetlist[4 * searchtet.id + i]; pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } ptidx = d_tetlist[4 * searchtet.id + 3]; if (ptidx != -1) { pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } cudamesh_fsym(searchtet, spintet, d_neighborlist); ptidx = cudamesh_oppo(spintet, d_tetlist); if (ptidx != -1) { pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 1) d_tristatus[insertidx].setAbortive(true); else if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } } else if (loc == INTETRAHEDRON) { for (i = 0; i < 4; i++) { ptidx = d_tetlist[4 * searchtet.id + i]; pt = cudamesh_id2pointlist(ptidx, d_pointlist); rd = cudamesh_distance(pt, insertpt); if (rd < minedgelength) { if (insertiontype == 2) d_tetstatus[insertidx].setAbortive(true); d_threadmarker[threadId] = -1; return; } } } } __global__ void kernelComputeShortestEdgeLength( int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, REAL* d_insertptlist, REAL* d_smlen, int* d_parentpt, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= 
numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) // a tetrahedron return; REAL* insertpt = d_insertptlist + 3 * threadId; tethandle cavetet; int ptidx, parentpt; REAL *pts, smlen = -1.0, len; int i = d_caveoldtethead[threadId], j; cavetet = d_caveoldtetlist[i]; ptidx = d_tetlist[4 * cavetet.id + 0]; pts = cudamesh_id2pointlist(ptidx, d_pointlist); smlen = cudamesh_distance(pts, insertpt); parentpt = ptidx; while (i != -1) { cavetet = d_caveoldtetlist[i]; for (j = 0; j < 4; j++) { ptidx = d_tetlist[4 * cavetet.id + j]; if (ptidx == -1) continue; pts = cudamesh_id2pointlist(ptidx, d_pointlist); len = cudamesh_distance(pts, insertpt); if(len < smlen) { smlen = len; parentpt = ptidx; } } i = d_caveoldtetnext[i]; } d_smlen[threadId] = smlen; d_parentpt[threadId] = parentpt; } __global__ void kernelUpdateCavitySubsegs( int* d_threadlist, int* d_tetlist, tethandle* d_neighborlist, int* d_seglist, tethandle* d_seg2tetlist, trihandle* d_cavetetseglist, int* d_cavetetsegprev, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_cavetetsegtail, uint64* d_tetmarker, int* d_segmarker, int* d_segmarker2, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; tethandle spintet, neightet, neineitet; trihandle paryseg, checkseg; // Reuse old space int j, k, markeridx; int prev = -1; int i = d_cavetetseghead[threadId]; while (i != -1) { paryseg = d_cavetetseglist[i]; if (d_segmarker[paryseg.id] != threadId) // not a splitting segment { // Check if the segment is inside the cavity. // 'j' counts the num of adjacent tets of this seg. // 'k' counts the num of adjacent tets which are 'infected'. 
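			// After the walk: k == 0 means no adjacent tet belongs to this cavity and
			// the entry is simply dropped; 0 < k < j means the segment lies on the
			// cavity boundary, so it is re-bonded (sstbond1) to the remembered outer
			// tet and kept in the list; k == j would mean the segment is completely
			// inside the cavity, which is treated as an error.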
j = k = 0; cudamesh_sstpivot1(paryseg, neightet, d_seg2tetlist); spintet = neightet; while (1) { j++; markeridx = cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]); if (markeridx != threadId) // outside cavity { // Remember it only when it is not inside other cavities // (possible when cavities share edges/segments) if (markeridx == MAXUINT || (markeridx != MAXUINT && d_threadmarker[markeridx] == -1)) // a unmarked tet or a tet belongs to loser neineitet = spintet; } else { k++; } cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } if (k == 0) // should be removed { } else if (k < j) // on the boundary { assert(neineitet.id != -1); // there must be a tet that is not included in any cavities // connect it to the recorded outer tet cudamesh_sstbond1(paryseg, neineitet, d_seg2tetlist); // update cavetetseg list if (prev != -1) d_cavetetsegnext[prev] = i; else d_cavetetseghead[threadId] = i; d_cavetetsegprev[i] = prev; prev = i; } else // impossible { assert(0); printf("Error: Segment #%d is inside the cavity!\n", paryseg.id); } } i = d_cavetetsegnext[i]; if (i == -1) // reach the end of cavetetseg { if (prev != -1) // when there is at least one boundary segment { d_cavetetsegnext[prev] = -1; d_cavetetsegtail[threadId] = prev; } else // no boundary segment { d_cavetetseghead[threadId] = -1; d_cavetetsegtail[threadId] = -1; } } } } __global__ void kernelUpdateCavitySubfaces( int* d_threadlist, tethandle* d_neighborlist, tethandle* d_tri2tetlist, trihandle* d_cavetetshlist, int* d_cavetetshprev, int* d_cavetetshnext, int* d_cavetetshhead, int* d_cavetetshtail, uint64* d_tetmarker, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; tethandle neightet; trihandle parysh, checksh; // Reuse old space int j, k; int prev = -1; int i = d_cavetetshhead[threadId]; while (i != -1) { parysh = d_cavetetshlist[i]; if (cudamesh_getUInt64PriorityIndex(d_trimarker[parysh.id]) != threadId) // not inside subcavity { // Check if this subface is inside the cavity. 
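			// k counts how many of the two tets adjacent to this subface are marked by
			// this thread; checksh remembers the side whose tet lies outside. k == 1
			// keeps the subface as a boundary subface (stored with the outer side),
			// k == 0 drops it, and k == 2 would mean the subface is inside the cavity,
			// which is treated as an error.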
k = 0; for (j = 0; j < 2; j++) { cudamesh_stpivot(parysh, neightet, d_tri2tetlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[neightet.id]) != threadId) { checksh = parysh; // remember this side } else { k++; } cudamesh_sesymself(parysh); } if (k == 0) // should be removed { } else if (k == 1) // on the boundary { parysh = checksh; // update cavetetsh list if (prev != -1) d_cavetetshnext[prev] = i; else d_cavetetshhead[threadId] = i; d_cavetetshprev[i] = prev; d_cavetetshlist[i] = parysh; prev = i; } else // impossible { assert(0); printf("Error: Subface #%d is inside the cavity!\n", parysh.id); } } i = d_cavetetshnext[i]; if (i == -1) // reach the end of cavetetsh { if (prev != -1) // when there is at least one boundary subface { d_cavetetshnext[prev] = -1; d_cavetetshtail[threadId] = prev; } else // no boundary subface { d_cavetetshhead[threadId] = -1; d_cavetetshtail[threadId] = -1; } } } } __global__ void kernelInsertNewPoints( int* d_threadlist, REAL* d_pointlist, verttype* d_pointtypelist, REAL* d_insertptlist, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; int newidx = oldpointsize + pos; if (threadmarker == 0) d_pointtypelist[newidx] = FREESEGVERTEX; else if(threadmarker == 1) d_pointtypelist[newidx] = FREEFACETVERTEX; else d_pointtypelist[newidx] = FREEVOLVERTEX; newidx *= 3; REAL* insertpt = d_insertptlist + 3 * threadId; d_pointlist[newidx++] = insertpt[0]; d_pointlist[newidx++] = insertpt[1]; d_pointlist[newidx++] = insertpt[2]; } __global__ void kernelCountNewTets( int* d_threadlist, tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead, int* d_tetexpandsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int expandsize = 0; int i = d_cavebdryhead[threadId]; while (i != -1) { expandsize++; i = d_cavebdrynext[i]; } d_tetexpandsize[pos] = expandsize; } __global__ void kernelInsertNewTets( int* d_threadlist, tethandle* d_point2tetlist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* d_tet2seglist, tetstatus* d_tetstatus, tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead, int* d_tetexpandindices, int* d_emptytetindices, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int startidx = d_tetexpandindices[pos], newtetidx; int newptidx = oldpointsize + pos; tethandle neightet, oldtet, newtet; int i = d_cavebdryhead[threadId]; while (i != -1) { newtetidx = d_emptytetindices[startidx++]; neightet = d_cavebdrylist[i]; cudamesh_fsym(neightet, oldtet, d_neighborlist); // Get the oldtet (inside the cavity). // There might be duplicate elements in cavebdrylist. // In that case, oldtet will be newtet. 
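		// For every boundary face, a new tet is built in a slot taken from
		// d_emptytetindices: an interior face gets a tet whose opposite vertex is the
		// newly inserted point, while a hull face gets a new hull tet (oppo == -1)
		// with the new point as apex. The new tet is bonded to the outer neighbor
		// right away, and oldtet is kept in cavebdrylist so its remaining faces can
		// be reconnected in kernelConnectNewTetNeighbors.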
		// Check to avoid building a tet from such a fake (already replaced) oldtet.
		if (!d_tetstatus[oldtet.id].isEmpty())
		{
			if (cudamesh_apex(neightet, d_tetlist) != -1)
			{
				// Create a new tet in the cavity
				newtet.id = newtetidx;
				newtet.ver = 11;
				cudamesh_setorg(newtet, cudamesh_dest(neightet, d_tetlist), d_tetlist);
				cudamesh_setdest(newtet, cudamesh_org(neightet, d_tetlist), d_tetlist);
				cudamesh_setapex(newtet, cudamesh_apex(neightet, d_tetlist), d_tetlist);
				cudamesh_setoppo(newtet, newptidx, d_tetlist);
			}
			else
			{
				// Create a new hull tet
				newtet.id = newtetidx;
				newtet.ver = 11;
				cudamesh_setorg(newtet, cudamesh_org(neightet, d_tetlist), d_tetlist);
				cudamesh_setdest(newtet, cudamesh_dest(neightet, d_tetlist), d_tetlist);
				cudamesh_setapex(newtet, newptidx, d_tetlist);
				cudamesh_setoppo(newtet, -1, d_tetlist); // It must be opposite to face 3.
				// Adjust back to the cavity boundary face.
				cudamesh_esymself(newtet);
			}
			// Connect newtet <==> neightet; this also disconnects the old bond.
			cudamesh_bond(newtet, neightet, d_neighborlist);
			// Oldtet still connects to neightet
			d_cavebdrylist[i] = oldtet;
		}
		else // duplicate elements cause fake oldtet
		{
			d_cavebdrylist[i] = tethandle(-1, 11);
		}
		i = d_cavebdrynext[i];
	}
	d_point2tetlist[newptidx] = newtet;
}

__global__ void kernelConnectNewTetNeighbors(
	int* d_threadlist,
	tethandle* d_point2tetlist,
	int* d_tetlist,
	tethandle* d_neighborlist,
	tetstatus* d_tetstatus,
	tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead,
	uint64* d_tetmarker,
	int numofthreads
)
{
	int pos = blockIdx.x * blockDim.x + threadIdx.x;
	if (pos >= numofthreads)
		return;

	int threadId = d_threadlist[pos];

	tethandle oldtet, neightet, newtet, newneitet, spintet;
	int orgidx;

	int i = d_cavebdryhead[threadId], j;
	while (i != -1)
	{
		// Get the newtet and oldtet at the same face.
		oldtet = d_cavebdrylist[i];
		if (oldtet.id != -1) // not a fake one
		{
			cudamesh_fsym(oldtet, neightet, d_neighborlist);
			cudamesh_fsym(neightet, newtet, d_neighborlist);
			// Comment: oldtet and newtet must be at the same directed edge.
			// Connect the three other faces of this newtet.
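			// For each side face that is not bonded yet, rotate from oldtet around the
			// shared edge (fnextself) until the first tet outside this thread's cavity
			// is reached; since that tet's boundary face was already re-bonded to a new
			// tet in kernelInsertNewTets, fsym from it lands on the neighboring new tet,
			// which is then bonded to this face.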
for (j = 0; j < 3; j++) { cudamesh_esym(newtet, neightet); // Go to the face // Do not have neighbor yet if (d_neighborlist[4 * neightet.id + (neightet.ver & 3)].id == -1) { // Find the adjacent face of this new tet spintet = oldtet; while (1) { cudamesh_fnextself(spintet, d_neighborlist); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) != threadId) break; } cudamesh_fsym(spintet, newneitet, d_neighborlist); cudamesh_esymself(newneitet); cudamesh_bond(neightet, newneitet, d_neighborlist); } orgidx = cudamesh_org(newtet, d_tetlist); if(orgidx != -1) d_point2tetlist[orgidx] = newtet; cudamesh_enextself(newtet); cudamesh_enextself(oldtet); } d_cavebdrylist[i] = newtet; // Save the new tet // Update tetstatus d_tetstatus[oldtet.id].clear(); d_tetstatus[newtet.id].setEmpty(false); } i = d_cavebdrynext[i]; } // Check neighbor //i = d_cavebdryhead[threadId]; //while (i != -1) //{ // newtet = d_cavebdrylist[i]; // if (newtet.id != -1) // { // for (j = 0; j < 4; j++) // { // newtet.ver = j; // neightet = d_neighborlist[4 * newtet.id + (newtet.ver & 3)]; // if (d_neighborlist[4 * neightet.id + (neightet.ver & 3)].id != newtet.id) // printf("Wrong neighbor(%d): Tet#%d - %d, %d, %d, %d, Tet#%d - %d, %d, %d, %d\n", // threadId, // newtet.id, // d_neighborlist[4 * newtet.id + 0].id, d_neighborlist[4 * newtet.id + 1].id, // d_neighborlist[4 * newtet.id + 2].id, d_neighborlist[4 * newtet.id + 3].id, // neightet.id, // d_neighborlist[4 * neightet.id + 0].id, d_neighborlist[4 * neightet.id + 1].id, // d_neighborlist[4 * neightet.id + 2].id, d_neighborlist[4 * neightet.id + 3].id); // } // } // i = d_cavebdrynext[i]; //} } __global__ void kernelConnectBoundarySubfaces2NewTets( int* d_threadlist, tethandle* d_tri2tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, uint64* d_trimarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; trihandle parysh; tethandle neightet, newtet; int i = d_cavetetshhead[threadId]; while (i != -1) { parysh = d_cavetetshlist[i]; // this is connect to a outside tet // Connect it if it is a boundary subface if (cudamesh_getUInt64PriorityIndex(d_trimarker[parysh.id]) != threadId) { cudamesh_stpivot(parysh, neightet, d_tri2tetlist); cudamesh_fsym(neightet, newtet, d_neighborlist); cudamesh_sesymself(parysh); cudamesh_tsbond(newtet, parysh, d_tet2trilist, d_tri2tetlist); } i = d_cavetetshnext[i]; } } __global__ void kernelConnectBoundarySubsegs2NewTets( int* d_threadlist, tethandle* d_seg2tetlist, tethandle* d_neighborlist, trihandle* d_tet2seglist, tetstatus* d_tetstatus, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, int* d_segmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; trihandle paryseg; tethandle neightet, spintet; int i = d_cavetetseghead[threadId]; while (i != -1) { paryseg = d_cavetetseglist[i]; // Connect it if it is a boundary subseg if (d_segmarker[paryseg.id] != threadId) { cudamesh_sstpivot1(paryseg, neightet, d_seg2tetlist); spintet = neightet; while (1) { cudamesh_tssbond1(spintet, paryseg, d_tet2seglist); cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } } else { // This may happen when there is only one splitting segment } i = d_cavetetsegnext[i]; } } __global__ void kernelSubCavityBoundaryEdgeCheck( int* d_threadlist, 
trihandle* d_tri2seglist, trihandle* d_tri2trilist, trihandle* d_caveshlist, int* d_caveshnext, int* d_caveshhead, int* d_caveshbdsize, uint64* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) { d_caveshbdsize[pos] = 0; return; } trihandle cavesh, neighsh; REAL sign; int caveshbdsize = 0; int i = d_caveshhead[threadId], j; while (i != -1) { cavesh = d_caveshlist[i]; for (j = 0; j < 3; j++) { if (!cudamesh_isshsubseg(cavesh, d_tri2seglist)) { cudamesh_spivot(cavesh, neighsh, d_tri2trilist); if (neighsh.id != -1) { if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) { // A boundary edge sign = 1; } else { // Internal edge sign = -1; } } else { // A boundary edge sign = 1; } } else { // A segment. It is a boundary edge sign = 1; } if (sign >= 0) { caveshbdsize++; } cudamesh_senextself(cavesh); } i = d_caveshnext[i]; } d_caveshbdsize[pos] = caveshbdsize; } __global__ void kernelSubCavityBoundaryEdgeAppend( int* d_threadlist, trihandle* d_tri2seglist, trihandle* d_tri2trilist, trihandle* d_caveshlist, int* d_caveshnext, int* d_caveshhead, trihandle* d_caveshbdlist, int* d_caveshbdprev, int* d_caveshbdnext, int* d_caveshbdhead, int* d_caveshbdtail, int* d_caveshbdsize, int* d_caveshbdindices, uint64* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; trihandle cavesh, neighsh; REAL sign; int caveshbdsize = d_caveshbdsize[pos]; if (caveshbdsize == 0) return; int prev = -1, newid = d_caveshbdindices[pos]; int i = d_caveshhead[threadId], j; while (i != -1) { cavesh = d_caveshlist[i]; for (j = 0; j < 3; j++) { if (!cudamesh_isshsubseg(cavesh, d_tri2seglist)) { cudamesh_spivot(cavesh, neighsh, d_tri2trilist); if (neighsh.id != -1) { if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) { // A boundary edge sign = 1; } else { // Internal edge sign = -1; } } else { // A boundary edge sign = 1; } } else { // A segment. 
It is a boundary edge sign = 1; } if (sign >= 0) { d_caveshbdlist[newid] = cavesh; d_caveshbdprev[newid] = prev; d_caveshbdnext[newid] = -1; if (prev != -1) d_caveshbdnext[prev] = newid; else d_caveshbdhead[threadId] = newid; prev = newid; newid++; } cudamesh_senextself(cavesh); } i = d_caveshnext[i]; if (i == -1) // reach the end of list { d_caveshbdtail[threadId] = prev; } } } __global__ void kernelInsertNewSubfaces( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, trihandle* d_seg2trilist, int* d_trifacelist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, int* d_tri2parentidxlist, tristatus* d_tristatus, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, int* d_caveshbdindices, int* d_emptytriindices, trihandle* d_casout, trihandle* d_casin, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; int startidx = d_caveshbdindices[pos], newtriidx; int newptidx = oldpointsize + pos; trihandle parysh, checkseg, newsh, casin, casout, neighsh; int pa, pb; int i = d_caveshbdhead[threadId]; while (i != -1) { parysh = d_caveshbdlist[i]; cudamesh_sspivot(parysh, checkseg, d_tri2seglist); if ((parysh.shver & 01) != 0) cudamesh_sesymself(parysh); pa = cudamesh_sorg(parysh, d_trifacelist); pb = cudamesh_sdest(parysh, d_trifacelist); // Create a new subface newtriidx = d_emptytriindices[startidx++]; newsh.id = newtriidx; newsh.shver = 0; cudamesh_setsorg(newsh, pa, d_trifacelist); cudamesh_setsdest(newsh, pb, d_trifacelist); cudamesh_setsapex(newsh, newptidx, d_trifacelist); d_tri2parentidxlist[newtriidx] = d_tri2parentidxlist[parysh.id]; if (d_pointtypelist[pa] == FREEFACETVERTEX) { d_point2trilist[pa] = newsh; } if (d_pointtypelist[pb] == FREEFACETVERTEX) { d_point2trilist[pb] = newsh; } // Save the outer subfaces first cudamesh_spivot(parysh, casout, d_tri2trilist); d_casout[i] = casout; if (casout.id != -1) { casin = casout; if (checkseg.id != -1) { // Make sure that newsh has the right ori at this segment. checkseg.shver = 0; if (cudamesh_sorg(newsh, d_trifacelist) != cudamesh_sorg(checkseg, d_seglist)) { cudamesh_sesymself(newsh); cudamesh_sesymself(parysh); // This side should also be inverse. 
} cudamesh_spivot(casin, neighsh, d_tri2trilist); while (neighsh.id != parysh.id) { casin = neighsh; cudamesh_spivot(casin, neighsh, d_tri2trilist); } } d_casin[i] = casin; } i = d_caveshbdnext[i]; } } __global__ void kernelConnectNewSubface2OuterSubface_Phase1( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, trihandle* d_seg2trilist, int* d_trifacelist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, int* d_tri2parentidxlist, tristatus* d_tristatus, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, int* d_caveshbdindices, int* d_emptytriindices, trihandle* d_casout, trihandle* d_casin, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; int startidx = d_caveshbdindices[pos], newtriidx; int newptidx = oldpointsize + pos; trihandle parysh, checkseg, newsh, casin, casout, neighsh; int pa, pb; int i = d_caveshbdhead[threadId]; while (i != -1) { parysh = d_caveshbdlist[i]; cudamesh_sspivot(parysh, checkseg, d_tri2seglist); if ((parysh.shver & 01) != 0) cudamesh_sesymself(parysh); // Create a new subface newtriidx = d_emptytriindices[startidx++]; newsh.id = newtriidx; newsh.shver = 0; // Connect newsh to outer old subfaces (Phase 1). casout = d_casout[i]; if (casout.id != -1) { //casin = casout; if (checkseg.id != -1) { // Make sure that newsh has the right ori at this segment. checkseg.shver = 0; if (cudamesh_sorg(newsh, d_trifacelist) != cudamesh_sorg(checkseg, d_seglist)) { cudamesh_sesymself(newsh); cudamesh_sesymself(parysh); // This side should also be inverse. } } casin = d_casin[i]; cudamesh_sbond1(newsh, casout, d_tri2trilist); cudamesh_sbond1(casin, newsh, d_tri2trilist); } i = d_caveshbdnext[i]; } } __global__ void kernelConnectNewSubface2OuterSubface_Phase2( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, trihandle* d_seg2trilist, int* d_trifacelist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, int* d_tri2parentidxlist, tristatus* d_tristatus, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, int* d_caveshbdindices, int* d_emptytriindices, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; int startidx = d_caveshbdindices[pos], newtriidx; int newptidx = oldpointsize + pos; trihandle parysh, checkseg, newsh, casout; int i = d_caveshbdhead[threadId]; while (i != -1) { parysh = d_caveshbdlist[i]; cudamesh_sspivot(parysh, checkseg, d_tri2seglist); if ((parysh.shver & 01) != 0) { cudamesh_sesymself(parysh); d_caveshbdlist[i] = parysh; // Update the element in the list } // Create a new subface newtriidx = d_emptytriindices[startidx++]; newsh.id = newtriidx; newsh.shver = 0; // Connect newsh to outer subfaces (Phase 2). // Check if old subface is connected to new one, // if so, fix it cudamesh_spivot(parysh, casout, d_tri2trilist); if (casout.id != -1) { if (checkseg.id != -1) { // Make sure that newsh has the right ori at this segment. checkseg.shver = 0; if (cudamesh_sorg(newsh, d_trifacelist) != cudamesh_sorg(checkseg, d_seglist)) { cudamesh_sesymself(newsh); cudamesh_sesymself(parysh); // This side should also be inverse. 
d_caveshbdlist[i] = parysh; // Update the element in the list } } if (d_tristatus[casout.id].isEmpty()) // old subface is connected to new one { cudamesh_sbond1(newsh, casout, d_tri2trilist); } } if (checkseg.id != -1) { cudamesh_ssbond(newsh, checkseg, d_tri2seglist, d_seg2trilist); } // Connect oldsh <== newsh (for connecting adjacent new subfaces). // parysh and newsh point to the same edge and the same ori. cudamesh_sbond1(parysh, newsh, d_tri2trilist); i = d_caveshbdnext[i]; } if (d_pointtypelist[newptidx] == FREEFACETVERTEX) d_point2trilist[newptidx] = newsh; } __global__ void kernelConnectNewSubfaceNeighbors( int* d_threadlist, int* d_trifacelist, trihandle* d_tri2trilist, tristatus* d_tristatus, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, uint64* d_trimarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; trihandle parysh, newsh, neighsh; int pa, pb; int i = d_caveshbdhead[threadId]; while (i != -1) { // Get an old subface at edge [a, b]. parysh = d_caveshbdlist[i]; cudamesh_spivot(parysh, newsh, d_tri2trilist); // The new subface [a, b, p]. cudamesh_senextself(newsh); // At edge [b, p]. cudamesh_spivot(newsh, neighsh, d_tri2trilist); if (neighsh.id == -1) // No neighbor yet { // Find the adjacent new subface at edge [b, p]. pb = cudamesh_sdest(parysh, d_trifacelist); neighsh = parysh; while (1) { cudamesh_senextself(neighsh); cudamesh_spivotself(neighsh, d_tri2trilist); if (neighsh.id == -1) break; if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) break; if (cudamesh_sdest(neighsh, d_trifacelist) != pb) cudamesh_sesymself(neighsh); } if (neighsh.id != -1) { // Now 'neighsh' is a new subface at edge [b, #]. if (cudamesh_sorg(neighsh, d_trifacelist) != pb) cudamesh_sesymself(neighsh); cudamesh_senext2self(neighsh); // Go to the open edge [p, b]. cudamesh_sbond(newsh, neighsh, d_tri2trilist); } else { assert(false); } } cudamesh_spivot(parysh, newsh, d_tri2trilist); // The new subface [a, b, p]. cudamesh_senext2self(newsh); // At edge [p, a]. cudamesh_spivot(newsh, neighsh, d_tri2trilist); if (neighsh.id == -1) // No neighbor yet { // Find the adjacent new subface at edge [p, a]. pa = cudamesh_sorg(parysh, d_trifacelist); neighsh = parysh; while (1) { cudamesh_senext2self(neighsh); cudamesh_spivotself(neighsh, d_tri2trilist); if (neighsh.id == -1) break; if (cudamesh_getUInt64PriorityIndex(d_trimarker[neighsh.id]) != threadId) break; if (cudamesh_sorg(neighsh, d_trifacelist) != pa) cudamesh_sesymself(neighsh); } if (neighsh.id != -1) { // Now 'neighsh' is a new subface at edge [#, a]. if (cudamesh_sdest(neighsh, d_trifacelist) != pa) cudamesh_sesymself(neighsh); cudamesh_senextself(neighsh); // Go to the open edge [a, p]. 
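// Bond the open edge [p, a] of the new subface to the open edge [a, p] of the adjacent new subface found above.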
cudamesh_sbond(newsh, neighsh, d_tri2trilist); } else { assert(false); } } // Update tristatus d_tristatus[parysh.id].clear(); d_tristatus[newsh.id].setEmpty(false); i = d_caveshbdnext[i]; } } __global__ void kernelRemoveDegeneratedNewSubfaces( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_trifacelist, trihandle* d_tri2trilist, tristatus* d_tristatus, trihandle* d_cavesegshlist, int* d_cavesegshnext, int* d_cavesegshhead, int* d_cavesegshtail, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; trihandle parysh, cavesh, newsh, neighsh, casout; int newptidx = oldpointsize + pos; int i, j, head, next; i = head = d_cavesegshhead[threadId]; bool onesubface = (head == d_cavesegshtail[threadId]); while (i != -1) { // Get the next old subface. next = d_cavesegshnext[i]; // Get the saved old subface. parysh = d_cavesegshlist[i]; // Get a possible new degenerated subface. cudamesh_spivot(parysh, cavesh, d_tri2trilist); if (cudamesh_sapex(cavesh, d_trifacelist) == newptidx) // a new degenerated subface { if (onesubface) // only one degenerated subface { for (j = 0; j < 2; j++) { cudamesh_senextself(cavesh); cudamesh_spivot(cavesh, newsh, d_tri2trilist); cudamesh_sdissolve(newsh, d_tri2trilist); } } else // more than one degenerated subface share at this segment { if (next == -1) parysh = d_cavesegshlist[head]; else parysh = d_cavesegshlist[next]; cudamesh_spivot(parysh, neighsh, d_tri2trilist); // Adjust cavesh and neighsh both at edge a->b, and has p as apex. if (cudamesh_sorg(neighsh, d_trifacelist) != cudamesh_sorg(cavesh, d_trifacelist)) { cudamesh_sesymself(neighsh); assert(cudamesh_sorg(neighsh, d_trifacelist) == cudamesh_sorg(cavesh, d_trifacelist)); } assert(cudamesh_sapex(neighsh, d_trifacelist) == newptidx); // Connect adjacent faces at two other edges of cavesh and neighsh. // As a result, the two degenerated new faces are squeezed from the // new triangulation of the cavity. Note that the squeezed faces // still hold the adjacent informations which will be used in // re-connecting subsegments (if they exist). for (j = 0; j < 2; j++) { cudamesh_senextself(cavesh); cudamesh_senextself(neighsh); cudamesh_spivot(cavesh, newsh, d_tri2trilist); cudamesh_spivot(neighsh, casout, d_tri2trilist); cudamesh_sbond1(newsh, casout, d_tri2trilist); } } // Update tristatus d_tristatus[cavesh.id].clear(); // delete this degenerated subface // Update the point-to-subface map. 
if (d_pointtypelist[newptidx] == FREEFACETVERTEX) d_point2trilist[newptidx] = newsh; } i = next; } } __global__ void kernelInsertNewSubsegs( int* d_segidlist, int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, trihandle* d_seg2trilist, int* d_seg2parentidxlist, tristatus* d_segstatus, int* d_trifacelist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, int* d_segencmarker, trihandle* d_cavesegshlist, int* d_cavesegshnext, int* d_cavesegshhead, int* d_emptysegindices, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0) return; int newptidx = oldpointsize + pos; trihandle splitseg, aseg, bseg, aoutseg, boutseg; int pa, pb; splitseg = trihandle(d_segidlist[threadId], 0); pa = cudamesh_sorg(splitseg, d_seglist); pb = cudamesh_sdest(splitseg, d_seglist); // Set new segments aseg.id = d_emptysegindices[2 * pos]; aseg.shver = 0; bseg.id = d_emptysegindices[2 * pos + 1]; bseg.shver = 0; cudamesh_setsorg(aseg, pa, d_seglist); cudamesh_setsdest(aseg, newptidx, d_seglist); cudamesh_setsapex(aseg, -1, d_seglist); cudamesh_setsorg(bseg, newptidx, d_seglist); cudamesh_setsdest(bseg, pb, d_seglist); cudamesh_setsapex(bseg, -1, d_seglist); d_seg2parentidxlist[aseg.id] = d_seg2parentidxlist[splitseg.id]; d_seg2parentidxlist[bseg.id] = d_seg2parentidxlist[splitseg.id]; // Update segstatus d_segstatus[splitseg.id].clear(); d_segstatus[aseg.id].setEmpty(false); d_segstatus[bseg.id].setEmpty(false); // Reset segment encroachement marker d_segencmarker[splitseg.id] = -1; // Connect [#, a]<->[a, p]. It is possible that [#, a] is an old segment to be removed cudamesh_senext2(splitseg, boutseg); // Temporarily use boutseg. cudamesh_spivotself(boutseg, d_seg2trilist); if (boutseg.id != -1) { cudamesh_senext2(aseg, aoutseg); cudamesh_sbond(boutseg, aoutseg, d_seg2trilist); } // Connect [p, b]<->[b, #]. It is possible that [b, #] is an old segment to be removed cudamesh_senext(splitseg, aoutseg); cudamesh_spivotself(aoutseg, d_seg2trilist); if (aoutseg.id != -1) { cudamesh_senext(bseg, boutseg); cudamesh_sbond(boutseg, aoutseg, d_seg2trilist); } // Connect [a, p] <-> [p, b]. cudamesh_senext(aseg, aoutseg); cudamesh_senext2(bseg, boutseg); cudamesh_sbond(aoutseg, boutseg, d_seg2trilist); // Connect subsegs [a, p] and [p, b] to adjacent new subfaces. // Although the degenerated new faces have been squeezed. They still // hold the connections to the actual new faces. trihandle parysh, neighsh, newsh; int i = d_cavesegshhead[threadId]; while (i != -1) { parysh = d_cavesegshlist[i]; cudamesh_spivot(parysh, neighsh, d_tri2trilist); // neighsh is a degenerated new face. 
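// Orient the degenerated face so its origin is pa; its senext2/senext edges then lead, through the links kept by the squeezed face, to the real new subfaces at [p, a] and [b, p], which are bonded to aseg and bseg below.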
if (cudamesh_sorg(neighsh, d_trifacelist) != pa) { cudamesh_sesymself(neighsh); } cudamesh_senext2(neighsh, newsh); cudamesh_spivotself(newsh, d_tri2trilist); // The edge [p, a] in newsh cudamesh_ssbond(newsh, aseg, d_tri2seglist, d_seg2trilist); cudamesh_senext(neighsh, newsh); cudamesh_spivotself(newsh, d_tri2trilist); // The edge [b, p] in newsh cudamesh_ssbond(newsh, bseg, d_tri2seglist, d_seg2trilist); i = d_cavesegshnext[i]; } if (d_pointtypelist[newptidx] == FREESEGVERTEX) d_point2trilist[newptidx] = aseg; if (d_pointtypelist[pa] == FREESEGVERTEX) d_point2trilist[pa] = aseg; if (d_pointtypelist[pb] == FREESEGVERTEX) d_point2trilist[pb] = bseg; } __global__ void kernelConnectNewSubseg2OuterSubseg( int* d_segidlist, int* d_threadlist, trihandle* d_seg2trilist, int* d_segmarker, trihandle* d_cavesegshlist, int* d_cavesegshprev, int* d_cavesegshnext, int* d_cavesegshhead, int* d_cavesegshtail, int* d_emptysegindices, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0) return; trihandle splitseg, aseg, bseg, aoutseg, boutseg; // Get old and new segments splitseg = trihandle(d_segidlist[threadId], 0); aseg.id = d_emptysegindices[2 * pos]; aseg.shver = 0; bseg.id = d_emptysegindices[2 * pos + 1]; bseg.shver = 0; // Connect [#, a]<->[a, p]. // If [a, b] is connected to a new segment [#, a], // then it is possible that [a, p] is connected to an old segment [*, a]. // Fix it. cudamesh_senext2(splitseg, boutseg); cudamesh_spivotself(boutseg, d_seg2trilist); if (boutseg.id != -1 && d_segmarker[boutseg.id] == MAXINT) { cudamesh_senext2(aseg, aoutseg); cudamesh_sbond(boutseg, aoutseg, d_seg2trilist); } // Connect [p, b]<->[b, #]. // if [a, b] is connected to a new segment [b, #], // then it is possible that [p, b] is connected to an old segment [b, *]. // Fix it. cudamesh_senext(splitseg, aoutseg); cudamesh_spivotself(aoutseg, d_seg2trilist); if (aoutseg.id != -1 && d_segmarker[aoutseg.id] == MAXINT) { cudamesh_senext(bseg, boutseg); cudamesh_sbond(boutseg, aoutseg, d_seg2trilist); } // Add new segments into list int newidx = 2 * pos; d_cavesegshhead[threadId] = newidx; d_cavesegshtail[threadId] = newidx + 1; d_cavesegshlist[newidx] = aseg; d_cavesegshprev[newidx] = -1; d_cavesegshnext[newidx] = newidx + 1; d_cavesegshlist[newidx + 1] = bseg; d_cavesegshprev[newidx + 1] = newidx; d_cavesegshnext[newidx + 1] = -1; } __global__ void kernelConnectNewSubfaces2NewTets( int* d_threadlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, uint64* d_tetmarker, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 2) return; int newptidx = oldpointsize + pos; trihandle parysh, checksh; tethandle neightet, spintet; int i = d_caveshbdhead[threadId], j; while (i != -1) { // Get an old subface at edge [a, b]. parysh = d_caveshbdlist[i]; cudamesh_spivot(parysh, checksh, d_tri2trilist); // The new subface [a, b, p]. // Do not recover a deleted new face (degenerated). 
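// For each surviving new subface [a, b, p]: spin around edge [a, b] from the old tet until leaving the cavity, step across into the new tets, then spin again until reaching the tet whose apex is the new point, and bond the subface to that tet and its mirror.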
if (!d_tristatus[checksh.id].isEmpty()) { // Note that the old subface still connects to adjacent old tets // of C(p), which still connect to the tets outside C(p). cudamesh_stpivot(parysh, neightet, d_tri2tetlist); //assert(d_tetmarker[neightet.id] == threadId); // Find the adjacent tet containing the edge [a,b] outside C(p). spintet = neightet; while (1) { cudamesh_fnextself(spintet, d_neighborlist); //printf("spintet %d\n", spintet.id); if (cudamesh_getUInt64PriorityIndex(d_tetmarker[spintet.id]) != threadId) break; assert(spintet.id != neightet.id); } // The adjacent tet connects to a new tet in C(p). cudamesh_fsym(spintet, neightet, d_neighborlist); //assert(d_tetmarker[neightet.id] != threadId); // Find the tet containing the face [a, b, p]. spintet = neightet; while (1) { cudamesh_fnextself(spintet, d_neighborlist); if (cudamesh_apex(spintet, d_tetlist) == newptidx) break; assert(spintet.id != neightet.id); } // Adjust the edge direction in spintet and checksh. if (cudamesh_sorg(checksh, d_trifacelist) != cudamesh_org(spintet, d_tetlist)) { cudamesh_sesymself(checksh); assert(cudamesh_sorg(checksh, d_trifacelist) == cudamesh_org(spintet, d_tetlist)); } assert(cudamesh_sdest(checksh, d_trifacelist) == cudamesh_dest(spintet, d_tetlist)); // Connect the subface to two adjacent tets. cudamesh_tsbond(spintet, checksh, d_tet2trilist, d_tri2tetlist); cudamesh_fsymself(spintet, d_neighborlist); cudamesh_sesymself(checksh); cudamesh_tsbond(spintet, checksh, d_tet2trilist, d_tri2tetlist); } else { // A deleted degenerated subface // Clear all neighbor information for (j = 0; j < 2; j++) { d_tri2tetlist[2 * checksh.id + j] = tethandle(-1, 11); } for (j = 0; j < 3; j++) { d_tri2trilist[3 * checksh.id + j] = trihandle(-1, 0); } for (j = 0; j < 3; j++) { d_tri2seglist[3 * checksh.id + j] = trihandle(-1, 0); } } i = d_caveshbdnext[i]; } } __global__ void kernelConnectNewSubsegs2NewTets( int* d_threadlist, REAL* d_pointlist, tethandle* d_point2tetlist, int* d_seglist, trihandle* d_seg2trilist, tethandle* d_seg2tetlist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2seglist, uint64* d_tetmarker, trihandle* d_cavesegshlist, int* d_cavesegshnext, int* d_cavesegshhead, unsigned long* d_randomseed, int* d_threadmarker, int* d_insertidx, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0) return; trihandle checkseg, checksh; tethandle neightet, spintet; int i = d_cavesegshhead[threadId]; while (i != -1) { checkseg = d_cavesegshlist[i]; // Get the adjacent new subface. checkseg.shver = 0; cudamesh_spivot(checkseg, checksh, d_seg2trilist);; if (checksh.id != -1) { // Get the adjacent new tetrahedron. cudamesh_stpivot(checksh, neightet, d_tri2tetlist); } else { // It's a dangling segment. 
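// No adjacent subface: locate a tet incident to this segment by starting from a tet of the segment's origin and walking toward its destination.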
cudamesh_point2tetorg(cudamesh_sorg(checkseg, d_seglist), neightet, d_point2tetlist, d_tetlist); cudamesh_finddirection(&neightet, cudamesh_sdest(checkseg, d_seglist), d_pointlist, d_tetlist, d_neighborlist, d_randomseed + pos); assert(cudamesh_dest(neightet, d_tetlist) == cudamesh_sdest(checkseg, d_seglist)); } //assert(d_tetmarker[neightet.id] != threadId); cudamesh_sstbond1(checkseg, neightet, d_seg2tetlist); spintet = neightet; while (1) { cudamesh_tssbond1(spintet, checkseg, d_tet2seglist); cudamesh_fnextself(spintet, d_neighborlist); if (spintet.id == neightet.id) break; } i = d_cavesegshnext[i]; } } __global__ void kernelResetOldSubsegInfo( int* d_segidlist, int* d_threadlist, int* d_threadmarker, trihandle* d_seg2trilist, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0) return; int segid = d_segidlist[threadId]; for (int j = 0; j < 3; j++) { d_seg2trilist[3 * segid + j] = trihandle(-1, 0); } } __global__ void kernelResetOldSubfaceInfo( int* d_threadlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_subfaceencmarker, trihandle* d_caveshlist, int* d_caveshnext, int* d_caveshhead, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos], j; int threadmarker = d_threadmarker[threadId]; if (threadmarker != 0 && threadmarker != 1) return; trihandle checksh; int i = d_caveshhead[threadId]; while (i != -1) { checksh = d_caveshlist[i]; d_tristatus[checksh.id].clear(); d_subfaceencmarker[checksh.id] = -1; for (j = 0; j < 3; j++) { d_tri2trilist[3 * checksh.id + j] = trihandle(-1, 0); // reset neighbor to empty } for (j = 0; j < 3; j++) { d_tri2seglist[3 * checksh.id + j] = trihandle(-1, 0); } i = d_caveshnext[i]; } } __global__ void kernelResetOldTetInfo( int* d_threadlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* d_tet2seglist, tetstatus* d_tetstatus, tethandle* d_caveoldtetlist, int* d_caveoldtetnext, int* d_caveoldtethead, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos], j; tethandle checktet; int i = d_caveoldtethead[threadId]; while (i != -1) { checktet = d_caveoldtetlist[i]; d_tetstatus[checktet.id].clear(); for (j = 0; j < 4; j++) { d_neighborlist[4 * checktet.id + j] = tethandle(-1, 11); // reset neighbor to empty } for (j = 0; j < 4; j++) { d_tet2trilist[4 * checktet.id + j] = trihandle(-1, 0); // reset subface to empty } for (j = 0; j < 6; j++) { d_tet2seglist[6 * checktet.id + j] = trihandle(-1, 0); // reset subseg to empty } i = d_caveoldtetnext[i]; } } __global__ void kernelUpdateSegencmarker( int* d_threadlist, REAL * d_pointlist, int* d_seglist, tethandle* d_seg2tetlist, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_cavetetseglist, int* d_cavetetsegnext, int* d_cavetetseghead, trihandle* d_cavesegshlist, int* d_cavesegshnext, int* d_cavesegshhead, int* d_segmarker, int* d_segencmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; trihandle checkseg; int i, encpt; // Check all segments outside cavity i = d_cavetetseghead[threadId]; while (i != -1) { checkseg = d_cavetetseglist[i]; if (d_segmarker[checkseg.id] != 
threadId) // Not a splitting segment { checkseg4split( &checkseg, encpt, d_pointlist, d_seglist, d_seg2tetlist, d_tetlist, d_neighborlist); d_segencmarker[checkseg.id] = encpt; } i = d_cavetetsegnext[i]; } // Check new segments when it is segment point insertion. // In this case, new segments are stored in cavesegshlist if (threadmarker == 0) { i = d_cavesegshhead[threadId]; while (i != -1) { checkseg = d_cavesegshlist[i]; checkseg4split( &checkseg, encpt, d_pointlist, d_seglist, d_seg2tetlist, d_tetlist, d_neighborlist); d_segencmarker[checkseg.id] = encpt; i = d_cavesegshnext[i]; } } } __global__ void kernelUpdateSubfaceencmarker( int* d_threadlist, REAL * d_pointlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, tristatus* d_tristatus, int* d_tetlist, trihandle* d_cavetetshlist, int* d_cavetetshnext, int* d_cavetetshhead, trihandle* d_caveshbdlist, int* d_caveshbdnext, int* d_caveshbdhead, uint64* d_trimarker, int* d_subfaceencmarker, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; trihandle checkfac; int i, encpt; // Check all subfaces outside cavity i = d_cavetetshhead[threadId]; while (i != -1) { checkfac = d_cavetetshlist[i]; if (cudamesh_getUInt64PriorityIndex(d_trimarker[checkfac.id]) != threadId) // Not a splitting subface { checkface4split( &checkfac, encpt, d_pointlist, d_trifacelist, d_tri2tetlist, d_tetlist); d_subfaceencmarker[checkfac.id] = encpt; } i = d_cavetetshnext[i]; } // Check new subfaces when it is segment/subface point insertion. // In this case, new subfaces are connected to old subfaces in caveshbdlist if (threadmarker == 0 || threadmarker == 1) { trihandle parysh; i = d_caveshbdhead[threadId]; while (i != -1) { parysh = d_caveshbdlist[i]; cudamesh_spivot(parysh, checkfac, d_tri2trilist); if (!d_tristatus[checkfac.id].isEmpty()) { checkface4split( &checkfac, encpt, d_pointlist, d_trifacelist, d_tri2tetlist, d_tetlist); d_subfaceencmarker[checkfac.id] = encpt; } i = d_caveshbdnext[i]; } } } __global__ void kernelUpdateTetBadstatus( int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tetstatus* d_tetstatus, tethandle* d_cavebdrylist, int* d_cavebdrynext, int* d_cavebdryhead, REAL minratio, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; tethandle cavetet; int i = d_cavebdryhead[threadId]; while (i != -1) { cavetet = d_cavebdrylist[i]; if (cavetet.id != -1) // cavetet.id may be -1 because of redundency { if (checktet4split(&cavetet, d_pointlist, d_tetlist, minratio)) d_tetstatus[cavetet.id].setBad(true); } i = d_cavebdrynext[i]; } } __global__ void kernelUpdateInsertRadius( int* d_threadlist, int* d_insertidxlist, REAL* d_pointlist, trihandle* d_point2trilist, verttype* d_pointtypelist, REAL* d_pointradius, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, int* d_tetlist, REAL* d_smlen, int* d_parentpt, int* d_threadmarker, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; if (threadmarker == 0) { int newptidx = oldpointsize + pos; int parentptidx = d_parentpt[threadId]; REAL rv = d_smlen[threadId], rp; verttype parenttype = 
d_pointtypelist[parentptidx]; if (parenttype == FREESEGVERTEX) { trihandle parentseg1, parentseg2; parentseg1 = d_point2trilist[newptidx]; parentseg2 = d_point2trilist[parentptidx]; if (cudamesh_segsegadjacent(parentseg1.id, parentseg2.id, d_seg2parentidxlist, d_segparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } else if (parenttype == FREEFACETVERTEX) { trihandle parentseg, parentsh; parentseg = d_point2trilist[newptidx]; parentsh = d_point2trilist[parentptidx]; if (cudamesh_segfacetadjacent(parentseg.id, parentsh.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } d_pointradius[newptidx] = rv; } else if (threadmarker == 1) { int newptidx = oldpointsize + pos; int parentptidx = d_parentpt[threadId]; REAL rv = d_smlen[threadId], rp; verttype parenttype = d_pointtypelist[parentptidx]; if (parenttype == FREESEGVERTEX) { trihandle parentseg, parentsh; parentseg = d_point2trilist[parentptidx]; parentsh = d_point2trilist[newptidx]; if (cudamesh_segfacetadjacent(parentseg.id, parentsh.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < (sqrt(2.0) * rp)) rv = sqrt(2.0) * rp; // The relaxed insertion radius of new point } } else if (parenttype == FREEFACETVERTEX) { trihandle parentsh1, parentsh2; parentsh1 = d_point2trilist[parentptidx]; parentsh2 = d_point2trilist[newptidx]; if (cudamesh_facetfacetadjacent(parentsh1.id, parentsh2.id, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } d_pointradius[newptidx] = rv; } else { int splittetid = d_insertidxlist[threadId]; tethandle splittet(splittetid, 11); int newptidx = oldpointsize + pos; REAL *newpt = cudamesh_id2pointlist(newptidx, d_pointlist); int orgidx = cudamesh_org(splittet, d_tetlist); REAL *org = cudamesh_id2pointlist(orgidx, d_pointlist); REAL rv = cudamesh_distance(newpt, org); d_pointradius[newptidx] = rv; } } __global__ void kernelUpdateInsertRadius_Seg( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, REAL* d_pointradius, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, REAL* d_smlen, int* d_parentpt, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int newptidx = oldpointsize + pos; int parentptidx = d_parentpt[threadId]; REAL rv = d_smlen[threadId], rp; verttype parenttype = d_pointtypelist[parentptidx]; if (parenttype == FREESEGVERTEX) { trihandle parentseg1, parentseg2; parentseg1 = d_point2trilist[newptidx]; parentseg2 = d_point2trilist[parentptidx]; if (cudamesh_segsegadjacent(parentseg1.id, parentseg2.id, d_seg2parentidxlist, d_segparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } else if (parenttype == FREEFACETVERTEX) { trihandle parentseg, parentsh; parentseg = d_point2trilist[newptidx]; parentsh = d_point2trilist[parentptidx]; if (cudamesh_segfacetadjacent(parentseg.id, parentsh.id, d_seg2parentidxlist, 
d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } d_pointradius[newptidx] = rv; } __global__ void kernelUpdateInsertRadius_Subface( int* d_threadlist, trihandle* d_point2trilist, verttype* d_pointtypelist, REAL* d_pointradius, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, REAL* d_smlen, int* d_parentpt, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int newptidx = oldpointsize + pos; int parentptidx = d_parentpt[threadId]; REAL rv = d_smlen[threadId], rp; verttype parenttype = d_pointtypelist[parentptidx]; if (parenttype == FREESEGVERTEX) { trihandle parentseg, parentsh; parentseg = d_point2trilist[parentptidx]; parentsh = d_point2trilist[newptidx]; if (cudamesh_segfacetadjacent(parentseg.id, parentsh.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < (sqrt(2.0) * rp)) rv = sqrt(2.0) * rp; // The relaxed insertion radius of new point } } else if (parenttype == FREEFACETVERTEX) { trihandle parentsh1, parentsh2; parentsh1 = d_point2trilist[parentptidx]; parentsh2 = d_point2trilist[newptidx]; if (cudamesh_facetfacetadjacent(parentsh1.id, parentsh2.id, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[parentptidx]; if (rv < rp) rv = rp; // The relaxed insertion radius of new point } } d_pointradius[newptidx] = rv; } __global__ void kernelUpdateInsertRadius_Tet( int* d_insertidxlist, REAL* d_insertptlist, int* d_threadlist, REAL* d_pointlist, REAL* d_pointradius, int* d_tetlist, int oldpointsize, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int splittetid = d_insertidxlist[threadId]; tethandle splittet(splittetid, 11); int newptidx = oldpointsize + pos; REAL *newpt = cudamesh_id2pointlist(newptidx, d_pointlist); int orgidx = cudamesh_org(splittet, d_tetlist); REAL *org = cudamesh_id2pointlist(orgidx, d_pointlist); REAL rv = cudamesh_distance(newpt, org); d_pointradius[newptidx] = rv; } // Check mesh __global__ void kernelCheckPointNeighbors( trihandle* d_point2trilist, tethandle* d_point2tetlist, verttype* d_pointtypelist, int* d_seglist, tristatus* d_segstatus, int* d_trifacelist, tristatus* d_tristatus, int* d_tetlist, tetstatus* d_tetstatus, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int i, p; bool flag = false; trihandle neighseg, neighsh; tethandle neightet; verttype pointtype = d_pointtypelist[pos]; if (pointtype == FREESEGVERTEX) { neighseg = d_point2trilist[pos]; if (neighseg.id != -1) { if (d_segstatus[neighseg.id].isEmpty()) { printf("Point #%d: Empty subseg neighbor #%d\n", pos, neighseg.id); } else { for (i = 0; i < 3; i++) { p = d_seglist[3 * neighseg.id + i]; if (i == 2 && p != -1) { printf("Point #%d: Wrong point type (on subseg) or neighbor type (subseg) #%d - %d, %d, %d\n", pos, neighseg.id, d_seglist[3 * neighseg.id + 0], d_seglist[3 * neighseg.id + 1], d_seglist[3 * neighseg.id + 2]); } if (p == pos) { flag = true; break; } } if (!flag) printf("Point #%d: Wrong subface neighbor #%d - %d, %d, 
%d\n", pos, neighseg.id, d_seglist[3 * neighseg.id + 0], d_seglist[3 * neighseg.id + 1], d_seglist[3 * neighseg.id + 2]);
			}
		}
		else
		{
			printf("Point #%d: Missing segment neighbor\n", pos);
		}
	}
	else if (pointtype == FREEFACETVERTEX)
	{
		neighsh = d_point2trilist[pos];
		if (neighsh.id != -1)
		{
			if (d_tristatus[neighsh.id].isEmpty())
			{
				printf("Point #%d: Empty subface neighbor #%d\n", pos, neighsh.id);
			}
			else
			{
				for (i = 0; i < 3; i++)
				{
					p = d_trifacelist[3 * neighsh.id + i];
					if (p == -1)
					{
						printf("Point #%d: Wrong point type (on subface) or neighbor type (subface) #%d - %d, %d, %d\n", pos, neighsh.id,
							d_trifacelist[3 * neighsh.id + 0], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]);
					}
					if (p == pos)
					{
						flag = true;
						break;
					}
				}
				if (!flag)
					printf("Point #%d: Wrong subface neighbor #%d - %d, %d, %d\n", pos, neighsh.id,
						d_trifacelist[3 * neighsh.id + 0], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]);
			}
		}
		else
		{
			printf("Point #%d: Missing subface neighbor\n", pos);
		}
	}

	flag = false; // Reset for the tet-neighbor check below
	neightet = d_point2tetlist[pos];
	if (neightet.id != -1)
	{
		//printf("%d ", neightet.id);
		if (d_tetstatus[neightet.id].isEmpty())
		{
			printf("Point #%d: Empty tet neighbor #%d\n", pos, neightet.id);
		}
		else
		{
			for (i = 0; i < 4; i++)
			{
				p = d_tetlist[4 * neightet.id + i];
				if (p == pos)
				{
					flag = true;
					break;
				}
			}
			if (!flag)
				printf("Point #%d: Wrong tet neighbor #%d - %d, %d, %d, %d\n", pos, neightet.id,
					d_tetlist[4 * neightet.id + 0], d_tetlist[4 * neightet.id + 1],
					d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3]);
		}
	}
}

__global__ void kernelCheckSubsegNeighbors(
	int* d_seglist,
	trihandle* d_seg2trilist,
	tethandle* d_seg2tetlist,
	tristatus* d_segstatus,
	int* d_trifacelist,
	trihandle* d_tri2seglist,
	tristatus* d_tristatus,
	int* d_tetlist,
	trihandle* d_tet2seglist,
	tetstatus* d_tetstatus,
	int numofthreads
)
{
	int pos = blockIdx.x * blockDim.x + threadIdx.x;
	if (pos >= numofthreads)
		return;
	if (d_segstatus[pos].isEmpty())
		return;

	trihandle checkseg(pos, 0), neighsh, neighseg, prevseg, nextseg;
	int pa, pb, pc, pd;
	cudamesh_spivot(checkseg, neighsh, d_seg2trilist);
	if (neighsh.id != -1)
	{
		if (d_tristatus[neighsh.id].isEmpty())
		{
			printf("Subseg #%d: Empty subface neighbor #%d\n", checkseg.id, neighsh.id);
		}
		else
		{
			if (d_trifacelist[3 * neighsh.id + 2] == -1)
			{
				printf("Subseg #%d: Wrong neighbor type (Should be subface) #%d\n", checkseg.id, neighsh.id);
			}
			else
			{
				cudamesh_sspivot(neighsh, neighseg, d_tri2seglist);
				if (neighseg.id != checkseg.id)
					printf("Subseg #%d: Wrong subface neighbor #%d - %d, %d, %d\n", checkseg.id, neighsh.id,
						d_tri2seglist[3 * neighsh.id + 0].id, d_tri2seglist[3 * neighsh.id + 1].id, d_tri2seglist[3 * neighsh.id + 2].id);
				else
				{
					pa = cudamesh_sorg(checkseg, d_seglist);
					pb = cudamesh_sdest(checkseg, d_seglist);
					pc = cudamesh_sorg(neighsh, d_trifacelist);
					pd = cudamesh_sdest(neighsh, d_trifacelist);
					if ((pa == pc && pb == pd) || (pa == pd && pb == pc))
					{
					}
					else
					{
						printf("Subseg #%d - %d, %d: Wrong subface neighbor endpoints #%d - %d, %d, %d\n",
							checkseg.id, d_seglist[3 * checkseg.id], d_seglist[3 * checkseg.id + 1],
							neighsh.id, d_trifacelist[3 * neighsh.id], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]);
					}
				}
			}
		}
	}

	cudamesh_senextself(checkseg);
	cudamesh_spivot(checkseg, prevseg, d_seg2trilist);
	if (prevseg.id != -1)
	{
		if (d_segstatus[prevseg.id].isEmpty())
		{
			printf("Subseg #%d: Empty subseg neighbor #%d\n", checkseg.id, prevseg.id);
		}
		else
		{
			if (d_seglist[3 * prevseg.id + 2] != -1)
			{
				printf("Subseg #%d: Wrong neighbor type (Should be subseg) #%d\n", checkseg.id,
prevseg.id); } else { cudamesh_spivot(prevseg, neighseg, d_seg2trilist); if(neighseg.id != checkseg.id) printf("Subseg #%d: Wrong subseg neighbor #%d - %d, %d, %d\n", checkseg.id, prevseg.id, d_seg2trilist[3 * prevseg.id + 0].id, d_seg2trilist[3 * prevseg.id + 1].id, d_seg2trilist[3 * prevseg.id + 2].id); } } } cudamesh_senextself(checkseg); cudamesh_spivot(checkseg, nextseg, d_seg2trilist); if (nextseg.id != -1) { if (d_segstatus[nextseg.id].isEmpty()) { printf("Subseg #%d: Empty subseg neighbor #%d\n", checkseg.id, prevseg.id); } else { if (d_seglist[3 * nextseg.id + 2] != -1) { printf("Subseg #%d: Wrong neighbor type (Should be subseg) #%d\n", checkseg.id, nextseg.id); } else { cudamesh_spivot(nextseg, neighseg, d_seg2trilist); if (neighseg.id != checkseg.id) printf("Subseg #%d: Wrong subseg neighbor #%d - %d, %d, %d\n", checkseg.id, nextseg.id, d_seg2trilist[3 * nextseg.id + 0].id, d_seg2trilist[3 * nextseg.id + 1].id, d_seg2trilist[3 * nextseg.id + 2].id); } } } tethandle neightet; checkseg.shver = 0; cudamesh_sstpivot1(checkseg, neightet, d_seg2tetlist); if (neightet.id != -1) { if (d_tetstatus[neightet.id].isEmpty()) { printf("Subseg #%d: Empty tet neighbor #%d\n", checkseg.id, neightet.id); } else { cudamesh_tsspivot1(neightet, neighseg, d_tet2seglist); if (neighseg.id != checkseg.id) printf("Subseg #%d: Wrong tet neighbor #%d - %d, %d, %d, %d, %d, %d\n", checkseg.id, neightet.id, d_tet2seglist[6 * neightet.id + 0].id, d_tet2seglist[6 * neightet.id + 1].id, d_tet2seglist[6 * neightet.id + 2].id, d_tet2seglist[6 * neightet.id + 3].id, d_tet2seglist[6 * neightet.id + 4].id, d_tet2seglist[6 * neightet.id + 5].id); else { pa = cudamesh_sorg(checkseg, d_seglist); pb = cudamesh_sdest(checkseg, d_seglist); pc = cudamesh_org(neightet, d_tetlist); pd = cudamesh_dest(neightet, d_tetlist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } else { printf("Subseg #%d - %d, %d: Wrong tet neighbor endpoints #%d(%d) - %d, %d, %d, %d\n", checkseg.id, d_seglist[3 * checkseg.id], d_seglist[3 * checkseg.id + 1], neightet.id, neightet.ver, d_tetlist[4 * neightet.id], d_tetlist[4 * neightet.id + 1], d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3]); } } } } } __global__ void kernelCheckSubfaceNeighbors( int* d_seglist, trihandle* d_seg2trilist, tristatus* d_segstatus, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_tetlist, trihandle* d_tet2trilist, tetstatus* d_tetstatus, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_tristatus[pos].isEmpty()) return; trihandle checksh(pos, 0), neighseg, neighsh, neineighsh; tethandle neightet; int i, pa, pb, pc, pd, pe, pf; for (i = 0; i < 3; i++) { cudamesh_senextself(checksh); cudamesh_sspivot(checksh, neighseg, d_tri2seglist); if (neighseg.id != -1) { if (d_segstatus[neighseg.id].isEmpty()) printf("Subface #%d: Empty subseg neighbor #%d\n", checksh.id, neighseg.id); else { cudamesh_spivot(neighseg, neighsh, d_seg2trilist); if (neighsh.id == -1) { printf("Subface #%d: Wrong subseg neighbor, Subface #%d - %d, %d, %d, Subseg #%d - (-1)\n", checksh.id, d_tri2seglist[3 * checksh.id + 0].id, d_tri2seglist[3 * checksh.id + 1].id, d_tri2seglist[3 * checksh.id + 2].id, neighseg.id); } else { //printf("%d ", neighsh.id); bool found = false; cudamesh_spivot(neighsh, neineighsh, d_tri2trilist); if (neighsh.id == checksh.id) found = true; if (neineighsh.id == -1) // this only happen when neighsh is a single subface { 
if(checksh.id != neighsh.id) printf("Subface: Wrong single subface neighbor - Checksh #%d, Neighseg #%d, Neighsh #%d\n", checksh.id, neighseg.id, neighsh.id); } else { if (neighsh.id == neineighsh.id) { if (checksh.id != neighsh.id) printf("Subface: Wrong single subface neighbor - Checksh #%d, Neighsh #%d, neineighsh #%d\n", checksh.id, neighsh.id, neineighsh.id); } else { while (neineighsh.id != neighsh.id) { if (neineighsh.id == checksh.id) { found = true; break; } cudamesh_spivotself(neineighsh, d_tri2trilist); } } } if (!found) printf("Subface #%d: Wrong subseg neighbor #%d, missing in loop\n", checksh.id, neighseg.id); else { pa = cudamesh_sorg(checksh, d_trifacelist); pb = cudamesh_sdest(checksh, d_trifacelist); pc = cudamesh_sorg(neighseg, d_seglist); pd = cudamesh_sdest(neighseg, d_seglist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } else { printf("Subface #%d - %d, %d, %d: Wrong subseg neighbor endpoints #%d - %d, %d, %d\n", checksh.id, d_trifacelist[3 * checksh.id + 0], d_trifacelist[3 * checksh.id + 1], d_trifacelist[3 * checksh.id + 2], neighseg.id, d_seglist[3 * neighseg.id + 0], d_seglist[3 * neighseg.id + 1], d_seglist[3 * neighseg.id + 2]); } } } } } } for (i = 0; i < 3; i++) { cudamesh_senextself(checksh); cudamesh_spivot(checksh, neighsh, d_tri2trilist); if (neighsh.id != -1) { while (neighsh.id != checksh.id) { if (d_tristatus[neighsh.id].isEmpty()) { printf("Subface #%d - %d, %d, %d - %d, %d, %d: Empty subface neighbor #%d - %d, %d, %d - %d, %d, %d\n", checksh.id, d_tri2trilist[3 * checksh.id + 0].id, d_tri2trilist[3 * checksh.id + 1].id, d_tri2trilist[3 * checksh.id + 2].id, d_trifacelist[3 * checksh.id + 0], d_trifacelist[3 * checksh.id + 1], d_trifacelist[3 * checksh.id + 2], neighsh.id, d_tri2trilist[3 * neighsh.id + 0].id, d_tri2trilist[3 * neighsh.id + 1].id, d_tri2trilist[3 * neighsh.id + 2].id, d_trifacelist[3 * neighsh.id + 0], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]); break; } cudamesh_spivotself(neighsh, d_tri2trilist); } } } for (i = 0; i < 2; i++) { cudamesh_sesymself(checksh); cudamesh_stpivot(checksh, neightet, d_tri2tetlist); if (neightet.id != -1) { if (d_tetstatus[neightet.id].isEmpty()) { printf("Subface #%d: Empty tet neighbor #%d\n", checksh.id, neightet.id); } else { cudamesh_tspivot(neightet, neighsh, d_tet2trilist); if (neighsh.id != checksh.id) printf("Subface #%d: Wrong tet neighbor #%d - %d, %d, %d, %d\n", checksh.id, neightet.id, d_tet2trilist[4 * neightet.id + 0].id, d_tet2trilist[4 * neightet.id + 1].id, d_tet2trilist[4 * neightet.id + 2].id, d_tet2trilist[4 * neightet.id + 3].id); else { pa = cudamesh_sorg(checksh, d_trifacelist); pb = cudamesh_sdest(checksh, d_trifacelist); pc = cudamesh_sapex(checksh, d_trifacelist); pd = cudamesh_org(neightet, d_tetlist); pe = cudamesh_dest(neightet, d_tetlist); pf = cudamesh_apex(neightet, d_tetlist); if (pa == pd && pb == pe && pc == pf) { } else { printf("Subface #%d - %d, %d, %d: Wrong tet neighbor endpoints #%d - %d, %d, %d, %d\n", checksh.id, d_trifacelist[3 * checksh.id + 0], d_trifacelist[3 * checksh.id + 1], d_trifacelist[3 * checksh.id + 2], neightet.id, d_tetlist[4 * neightet.id + 0], d_tetlist[4 * neightet.id + 1], d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3]); } } } } } } __global__ void kernelCheckTetNeighbors( int* d_seglist, tethandle* d_seg2tetlist, tristatus* d_segstatus, int* d_trifacelist, tethandle* d_tri2tetlist, tristatus* d_tristatus, int* d_tetlist, tethandle* d_neighborlist, trihandle* d_tet2trilist, trihandle* 
d_tet2seglist,
	tetstatus* d_tetstatus,
	int numofthreads
)
{
	int pos = blockIdx.x * blockDim.x + threadIdx.x;
	if (pos >= numofthreads)
		return;
	if (d_tetstatus[pos].isEmpty())
		return;

	tethandle neightet, neineightet;
	trihandle neighsh, neighseg;
	int i, pa, pb, pc, pd, pe, pf;

	for (i = 0; i < 4; i++)
	{
		neightet = d_neighborlist[4 * pos + i];
		if (neightet.id != -1)
		{
			if (d_tetstatus[neightet.id].isEmpty())
			{
				printf("Tet #%d - %d, %d, %d, %d: Empty tet neighbor #%d - %d, %d, %d, %d\n",
					pos, d_neighborlist[4 * pos].id, d_neighborlist[4 * pos + 1].id,
					d_neighborlist[4 * pos + 2].id, d_neighborlist[4 * pos + 3].id,
					neightet.id, d_neighborlist[4 * neightet.id].id, d_neighborlist[4 * neightet.id + 1].id,
					d_neighborlist[4 * neightet.id + 2].id, d_neighborlist[4 * neightet.id + 3].id);
			}
			else
			{
				cudamesh_fsym(neightet, neineightet, d_neighborlist);
				if (neineightet.id != pos)
					printf("Tet #%d: Wrong tet neighbor #%d - %d, %d, %d, %d\n", pos, neightet.id,
						d_neighborlist[4 * neightet.id + 0].id, d_neighborlist[4 * neightet.id + 1].id,
						d_neighborlist[4 * neightet.id + 2].id, d_neighborlist[4 * neightet.id + 3].id);
				else
				{
					pa = cudamesh_org(neightet, d_tetlist);
					pb = cudamesh_dest(neightet, d_tetlist);
					pc = cudamesh_apex(neightet, d_tetlist);
					pd = cudamesh_org(neineightet, d_tetlist);
					pe = cudamesh_dest(neineightet, d_tetlist);
					pf = cudamesh_apex(neineightet, d_tetlist);
					if (pa == pe && pb == pd && pc == pf)
					{
					}
					else
					{
						printf("Tet #%d - %d, %d, %d, %d: Wrong tet neighbor endpoints #%d - %d, %d, %d, %d\n",
							pos, d_tetlist[4 * pos], d_tetlist[4 * pos + 1], d_tetlist[4 * pos + 2], d_tetlist[4 * pos + 3],
							neightet.id, d_tetlist[4 * neightet.id + 0], d_tetlist[4 * neightet.id + 1],
							d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3]);
					}
				}
			}
		}
		else
		{
			// neightet.id == -1: report the missing neighbor without indexing d_neighborlist with an invalid id.
			printf("Tet #%d - %d, %d, %d, %d: Missing tet neighbor at face %d\n",
				pos, d_neighborlist[4 * pos].id, d_neighborlist[4 * pos + 1].id,
				d_neighborlist[4 * pos + 2].id, d_neighborlist[4 * pos + 3].id, i);
		}
	}

	for (i = 0; i < 4; i++)
	{
		neighsh = d_tet2trilist[4 * pos + i];
		if (neighsh.id != -1)
		{
			if (d_tristatus[neighsh.id].isEmpty())
			{
				printf("Tet #%d - %d, %d, %d, %d: Empty subface neighbor #%d - %d, %d\n",
					pos, d_tet2trilist[4 * pos].id, d_tet2trilist[4 * pos + 1].id,
					d_tet2trilist[4 * pos + 2].id, d_tet2trilist[4 * pos + 3].id,
					neighsh.id, d_tri2tetlist[2 * neighsh.id].id, d_tri2tetlist[2 * neighsh.id + 1].id);
			}
			else
			{
				cudamesh_stpivot(neighsh, neightet, d_tri2tetlist);
				if (neightet.id != pos)
					printf("Tet #%d: Wrong subface neighbor #%d - %d, %d\n", pos, neighsh.id,
						d_tri2tetlist[2 * neighsh.id + 0].id, d_tri2tetlist[2 * neighsh.id + 1].id);
				else
				{
					pa = cudamesh_sorg(neighsh, d_trifacelist);
					pb = cudamesh_sdest(neighsh, d_trifacelist);
					pc = cudamesh_sapex(neighsh, d_trifacelist);
					pd = cudamesh_org(neightet, d_tetlist);
					pe = cudamesh_dest(neightet, d_tetlist);
					pf = cudamesh_apex(neightet, d_tetlist);
					if (pa == pd && pb == pe && pc == pf)
					{
					}
					else
					{
						printf("Tet #%d - %d, %d, %d, %d: Wrong subface neighbor endpoints #%d - %d, %d, %d\n",
							pos, d_tetlist[4 * pos], d_tetlist[4 * pos + 1], d_tetlist[4 * pos + 2], d_tetlist[4 * pos + 3],
							neighsh.id, d_trifacelist[3 * neighsh.id + 0], d_trifacelist[3 * neighsh.id + 1], d_trifacelist[3 * neighsh.id + 2]);
					}
				}
			}
		}
	}

	for (i = 0; i < 6; i++)
	{
		neighseg = d_tet2seglist[6 * pos + i];
		if (neighseg.id != -1)
		{
			if (d_segstatus[neighseg.id].isEmpty())
			{
printf("Tet #%d - %d, %d, %d, %d, %d, %d: Empty subseg neighbor #%d - %d\n", pos, d_tet2seglist[6 * pos].id, d_tet2seglist[6 * pos + 1].id, d_tet2seglist[6 * pos + 2].id, d_tet2seglist[6 * pos + 3].id, d_tet2seglist[6 * pos + 4].id, d_tet2seglist[6 * pos + 5].id, neighseg.id, d_seg2tetlist[neighseg.id].id); } else { cudamesh_sstpivot1(neighseg, neightet, d_seg2tetlist); if (neightet.id == -1) printf("Tet #%d - Incident Subseg #%d has empty tet neighbor\n", pos, neighseg.id); else { pa = cudamesh_sorg(neighseg, d_seglist); pb = cudamesh_sdest(neighseg, d_seglist); pc = cudamesh_org(neightet, d_tetlist); pd = cudamesh_dest(neightet, d_tetlist); if ((pa == pc && pb == pd) || (pa == pd && pb == pc)) { } else { printf("pa = %d, pb = %d, pc = %d, pd = %d\n", pa, pb, pc, pd); printf("Tet #%d(%d) - %d, %d, %d, %d: Wrong subseg neighbor endpoints #%d - %d, %d, %d\n", neightet.id, neightet.ver, d_tetlist[4 * neightet.id + 0], d_tetlist[4 * neightet.id + 1], d_tetlist[4 * neightet.id + 2], d_tetlist[4 * neightet.id + 3], neighseg.id, d_seglist[3 * neighseg.id], d_seglist[3 * neighseg.id + 1], d_seglist[3 * neighseg.id + 2]); } } } } } } // Split bad elements __global__ void kernelCheckBadElementList( int* d_badeleidlist, int* d_threadmarker, int* d_segencmarker, int* d_subfaceencmarker, tetstatus* d_tetstatus, int numofencsegs, int numofencsubfaces, int numofbadtets, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (pos < numofencsegs) { if(d_threadmarker[pos] != 0) printf("threadId #%d - seg #%d - wrong thread marker %d\n", pos, d_badeleidlist[pos], d_threadmarker[pos]); else if(d_segencmarker[d_badeleidlist[pos]] < 0) printf("threadId #%d - seg #%d - wrong encroachement marker %d\n", pos, d_badeleidlist[pos], d_segencmarker[d_badeleidlist[pos]]); } else if (pos < numofencsubfaces + numofencsegs) { if (d_threadmarker[pos] != 1) printf("threadId #%d - subface #%d - wrong thread marker %d\n", pos, d_badeleidlist[pos], d_threadmarker[pos]); else if (d_subfaceencmarker[d_badeleidlist[pos]] < 0) printf("threadId #%d - subface #%d - wrong encroachement marker %d\n", pos, d_badeleidlist[pos], d_subfaceencmarker[d_badeleidlist[pos]]); } else { if (d_threadmarker[pos] != 2) printf("threadId #%d - tet #%d - wrong thread marker %d\n", pos, d_badeleidlist[pos], d_threadmarker[pos]); else if (!d_tetstatus[d_badeleidlist[pos]].isBad() || d_tetstatus[d_badeleidlist[pos]].isEmpty()) printf("threadId #%d - tet #%d - wrong tet status\n", pos, d_badeleidlist[pos]); } } __global__ void kernelComputeSteinerPointAndPriority( REAL* d_pointlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, int* d_seg2parentlist, int* d_segparentlist, int* d_segencmarker, int* d_trifacelist, tristatus* d_tristatus, int* d_tetlist, tetstatus* d_tetstatus, int* d_insertidxlist, int* d_threadmarker, REAL* d_steinerptlist, REAL* d_priority, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int eleidx = d_insertidxlist[pos]; int threadmarker = d_threadmarker[pos]; REAL* steinpt = cudamesh_id2pointlist(pos, d_steinerptlist); if (threadmarker == 0) // is a subsegment { trihandle seg(eleidx, 0); REAL* ei = cudamesh_id2pointlist(cudamesh_sorg(seg, d_seglist), d_pointlist); REAL* ej = cudamesh_id2pointlist(cudamesh_sdest(seg, d_seglist), d_pointlist); REAL len = cudamesh_distance(ei, ej); d_priority[pos] = 1 / len; int adjflag = 0, i; int refptidx = d_segencmarker[eleidx]; if (refptidx != MAXINT) { REAL* refpt = 
cudamesh_id2pointlist(refptidx, d_pointlist); REAL L, L1, t; if (d_pointtypelist[refptidx] == FREESEGVERTEX) { trihandle parentseg; parentseg = d_point2trilist[refptidx]; int sidx1 = d_seg2parentlist[parentseg.id]; int idx_pi = d_segparentlist[sidx1 * 2]; int idx_pj = d_segparentlist[sidx1 * 2 + 1]; REAL* far_pi = cudamesh_id2pointlist(idx_pi, d_pointlist); REAL* far_pj = cudamesh_id2pointlist(idx_pj, d_pointlist); int sidx2 = d_seg2parentlist[seg.id]; int idx_ei = d_segparentlist[sidx2 * 2]; int idx_ej = d_segparentlist[sidx2 * 2 + 1]; REAL* far_ei = cudamesh_id2pointlist(idx_ei, d_pointlist); REAL* far_ej = cudamesh_id2pointlist(idx_ej, d_pointlist); if ((idx_pi == idx_ei) || (idx_pj == idx_ei)) { // Create a Steiner point at the intersection of the segment // [far_ei, far_ej] and the sphere centered at far_ei with // radius |far_ei - refpt|. L = cudamesh_distance(far_ei, far_ej); L1 = cudamesh_distance(far_ei, refpt); t = L1 / L; for (i = 0; i < 3; i++) { steinpt[i] = far_ei[i] + t * (far_ej[i] - far_ei[i]); } adjflag = 1; } else if ((idx_pi == idx_ej) || (idx_pj == idx_ej)) { L = cudamesh_distance(far_ei, far_ej); L1 = cudamesh_distance(far_ej, refpt); t = L1 / L; for (i = 0; i < 3; i++) { steinpt[i] = far_ej[i] + t * (far_ei[i] - far_ej[i]); } adjflag = 1; } else { // Cut the segment by the projection point of refpt. projectpoint2edge(refpt, ei, ej, steinpt); } } else { // Cut the segment by the projection point of refpt. projectpoint2edge(refpt, ei, ej, steinpt); } // Make sure that steinpt is not too close to ei and ej. L = cudamesh_distance(ei, ej); L1 = cudamesh_distance(steinpt, ei); t = L1 / L; if ((t < 0.2) || (t > 0.8)) { // Split the point at the middle. for (i = 0; i < 3; i++) { steinpt[i] = ei[i] + 0.5 * (ej[i] - ei[i]); } } } else { // Split the point at the middle. for (i = 0; i < 3; i++) { steinpt[i] = ei[i] + 0.5 * (ej[i] - ei[i]); } } } else if (threadmarker == 1) // is a subface { REAL *pa, *pb, *pc; REAL area, rd, len; REAL A[4][4], rhs[4], D; int indx[4]; int i; trihandle chkfac(eleidx, 0); REAL* steinpt = cudamesh_id2pointlist(pos, d_steinerptlist); pa = cudamesh_id2pointlist(cudamesh_sorg(chkfac, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(chkfac, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(chkfac, d_trifacelist), d_pointlist); // Compute the coefficient matrix A (3x3). A[0][0] = pb[0] - pa[0]; A[0][1] = pb[1] - pa[1]; A[0][2] = pb[2] - pa[2]; // vector V1 (pa->pb) A[1][0] = pc[0] - pa[0]; A[1][1] = pc[1] - pa[1]; A[1][2] = pc[2] - pa[2]; // vector V2 (pa->pc) cudamesh_cross(A[0], A[1], A[2]); // vector V3 (V1 X V2) area = 0.5 * sqrt(cudamesh_dot(A[2], A[2])); // The area of [a,b,c]. d_priority[pos] = 1 / area; // Compute the right hand side vector b (3x1). rhs[0] = 0.5 * cudamesh_dot(A[0], A[0]); // edge [a,b] rhs[1] = 0.5 * cudamesh_dot(A[1], A[1]); // edge [a,c] rhs[2] = 0.0; // Solve the 3 by 3 equations use LU decomposition with partial // pivoting and backward and forward substitute. if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // A degenerate triangle. //printf("kernelComputeSteinerPointOnSubface: A degenerate subface. 
This should not happen!\n"); } cudamesh_lu_solve(A, 3, indx, rhs, 0); steinpt[0] = pa[0] + rhs[0]; steinpt[1] = pa[1] + rhs[1]; steinpt[2] = pa[2] + rhs[2]; } else if(threadmarker == 2) // is a tetrahedron { int tetid = eleidx; int ipa, ipb, ipc, ipd; REAL *pa, *pb, *pc, *pd; REAL vda[3], vdb[3], vdc[3]; REAL vab[3], vbc[3], vca[3]; REAL elen[6]; REAL smlen = 0, rd; REAL A[4][4], rhs[4], D; int indx[4]; int i; ipd = d_tetlist[4 * tetid + 3]; if (ipd == -1) { // This should not happend printf("Thread #%d - Error: Try to split a hull tet #%d!\n", pos, tetid); return; } ipa = d_tetlist[4 * tetid + 0]; ipb = d_tetlist[4 * tetid + 1]; ipc = d_tetlist[4 * tetid + 2]; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); pc = cudamesh_id2pointlist(ipc, d_pointlist); pd = cudamesh_id2pointlist(ipd, d_pointlist); // Get the edge vectors vda: d->a, vdb: d->b, vdc: d->c. // Set the matrix A = [vda, vdb, vdc]^T. for (i = 0; i < 3; i++) A[0][i] = vda[i] = pa[i] - pd[i]; for (i = 0; i < 3; i++) A[1][i] = vdb[i] = pb[i] - pd[i]; for (i = 0; i < 3; i++) A[2][i] = vdc[i] = pc[i] - pd[i]; // Get the other edge vectors. for (i = 0; i < 3; i++) vab[i] = pb[i] - pa[i]; for (i = 0; i < 3; i++) vbc[i] = pc[i] - pb[i]; for (i = 0; i < 3; i++) vca[i] = pa[i] - pc[i]; if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // This should not happend //printf("Thread #%d - Error: Try to split a degenerated tet #%d!\n", threadId, tetid); d_tetstatus[tetid].setAbortive(true); d_threadmarker[pos] = -1; return; } // Calculate the circumcenter and radius of this tet. rhs[0] = 0.5 * cudamesh_dot(vda, vda); rhs[1] = 0.5 * cudamesh_dot(vdb, vdb); rhs[2] = 0.5 * cudamesh_dot(vdc, vdc); cudamesh_lu_solve(A, 3, indx, rhs, 0); for (i = 0; i < 3; i++) { steinpt[i] = pd[i] + rhs[i]; } //Calculate the shortest edge length. 
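// elen[] holds squared edge lengths; their square roots feed the Heron-type volume formula below, and the priority is set to the reciprocal of that volume.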
elen[0] = cudamesh_dot(vda, vda); elen[1] = cudamesh_dot(vdb, vdb); elen[2] = cudamesh_dot(vdc, vdc); elen[3] = cudamesh_dot(vab, vab); elen[4] = cudamesh_dot(vbc, vbc); elen[5] = cudamesh_dot(vca, vca); // Use volume as priority // Use heron-type formula to compute the volume of a tetrahedron // https://en.wikipedia.org/wiki/Heron%27s_formula REAL U, V, W, u, v, w; // first three form a triangle; u opposite to U and so on REAL X, x, Y, y, Z, z; REAL a, b, c, d; U = sqrt(elen[3]); //ab V = sqrt(elen[4]); //bc W = sqrt(elen[5]); //ca u = sqrt(elen[2]); //dc v = sqrt(elen[0]); //da w = sqrt(elen[1]); //db X = (w - U + v)*(U + v + w); x = (U - v + w)*(v - w + U); Y = (u - V + w)*(V + w + u); y = (V - w + u)*(w - u + V); Z = (v - W + u)*(W + u + v); z = (W - u + v)*(u - v + W); a = sqrt(x*Y*Z); b = sqrt(y*Z*X); c = sqrt(z*X*Y); d = sqrt(x*y*z); REAL vol = sqrt((-a + b + c + d)*(a - b + c + d)*(a + b - c + d)*(a + b + c - d)) / (192 * u*v*w); d_priority[pos] = 1 / vol; } } __global__ void kernelModifyPriority( REAL* d_priorityreal, int* d_priorityint, REAL offset0, REAL offset1, REAL offset2, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadmarker = d_threadmarker[pos]; REAL offset; if (threadmarker == 0) offset = offset0; else if (threadmarker == 1) offset = offset1; else offset = offset2; REAL priority = d_priorityreal[pos] + offset; d_priorityreal[pos] = priority; d_priorityint[pos] = __float_as_int((float)priority); } __global__ void kernelCheckInsertRadius( REAL* d_pointlist, REAL* d_pointradius, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, tristatus* d_segstatus, int* d_seg2parentidxlist, int* d_segparentendpointidxlist, int* d_segencmarker, int* d_trifacelist, tristatus* d_tristatus, int* d_tetlist, tetstatus* d_tetstatus, int* d_tri2parentidxlist, int* d_triid2parentoffsetlist, int* d_triparentendpointidxlist, int* d_subfaceencmarker, int* d_insertidxlist, int* d_threadmarker, REAL* d_steinerptlist, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadmarker = d_threadmarker[pos]; int eleidx = d_insertidxlist[pos]; if (threadmarker == 0) { int segId = eleidx; if (d_segstatus[segId].isAbortive()) { d_threadmarker[pos] = -1; return; } int encptidx = d_segencmarker[pos]; if (encptidx != MAXINT) // not encroached by splitting segment and subface routines return; trihandle splitseg(segId, 0); int ipa, ipb; ipa = cudamesh_sorg(splitseg, d_seglist); ipb = cudamesh_sdest(splitseg, d_seglist); REAL *pa, *pb; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); REAL len = cudamesh_distance(pa, pb); REAL smrrv = d_pointradius[ipa]; REAL rrv = d_pointradius[ipb]; if (rrv > 0) { if (smrrv > 0) { if (rrv < smrrv) { smrrv = rrv; } } else { smrrv = rrv; } } if (smrrv > 0) { if ((fabs(smrrv - len) / len) < EPSILON) smrrv = len; if (len < smrrv) { d_segstatus[segId].setAbortive(true); d_threadmarker[pos] = -1; return; } } } else if (threadmarker == 1) { int subfaceid = eleidx; if (d_tristatus[subfaceid].isAbortive()) { d_threadmarker[pos] = -1; return; } int encptidx = d_subfaceencmarker[subfaceid]; if (encptidx == MAXINT) // Mark as encroached when trying to split a tet return; trihandle parentseg, parentsh; trihandle splitfac(subfaceid, 0); REAL rv, rp; REAL* newpt = d_steinerptlist + 3 * pos; REAL* encpt = cudamesh_id2pointlist(encptidx, d_pointlist); rv = cudamesh_distance(newpt, encpt); if 
(d_pointtypelist[encptidx] == FREESEGVERTEX) { parentseg = d_point2trilist[encptidx]; if (cudamesh_segfacetadjacent(parentseg.id, splitfac.id, d_seg2parentidxlist, d_segparentendpointidxlist, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[encptidx]; if (rv < (sqrt(2.0) * rp)) { // This insertion may cause no termination. d_threadmarker[pos] = -1; // Reject the insertion of newpt. d_tristatus[subfaceid].setAbortive(true); } } } else if (d_pointtypelist[encptidx] == FREEFACETVERTEX) { parentsh = d_point2trilist[encptidx]; if (cudamesh_facetfacetadjacent(parentsh.id, splitfac.id, d_tri2parentidxlist, d_triid2parentoffsetlist, d_triparentendpointidxlist)) { rp = d_pointradius[encptidx]; if (rv < rp) { d_threadmarker[pos] = -1; // Reject the insertion of newpt. d_tristatus[subfaceid].setAbortive(true); } } } } else { int tetid = eleidx; if (d_tetstatus[tetid].isAbortive()) { d_threadmarker[pos] = -1; return; } tethandle chktet(tetid, 11), checkedge; int ie1, ie2; int i, j; REAL *e1, *e2; REAL smlen = 0; REAL rrv, smrrv; REAL elen[6]; // Get the shortest edge of this tet. checkedge.id = chktet.id; for (i = 0; i < 6; i++) { checkedge.ver = raw_edge2ver[i]; ie1 = cudamesh_org(checkedge, d_tetlist); ie2 = cudamesh_dest(checkedge, d_tetlist); e1 = cudamesh_id2pointlist(ie1, d_pointlist); e2 = cudamesh_id2pointlist(ie2, d_pointlist); elen[i] = cudamesh_distance(e1, e2); if (i == 0) { smlen = elen[i]; j = 0; } else { if (elen[i] < smlen) { smlen = elen[i]; j = i; } } } // Check if the edge is too short. checkedge.ver = raw_edge2ver[j]; // Get the smallest rrv of e1 and e2. // Note: if rrv of e1 and e2 is zero. Do not use it. ie1 = cudamesh_org(checkedge, d_tetlist); smrrv = d_pointradius[ie1]; ie2 = cudamesh_dest(checkedge, d_tetlist); rrv = d_pointradius[ie2]; if (rrv > 0) { if (smrrv > 0) { if (rrv < smrrv) { smrrv = rrv; } } else { smrrv = rrv; } } if (smrrv > 0) { // To avoid rounding error, round smrrv before doing comparison. 
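// smrrv is the smaller insertion radius recorded at the two endpoints of
// the shortest edge. If it already exceeds the shortest edge length, the
// tet is marked abortive instead of being split, the same non-termination
// guard applied to the segment and subface cases above.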
if ((fabs(smrrv - smlen) / smlen) <EPSILON) { smrrv = smlen; } if (smrrv > smlen) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[pos] = -1; return; } } } } __global__ void kernelLocatePoint( REAL* d_pointlist, tethandle* d_seg2tetlist, int* d_trifacelist, tethandle* d_tri2tetlist, trihandle* d_tri2trilist, trihandle* d_tri2seglist, tristatus* d_tristatus, int* d_tetlist, tethandle* d_neighborlist, tetstatus* d_tetstatus, int* d_priority, unsigned long* d_randomseed, locateresult* d_pointlocation, trihandle* d_searchsh, tethandle* d_searchtet, int* d_insertidxlist, int* d_threadmarker, int* d_threadlist, REAL* d_steinerptlist, int numofsplittablesubsegs, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int threadmarker = d_threadmarker[threadId]; int eleidx = d_insertidxlist[threadId]; if (threadmarker == 0) { trihandle splitseg(eleidx, 0); tethandle searchtet; cudamesh_sstpivot1(splitseg, searchtet, d_seg2tetlist); d_searchtet[threadId] = searchtet; d_pointlocation[threadId] = ONEDGE; } else if (threadmarker == 1) { int step = 1; int subfaceid = eleidx; d_searchsh[threadId] = trihandle(subfaceid, 0); trihandle neighsh; trihandle *searchsh = d_searchsh + threadId; REAL *searchpt = d_steinerptlist + 3 * threadId; REAL *pa, *pb, *pc; unsigned long *randomseed = d_randomseed + pos; REAL abvpt[3]; // Check if coordinates are valid if (cudamesh_isInvalid(searchpt[0]) || cudamesh_isInvalid(searchpt[1]) || cudamesh_isInvalid(searchpt[2])) { d_tristatus[subfaceid].setAbortive(true); d_threadmarker[threadId] = -1; return; } enum locateresult loc; enum { MOVE_BC, MOVE_CA } nextmove; REAL ori, ori_bc, ori_ca; int i; pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); // Calculate an above point for this facet. cudamesh_calculateabovepoint4(searchpt, pa, pb, pc, abvpt); // 'abvpt' is given. Make sure it is above [a,b,c] ori = cuda_orient3d(pa, pb, pc, abvpt); assert(ori != 0); // SELF_CHECK if (ori > 0) { cudamesh_sesymself(*searchsh); // Reverse the face orientation. } // Find an edge of the face s.t. p lies on its right-hand side (CCW). for (i = 0; i < 3; i++) { pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); ori = cuda_orient3d(pa, pb, abvpt, searchpt); if (ori > 0) break; cudamesh_senextself(*searchsh); } assert(i < 3); // SELF_CHECK pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); if (pc[0] == searchpt[0] && pc[1] == searchpt[1] && pc[2] == searchpt[2]) { cudamesh_senext2self(*searchsh); loc = ONVERTEX; } else { while (1) { ori_bc = cuda_orient3d(pb, pc, abvpt, searchpt); ori_ca = cuda_orient3d(pc, pa, abvpt, searchpt); if (ori_bc < 0) { if (ori_ca < 0) { // (--) // Any of the edges is a viable move. if (cudamesh_randomnation(randomseed, 2)) { nextmove = MOVE_CA; } else { nextmove = MOVE_BC; } } else { // (-#) // Edge [b, c] is viable. nextmove = MOVE_BC; } } else { if (ori_ca < 0) { // (#-) // Edge [c, a] is viable. nextmove = MOVE_CA; } else { if (ori_bc > 0) { if (ori_ca > 0) { // (++) loc = ONFACE; // Inside [a, b, c]. break; } else { // (+0) cudamesh_senext2self(*searchsh); // On edge [c, a]. 
loc = ONEDGE; break; } } else { // ori_bc == 0 if (ori_ca > 0) { // (0+) cudamesh_senextself(*searchsh); // On edge [b, c]. loc = ONEDGE; break; } else { // (00) // p is coincident with vertex c. cudamesh_senext2self(*searchsh); loc = ONVERTEX; break; } } } } // Move to the next face. if (nextmove == MOVE_BC) { cudamesh_senextself(*searchsh); } else { cudamesh_senext2self(*searchsh); } // NON-convex case. Check if we will cross a boundary. if (cudamesh_isshsubseg(*searchsh, d_tri2seglist)) { loc = ENCSEGMENT; break; } cudamesh_spivot(*searchsh, neighsh, d_tri2trilist); if (neighsh.id == -1) { loc = OUTSIDE; // A hull edge. break; } // Adjust the edge orientation. if (cudamesh_sorg(neighsh, d_trifacelist) != cudamesh_sdest(*searchsh, d_trifacelist)) { cudamesh_sesymself(neighsh); } assert(cudamesh_sorg(neighsh, d_trifacelist) == cudamesh_sdest(*searchsh, d_trifacelist)); // SELF_CHECK // Update the newly discovered face and its endpoints. *searchsh = neighsh; pa = cudamesh_id2pointlist(cudamesh_sorg(*searchsh, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*searchsh, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*searchsh, d_trifacelist), d_pointlist); if (pc == searchpt) { cudamesh_senext2self(*searchsh); loc = ONVERTEX; break; } step++; //if (step > 1000) // invalid point coordinates //{ // printf("Subface %d, %d - %lf, %lf, %lf\n", eleidx, threadId, searchpt[0], searchpt[1], searchpt[2]); //} } // while (1) } d_pointlocation[threadId] = loc; if (!(loc == ONFACE || loc == ONEDGE)) { if(numofsplittablesubsegs == 0) d_tristatus[subfaceid].setAbortive(true); // mark the encroached subface rather than the located one d_threadmarker[threadId] = -1; return; } tethandle searchtet; cudamesh_stpivot(*searchsh, searchtet, d_tri2tetlist); d_searchtet[threadId] = searchtet; } else { int tetid = eleidx; tethandle* searchtet = d_searchtet + threadId; REAL* searchpt = d_steinerptlist + 3 * threadId; unsigned long* randomseed = d_randomseed + pos; // Check if coordinates are valid if (cudamesh_isInvalid(searchpt[0]) || cudamesh_isInvalid(searchpt[1]) || cudamesh_isInvalid(searchpt[2])) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[threadId] = -1; return; } REAL *torg, *tdest, *tapex, *toppo; enum { ORGMOVE, DESTMOVE, APEXMOVE } nextmove; REAL ori, oriorg, oridest, oriapex; enum locateresult loc = OUTSIDE; int t1ver; int s; int step = 1; // Init searchtet searchtet->id = tetid; searchtet->ver = 11; // Check if we are in the outside of the convex hull. if (cudamesh_ishulltet(*searchtet, d_tetlist)) { // Get its adjacent tet (inside the hull). searchtet->ver = 3; cudamesh_fsymself(*searchtet, d_neighborlist); } // Let searchtet be the face such that 'searchpt' lies above to it. for (searchtet->ver = 0; searchtet->ver < 4; searchtet->ver++) { torg = cudamesh_id2pointlist(cudamesh_org(*searchtet, d_tetlist), d_pointlist); tdest = cudamesh_id2pointlist(cudamesh_dest(*searchtet, d_tetlist), d_pointlist); tapex = cudamesh_id2pointlist(cudamesh_apex(*searchtet, d_tetlist), d_pointlist); ori = cuda_orient3d(torg, tdest, tapex, searchpt); if (ori < 0.0) break; } assert(searchtet->ver != 4); // Walk through tetrahedra to locate the point. while (true) { toppo = cudamesh_id2pointlist(cudamesh_oppo(*searchtet, d_tetlist), d_pointlist); // Check if the vertex is we seek. if (toppo[0] == searchpt[0] && toppo[1] == searchpt[1] && toppo[2] == searchpt[2]) { // Adjust the origin of searchtet to be searchpt. 
cudamesh_esymself(*searchtet); cudamesh_eprevself(*searchtet); loc = ONVERTEX; // return ONVERTEX; break; } // We enter from one of serarchtet's faces, which face do we exit? oriorg = cuda_orient3d(tdest, tapex, toppo, searchpt); oridest = cuda_orient3d(tapex, torg, toppo, searchpt); oriapex = cuda_orient3d(torg, tdest, toppo, searchpt); // Now decide which face to move. It is possible there are more than one // faces are viable moves. If so, randomly choose one. if (oriorg < 0) { if (oridest < 0) { if (oriapex < 0) { // All three faces are possible. s = cudamesh_randomnation(randomseed, 3); // 's' is in {0,1,2}. if (s == 0) { nextmove = ORGMOVE; } else if (s == 1) { nextmove = DESTMOVE; } else { nextmove = APEXMOVE; } } else { // Two faces, opposite to origin and destination, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = ORGMOVE; } else { nextmove = DESTMOVE; } } } else { if (oriapex < 0) { // Two faces, opposite to origin and apex, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = ORGMOVE; } else { nextmove = APEXMOVE; } } else { // Only the face opposite to origin is viable. nextmove = ORGMOVE; } } } else { if (oridest < 0) { if (oriapex < 0) { // Two faces, opposite to destination and apex, are viable. //s = randomnation(2); // 's' is in {0,1}. if (cudamesh_randomnation(randomseed, 2)) { nextmove = DESTMOVE; } else { nextmove = APEXMOVE; } } else { // Only the face opposite to destination is viable. nextmove = DESTMOVE; } } else { if (oriapex < 0) { // Only the face opposite to apex is viable. nextmove = APEXMOVE; } else { // The point we seek must be on the boundary of or inside this // tetrahedron. Check for boundary cases. if (oriorg == 0) { // Go to the face opposite to origin. cudamesh_enextesymself(*searchtet); if (oridest == 0) { cudamesh_eprevself(*searchtet); // edge oppo->apex if (oriapex == 0) { // oppo is duplicated with p. loc = ONVERTEX; // return ONVERTEX; break; } loc = ONEDGE; // return ONEDGE; break; } if (oriapex == 0) { cudamesh_enextself(*searchtet); // edge dest->oppo loc = ONEDGE; // return ONEDGE; break; } loc = ONFACE; // return ONFACE; break; } if (oridest == 0) { // Go to the face opposite to destination. cudamesh_eprevesymself(*searchtet); if (oriapex == 0) { cudamesh_eprevself(*searchtet); // edge oppo->org loc = ONEDGE; // return ONEDGE; break; } loc = ONFACE; // return ONFACE; break; } if (oriapex == 0) { // Go to the face opposite to apex cudamesh_esymself(*searchtet); loc = ONFACE; // return ONFACE; break; } loc = INTETRAHEDRON; // return INTETRAHEDRON; break; } } } // Move to the selected face. if (nextmove == ORGMOVE) { cudamesh_enextesymself(*searchtet); } else if (nextmove == DESTMOVE) { cudamesh_eprevesymself(*searchtet); } else { cudamesh_esymself(*searchtet); } // Move to the adjacent tetrahedron (maybe a hull tetrahedron). cudamesh_fsymself(*searchtet, d_neighborlist); if (cudamesh_oppo(*searchtet, d_tetlist) == -1) { loc = OUTSIDE; // return OUTSIDE; break; } // Retreat the three vertices of the base face. 
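// (That is, reload torg, tdest and tapex from the tetrahedron just
// entered, ready for the next step of the walk.)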
torg = cudamesh_id2pointlist(cudamesh_org(*searchtet, d_tetlist), d_pointlist); tdest = cudamesh_id2pointlist(cudamesh_dest(*searchtet, d_tetlist), d_pointlist); tapex = cudamesh_id2pointlist(cudamesh_apex(*searchtet, d_tetlist), d_pointlist); step++; //if (step > 1000) // Invalid point coordinates //{ // printf("Tet %d, %d - %lf, %lf, %lf\n", eleidx, threadId, searchpt[0], searchpt[1], searchpt[2]); //} } // while (true) d_pointlocation[threadId] = loc; if (loc == ONVERTEX) { d_tetstatus[tetid].setAbortive(true); d_threadmarker[threadId] = -1; } } } // Split encroached segment __device__ int checkseg4split( trihandle *chkseg, int& encpt, REAL* pointlist, int* seglist, tethandle* seg2tetlist, int* tetlist, tethandle* neighborlist ) { REAL ccent[3], len, r; int i; REAL* forg = cudamesh_id2pointlist(cudamesh_sorg(*chkseg, seglist), pointlist); REAL* fdest = cudamesh_id2pointlist(cudamesh_sdest(*chkseg, seglist), pointlist); // Initialize the return values. encpt = -1; len = cudamesh_distance(forg, fdest); r = 0.5 * len; for (i = 0; i < 3; i++) { ccent[i] = 0.5 * (forg[i] + fdest[i]); } // Check if it is encroached. // Comment: There may exist more than one encroaching points of this segment. // The 'encpt' returns the one which is closet to it. tethandle searchtet, spintet; int eapex; REAL d, diff, smdist = 0; int t1ver; cudamesh_sstpivot1(*chkseg, searchtet, seg2tetlist); spintet = searchtet; while (1) { eapex = cudamesh_apex(spintet, tetlist); if (eapex != -1) { d = cudamesh_distance(ccent, cudamesh_id2pointlist(eapex, pointlist)); diff = d - r; if (fabs(diff) / r < EPSILON) diff = 0.0; // Rounding. if (diff < 0) { // This segment is encroached by eapex. if (encpt == -1) { encpt = eapex; smdist = d; } else { // Choose the closet encroaching point. if (d < smdist) { encpt = eapex; smdist = d; } } } } cudamesh_fnextself(spintet, neighborlist); if (spintet.id == searchtet.id) break; } // while (1) if (encpt != -1) { return 1; } return 0; // No need to split it. } __device__ int checkseg4encroach( REAL *pa, REAL* pb, REAL* checkpt ) { // Check if the point lies inside the diametrical sphere of this seg. 
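// checkpt lies strictly inside the diametral sphere of segment [pa, pb]
// exactly when the angle pa-checkpt-pb is obtuse, i.e. when
// dot(pa - checkpt, pb - checkpt) < 0, which is the test used below.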
REAL v1[3], v2[3]; v1[0] = pa[0] - checkpt[0]; v1[1] = pa[1] - checkpt[1]; v1[2] = pa[2] - checkpt[2]; v2[0] = pb[0] - checkpt[0]; v2[1] = pb[1] - checkpt[1]; v2[2] = pb[2] - checkpt[2]; if (cudamesh_dot(v1, v2) < 0) return 1; return 0; } __global__ void kernelMarkAllEncsegs( REAL * d_pointlist, int* d_seglist, tethandle* d_seg2tetlist, int* d_segencmarker, int* d_tetlist, tethandle* d_neighborlist, int numofsubseg ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofsubseg) return; trihandle chkseg(pos, 0); int encpt; checkseg4split( &chkseg, encpt, d_pointlist, d_seglist, d_seg2tetlist, d_tetlist, d_neighborlist); d_segencmarker[pos] = encpt; } __device__ void projectpoint2edge( REAL* p, REAL* e1, REAL* e2, REAL* prj ) { REAL v1[3], v2[3]; REAL len, l_p; v1[0] = e2[0] - e1[0]; v1[1] = e2[1] - e1[1]; v1[2] = e2[2] - e1[2]; v2[0] = p[0] - e1[0]; v2[1] = p[1] - e1[1]; v2[2] = p[2] - e1[2]; len = sqrt(cudamesh_dot(v1, v1)); assert(len != 0.0); v1[0] /= len; v1[1] /= len; v1[2] /= len; l_p = cudamesh_dot(v1, v2); prj[0] = e1[0] + l_p * v1[0]; prj[1] = e1[1] + l_p * v1[1]; prj[2] = e1[2] + l_p * v1[2]; } __global__ void kernelComputeSteinerPoint_Seg( int* d_threadlist, REAL* d_pointlist, trihandle* d_point2trilist, verttype* d_pointtypelist, int* d_seglist, int* d_seg2parentlist, int* d_segparentlist, int* d_segencmarker, int* d_encseglist, REAL* d_steinerptlist, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int encsegidx = d_encseglist[threadId]; trihandle seg(encsegidx, 0); REAL* ei = cudamesh_id2pointlist(cudamesh_sorg(seg, d_seglist), d_pointlist); REAL* ej = cudamesh_id2pointlist(cudamesh_sdest(seg, d_seglist), d_pointlist); int adjflag = 0, i; REAL* steinpt = cudamesh_id2pointlist(threadId, d_steinerptlist); int refptidx = d_segencmarker[encsegidx]; assert(refptidx >= 0); if (refptidx != MAXINT) { REAL* refpt = cudamesh_id2pointlist(refptidx, d_pointlist); REAL L, L1, t; if (d_pointtypelist[refptidx] == FREESEGVERTEX) { trihandle parentseg; parentseg = d_point2trilist[refptidx]; int sidx1 = d_seg2parentlist[parentseg.id]; int idx_pi = d_segparentlist[sidx1 * 2]; int idx_pj = d_segparentlist[sidx1 * 2 + 1]; REAL* far_pi = cudamesh_id2pointlist(idx_pi, d_pointlist); REAL* far_pj = cudamesh_id2pointlist(idx_pj, d_pointlist); int sidx2 = d_seg2parentlist[seg.id]; int idx_ei = d_segparentlist[sidx2 * 2]; int idx_ej = d_segparentlist[sidx2 * 2 + 1]; REAL* far_ei = cudamesh_id2pointlist(idx_ei, d_pointlist); REAL* far_ej = cudamesh_id2pointlist(idx_ej, d_pointlist); if ((idx_pi == idx_ei) || (idx_pj == idx_ei)) { // Create a Steiner point at the intersection of the segment // [far_ei, far_ej] and the sphere centered at far_ei with // radius |far_ei - refpt|. L = cudamesh_distance(far_ei, far_ej); L1 = cudamesh_distance(far_ei, refpt); t = L1 / L; for (i = 0; i < 3; i++) { steinpt[i] = far_ei[i] + t * (far_ej[i] - far_ei[i]); } adjflag = 1; } else if ((idx_pi == idx_ej) || (idx_pj == idx_ej)) { L = cudamesh_distance(far_ei, far_ej); L1 = cudamesh_distance(far_ej, refpt); t = L1 / L; for (i = 0; i < 3; i++) { steinpt[i] = far_ej[i] + t * (far_ei[i] - far_ej[i]); } adjflag = 1; } else { // Cut the segment by the projection point of refpt. projectpoint2edge(refpt, ei, ej, steinpt); } } else { // Cut the segment by the projection point of refpt. projectpoint2edge(refpt, ei, ej, steinpt); } // Make sure that steinpt is not too close to ei and ej. 
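// If the split point lands within 20% of either endpoint of [ei, ej]
// (t < 0.2 or t > 0.8), fall back to the midpoint so that neither new
// subsegment becomes excessively short.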
L = cudamesh_distance(ei, ej); L1 = cudamesh_distance(steinpt, ei); t = L1 / L; if ((t < 0.2) || (t > 0.8)) { // Split the point at the middle. for (i = 0; i < 3; i++) { steinpt[i] = ei[i] + 0.5 * (ej[i] - ei[i]); } } } else { // Split the point at the middle. for (i = 0; i < 3; i++) { steinpt[i] = ei[i] + 0.5 * (ej[i] - ei[i]); } } } // Split encroached subface __device__ int checkface4split( trihandle *chkfac, int& encpt, REAL* pointlist, int* trifacelist, tethandle* tri2tetlist, int* tetlist ) { REAL *pa, *pb, *pc; REAL area, rd, len; REAL A[4][4], rhs[4], cent[3], D; int indx[4]; int i; encpt = -1; pa = cudamesh_id2pointlist(cudamesh_sorg(*chkfac, trifacelist), pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(*chkfac, trifacelist), pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(*chkfac, trifacelist), pointlist); // Compute the coefficient matrix A (3x3). A[0][0] = pb[0] - pa[0]; A[0][1] = pb[1] - pa[1]; A[0][2] = pb[2] - pa[2]; // vector V1 (pa->pb) A[1][0] = pc[0] - pa[0]; A[1][1] = pc[1] - pa[1]; A[1][2] = pc[2] - pa[2]; // vector V2 (pa->pc) cudamesh_cross(A[0], A[1], A[2]); // vector V3 (V1 X V2) area = 0.5 * sqrt(cudamesh_dot(A[2], A[2])); // The area of [a,b,c]. // Compute the right hand side vector b (3x1). rhs[0] = 0.5 * cudamesh_dot(A[0], A[0]); // edge [a,b] rhs[1] = 0.5 * cudamesh_dot(A[1], A[1]); // edge [a,c] rhs[2] = 0.0; // Solve the 3 by 3 equations use LU decomposition with partial // pivoting and backward and forward substitute. if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // A degenerate triangle. //printf("checkface4split: A degenerate subface!\n"); encpt = -1; return -1; } cudamesh_lu_solve(A, 3, indx, rhs, 0); cent[0] = pa[0] + rhs[0]; cent[1] = pa[1] + rhs[1]; cent[2] = pa[2] + rhs[2]; rd = sqrt(rhs[0] * rhs[0] + rhs[1] * rhs[1] + rhs[2] * rhs[2]); tethandle searchtet; REAL smlen = 0; // Check if this subface is locally encroached. for (i = 0; i < 2; i++) { cudamesh_stpivot(*chkfac, searchtet, tri2tetlist); if (!cudamesh_ishulltet(searchtet, tetlist)) { len = cudamesh_distance( cudamesh_id2pointlist(cudamesh_oppo(searchtet, tetlist), pointlist), cent); if ((fabs(len - rd) / rd) < EPSILON) len = rd;// Rounding. if (len < rd) { if (smlen == 0) { smlen = len; encpt = cudamesh_oppo(searchtet, tetlist); } else { if (len < smlen) { smlen = len; encpt = cudamesh_oppo(searchtet, tetlist); } } } } cudamesh_sesymself(*chkfac); } return encpt != -1; } __device__ int checkface4encroach( REAL *pa, REAL *pb, REAL *pc, REAL *checkpt ) { REAL rd, len, cent[3]; cudamesh_circumsphere(pa, pb, pc, NULL, cent, &rd); assert(rd != 0); len = cudamesh_distance(cent, checkpt); if ((fabs(len - rd) / rd) < EPSILON) len = rd; // Rounding. if (len < rd) { // The point lies inside the circumsphere of this face. return 1; // Encroached. 
} return 0; } __global__ void kernelMarkAllEncsubfaces( REAL * d_pointlist, int* d_trifacelist, tethandle* d_tri2tetlist, int* d_subfaceencmarker, int* d_tetlist, int numofsubface ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofsubface) return; trihandle chkfac(pos, 0); int encpt; checkface4split( &chkfac, encpt, d_pointlist, d_trifacelist, d_tri2tetlist, d_tetlist); d_subfaceencmarker[pos] = encpt; } __global__ void kernelComputeSteinerPoint_Subface( REAL* d_pointlist, int* d_trifacelist, tristatus* d_tristatus, int* d_encsubfacelist, REAL* d_steinerptlist, int numofencsubface ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofencsubface) return; int encsubfaceidx = d_encsubfacelist[pos]; REAL *pa, *pb, *pc; REAL area, rd, len; REAL A[4][4], rhs[4], D; int indx[4]; int i; trihandle chkfac(encsubfaceidx, 0); REAL* steinpt = cudamesh_id2pointlist(pos, d_steinerptlist); pa = cudamesh_id2pointlist(cudamesh_sorg(chkfac, d_trifacelist), d_pointlist); pb = cudamesh_id2pointlist(cudamesh_sdest(chkfac, d_trifacelist), d_pointlist); pc = cudamesh_id2pointlist(cudamesh_sapex(chkfac, d_trifacelist), d_pointlist); // Compute the coefficient matrix A (3x3). A[0][0] = pb[0] - pa[0]; A[0][1] = pb[1] - pa[1]; A[0][2] = pb[2] - pa[2]; // vector V1 (pa->pb) A[1][0] = pc[0] - pa[0]; A[1][1] = pc[1] - pa[1]; A[1][2] = pc[2] - pa[2]; // vector V2 (pa->pc) cudamesh_cross(A[0], A[1], A[2]); // vector V3 (V1 X V2) area = 0.5 * sqrt(cudamesh_dot(A[2], A[2])); // The area of [a,b,c]. // Compute the right hand side vector b (3x1). rhs[0] = 0.5 * cudamesh_dot(A[0], A[0]); // edge [a,b] rhs[1] = 0.5 * cudamesh_dot(A[1], A[1]); // edge [a,c] rhs[2] = 0.0; // Solve the 3 by 3 equations use LU decomposition with partial // pivoting and backward and forward substitute. if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // A degenerate triangle. printf("kernelComputeSteinerPointOnSubface: A degenerate subface. This should not happen!\n"); } cudamesh_lu_solve(A, 3, indx, rhs, 0); steinpt[0] = pa[0] + rhs[0]; steinpt[1] = pa[1] + rhs[1]; steinpt[2] = pa[2] + rhs[2]; } // Split bad tets __device__ int checktet4split( tethandle* chktet, REAL* pointlist, int* tetlist, REAL minratio ) { int ipa, ipb, ipc, ipd; REAL *pa, *pb, *pc, *pd; REAL vda[3], vdb[3], vdc[3]; REAL vab[3], vbc[3], vca[3]; REAL elen[6]; REAL smlen = 0, rd; REAL A[4][4], rhs[4], D; int indx[4]; int i; ipd = tetlist[4*(*chktet).id + 3]; if (ipd == -1) { return 0; // Do not split a hull tet. } ipa = tetlist[4*(*chktet).id + 0]; ipb = tetlist[4*(*chktet).id + 1]; ipc = tetlist[4*(*chktet).id + 2]; pa = cudamesh_id2pointlist(ipa, pointlist); pb = cudamesh_id2pointlist(ipb, pointlist); pc = cudamesh_id2pointlist(ipc, pointlist); pd = cudamesh_id2pointlist(ipd, pointlist); // Get the edge vectors vda: d->a, vdb: d->b, vdc: d->c. // Set the matrix A = [vda, vdb, vdc]^T. for (i = 0; i < 3; i++) A[0][i] = vda[i] = pa[i] - pd[i]; for (i = 0; i < 3; i++) A[1][i] = vdb[i] = pb[i] - pd[i]; for (i = 0; i < 3; i++) A[2][i] = vdc[i] = pc[i] - pd[i]; // Get the other edge vectors. for (i = 0; i < 3; i++) vab[i] = pb[i] - pa[i]; for (i = 0; i < 3; i++) vbc[i] = pc[i] - pb[i]; for (i = 0; i < 3; i++) vca[i] = pa[i] - pc[i]; if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // A degenerated tet (vol = 0). // This is possible due to the use of exact arithmetic. We temporarily // leave this tet. It should be fixed by mesh optimization. return 0; } // Check the radius-edge ratio. Set by -q#. 
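// The radius-edge ratio is the circumradius of the tet divided by the
// length of its shortest edge; a tet whose ratio exceeds 'minratio' is
// reported as splittable (return 1).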
if (minratio > 0) { // Calculate the circumcenter and radius of this tet. rhs[0] = 0.5 * cudamesh_dot(vda, vda); rhs[1] = 0.5 * cudamesh_dot(vdb, vdb); rhs[2] = 0.5 * cudamesh_dot(vdc, vdc); cudamesh_lu_solve(A, 3, indx, rhs, 0); rd = sqrt(cudamesh_dot(rhs, rhs)); // Calculate the shortest edge length. elen[0] = cudamesh_dot(vda, vda); elen[1] = cudamesh_dot(vdb, vdb); elen[2] = cudamesh_dot(vdc, vdc); elen[3] = cudamesh_dot(vab, vab); elen[4] = cudamesh_dot(vbc, vbc); elen[5] = cudamesh_dot(vca, vca); smlen = elen[0]; //sidx = 0; for (i = 1; i < 6; i++) { if (smlen > elen[i]) { smlen = elen[i]; //sidx = i; } } smlen = sqrt(smlen); D = rd / smlen; if (D > minratio) { // A bad radius-edge ratio. return 1; } } return 0; } __global__ void kernelMarkAllBadtets( REAL* d_pointlist, int* d_tetlist, tetstatus* d_tetstatus, REAL minratio, int numofbadtet ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofbadtet) return; tethandle chktet(pos, 11); if (checktet4split(&chktet, d_pointlist, d_tetlist, minratio)) { d_tetstatus[pos].setBad(true); } } __global__ void kernelComputeSteinerPoint_Tet( int* d_tetidlist, REAL* d_insertptlist, int* d_threadlist, REAL* d_pointlist, int* d_tetlist, tetstatus* d_tetstatus, int* d_priority, int* d_threadmarker, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; int threadId = d_threadlist[pos]; int tetid = d_tetidlist[threadId]; REAL* steinpt = d_insertptlist + 3 * threadId; int ipa, ipb, ipc, ipd; REAL *pa, *pb, *pc, *pd; REAL vda[3], vdb[3], vdc[3]; REAL vab[3], vbc[3], vca[3]; REAL elen[6]; REAL smlen = 0, rd; REAL A[4][4], rhs[4], D; int indx[4]; int i; ipd = d_tetlist[4 * tetid + 3]; if (ipd == -1) { // This should not happend printf("Thread #%d - Error: Try to split a hull tet #%d!\n", threadId, tetid); return; } ipa = d_tetlist[4 * tetid + 0]; ipb = d_tetlist[4 * tetid + 1]; ipc = d_tetlist[4 * tetid + 2]; pa = cudamesh_id2pointlist(ipa, d_pointlist); pb = cudamesh_id2pointlist(ipb, d_pointlist); pc = cudamesh_id2pointlist(ipc, d_pointlist); pd = cudamesh_id2pointlist(ipd, d_pointlist); // Get the edge vectors vda: d->a, vdb: d->b, vdc: d->c. // Set the matrix A = [vda, vdb, vdc]^T. for (i = 0; i < 3; i++) A[0][i] = vda[i] = pa[i] - pd[i]; for (i = 0; i < 3; i++) A[1][i] = vdb[i] = pb[i] - pd[i]; for (i = 0; i < 3; i++) A[2][i] = vdc[i] = pc[i] - pd[i]; // Get the other edge vectors. for (i = 0; i < 3; i++) vab[i] = pb[i] - pa[i]; for (i = 0; i < 3; i++) vbc[i] = pc[i] - pb[i]; for (i = 0; i < 3; i++) vca[i] = pa[i] - pc[i]; //if (cuda_orient3d(pa, pb, pc, pd) < 0.001 && cuda_orient3d(pa, pb, pc, pd) > -0.001) //{ // Nearly degenerated tet. // Set to abortive to avoid invalid point coordinate //d_tetstatus[tetid].setAbortive(true); //d_threadmarker[threadId] = -1; //return; //} if (!cudamesh_lu_decmp(A, 3, indx, &D, 0)) { // This should not happend //printf("Thread #%d - Error: Try to split a degenerated tet #%d!\n", threadId, tetid); d_tetstatus[tetid].setAbortive(true); d_threadmarker[threadId] = -1; return; } // Calculate the circumcenter and radius of this tet. rhs[0] = 0.5 * cudamesh_dot(vda, vda); rhs[1] = 0.5 * cudamesh_dot(vdb, vdb); rhs[2] = 0.5 * cudamesh_dot(vdc, vdc); cudamesh_lu_solve(A, 3, indx, rhs, 0); for (i = 0; i < 3; i++) { steinpt[i] = pd[i] + rhs[i]; } // set priority //rd = sqrt(cudamesh_dot(rhs, rhs)); //Calculate the shortest edge length. 
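// elen[] holds squared edge lengths here; the square root is taken only
// for the shortest one, once the minimum has been found.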
elen[0] = cudamesh_dot(vda, vda); elen[1] = cudamesh_dot(vdb, vdb); elen[2] = cudamesh_dot(vdc, vdc); elen[3] = cudamesh_dot(vab, vab); elen[4] = cudamesh_dot(vbc, vbc); elen[5] = cudamesh_dot(vca, vca); //Use radius-to-shortest-edge radio as priority //smlen = elen[0]; //sidx = 0; //for (i = 1; i < 6; i++) { // if (smlen > elen[i]) { // smlen = elen[i]; //sidx = i; // } //} //smlen = sqrt(smlen); //d_priority[threadId] = __float_as_int((float)(smlen / rd)); // Use volume as priority // Use heron-type formula to compute the volume of a tetrahedron // https://en.wikipedia.org/wiki/Heron%27s_formula //if (cuda_orient3d(pa, pb, pc, pd) < 0.001 && cuda_orient3d(pa, pb, pc, pd) > -0.001) //{ // d_priority[threadId] = MAXINT; //} //else { REAL U, V, W, u, v, w; // first three form a triangle; u opposite to U and so on REAL X, x, Y, y, Z, z; REAL a, b, c, d; U = sqrt(elen[3]); //ab V = sqrt(elen[4]); //bc W = sqrt(elen[5]); //ca u = sqrt(elen[2]); //dc v = sqrt(elen[0]); //da w = sqrt(elen[1]); //db X = (w - U + v)*(U + v + w); x = (U - v + w)*(v - w + U); Y = (u - V + w)*(V + w + u); y = (V - w + u)*(w - u + V); Z = (v - W + u)*(W + u + v); z = (W - u + v)*(u - v + W); a = sqrt(x*Y*Z); b = sqrt(y*Z*X); c = sqrt(z*X*Y); d = sqrt(x*y*z); REAL vol = sqrt((-a + b + c + d)*(a - b + c + d)*(a + b - c + d)*(a + b + c - d)) / (192 * u*v*w); d_priority[threadId] = __float_as_int((float)(1 / vol)); //d_priority[threadId] = __float_as_int((float)(1 / rd)); } //if (cuda_orient3d(pa, pb, pc, pd) < 0.001 && cuda_orient3d(pa, pb, pc, pd) > -0.001) //{ // if(pos < 100) // printf("%d ", d_priority[threadId]); //} //if (pos < 100) // printf("Tet #%d: (%lf, %lf, %lf), (%lf, %lf, %lf), (%lf, %lf, %lf), (%lf, %lf, %lf) | (%lf, %lf, %lf) | %lf\n", // tetid, // pa[0], pa[1], pa[2], // pb[0], pb[1], pb[2], // pc[0], pc[1], pc[2], // pd[0], pd[1], pd[2], // steinpt[0], steinpt[1], steinpt[2], // cuda_orient3d(pa, pb, pc, pd)); //if (pos < 100) // printf("%d ", d_priority[threadId]); } __global__ void kernelCompactSeg( int* d_seglist, int* d_sizes, int* d_indices, int* d_list, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_sizes[pos] == 0) return; int index = d_indices[pos]; d_list[2 * index + 0] = d_seglist[3 * pos + 0]; d_list[2 * index + 1] = d_seglist[3 * pos + 1]; } __global__ void kernelCompactTriface( int* d_trifacelist, int* d_sizes, int* d_indices, int* d_list, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_sizes[pos] == 0) return; int index = d_indices[pos]; d_list[3 * index + 0] = d_trifacelist[3 * pos + 0]; d_list[3 * index + 1] = d_trifacelist[3 * pos + 1]; d_list[3 * index + 2] = d_trifacelist[3 * pos + 2]; } __global__ void kernelCompactTet_Phase1( int* d_tetlist, tetstatus* d_tetstatus, int* d_sizes, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_tetstatus[pos].isEmpty()) d_sizes[pos] = 0; if (d_tetlist[4 * pos + 3] == -1) d_sizes[pos] = 0; } __global__ void kernelCompactTet_Phase2( int* d_tetlist, int* d_sizes, int* d_indices, int* d_list, int numofthreads ) { int pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos >= numofthreads) return; if (d_sizes[pos] == 0) return; int index = d_indices[pos]; d_list[4 * index + 0] = d_tetlist[4 * pos + 0]; d_list[4 * index + 1] = d_tetlist[4 * pos + 1]; d_list[4 * index + 2] = d_tetlist[4 * pos + 2]; d_list[4 * index + 3] = d_tetlist[4 * pos + 3]; }
dbbc5faf74c65268d380892722349cf06cb59ffd.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "property_generator.cuh" #include <utilities/base_fixture.hpp> #include <utilities/device_comm_wrapper.hpp> #include <utilities/mg_utilities.hpp> #include <utilities/test_graphs.hpp> #include <utilities/test_utilities.hpp> #include <utilities/thrust_wrapper.hpp> #include <prims/per_v_random_select_transform_outgoing_e.cuh> #include <prims/vertex_frontier.cuh> #include <cugraph/edge_src_dst_property.hpp> #include <cugraph/graph_functions.hpp> #include <cugraph/graph_view.hpp> #include <cugraph/utilities/dataframe_buffer.hpp> #include <cugraph/utilities/high_res_timer.hpp> #include <cugraph/utilities/host_scalar_comm.hpp> #include <cugraph/utilities/thrust_tuple_utils.hpp> #include <raft/comms/comms.hpp> #include <raft/comms/mpi_comms.hpp> #include <raft/core/comms.hpp> #include <raft/core/handle.hpp> #include <rmm/device_uvector.hpp> #include <thrust/adjacent_difference.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/optional.h> #include <thrust/tuple.h> #include <gtest/gtest.h> #include <random> template <typename vertex_t, typename property_t> struct e_op_t { using result_t = decltype(cugraph::thrust_tuple_cat(thrust::tuple<vertex_t, vertex_t>{}, cugraph::to_thrust_tuple(property_t{}), cugraph::to_thrust_tuple(property_t{}))); __device__ result_t operator()( vertex_t src, vertex_t dst, property_t src_prop, property_t dst_prop, thrust::nullopt_t) const { if constexpr (cugraph::is_thrust_tuple_of_arithmetic<property_t>::value) { static_assert(thrust::tuple_size<property_t>::value == size_t{2}); return thrust::make_tuple(src, dst, thrust::get<0>(src_prop), thrust::get<1>(src_prop), thrust::get<0>(dst_prop), thrust::get<1>(dst_prop)); } else { return thrust::make_tuple(src, dst, src_prop, dst_prop); } } }; struct Prims_Usecase { size_t num_seeds{0}; size_t K{0}; bool with_replacement{false}; bool use_invalid_value{false}; bool test_weighted{false}; bool check_correctness{true}; }; template <typename input_usecase_t> class Tests_MGPerVRandomSelectTransformOutgoingE : public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> { public: Tests_MGPerVRandomSelectTransformOutgoingE() {} static void SetUpTestCase() { handle_ = cugraph::test::initialize_mg_handle(); } static void TearDownTestCase() { handle_.reset(); } virtual void SetUp() {} virtual void TearDown() {} // Verify the results of per_v_random_select_transform_outgoing_e primitive template <typename vertex_t, typename edge_t, typename weight_t, typename property_t> void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase) { HighResTimer hr_timer{}; auto const comm_rank = handle_->get_comms().get_rank(); auto const comm_size = handle_->get_comms().get_size(); // 1. 
create MG graph if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement handle_->get_comms().barrier(); hr_timer.start("MG Construct graph"); } cugraph::graph_t<vertex_t, edge_t, false, true> mg_graph(*handle_); std::optional<rmm::device_uvector<vertex_t>> mg_renumber_map{std::nullopt}; std::tie(mg_graph, std::ignore, mg_renumber_map) = cugraph::test::construct_graph<vertex_t, edge_t, weight_t, false, true>( *handle_, input_usecase, prims_usecase.test_weighted, true); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement handle_->get_comms().barrier(); hr_timer.stop(); hr_timer.display_and_clear(std::cout); } auto mg_graph_view = mg_graph.view(); // 2. run MG per_v_random_select_transform_outgoing_e primitive const int hash_bin_count = 5; auto mg_vertex_prop = cugraph::test::generate<vertex_t, property_t>::vertex_property( *handle_, *mg_renumber_map, hash_bin_count); auto mg_src_prop = cugraph::test::generate<vertex_t, property_t>::src_property( *handle_, mg_graph_view, mg_vertex_prop); auto mg_dst_prop = cugraph::test::generate<vertex_t, property_t>::dst_property( *handle_, mg_graph_view, mg_vertex_prop); raft::random::RngState rng_state(static_cast<uint64_t>(handle_->get_comms().get_rank())); auto select_count = prims_usecase.with_replacement ? prims_usecase.num_seeds : ::min(prims_usecase.num_seeds, static_cast<size_t>(mg_graph_view.number_of_vertices())); auto mg_vertex_buffer = cugraph::select_random_vertices( *handle_, mg_graph_view, std::optional<raft::device_span<vertex_t const>>{std::nullopt}, rng_state, select_count, prims_usecase.with_replacement, false); constexpr size_t bucket_idx_cur = 0; constexpr size_t num_buckets = 1; cugraph::vertex_frontier_t<vertex_t, void, true, false> mg_vertex_frontier(*handle_, num_buckets); mg_vertex_frontier.bucket(bucket_idx_cur) .insert(cugraph::get_dataframe_buffer_begin(mg_vertex_buffer), cugraph::get_dataframe_buffer_end(mg_vertex_buffer)); using result_t = decltype(cugraph::thrust_tuple_cat(thrust::tuple<vertex_t, vertex_t>{}, cugraph::to_thrust_tuple(property_t{}), cugraph::to_thrust_tuple(property_t{}))); std::optional<result_t> invalid_value{std::nullopt}; if (prims_usecase.use_invalid_value) { invalid_value = result_t{}; thrust::get<0>(*invalid_value) = cugraph::invalid_vertex_id<vertex_t>::value; thrust::get<1>(*invalid_value) = cugraph::invalid_vertex_id<vertex_t>::value; } if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement handle_->get_comms().barrier(); hr_timer.start("MG per_v_random_select_transform_outgoing_e"); } auto [mg_sample_offsets, mg_sample_e_op_results] = cugraph::per_v_random_select_transform_outgoing_e(*handle_, mg_graph_view, mg_vertex_frontier.bucket(bucket_idx_cur), mg_src_prop.view(), mg_dst_prop.view(), cugraph::edge_dummy_property_t{}.view(), e_op_t<vertex_t, property_t>{}, rng_state, prims_usecase.K, prims_usecase.with_replacement, invalid_value); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement handle_->get_comms().barrier(); hr_timer.stop(); hr_timer.display_and_clear(std::cout); } // 3. 
validate MG results if (prims_usecase.check_correctness) { cugraph::unrenumber_int_vertices<vertex_t, true>( *handle_, mg_vertex_frontier.bucket(bucket_idx_cur).begin(), mg_vertex_frontier.bucket(bucket_idx_cur).size(), (*mg_renumber_map).data(), mg_graph_view.vertex_partition_range_lasts()); std::optional<rmm::device_uvector<size_t>> mg_sample_counts{std::nullopt}; if (mg_sample_offsets) { mg_sample_counts = rmm::device_uvector<size_t>( mg_vertex_frontier.bucket(bucket_idx_cur).size(), handle_->get_stream()); thrust::adjacent_difference(handle_->get_thrust_policy(), (*mg_sample_offsets).begin() + 1, (*mg_sample_offsets).end(), (*mg_sample_counts).begin()); } cugraph::unrenumber_int_vertices<vertex_t, true>( *handle_, std::get<0>(mg_sample_e_op_results).data(), std::get<0>(mg_sample_e_op_results).size(), (*mg_renumber_map).data(), mg_graph_view.vertex_partition_range_lasts()); cugraph::unrenumber_int_vertices<vertex_t, true>( *handle_, std::get<1>(mg_sample_e_op_results).data(), std::get<1>(mg_sample_e_op_results).size(), (*mg_renumber_map).data(), mg_graph_view.vertex_partition_range_lasts()); auto mg_aggregate_frontier_vertices = cugraph::test::device_gatherv( *handle_, raft::device_span<vertex_t const>(mg_vertex_frontier.bucket(bucket_idx_cur).begin(), mg_vertex_frontier.bucket(bucket_idx_cur).size())); std::optional<rmm::device_uvector<size_t>> mg_aggregate_sample_counts{std::nullopt}; if (mg_sample_counts) { mg_aggregate_sample_counts = cugraph::test::device_gatherv( *handle_, raft::device_span<size_t const>((*mg_sample_counts).data(), (*mg_sample_counts).size())); } auto mg_aggregate_sample_e_op_results = cugraph::allocate_dataframe_buffer<result_t>(0, handle_->get_stream()); std::get<0>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<0>(mg_sample_e_op_results).data(), std::get<0>(mg_sample_e_op_results).size()); std::get<1>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<1>(mg_sample_e_op_results).data(), std::get<1>(mg_sample_e_op_results).size()); std::get<2>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<2>(mg_sample_e_op_results).data(), std::get<2>(mg_sample_e_op_results).size()); std::get<3>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<3>(mg_sample_e_op_results).data(), std::get<3>(mg_sample_e_op_results).size()); if constexpr (cugraph::is_thrust_tuple_of_arithmetic<property_t>::value) { std::get<4>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<4>(mg_sample_e_op_results).data(), std::get<4>(mg_sample_e_op_results).size()); std::get<5>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<5>(mg_sample_e_op_results).data(), std::get<5>(mg_sample_e_op_results).size()); } cugraph::graph_t<vertex_t, edge_t, false, false> sg_graph(*handle_); std::tie(sg_graph, std::ignore, std::ignore) = cugraph::test::mg_graph_to_sg_graph( *handle_, mg_graph_view, std::optional<cugraph::edge_property_view_t<edge_t, weight_t const*>>{std::nullopt}, std::make_optional<raft::device_span<vertex_t const>>((*mg_renumber_map).data(), (*mg_renumber_map).size()), false); if (handle_->get_comms().get_rank() == 0) { std::optional<rmm::device_uvector<size_t>> mg_aggregate_sample_offsets{std::nullopt}; if (mg_aggregate_sample_counts) { mg_aggregate_sample_offsets = rmm::device_uvector<size_t>( (*mg_aggregate_sample_counts).size() + 1, handle_->get_stream()); 
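// Turn the gathered per-vertex sample counts into an offsets array:
// element 0 is zeroed, then an inclusive scan of the counts fills
// positions 1..n, giving prefix-sum offsets into the gathered results.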
(*mg_aggregate_sample_offsets).set_element_to_zero_async(0, handle_->get_stream()); thrust::inclusive_scan(handle_->get_thrust_policy(), (*mg_aggregate_sample_counts).begin(), (*mg_aggregate_sample_counts).end(), (*mg_aggregate_sample_offsets).begin() + 1); } auto sg_graph_view = sg_graph.view(); rmm::device_uvector<edge_t> sg_offsets(sg_graph_view.number_of_vertices() + vertex_t{1}, handle_->get_stream()); thrust::copy(handle_->get_thrust_policy(), sg_graph_view.local_edge_partition_view().offsets().begin(), sg_graph_view.local_edge_partition_view().offsets().end(), sg_offsets.begin()); rmm::device_uvector<vertex_t> sg_indices(sg_graph_view.number_of_edges(), handle_->get_stream()); thrust::copy(handle_->get_thrust_policy(), sg_graph_view.local_edge_partition_view().indices().begin(), sg_graph_view.local_edge_partition_view().indices().end(), sg_indices.begin()); auto num_invalids = static_cast<size_t>(thrust::count_if( handle_->get_thrust_policy(), thrust::make_counting_iterator(size_t{0}), thrust::make_counting_iterator(mg_aggregate_frontier_vertices.size()), [frontier_vertex_first = mg_aggregate_frontier_vertices.begin(), sample_offsets = mg_aggregate_sample_offsets ? thrust::make_optional<size_t const*>( (*mg_aggregate_sample_offsets).data()) : thrust::nullopt, sample_e_op_result_first = cugraph::get_dataframe_buffer_begin(mg_aggregate_sample_e_op_results), sg_offsets = sg_offsets.begin(), sg_indices = sg_indices.begin(), K = prims_usecase.K, with_replacement = prims_usecase.with_replacement, invalid_value = invalid_value ? thrust::make_optional<result_t>(*invalid_value) : thrust::nullopt, property_transform = cugraph::test::detail::property_transform<vertex_t, property_t>{ hash_bin_count}] __device__(size_t i) { auto v = *(frontier_vertex_first + i); // check sample_offsets auto offset_first = sample_offsets ? *(*sample_offsets + i) : K * i; auto offset_last = sample_offsets ? 
*(*sample_offsets + (i + 1)) : K * (i + 1); if (!sample_offsets) { size_t num_valids{0}; for (size_t j = offset_first; j < offset_last; ++j) { auto e_op_result = *(sample_e_op_result_first + j); if (e_op_result == *invalid_value) { break; } ++num_valids; } for (size_t j = offset_first + num_valids; j < offset_last; ++j) { auto e_op_result = *(sample_e_op_result_first + j); if (e_op_result != *invalid_value) { return true; } } offset_last = offset_first + num_valids; } auto count = offset_last - offset_first; auto out_degree = *(sg_offsets + v + 1) - *(sg_offsets + v); if (with_replacement) { if ((out_degree > 0 && count != K) || (out_degree == 0 && count != 0)) { return true; } } else { if (count != ::min(static_cast<size_t>(out_degree), K)) { return true; } } // check sample_e_op_results for (size_t j = offset_first; j < offset_last; ++j) { auto e_op_result = *(sample_e_op_result_first + j); auto sg_src = thrust::get<0>(e_op_result); auto sg_dst = thrust::get<1>(e_op_result); auto sg_nbr_first = sg_indices + *(sg_offsets + sg_src); auto sg_nbr_last = sg_indices + *(sg_offsets + (sg_src + vertex_t{1})); if (!thrust::binary_search(thrust::seq, sg_nbr_first, sg_nbr_last, sg_dst)) { // assumed neighbor lists are sorted return true; } property_t src_val{}; property_t dst_val{}; if constexpr (cugraph::is_thrust_tuple_of_arithmetic<property_t>::value) { src_val = thrust::make_tuple(thrust::get<2>(e_op_result), thrust::get<3>(e_op_result)); dst_val = thrust::make_tuple(thrust::get<4>(e_op_result), thrust::get<5>(e_op_result)); } else { src_val = thrust::get<2>(e_op_result); dst_val = thrust::get<3>(e_op_result); } if (src_val != property_transform(sg_src)) { return true; } if (dst_val != property_transform(sg_dst)) { return true; } if (!with_replacement) { auto sg_dst_first = thrust::get<1>(sample_e_op_result_first.get_iterator_tuple()) + offset_first; auto sg_dst_last = thrust::get<1>(sample_e_op_result_first.get_iterator_tuple()) + offset_last; auto dst_count = thrust::count(thrust::seq, sg_dst_first, sg_dst_last, sg_dst); // this could be inefficient for high-degree vertices, if // we sort [sg_dst_first, sg_dst_last) we can use binary // search but we may better not modify the sampling output // and allow inefficiency as this is just for testing auto multiplicity = thrust::distance( thrust::lower_bound(thrust::seq, sg_nbr_first, sg_nbr_last, sg_dst), thrust::upper_bound(thrust::seq, sg_nbr_first, sg_nbr_last, sg_dst)); // this assumes neighbor lists are sorted if (dst_count > multiplicity) { return true; } } } return false; })); ASSERT_TRUE(num_invalids == 0); } } } private: static std::unique_ptr<raft::handle_t> handle_; }; template <typename input_usecase_t> std::unique_ptr<raft::handle_t> Tests_MGPerVRandomSelectTransformOutgoingE<input_usecase_t>::handle_ = nullptr; using Tests_MGPerVRandomSelectTransformOutgoingE_File = Tests_MGPerVRandomSelectTransformOutgoingE<cugraph::test::File_Usecase>; using Tests_MGPerVRandomSelectTransformOutgoingE_Rmat = Tests_MGPerVRandomSelectTransformOutgoingE<cugraph::test::Rmat_Usecase>; TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_File, CheckInt32Int32FloatTupleIntFloat) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt32Int32FloatTupleIntFloat) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>>( std::get<0>(param), 
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt32Int64FloatTupleIntFloat) { auto param = GetParam(); run_current_test<int32_t, int64_t, float, thrust::tuple<int, float>>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt64Int64FloatTupleIntFloat) { auto param = GetParam(); run_current_test<int64_t, int64_t, float, thrust::tuple<int, float>>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_File, CheckInt32Int32Float) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt32Int32Float) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt32Int64Float) { auto param = GetParam(); run_current_test<int32_t, int64_t, float, int>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt64Int64Float) { auto param = GetParam(); run_current_test<int64_t, int64_t, float, int>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } INSTANTIATE_TEST_SUITE_P( file_test, Tests_MGPerVRandomSelectTransformOutgoingE_File, ::testing::Combine( ::testing::Values(Prims_Usecase{size_t{1000}, size_t{4}, false, false, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, false, true, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, true, false, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, true, true, false, true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx")))); INSTANTIATE_TEST_SUITE_P( rmat_small_test, Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, ::testing::Combine( ::testing::Values(Prims_Usecase{size_t{1000}, size_t{4}, false, false, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, false, true, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, true, false, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, true, true, false, true}), ::testing::Values(cugraph::test::Rmat_Usecase(10, 16, 0.57, 0.19, 0.19, 0, false, false)))); INSTANTIATE_TEST_SUITE_P( rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with --gtest_filter to select only the rmat_benchmark_test with a specific vertex & edge type combination) by command line arguments and do not include more than one Rmat_Usecase that differ only in scale or edge factor (to avoid running same benchmarks more than once) */ Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, ::testing::Combine( ::testing::Values(Prims_Usecase{size_t{10000000}, size_t{25}, false, false, false, false}, Prims_Usecase{size_t{10000000}, size_t{25}, false, true, false, false}, Prims_Usecase{size_t{10000000}, size_t{25}, true, false, false, false}, Prims_Usecase{size_t{10000000}, size_t{25}, true, true, false, false}), 
::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false)))); CUGRAPH_MG_TEST_PROGRAM_MAIN()
dbbc5faf74c65268d380892722349cf06cb59ffd.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "property_generator.cuh" #include <utilities/base_fixture.hpp> #include <utilities/device_comm_wrapper.hpp> #include <utilities/mg_utilities.hpp> #include <utilities/test_graphs.hpp> #include <utilities/test_utilities.hpp> #include <utilities/thrust_wrapper.hpp> #include <prims/per_v_random_select_transform_outgoing_e.cuh> #include <prims/vertex_frontier.cuh> #include <cugraph/edge_src_dst_property.hpp> #include <cugraph/graph_functions.hpp> #include <cugraph/graph_view.hpp> #include <cugraph/utilities/dataframe_buffer.hpp> #include <cugraph/utilities/high_res_timer.hpp> #include <cugraph/utilities/host_scalar_comm.hpp> #include <cugraph/utilities/thrust_tuple_utils.hpp> #include <raft/comms/comms.hpp> #include <raft/comms/mpi_comms.hpp> #include <raft/core/comms.hpp> #include <raft/core/handle.hpp> #include <rmm/device_uvector.hpp> #include <thrust/adjacent_difference.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/optional.h> #include <thrust/tuple.h> #include <gtest/gtest.h> #include <random> template <typename vertex_t, typename property_t> struct e_op_t { using result_t = decltype(cugraph::thrust_tuple_cat(thrust::tuple<vertex_t, vertex_t>{}, cugraph::to_thrust_tuple(property_t{}), cugraph::to_thrust_tuple(property_t{}))); __device__ result_t operator()( vertex_t src, vertex_t dst, property_t src_prop, property_t dst_prop, thrust::nullopt_t) const { if constexpr (cugraph::is_thrust_tuple_of_arithmetic<property_t>::value) { static_assert(thrust::tuple_size<property_t>::value == size_t{2}); return thrust::make_tuple(src, dst, thrust::get<0>(src_prop), thrust::get<1>(src_prop), thrust::get<0>(dst_prop), thrust::get<1>(dst_prop)); } else { return thrust::make_tuple(src, dst, src_prop, dst_prop); } } }; struct Prims_Usecase { size_t num_seeds{0}; size_t K{0}; bool with_replacement{false}; bool use_invalid_value{false}; bool test_weighted{false}; bool check_correctness{true}; }; template <typename input_usecase_t> class Tests_MGPerVRandomSelectTransformOutgoingE : public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> { public: Tests_MGPerVRandomSelectTransformOutgoingE() {} static void SetUpTestCase() { handle_ = cugraph::test::initialize_mg_handle(); } static void TearDownTestCase() { handle_.reset(); } virtual void SetUp() {} virtual void TearDown() {} // Verify the results of per_v_random_select_transform_outgoing_e primitive template <typename vertex_t, typename edge_t, typename weight_t, typename property_t> void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase) { HighResTimer hr_timer{}; auto const comm_rank = handle_->get_comms().get_rank(); auto const comm_size = handle_->get_comms().get_size(); // 1. 
create MG graph if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement handle_->get_comms().barrier(); hr_timer.start("MG Construct graph"); } cugraph::graph_t<vertex_t, edge_t, false, true> mg_graph(*handle_); std::optional<rmm::device_uvector<vertex_t>> mg_renumber_map{std::nullopt}; std::tie(mg_graph, std::ignore, mg_renumber_map) = cugraph::test::construct_graph<vertex_t, edge_t, weight_t, false, true>( *handle_, input_usecase, prims_usecase.test_weighted, true); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement handle_->get_comms().barrier(); hr_timer.stop(); hr_timer.display_and_clear(std::cout); } auto mg_graph_view = mg_graph.view(); // 2. run MG per_v_random_select_transform_outgoing_e primitive const int hash_bin_count = 5; auto mg_vertex_prop = cugraph::test::generate<vertex_t, property_t>::vertex_property( *handle_, *mg_renumber_map, hash_bin_count); auto mg_src_prop = cugraph::test::generate<vertex_t, property_t>::src_property( *handle_, mg_graph_view, mg_vertex_prop); auto mg_dst_prop = cugraph::test::generate<vertex_t, property_t>::dst_property( *handle_, mg_graph_view, mg_vertex_prop); raft::random::RngState rng_state(static_cast<uint64_t>(handle_->get_comms().get_rank())); auto select_count = prims_usecase.with_replacement ? prims_usecase.num_seeds : std::min(prims_usecase.num_seeds, static_cast<size_t>(mg_graph_view.number_of_vertices())); auto mg_vertex_buffer = cugraph::select_random_vertices( *handle_, mg_graph_view, std::optional<raft::device_span<vertex_t const>>{std::nullopt}, rng_state, select_count, prims_usecase.with_replacement, false); constexpr size_t bucket_idx_cur = 0; constexpr size_t num_buckets = 1; cugraph::vertex_frontier_t<vertex_t, void, true, false> mg_vertex_frontier(*handle_, num_buckets); mg_vertex_frontier.bucket(bucket_idx_cur) .insert(cugraph::get_dataframe_buffer_begin(mg_vertex_buffer), cugraph::get_dataframe_buffer_end(mg_vertex_buffer)); using result_t = decltype(cugraph::thrust_tuple_cat(thrust::tuple<vertex_t, vertex_t>{}, cugraph::to_thrust_tuple(property_t{}), cugraph::to_thrust_tuple(property_t{}))); std::optional<result_t> invalid_value{std::nullopt}; if (prims_usecase.use_invalid_value) { invalid_value = result_t{}; thrust::get<0>(*invalid_value) = cugraph::invalid_vertex_id<vertex_t>::value; thrust::get<1>(*invalid_value) = cugraph::invalid_vertex_id<vertex_t>::value; } if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement handle_->get_comms().barrier(); hr_timer.start("MG per_v_random_select_transform_outgoing_e"); } auto [mg_sample_offsets, mg_sample_e_op_results] = cugraph::per_v_random_select_transform_outgoing_e(*handle_, mg_graph_view, mg_vertex_frontier.bucket(bucket_idx_cur), mg_src_prop.view(), mg_dst_prop.view(), cugraph::edge_dummy_property_t{}.view(), e_op_t<vertex_t, property_t>{}, rng_state, prims_usecase.K, prims_usecase.with_replacement, invalid_value); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement handle_->get_comms().barrier(); hr_timer.stop(); hr_timer.display_and_clear(std::cout); } // 3. 
validate MG results if (prims_usecase.check_correctness) { cugraph::unrenumber_int_vertices<vertex_t, true>( *handle_, mg_vertex_frontier.bucket(bucket_idx_cur).begin(), mg_vertex_frontier.bucket(bucket_idx_cur).size(), (*mg_renumber_map).data(), mg_graph_view.vertex_partition_range_lasts()); std::optional<rmm::device_uvector<size_t>> mg_sample_counts{std::nullopt}; if (mg_sample_offsets) { mg_sample_counts = rmm::device_uvector<size_t>( mg_vertex_frontier.bucket(bucket_idx_cur).size(), handle_->get_stream()); thrust::adjacent_difference(handle_->get_thrust_policy(), (*mg_sample_offsets).begin() + 1, (*mg_sample_offsets).end(), (*mg_sample_counts).begin()); } cugraph::unrenumber_int_vertices<vertex_t, true>( *handle_, std::get<0>(mg_sample_e_op_results).data(), std::get<0>(mg_sample_e_op_results).size(), (*mg_renumber_map).data(), mg_graph_view.vertex_partition_range_lasts()); cugraph::unrenumber_int_vertices<vertex_t, true>( *handle_, std::get<1>(mg_sample_e_op_results).data(), std::get<1>(mg_sample_e_op_results).size(), (*mg_renumber_map).data(), mg_graph_view.vertex_partition_range_lasts()); auto mg_aggregate_frontier_vertices = cugraph::test::device_gatherv( *handle_, raft::device_span<vertex_t const>(mg_vertex_frontier.bucket(bucket_idx_cur).begin(), mg_vertex_frontier.bucket(bucket_idx_cur).size())); std::optional<rmm::device_uvector<size_t>> mg_aggregate_sample_counts{std::nullopt}; if (mg_sample_counts) { mg_aggregate_sample_counts = cugraph::test::device_gatherv( *handle_, raft::device_span<size_t const>((*mg_sample_counts).data(), (*mg_sample_counts).size())); } auto mg_aggregate_sample_e_op_results = cugraph::allocate_dataframe_buffer<result_t>(0, handle_->get_stream()); std::get<0>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<0>(mg_sample_e_op_results).data(), std::get<0>(mg_sample_e_op_results).size()); std::get<1>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<1>(mg_sample_e_op_results).data(), std::get<1>(mg_sample_e_op_results).size()); std::get<2>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<2>(mg_sample_e_op_results).data(), std::get<2>(mg_sample_e_op_results).size()); std::get<3>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<3>(mg_sample_e_op_results).data(), std::get<3>(mg_sample_e_op_results).size()); if constexpr (cugraph::is_thrust_tuple_of_arithmetic<property_t>::value) { std::get<4>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<4>(mg_sample_e_op_results).data(), std::get<4>(mg_sample_e_op_results).size()); std::get<5>(mg_aggregate_sample_e_op_results) = cugraph::test::device_gatherv(*handle_, std::get<5>(mg_sample_e_op_results).data(), std::get<5>(mg_sample_e_op_results).size()); } cugraph::graph_t<vertex_t, edge_t, false, false> sg_graph(*handle_); std::tie(sg_graph, std::ignore, std::ignore) = cugraph::test::mg_graph_to_sg_graph( *handle_, mg_graph_view, std::optional<cugraph::edge_property_view_t<edge_t, weight_t const*>>{std::nullopt}, std::make_optional<raft::device_span<vertex_t const>>((*mg_renumber_map).data(), (*mg_renumber_map).size()), false); if (handle_->get_comms().get_rank() == 0) { std::optional<rmm::device_uvector<size_t>> mg_aggregate_sample_offsets{std::nullopt}; if (mg_aggregate_sample_counts) { mg_aggregate_sample_offsets = rmm::device_uvector<size_t>( (*mg_aggregate_sample_counts).size() + 1, handle_->get_stream()); 
(*mg_aggregate_sample_offsets).set_element_to_zero_async(0, handle_->get_stream()); thrust::inclusive_scan(handle_->get_thrust_policy(), (*mg_aggregate_sample_counts).begin(), (*mg_aggregate_sample_counts).end(), (*mg_aggregate_sample_offsets).begin() + 1); } auto sg_graph_view = sg_graph.view(); rmm::device_uvector<edge_t> sg_offsets(sg_graph_view.number_of_vertices() + vertex_t{1}, handle_->get_stream()); thrust::copy(handle_->get_thrust_policy(), sg_graph_view.local_edge_partition_view().offsets().begin(), sg_graph_view.local_edge_partition_view().offsets().end(), sg_offsets.begin()); rmm::device_uvector<vertex_t> sg_indices(sg_graph_view.number_of_edges(), handle_->get_stream()); thrust::copy(handle_->get_thrust_policy(), sg_graph_view.local_edge_partition_view().indices().begin(), sg_graph_view.local_edge_partition_view().indices().end(), sg_indices.begin()); auto num_invalids = static_cast<size_t>(thrust::count_if( handle_->get_thrust_policy(), thrust::make_counting_iterator(size_t{0}), thrust::make_counting_iterator(mg_aggregate_frontier_vertices.size()), [frontier_vertex_first = mg_aggregate_frontier_vertices.begin(), sample_offsets = mg_aggregate_sample_offsets ? thrust::make_optional<size_t const*>( (*mg_aggregate_sample_offsets).data()) : thrust::nullopt, sample_e_op_result_first = cugraph::get_dataframe_buffer_begin(mg_aggregate_sample_e_op_results), sg_offsets = sg_offsets.begin(), sg_indices = sg_indices.begin(), K = prims_usecase.K, with_replacement = prims_usecase.with_replacement, invalid_value = invalid_value ? thrust::make_optional<result_t>(*invalid_value) : thrust::nullopt, property_transform = cugraph::test::detail::property_transform<vertex_t, property_t>{ hash_bin_count}] __device__(size_t i) { auto v = *(frontier_vertex_first + i); // check sample_offsets auto offset_first = sample_offsets ? *(*sample_offsets + i) : K * i; auto offset_last = sample_offsets ? 
*(*sample_offsets + (i + 1)) : K * (i + 1); if (!sample_offsets) { size_t num_valids{0}; for (size_t j = offset_first; j < offset_last; ++j) { auto e_op_result = *(sample_e_op_result_first + j); if (e_op_result == *invalid_value) { break; } ++num_valids; } for (size_t j = offset_first + num_valids; j < offset_last; ++j) { auto e_op_result = *(sample_e_op_result_first + j); if (e_op_result != *invalid_value) { return true; } } offset_last = offset_first + num_valids; } auto count = offset_last - offset_first; auto out_degree = *(sg_offsets + v + 1) - *(sg_offsets + v); if (with_replacement) { if ((out_degree > 0 && count != K) || (out_degree == 0 && count != 0)) { return true; } } else { if (count != std::min(static_cast<size_t>(out_degree), K)) { return true; } } // check sample_e_op_results for (size_t j = offset_first; j < offset_last; ++j) { auto e_op_result = *(sample_e_op_result_first + j); auto sg_src = thrust::get<0>(e_op_result); auto sg_dst = thrust::get<1>(e_op_result); auto sg_nbr_first = sg_indices + *(sg_offsets + sg_src); auto sg_nbr_last = sg_indices + *(sg_offsets + (sg_src + vertex_t{1})); if (!thrust::binary_search(thrust::seq, sg_nbr_first, sg_nbr_last, sg_dst)) { // assumed neighbor lists are sorted return true; } property_t src_val{}; property_t dst_val{}; if constexpr (cugraph::is_thrust_tuple_of_arithmetic<property_t>::value) { src_val = thrust::make_tuple(thrust::get<2>(e_op_result), thrust::get<3>(e_op_result)); dst_val = thrust::make_tuple(thrust::get<4>(e_op_result), thrust::get<5>(e_op_result)); } else { src_val = thrust::get<2>(e_op_result); dst_val = thrust::get<3>(e_op_result); } if (src_val != property_transform(sg_src)) { return true; } if (dst_val != property_transform(sg_dst)) { return true; } if (!with_replacement) { auto sg_dst_first = thrust::get<1>(sample_e_op_result_first.get_iterator_tuple()) + offset_first; auto sg_dst_last = thrust::get<1>(sample_e_op_result_first.get_iterator_tuple()) + offset_last; auto dst_count = thrust::count(thrust::seq, sg_dst_first, sg_dst_last, sg_dst); // this could be inefficient for high-degree vertices, if // we sort [sg_dst_first, sg_dst_last) we can use binary // search but we may better not modify the sampling output // and allow inefficiency as this is just for testing auto multiplicity = thrust::distance( thrust::lower_bound(thrust::seq, sg_nbr_first, sg_nbr_last, sg_dst), thrust::upper_bound(thrust::seq, sg_nbr_first, sg_nbr_last, sg_dst)); // this assumes neighbor lists are sorted if (dst_count > multiplicity) { return true; } } } return false; })); ASSERT_TRUE(num_invalids == 0); } } } private: static std::unique_ptr<raft::handle_t> handle_; }; template <typename input_usecase_t> std::unique_ptr<raft::handle_t> Tests_MGPerVRandomSelectTransformOutgoingE<input_usecase_t>::handle_ = nullptr; using Tests_MGPerVRandomSelectTransformOutgoingE_File = Tests_MGPerVRandomSelectTransformOutgoingE<cugraph::test::File_Usecase>; using Tests_MGPerVRandomSelectTransformOutgoingE_Rmat = Tests_MGPerVRandomSelectTransformOutgoingE<cugraph::test::Rmat_Usecase>; TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_File, CheckInt32Int32FloatTupleIntFloat) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt32Int32FloatTupleIntFloat) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, thrust::tuple<int, float>>( std::get<0>(param), 
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt32Int64FloatTupleIntFloat) { auto param = GetParam(); run_current_test<int32_t, int64_t, float, thrust::tuple<int, float>>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt64Int64FloatTupleIntFloat) { auto param = GetParam(); run_current_test<int64_t, int64_t, float, thrust::tuple<int, float>>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_File, CheckInt32Int32Float) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int>(std::get<0>(param), std::get<1>(param)); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt32Int32Float) { auto param = GetParam(); run_current_test<int32_t, int32_t, float, int>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt32Int64Float) { auto param = GetParam(); run_current_test<int32_t, int64_t, float, int>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } TEST_P(Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, CheckInt64Int64Float) { auto param = GetParam(); run_current_test<int64_t, int64_t, float, int>( std::get<0>(param), cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param))); } INSTANTIATE_TEST_SUITE_P( file_test, Tests_MGPerVRandomSelectTransformOutgoingE_File, ::testing::Combine( ::testing::Values(Prims_Usecase{size_t{1000}, size_t{4}, false, false, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, false, true, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, true, false, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, true, true, false, true}), ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx")))); INSTANTIATE_TEST_SUITE_P( rmat_small_test, Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, ::testing::Combine( ::testing::Values(Prims_Usecase{size_t{1000}, size_t{4}, false, false, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, false, true, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, true, false, false, true}, Prims_Usecase{size_t{1000}, size_t{4}, true, true, false, true}), ::testing::Values(cugraph::test::Rmat_Usecase(10, 16, 0.57, 0.19, 0.19, 0, false, false)))); INSTANTIATE_TEST_SUITE_P( rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with --gtest_filter to select only the rmat_benchmark_test with a specific vertex & edge type combination) by command line arguments and do not include more than one Rmat_Usecase that differ only in scale or edge factor (to avoid running same benchmarks more than once) */ Tests_MGPerVRandomSelectTransformOutgoingE_Rmat, ::testing::Combine( ::testing::Values(Prims_Usecase{size_t{10000000}, size_t{25}, false, false, false, false}, Prims_Usecase{size_t{10000000}, size_t{25}, false, true, false, false}, Prims_Usecase{size_t{10000000}, size_t{25}, true, false, false, false}, Prims_Usecase{size_t{10000000}, size_t{25}, true, true, false, false}), 
::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false)))); CUGRAPH_MG_TEST_PROGRAM_MAIN()
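// Quick sanity sketch of the invariant the count_if lambda above enforces,
// assuming K = 4 and a frontier vertex v with out-degree 3:
//   with_replacement == true  -> exactly K = 4 samples are returned for v
//                                (0 samples only when out-degree == 0);
//   with_replacement == false -> min(out_degree, K) = 3 samples, and each
//                                sampled dst may appear at most as many times
//                                as the (src, dst) edge multiplicity.
// Every sampled dst must also be found by binary search in v's (sorted)
// neighbor range [sg_offsets[v], sg_offsets[v + 1]).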
35aa69a3ad6c8f6a5fa5d507bd384db2ab922252.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/column_wrapper.cuh> #include <tests/utilities/cudf_test_fixtures.h> #include <cudf/utilities/legacy/wrapper_types.hpp> #include <utilities/device_atomics.cuh> #include <gmock/gmock.h> #include <gtest/gtest.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <bitset> #include <cstdint> #include <random> #include <iostream> template<typename T> __global__ void gpu_atomic_test(T *result, T *data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAdd(&result[0], data[id]); atomicMin(&result[1], data[id]); atomicMax(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{}); } } template<typename T, typename BinaryOp> __device__ T atomic_op(T* addr, T const & value, BinaryOp op) { T old_value = *addr; T assumed; do { assumed = old_value; const T new_value = op(old_value, value); old_value = atomicCAS(addr, assumed, new_value); } while (assumed != old_value); return old_value; } template<typename T> __global__ void gpu_atomicCAS_test(T *result, T *data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomic_op(&result[0], data[id], cudf::DeviceSum{}); atomic_op(&result[1], data[id], cudf::DeviceMin{}); atomic_op(&result[2], data[id], cudf::DeviceMax{}); atomic_op(&result[3], data[id], cudf::DeviceSum{}); atomic_op(&result[4], data[id], cudf::DeviceMin{}); atomic_op(&result[5], data[id], cudf::DeviceMax{}); } } template<typename T> __global__ void gpu_atomic_bitwiseOp_test(T *result, T *data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAnd(&result[0], data[id]); atomicOr(&result[1], data[id]); atomicXor(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{}); } } template <typename T> struct AtomicsTest : public GdfTest { void atomic_test(std::vector<int> const & v_input, bool is_cas_test, int block_size=0, int grid_size=1) { size_t vec_size = v_input.size(); // use transform from std::vector<int> instead. 
std::vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t(x) ; return t; } ); T exact[3]; exact[0] = std::accumulate(v.begin(), v.end(), T{0}); exact[1] = *( std::min_element(v.begin(), v.end()) ); exact[2] = *( std::max_element(v.begin(), v.end()) ); std::vector<T> result_init(6); result_init[0] = T{0}; result_init[1] = std::numeric_limits<T>::max(); result_init[2] = std::numeric_limits<T>::min(); result_init[3] = result_init[0]; result_init[4] = result_init[1]; result_init[5] = result_init[2]; thrust::device_vector<T> dev_data(v); thrust::device_vector<T> dev_result(result_init); if( block_size == 0) block_size = vec_size; if( is_cas_test ){ hipLaunchKernelGGL(( gpu_atomicCAS_test), dim3(grid_size), dim3(block_size), 0, 0, dev_result.data().get(), dev_data.data().get(), vec_size); }else{ hipLaunchKernelGGL(( gpu_atomic_test), dim3(grid_size), dim3(block_size), 0, 0, dev_result.data().get(), dev_data.data().get(), vec_size); } thrust::host_vector<T> host_result(dev_result); hipDeviceSynchronize(); CUDA_CHECK_LAST(); EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed"; EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed"; EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed"; EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed"; EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed"; } }; using TestingTypes = ::testing::Types< int8_t, int16_t, int32_t, int64_t, float, double, cudf::date32, cudf::date64, cudf::timestamp, cudf::category, cudf::nvstring_category, cudf::bool8>; TYPED_TEST_CASE(AtomicsTest, TestingTypes); // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOps) { bool is_cas_test = false; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCAS) { bool is_cas_test = true; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOpsGrid) { bool is_cas_test = false; int block_size=3; int grid_size=4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCASGrid) { bool is_cas_test = true; int block_size=3; int grid_size=4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for large array TYPED_TEST(AtomicsTest, atomicOpsRandom) { bool is_cas_test = false; int block_size=256; int grid_size=64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&](){ return dist(engine);} ); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } 
TYPED_TEST(AtomicsTest, atomicCASRandom) { bool is_cas_test = true; int block_size=256; int grid_size=64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&](){ return dist(engine);} ); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } // ------------------------------------------------------------------ template <typename T> struct AtomicsBitwiseOpTest : public GdfTest { void atomic_test(std::vector<uint64_t> const & v_input, int block_size=0, int grid_size=1) { size_t vec_size = v_input.size(); std::vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t(x) ; return t; } ); std::vector<T> identity = {T(~0ull), T(0), T(0), T(~0ull), T(0), T(0)}; T exact[3]; exact[0] = std::accumulate(v.begin(), v.end(), identity[0], [](T acc, uint64_t i) { return acc & T(i); }); exact[1] = std::accumulate(v.begin(), v.end(), identity[1], [](T acc, uint64_t i) { return acc | T(i); }); exact[2] = std::accumulate(v.begin(), v.end(), identity[2], [](T acc, uint64_t i) { return acc ^ T(i); }); thrust::device_vector<T> dev_result(identity); thrust::device_vector<T> dev_data(v); if( block_size == 0) block_size = vec_size; hipLaunchKernelGGL(( gpu_atomic_bitwiseOp_test<T>) , dim3(grid_size), dim3(block_size), 0, 0, reinterpret_cast<T*>( dev_result.data().get() ), reinterpret_cast<T*>( dev_data.data().get() ), vec_size); thrust::host_vector<T> host_result(dev_result); hipDeviceSynchronize(); CUDA_CHECK_LAST(); print_exact(exact, "exact"); print_exact(host_result.data(), "result"); EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed"; EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed"; EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed"; EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed"; EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed"; } void print_exact(const T *v, const char* msg){ std::cout << std::hex << std::showbase; std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", " << +v[2] << "}" << std::endl; } }; using BitwiseOpTestingTypes = ::testing::Types< int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t >; TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes); TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) { { // test for AND, XOR std::vector<uint64_t> input_array( {0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc, 0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc}); this->atomic_test(input_array); } { // test for OR, XOR std::vector<uint64_t> input_array( {0x01, 0xfc02, 0x1dff03, 0x1100a0b0801d0003, 0x8000000000000000, 0x1dff03}); this->atomic_test(input_array); } }
35aa69a3ad6c8f6a5fa5d507bd384db2ab922252.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/column_wrapper.cuh> #include <tests/utilities/cudf_test_fixtures.h> #include <cudf/utilities/legacy/wrapper_types.hpp> #include <utilities/device_atomics.cuh> #include <gmock/gmock.h> #include <gtest/gtest.h> #include <thrust/device_vector.h> #include <thrust/transform.h> #include <bitset> #include <cstdint> #include <random> #include <iostream> template<typename T> __global__ void gpu_atomic_test(T *result, T *data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAdd(&result[0], data[id]); atomicMin(&result[1], data[id]); atomicMax(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{}); } } template<typename T, typename BinaryOp> __device__ T atomic_op(T* addr, T const & value, BinaryOp op) { T old_value = *addr; T assumed; do { assumed = old_value; const T new_value = op(old_value, value); old_value = atomicCAS(addr, assumed, new_value); } while (assumed != old_value); return old_value; } template<typename T> __global__ void gpu_atomicCAS_test(T *result, T *data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomic_op(&result[0], data[id], cudf::DeviceSum{}); atomic_op(&result[1], data[id], cudf::DeviceMin{}); atomic_op(&result[2], data[id], cudf::DeviceMax{}); atomic_op(&result[3], data[id], cudf::DeviceSum{}); atomic_op(&result[4], data[id], cudf::DeviceMin{}); atomic_op(&result[5], data[id], cudf::DeviceMax{}); } } template<typename T> __global__ void gpu_atomic_bitwiseOp_test(T *result, T *data, size_t size) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; size_t step = blockDim.x * gridDim.x; for (; id < size; id += step) { atomicAnd(&result[0], data[id]); atomicOr(&result[1], data[id]); atomicXor(&result[2], data[id]); cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{}); cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{}); cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{}); } } template <typename T> struct AtomicsTest : public GdfTest { void atomic_test(std::vector<int> const & v_input, bool is_cas_test, int block_size=0, int grid_size=1) { size_t vec_size = v_input.size(); // use transform from std::vector<int> instead. 
std::vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t(x) ; return t; } ); T exact[3]; exact[0] = std::accumulate(v.begin(), v.end(), T{0}); exact[1] = *( std::min_element(v.begin(), v.end()) ); exact[2] = *( std::max_element(v.begin(), v.end()) ); std::vector<T> result_init(6); result_init[0] = T{0}; result_init[1] = std::numeric_limits<T>::max(); result_init[2] = std::numeric_limits<T>::min(); result_init[3] = result_init[0]; result_init[4] = result_init[1]; result_init[5] = result_init[2]; thrust::device_vector<T> dev_data(v); thrust::device_vector<T> dev_result(result_init); if( block_size == 0) block_size = vec_size; if( is_cas_test ){ gpu_atomicCAS_test<<<grid_size, block_size>>>( dev_result.data().get(), dev_data.data().get(), vec_size); }else{ gpu_atomic_test<<<grid_size, block_size>>>( dev_result.data().get(), dev_data.data().get(), vec_size); } thrust::host_vector<T> host_result(dev_result); cudaDeviceSynchronize(); CUDA_CHECK_LAST(); EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed"; EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed"; EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed"; EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed"; EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed"; } }; using TestingTypes = ::testing::Types< int8_t, int16_t, int32_t, int64_t, float, double, cudf::date32, cudf::date64, cudf::timestamp, cudf::category, cudf::nvstring_category, cudf::bool8>; TYPED_TEST_CASE(AtomicsTest, TestingTypes); // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOps) { bool is_cas_test = false; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCAS) { bool is_cas_test = true; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test); } // tests for atomicAdd/Min/Max TYPED_TEST(AtomicsTest, atomicOpsGrid) { bool is_cas_test = false; int block_size=3; int grid_size=4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for atomicCAS TYPED_TEST(AtomicsTest, atomicCASGrid) { bool is_cas_test = true; int block_size=3; int grid_size=4; std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); this->atomic_test(input_array, is_cas_test, block_size, grid_size); std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33}); this->atomic_test(input_array2, is_cas_test, block_size, grid_size); } // tests for large array TYPED_TEST(AtomicsTest, atomicOpsRandom) { bool is_cas_test = false; int block_size=256; int grid_size=64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&](){ return dist(engine);} ); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } TYPED_TEST(AtomicsTest, atomicCASRandom) { bool is_cas_test = true; int 
block_size=256; int grid_size=64; std::vector<int> input_array(grid_size * block_size); std::default_random_engine engine; std::uniform_int_distribution<> dist(-10, 10); std::generate(input_array.begin(), input_array.end(), [&](){ return dist(engine);} ); this->atomic_test(input_array, is_cas_test, block_size, grid_size); } // ------------------------------------------------------------------ template <typename T> struct AtomicsBitwiseOpTest : public GdfTest { void atomic_test(std::vector<uint64_t> const & v_input, int block_size=0, int grid_size=1) { size_t vec_size = v_input.size(); std::vector<T> v(vec_size); std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { T t(x) ; return t; } ); std::vector<T> identity = {T(~0ull), T(0), T(0), T(~0ull), T(0), T(0)}; T exact[3]; exact[0] = std::accumulate(v.begin(), v.end(), identity[0], [](T acc, uint64_t i) { return acc & T(i); }); exact[1] = std::accumulate(v.begin(), v.end(), identity[1], [](T acc, uint64_t i) { return acc | T(i); }); exact[2] = std::accumulate(v.begin(), v.end(), identity[2], [](T acc, uint64_t i) { return acc ^ T(i); }); thrust::device_vector<T> dev_result(identity); thrust::device_vector<T> dev_data(v); if( block_size == 0) block_size = vec_size; gpu_atomic_bitwiseOp_test<T> <<<grid_size, block_size>>> ( reinterpret_cast<T*>( dev_result.data().get() ), reinterpret_cast<T*>( dev_data.data().get() ), vec_size); thrust::host_vector<T> host_result(dev_result); cudaDeviceSynchronize(); CUDA_CHECK_LAST(); print_exact(exact, "exact"); print_exact(host_result.data(), "result"); EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed"; EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed"; EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed"; EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed"; EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed"; EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed"; } void print_exact(const T *v, const char* msg){ std::cout << std::hex << std::showbase; std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", " << +v[2] << "}" << std::endl; } }; using BitwiseOpTestingTypes = ::testing::Types< int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t >; TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes); TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) { { // test for AND, XOR std::vector<uint64_t> input_array( {0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc, 0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc}); this->atomic_test(input_array); } { // test for OR, XOR std::vector<uint64_t> input_array( {0x01, 0xfc02, 0x1dff03, 0x1100a0b0801d0003, 0x8000000000000000, 0x1dff03}); this->atomic_test(input_array); } }
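// Spot check for the first atomicOps input {0, 6, 0, -14, 13, 64, -13, -20, 45},
// assuming no overflow in T:
//   sum = 81, min = -20, max = 64
// and the CAS variant must reproduce the same three values, since the loop in
// atomic_op retries
//   old = atomicCAS(addr, assumed, op(old, value))
// until *addr still equals `assumed`, i.e. until the read-modify-write lands
// atomically. A host-side sketch of the same reduction (mirroring what the
// fixture stores in exact[]):
//   T sum = std::accumulate(v.begin(), v.end(), T{0});
//   T mn  = *std::min_element(v.begin(), v.end());
//   T mx  = *std::max_element(v.begin(), v.end());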
b749972cf8a97a1f90bc024f8cde1d26498be4a9.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2020 Tier IV, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* * MIT License * Copyright (c) 2019-2020 Wang Xinyu * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <stdio.h> #include "hip/hip_runtime_api.h" #include "mish.hpp" namespace yolo { __device__ float mish(float x) { float e = __expf(x); float n = e * e + 2 * e; if (x <= -0.6f) return x * __fdividef(n, n + 2); return x - 2 * __fdividef(x, n + 2); } template <typename T, unsigned TPB> __global__ void mishKernel(const T * input, T * output, int num_elem) { int idx = threadIdx.x + TPB * blockIdx.x; if (idx >= num_elem) return; output[idx] = mish(input[idx]); } int mish(hipStream_t stream, const float * input, float * output, int n) { constexpr int blockSize = 256; const int gridSize = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( mishKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, input, output, n); return 0; } } // namespace yolo
b749972cf8a97a1f90bc024f8cde1d26498be4a9.cu
// Copyright 2020 Tier IV, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* * MIT License * Copyright (c) 2019-2020 Wang Xinyu * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <stdio.h> #include "cuda_runtime_api.h" #include "mish.hpp" namespace yolo { __device__ float mish(float x) { float e = __expf(x); float n = e * e + 2 * e; if (x <= -0.6f) return x * __fdividef(n, n + 2); return x - 2 * __fdividef(x, n + 2); } template <typename T, unsigned TPB> __global__ void mishKernel(const T * input, T * output, int num_elem) { int idx = threadIdx.x + TPB * blockIdx.x; if (idx >= num_elem) return; output[idx] = mish(input[idx]); } int mish(cudaStream_t stream, const float * input, float * output, int n) { constexpr int blockSize = 256; const int gridSize = (n + blockSize - 1) / blockSize; mishKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(input, output, n); return 0; } } // namespace yolo
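// Derivation of the closed form used in mish() above:
//   mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)).
// With e = e^x, n = e^2 + 2e, and y = 1 + e (so y^2 = n + 1):
//   tanh(ln y) = (y^2 - 1) / (y^2 + 1) = n / (n + 2),
// hence mish(x) = x * n / (n + 2) = x - 2x / (n + 2); the two return
// statements are algebraically identical, and the x <= -0.6f split only picks
// the numerically better-conditioned form. A reference value can be checked
// on the host (a sketch, not part of this plugin):
//   float ref = x * tanhf(log1pf(expf(x)));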
2759a91108dd8e65376dfac52b1d7f7760b704d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hiprand/hiprand.h> #define CHUNK 1 //Run CHUNK blocks, each with 1024 threads (or with the specified argument) and check error #define LOG 1024 // Print progress each LOG iterations #define LIMIT 1024*1024 // LIMIT of iterations #include "../common.h" __global__ void MontecarloPI(unsigned long *inside, unsigned long *outside, double *X, double *Y) { __shared__ unsigned int in; __shared__ unsigned int out; if (threadIdx.x == 0) { in = 0; out = 0; } __syncthreads(); double x = X[threadIdx.x]; double y = Y[threadIdx.x]; if (x * x + y * y < 1.0) { atomicInc(&in, blockDim.x + 1); // At most blockDim.x threads will sum here } else { atomicInc(&out, blockDim.x + 1); } __syncthreads(); if (threadIdx.x == 0) { *inside += in; *outside += out; } } int main(int argc, char *argv[]) { unsigned int digits; unsigned int threads; double precision; getParams(argc, argv, &threads, &digits, &precision); srand(SEED); unsigned int randomSize = threads; double *X, *Y; hipMalloc((void **) &X, randomSize * sizeof(double)); hipMalloc((void **) &Y, randomSize * sizeof(double)); hiprandGenerator_t rnd; hiprandCreateGenerator(&rnd, HIPRAND_RNG_PSEUDO_MTGP32); hiprandSetPseudoRandomGeneratorSeed(rnd, SEED); unsigned long h_inside = 0, h_outside = 0; unsigned long *d_inside, *d_outside; hipMalloc((void **) &d_inside, sizeof(unsigned long)); hipMalloc((void **) &d_outside, sizeof(unsigned long)); hipMemcpy(d_inside, &h_inside, sizeof(unsigned long), hipMemcpyHostToDevice); hipMemcpy(d_outside, &h_outside, sizeof(unsigned long), hipMemcpyHostToDevice); double pi, error = 1.0; unsigned long i = 0; while (error > precision && i < LIMIT) { hiprandGenerateUniformDouble(rnd, X, randomSize); hiprandGenerateUniformDouble(rnd, Y, randomSize); //@formatter:off hipLaunchKernelGGL(( MontecarloPI), dim3(CHUNK), dim3(threads), 0, 0, d_inside, d_outside, X, Y); //@formatter:on hipDeviceSynchronize(); hipMemcpy(&h_inside, d_inside, sizeof(unsigned long), hipMemcpyDeviceToHost); hipMemcpy(&h_outside, d_outside, sizeof(unsigned long), hipMemcpyDeviceToHost); pi = 4.0 * h_inside / (h_outside + h_inside); error = getError(pi); printLog(precision, pi, error, ++i); } hipFree(d_inside); hipFree(d_outside); hipDeviceReset(); return EXIT_SUCCESS; }
2759a91108dd8e65376dfac52b1d7f7760b704d1.cu
#include <curand.h> #define CHUNK 1 //Run CHUNK blocks, each with 1024 threads (or with the specified argument) and check error #define LOG 1024 // Print progress each LOG iterations #define LIMIT 1024*1024 // LIMIT of iterations #include "../common.h" __global__ void MontecarloPI(unsigned long *inside, unsigned long *outside, double *X, double *Y) { __shared__ unsigned int in; __shared__ unsigned int out; if (threadIdx.x == 0) { in = 0; out = 0; } __syncthreads(); double x = X[threadIdx.x]; double y = Y[threadIdx.x]; if (x * x + y * y < 1.0) { atomicInc(&in, blockDim.x + 1); // At most blockDim.x threads will sum here } else { atomicInc(&out, blockDim.x + 1); } __syncthreads(); if (threadIdx.x == 0) { *inside += in; *outside += out; } } int main(int argc, char *argv[]) { unsigned int digits; unsigned int threads; double precision; getParams(argc, argv, &threads, &digits, &precision); srand(SEED); unsigned int randomSize = threads; double *X, *Y; cudaMalloc((void **) &X, randomSize * sizeof(double)); cudaMalloc((void **) &Y, randomSize * sizeof(double)); curandGenerator_t rnd; curandCreateGenerator(&rnd, CURAND_RNG_PSEUDO_MTGP32); curandSetPseudoRandomGeneratorSeed(rnd, SEED); unsigned long h_inside = 0, h_outside = 0; unsigned long *d_inside, *d_outside; cudaMalloc((void **) &d_inside, sizeof(unsigned long)); cudaMalloc((void **) &d_outside, sizeof(unsigned long)); cudaMemcpy(d_inside, &h_inside, sizeof(unsigned long), cudaMemcpyHostToDevice); cudaMemcpy(d_outside, &h_outside, sizeof(unsigned long), cudaMemcpyHostToDevice); double pi, error = 1.0; unsigned long i = 0; while (error > precision && i < LIMIT) { curandGenerateUniformDouble(rnd, X, randomSize); curandGenerateUniformDouble(rnd, Y, randomSize); //@formatter:off MontecarloPI<<<CHUNK, threads>>>(d_inside, d_outside, X, Y); //@formatter:on cudaDeviceSynchronize(); cudaMemcpy(&h_inside, d_inside, sizeof(unsigned long), cudaMemcpyDeviceToHost); cudaMemcpy(&h_outside, d_outside, sizeof(unsigned long), cudaMemcpyDeviceToHost); pi = 4.0 * h_inside / (h_outside + h_inside); error = getError(pi); printLog(precision, pi, error, ++i); } cudaFree(d_inside); cudaFree(d_outside); cudaDeviceReset(); return EXIT_SUCCESS; }
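// Estimator behind MontecarloPI: points (x, y) are uniform in [0, 1)^2, so the
// probability of landing inside the quarter circle x^2 + y^2 < 1 is pi / 4 and
//   pi ~= 4 * inside / (inside + outside).
// Each kernel launch adds CHUNK * threads samples, and the Monte Carlo error
// shrinks roughly as 1 / sqrt(total samples), so tight `precision` values may
// hit the LIMIT iteration cap before converging (a rough estimate, not a
// guarantee of the observed error).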
9f12cf1b2da406caae3979ab3bcf2278565b76c5.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hipfft.h> #include <hermes/hermes.h> #include <iostream> using namespace hermes::cuda; #define NX 256 #define NY 128 __global__ void NormalizeIFFT(float *g_data, int width, int height, float N) { // index = x * height + y unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = yIndex * width + xIndex; g_data[index] = g_data[index] / N; } int main() { hipfftReal input[NX][NY]; for (int x = 0; x < NX; x++) for (int y = 0; y < NY; y++) input[x][y] = x * y; hipfftReal *d_input; CUDA_CHECK(hipMalloc((void **)&d_input, sizeof(hipfftReal) * NX * NY)); CUDA_CHECK(hipMemcpy(d_input, input, sizeof(hipfftReal) * NX * NY, hipMemcpyHostToDevice)); hipfftComplex *d_output; CUDA_CHECK( hipMalloc((void **)&d_output, sizeof(hipfftComplex) * NX * (NY / 2 + 1))); hipfftHandle forwardPlan, inversePlan; if (hipfftPlan2d(&forwardPlan, NX, NY, HIPFFT_R2C) != HIPFFT_SUCCESS) { std::cerr << "CUFFT Error: Failed to create plan\n"; return -1; } if (hipfftPlan2d(&inversePlan, NX, NY, HIPFFT_C2R) != HIPFFT_SUCCESS) { std::cerr << "CUFFT Error: Failed to create plan\n"; return -1; } if (hipfftExecR2C(forwardPlan, d_input, d_output) != HIPFFT_SUCCESS) { fprintf(stderr, "CUFFT Error: Unable to execute plan\n"); return -1; } CUDA_CHECK(hipDeviceSynchronize()); if (hipfftExecC2R(inversePlan, d_output, d_input) != HIPFFT_SUCCESS) { fprintf(stderr, "CUFFT Error: Unable to execute plan\n"); return -1; } CUDA_CHECK(hipDeviceSynchronize()); dim3 grid(NX / 16, NY / 16, 1); dim3 threads(16, 16, 1); hipLaunchKernelGGL(( NormalizeIFFT), dim3(grid), dim3(threads), 0, 0, d_input, NX, NY, NX * NY); CUDA_CHECK(hipMemcpy(input, d_input, sizeof(hipfftReal) * NX * NY, hipMemcpyDeviceToHost)); CUDA_CHECK(hipDeviceSynchronize()); for (int x = 0; x < NX; x++) for (int y = 0; y < NY; y++) std::cerr << input[x][y] << " == " << x * y << std::endl; hipfftComplex output[NX * (NY / 2 + 1)]; CUDA_CHECK(hipMemcpy(output, d_output, sizeof(hipfftComplex) * NX * (NY / 2 + 1), hipMemcpyDeviceToHost)); // for (int x = 0; x < NX; x++) // std::cerr << output[x].x << " " << output[x].y << std::endl; hipfftDestroy(forwardPlan); hipfftDestroy(inversePlan); hipFree(d_input); hipFree(d_output); return 0; }
9f12cf1b2da406caae3979ab3bcf2278565b76c5.cu
#include <cuda_runtime.h> #include <cufft.h> #include <hermes/hermes.h> #include <iostream> using namespace hermes::cuda; #define NX 256 #define NY 128 __global__ void NormalizeIFFT(float *g_data, int width, int height, float N) { // index = x * height + y unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = yIndex * width + xIndex; g_data[index] = g_data[index] / N; } int main() { cufftReal input[NX][NY]; for (int x = 0; x < NX; x++) for (int y = 0; y < NY; y++) input[x][y] = x * y; cufftReal *d_input; CUDA_CHECK(cudaMalloc((void **)&d_input, sizeof(cufftReal) * NX * NY)); CUDA_CHECK(cudaMemcpy(d_input, input, sizeof(cufftReal) * NX * NY, cudaMemcpyHostToDevice)); cufftComplex *d_output; CUDA_CHECK( cudaMalloc((void **)&d_output, sizeof(cufftComplex) * NX * (NY / 2 + 1))); cufftHandle forwardPlan, inversePlan; if (cufftPlan2d(&forwardPlan, NX, NY, CUFFT_R2C) != CUFFT_SUCCESS) { std::cerr << "CUFFT Error: Failed to create plan\n"; return -1; } if (cufftPlan2d(&inversePlan, NX, NY, CUFFT_C2R) != CUFFT_SUCCESS) { std::cerr << "CUFFT Error: Failed to create plan\n"; return -1; } if (cufftExecR2C(forwardPlan, d_input, d_output) != CUFFT_SUCCESS) { fprintf(stderr, "CUFFT Error: Unable to execute plan\n"); return -1; } CUDA_CHECK(cudaDeviceSynchronize()); if (cufftExecC2R(inversePlan, d_output, d_input) != CUFFT_SUCCESS) { fprintf(stderr, "CUFFT Error: Unable to execute plan\n"); return -1; } CUDA_CHECK(cudaDeviceSynchronize()); dim3 grid(NX / 16, NY / 16, 1); dim3 threads(16, 16, 1); NormalizeIFFT<<<grid, threads>>>(d_input, NX, NY, NX * NY); CUDA_CHECK(cudaMemcpy(input, d_input, sizeof(cufftReal) * NX * NY, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaDeviceSynchronize()); for (int x = 0; x < NX; x++) for (int y = 0; y < NY; y++) std::cerr << input[x][y] << " == " << x * y << std::endl; cufftComplex output[NX * (NY / 2 + 1)]; CUDA_CHECK(cudaMemcpy(output, d_output, sizeof(cufftComplex) * NX * (NY / 2 + 1), cudaMemcpyDeviceToHost)); // for (int x = 0; x < NX; x++) // std::cerr << output[x].x << " " << output[x].y << std::endl; cufftDestroy(forwardPlan); cufftDestroy(inversePlan); cudaFree(d_input); cudaFree(d_output); return 0; }
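// Why NormalizeIFFT divides by N = NX * NY: cuFFT transforms are unnormalized,
// so a C2R transform applied to the R2C output returns NX * NY * f. After the
// division, the printed comparison `input[x][y] == x * y` should hold up to
// float rounding. The R2C output also holds only NX * (NY / 2 + 1) complex
// values because the spectrum of a real signal is Hermitian-symmetric, which
// is why d_output is allocated with that reduced size.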
6a4823b1a41e111fd6a7e2dddb4113bfe8f78e3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*------------------------------------------------------------------------ CUDA C extension for Python Provides functionality for back projection in PET image reconstruction. author: Pawel Markiewicz Copyrights: 2018 ------------------------------------------------------------------------*/ #include "auxmath.h" #include "prjb.h" #include "tprj.h" __constant__ float2 c_li2rng[NLI2R]; __constant__ short2 c_li2sn[NLI2R]; __constant__ char c_li2nos[NLI2R]; //=============================================================== // copy to the smaller axially image __global__ void imReduce(float *imr, float *im, int vz0, int nvz) { int iz = vz0 + threadIdx.x; int iy = SZ_IMZ * threadIdx.y + SZ_IMZ * blockDim.y * blockIdx.x; if (iy < SZ_IMY * SZ_IMZ) { int idx = SZ_IMZ * SZ_IMY * blockIdx.y + iy + iz; int idxr = threadIdx.x + (nvz * threadIdx.y + nvz * blockDim.y * blockIdx.x) + nvz * SZ_IMY * blockIdx.y; // copy to the axially smaller image imr[idxr] = im[idx]; } } //=============================================================== //**************** DIRECT *********************************** __global__ void bprj_drct(const float *sino, float *im, const float *tt, const unsigned char *tv, const int *subs, const short snno) { int ixt = subs[blockIdx.x]; // transaxial indx int ixz = threadIdx.x; // axial (z) float bin = sino[c_li2sn[ixz].x + blockIdx.x * snno]; float z = c_li2rng[ixz].x + .5 * SZ_RING; int w = (floorf(.5 * SZ_IMZ + SZ_VOXZi * z)); //------------------------------------------------- /*** accumulation ***/ // vector a (at) component signs int sgna0 = tv[N_TV * ixt] - 1; int sgna1 = tv[N_TV * ixt + 1] - 1; bool rbit = tv[N_TV * ixt + 2] & 0x01; // row bit int u = (int)tt[N_TT * ixt + 8]; int v = (u >> UV_SHFT); int uv = SZ_IMZ * ((u & 0x000001ff) + SZ_IMX * v); // next voxel (skipping the first fractional one) uv += !rbit * sgna0 * SZ_IMZ; uv -= rbit * sgna1 * SZ_IMZ * SZ_IMX; float dtr = tt[N_TT * ixt + 2]; float dtc = tt[N_TT * ixt + 3]; float trc = tt[N_TT * ixt] + rbit * dtr; float tcc = tt[N_TT * ixt + 1] + dtc * !rbit; rbit = tv[N_TV * ixt + 3] & 0x01; float tn = trc * rbit + tcc * !rbit; // next t float tp = tt[N_TT * ixt + 5]; // previous t float lt; //------------------------------------------------- for (int k = 3; k < (int)tt[N_TT * ixt + 9]; k++) { lt = tn - tp; atomicAdd(&im[uv + w], lt * bin); trc += dtr * rbit; tcc += dtc * !rbit; uv += !rbit * sgna0 * SZ_IMZ; uv -= rbit * sgna1 * SZ_IMZ * SZ_IMX; tp = tn; rbit = tv[N_TV * ixt + k + 1] & 0x01; tn = trc * rbit + tcc * !rbit; } } //************** OBLIQUE ************************************************** __global__ void bprj_oblq(const float *sino, float *im, const float *tt, const unsigned char *tv, const int *subs, const short snno, const int zoff, const short nil2r_c) { int ixz = threadIdx.x + zoff; // axial (z) if (ixz < nil2r_c) { int ixt = subs[blockIdx.x]; // blockIdx.x is the transaxial bin index // bin values to be back projected float bin = sino[c_li2sn[ixz].x + snno * blockIdx.x]; float bin_ = sino[c_li2sn[ixz].y + snno * blockIdx.x]; //------------------------------------------------- /*** accumulation ***/ // vector a (at) component signs int sgna0 = tv[N_TV * ixt] - 1; int sgna1 = tv[N_TV * ixt + 1] - 1; bool rbit = tv[N_TV * ixt + 2] & 0x01; // row bit int u = (int)tt[N_TT * ixt + 8]; int v = (u >> UV_SHFT); int uv = SZ_IMZ * ((u & 0x000001ff) + SZ_IMX * v); // next voxel (skipping the first fractional one) uv += !rbit * sgna0 
* SZ_IMZ; uv -= rbit * sgna1 * SZ_IMZ * SZ_IMX; float dtr = tt[N_TT * ixt + 2]; float dtc = tt[N_TT * ixt + 3]; float trc = tt[N_TT * ixt] + rbit * dtr; float tcc = tt[N_TT * ixt + 1] + dtc * !rbit; rbit = tv[N_TV * ixt + 3] & 0x01; float tn = trc * rbit + tcc * !rbit; // next t float tp = tt[N_TT * ixt + 5]; // previous t //-------------------------------------------------- //**** AXIAL ***** float atn = tt[N_TT * ixt + 7]; float az = c_li2rng[ixz].y - c_li2rng[ixz].x; float az_atn = az / atn; float s_az_atn = sqrtf(az_atn * az_atn + 1); int sgnaz; if (az >= 0) sgnaz = 1; else sgnaz = -1; float pz = c_li2rng[ixz].x + .5 * SZ_RING; float z = pz + az_atn * tp; // here was t1 = tt[N_TT*ixt+4]<<<<<<<< int w = (floorf(.5 * SZ_IMZ + SZ_VOXZi * z)); float lz1 = (ceilf(.5 * SZ_IMZ + SZ_VOXZi * z)) * SZ_VOXZ - .5 * SZ_IMZ * SZ_VOXZ; // w is like in matlab by one greater z = c_li2rng[ixz].y + .5 * SZ_RING - az_atn * tp; // here was t1 = tt[N_TT*ixt+4]<<<<<<<<< int w_ = (floorf(.5 * SZ_IMZ + SZ_VOXZi * z)); z = pz + az_atn * tt[N_TT * ixt + 6]; // t2 float lz2 = (floorf(.5 * SZ_IMZ + SZ_VOXZi * z)) * SZ_VOXZ - .5 * SZ_IMZ * SZ_VOXZ; int nz = fabsf(lz2 - lz1) / SZ_VOXZ; // rintf float tz1 = (lz1 - pz) / az_atn; // first ray interaction with a row float tz2 = (lz2 - pz) / az_atn; // last ray interaction with a row float dtz = (tz2 - tz1) / nz; float tzc = tz1; //**************** float fr, lt; for (int k = 3; k < tt[N_TT * ixt + 9]; k++) { //<<< k=3 as 0 and 1 are for sign and 2 is skipped lt = tn - tp; if ((tn - tzc) > 0) { fr = (tzc - tp) / lt; atomicAdd(im + uv + w, fr * lt * s_az_atn * bin); atomicAdd(im + uv + w_, fr * lt * s_az_atn * bin_); // acc += fr*lt*s_az_atn * im[ w + uv ]; // acc_+= fr*lt*s_az_atn * im[ w_+ uv ]; w += sgnaz; w_ -= sgnaz; atomicAdd(im + uv + w, (1 - fr) * lt * s_az_atn * bin); atomicAdd(im + uv + w_, (1 - fr) * lt * s_az_atn * bin_); // acc += (1-fr)*lt*s_az_atn * im[ w + uv]; // acc_+= (1-fr)*lt*s_az_atn * im[ w_+ uv]; tzc += dtz; } else { atomicAdd(im + uv + w, lt * s_az_atn * bin); atomicAdd(im + uv + w_, lt * s_az_atn * bin_); // acc += lt*s_az_atn * im[ w + uv ]; // acc_+= lt*s_az_atn * im[ w_+ uv ]; } trc += dtr * rbit; tcc += dtc * !rbit; uv += !rbit * sgna0 * SZ_IMZ; uv -= rbit * sgna1 * SZ_IMZ * SZ_IMY; tp = tn; rbit = tv[N_TV * ixt + k + 1] & 0x01; tn = trc * rbit + tcc * !rbit; } } } //-------------------------------------------------------------------------------------------------- void gpu_bprj(float *d_im, float *d_sino, float *li2rng, short *li2sn, char *li2nos, short2 *d_s2c, float4 *d_crs, int *d_subs, float *d_tt, unsigned char *d_tv, int Nprj, Cnst Cnt, bool _sync) { int dev_id; hipGetDevice(&dev_id); if (Cnt.LOG <= LOGDEBUG) printf("i> using CUDA device #%d\n", dev_id); //----------------------------------------------------------------- // RINGS: either all or a subset of rings can be used for fast calc. //----------------------------------------------------------------- // number of rings customised int nrng_c, nil2r_c, vz0, vz1, nvz; // number of sinos short snno = -1; if (Cnt.SPN == 1) { // number of direct rings considered nrng_c = Cnt.RNG_END - Cnt.RNG_STRT; // number of "positive" michelogram elements used for projection (can be smaller than the // maximum) nil2r_c = (nrng_c + 1) * nrng_c / 2; snno = nrng_c * nrng_c; // correct for the max. 
ring difference in the full axial extent (don't use ring range (1,63) // as for this case no correction) if (nrng_c == NRINGS) { snno -= 12; nil2r_c -= 6; } } else if (Cnt.SPN == 11) { snno = NSINOS11; nrng_c = NRINGS; nil2r_c = NLI2R; } // voxels in axial direction vz0 = 2 * Cnt.RNG_STRT; vz1 = 2 * (Cnt.RNG_END - 1); nvz = 2 * nrng_c - 1; if (Cnt.LOG <= LOGDEBUG) { printf("i> detector rings range: [%d, %d) => number of sinos: %d\n", Cnt.RNG_STRT, Cnt.RNG_END, snno); printf(" corresponding voxels: [%d, %d] => number of voxels: %d\n", vz0, vz1, nvz); } //----------------------------------------------------------------- float *d_imf; // when rings are reduced if (nvz < SZ_IMZ) HANDLE_ERROR(hipMalloc(&d_imf, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float))); else d_imf = d_im; HANDLE_ERROR(hipMemset(d_imf, 0, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float))); //--- hipMemcpyToSymbol(c_li2rng, li2rng, nil2r_c * sizeof(float2)); hipMemcpyToSymbol(c_li2sn, li2sn, nil2r_c * sizeof(short2)); hipMemcpyToSymbol(c_li2nos, li2nos, nil2r_c * sizeof(char)); hipEvent_t start, stop; if (_sync) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); } if (Cnt.LOG <= LOGDEBUG) printf("i> calculating image through back projection... "); //------------DO TRANSAXIAL CALCULATIONS--------------------------------- gpu_siddon_tx(d_crs, d_s2c, d_tt, d_tv); //----------------------------------------------------------------------- //============================================================================ hipLaunchKernelGGL(( bprj_drct), dim3(Nprj), dim3(nrng_c), 0, 0, d_sino, d_imf, d_tt, d_tv, d_subs, snno); HANDLE_ERROR(hipGetLastError()); //============================================================================ int zoff = nrng_c; // number of oblique sinograms int Noblq = (nrng_c - 1) * nrng_c / 2; int Nz = ((Noblq + 127) / 128) * 128; //============================================================================ hipLaunchKernelGGL(( bprj_oblq), dim3(Nprj), dim3(Nz / 2), 0, 0, d_sino, d_imf, d_tt, d_tv, d_subs, snno, zoff, nil2r_c); HANDLE_ERROR(hipGetLastError()); zoff += Nz / 2; hipLaunchKernelGGL(( bprj_oblq), dim3(Nprj), dim3(Nz / 2), 0, 0, d_sino, d_imf, d_tt, d_tv, d_subs, snno, zoff, nil2r_c); HANDLE_ERROR(hipGetLastError()); //============================================================================ // // the actual axial size used (due to the customised ring subset used) // int vz0 = 2*Cnt.RNG_STRT; // int vz1 = 2*(Cnt.RNG_END-1); // // number of voxel for reduced number of rings (customised) // int nvz = vz1-vz0+1; // when rings are reduced if (nvz < SZ_IMZ) { // number of axial row for max threads int nar = NIPET_CU_THREADS / nvz; dim3 THRD(nvz, nar, 1); dim3 BLCK((SZ_IMY + nar - 1) / nar, SZ_IMX, 1); hipLaunchKernelGGL(( imReduce), dim3(BLCK), dim3(THRD), 0, 0, d_im, d_imf, vz0, nvz); HANDLE_ERROR(hipGetLastError()); HANDLE_ERROR(hipFree(d_imf)); if (Cnt.LOG <= LOGDEBUG) printf("i> reduced the axial (z) image size to %d\n", nvz); } if (_sync) { hipEventRecord(stop, 0); hipEventSynchronize(stop); // hipDeviceSynchronize(); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); if (Cnt.LOG <= LOGDEBUG) printf("DONE in %fs.\n", 0.001 * elapsedTime); } else { if (Cnt.LOG <= LOGDEBUG) printf("DONE.\n"); } } //======================================================================= void rec_bprj(float *d_bimg, float *d_sino, int *d_sub, int Nprj, float *d_tt, unsigned char *d_tv, float *li2rng, short *li2sn, char *li2nos, Cnst Cnt) { 
int dev_id; hipGetDevice(&dev_id); if (Cnt.LOG <= LOGDEBUG) printf("i> using CUDA device #%d\n", dev_id); // get the axial LUTs in constant memory hipMemcpyToSymbol(c_li2rng, li2rng, NLI2R * sizeof(float2)); hipMemcpyToSymbol(c_li2sn, li2sn, NLI2R * sizeof(short2)); hipMemcpyToSymbol(c_li2nos, li2nos, NLI2R * sizeof(char)); // number of sinos short snno = -1; if (Cnt.SPN == 1) snno = NSINOS; else if (Cnt.SPN == 11) snno = NSINOS11; //> number of oblique sinograms int Noblq = (NRINGS * (NRINGS - 1) - 12) / 2; //> number of threads (in the axial direction) int Nz = ((Noblq + 127) / 128) * 128; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); if (Cnt.LOG <= LOGDEBUG) printf("i> subset back projection (Nprj=%d)... ", Nprj); //============================================================================ hipLaunchKernelGGL(( bprj_drct), dim3(Nprj), dim3(NRINGS), 0, 0, d_sino, d_bimg, d_tt, d_tv, d_sub, snno); HANDLE_ERROR(hipGetLastError()); //============================================================================ int zoff = NRINGS; //============================================================================ hipLaunchKernelGGL(( bprj_oblq), dim3(Nprj), dim3(Nz / 2), 0, 0, d_sino, d_bimg, d_tt, d_tv, d_sub, snno, zoff, NLI2R); HANDLE_ERROR(hipGetLastError()); //============================================================================ zoff += Nz / 2; //============================================================================ hipLaunchKernelGGL(( bprj_oblq), dim3(Nprj), dim3(Nz / 2), 0, 0, d_sino, d_bimg, d_tt, d_tv, d_sub, snno, zoff, NLI2R); HANDLE_ERROR(hipGetLastError()); //============================================================================ hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); if (Cnt.LOG <= LOGDEBUG) printf("DONE in %fs.\n", 0.001 * elapsedTime); hipDeviceSynchronize(); return; }
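The ring-reduced path in gpu_bprj above back-projects into a full-size scratch image d_imf and then copies only the axial slab that was actually computed into the output d_im through imReduce. A plain host-side reference of that copy, for checking the index arithmetic (SZ_IMX, SZ_IMY and SZ_IMZ are compile-time constants in the file; here they are passed as parameters nx, ny and nz_full, which is purely illustrative):

// CPU reference for imReduce: copy the axial slab [vz0, vz0 + nvz) of a
// z-fastest (nx, ny, nz_full) volume into a z-fastest (nx, ny, nvz) volume.
void im_reduce_ref(float *imr, const float *im, int vz0, int nvz,
                   int nx, int ny, int nz_full) {
  for (int x = 0; x < nx; ++x)
    for (int y = 0; y < ny; ++y)
      for (int z = 0; z < nvz; ++z)
        imr[(x * ny + y) * nvz + z] = im[(x * ny + y) * nz_full + vz0 + z];
}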
6a4823b1a41e111fd6a7e2dddb4113bfe8f78e3c.cu
/*------------------------------------------------------------------------ CUDA C extension for Python Provides functionality for back projection in PET image reconstruction. author: Pawel Markiewicz Copyrights: 2018 ------------------------------------------------------------------------*/ #include "auxmath.h" #include "prjb.h" #include "tprj.h" __constant__ float2 c_li2rng[NLI2R]; __constant__ short2 c_li2sn[NLI2R]; __constant__ char c_li2nos[NLI2R]; //=============================================================== // copy to the smaller axially image __global__ void imReduce(float *imr, float *im, int vz0, int nvz) { int iz = vz0 + threadIdx.x; int iy = SZ_IMZ * threadIdx.y + SZ_IMZ * blockDim.y * blockIdx.x; if (iy < SZ_IMY * SZ_IMZ) { int idx = SZ_IMZ * SZ_IMY * blockIdx.y + iy + iz; int idxr = threadIdx.x + (nvz * threadIdx.y + nvz * blockDim.y * blockIdx.x) + nvz * SZ_IMY * blockIdx.y; // copy to the axially smaller image imr[idxr] = im[idx]; } } //=============================================================== //**************** DIRECT *********************************** __global__ void bprj_drct(const float *sino, float *im, const float *tt, const unsigned char *tv, const int *subs, const short snno) { int ixt = subs[blockIdx.x]; // transaxial indx int ixz = threadIdx.x; // axial (z) float bin = sino[c_li2sn[ixz].x + blockIdx.x * snno]; float z = c_li2rng[ixz].x + .5 * SZ_RING; int w = (floorf(.5 * SZ_IMZ + SZ_VOXZi * z)); //------------------------------------------------- /*** accumulation ***/ // vector a (at) component signs int sgna0 = tv[N_TV * ixt] - 1; int sgna1 = tv[N_TV * ixt + 1] - 1; bool rbit = tv[N_TV * ixt + 2] & 0x01; // row bit int u = (int)tt[N_TT * ixt + 8]; int v = (u >> UV_SHFT); int uv = SZ_IMZ * ((u & 0x000001ff) + SZ_IMX * v); // next voxel (skipping the first fractional one) uv += !rbit * sgna0 * SZ_IMZ; uv -= rbit * sgna1 * SZ_IMZ * SZ_IMX; float dtr = tt[N_TT * ixt + 2]; float dtc = tt[N_TT * ixt + 3]; float trc = tt[N_TT * ixt] + rbit * dtr; float tcc = tt[N_TT * ixt + 1] + dtc * !rbit; rbit = tv[N_TV * ixt + 3] & 0x01; float tn = trc * rbit + tcc * !rbit; // next t float tp = tt[N_TT * ixt + 5]; // previous t float lt; //------------------------------------------------- for (int k = 3; k < (int)tt[N_TT * ixt + 9]; k++) { lt = tn - tp; atomicAdd(&im[uv + w], lt * bin); trc += dtr * rbit; tcc += dtc * !rbit; uv += !rbit * sgna0 * SZ_IMZ; uv -= rbit * sgna1 * SZ_IMZ * SZ_IMX; tp = tn; rbit = tv[N_TV * ixt + k + 1] & 0x01; tn = trc * rbit + tcc * !rbit; } } //************** OBLIQUE ************************************************** __global__ void bprj_oblq(const float *sino, float *im, const float *tt, const unsigned char *tv, const int *subs, const short snno, const int zoff, const short nil2r_c) { int ixz = threadIdx.x + zoff; // axial (z) if (ixz < nil2r_c) { int ixt = subs[blockIdx.x]; // blockIdx.x is the transaxial bin index // bin values to be back projected float bin = sino[c_li2sn[ixz].x + snno * blockIdx.x]; float bin_ = sino[c_li2sn[ixz].y + snno * blockIdx.x]; //------------------------------------------------- /*** accumulation ***/ // vector a (at) component signs int sgna0 = tv[N_TV * ixt] - 1; int sgna1 = tv[N_TV * ixt + 1] - 1; bool rbit = tv[N_TV * ixt + 2] & 0x01; // row bit int u = (int)tt[N_TT * ixt + 8]; int v = (u >> UV_SHFT); int uv = SZ_IMZ * ((u & 0x000001ff) + SZ_IMX * v); // next voxel (skipping the first fractional one) uv += !rbit * sgna0 * SZ_IMZ; uv -= rbit * sgna1 * SZ_IMZ * SZ_IMX; float dtr = tt[N_TT * ixt + 2]; float 
dtc = tt[N_TT * ixt + 3]; float trc = tt[N_TT * ixt] + rbit * dtr; float tcc = tt[N_TT * ixt + 1] + dtc * !rbit; rbit = tv[N_TV * ixt + 3] & 0x01; float tn = trc * rbit + tcc * !rbit; // next t float tp = tt[N_TT * ixt + 5]; // previous t //-------------------------------------------------- //**** AXIAL ***** float atn = tt[N_TT * ixt + 7]; float az = c_li2rng[ixz].y - c_li2rng[ixz].x; float az_atn = az / atn; float s_az_atn = sqrtf(az_atn * az_atn + 1); int sgnaz; if (az >= 0) sgnaz = 1; else sgnaz = -1; float pz = c_li2rng[ixz].x + .5 * SZ_RING; float z = pz + az_atn * tp; // here was t1 = tt[N_TT*ixt+4]<<<<<<<< int w = (floorf(.5 * SZ_IMZ + SZ_VOXZi * z)); float lz1 = (ceilf(.5 * SZ_IMZ + SZ_VOXZi * z)) * SZ_VOXZ - .5 * SZ_IMZ * SZ_VOXZ; // w is like in matlab by one greater z = c_li2rng[ixz].y + .5 * SZ_RING - az_atn * tp; // here was t1 = tt[N_TT*ixt+4]<<<<<<<<< int w_ = (floorf(.5 * SZ_IMZ + SZ_VOXZi * z)); z = pz + az_atn * tt[N_TT * ixt + 6]; // t2 float lz2 = (floorf(.5 * SZ_IMZ + SZ_VOXZi * z)) * SZ_VOXZ - .5 * SZ_IMZ * SZ_VOXZ; int nz = fabsf(lz2 - lz1) / SZ_VOXZ; // rintf float tz1 = (lz1 - pz) / az_atn; // first ray interaction with a row float tz2 = (lz2 - pz) / az_atn; // last ray interaction with a row float dtz = (tz2 - tz1) / nz; float tzc = tz1; //**************** float fr, lt; for (int k = 3; k < tt[N_TT * ixt + 9]; k++) { //<<< k=3 as 0 and 1 are for sign and 2 is skipped lt = tn - tp; if ((tn - tzc) > 0) { fr = (tzc - tp) / lt; atomicAdd(im + uv + w, fr * lt * s_az_atn * bin); atomicAdd(im + uv + w_, fr * lt * s_az_atn * bin_); // acc += fr*lt*s_az_atn * im[ w + uv ]; // acc_+= fr*lt*s_az_atn * im[ w_+ uv ]; w += sgnaz; w_ -= sgnaz; atomicAdd(im + uv + w, (1 - fr) * lt * s_az_atn * bin); atomicAdd(im + uv + w_, (1 - fr) * lt * s_az_atn * bin_); // acc += (1-fr)*lt*s_az_atn * im[ w + uv]; // acc_+= (1-fr)*lt*s_az_atn * im[ w_+ uv]; tzc += dtz; } else { atomicAdd(im + uv + w, lt * s_az_atn * bin); atomicAdd(im + uv + w_, lt * s_az_atn * bin_); // acc += lt*s_az_atn * im[ w + uv ]; // acc_+= lt*s_az_atn * im[ w_+ uv ]; } trc += dtr * rbit; tcc += dtc * !rbit; uv += !rbit * sgna0 * SZ_IMZ; uv -= rbit * sgna1 * SZ_IMZ * SZ_IMY; tp = tn; rbit = tv[N_TV * ixt + k + 1] & 0x01; tn = trc * rbit + tcc * !rbit; } } } //-------------------------------------------------------------------------------------------------- void gpu_bprj(float *d_im, float *d_sino, float *li2rng, short *li2sn, char *li2nos, short2 *d_s2c, float4 *d_crs, int *d_subs, float *d_tt, unsigned char *d_tv, int Nprj, Cnst Cnt, bool _sync) { int dev_id; cudaGetDevice(&dev_id); if (Cnt.LOG <= LOGDEBUG) printf("i> using CUDA device #%d\n", dev_id); //----------------------------------------------------------------- // RINGS: either all or a subset of rings can be used for fast calc. //----------------------------------------------------------------- // number of rings customised int nrng_c, nil2r_c, vz0, vz1, nvz; // number of sinos short snno = -1; if (Cnt.SPN == 1) { // number of direct rings considered nrng_c = Cnt.RNG_END - Cnt.RNG_STRT; // number of "positive" michelogram elements used for projection (can be smaller than the // maximum) nil2r_c = (nrng_c + 1) * nrng_c / 2; snno = nrng_c * nrng_c; // correct for the max. 
ring difference in the full axial extent (don't use ring range (1,63) // as for this case no correction) if (nrng_c == NRINGS) { snno -= 12; nil2r_c -= 6; } } else if (Cnt.SPN == 11) { snno = NSINOS11; nrng_c = NRINGS; nil2r_c = NLI2R; } // voxels in axial direction vz0 = 2 * Cnt.RNG_STRT; vz1 = 2 * (Cnt.RNG_END - 1); nvz = 2 * nrng_c - 1; if (Cnt.LOG <= LOGDEBUG) { printf("i> detector rings range: [%d, %d) => number of sinos: %d\n", Cnt.RNG_STRT, Cnt.RNG_END, snno); printf(" corresponding voxels: [%d, %d] => number of voxels: %d\n", vz0, vz1, nvz); } //----------------------------------------------------------------- float *d_imf; // when rings are reduced if (nvz < SZ_IMZ) HANDLE_ERROR(cudaMalloc(&d_imf, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float))); else d_imf = d_im; HANDLE_ERROR(cudaMemset(d_imf, 0, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float))); //--- cudaMemcpyToSymbol(c_li2rng, li2rng, nil2r_c * sizeof(float2)); cudaMemcpyToSymbol(c_li2sn, li2sn, nil2r_c * sizeof(short2)); cudaMemcpyToSymbol(c_li2nos, li2nos, nil2r_c * sizeof(char)); cudaEvent_t start, stop; if (_sync) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); } if (Cnt.LOG <= LOGDEBUG) printf("i> calculating image through back projection... "); //------------DO TRANSAXIAL CALCULATIONS--------------------------------- gpu_siddon_tx(d_crs, d_s2c, d_tt, d_tv); //----------------------------------------------------------------------- //============================================================================ bprj_drct<<<Nprj, nrng_c>>>(d_sino, d_imf, d_tt, d_tv, d_subs, snno); HANDLE_ERROR(cudaGetLastError()); //============================================================================ int zoff = nrng_c; // number of oblique sinograms int Noblq = (nrng_c - 1) * nrng_c / 2; int Nz = ((Noblq + 127) / 128) * 128; //============================================================================ bprj_oblq<<<Nprj, Nz / 2>>>(d_sino, d_imf, d_tt, d_tv, d_subs, snno, zoff, nil2r_c); HANDLE_ERROR(cudaGetLastError()); zoff += Nz / 2; bprj_oblq<<<Nprj, Nz / 2>>>(d_sino, d_imf, d_tt, d_tv, d_subs, snno, zoff, nil2r_c); HANDLE_ERROR(cudaGetLastError()); //============================================================================ // // the actual axial size used (due to the customised ring subset used) // int vz0 = 2*Cnt.RNG_STRT; // int vz1 = 2*(Cnt.RNG_END-1); // // number of voxel for reduced number of rings (customised) // int nvz = vz1-vz0+1; // when rings are reduced if (nvz < SZ_IMZ) { // number of axial row for max threads int nar = NIPET_CU_THREADS / nvz; dim3 THRD(nvz, nar, 1); dim3 BLCK((SZ_IMY + nar - 1) / nar, SZ_IMX, 1); imReduce<<<BLCK, THRD>>>(d_im, d_imf, vz0, nvz); HANDLE_ERROR(cudaGetLastError()); HANDLE_ERROR(cudaFree(d_imf)); if (Cnt.LOG <= LOGDEBUG) printf("i> reduced the axial (z) image size to %d\n", nvz); } if (_sync) { cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // cudaDeviceSynchronize(); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); if (Cnt.LOG <= LOGDEBUG) printf("DONE in %fs.\n", 0.001 * elapsedTime); } else { if (Cnt.LOG <= LOGDEBUG) printf("DONE.\n"); } } //======================================================================= void rec_bprj(float *d_bimg, float *d_sino, int *d_sub, int Nprj, float *d_tt, unsigned char *d_tv, float *li2rng, short *li2sn, char *li2nos, Cnst Cnt) { int dev_id; cudaGetDevice(&dev_id); if (Cnt.LOG <= LOGDEBUG) printf("i> using CUDA device #%d\n", dev_id); // get the axial LUTs 
in constant memory cudaMemcpyToSymbol(c_li2rng, li2rng, NLI2R * sizeof(float2)); cudaMemcpyToSymbol(c_li2sn, li2sn, NLI2R * sizeof(short2)); cudaMemcpyToSymbol(c_li2nos, li2nos, NLI2R * sizeof(char)); // number of sinos short snno = -1; if (Cnt.SPN == 1) snno = NSINOS; else if (Cnt.SPN == 11) snno = NSINOS11; //> number of oblique sinograms int Noblq = (NRINGS * (NRINGS - 1) - 12) / 2; //> number of threads (in the axial direction) int Nz = ((Noblq + 127) / 128) * 128; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); if (Cnt.LOG <= LOGDEBUG) printf("i> subset back projection (Nprj=%d)... ", Nprj); //============================================================================ bprj_drct<<<Nprj, NRINGS>>>(d_sino, d_bimg, d_tt, d_tv, d_sub, snno); HANDLE_ERROR(cudaGetLastError()); //============================================================================ int zoff = NRINGS; //============================================================================ bprj_oblq<<<Nprj, Nz / 2>>>(d_sino, d_bimg, d_tt, d_tv, d_sub, snno, zoff, NLI2R); HANDLE_ERROR(cudaGetLastError()); //============================================================================ zoff += Nz / 2; //============================================================================ bprj_oblq<<<Nprj, Nz / 2>>>(d_sino, d_bimg, d_tt, d_tv, d_sub, snno, zoff, NLI2R); HANDLE_ERROR(cudaGetLastError()); //============================================================================ cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); if (Cnt.LOG <= LOGDEBUG) printf("DONE in %fs.\n", 0.001 * elapsedTime); cudaDeviceSynchronize(); return; }
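Taken together, the .hip and .cu versions above show the mechanical part of the hipify conversion: runtime calls are renamed one-for-one (cudaMemcpyToSymbol/hipMemcpyToSymbol, cudaEventRecord/hipEventRecord, cudaFree/hipFree, and so on) and every triple-chevron kernel launch becomes a hipLaunchKernelGGL call with explicit grid, block, shared-memory and stream arguments. A minimal self-contained sketch of that launch mapping, using a hypothetical scale kernel rather than the projector kernels:

#include <hip/hip_runtime.h>

__global__ void scale(float *v, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= s;
}

void launch_scale(float *d_v, float s, int n) {
  dim3 block(128);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA form: scale<<<grid, block, 0, 0>>>(d_v, s, n);
  // HIP form:
  hipLaunchKernelGGL(scale, grid, block, 0, 0, d_v, s, n);
  (void)hipGetLastError();  // check for launch errors
}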
c91c77f6fb34f0067e73f6a198c2f2c72bf6e7f2.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <assert.h>
#define N 2//8
__device__ double C[2][2][2];
__device__ int index (int a, int b, int c){
  return 4*a + 2*b + c;
}
__global__ void foo(double *H) {
  // idx = 4*x + 2*y + z covers 0..7 for the (2,2,2) block launched below,
  // while dev_a is allocated for only N = 2 doubles.
  int idx = index (threadIdx.x,threadIdx.y,threadIdx.z);
  H[idx] = C[threadIdx.x][threadIdx.y][threadIdx.z];
}
int main(){
  double *a;
  double *dev_a;
  int size = N*sizeof(double);
  hipMalloc((void**)&dev_a, size);
  a = (double*)malloc(N*size);
  for (int i = 0; i < N; i++)
    a[i] = i;
  hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
  dim3 blockDim(2,2,2);
  hipLaunchKernelGGL(( foo), dim3(1),dim3(blockDim), 0, 0, dev_a);
  //ESBMC_verify_kernel_c(foo, 1, blockDim, dev_a);
  hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
  free(a);
  hipFree(dev_a);
  return 0;
}
c91c77f6fb34f0067e73f6a198c2f2c72bf6e7f2.cu
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <assert.h>
#define N 2//8
__device__ double C[2][2][2];
__device__ int index (int a, int b, int c){
  return 4*a + 2*b + c;
}
__global__ void foo(double *H) {
  // idx = 4*x + 2*y + z covers 0..7 for the (2,2,2) block launched below,
  // while dev_a is allocated for only N = 2 doubles.
  int idx = index (threadIdx.x,threadIdx.y,threadIdx.z);
  H[idx] = C[threadIdx.x][threadIdx.y][threadIdx.z];
}
int main(){
  double *a;
  double *dev_a;
  int size = N*sizeof(double);
  cudaMalloc((void**)&dev_a, size);
  a = (double*)malloc(N*size);
  for (int i = 0; i < N; i++)
    a[i] = i;
  cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
  dim3 blockDim(2,2,2);
  foo<<<1,blockDim>>>(dev_a);
  //ESBMC_verify_kernel_c(foo, 1, blockDim, dev_a);
  cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
  free(a);
  cudaFree(dev_a);
  return 0;
}
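In the foo kernel of this pair, index(a, b, c) = 4*a + 2*b + c is the row-major flattening of the fixed 2x2x2 array C, so the (2, 2, 2) block produces indices 0..7 while the destination buffer is allocated with N = 2 doubles. The general closed form, with illustrative names only:

// Row-major flattening of an [X][Y][Z] array; with Y = Z = 2 this reduces to
// (x*2 + y)*2 + z = 4*x + 2*y + z, the formula used by index() above.
__host__ __device__ inline int flat3(int x, int y, int z, int Y, int Z) {
  return (x * Y + y) * Z + z;
}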
be65bd699aab3ab520ab21bdf2c76b56f5abb0b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Montecarlo.h" #include <iostream> #include <hiprand/hiprand_kernel.h> #include "Device.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void montecarlo(hiprandState_t* ptrTabDevGeneratorGM,int* ptrDevN0, float a, float b, float M, int nbFlechettes); extern __global__ void setup_kernel_rand(hiprandState_t* tabGeneratorThread, int deviceId); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Constructeur *| \*-------------------------------------*/ Montecarlo::Montecarlo(const Grid& grid, float a, float b, float M, int nbFlechettes) : a(a), b(b), M(M), nbFlechettes(nbFlechettes) { this->dg = grid.dg; this->db = grid.db; this->sizeOctetTabGenerator = dg.x * dg.y * dg.z * db.x * db.y * db.z * sizeof(hiprandState_t); // octet this->sizeOctetN0 = sizeof(int); this->sizeSM = db.x * db.y * db.z * sizeof(int); // MM { // MM (malloc Device) { HANDLE_ERROR(hipMalloc(&ptrDevN0, sizeOctetN0)); HANDLE_ERROR(hipMalloc(&ptrTabDevGeneratorGM, sizeOctetTabGenerator)); } // MM (memset Device) { HANDLE_ERROR(hipMemset(ptrDevN0, 0, sizeOctetN0)); } Device::lastCudaError("Montecarlo MM (end allocation)"); // temp debug } hipLaunchKernelGGL(( setup_kernel_rand), dim3(dg), dim3(db), 0, 0, ptrTabDevGeneratorGM, Device::getDeviceId()); } Montecarlo::~Montecarlo(void) { //MM (device free) { HANDLE_ERROR(hipFree(ptrDevN0)); HANDLE_ERROR(hipFree(ptrTabDevGeneratorGM)); Device::lastCudaError("Montecarlo MM (end deallocation)"); // temp debug } } /*--------------------------------------*\ |* Methode *| \*-------------------------------------*/ float Montecarlo::getPi() { return this->pi; } void Montecarlo::run() { Device::lastCudaError("Montecarlo (before)"); // temp debug hipLaunchKernelGGL(( montecarlo), dim3(dg),dim3(db), sizeSM, 0, ptrTabDevGeneratorGM, ptrDevN0, a, b, M, nbFlechettes); // assynchrone Device::lastCudaError("Montecarlo (after)"); // temp debug Device::synchronize(); // Temp, only for printf in GPU // MM (Device -> Host) { HANDLE_ERROR(hipMemcpy(&N0, ptrDevN0, sizeOctetN0, hipMemcpyDeviceToHost)); // barriere synchronisation implicite } float delta = fabsf(b - a); float rektArea = M * delta; float ratioFlechette = N0 / (float)nbFlechettes; pi = 2 * rektArea * ratioFlechette; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
be65bd699aab3ab520ab21bdf2c76b56f5abb0b6.cu
#include "Montecarlo.h" #include <iostream> #include <curand_kernel.h> #include "Device.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void montecarlo(curandState* ptrTabDevGeneratorGM,int* ptrDevN0, float a, float b, float M, int nbFlechettes); extern __global__ void setup_kernel_rand(curandState* tabGeneratorThread, int deviceId); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Constructeur *| \*-------------------------------------*/ Montecarlo::Montecarlo(const Grid& grid, float a, float b, float M, int nbFlechettes) : a(a), b(b), M(M), nbFlechettes(nbFlechettes) { this->dg = grid.dg; this->db = grid.db; this->sizeOctetTabGenerator = dg.x * dg.y * dg.z * db.x * db.y * db.z * sizeof(curandState); // octet this->sizeOctetN0 = sizeof(int); this->sizeSM = db.x * db.y * db.z * sizeof(int); // MM { // MM (malloc Device) { HANDLE_ERROR(cudaMalloc(&ptrDevN0, sizeOctetN0)); HANDLE_ERROR(cudaMalloc(&ptrTabDevGeneratorGM, sizeOctetTabGenerator)); } // MM (memset Device) { HANDLE_ERROR(cudaMemset(ptrDevN0, 0, sizeOctetN0)); } Device::lastCudaError("Montecarlo MM (end allocation)"); // temp debug } setup_kernel_rand<<<dg, db>>>(ptrTabDevGeneratorGM, Device::getDeviceId()); } Montecarlo::~Montecarlo(void) { //MM (device free) { HANDLE_ERROR(cudaFree(ptrDevN0)); HANDLE_ERROR(cudaFree(ptrTabDevGeneratorGM)); Device::lastCudaError("Montecarlo MM (end deallocation)"); // temp debug } } /*--------------------------------------*\ |* Methode *| \*-------------------------------------*/ float Montecarlo::getPi() { return this->pi; } void Montecarlo::run() { Device::lastCudaError("Montecarlo (before)"); // temp debug montecarlo<<<dg,db, sizeSM>>>(ptrTabDevGeneratorGM, ptrDevN0, a, b, M, nbFlechettes); // assynchrone Device::lastCudaError("Montecarlo (after)"); // temp debug Device::synchronize(); // Temp, only for printf in GPU // MM (Device -> Host) { HANDLE_ERROR(cudaMemcpy(&N0, ptrDevN0, sizeOctetN0, cudaMemcpyDeviceToHost)); // barriere synchronisation implicite } float delta = fabsf(b - a); float rektArea = M * delta; float ratioFlechette = N0 / (float)nbFlechettes; pi = 2 * rektArea * ratioFlechette; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
f86a224f6402a4e182c9422f65fd9026a5cb9200.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "rgb2hsl_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int img_size = XSIZE*YSIZE; unsigned char *img_r = NULL; hipMalloc(&img_r, XSIZE*YSIZE); unsigned char *img_g = NULL; hipMalloc(&img_g, XSIZE*YSIZE); unsigned char *img_b = NULL; hipMalloc(&img_b, XSIZE*YSIZE); float *img_h = NULL; hipMalloc(&img_h, XSIZE*YSIZE); float *img_s = NULL; hipMalloc(&img_s, XSIZE*YSIZE); unsigned char *img_l = NULL; hipMalloc(&img_l, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( rgb2hsl_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, img_size,img_r,img_g,img_b,img_h,img_s,img_l); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( rgb2hsl_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, img_size,img_r,img_g,img_b,img_h,img_s,img_l); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( rgb2hsl_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, img_size,img_r,img_g,img_b,img_h,img_s,img_l); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f86a224f6402a4e182c9422f65fd9026a5cb9200.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "rgb2hsl_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int img_size = XSIZE*YSIZE; unsigned char *img_r = NULL; cudaMalloc(&img_r, XSIZE*YSIZE); unsigned char *img_g = NULL; cudaMalloc(&img_g, XSIZE*YSIZE); unsigned char *img_b = NULL; cudaMalloc(&img_b, XSIZE*YSIZE); float *img_h = NULL; cudaMalloc(&img_h, XSIZE*YSIZE); float *img_s = NULL; cudaMalloc(&img_s, XSIZE*YSIZE); unsigned char *img_l = NULL; cudaMalloc(&img_l, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); rgb2hsl_kernel<<<gridBlock,threadBlock>>>(img_size,img_r,img_g,img_b,img_h,img_s,img_l); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { rgb2hsl_kernel<<<gridBlock,threadBlock>>>(img_size,img_r,img_g,img_b,img_h,img_s,img_l); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { rgb2hsl_kernel<<<gridBlock,threadBlock>>>(img_size,img_r,img_g,img_b,img_h,img_s,img_l); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
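The harness above (in both its HIP and CUDA forms) synchronizes only after the single warm-up launch; the timed loop of 1000 launches is bracketed by std::chrono without a final device synchronize, so because kernel launches are asynchronous the measured interval largely reflects enqueue overhead rather than kernel execution time (unless the launch queue fills and blocks). An event-based sketch that times the kernels themselves; the work kernel and launch shape are placeholders:

#include <hip/hip_runtime.h>

__global__ void work(float *buf) {
  buf[blockIdx.x * blockDim.x + threadIdx.x] += 1.0f;
}

float time_1000_launches(float *d_buf, dim3 grid, dim3 block) {
  hipEvent_t beg, end;
  hipEventCreate(&beg);
  hipEventCreate(&end);
  hipEventRecord(beg, 0);
  for (int i = 0; i < 1000; ++i)
    hipLaunchKernelGGL(work, grid, block, 0, 0, d_buf);
  hipEventRecord(end, 0);
  hipEventSynchronize(end);            // wait until the last launch has finished
  float ms = 0.0f;
  hipEventElapsedTime(&ms, beg, end);  // GPU time across all 1000 launches
  hipEventDestroy(beg);
  hipEventDestroy(end);
  return ms;
}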
07f093834a349a0cf7cc2f6dbab37ecbbbd5dec8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/cuda_utils.h" #define DEG2RAD 57.29578 #define R 25 typedef unsigned char uchar; __global__ void linearInterp(const uchar* const src, uchar* dst, int cols, int k) { int i = blockIdx.y, j = blockIdx.x; int px = j / k, py = i / k; double u = double(j) / double(k) - double(px), v = double(i) / double(k) - double(py); int basey = py * cols; double res = src[basey + px] * (1 - u) * (1 - v); res += src[basey + px + 1] * u * (1 - v); res += src[basey + cols + px] * (1 - u) * v; res += src[basey + cols + px + 1] * u * v; dst[2048 * i + j] = uchar(res); } __global__ void imgRotate(const uchar* const src, uchar* dst, int rows, int cols, double angle) { angle /= DEG2RAD; int i = blockIdx.y, j = blockIdx.x; int cx = cols / 2, cy = rows / 2; double vx = j - cx, vy = i - cy; double cosa = cos(angle), sina = sin(angle); double nvx = cosa * vx - sina * vy, nvy = sina * vx + cosa * vy; int projx = cx + nvx, projy = cy + nvy; if (projx >= 0 && projx < cols && projy >= 0 && projy < rows){ dst[projy * cols + projx] = src[i * cols + j]; } } __global__ void copyMakeBorder(const uchar* const src, uchar* dst, int cols, int pad_cols, int t, int l) { int i = blockIdx.y, j = blockIdx.x; dst[(i + t) * pad_cols + j + l] = src[i * cols + j]; } __global__ void imgShear(const uchar* const src, uchar* dst, int rows, int cols, double ratio) { int i = blockIdx.y, j = blockIdx.x; int cy = rows / 2; int dx = double(cy - i) * ratio; int base = i * cols, newx = j + dx; if (newx >= 0 && newx < cols) { dst[base + newx] = src[base + j]; } } __global__ void nearestInterp(const unsigned char* const src, unsigned char* dst, int cols, int k) { int i = blockIdx.y, j = blockIdx.x; int px = j / k, py = i / k; double u = double(j) / double(k) - double(px), v = double(i) / double(k) - double(py); if (u < 0.5 && v < 0.5) { dst[i * cols * k + j] = src[py * (cols + 1) + px]; } else if (u >= 0.5 && v < 0.5) { dst[i * cols * k + j] = src[py * (cols + 1) + px + 1]; } else if (v >= 0.5 && u < 0.5) { dst[i * cols * k + j] = src[(py + 1) * (cols + 1) + px]; } else { dst[i * cols * k + j] = src[(py + 1) * (cols + 1) + px + 1]; } } __global__ void medianFilter(const uchar* const src, uchar* dst, int rows, int cols, int radius) { int y = blockIdx.y, x = blockIdx.x, cnt = 0; uchar* value = &dst[y * cols + x]; if (*value != 0) return; int full_length = 2 * radius + 1; uchar *buf = (uchar *)malloc(full_length * full_length), med = 0; // malloc for (int i = -radius; i <= radius; i++) { int py = y + i; if (py >= 0 && py < rows) { py *= cols; for (int j = -radius; j <= radius; j++) { int px = x + j; if (px >= 0 && px < cols) { buf[cnt] = src[py + px]; cnt++; } } } } halfBubbleSort<uchar>(buf, &med, cnt); *value = med; free(buf); } template<typename T> __device__ void halfBubbleSort(T* buf, T* med, int size) { int half = int(size / 2) + 1, i = 0; for (; i < half; i++) { for (int j = i + 1; j < size; j++){ if (buf[i] > buf[j]) { T tmp = buf[i]; buf[i] = buf[j]; buf[j] = tmp; } } } *med = buf[i - 1]; }
07f093834a349a0cf7cc2f6dbab37ecbbbd5dec8.cu
#include "../include/cuda_utils.h" #define DEG2RAD 57.29578 #define R 25 typedef unsigned char uchar; __global__ void linearInterp(const uchar* const src, uchar* dst, int cols, int k) { int i = blockIdx.y, j = blockIdx.x; int px = j / k, py = i / k; double u = double(j) / double(k) - double(px), v = double(i) / double(k) - double(py); int basey = py * cols; double res = src[basey + px] * (1 - u) * (1 - v); res += src[basey + px + 1] * u * (1 - v); res += src[basey + cols + px] * (1 - u) * v; res += src[basey + cols + px + 1] * u * v; dst[2048 * i + j] = uchar(res); } __global__ void imgRotate(const uchar* const src, uchar* dst, int rows, int cols, double angle) { angle /= DEG2RAD; int i = blockIdx.y, j = blockIdx.x; int cx = cols / 2, cy = rows / 2; double vx = j - cx, vy = i - cy; double cosa = cos(angle), sina = sin(angle); double nvx = cosa * vx - sina * vy, nvy = sina * vx + cosa * vy; int projx = cx + nvx, projy = cy + nvy; if (projx >= 0 && projx < cols && projy >= 0 && projy < rows){ dst[projy * cols + projx] = src[i * cols + j]; } } __global__ void copyMakeBorder(const uchar* const src, uchar* dst, int cols, int pad_cols, int t, int l) { int i = blockIdx.y, j = blockIdx.x; dst[(i + t) * pad_cols + j + l] = src[i * cols + j]; } __global__ void imgShear(const uchar* const src, uchar* dst, int rows, int cols, double ratio) { int i = blockIdx.y, j = blockIdx.x; int cy = rows / 2; int dx = double(cy - i) * ratio; int base = i * cols, newx = j + dx; if (newx >= 0 && newx < cols) { dst[base + newx] = src[base + j]; } } __global__ void nearestInterp(const unsigned char* const src, unsigned char* dst, int cols, int k) { int i = blockIdx.y, j = blockIdx.x; int px = j / k, py = i / k; double u = double(j) / double(k) - double(px), v = double(i) / double(k) - double(py); if (u < 0.5 && v < 0.5) { dst[i * cols * k + j] = src[py * (cols + 1) + px]; } else if (u >= 0.5 && v < 0.5) { dst[i * cols * k + j] = src[py * (cols + 1) + px + 1]; } else if (v >= 0.5 && u < 0.5) { dst[i * cols * k + j] = src[(py + 1) * (cols + 1) + px]; } else { dst[i * cols * k + j] = src[(py + 1) * (cols + 1) + px + 1]; } } __global__ void medianFilter(const uchar* const src, uchar* dst, int rows, int cols, int radius) { int y = blockIdx.y, x = blockIdx.x, cnt = 0; uchar* value = &dst[y * cols + x]; if (*value != 0) return; int full_length = 2 * radius + 1; uchar *buf = (uchar *)malloc(full_length * full_length), med = 0; // 这个malloc用得就很难受 for (int i = -radius; i <= radius; i++) { int py = y + i; if (py >= 0 && py < rows) { py *= cols; for (int j = -radius; j <= radius; j++) { int px = x + j; if (px >= 0 && px < cols) { buf[cnt] = src[py + px]; cnt++; } } } } halfBubbleSort<uchar>(buf, &med, cnt); *value = med; free(buf); } template<typename T> __device__ void halfBubbleSort(T* buf, T* med, int size) { int half = int(size / 2) + 1, i = 0; for (; i < half; i++) { for (int j = i + 1; j < size; j++){ if (buf[i] > buf[j]) { T tmp = buf[i]; buf[i] = buf[j]; buf[j] = tmp; } } } *med = buf[i - 1]; }
6a46c350142c763b9f0c514c858af6dc6d0ba779.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); float *C = NULL; hipMalloc(&C, XSIZE*YSIZE); const int N = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,C,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6a46c350142c763b9f0c514c858af6dc6d0ba779.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); float *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); const int N = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel<<<gridBlock,threadBlock>>>(A,C,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel<<<gridBlock,threadBlock>>>(A,C,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel<<<gridBlock,threadBlock>>>(A,C,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
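Both benchmark mains size the grid by bumping XSIZE and YSIZE up to the next multiple of the block dimensions with a while loop and then dividing; the closed-form ceiling division produces the same grid, for example:

// Smallest number of blocks of size b that cover n elements (equivalent to the
// increment-until-divisible loops used above, for positive n and b).
static inline int ceil_div(int n, int b) { return (n + b - 1) / b; }
// dim3 gridBlock(ceil_div(XSIZE, BLOCKX), ceil_div(YSIZE, BLOCKY));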
b3306f56584b0b2055fdeab1ce1ff54910074b7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * (hilva(z)+helva(z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 5.0; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<10;v++) { cue = (cue -urigo(cue,uon*faxon))/((cue -powc(ai,uon)*urigo(cue,aon*faxon))); accume = accume * uon*urigo(cue,aon*faxon); } cue = accume; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux 
)*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
b3306f56584b0b2055fdeab1ce1ff54910074b7c.cu
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100

struct cuComplex {
    float r;
    float i;
    __device__ cuComplex( float a, float b ) : r(a), i(b) {}
    __device__ float magnitude2( void ) { return r * r + i * i; }
    __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); }
    __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); }
    __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); }
    __device__ cuComplex operator/(const cuComplex& a) {
        return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
    }
};

__device__ cuComplex conj(cuComplex m) {
    cuComplex out(m.r,-m.i);
    return out;
}

__device__ cuComplex nor(cuComplex m) {
    cuComplex out(m.r*m.r+m.i*m.i,0.0);
    return out;
}

__device__ float norg(cuComplex m) {
    return sqrtf(m.r*m.r+m.i*m.i);
}

__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
    cuComplex out(1.0,0.0);
    cuComplex unity(1.0,0.0);
    int i = 0;
    cuComplex Q = q;
    if(q.magnitude2()>1.0) {
        return cuComplex(0.0,0.0);
    }
    // We want to formally match the definition of a q-pochhammer symbol.
    for(i=1;i<80;i++) {
        out = out * (unity - a*Q);
        Q = q * Q;
    }
    return out;
}

__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
    cuComplex out(1.0,0.0);
    cuComplex unity(1.0,0.0);
    int i = 0;
    cuComplex Q = q;
    if(q.magnitude2()>1.0) {
        return cuComplex(0.0,0.0);
    }
    // We want to formally match the definition of a q-pochhammer symbol.
    for(i=1;i<n;i++) {
        out = out * (unity - a*Q);
        Q = q * Q;
    }
    return out;
}

__device__ cuComplex ramphi(cuComplex q) {
    cuComplex out(1.0,0.0);
    cuComplex mone(-1.0,0.0);
    cuComplex mq = mone*q;
    return qpoch(mq,mq)/qpoch(q,mq);
}

__device__ cuComplex rampsi(cuComplex q) {
    cuComplex out(1.0,0.0);
    cuComplex mone(-1.0,0.0);
    cuComplex mq = mone*q;
    return qpoch(mq,q)*qpoch(q*q,q*q);
}

__device__ cuComplex ramchi(cuComplex q) {
    cuComplex out(1.0,0.0);
    cuComplex mone(-1.0,0.0);
    cuComplex mq = mone*q;
    return qpoch(mq,q*q);
}

__device__ cuComplex ramf(cuComplex a, cuComplex b) {
    cuComplex out(1.0,0.0);
    cuComplex mone(-1.0,0.0);
    cuComplex ma = mone*a;
    cuComplex mb = mone*b;
    return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}

// complex exponential
__device__ cuComplex expc(cuComplex m) {
    cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
    return out;
}

__device__ cuComplex powc(cuComplex ag, cuComplex bg) {
    cuComplex out(0.0,0.0);
    cuComplex mesp(0.0,0.0);
    cuComplex frim(0.0,0.0);
    double radiu, thet;
    /* get the proper polar form of the complex number */
    radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
    thet = atan2f(ag.i,ag.r);
    /* mesp gives R^(c+di) */
    mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
    mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
    /* frim gives e^(i theta (c+di)) */
    /* now since we already have the machinery for performing complex exponentiation (just exp),
       we can just call that here */
    frim.r = -1.0 * bg.i * thet;
    frim.i = bg.r * thet;
    frim = expc(frim);
    out = mesp*frim;
    return out;
}

// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m) {
    cuComplex ai(0.0,1.0);
    cuComplex ot(0.5,0.0);
    cuComplex mone(-1.0,0.0);
    cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
    return out;
}

__device__ cuComplex sins(cuComplex m) {
    cuComplex ai(0.0,1.0);
    cuComplex ot(0.0,0.5);
    cuComplex mone(-1.0,0.0);
    cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
    return out;
}

__device__ cuComplex tans(cuComplex m) {
    return sins(m)/cosc(m);
}

__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) {
    cuComplex out(0.0,0.0);
    cuComplex ai(0.0,1.0);
    cuComplex unity(1.0,0.0);
    out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
    return out;
}

__device__ cuComplex bnewt(cuComplex z) {
    cuComplex three(3.0,0.0);
    cuComplex unity(1.0,0.0);
    cuComplex out(0.0,0.0);
    cuComplex Z =z;
    cuComplex L(0.0,0.0);
    cuComplex R(0.62348980185873359,0.7818314824680298);
    cuComplex v(0.62348980185873359,0.7818314824680298);
    int i;
    for(i=0;i<100;i++) {
        L = sins(expc(Z)-cosc(Z))-Z;
        out = out + v*L;
        v = R * v;
        Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
    }
    return out;
}

__device__ cuComplex they3(cuComplex z, cuComplex q) {
    int u;
    cuComplex out(0.0,0.0);
    cuComplex enn(-20.0,0.0);
    cuComplex onn(1.0,0.0);
    cuComplex dui(0.0,1.0);
    for(u=-20;u<20;u++) {
        out = out + powc(q,enn*enn)*expc(dui*enn*z);
        enn = enn + onn;
    }
    return out;
}

__device__ cuComplex wahi(cuComplex z) {
    int u;
    cuComplex un(1.0,0.0);
    cuComplex ne(1.0,0.0);
    cuComplex out(0.0,0.0);
    for(u=1;u<40;u++) {
        out = out + powc(z/ne,ne);
        ne = ne + un;
    }
    out = out + un;
    return out;
}

__device__ cuComplex dwahi(cuComplex z) {
    int u;
    cuComplex un(1.0,0.0);
    cuComplex ne(1.0,0.0);
    cuComplex out(0.0,0.0);
    for(u=1;u<40;u++) {
        out = out + powc(z/ne,ne-un);
        ne = ne + un;
    }
    return out;
}

__device__ cuComplex they3p(cuComplex z, cuComplex q) {
    int u;
    cuComplex out(0.0,0.0);
    cuComplex enn(-20.0,0.0);
    cuComplex onn(1.0,0.0);
    cuComplex dui(0.0,1.0);
    for(u=-20;u<20;u++) {
        out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
        enn = enn + onn;
    }
    return out;
}

__device__ cuComplex h3ey3p(cuComplex z, cuComplex q) {
    int u;
    cuComplex out(0.0,0.0);
    cuComplex aut(0.0,0.0);
    cuComplex enn(-20.0,0.0);
    cuComplex onn(1.0,0.0);
    cuComplex dui(0.0,1.0);
    cuComplex vel(0.0,0.0);
    cuComplex rav(0.0,0.0);
    for(u=-40;u<40;u++) {
        vel = expc(dui*enn*z);
        rav = powc(q,enn*enn);
        aut = aut + (enn*enn)*rav/q*vel;
        out = out + rav*vel;
        enn = enn + onn;
    }
    return out/aut;
}

__device__ cuComplex thess(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    for(v=0;v<10;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
    }
    return out;
}

__device__ cuComplex the1(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    cuComplex rt(0.25,0.0);
    for(v=0;v<10;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
    }
    return tw*out*powc(q,rt)*sins(z);
}

__device__ cuComplex the2(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    cuComplex rt(0.25,0.0);
    for(v=0;v<10;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
    }
    return tw*out*powc(q,rt)*cosc(z);
}

__device__ cuComplex the3(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    for(v=0;v<10;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
    }
    return out;
}

__device__ cuComplex the4(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    for(v=0;v<10;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
    }
    return out;
}

/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q) {
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    out = (unity - powc(q, a))/(unity-q);
    return out;
}

/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q) {
    cuComplex out(0.0,0.0);
    cuComplex unity(1.0,0.0);
    cuComplex wu(0.0,0.0);
    cuComplex Z=unity;
    int v;
    for(v=0;v<20;v++) {
        out = out + qin(wu*wu,q)* Z;
        wu = wu + unity;
        Z = z * Z;
    }
    return out;
}

__device__ cuComplex thratd(cuComplex z, cuComplex q) {
    int n;
    cuComplex fau(4.0,0.0);
    cuComplex too(2.0,0.0);
    cuComplex unity(1.0,0.0);
    cuComplex ennn(1.0,0.0);
    cuComplex ni(-1.0,0.0);
    cuComplex noo(-1.0,0.0);
    cuComplex out(0.0,0.0);
    cuComplex loo = q;
    cuComplex qoo =q*q;
    for(n=0;n<80;n++) {
        out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
        qoo = qoo * q*q;
        loo = loo * q;
        ennn = ennn +unity;
        noo = ni * noo;
    }
    return out*fau;
}

__device__ cuComplex thess4(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    for(v=0;v<20;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
    }
    return out;
}

__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    cuComplex roo(1.0,0.0);
    for(v=0;v<20;v++) {
        qoo = qoo * q * q;
        roo = roo * r * r ;
        out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
    }
    return out;
}

__device__ cuComplex thass(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    for(v=0;v<20;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
    }
    return out;
}

__device__ cuComplex rogers( cuComplex q) {
    cuComplex onf(0.2,0.0);
    cuComplex Q5 = q*q*q*q*q;
    cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
    return out;
}

__device__ cuComplex flat(cuComplex m) {
    float ua = sqrtf(m.r*m.r + m.i*m.i);
    cuComplex out(m.r/ua,m.i/ua);
    return out;
}

__device__ cuComplex eff(cuComplex z, cuComplex lambda) {
    return z*z*z*z+ lambda/(z*z*z*z);
}

__device__ cuComplex thete(float R, cuComplex tau, cuComplex z) {
    /* note that as I'm not immediately doing this on the unit circle, as the real
       action is considered to happen on the z-plane, we don't yet need to fret about
       whether I'm looking at things in terms of tau or in terms of q, next revision */
    /* set accumulant to zero */
    cuComplex A(0.0,0.0);
    /* miscellaneous setup */
    cuComplex pai(3.14159265353898,0.0);
    cuComplex ai(0.0,1.0);
    cuComplex oo(1.0,0.0);
    cuComplex oot(2.0,0.0);
    cuComplex nini(9.0,0.0);
    cuComplex eigh(-18.0,0.0);
    /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
    cuComplex frann(1.0,0.0);
    frann = pai * ai * tau ;
    cuComplex shenn(1.0,0.0);
    shenn = oot * ai * z;
    cuComplex plenn(1.0,0.0);
    cuComplex enn(1.0,0.0);
    cuComplex ann(1.0,0.0);
    cuComplex bnn(1.0,0.0);
    cuComplex scrunn(1.0,0.0);
    float ca, cb,cc;
    int a, b;
    for(a=-10;a<10;a++) {
        ann.r = a;
        for(b=-10;b<10;b++) {
            bnn.r = b;
            if(((a+b)%2)==0) {
                scrunn.r = a*a + b*b;
                A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
            }
            else {
                ca = 5.0 + a*a + b*b;
                cb = 2*(a * cos(R)- b * sin(R));
                cc = 4*(b * cos(R)+a*sin(R));
                scrunn.r = ca + cb + cc;
                A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
            }
        }
    }
    return A;
}

__device__ cuComplex thetta(cuComplex tau, cuComplex z) {
    /* note that as I'm not immediately doing this on the unit circle, as the real
       action is considered to happen on the z-plane, we don't yet need to fret about
       whether I'm looking at things in terms of tau or in terms of q, next revision */
    /* set accumulant to zero */
    cuComplex A(0.0,0.0);
    /* miscellaneous setup */
    cuComplex pai(3.14159265353898,0.0);
    cuComplex ai(0.0,1.0);
    cuComplex oo(1.0,0.0);
    cuComplex oot(2.0,0.0);
    cuComplex nini(9.0,0.0);
    cuComplex eigh(-18.0,0.0);
    /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
    cuComplex frann(1.0,0.0);
    frann = pai * ai * tau ;
    cuComplex shenn(1.0,0.0);
    shenn = oot * ai * z;
    cuComplex plenn(1.0,0.0);
    cuComplex enn(1.0,0.0);
    int n;
    for(n=-10;n<10;n++) {
        enn.r = n;
        plenn = enn * enn;
        /* this get the cuComplex out of the event loop */
        A = A + expc(frann* plenn) * expc(shenn* enn);
    }
    return A;
}

__device__ cuComplex mitlef(cuComplex z,cuComplex c) {
    cuComplex out(0.0,0.0);
    cuComplex Z(1.0,0.0);
    cuComplex frove(0.0,0.0);
    int v;
    for(v=0;v<20;v++) {
        frove.r = tgammaf(c.r*v+c.i);
        out = out + Z/frove;
        Z = Z * z;
    }
    return out;
}

__device__ cuComplex helva(cuComplex z) {
    cuComplex out(j0f(z.r),j1f(z.i));
    return out;
}

__device__ cuComplex hilva(cuComplex z) {
    cuComplex out(j1f(z.r),j0f(z.i));
    return out;
}

__device__ cuComplex hinva(cuComplex z) {
    cuComplex out(j1f(z.r),j1f(z.i));
    return out;
}

__device__ cuComplex henga(cuComplex z) {
    cuComplex out(acoshf(z.r),asinhf(z.i));
    return out;
}

__device__ cuComplex holva(cuComplex z) {
    cuComplex out(y0f(z.r),y1f(z.i));
    return out;
}

__device__ cuComplex arago(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    for(v=0;v<10;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
    }
    return out;
}

__device__ cuComplex irigo(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    for(v=0;v<10;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
    }
    return out;
}

__device__ cuComplex urigo(cuComplex z, cuComplex q) {
    int v;
    cuComplex unity(1.0,0.0);
    cuComplex out(1.0,0.0);
    cuComplex tw(2.0,0.0);
    cuComplex qoo(1.0,0.0);
    for(v=0;v<10;v++) {
        qoo = qoo * q * q;
        out = out * (unity - qoo) * (unity + tw * qoo/q * (hilva(z)+helva(z)) + qoo*qoo/(q*q));
    }
    return out;
}

__device__ unsigned char clip(int n) {
    return n > 255 ? 255 : (n < 0 ? 0 : n);
}

__global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
    const int c = blockIdx.x*blockDim.x + threadIdx.x;
    const int r= blockIdx.y*blockDim.y + threadIdx.y;
    const int i = c + r*w; // 1D indexing
    float pi = 3.1415926535898;
    cuComplex ip(pi,0.0);
    const float scale = 5.0;
    float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
    float fy = scale * (float)(DIM/2 - r)/(DIM/2);
    cuComplex effx(fx,0.0);
    cuComplex effy(fy,0.0);
    float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
    float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
    cuComplex mouse(LA,LB);
    cuComplex moux(LA,0.0);
    cuComplex mouy(0.0,LB);
    cuComplex q(fx,fy);
    /* cuComplex tik(sin(ticks/40.0f),0.0);*/
    /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
       cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
       cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
    cuComplex fixon(.029348,.828934);
    cuComplex faxon(.029348,-.828934);
    cuComplex unity(1.0,0.0);
    cuComplex ai(0.0,1.0);
    cuComplex aon = expc(ai*moux);
    cuComplex uon= expc(mouy);
    cuComplex flurn(0.0,0.0);
    cuComplex accume(1.0,0.0);
    cuComplex eccume(0.0,0.0);
    cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
    cuComplex cue = q;
    cuComplex lam(0.73736887807831963, -0.67549029426152396);
    cuComplex due(3.0,0.0);
    cuComplex tir(2.0,0.0);
    cuComplex selga(3.5,0.0);
    cuComplex vro(-1.0,0.0);
    cuComplex tle(1.0,0.0);
    cuComplex sle(4.0,0.0);
    cuComplex cherra(0.62348980185873359, 0.7818314824680298);
    cuComplex lerra = cherra*cherra;
    cuComplex ferra = lerra * cherra;
    cuComplex terra = ferra * cherra;
    cuComplex zerra = terra * cherra;
    cuComplex nerra = zerra * cherra;
    cuComplex vlarv(1/3.0,0.0);
    cuComplex sugna(0.70710678118654757, 0.70710678118654746);
    cuComplex regna(0.99966573338968745, 0.025853848581176047);
    cuComplex spa(sqrtf(2.0),0.0);
    cuComplex spb(sqrtf(3.0),0.0);
    cuComplex spc(sqrtf(4.0),0.0);
    cuComplex spd(sqrtf(5.0),0.0);
    cuComplex mrun(1/2.0,0.0);
    cuComplex gloon (4.0,0.0);
    cuComplex plenod(-.01,0.0);
    cuComplex nue = cue;
    cuComplex bor(-10.0,0.0);
    cuComplex nat(0.0,-10.0);
    cuComplex rhus(1.0,0.0);
    cuComplex D(0.739085133215160641655312087674,0.0);
    /* if ((c >= w) || (r >= h)) return; // Check if within image bounds
       const int i = c + r*w; // 1D indexing
       const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y));
       const unsigned char intensity = clip(255 - dist);*/
    // theta function varying on constant
    // cue =thess(cue,fixon*mouse);
    int v=1;
    int axa=-10;
    /*while((v<100)&&norg(cue)<2.0)
    {
        cue = cue*(cue-mouy)*(cue-moux) -cue * q;
        v++;
    }*/
    for(v=0;v<10;v++) {
        cue = (cue -urigo(cue,uon*faxon))/((cue -powc(ai,uon)*urigo(cue,aon*faxon)));
        accume = accume * uon*urigo(cue,aon*faxon);
    }
    cue = accume;
    double tha;
    tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
    d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
    d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
    d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
    d_out[i].w = 255;
}

void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
    const dim3 blockSize(TX, TY);
    const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
    distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}

/*for(v=1;v<5;v++)
{
    cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
    accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/

/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/

/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/

/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
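The file above exposes a single host entry point, kernelLauncher, which tiles the output image into TX x TY blocks and launches distanceKernel once per frame. A minimal host driver might look like the sketch below. It is not part of the dataset entry: it assumes kernel.h declares kernelLauncher, the image size and cursor position are illustrative, and since the kernel does not bounds-check its row and column indices, the sketch keeps the width and height multiples of the block size.

// Hypothetical host driver for the entry above (illustrative only).
#include <cuda_runtime.h>
#include <cstdio>
#include "kernel.h"   // assumed to declare kernelLauncher(uchar4*, int, int, int2)

int main() {
    const int w = 2048, h = 2048;          // multiples of TX/TY so every thread maps to a pixel
    uchar4 *d_out = nullptr;
    cudaMalloc(&d_out, w * h * sizeof(uchar4));   // device image buffer written by distanceKernel

    int2 pos = make_int2(w / 2, h / 2);    // illustrative "mouse" position fed to the kernel
    kernelLauncher(d_out, w, h, pos);      // launches distanceKernel on a (w/TX) x (h/TY) grid
    cudaDeviceSynchronize();

    printf("frame rendered\n");
    cudaFree(d_out);
    return 0;
}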
2878fa1634fb2a9f4f9119c50200e84093085869.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gradient.h"
#include "util.h"

__global__ void g_l2norm(float * I, float *O, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    float sum = 0.f;
    if(nc > 1) {
        for(int c = 0; c < nc; c++) {
            sum += square(read_data(I, w, h, nc, x, y, c));
        }
        write_data(O, sqrtf(sum), w, h, x, y);
    } else {
        write_data(O, read_data(I, w, h, x, y), w, h, x, y);
    }
}

__global__ void g_l2norm(float * V_1, float * V_2, float *O, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    float sum = 0.f;
    for(int c = 0; c < nc; c++) {
        sum += square(read_data(V_1, w, h, nc, x, y, c));
        sum += square(read_data(V_2, w, h, nc, x, y, c));
    }
    write_data(O, sqrtf(sum), w, h, x, y);
}

__global__ void g_gradient(float *I, float *V_1, float *V_2, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    int c = threadIdx.z + blockDim.z * blockIdx.z;
    float diff;
    float field;
    if(x < w - 1 && y < h - 1 && c < nc) {
        field = read_data(I, w, h, nc, x, y, c);
        // calculate the gradient with forward difference
        diff = read_data(I, w, h, nc, x + 1, y, c) - field;
        write_data(V_1, diff, w, h, nc, x, y, c);
        diff = read_data(I, w, h, nc, x, y + 1, c) - field;
        write_data(V_2, diff, w, h, nc, x, y, c);
    }
}

__global__ void g_divergence(float * V_1, float *V_2, float *D, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    int c = threadIdx.z + blockDim.z * blockIdx.z;
    if(x < w && y < h ) {
        // Compute the divergence as the sum of backwards differences from the gradient parts
        // backward difference in x direction of first gradient part
        float div_1 = read_data(V_1, w, h, nc, x, y, c) - read_data(V_1, w, h, nc, x - 1, y, c);
        // backward difference in y direction of second gradient part
        float div_2 = read_data(V_2, w, h, nc, x, y, c) - read_data(V_2, w, h, nc, x, y - 1, c);
        write_data(D, div_1 + div_2, w, h, nc, x, y, c);
    }
}
2878fa1634fb2a9f4f9119c50200e84093085869.cu
#include "gradient.h"
#include "util.h"

__global__ void g_l2norm(float * I, float *O, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    float sum = 0.f;
    if(nc > 1) {
        for(int c = 0; c < nc; c++) {
            sum += square(read_data(I, w, h, nc, x, y, c));
        }
        write_data(O, sqrtf(sum), w, h, x, y);
    } else {
        write_data(O, read_data(I, w, h, x, y), w, h, x, y);
    }
}

__global__ void g_l2norm(float * V_1, float * V_2, float *O, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    float sum = 0.f;
    for(int c = 0; c < nc; c++) {
        sum += square(read_data(V_1, w, h, nc, x, y, c));
        sum += square(read_data(V_2, w, h, nc, x, y, c));
    }
    write_data(O, sqrtf(sum), w, h, x, y);
}

__global__ void g_gradient(float *I, float *V_1, float *V_2, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    int c = threadIdx.z + blockDim.z * blockIdx.z;
    float diff;
    float field;
    if(x < w - 1 && y < h - 1 && c < nc) {
        field = read_data(I, w, h, nc, x, y, c);
        // calculate the gradient with forward difference
        diff = read_data(I, w, h, nc, x + 1, y, c) - field;
        write_data(V_1, diff, w, h, nc, x, y, c);
        diff = read_data(I, w, h, nc, x, y + 1, c) - field;
        write_data(V_2, diff, w, h, nc, x, y, c);
    }
}

__global__ void g_divergence(float * V_1, float *V_2, float *D, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    int c = threadIdx.z + blockDim.z * blockIdx.z;
    if(x < w && y < h ) {
        // Compute the divergence as the sum of backwards differences from the gradient parts
        // backward difference in x direction of first gradient part
        float div_1 = read_data(V_1, w, h, nc, x, y, c) - read_data(V_1, w, h, nc, x - 1, y, c);
        // backward difference in y direction of second gradient part
        float div_2 = read_data(V_2, w, h, nc, x, y, c) - read_data(V_2, w, h, nc, x, y - 1, c);
        write_data(D, div_1 + div_2, w, h, nc, x, y, c);
    }
}
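Both halves of this pair index the colour channel through threadIdx.z, so the natural launch uses a 3D block with one z-slice per channel. The sketch below shows one plausible host-side launch; it is not part of the dataset entry, it assumes gradient.h declares the kernels and util.h provides read_data, write_data and square, and the block shape assumes a small channel count (nc of at most 4, so the block stays within 1024 threads).

// Hypothetical launch sketch for the gradient/divergence kernels (illustrative only).
#include <cuda_runtime.h>
#include "gradient.h"   // assumed to declare g_gradient and g_divergence

static void run_gradient(float *d_I, float *d_V1, float *d_V2, float *d_D,
                         int w, int h, int nc) {
    dim3 block(32, 8, nc);                          // z dimension covers the channels
    dim3 grid((w + block.x - 1) / block.x,
              (h + block.y - 1) / block.y,
              1);
    g_gradient<<<grid, block>>>(d_I, d_V1, d_V2, w, h, nc);     // forward differences
    g_divergence<<<grid, block>>>(d_V1, d_V2, d_D, w, h, nc);   // backward differences
    cudaDeviceSynchronize();
}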
1946fee93c5ca44be037779fa94dc81d41bdec62.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <math_constants.h>
#include "pushRelabelGPU.h"

#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true) {
    if (code != hipSuccess) {
        fprintf(stderr, "CUDA Error: %s at %s:%d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#else
#define cudaCheckError(ans) ans
#endif

#define IDX(i, j, n) ((i) * (n) + (j))
#define UPDIV(n, d) (((n) + (d) - 1) / (d))

static dim3 threadsPerBlock(1024, 1, 1);

__global__ void pushRelabelLockFreeKernel(int *residualFlow, int *height, int *excessFlow,
                                          int *netFlowOutS, int *netFlowInT, int s, int t, int n) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int u = index;
    if (u >= s) {
        u++;
    }
    if (u >= t) {
        u++;
    }
    // one thread here for all vertices not s or t
    while (*netFlowOutS != *netFlowInT) {
        if (u < n && excessFlow[u] > 0) {
            int curExcess = excessFlow[u];
            int curLowestNeighbor = -1;
            int neighborMinHeight = (int) CUDART_INF;
            for (int v = 0; v < n; v++) {
                if (u == v) continue;
                if (residualFlow[IDX(u, v, n)] > 0) {
                    int tempHeight = height[v];
                    if (tempHeight < neighborMinHeight) {
                        curLowestNeighbor = v;
                        neighborMinHeight = tempHeight;
                    }
                }
            }
            if (height[u] > neighborMinHeight) {
                int delta = min(curExcess, residualFlow[IDX(u, curLowestNeighbor, n)]);
                atomicSub(&residualFlow[IDX(u, curLowestNeighbor, n)], delta);
                atomicAdd(&residualFlow[IDX(curLowestNeighbor, u, n)], delta);
                atomicSub(&excessFlow[u], delta);
                atomicAdd(&excessFlow[curLowestNeighbor], delta);
                if (curLowestNeighbor == s) {
                    atomicSub(netFlowOutS, delta);
                } else if (curLowestNeighbor == t) {
                    atomicAdd(netFlowInT, delta);
                }
            } else {
                height[u] = neighborMinHeight + 1;
            }
        }
    }
}

// Push-relabel algorithm to find max s-t flow. Based on lock-free implementation
// specified by Bo Hong. Uses one CUDA thread per vertex.
Flow *pushRelabelLockFreeGPU(Graph *g, int s, int t) {
    int *residualFlow;
    int *height;
    int *excessFlow;
    int *netFlowOutS;
    int *netFlowInT;
    int *tempHeights = (int *)calloc(g->n, sizeof(int));
    int *tempExcessFlows = (int *)calloc(g->n, sizeof(int));
    int *finalFlow = (int *)malloc((g->n * g->n) * sizeof(int));
    memcpy(finalFlow, g->capacities, (g->n * g->n) * sizeof(int));
    cudaCheckError(hipMalloc((void **)&residualFlow, sizeof(int) * (g->n * g->n)));
    cudaCheckError(hipMalloc((void **)&height, sizeof(int) * g->n));
    cudaCheckError(hipMalloc((void **)&excessFlow, sizeof(int) * g->n));
    cudaCheckError(hipMalloc((void **)&netFlowOutS, sizeof(int)));
    cudaCheckError(hipMalloc((void **)&netFlowInT, sizeof(int)));

    // initialize preflow
    int flowOutS = 0;
    int flowInT = 0;
    tempHeights[s] = g->n;
    #pragma omp parallel for reduction(+:flowOutS)
    for (int v = 0; v < g->n; v++) {
        int cap = g->capacities[IDX(s, v, g->n)];
        if (cap > 0 && (s != v)) {
            finalFlow[IDX(s, v, g->n)] = 0;
            finalFlow[IDX(v, s, g->n)] += cap;
            flowOutS += cap;
            tempExcessFlows[v] = cap;
            if (v == t) {
                flowInT += cap;
            }
        }
    }
    cudaCheckError(hipMemcpy(residualFlow, finalFlow, sizeof(int) * (g->n * g->n), hipMemcpyHostToDevice));
    cudaCheckError(hipMemcpy(height, tempHeights, sizeof(int) * g->n, hipMemcpyHostToDevice));
    cudaCheckError(hipMemcpy(excessFlow, tempExcessFlows, sizeof(int) * g->n, hipMemcpyHostToDevice));
    cudaCheckError(hipMemcpy(netFlowInT, &flowInT, sizeof(int), hipMemcpyHostToDevice));
    cudaCheckError(hipMemcpy(netFlowOutS, &flowOutS, sizeof(int), hipMemcpyHostToDevice));

    int numBlocks = UPDIV((g->n - 2), threadsPerBlock.x);
    hipLaunchKernelGGL(( pushRelabelLockFreeKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, residualFlow, height, excessFlow, netFlowOutS, netFlowInT, s, t, g->n);

    free(tempHeights);
    free(tempExcessFlows);
    cudaCheckError(hipDeviceSynchronize());
    cudaCheckError(hipMemcpy(finalFlow, residualFlow, sizeof(int) * (g->n * g->n), hipMemcpyDeviceToHost));
    cudaCheckError(hipMemcpy(&flowInT, netFlowInT, sizeof(int), hipMemcpyDeviceToHost));

    // now update flow to represent actual flow
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < (g->n * g-> n); i++) {
        finalFlow[i] = g->capacities[i] - finalFlow[i];
    }

    Flow *result = (Flow *)malloc(sizeof(Flow));
    result->maxFlow = flowInT;
    result->finalEdgeFlows = finalFlow;
    cudaCheckError(hipFree(residualFlow));
    cudaCheckError(hipFree(height));
    cudaCheckError(hipFree(excessFlow));
    cudaCheckError(hipFree(netFlowOutS));
    cudaCheckError(hipFree(netFlowInT));
    return result;
}
1946fee93c5ca44be037779fa94dc81d41bdec62.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <omp.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include <math_constants.h>
#include "pushRelabelGPU.h"

#ifdef DEBUG
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "CUDA Error: %s at %s:%d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#else
#define cudaCheckError(ans) ans
#endif

#define IDX(i, j, n) ((i) * (n) + (j))
#define UPDIV(n, d) (((n) + (d) - 1) / (d))

static dim3 threadsPerBlock(1024, 1, 1);

__global__ void pushRelabelLockFreeKernel(int *residualFlow, int *height, int *excessFlow,
                                          int *netFlowOutS, int *netFlowInT, int s, int t, int n) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int u = index;
    if (u >= s) {
        u++;
    }
    if (u >= t) {
        u++;
    }
    // one thread here for all vertices not s or t
    while (*netFlowOutS != *netFlowInT) {
        if (u < n && excessFlow[u] > 0) {
            int curExcess = excessFlow[u];
            int curLowestNeighbor = -1;
            int neighborMinHeight = (int) CUDART_INF;
            for (int v = 0; v < n; v++) {
                if (u == v) continue;
                if (residualFlow[IDX(u, v, n)] > 0) {
                    int tempHeight = height[v];
                    if (tempHeight < neighborMinHeight) {
                        curLowestNeighbor = v;
                        neighborMinHeight = tempHeight;
                    }
                }
            }
            if (height[u] > neighborMinHeight) {
                int delta = min(curExcess, residualFlow[IDX(u, curLowestNeighbor, n)]);
                atomicSub(&residualFlow[IDX(u, curLowestNeighbor, n)], delta);
                atomicAdd(&residualFlow[IDX(curLowestNeighbor, u, n)], delta);
                atomicSub(&excessFlow[u], delta);
                atomicAdd(&excessFlow[curLowestNeighbor], delta);
                if (curLowestNeighbor == s) {
                    atomicSub(netFlowOutS, delta);
                } else if (curLowestNeighbor == t) {
                    atomicAdd(netFlowInT, delta);
                }
            } else {
                height[u] = neighborMinHeight + 1;
            }
        }
    }
}

// Push-relabel algorithm to find max s-t flow. Based on lock-free implementation
// specified by Bo Hong. Uses one CUDA thread per vertex.
Flow *pushRelabelLockFreeGPU(Graph *g, int s, int t) {
    int *residualFlow;
    int *height;
    int *excessFlow;
    int *netFlowOutS;
    int *netFlowInT;
    int *tempHeights = (int *)calloc(g->n, sizeof(int));
    int *tempExcessFlows = (int *)calloc(g->n, sizeof(int));
    int *finalFlow = (int *)malloc((g->n * g->n) * sizeof(int));
    memcpy(finalFlow, g->capacities, (g->n * g->n) * sizeof(int));
    cudaCheckError(cudaMalloc((void **)&residualFlow, sizeof(int) * (g->n * g->n)));
    cudaCheckError(cudaMalloc((void **)&height, sizeof(int) * g->n));
    cudaCheckError(cudaMalloc((void **)&excessFlow, sizeof(int) * g->n));
    cudaCheckError(cudaMalloc((void **)&netFlowOutS, sizeof(int)));
    cudaCheckError(cudaMalloc((void **)&netFlowInT, sizeof(int)));

    // initialize preflow
    int flowOutS = 0;
    int flowInT = 0;
    tempHeights[s] = g->n;
    #pragma omp parallel for reduction(+:flowOutS)
    for (int v = 0; v < g->n; v++) {
        int cap = g->capacities[IDX(s, v, g->n)];
        if (cap > 0 && (s != v)) {
            finalFlow[IDX(s, v, g->n)] = 0;
            finalFlow[IDX(v, s, g->n)] += cap;
            flowOutS += cap;
            tempExcessFlows[v] = cap;
            if (v == t) {
                flowInT += cap;
            }
        }
    }
    cudaCheckError(cudaMemcpy(residualFlow, finalFlow, sizeof(int) * (g->n * g->n), cudaMemcpyHostToDevice));
    cudaCheckError(cudaMemcpy(height, tempHeights, sizeof(int) * g->n, cudaMemcpyHostToDevice));
    cudaCheckError(cudaMemcpy(excessFlow, tempExcessFlows, sizeof(int) * g->n, cudaMemcpyHostToDevice));
    cudaCheckError(cudaMemcpy(netFlowInT, &flowInT, sizeof(int), cudaMemcpyHostToDevice));
    cudaCheckError(cudaMemcpy(netFlowOutS, &flowOutS, sizeof(int), cudaMemcpyHostToDevice));

    int numBlocks = UPDIV((g->n - 2), threadsPerBlock.x);
    pushRelabelLockFreeKernel<<<numBlocks, threadsPerBlock>>>(residualFlow, height, excessFlow, netFlowOutS, netFlowInT, s, t, g->n);

    free(tempHeights);
    free(tempExcessFlows);
    cudaCheckError(cudaThreadSynchronize());
    cudaCheckError(cudaMemcpy(finalFlow, residualFlow, sizeof(int) * (g->n * g->n), cudaMemcpyDeviceToHost));
    cudaCheckError(cudaMemcpy(&flowInT, netFlowInT, sizeof(int), cudaMemcpyDeviceToHost));

    // now update flow to represent actual flow
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < (g->n * g-> n); i++) {
        finalFlow[i] = g->capacities[i] - finalFlow[i];
    }

    Flow *result = (Flow *)malloc(sizeof(Flow));
    result->maxFlow = flowInT;
    result->finalEdgeFlows = finalFlow;
    cudaCheckError(cudaFree(residualFlow));
    cudaCheckError(cudaFree(height));
    cudaCheckError(cudaFree(excessFlow));
    cudaCheckError(cudaFree(netFlowOutS));
    cudaCheckError(cudaFree(netFlowInT));
    return result;
}
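pushRelabelLockFreeGPU takes a Graph holding a vertex count n and a row-major n*n capacity matrix, and returns a Flow with the max-flow value and the per-edge flows. The sketch below shows one way to call it on a four-vertex graph; it is not part of the dataset entry and assumes Graph and Flow are plain structs with exactly the members used in the code above.

// Hypothetical caller for the solver above (illustrative only).
#include <stdio.h>
#include <stdlib.h>
#include "pushRelabelGPU.h"   // assumed to define Graph { n, capacities } and Flow

int main() {
    int n = 4;
    Graph g;
    g.n = n;
    g.capacities = (int *)calloc(n * n, sizeof(int));
    g.capacities[0 * n + 1] = 3;   // source 0 -> 1
    g.capacities[0 * n + 2] = 2;   // source 0 -> 2
    g.capacities[1 * n + 3] = 2;   // 1 -> sink 3
    g.capacities[2 * n + 3] = 3;   // 2 -> sink 3
    Flow *f = pushRelabelLockFreeGPU(&g, 0, 3);
    printf("max flow = %d\n", f->maxFlow);   // expected 4 for these capacities
    free(f->finalEdgeFlows);
    free(f);
    free(g.capacities);
    return 0;
}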
73604210fe64055116c500c628ca0f26fc293e70.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
73604210fe64055116c500c628ca0f26fc293e70.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
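In this last pair the two files contain only includes, which makes the translation easy to read off: both <cuda.h> and <cuda_runtime.h> are rewritten to <hip/hip_runtime.h>, which is why the HIP version carries the same include twice, while <driver_functions.h> is left as-is. Schematically (a summary of the pair above, not additional source):

// CUDA original                       // hipify output
// #include <cuda.h>               ->  #include <hip/hip_runtime.h>
// #include <cuda_runtime.h>       ->  #include <hip/hip_runtime.h>
// #include <driver_functions.h>   ->  #include <driver_functions.h>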