hip_filename: string (length 5–84)
hip_content: string (length 79–9.69M)
cuda_filename: string (length 4–83)
cuda_content: string (length 19–9.69M)
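Each record pairs a hipify-generated HIP source file with its original CUDA counterpart. As a minimal sketch of how one row might be modeled in C++ (the struct and field names below are illustrative assumptions that mirror the column names above, not part of the dataset itself):

#include <string>

// Hypothetical container for a single record of this corpus.
// Comments restate the published length bounds for each column.
struct HipCudaPair {
    std::string hip_filename;   // e.g. "<hash>.hip", 5–84 characters
    std::string hip_content;    // hipify-generated HIP source, 79 B – 9.69 MB
    std::string cuda_filename;  // e.g. "<hash>.cu", 4–83 characters
    std::string cuda_content;   // original CUDA source, 19 B – 9.69 MB
};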
9de93b78989e7176d4e62e551544066d589711c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda_runtime.h> #include<device_launch_parameters.h> __global__ void add(int *a, int *b, int m){ int id=blockIdx.x*blockDim.x+threadIdx.x; // c[id]=a[id]+b[id]; // printf("id: %d m: %d ", id, m); for (int i = 0; i < m; ++i){ b[id*m + i] = powf(a[id*m + i], id+1); // printf("index %d element %d\n", id*m + i, a[id*m + i]); } } int main() { int a[100], b[100],n, m; printf("Enter n: "); scanf("%d",&n); printf("Enter m: "); scanf("%d",&m); printf("Enter Matrix:\n"); for(int i=0;i<n*m;i++) scanf("%d",&a[i]); int *d_a,*d_b; int size=sizeof(int)*n*m; hipMalloc((void**)&d_a,size); hipMalloc((void**)&d_b,size); hipMemcpy(d_a,&a,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3(n),dim3(1), 0, 0, d_a, d_b, m); hipMemcpy(&b,d_b,size,hipMemcpyDeviceToHost); for(int i=0;i<n*m;i++){ if (i % m == 0) { printf("\n"); } printf("%d ",b[i]); } printf("\n"); hipFree(d_a); hipFree(d_b); }
9de93b78989e7176d4e62e551544066d589711c2.cu
#include<stdio.h> #include<cuda_runtime.h> #include<device_launch_parameters.h> __global__ void add(int *a, int *b, int m){ int id=blockIdx.x*blockDim.x+threadIdx.x; // c[id]=a[id]+b[id]; // printf("id: %d m: %d ", id, m); for (int i = 0; i < m; ++i){ b[id*m + i] = powf(a[id*m + i], id+1); // printf("index %d element %d\n", id*m + i, a[id*m + i]); } } int main() { int a[100], b[100],n, m; printf("Enter n: "); scanf("%d",&n); printf("Enter m: "); scanf("%d",&m); printf("Enter Matrix:\n"); for(int i=0;i<n*m;i++) scanf("%d",&a[i]); int *d_a,*d_b; int size=sizeof(int)*n*m; cudaMalloc((void**)&d_a,size); cudaMalloc((void**)&d_b,size); cudaMemcpy(d_a,&a,size,cudaMemcpyHostToDevice); add<<<n,1>>>(d_a, d_b, m); cudaMemcpy(&b,d_b,size,cudaMemcpyDeviceToHost); for(int i=0;i<n*m;i++){ if (i % m == 0) { printf("\n"); } printf("%d ",b[i]); } printf("\n"); cudaFree(d_a); cudaFree(d_b); }
a110f38674ae70c72f77221fb3c5faa8f3afac61.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/arg_where_kernel_util.h" #include "oneflow/core/common/nd_index_offset_helper.h" #include "oneflow/core/common/fixed_vector.h" #include <hipcub/hipcub.hpp> namespace oneflow { namespace { constexpr int kFlatIndexToNdIndexProposedLaunchBlocks = 128; template<typename T, size_t NDims> struct StrideIterator { typedef StrideIterator self_type; typedef std::ptrdiff_t difference_type; typedef T value_type; typedef T* pointer; typedef T& reference; typedef std::random_access_iterator_tag iterator_category; explicit StrideIterator(T* ptr, size_t max_iters) : ptr_(ptr), max_iters_(max_iters) {} OF_DEVICE_FUNC reference operator[](int i) { assert(0 <= i && i < max_iters_); return *(ptr_ + (i * NDims)); } private: T* ptr_; size_t max_iters_; }; template<typename T, size_t NDims> __global__ void CudaOffsetToNdIndexInplace(NdIndexOffsetHelper<T, NDims> index_converter, const T* num_indices_ptr, T* indices_ptr) { CUDA_1D_KERNEL_LOOP_T(T, i, *num_indices_ptr) { T* cur_indices_ptr = indices_ptr + i * NDims; index_converter.OffsetToNdIndex(*cur_indices_ptr, cur_indices_ptr); } } template<typename T> struct IsTrue { OF_DEVICE_FUNC bool operator()(const T& val) const { return static_cast<bool>(val); } }; template<typename T, typename I, typename Iter> hipError_t SelectTrue(hipStream_t stream, int num_items, void* tmp, size_t& tmp_bytes, const T* flags, Iter out_iter, I* num_selected) { IsTrue<T> is_true; hipcub::TransformInputIterator<bool, IsTrue<T>, const T*> flag_iter(flags, is_true); hipcub::CountingInputIterator<I> offset_counter(0); return hipcub::DeviceSelect::Flagged(tmp, tmp_bytes, offset_counter, flag_iter, out_iter, num_selected, num_items, stream, false); } } // namespace template<typename T, typename I, size_t NDims> struct ArgWhereKernelUtil<DeviceType::kGPU, T, I, NDims> { static void ArgWhere(DeviceCtx* ctx, const ShapeView& in_shape, const T* in_ptr, void* tmp, size_t tmp_max_bytes, I* out_ptr, I* out_size_ptr) { if (in_shape.elem_cnt() == 0) { // deal with empty blob KernelUtil<DeviceType::kGPU, I>::Set(ctx, static_cast<I>(0), out_size_ptr); return; } CHECK_NOTNULL(ctx); CHECK_LE(in_shape.elem_cnt(), std::numeric_limits<I>::max()); size_t tmp_bytes = GetArgWhereWorkspaceSizeInBytes(ctx, in_shape.elem_cnt()); CHECK_LE(tmp_bytes, tmp_max_bytes); if (NDims == 1) { OF_CUDA_CHECK((SelectTrue<T, I, I*>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes, in_ptr, out_ptr, out_size_ptr))); } else { StrideIterator<I, NDims> out_iter(out_ptr, in_shape.elem_cnt()); OF_CUDA_CHECK( (SelectTrue<T, I, StrideIterator<I, NDims>>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes, in_ptr, out_iter, out_size_ptr))); fixed_vector<I, NDims> dims(NDims); std::transform(in_shape.ptr(), in_shape.ptr() + in_shape.NumAxes(), dims.begin(), [](int64_t dim) { return static_cast<I>(dim); }); NdIndexOffsetHelper<I, NDims> 
index_converter(dims.data(), dims.size()); hipLaunchKernelGGL(( CudaOffsetToNdIndexInplace<I, NDims>) , dim3(kFlatIndexToNdIndexProposedLaunchBlocks), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), index_converter, out_size_ptr, out_ptr); } } static size_t GetArgWhereWorkspaceSizeInBytes(DeviceCtx* ctx, int64_t n) { hipStream_t stream = ctx ? ctx->cuda_stream() : 0; size_t tmp_bytes = 0; if (NDims == 1) { OF_CUDA_CHECK( (SelectTrue<T, I, I*>(stream, n, nullptr, tmp_bytes, nullptr, nullptr, nullptr))); } else { StrideIterator<I, NDims> out_iter(nullptr, n); OF_CUDA_CHECK((SelectTrue<T, I, StrideIterator<I, NDims>>(stream, n, nullptr, tmp_bytes, nullptr, out_iter, nullptr))); } return tmp_bytes; } }; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ARG_WHERE_KERNEL_UTIL, (DeviceType::kGPU), ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) } // namespace oneflow
a110f38674ae70c72f77221fb3c5faa8f3afac61.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/arg_where_kernel_util.h" #include "oneflow/core/common/nd_index_offset_helper.h" #include "oneflow/core/common/fixed_vector.h" #include <cub/cub.cuh> namespace oneflow { namespace { constexpr int kFlatIndexToNdIndexProposedLaunchBlocks = 128; template<typename T, size_t NDims> struct StrideIterator { typedef StrideIterator self_type; typedef std::ptrdiff_t difference_type; typedef T value_type; typedef T* pointer; typedef T& reference; typedef std::random_access_iterator_tag iterator_category; explicit StrideIterator(T* ptr, size_t max_iters) : ptr_(ptr), max_iters_(max_iters) {} OF_DEVICE_FUNC reference operator[](int i) { assert(0 <= i && i < max_iters_); return *(ptr_ + (i * NDims)); } private: T* ptr_; size_t max_iters_; }; template<typename T, size_t NDims> __global__ void CudaOffsetToNdIndexInplace(NdIndexOffsetHelper<T, NDims> index_converter, const T* num_indices_ptr, T* indices_ptr) { CUDA_1D_KERNEL_LOOP_T(T, i, *num_indices_ptr) { T* cur_indices_ptr = indices_ptr + i * NDims; index_converter.OffsetToNdIndex(*cur_indices_ptr, cur_indices_ptr); } } template<typename T> struct IsTrue { OF_DEVICE_FUNC bool operator()(const T& val) const { return static_cast<bool>(val); } }; template<typename T, typename I, typename Iter> cudaError_t SelectTrue(cudaStream_t stream, int num_items, void* tmp, size_t& tmp_bytes, const T* flags, Iter out_iter, I* num_selected) { IsTrue<T> is_true; cub::TransformInputIterator<bool, IsTrue<T>, const T*> flag_iter(flags, is_true); cub::CountingInputIterator<I> offset_counter(0); return cub::DeviceSelect::Flagged(tmp, tmp_bytes, offset_counter, flag_iter, out_iter, num_selected, num_items, stream, false); } } // namespace template<typename T, typename I, size_t NDims> struct ArgWhereKernelUtil<DeviceType::kGPU, T, I, NDims> { static void ArgWhere(DeviceCtx* ctx, const ShapeView& in_shape, const T* in_ptr, void* tmp, size_t tmp_max_bytes, I* out_ptr, I* out_size_ptr) { if (in_shape.elem_cnt() == 0) { // deal with empty blob KernelUtil<DeviceType::kGPU, I>::Set(ctx, static_cast<I>(0), out_size_ptr); return; } CHECK_NOTNULL(ctx); CHECK_LE(in_shape.elem_cnt(), std::numeric_limits<I>::max()); size_t tmp_bytes = GetArgWhereWorkspaceSizeInBytes(ctx, in_shape.elem_cnt()); CHECK_LE(tmp_bytes, tmp_max_bytes); if (NDims == 1) { OF_CUDA_CHECK((SelectTrue<T, I, I*>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes, in_ptr, out_ptr, out_size_ptr))); } else { StrideIterator<I, NDims> out_iter(out_ptr, in_shape.elem_cnt()); OF_CUDA_CHECK( (SelectTrue<T, I, StrideIterator<I, NDims>>(ctx->cuda_stream(), in_shape.elem_cnt(), tmp, tmp_bytes, in_ptr, out_iter, out_size_ptr))); fixed_vector<I, NDims> dims(NDims); std::transform(in_shape.ptr(), in_shape.ptr() + in_shape.NumAxes(), dims.begin(), [](int64_t dim) { return static_cast<I>(dim); }); NdIndexOffsetHelper<I, NDims> index_converter(dims.data(), dims.size()); CudaOffsetToNdIndexInplace<I, NDims> 
<<<kFlatIndexToNdIndexProposedLaunchBlocks, kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(index_converter, out_size_ptr, out_ptr); } } static size_t GetArgWhereWorkspaceSizeInBytes(DeviceCtx* ctx, int64_t n) { cudaStream_t stream = ctx ? ctx->cuda_stream() : 0; size_t tmp_bytes = 0; if (NDims == 1) { OF_CUDA_CHECK( (SelectTrue<T, I, I*>(stream, n, nullptr, tmp_bytes, nullptr, nullptr, nullptr))); } else { StrideIterator<I, NDims> out_iter(nullptr, n); OF_CUDA_CHECK((SelectTrue<T, I, StrideIterator<I, NDims>>(stream, n, nullptr, tmp_bytes, nullptr, out_iter, nullptr))); } return tmp_bytes; } }; OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ARG_WHERE_KERNEL_UTIL, (DeviceType::kGPU), ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ) } // namespace oneflow
d4d5d72aa16a7c2fdd661d513f29ecdbdfe3d5de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <signal.h> // There are ways to get this data but I'm too lazy #define CUDA_CORES 384 //#define N 7 #define N 606 #define ITERATIONS 864000 // http://www.wolframalpha.com/input/?i=gravitational+constant+in+km%5E3%2F%28Yg+*+s%5E2%29 #define GRAVITATIONAL_CONSTANT 66.7 // km^3 / (Yg * s^2) //#define GRAVITATIONAL_CONSTANT 240300.0 // km^3 / (Yg * min^2) #define TIME_STEP 0.1 #define SAVE_STEP 50 volatile sig_atomic_t kill_flag = 0; // if the program gets killed, flag for the main loop void set_kill_flag(int sig){ // can be called asynchronously kill_flag = 1; // set flag } void random_ints(int* a, int num) { int i; for(i = 0; i < num; ++i) { a[i] = rand(); } } void random_doubles(double* a, int num, double multiplier) { int i; for(i = 0; i < num; i++) { a[i] = (double)rand() / (double)RAND_MAX * multiplier; } } void random_double4s(double4* a, int num, double m0, double m1, double m2, double m3) { int i; for(i = 0; i < num; i++) { a[i].x = ((double)rand() / (double)RAND_MAX - 0.5) * m0; a[i].y = ((double)rand() / (double)RAND_MAX - 0.5) * m1; a[i].z = ((double)rand() / (double)RAND_MAX - 0.5) * m2; a[i].w = ((double)rand() / (double)RAND_MAX) * m3; } } void load_initial_data(double4 *in_pos, double4 *in_vel, int num_particles) { FILE *ifp; char *mode = "r"; ifp = fopen("input.csv", mode); double w, x, y, z, xv, yv, zv; if(ifp == NULL) fprintf(stderr, "OH NO! No file!\n"); for(int i = 0; i < num_particles; i++) { fscanf(ifp, "%lf, %lf, %lf, %lf, %lf, %lf, %lf", &w, &x, &y, &z, &xv, &yv, &zv); in_pos[i].w = w; in_pos[i].x = x; in_pos[i].y = y; in_pos[i].z = z; in_vel[i].w = 0.0; in_vel[i].x = xv; in_vel[i].y = yv; in_vel[i].z = zv; printf("%g, %g, %g, %g, %g, %g, %g\n", w, x, y,z, xv, yv, zv); } fclose(ifp); } void save_continue_csv(const char *filename, double4 *poss, double4 *vels) { FILE *next_input = fopen(filename, "w"); for(int j = 0; j < N; j++) fprintf(next_input, "%g,%g,%g,%g,%g,%g,%g\n", poss[j].w, poss[j].x, poss[j].y, poss[j].z, vels[j].x, vels[j].y, vels[j].z); fclose(next_input); printf("Saved."); } __device__ double3 interaction(double4 body_a, double4 body_b, double3 accel) { double3 r; r.x = body_b.x - body_a.x; r.y = body_b.y - body_a.y; r.z = body_b.z - body_a.z; double dist_sq = r.x * r.x + r.y * r.y + r.z * r.z + 4e6; //dist_sq += 4e6; // softening factor double inv_dist = rsqrt(dist_sq); double inv_dist_cube = inv_dist * inv_dist * inv_dist; double accel_total = GRAVITATIONAL_CONSTANT * body_b.w * inv_dist_cube; accel.x += r.x * accel_total; accel.y += r.y * accel_total; accel.z += r.z * accel_total; return accel; } __device__ double3 tile_calculation(double4 body_a, double3 accel) { int i; extern __shared__ double4 shared_positions[]; //__shared__ double4 shared_positions[N]; //double4 *shared_positions = SharedMemory(); #pragma unroll 128 for(i = 0; i < blockDim.x; i++) { accel = interaction(body_a, shared_positions[i], accel); } return accel; } __device__ double4 calculate_accel(double4 *positions, int num_tiles, int num_particles) { extern __shared__ double4 shared_positions[]; double4 cur_body; // current block's body int tile; double3 accel = {0.0, 0.0, 0.0}; int gtid = blockIdx.x * blockDim.x + threadIdx.x; cur_body = positions[gtid]; for(tile = 0; tile < num_tiles; tile++) { int idx = tile * blockDim.x + threadIdx.x; shared_positions[threadIdx.x] = positions[idx]; __syncthreads(); #pragma unroll 128 for(int counter = 0; counter < blockDim.x; 
counter++) { accel = interaction(cur_body, shared_positions[counter], accel); } __syncthreads(); } double4 accel4 = {accel.x, accel.y, accel.z, 0.0}; return accel4; } __global__ void integrate(double4 *positions, double4 *vels, int num_tiles, int num_particles) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index >= num_particles) { return; } double4 position = positions[index]; double4 accel = calculate_accel(positions, num_tiles, num_particles); double4 velocity = vels[index]; velocity.x += accel.x * TIME_STEP; velocity.y += accel.y * TIME_STEP; velocity.z += accel.z * TIME_STEP; position.x += velocity.x * TIME_STEP; position.y += velocity.y * TIME_STEP; position.z += velocity.z * TIME_STEP; __syncthreads(); positions[index] = position; vels[index] = velocity; } int main(int argc, char *argv[]) { signal(SIGINT, set_kill_flag); int num_particles = N; int block_size = num_particles; int num_blocks = (num_particles + block_size-1) / block_size; int num_tiles = (num_particles + block_size - 1) / block_size; int shared_mem_size = block_size * 4 * sizeof(double); // 4 floats for pos double4 *positions, *vels; double4 *dev_positions, *dev_vels; int size = N * sizeof(double4); hipMalloc((void**)&dev_positions, size); hipMalloc((void**)&dev_vels, size); positions = (double4*)malloc(size); vels = (double4*)malloc(size); //int seed = time(NULL); //srand(seed); //random_double4s(positions, N, 6e8, 6e8, 6e3, 11.6 * 2.0); //random_double4s(vels, N, 0.5e2, 0.5e2, 0.1, 0.0); load_initial_data(positions, vels, N); hipMemcpy(dev_positions, positions, size, hipMemcpyHostToDevice); hipMemcpy(dev_vels, vels, size, hipMemcpyHostToDevice); FILE *fp = fopen("output.csv", "w"); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( integrate), dim3(num_blocks), dim3(block_size), shared_mem_size, 0, dev_positions, dev_vels, num_tiles, num_particles); hipMemcpy(positions, dev_positions, size, hipMemcpyDeviceToHost); hipMemcpy(vels, dev_vels, size, hipMemcpyDeviceToHost); if(i % SAVE_STEP == 0) { printf("%.2f\n", (double)i * 100.0 / (double)ITERATIONS); for(int j = 0; j < N; j++) fprintf(fp, "%g,%g,%g,%g,%g,%g\n", positions[j].x, positions[j].y, positions[j].z, vels[j].x, vels[j].y, vels[j].z); } if(kill_flag) { break; } } fclose(fp); if(kill_flag) { save_continue_csv("recovered-input.csv", positions, vels); } else { save_continue_csv("next-input.csv", positions, vels); } hipFree(dev_positions); hipFree(dev_vels); free(positions); free(vels); return 0; }
d4d5d72aa16a7c2fdd661d513f29ecdbdfe3d5de.cu
#include <stdio.h> #include <signal.h> // There are ways to get this data but I'm too lazy #define CUDA_CORES 384 //#define N 7 #define N 606 #define ITERATIONS 864000 // http://www.wolframalpha.com/input/?i=gravitational+constant+in+km%5E3%2F%28Yg+*+s%5E2%29 #define GRAVITATIONAL_CONSTANT 66.7 // km^3 / (Yg * s^2) //#define GRAVITATIONAL_CONSTANT 240300.0 // km^3 / (Yg * min^2) #define TIME_STEP 0.1 #define SAVE_STEP 50 volatile sig_atomic_t kill_flag = 0; // if the program gets killed, flag for the main loop void set_kill_flag(int sig){ // can be called asynchronously kill_flag = 1; // set flag } void random_ints(int* a, int num) { int i; for(i = 0; i < num; ++i) { a[i] = rand(); } } void random_doubles(double* a, int num, double multiplier) { int i; for(i = 0; i < num; i++) { a[i] = (double)rand() / (double)RAND_MAX * multiplier; } } void random_double4s(double4* a, int num, double m0, double m1, double m2, double m3) { int i; for(i = 0; i < num; i++) { a[i].x = ((double)rand() / (double)RAND_MAX - 0.5) * m0; a[i].y = ((double)rand() / (double)RAND_MAX - 0.5) * m1; a[i].z = ((double)rand() / (double)RAND_MAX - 0.5) * m2; a[i].w = ((double)rand() / (double)RAND_MAX) * m3; } } void load_initial_data(double4 *in_pos, double4 *in_vel, int num_particles) { FILE *ifp; char *mode = "r"; ifp = fopen("input.csv", mode); double w, x, y, z, xv, yv, zv; if(ifp == NULL) fprintf(stderr, "OH NO! No file!\n"); for(int i = 0; i < num_particles; i++) { fscanf(ifp, "%lf, %lf, %lf, %lf, %lf, %lf, %lf", &w, &x, &y, &z, &xv, &yv, &zv); in_pos[i].w = w; in_pos[i].x = x; in_pos[i].y = y; in_pos[i].z = z; in_vel[i].w = 0.0; in_vel[i].x = xv; in_vel[i].y = yv; in_vel[i].z = zv; printf("%g, %g, %g, %g, %g, %g, %g\n", w, x, y,z, xv, yv, zv); } fclose(ifp); } void save_continue_csv(const char *filename, double4 *poss, double4 *vels) { FILE *next_input = fopen(filename, "w"); for(int j = 0; j < N; j++) fprintf(next_input, "%g,%g,%g,%g,%g,%g,%g\n", poss[j].w, poss[j].x, poss[j].y, poss[j].z, vels[j].x, vels[j].y, vels[j].z); fclose(next_input); printf("Saved."); } __device__ double3 interaction(double4 body_a, double4 body_b, double3 accel) { double3 r; r.x = body_b.x - body_a.x; r.y = body_b.y - body_a.y; r.z = body_b.z - body_a.z; double dist_sq = r.x * r.x + r.y * r.y + r.z * r.z + 4e6; //dist_sq += 4e6; // softening factor double inv_dist = rsqrt(dist_sq); double inv_dist_cube = inv_dist * inv_dist * inv_dist; double accel_total = GRAVITATIONAL_CONSTANT * body_b.w * inv_dist_cube; accel.x += r.x * accel_total; accel.y += r.y * accel_total; accel.z += r.z * accel_total; return accel; } __device__ double3 tile_calculation(double4 body_a, double3 accel) { int i; extern __shared__ double4 shared_positions[]; //__shared__ double4 shared_positions[N]; //double4 *shared_positions = SharedMemory(); #pragma unroll 128 for(i = 0; i < blockDim.x; i++) { accel = interaction(body_a, shared_positions[i], accel); } return accel; } __device__ double4 calculate_accel(double4 *positions, int num_tiles, int num_particles) { extern __shared__ double4 shared_positions[]; double4 cur_body; // current block's body int tile; double3 accel = {0.0, 0.0, 0.0}; int gtid = blockIdx.x * blockDim.x + threadIdx.x; cur_body = positions[gtid]; for(tile = 0; tile < num_tiles; tile++) { int idx = tile * blockDim.x + threadIdx.x; shared_positions[threadIdx.x] = positions[idx]; __syncthreads(); #pragma unroll 128 for(int counter = 0; counter < blockDim.x; counter++) { accel = interaction(cur_body, shared_positions[counter], accel); } __syncthreads(); 
} double4 accel4 = {accel.x, accel.y, accel.z, 0.0}; return accel4; } __global__ void integrate(double4 *positions, double4 *vels, int num_tiles, int num_particles) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index >= num_particles) { return; } double4 position = positions[index]; double4 accel = calculate_accel(positions, num_tiles, num_particles); double4 velocity = vels[index]; velocity.x += accel.x * TIME_STEP; velocity.y += accel.y * TIME_STEP; velocity.z += accel.z * TIME_STEP; position.x += velocity.x * TIME_STEP; position.y += velocity.y * TIME_STEP; position.z += velocity.z * TIME_STEP; __syncthreads(); positions[index] = position; vels[index] = velocity; } int main(int argc, char *argv[]) { signal(SIGINT, set_kill_flag); int num_particles = N; int block_size = num_particles; int num_blocks = (num_particles + block_size-1) / block_size; int num_tiles = (num_particles + block_size - 1) / block_size; int shared_mem_size = block_size * 4 * sizeof(double); // 4 floats for pos double4 *positions, *vels; double4 *dev_positions, *dev_vels; int size = N * sizeof(double4); cudaMalloc((void**)&dev_positions, size); cudaMalloc((void**)&dev_vels, size); positions = (double4*)malloc(size); vels = (double4*)malloc(size); //int seed = time(NULL); //srand(seed); //random_double4s(positions, N, 6e8, 6e8, 6e3, 11.6 * 2.0); //random_double4s(vels, N, 0.5e2, 0.5e2, 0.1, 0.0); load_initial_data(positions, vels, N); cudaMemcpy(dev_positions, positions, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_vels, vels, size, cudaMemcpyHostToDevice); FILE *fp = fopen("output.csv", "w"); for(int i = 0; i < ITERATIONS; i++) { integrate<<<num_blocks, block_size, shared_mem_size>>>(dev_positions, dev_vels, num_tiles, num_particles); cudaMemcpy(positions, dev_positions, size, cudaMemcpyDeviceToHost); cudaMemcpy(vels, dev_vels, size, cudaMemcpyDeviceToHost); if(i % SAVE_STEP == 0) { printf("%.2f\n", (double)i * 100.0 / (double)ITERATIONS); for(int j = 0; j < N; j++) fprintf(fp, "%g,%g,%g,%g,%g,%g\n", positions[j].x, positions[j].y, positions[j].z, vels[j].x, vels[j].y, vels[j].z); } if(kill_flag) { break; } } fclose(fp); if(kill_flag) { save_continue_csv("recovered-input.csv", positions, vels); } else { save_continue_csv("next-input.csv", positions, vels); } cudaFree(dev_positions); cudaFree(dev_vels); free(positions); free(vels); return 0; }
90f70684e5f7cf8ad683631063b5eaa4df17a808.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> void printImg(int * img,int width,int height); void copyImg(int *img ,int *h_img,int width ,int height); __global__ void mandelKernel(float lowerX,float lowerY,int* d_img,int resX,int resY,float stepX,float stepY,int maxIterations,size_t pitchSize) { // To avoid error caused by the floating number, use the following pseudo code int thisX = blockIdx.x * blockDim.x + threadIdx.x; int thisY = blockIdx.y * blockDim.y + threadIdx.y; float x = lowerX + thisX * stepX; float y = lowerY + thisY * stepY; float c_re = x,c_im = y; float z_re = x,z_im= y; int i,count=0; for (i = 0; i < maxIterations; ++i) { if (z_re * z_re + z_im * z_im > 4.f) break; //count ++; float new_re = z_re * z_re - z_im * z_im; float new_im = 2.f * z_re * z_im; z_re = c_re + new_re; z_im = c_im + new_im; } d_img[thisY*pitchSize+thisX] = i; //d_img[thisY*pitchSize+thisX] = 255; } // Host front-end function that allocates the memory and launches the GPU kernel void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations) { ///resX is img width , resY is img height float stepX = (upperX - lowerX) / resX; float stepY = (upperY - lowerY) / resY; int imageSize = resX * resY * sizeof(int); // host mem int * h_img ; hipHostMalloc((void**)&h_img,imageSize,hipHostMallocMapped); // device mem int *d_img; size_t pitchSize; hipMallocPitch((void**)&d_img,&pitchSize,resX*sizeof(int),resY); //launch kernel int BLOCK_SIZE_X = 16; int BLOCK_SIZE_Y = 16; dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 numBlock(resX / BLOCK_SIZE_X, resY / BLOCK_SIZE_Y); //hipMemcpy(d_img,img,resX*resY*sizeof(int),hipMemcpyHostToDevice); //note that pitchSize should be diveded by sizeof(int) because pitchSize hipLaunchKernelGGL(( mandelKernel), dim3(numBlock), dim3(blockSize), 0, 0, lowerX,lowerY,d_img,resX,resY,stepX,stepY,maxIterations,pitchSize/sizeof(int)); //hipMemcpy(h_img,d_img,resX*resY*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy2D(h_img,resX*sizeof(int),d_img,pitchSize,resX*sizeof(int),resY, hipMemcpyDeviceToHost); copyImg(img,h_img,resX,resY); //printf("width %d height %d \n",resX,resY); //printImg(img,resX,1); hipFree(d_img); return ; } void copyImg(int *img ,int *h_img,int width ,int height){ for(int j=0;j<height;j++){ for(int i =0 ;i<width;i++){ img[j*width + i ] = h_img[j*width+i]; } } } void printImg(int * img,int width,int height){ for(int j=0;j<height;j++){ for(int i=0;i<width;i++){ printf("%d ",img[j*height+i]); } printf("\n"); } }
90f70684e5f7cf8ad683631063b5eaa4df17a808.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> void printImg(int * img,int width,int height); void copyImg(int *img ,int *h_img,int width ,int height); __global__ void mandelKernel(float lowerX,float lowerY,int* d_img,int resX,int resY,float stepX,float stepY,int maxIterations,size_t pitchSize) { // To avoid error caused by the floating number, use the following pseudo code int thisX = blockIdx.x * blockDim.x + threadIdx.x; int thisY = blockIdx.y * blockDim.y + threadIdx.y; float x = lowerX + thisX * stepX; float y = lowerY + thisY * stepY; float c_re = x,c_im = y; float z_re = x,z_im= y; int i,count=0; for (i = 0; i < maxIterations; ++i) { if (z_re * z_re + z_im * z_im > 4.f) break; //count ++; float new_re = z_re * z_re - z_im * z_im; float new_im = 2.f * z_re * z_im; z_re = c_re + new_re; z_im = c_im + new_im; } d_img[thisY*pitchSize+thisX] = i; //d_img[thisY*pitchSize+thisX] = 255; } // Host front-end function that allocates the memory and launches the GPU kernel void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations) { ///resX is img width , resY is img height float stepX = (upperX - lowerX) / resX; float stepY = (upperY - lowerY) / resY; int imageSize = resX * resY * sizeof(int); // host mem int * h_img ; cudaHostAlloc((void**)&h_img,imageSize,cudaHostAllocMapped); // device mem int *d_img; size_t pitchSize; cudaMallocPitch((void**)&d_img,&pitchSize,resX*sizeof(int),resY); //launch kernel int BLOCK_SIZE_X = 16; int BLOCK_SIZE_Y = 16; dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 numBlock(resX / BLOCK_SIZE_X, resY / BLOCK_SIZE_Y); //cudaMemcpy(d_img,img,resX*resY*sizeof(int),cudaMemcpyHostToDevice); //note that pitchSize should be diveded by sizeof(int) because pitchSize mandelKernel<<<numBlock, blockSize>>>(lowerX,lowerY,d_img,resX,resY,stepX,stepY,maxIterations,pitchSize/sizeof(int)); //cudaMemcpy(h_img,d_img,resX*resY*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy2D(h_img,resX*sizeof(int),d_img,pitchSize,resX*sizeof(int),resY, cudaMemcpyDeviceToHost); copyImg(img,h_img,resX,resY); //printf("width %d height %d \n",resX,resY); //printImg(img,resX,1); cudaFree(d_img); return ; } void copyImg(int *img ,int *h_img,int width ,int height){ for(int j=0;j<height;j++){ for(int i =0 ;i<width;i++){ img[j*width + i ] = h_img[j*width+i]; } } } void printImg(int * img,int width,int height){ for(int j=0;j<height;j++){ for(int i=0;i<width;i++){ printf("%d ",img[j*height+i]); } printf("\n"); } }
c45faff21f5d938518833336a7e982be2009ca78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************* 11 12 Copyright (C) 2017 by Sidney Ribeiro Junior 13 14 This program is free software; you can redistribute it and/or modify 15 it under the terms of the GNU General Public License as published by 16 the Free Software Foundation; either version 2 of the License, or 17 (at your option) any later version. 18 19 This program is distributed in the hope that it will be useful, 20 but WITHOUT ANY WARRANTY; without even the implied warranty of 21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 GNU General Public License for more details. 23 24 You should have received a copy of the GNU General Public License 25 along with this program; if not, write to the Free Software 26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 28 ********************************************************************/ /* * * knn.cu */ #define CUDA_API_PER_THREAD_DEFAULT_STREAM #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <iostream> #include <queue> #include <vector> #include <set> #include <functional> #include "simjoin.cuh" #include "structs.cuh" #include "utils.cuh" #include "inverted_index.cuh" __host__ int findSimilars(InvertedIndex index, float threshold, struct DeviceVariables *dev_vars, Pair *similar_pairs, int probes_start, int probe_block_size, int probes_offset, int indexed_start, int indexed_block_size, int block_size, bool aggregate, DeviceTiming& deviceTiming) { dim3 grid, threads; get_grid_config(grid, threads); int *intersection = dev_vars->d_intersection; int *starts = dev_vars->d_starts; int *sizes = dev_vars->d_sizes; Entry *probes = dev_vars->d_entries;//indexed_block == probe_block? 
dev_vars->d_indexed: dev_vars->d_probes; Pair *pairs = dev_vars->d_pairs; int intersection_size = block_size*block_size; // TODO verificar tamanho quando blocos so menores q os blocos normais int *totalSimilars = (int *)malloc(sizeof(int)); // the last position of intersection is used to store the number of similar pairs DeviceTiming::EventPair* clearIntersection = deviceTiming.add("Clear intersection space", 0); hipMemset(intersection, 0, sizeof(int) * (intersection_size + 1)); deviceTiming.finish(clearIntersection); DeviceTiming::EventPair* calcIntersection = deviceTiming.add("Calculate intersection", 0); hipLaunchKernelGGL(( calculateIntersection), dim3(grid), dim3(threads), 0, 0, index, intersection, probes, starts, sizes, probes_start, probe_block_size, probes_offset, indexed_start, threshold, block_size); deviceTiming.finish(calcIntersection); // calculate Jaccard Similarity and store similar pairs in array pairs DeviceTiming::EventPair* calcSimilarity = deviceTiming.add("Calculate similarity", 0); hipLaunchKernelGGL(( calculateJaccardSimilarity), dim3(grid), dim3(threads), 0, 0, intersection, pairs, intersection + intersection_size, sizes, intersection_size, probes_start, indexed_start, probe_block_size, indexed_block_size, threshold, block_size); deviceTiming.finish(calcSimilarity); DeviceTiming::EventPair* transferPairs = deviceTiming.add("Transfer pairs to host", 0); gpuAssert(hipMemcpy(totalSimilars, intersection + intersection_size, sizeof(int), hipMemcpyDeviceToHost)); if (!aggregate) gpuAssert(hipMemcpy(similar_pairs, pairs, sizeof(Pair)*totalSimilars[0], hipMemcpyDeviceToHost)); deviceTiming.finish(transferPairs); return totalSimilars[0]; } __global__ void calculateIntersection(InvertedIndex index, int *intersection, Entry *probes, int *set_starts, int *set_sizes, int probes_start, int probe_block_size, int probes_offset, int indexed_start, float threshold, int block_size) { for (int i = blockIdx.x; i < probe_block_size; i += gridDim.x) { // percorre os probe sets int probe_id = i + probes_start; // setid_offset int probe_begin = set_starts[probe_id]; int probe_size = set_sizes[probe_id]; int maxsize = ceil(((float) probe_size)/threshold) + 1; for (int j = 0; j < probe_size; j++) { // percorre os termos de cada set int probe_entry = probes[probe_begin + j].term_id; int list_size = index.d_count[probe_entry]; int list_end = index.d_index[probe_entry]; int list_start = list_end - list_size; for (int k = list_start + threadIdx.x; k < list_end; k += blockDim.x) { // percorre a lista invertida int idx_entry = index.d_inverted_index[k].set_id; if (idx_entry > probe_id && set_sizes[idx_entry] < maxsize) atomicAdd(&intersection[i*block_size + idx_entry - indexed_start], 1); } } } } __global__ void calculateJaccardSimilarity(int *intersection, Pair *pairs, int *totalSimilars, int *sizes, int intersection_size, int probes_start, int indexed_start, int probe_block_size, int indexed_block_size, float threshold, int block_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; for (; i < intersection_size; i += gridDim.x*blockDim.x) { if (intersection[i]) { int x = i/block_size + probes_start; int y = i%block_size + indexed_start; float similarity = (float) intersection[i]/(sizes[x] + sizes[y] - intersection[i]); if (similarity >= threshold) { int pos = atomicAdd(totalSimilars, 1); pairs[pos].set_x = x; pairs[pos].set_y = y; pairs[pos].similarity = similarity; } } } }
c45faff21f5d938518833336a7e982be2009ca78.cu
/********************************************************************* 11 12 Copyright (C) 2017 by Sidney Ribeiro Junior 13 14 This program is free software; you can redistribute it and/or modify 15 it under the terms of the GNU General Public License as published by 16 the Free Software Foundation; either version 2 of the License, or 17 (at your option) any later version. 18 19 This program is distributed in the hope that it will be useful, 20 but WITHOUT ANY WARRANTY; without even the implied warranty of 21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 GNU General Public License for more details. 23 24 You should have received a copy of the GNU General Public License 25 along with this program; if not, write to the Free Software 26 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 28 ********************************************************************/ /* * * knn.cu */ #define CUDA_API_PER_THREAD_DEFAULT_STREAM #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <iostream> #include <queue> #include <vector> #include <set> #include <functional> #include "simjoin.cuh" #include "structs.cuh" #include "utils.cuh" #include "inverted_index.cuh" __host__ int findSimilars(InvertedIndex index, float threshold, struct DeviceVariables *dev_vars, Pair *similar_pairs, int probes_start, int probe_block_size, int probes_offset, int indexed_start, int indexed_block_size, int block_size, bool aggregate, DeviceTiming& deviceTiming) { dim3 grid, threads; get_grid_config(grid, threads); int *intersection = dev_vars->d_intersection; int *starts = dev_vars->d_starts; int *sizes = dev_vars->d_sizes; Entry *probes = dev_vars->d_entries;//indexed_block == probe_block? dev_vars->d_indexed: dev_vars->d_probes; Pair *pairs = dev_vars->d_pairs; int intersection_size = block_size*block_size; // TODO verificar tamanho quando blocos são menores q os blocos normais int *totalSimilars = (int *)malloc(sizeof(int)); // the last position of intersection is used to store the number of similar pairs DeviceTiming::EventPair* clearIntersection = deviceTiming.add("Clear intersection space", 0); cudaMemset(intersection, 0, sizeof(int) * (intersection_size + 1)); deviceTiming.finish(clearIntersection); DeviceTiming::EventPair* calcIntersection = deviceTiming.add("Calculate intersection", 0); calculateIntersection<<<grid, threads>>>(index, intersection, probes, starts, sizes, probes_start, probe_block_size, probes_offset, indexed_start, threshold, block_size); deviceTiming.finish(calcIntersection); // calculate Jaccard Similarity and store similar pairs in array pairs DeviceTiming::EventPair* calcSimilarity = deviceTiming.add("Calculate similarity", 0); calculateJaccardSimilarity<<<grid, threads>>>(intersection, pairs, intersection + intersection_size, sizes, intersection_size, probes_start, indexed_start, probe_block_size, indexed_block_size, threshold, block_size); deviceTiming.finish(calcSimilarity); DeviceTiming::EventPair* transferPairs = deviceTiming.add("Transfer pairs to host", 0); gpuAssert(cudaMemcpy(totalSimilars, intersection + intersection_size, sizeof(int), cudaMemcpyDeviceToHost)); if (!aggregate) gpuAssert(cudaMemcpy(similar_pairs, pairs, sizeof(Pair)*totalSimilars[0], cudaMemcpyDeviceToHost)); deviceTiming.finish(transferPairs); return totalSimilars[0]; } __global__ void calculateIntersection(InvertedIndex index, int *intersection, Entry *probes, int *set_starts, int *set_sizes, int probes_start, int probe_block_size, int probes_offset, int indexed_start, float 
threshold, int block_size) { for (int i = blockIdx.x; i < probe_block_size; i += gridDim.x) { // percorre os probe sets int probe_id = i + probes_start; // setid_offset int probe_begin = set_starts[probe_id]; int probe_size = set_sizes[probe_id]; int maxsize = ceil(((float) probe_size)/threshold) + 1; for (int j = 0; j < probe_size; j++) { // percorre os termos de cada set int probe_entry = probes[probe_begin + j].term_id; int list_size = index.d_count[probe_entry]; int list_end = index.d_index[probe_entry]; int list_start = list_end - list_size; for (int k = list_start + threadIdx.x; k < list_end; k += blockDim.x) { // percorre a lista invertida int idx_entry = index.d_inverted_index[k].set_id; if (idx_entry > probe_id && set_sizes[idx_entry] < maxsize) atomicAdd(&intersection[i*block_size + idx_entry - indexed_start], 1); } } } } __global__ void calculateJaccardSimilarity(int *intersection, Pair *pairs, int *totalSimilars, int *sizes, int intersection_size, int probes_start, int indexed_start, int probe_block_size, int indexed_block_size, float threshold, int block_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; for (; i < intersection_size; i += gridDim.x*blockDim.x) { if (intersection[i]) { int x = i/block_size + probes_start; int y = i%block_size + indexed_start; float similarity = (float) intersection[i]/(sizes[x] + sizes[y] - intersection[i]); if (similarity >= threshold) { int pos = atomicAdd(totalSimilars, 1); pairs[pos].set_x = x; pairs[pos].set_y = y; pairs[pos].similarity = similarity; } } } }
0adce4e72adc8c6eab8a74b8468aa53946b49dca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> namespace at { namespace cuda { #define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 #define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; namespace { template<typename input_t, typename IndexType> __device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int nbins) { IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue)); // (only applicable for histc) // while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end) // the last bin is inclusive at both, i.e. [start, end], in order to include maxvalue if exists // therefore when bin == nbins, adjust bin to the last bin if (bin == nbins) bin -= 1; return bin; } } /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, typename Op> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int nbins, input_t minvalue, input_t maxvalue, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `smem` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); atomicAdd(&smem[bin], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], smem[i]); } } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { ////////////////////////// Multi Block memory ////////////////////////// // atomically add to block specific global tensor // then atomically add to the global output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `p` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType pIdx = p.strides[0] * blockIdx.x + bin; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); atomicAdd(&p.data[pOffset], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. const IndexType pIdx = p.strides[0] * blockIdx.x; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], p.data[pOffset + i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `a` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a); atomicAdd(&a.data[aOffset], getOp(linearIndex)); } } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ hipLaunchKernelGGL(( kernelHistogram1D<output_t, input_t, IndexType, 1, 2, -1, MEMORY_TYPE>) \ , dim3(grid), \ block, \ SHARED_MEM, \ getCurrentHIPStreamMasqueradingAsCUDA(), \ aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); // AT_ASSERTM(hipGetLastError() == hipSuccess, "kernelHistogram1D failed"); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ break; \ case CUDAHistogramMemoryType::MULTI_BLOCK: \ HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ } inline int64_t getFreeGlobalMemory() { // no need to use `hipSetDevice` size_t free_mem, total_mem; hipMemGetInfo(&free_mem, &total_mem); // AT_ASSERTM( // hipGetLastError() == hipSuccess, // "CUDA_tensor_histogram failed to get free global memory"); return static_cast<int64_t>(free_mem); } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. 
See `help torch.bincount` for details on the math. 3 implementations based of input size and memory usage: case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem MULTI_BLOCK: Each block atomically adds to it's own **global** hist copy, then atomically updates the global tensor. case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins GLOBAL: all threads atomically update to a single **global** hist copy. */ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, input_t minvalue, input_t maxvalue, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.numel(); const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes auto maxGlobalMem = getFreeGlobalMemory(); auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && sharedMem < maxSharedMem) { memType = CUDAHistogramMemoryType::SHARED; } else if ( nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && multiBlockMem < (maxGlobalMem / 2)) { // check against half of free mem to be extra safe // due to cached allocator, we may anyway have slightly more free mem memType = CUDAHistogramMemoryType::MULTI_BLOCK; } // alloc memory for MULTI_BLOCK using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); Tensor partial_output; if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { partial_output = native::zeros({grid.x, nbins}, a.options()); pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output); } if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM #undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return native::zeros({minlength}, device(kCUDA).dtype(kLong)); } if (self.dim() != 1 
|| (!std::is_same<input_t, uint8_t>::value && *self.min().cpu().data<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && weights.size(0) != self.size(0)) { AT_ERROR("input and weights should have the same length"); } const int64_t nbins = ::max(*self.max().cpu().data<input_t>() + (int64_t)1, minlength); const input_t minvalue = 0; const input_t maxvalue = nbins; // alloc output counter on GPU Tensor output; if (has_weights) { output = native::zeros({nbins}, weights.options()); auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, minvalue, maxvalue); } else { output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(kLong)); auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, minvalue, maxvalue); } return output; } ///////////////// histc ///////////////// template <typename input_t> Tensor _histc_cuda_template( const Tensor& self, int64_t nbins, input_t min, input_t max) { if (nbins <= 0) { AT_ERROR("bins must be > 0"); } Tensor output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(self.scalar_type())); input_t minvalue = min; input_t maxvalue = max; if (min == max) { minvalue = *self.min().cpu().data<input_t>(); maxvalue = *self.max().cpu().data<input_t>(); } if (minvalue == maxvalue) { minvalue = minvalue - 1; maxvalue = maxvalue + 1; } auto ret = cuda::CUDA_tensor_histogram<input_t, input_t, false>( output, self, Tensor(), nbins, minvalue, maxvalue); return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const Tensor& weights, int64_t minlength) { return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { const auto scalar = weights.scalar_type(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, double>( self, weights.toType(CUDA(kDouble)), minlength); }); } Tensor _histc_cuda( const Tensor& self, int64_t nbins, Scalar min, Scalar max) { if (self.scalar_type() == ScalarType::Half) { AT_ERROR("HalfTensor is not supported"); } return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { return _histc_cuda_template<scalar_t>(self, nbins, min.to<scalar_t>(), max.to<scalar_t>()); }); } Tensor& _histc_out_cuda(Tensor& result, const Tensor& self, int64_t bins, Scalar min, Scalar max) { auto ret = _histc_cuda(self, bins, min, max); result.resize_as_(ret); result.copy_(ret); return result; } } // namespace native } // namespace at
0adce4e72adc8c6eab8a74b8468aa53946b49dca.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> namespace at { namespace cuda { #define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 #define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; namespace { template<typename input_t, typename IndexType> __device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int nbins) { IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue)); // (only applicable for histc) // while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end) // the last bin is inclusive at both, i.e. [start, end], in order to include maxvalue if exists // therefore when bin == nbins, adjust bin to the last bin if (bin == nbins) bin -= 1; return bin; } } /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, typename Op> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int nbins, input_t minvalue, input_t maxvalue, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `smem` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); atomicAdd(&smem[bin], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], smem[i]); } } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { ////////////////////////// Multi Block memory ////////////////////////// // atomically add to block specific global tensor // then atomically add to the global output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `p` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType pIdx = p.strides[0] * blockIdx.x + bin; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); atomicAdd(&p.data[pOffset], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. const IndexType pIdx = p.strides[0] * blockIdx.x; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], p.data[pOffset + i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `a` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a); atomicAdd(&a.data[aOffset], getOp(linearIndex)); } } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ kernelHistogram1D<output_t, input_t, IndexType, 1, 2, -1, MEMORY_TYPE> \ <<<grid, \ block, \ SHARED_MEM, \ getCurrentCUDAStream()>>>( \ aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); // AT_ASSERTM(cudaGetLastError() == cudaSuccess, "kernelHistogram1D failed"); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ break; \ case CUDAHistogramMemoryType::MULTI_BLOCK: \ HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ } inline int64_t getFreeGlobalMemory() { // no need to use `cudaSetDevice` size_t free_mem, total_mem; cudaMemGetInfo(&free_mem, &total_mem); // AT_ASSERTM( // cudaGetLastError() == cudaSuccess, // "CUDA_tensor_histogram failed to get free global memory"); return static_cast<int64_t>(free_mem); } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. See `help torch.bincount` for details on the math. 
3 implementations based of input size and memory usage: case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem MULTI_BLOCK: Each block atomically adds to it's own **global** hist copy, then atomically updates the global tensor. case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins GLOBAL: all threads atomically update to a single **global** hist copy. */ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, input_t minvalue, input_t maxvalue, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.numel(); const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes auto maxGlobalMem = getFreeGlobalMemory(); auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && sharedMem < maxSharedMem) { memType = CUDAHistogramMemoryType::SHARED; } else if ( nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && multiBlockMem < (maxGlobalMem / 2)) { // check against half of free mem to be extra safe // due to cached allocator, we may anyway have slightly more free mem memType = CUDAHistogramMemoryType::MULTI_BLOCK; } // alloc memory for MULTI_BLOCK using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); Tensor partial_output; if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { partial_output = native::zeros({grid.x, nbins}, a.options()); pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output); } if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM #undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return native::zeros({minlength}, device(kCUDA).dtype(kLong)); } if (self.dim() != 1 || (!std::is_same<input_t, uint8_t>::value && 
*self.min().cpu().data<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && weights.size(0) != self.size(0)) { AT_ERROR("input and weights should have the same length"); } const int64_t nbins = std::max(*self.max().cpu().data<input_t>() + (int64_t)1, minlength); const input_t minvalue = 0; const input_t maxvalue = nbins; // alloc output counter on GPU Tensor output; if (has_weights) { output = native::zeros({nbins}, weights.options()); auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, minvalue, maxvalue); } else { output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(kLong)); auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, minvalue, maxvalue); } return output; } ///////////////// histc ///////////////// template <typename input_t> Tensor _histc_cuda_template( const Tensor& self, int64_t nbins, input_t min, input_t max) { if (nbins <= 0) { AT_ERROR("bins must be > 0"); } Tensor output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(self.scalar_type())); input_t minvalue = min; input_t maxvalue = max; if (min == max) { minvalue = *self.min().cpu().data<input_t>(); maxvalue = *self.max().cpu().data<input_t>(); } if (minvalue == maxvalue) { minvalue = minvalue - 1; maxvalue = maxvalue + 1; } auto ret = cuda::CUDA_tensor_histogram<input_t, input_t, false>( output, self, Tensor(), nbins, minvalue, maxvalue); return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const Tensor& weights, int64_t minlength) { return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { const auto scalar = weights.scalar_type(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, double>( self, weights.toType(CUDA(kDouble)), minlength); }); } Tensor _histc_cuda( const Tensor& self, int64_t nbins, Scalar min, Scalar max) { if (self.scalar_type() == ScalarType::Half) { AT_ERROR("HalfTensor is not supported"); } return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { return _histc_cuda_template<scalar_t>(self, nbins, min.to<scalar_t>(), max.to<scalar_t>()); }); } Tensor& _histc_out_cuda(Tensor& result, const Tensor& self, int64_t bins, Scalar min, Scalar max) { auto ret = _histc_cuda(self, bins, min, max); result.resize_as_(ret); result.copy_(ret); return result; } } // namespace native } // namespace at
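All three histogram paths above (SHARED, MULTI_BLOCK, GLOBAL) funnel values through the same getBin mapping, so a minimal host-side sketch of that rule is enough to see which bin a value lands in. This is an illustration only, not part of the file above: the function name bin_of and the sample ranges are invented here, but the arithmetic mirrors getBin, including the adjustment that folds maxvalue into the last (inclusive) bin.

// standalone sketch of the getBin rule used by the kernel above
#include <cassert>

static int bin_of(double v, double minvalue, double maxvalue, int nbins) {
  int bin = (int)((v - minvalue) * nbins / (maxvalue - minvalue));
  // last bin is [start, end] rather than [start, end), so maxvalue itself
  // lands in bin nbins-1 instead of falling off the end
  if (bin == nbins) bin -= 1;
  return bin;
}

int main() {
  // 4 bins over [0, 8): width 2 each, so 3.5 -> bin 1; 8.0 -> last bin (3) by the inclusive rule
  assert(bin_of(0.0, 0.0, 8.0, 4) == 0);
  assert(bin_of(3.5, 0.0, 8.0, 4) == 1);
  assert(bin_of(8.0, 0.0, 8.0, 4) == 3);
  return 0;
}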
bf75e7e6d1da941c655bec246adb23335c399b4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // General #include <iostream> #include <algorithm> #include <sstream> // Warpkernel #include "warpkernel.hpp" // cusp #include <cusp/coo_matrix.h> #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/multiply.h> #include <cusp/detail/timer.h> #include <cusp/hyb_matrix.h> // boost // stats #include <boost/accumulators/accumulators.hpp> #include <boost/accumulators/statistics/stats.hpp> #include <boost/accumulators/statistics/mean.hpp> #define DeviceSpace cusp::device_memory #define CPUSpace cusp::host_memory struct rand_float { double operator() () { return 1.1;//((double)(rand() % 100))/100. -0.3; } }; template <bool usecache> __inline__ __device__ double fetch_cache(const int& i, const double* x) { if (usecache) { int2 v = tex1Dfetch(x_tex,i); return __hiloint2double(v.y, v.x); } else { return x[i]; } } /**********/ template <bool usecache, typename ValueType, typename IndexType> __global__ void warpKernel2_noregister(IndexType nrows, int nwarps, ValueType* A, IndexType *colinds, IndexType *rowmap, uint* maxrows, IndexType *warp_offset, uint* reduction, uint* rows_offset_warp, ValueType* x , ValueType* y) { const uint tid = threadIdx.x; const uint id = tid + blockIdx.x * blockDim.x; const uint wid = tid & (WARP_SIZE-1); const uint warpid = id / WARP_SIZE; extern volatile __shared__ ValueType sumvalues[]; if (warpid >= nwarps) return; const uint offsets = reduction[warpid]; const uint row_start = rows_offset_warp[warpid]; const uint rowid = row_start + wid/offsets; if (rowid < nrows) { IndexType toffset = warp_offset[warpid] + wid; const uint maxnz = maxrows[warpid] * WARP_SIZE + toffset; ValueType sum = A[toffset] * fetch_cache<usecache> (colinds[toffset],x); for(toffset += WARP_SIZE; toffset<maxnz; toffset += WARP_SIZE) { sum += A[toffset] * fetch_cache<usecache> (colinds[toffset],x); } sumvalues[tid] = sum; // // possible reduction for (int i = 1; i< offsets; i <<= 1) { if (offsets > i ) { sumvalues[tid] += sumvalues[tid+i]; } } if ((wid & (offsets-1)) == 0) { y[rowmap[rowid]] = sumvalues[tid]; } } } /**********/ template <bool usecache, typename ValueType, typename IndexType > __global__ void warpKernel_nocoalesced(uint nrows, ValueType* A, IndexType *colinds, IndexType *rowmap, uint* maxrows, IndexType *warp_offset, ValueType* x, ValueType* y) { const uint tid = threadIdx.x; const uint id = tid + blockIdx.x * blockDim.x; const uint wid = tid & (WARP_SIZE-1); const uint warpid = id / WARP_SIZE; if (id < nrows) { uint maxnz = maxrows[warpid]; IndexType toffset = warp_offset[warpid] + wid * maxnz; maxnz += toffset; ValueType sum = A[toffset] * fetch_cache<usecache> (colinds[toffset],x); for(toffset ++; toffset < maxnz; toffset ++) { sum += A[toffset] * fetch_cache<usecache> (colinds[toffset],x); } y[rowmap[id]] = sum; } } /*********/ template<typename R> bool verify(R orig, R comp, uint nrows) { bool check = true; for (int i=0; i< nrows; i++) { if (abs((orig[i]-comp[i])/orig[i]) > 1E-5) { std::cout << orig[i] << "\t" << comp[i] << "\t" << i << std::endl; check = false; return check; } } return check; } template<typename R> bool verify_x(R orig, R comp, uint nrows, int *row_map) { bool temp = true; for (int i=0; i< nrows; i++) { if (abs((orig[row_map[i]]-comp[i])/orig[row_map[i]]) > 1E-5) { std::cout << orig[row_map[i]] << "," << comp[i] << " : " << i << std::endl; temp = false; return temp; } } return temp; } bool checkPairOrder(std::pair<uint,uint> pair1, 
std::pair<uint,uint> pair2) { return pair1.second > pair2.second || (pair1.second == pair2.second && pair1.first < pair2.first); } template<typename ValueType> void sort(ValueType *rows, std::vector<std::pair<uint,uint> > & nnz_r, uint & nwarps, uint nrows) { nwarps = (nrows + WARP_SIZE-1)/(WARP_SIZE); nnz_r.resize(nrows); uint nznrows = 0; // Re-arrange rows to reach our assumptions for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = rows[r+1] - rows[r]; nnz_r[r] = std::make_pair(r,rowsize); if (rowsize > 0) nznrows++; } } // sort by rowsize std::sort( nnz_r.begin(), nnz_r.end(), checkPairOrder); } template <typename ValueType, typename IndexType> void scan(uint & nz, uint & nrows, ValueType * A, IndexType * rows, IndexType *colinds, uint & nwarps, uint & allocate_nz, std::vector<uint> & reorder_rows, // new_values[reorder_rows[i]] = A[i] std::vector<int> & warp_offsets, std::vector<uint> & max_nz_rows, std::vector<int> &row_map_, std::vector<int> &row_map_inv_, uint &nznrows) { std::vector<std::pair<uint,uint> > nnz_r; // non-zeros per row sort(rows, nnz_r, nwarps, nrows); std::vector<int> row_map(nrows); for(int r = 0; r < nrows; r++) row_map[r] = nnz_r[r].first; row_map_ = row_map; std::vector<int> row_map_inv(nrows); for(int i=0;i<nrows;i++) { row_map_inv[row_map[i]] = i; } row_map_inv_ = row_map_inv; std::vector<uint> A_w(nwarps); // max non-zeros per row std::vector<uint> nnz_imin(nwarps,nrows); // minimum non-zeros per row std::vector<uint> nnz_imax(nwarps); // maximum non-zeros per row // Use sorted row-sizes to calculate A_w, nnz_w, etc. for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = nnz_r[r].second; if (rowsize < nnz_imin[w]) nnz_imin[w] = rowsize; // min if (rowsize > nnz_imax[w]) nnz_imax[w] = rowsize; // max } A_w[w] = nnz_imax[w]; } max_nz_rows = A_w; // set warp_offsets and allocate_nz; warp_offsets.resize(nwarps+1); warp_offsets[0] = 0; for(int w = 0; w < nwarps; w++) { warp_offsets[w+1] = warp_offsets[w] + A_w[w] * WARP_SIZE; } allocate_nz = warp_offsets[nwarps]; // Generate reordering map for future use reorder_rows.resize(nz); for (int w_s = 0; w_s < nwarps; w_s++) { for (int r_s = WARP_SIZE * w_s; r_s < nrows && r_s < WARP_SIZE * (w_s+1); r_s++) { int r = nnz_r[r_s].first; int rowsize = nnz_r[r_s].second; for(int i = 0; i < rowsize; i++) { reorder_rows[rows[r] + i] = warp_offsets[w_s] + (r_s % WARP_SIZE) + i*WARP_SIZE; } } } } /******* scan no reorder coalesce ****/ template <typename ValueType, typename IndexType> void scan_nocoalesced(uint & nz, uint & nrows, ValueType * A, IndexType * rows, IndexType *colinds, uint & nwarps, uint & allocate_nz, std::vector<uint> & reorder_rows, // new_values[reorder_rows[i]] = A[i] std::vector<int> & warp_offsets, std::vector<uint> & max_nz_rows, std::vector<int> &row_map_, std::vector<int> &row_map_inv_) { std::vector<std::pair<uint,uint> > nnz_r; // non-zeros per row sort(rows, nnz_r, nwarps, nrows); std::vector<int> row_map(nrows); for(int r = 0; r < nrows; r++) row_map[r] = nnz_r[r].first; row_map_ = row_map; std::vector<int> row_map_inv(nrows); for(int i=0;i<nrows;i++) { row_map_inv[row_map[i]] = i; } row_map_inv_ = row_map_inv; std::vector<uint> A_w(nwarps); // max non-zeros per row std::vector<uint> nnz_imin(nwarps,nrows); // minimum non-zeros per row std::vector<uint> nnz_imax(nwarps); // maximum non-zeros per row // Use sorted row-sizes to calculate A_w, nnz_w, etc. 
for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = nnz_r[r].second; if (rowsize < nnz_imin[w]) nnz_imin[w] = rowsize; // min if (rowsize > nnz_imax[w]) nnz_imax[w] = rowsize; // max } A_w[w] = nnz_imax[w]; } max_nz_rows = A_w; // set warp_offsets and allocate_nz; warp_offsets.resize(nwarps+1); warp_offsets[0] = 0; for(int w = 0; w < nwarps; w++) { warp_offsets[w+1] = warp_offsets[w] + A_w[w] * WARP_SIZE; } allocate_nz = warp_offsets[nwarps]; // Generate reordering map for future use reorder_rows.resize(nz); for (int w_s = 0; w_s < nwarps; w_s++) { for (int r_s = WARP_SIZE * w_s; r_s < nrows && r_s < WARP_SIZE * (w_s+1); r_s++) { int r = nnz_r[r_s].first; int rowsize = nnz_r[r_s].second; for(int i = 0; i < rowsize; i++) { reorder_rows[rows[r] + i] = warp_offsets[w_s] + (r_s % WARP_SIZE) * max_nz_rows[w_s] + i; // undid reodering } } } } /******* scan no reorder coalesce no sort ****/ template <typename ValueType, typename IndexType> void scan_nocoalesced_nosort(uint & nz, uint & nrows, ValueType * A, IndexType * rows, IndexType *colinds, uint & nwarps, uint & allocate_nz, std::vector<uint> & reorder_rows, // new_values[reorder_rows[i]] = A[i] std::vector<int> & warp_offsets, std::vector<uint> & max_nz_rows, std::vector<int> &row_map_, std::vector<int> &row_map_inv_) { std::vector<std::pair<uint,uint> > nnz_r; // non-zeros per row nwarps = (nrows + WARP_SIZE-1)/(WARP_SIZE); // Re-arrange rows to reach our assumptions for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = rows[r+1] - rows[r]; if (rowsize > 0) nnz_r.push_back(std::make_pair(r,rowsize)); } } std::vector<int> row_map(nrows); for(int r = 0; r < nrows; r++) row_map[r] = nnz_r[r].first; row_map_ = row_map; std::vector<int> row_map_inv(nrows); for(int i=0;i<nrows;i++) { row_map_inv[row_map[i]] = i; } row_map_inv_ = row_map_inv; std::vector<uint> A_w(nwarps); // max non-zeros per row std::vector<uint> nnz_imin(nwarps,nrows); // minimum non-zeros per row std::vector<uint> nnz_imax(nwarps); // maximum non-zeros per row // Use sorted row-sizes to calculate A_w, nnz_w, etc. 
for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = nnz_r[r].second; if (rowsize < nnz_imin[w]) nnz_imin[w] = rowsize; // min if (rowsize > nnz_imax[w]) nnz_imax[w] = rowsize; // max } A_w[w] = nnz_imax[w]; } max_nz_rows = A_w; // set warp_offsets and allocate_nz; warp_offsets.resize(nwarps+1); warp_offsets[0] = 0; for(int w = 0; w < nwarps; w++) { warp_offsets[w+1] = warp_offsets[w] + A_w[w] * WARP_SIZE; } allocate_nz = warp_offsets[nwarps]; // Generate reordering map for future use reorder_rows.resize(nz); for (int w_s = 0; w_s < nwarps; w_s++) { for (int r_s = WARP_SIZE * w_s; r_s < nrows && r_s < WARP_SIZE * (w_s+1); r_s++) { int r = nnz_r[r_s].first; int rowsize = nnz_r[r_s].second; for(int i = 0; i < rowsize; i++) { reorder_rows[rows[r] + i] = warp_offsets[w_s] + (r_s % WARP_SIZE) * max_nz_rows[w_s] + i; // undid reodering } } } } /******* scan no sort ****/ template <typename ValueType, typename IndexType> void scan_nosort(uint & nz, uint & nrows, ValueType * A, IndexType * rows, IndexType *colinds, uint & nwarps, uint & allocate_nz, std::vector<uint> & reorder_rows, // new_values[reorder_rows[i]] = A[i] std::vector<int> & warp_offsets, std::vector<uint> & max_nz_rows, std::vector<int> &row_map_, std::vector<int> &row_map_inv_) { std::vector<std::pair<uint,uint> > nnz_r; // non-zeros per row nwarps = (nrows + WARP_SIZE-1)/(WARP_SIZE); // Re-arrange rows to reach our assumptions for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = rows[r+1] - rows[r]; if (rowsize > 0) nnz_r.push_back(std::make_pair(r,rowsize)); } } std::vector<int> row_map(nrows); for(int r = 0; r < nrows; r++) row_map[r] = nnz_r[r].first; row_map_ = row_map; std::vector<int> row_map_inv(nrows); for(int i=0;i<nrows;i++) { row_map_inv[row_map[i]] = i; } row_map_inv_ = row_map_inv; std::vector<uint> A_w(nwarps); // max non-zeros per row std::vector<uint> nnz_imin(nwarps,nrows); // minimum non-zeros per row std::vector<uint> nnz_imax(nwarps); // maximum non-zeros per row // Use sorted row-sizes to calculate A_w, nnz_w, etc. 
for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = nnz_r[r].second; if (rowsize < nnz_imin[w]) nnz_imin[w] = rowsize; // min if (rowsize > nnz_imax[w]) nnz_imax[w] = rowsize; // max } A_w[w] = nnz_imax[w]; } max_nz_rows = A_w; // set warp_offsets and allocate_nz; warp_offsets.resize(nwarps+1); warp_offsets[0] = 0; for(int w = 0; w < nwarps; w++) { warp_offsets[w+1] = warp_offsets[w] + A_w[w] * WARP_SIZE; } allocate_nz = warp_offsets[nwarps]; // Generate reordering map for future use reorder_rows.resize(nz); for (int w_s = 0; w_s < nwarps; w_s++) { for (int r_s = WARP_SIZE * w_s; r_s < nrows && r_s < WARP_SIZE * (w_s+1); r_s++) { int r = nnz_r[r_s].first; int rowsize = nnz_r[r_s].second; for(int i = 0; i < rowsize; i++) { reorder_rows[rows[r] + i] = warp_offsets[w_s] + (r_s % WARP_SIZE) + i*WARP_SIZE; } } } } /********* Reorder */ template<typename IndexType, typename ValueType> void reorder(ValueType *A, IndexType * colinds, cusp::array1d<ValueType, DeviceSpace> &device_values, // allocate nz cusp::array1d<IndexType, DeviceSpace> &device_colinds, // allocate nz uint nz, uint allocate_nz, std::vector<uint> reorder_rows) { cusp::array1d<ValueType, CPUSpace> new_values(allocate_nz,0); cusp::array1d<IndexType, CPUSpace> new_colinds(allocate_nz,0); device_values.resize(allocate_nz); device_colinds.resize(allocate_nz); for(int i=0; i< nz; i++) { new_values[reorder_rows[i]] = A[i]; new_colinds[reorder_rows[i]] = colinds[i]; } device_values = new_values; device_colinds = new_colinds; } /* warpkernel1 process */ // preform the reordering template<typename IndexType, typename ValueType> void process(ValueType *A, IndexType * colinds, cusp::array1d<ValueType, DeviceSpace> &device_values, // nz cusp::array1d<IndexType, DeviceSpace> &device_colinds, // nz cusp::array1d<IndexType, DeviceSpace> &device_row_map, // nrows cusp::array1d<uint, DeviceSpace> &device_max_nz_per_row, // nwarps cusp::array1d<IndexType, DeviceSpace> &device_warp_offsets, // nwarps uint nz, uint allocate_nz, std::vector<uint> reorder_rows, std::vector<int> warp_offsets, std::vector<uint> max_nz_rows, std::vector<int> row_map) { reorder(A,colinds, device_values, device_colinds, nz, allocate_nz, reorder_rows); device_row_map = row_map; device_max_nz_per_row = max_nz_rows; device_warp_offsets = warp_offsets; } /******** Purpose of this executable is to examine different effects of optimization *******/ #define ValueType double #define IndexType int int main(int argc, char *argv[]) { std::string matrixfilename = argv[1]; int ntests = 1; if (argc == 3) ntests = atoi(argv[2]); cusp::coo_matrix<IndexType, ValueType, CPUSpace> B; cusp::io::read_matrix_market_file(B, matrixfilename.c_str()); cusp::csr_matrix<IndexType, ValueType, CPUSpace> A = B; uint N = A.num_cols; uint nz = A.num_entries; // open up data file std::string filename; size_t pos = matrixfilename.find_last_of("/"); std::string matrixname; if (pos != std::string::npos ) matrixname.assign(matrixfilename.begin()+pos+1, matrixfilename.end()); else matrixname = matrixfilename; std::string datapath = "./data/" + matrixname + "_optimize.txt"; std::cout << "Starting data file = " << datapath << std::endl; std::ofstream datafile(datapath.c_str()); warpkernel::startDatafile(datafile, N, nz,ntests); cusp::array1d<ValueType, CPUSpace> x(N, 1.0); // thrust::generate(x.begin(),x.end(), rand_float()); cusp::array1d<ValueType, CPUSpace> y(N); { boost::accumulators::accumulator_set<ValueType, 
boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; // cusp hyb multiplication for (int i=0;i<ntests;i++) { cusp::hyb_matrix<IndexType, ValueType, DeviceSpace> A1 = A; cusp::array1d<ValueType, DeviceSpace> dx = x; cusp::array1d<ValueType, DeviceSpace> dy = y; cusp::detail::timer cusptimer; cusptimer.start(); cusp::multiply(A1,dx,dy); ValueType measuredtime = cusptimer.seconds_elapsed(); statstime(measuredtime); y = dy; } std::cout << "cusp gpu time " << std::scientific << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "cusp-csr", boost::accumulators::mean(statstime), -1, -1, -1, -1); } // setup for warpkernel1 cusp::array1d<ValueType, DeviceSpace> dx = x; warpkernel::structure kernel1( N, nz, 0); std::vector<uint> reorder_rows; std::vector<int> warp_offsets; std::vector<uint> max_nz_rows; std::vector<int> row_map; std::vector<int> row_map_inv; uint nznrows; scan(kernel1.nz, kernel1.nrows, &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), kernel1.nwarps, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map, row_map_inv, nznrows); uint warps_per_block = 6; uint nblocks = (kernel1.nwarps + warps_per_block -1)/warps_per_block; uint blocksize = warps_per_block * WARP_SIZE; // original warpkernel1 with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) hipBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); hipLaunchKernelGGL(( warpkernel::warpKernel<cache>) , dim3(nblocks), dim3(blocksize) , 0, 0, kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_cache time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache" << std::endl; } // warpkernel.hpp { cusp::array1d<ValueType, DeviceSpace> dx = x; warpkernel::structure kernel1; kernel1.scan(nz, N, A); uint warps_per_block = 6; uint nblocks = (kernel1.nwarps + warps_per_block-1)/warps_per_block; uint blocksize = warps_per_block * WARP_SIZE; warpkernel::engine<ValueType, IndexType, 
warpkernel::structure> eng(kernel1, &(A.values[0]), &(A.column_indices[0])); cusp::array1d<ValueType, DeviceSpace> dy(N,0); ValueType measuretime = 0; for (int t = 0; t < ntests; t++) { measuretime += eng.run<true>(nblocks, blocksize, thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (eng.verify(y,ycheck)) { std::cout << "warpkernel (" << nblocks << "," << blocksize <<") time = " << std::scientific << measuretime/ntests << std::endl; } else { std::cout << "Failed original warpkernel.hpp" << std::endl; } } // original warpkernel1 no cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) hipBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); hipLaunchKernelGGL(( warpkernel::warpKernel<cache>) , dim3(nblocks), dim3(blocksize) , 0, 0, kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache" << std::endl; } // original warpkernel1 no reordering with cache { // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps scan_nocoalesced(kernel1.nz, kernel1.nrows, &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), kernel1.nwarps, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map, row_map_inv); process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); // original warpkernel1 not coalesced 
with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) hipBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); hipLaunchKernelGGL(( warpKernel_nocoalesced<cache>) , dim3(nblocks), dim3(blocksize) , 0, 0, kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_cache_nocoalesced time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_nocoalesced", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_nocoalesced" << std::endl; } // original warpkernel1 not coalesced with no cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) hipBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); hipLaunchKernelGGL(( warpKernel_nocoalesced<cache>) , dim3(nblocks), dim3(blocksize) , 0, 0, kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache_nocoalesced time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_nocoalesced", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocahce_nocoalesced" << std::endl; } } // original warpkernel1 no reordering with cache no sorting { // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps scan_nocoalesced_nosort(kernel1.nz, kernel1.nrows, &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), kernel1.nwarps, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map, row_map_inv); process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, 
row_map); // original warpkernel1 not coalesced with cache no sort { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) hipBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); hipLaunchKernelGGL(( warpKernel_nocoalesced<cache>) , dim3(nblocks), dim3(blocksize) , 0, 0, kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_cache_nocoalesced_nosort time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_nocoalesced_nosort", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_nocoalesced_nosort" << std::endl; } // original warpkernel1 not coalesced with no cache no sort { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) hipBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); hipLaunchKernelGGL(( warpKernel_nocoalesced<cache>) , dim3(nblocks), dim3(blocksize) , 0, 0, kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache_nocoalesced_nosort time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_nocoalesced_nosort", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_nocache_nocoalesced_nosort" << std::endl; } // original warpkernel1 no reordering with cache { // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps scan_nosort(kernel1.nz, kernel1.nrows, &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), kernel1.nwarps, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map, row_map_inv); process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, 
device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); // original warpkernel1 not coalesced with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) hipBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); hipLaunchKernelGGL(( warpkernel::warpKernel<cache>) , dim3(nblocks), dim3(blocksize) , 0, 0, kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_cache_nosort time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_nosort", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_nosort" << std::endl; } // original warpkernel1 not coalesced with no cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) hipBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); hipLaunchKernelGGL(( warpkernel::warpKernel<cache>) , dim3(nblocks), dim3(blocksize) , 0, 0, kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache_nosort time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_nosort", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache_nosort" << std::endl; } } // warpkernel1 with reorderedx with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; { cusp::detail::timer reordercolstimer; reordercolstimer.start(); kernel1.reorder_columns_coalesced(reorder_cols); ValueType reordertime = reordercolstimer.seconds_elapsed(); std::cout << "reorder column time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_col_time_cache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, DeviceSpace> dreordered_x; { cusp::detail::timer reorderxtimer; reorderxtimer.start(); kernel1.reorder_x(x, dreordered_x); ValueType reordertime = reorderxtimer.seconds_elapsed(); std::cout << "reorder x time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_x_time_cache_rex", reordertime, -1, -1, -1, 
-1); } warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &(A.values[0]), &(A.column_indices[0])); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; eng1.device_colinds = reorder_cols; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_cache_rex time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_rex", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_rex" << std::endl; } // warpkernel1 with reorderedx no cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; { cusp::detail::timer reordercolstimer; reordercolstimer.start(); kernel1.reorder_columns_coalesced(reorder_cols); ValueType reordertime = reordercolstimer.seconds_elapsed(); std::cout << "reorder column time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_col_time_nocache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, DeviceSpace> dreordered_x; { cusp::detail::timer reorderxtimer; reorderxtimer.start(); kernel1.reorder_x(x, dreordered_x); ValueType reordertime = reorderxtimer.seconds_elapsed(); std::cout << "reorder x time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_x_time_nocache_rex", reordertime, -1, -1, -1, -1); } warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &(A.values[0]), &(A.column_indices[0])); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; eng1.device_colinds = reorder_cols; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_nocache_rex time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_rex", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache_rex" << std::endl; } // warpkernel1 with reorderedx and reordered A with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; { cusp::detail::timer reordercolstimer; reordercolstimer.start(); kernel1.reorder_columns_rowsort(reorder_cols, A.row_offsets); ValueType reordertime = reordercolstimer.seconds_elapsed(); std::cout << "reorder column time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_col_time_cache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, DeviceSpace> dreordered_x; { cusp::detail::timer reorderxtimer; reorderxtimer.start(); kernel1.reorder_x(x, dreordered_x); ValueType reordertime = reorderxtimer.seconds_elapsed(); std::cout << "reorder x time " << reordertime << std::endl; 
warpkernel::addData(datafile, "reorder_x_time_cache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, CPUSpace> A_new_values(nz); { cusp::detail::timer reorderAtimer; reorderAtimer.start(); for(int i=0;i< nz; i++) { A_new_values[i] = A.values[kernel1.reorder_A_rows[i]]; } ValueType reordertime = reorderAtimer.seconds_elapsed(); std::cout << "reorder A time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_A_time_cache_rex", reordertime, -1, -1, -1, -1); } warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &A_new_values[0], &reorder_cols[0]); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_cache_rex_reA time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_rex_reA", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_rex_reA" << std::endl; } // warpkernel1 with reorderedx and reordered A with nocache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; { cusp::detail::timer reordercolstimer; reordercolstimer.start(); kernel1.reorder_columns_rowsort(reorder_cols, A.row_offsets); ValueType reordertime = reordercolstimer.seconds_elapsed(); std::cout << "reorder column time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_col_time_nocache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, DeviceSpace> dreordered_x; { cusp::detail::timer reorderxtimer; reorderxtimer.start(); kernel1.reorder_x(x, dreordered_x); ValueType reordertime = reorderxtimer.seconds_elapsed(); std::cout << "reorder x time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_x_time_nocache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, CPUSpace> A_new_values(nz); { cusp::detail::timer reorderAtimer; reorderAtimer.start(); for(int i=0;i< nz; i++) { A_new_values[i] = A.values[kernel1.reorder_A_rows[i]]; } ValueType reordertime = reorderAtimer.seconds_elapsed(); std::cout << "reorder A time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_A_time_nocache_rex", reordertime, -1, -1, -1, -1); } warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &A_new_values[0], &reorder_cols[0]); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_nocache_rex_reA time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_rex_reA", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed 
to verify warp_kernel_nocache_rex_reA" << std::endl; } // warpkernel1 with reorderedx with cache remap { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; kernel1.reorder_columns_coalesced(reorder_cols); cusp::array1d<ValueType, DeviceSpace> dreordered_x; kernel1.reorder_x(x, dreordered_x); warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &(A.values[0]), &(A.column_indices[0])); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; eng1.device_colinds = reorder_cols; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_cache_rexmap time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_rexmap", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_rexmap" << std::endl; } // warpkernel1 with reorderedx no cache remap { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; kernel1.reorder_columns_coalesced(reorder_cols); cusp::array1d<ValueType, DeviceSpace> dreordered_x; kernel1.reorder_x(x, dreordered_x); warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &(A.values[0]), &(A.column_indices[0])); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; eng1.device_colinds = reorder_cols; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache_rexmap time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_rexmap", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache_rexmap" << std::endl; } } }
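The benchmark variants above differ mainly in how scan() and scan_nocoalesced() lay out the padded matrix, which decides whether a warp's loads coalesce. The following is a minimal host-side sketch of the two index formulas taken from the reorder_rows assignments above; the helper names (coalesced_offset, strided_offset) are invented here for illustration and do not appear in the benchmark.

// standalone sketch of the two element layouts produced by scan() / scan_nocoalesced()
#include <cstdio>

static const int WARP_SIZE = 32;

// scan(): at each step i, consecutive lanes touch consecutive addresses
static int coalesced_offset(int warp_offset, int lane, int i) {
  return warp_offset + lane + i * WARP_SIZE;
}

// scan_nocoalesced(): each lane owns a contiguous strip of max_nz entries
static int strided_offset(int warp_offset, int lane, int max_nz, int i) {
  return warp_offset + lane * max_nz + i;
}

int main() {
  // For i = 0, lanes 0..3 of a warp starting at offset 0 with max_nz = 8:
  for (int lane = 0; lane < 4; ++lane)
    printf("lane %d: coalesced %d, strided %d\n",
           lane, coalesced_offset(0, lane, 0), strided_offset(0, lane, 8, 0));
  // coalesced: 0 1 2 3 (one memory transaction); strided: 0 8 16 24 (scattered)
  return 0;
}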
bf75e7e6d1da941c655bec246adb23335c399b4b.cu
// General #include <iostream> #include <algorithm> #include <sstream> // Warpkernel #include "warpkernel.hpp" // cusp #include <cusp/coo_matrix.h> #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/multiply.h> #include <cusp/detail/timer.h> #include <cusp/hyb_matrix.h> // boost // stats #include <boost/accumulators/accumulators.hpp> #include <boost/accumulators/statistics/stats.hpp> #include <boost/accumulators/statistics/mean.hpp> #define DeviceSpace cusp::device_memory #define CPUSpace cusp::host_memory struct rand_float { double operator() () { return 1.1;//((double)(rand() % 100))/100. -0.3; } }; template <bool usecache> __inline__ __device__ double fetch_cache(const int& i, const double* x) { if (usecache) { int2 v = tex1Dfetch(x_tex,i); return __hiloint2double(v.y, v.x); } else { return x[i]; } } /**********/ template <bool usecache, typename ValueType, typename IndexType> __global__ void warpKernel2_noregister(IndexType nrows, int nwarps, ValueType* A, IndexType *colinds, IndexType *rowmap, uint* maxrows, IndexType *warp_offset, uint* reduction, uint* rows_offset_warp, ValueType* x , ValueType* y) { const uint tid = threadIdx.x; const uint id = tid + blockIdx.x * blockDim.x; const uint wid = tid & (WARP_SIZE-1); const uint warpid = id / WARP_SIZE; extern volatile __shared__ ValueType sumvalues[]; if (warpid >= nwarps) return; const uint offsets = reduction[warpid]; const uint row_start = rows_offset_warp[warpid]; const uint rowid = row_start + wid/offsets; if (rowid < nrows) { IndexType toffset = warp_offset[warpid] + wid; const uint maxnz = maxrows[warpid] * WARP_SIZE + toffset; ValueType sum = A[toffset] * fetch_cache<usecache> (colinds[toffset],x); for(toffset += WARP_SIZE; toffset<maxnz; toffset += WARP_SIZE) { sum += A[toffset] * fetch_cache<usecache> (colinds[toffset],x); } sumvalues[tid] = sum; // // possible reduction for (int i = 1; i< offsets; i <<= 1) { if (offsets > i ) { sumvalues[tid] += sumvalues[tid+i]; } } if ((wid & (offsets-1)) == 0) { y[rowmap[rowid]] = sumvalues[tid]; } } } /**********/ template <bool usecache, typename ValueType, typename IndexType > __global__ void warpKernel_nocoalesced(uint nrows, ValueType* A, IndexType *colinds, IndexType *rowmap, uint* maxrows, IndexType *warp_offset, ValueType* x, ValueType* y) { const uint tid = threadIdx.x; const uint id = tid + blockIdx.x * blockDim.x; const uint wid = tid & (WARP_SIZE-1); const uint warpid = id / WARP_SIZE; if (id < nrows) { uint maxnz = maxrows[warpid]; IndexType toffset = warp_offset[warpid] + wid * maxnz; maxnz += toffset; ValueType sum = A[toffset] * fetch_cache<usecache> (colinds[toffset],x); for(toffset ++; toffset < maxnz; toffset ++) { sum += A[toffset] * fetch_cache<usecache> (colinds[toffset],x); } y[rowmap[id]] = sum; } } /*********/ template<typename R> bool verify(R orig, R comp, uint nrows) { bool check = true; for (int i=0; i< nrows; i++) { if (abs((orig[i]-comp[i])/orig[i]) > 1E-5) { std::cout << orig[i] << "\t" << comp[i] << "\t" << i << std::endl; check = false; return check; } } return check; } template<typename R> bool verify_x(R orig, R comp, uint nrows, int *row_map) { bool temp = true; for (int i=0; i< nrows; i++) { if (abs((orig[row_map[i]]-comp[i])/orig[row_map[i]]) > 1E-5) { std::cout << orig[row_map[i]] << "," << comp[i] << " : " << i << std::endl; temp = false; return temp; } } return temp; } bool checkPairOrder(std::pair<uint,uint> pair1, std::pair<uint,uint> pair2) { return pair1.second > pair2.second || (pair1.second == pair2.second && 
pair1.first < pair2.first); } template<typename ValueType> void sort(ValueType *rows, std::vector<std::pair<uint,uint> > & nnz_r, uint & nwarps, uint nrows) { nwarps = (nrows + WARP_SIZE-1)/(WARP_SIZE); nnz_r.resize(nrows); uint nznrows = 0; // Re-arrange rows to reach our assumptions for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = rows[r+1] - rows[r]; nnz_r[r] = std::make_pair(r,rowsize); if (rowsize > 0) nznrows++; } } // sort by rowsize std::sort( nnz_r.begin(), nnz_r.end(), checkPairOrder); } template <typename ValueType, typename IndexType> void scan(uint & nz, uint & nrows, ValueType * A, IndexType * rows, IndexType *colinds, uint & nwarps, uint & allocate_nz, std::vector<uint> & reorder_rows, // new_values[reorder_rows[i]] = A[i] std::vector<int> & warp_offsets, std::vector<uint> & max_nz_rows, std::vector<int> &row_map_, std::vector<int> &row_map_inv_, uint &nznrows) { std::vector<std::pair<uint,uint> > nnz_r; // non-zeros per row sort(rows, nnz_r, nwarps, nrows); std::vector<int> row_map(nrows); for(int r = 0; r < nrows; r++) row_map[r] = nnz_r[r].first; row_map_ = row_map; std::vector<int> row_map_inv(nrows); for(int i=0;i<nrows;i++) { row_map_inv[row_map[i]] = i; } row_map_inv_ = row_map_inv; std::vector<uint> A_w(nwarps); // max non-zeros per row std::vector<uint> nnz_imin(nwarps,nrows); // minimum non-zeros per row std::vector<uint> nnz_imax(nwarps); // maximum non-zeros per row // Use sorted row-sizes to calculate A_w, nnz_w, etc. for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = nnz_r[r].second; if (rowsize < nnz_imin[w]) nnz_imin[w] = rowsize; // min if (rowsize > nnz_imax[w]) nnz_imax[w] = rowsize; // max } A_w[w] = nnz_imax[w]; } max_nz_rows = A_w; // set warp_offsets and allocate_nz; warp_offsets.resize(nwarps+1); warp_offsets[0] = 0; for(int w = 0; w < nwarps; w++) { warp_offsets[w+1] = warp_offsets[w] + A_w[w] * WARP_SIZE; } allocate_nz = warp_offsets[nwarps]; // Generate reordering map for future use reorder_rows.resize(nz); for (int w_s = 0; w_s < nwarps; w_s++) { for (int r_s = WARP_SIZE * w_s; r_s < nrows && r_s < WARP_SIZE * (w_s+1); r_s++) { int r = nnz_r[r_s].first; int rowsize = nnz_r[r_s].second; for(int i = 0; i < rowsize; i++) { reorder_rows[rows[r] + i] = warp_offsets[w_s] + (r_s % WARP_SIZE) + i*WARP_SIZE; } } } } /******* scan no reorder coalesce ****/ template <typename ValueType, typename IndexType> void scan_nocoalesced(uint & nz, uint & nrows, ValueType * A, IndexType * rows, IndexType *colinds, uint & nwarps, uint & allocate_nz, std::vector<uint> & reorder_rows, // new_values[reorder_rows[i]] = A[i] std::vector<int> & warp_offsets, std::vector<uint> & max_nz_rows, std::vector<int> &row_map_, std::vector<int> &row_map_inv_) { std::vector<std::pair<uint,uint> > nnz_r; // non-zeros per row sort(rows, nnz_r, nwarps, nrows); std::vector<int> row_map(nrows); for(int r = 0; r < nrows; r++) row_map[r] = nnz_r[r].first; row_map_ = row_map; std::vector<int> row_map_inv(nrows); for(int i=0;i<nrows;i++) { row_map_inv[row_map[i]] = i; } row_map_inv_ = row_map_inv; std::vector<uint> A_w(nwarps); // max non-zeros per row std::vector<uint> nnz_imin(nwarps,nrows); // minimum non-zeros per row std::vector<uint> nnz_imax(nwarps); // maximum non-zeros per row // Use sorted row-sizes to calculate A_w, nnz_w, etc. 
for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = nnz_r[r].second; if (rowsize < nnz_imin[w]) nnz_imin[w] = rowsize; // min if (rowsize > nnz_imax[w]) nnz_imax[w] = rowsize; // max } A_w[w] = nnz_imax[w]; } max_nz_rows = A_w; // set warp_offsets and allocate_nz; warp_offsets.resize(nwarps+1); warp_offsets[0] = 0; for(int w = 0; w < nwarps; w++) { warp_offsets[w+1] = warp_offsets[w] + A_w[w] * WARP_SIZE; } allocate_nz = warp_offsets[nwarps]; // Generate reordering map for future use reorder_rows.resize(nz); for (int w_s = 0; w_s < nwarps; w_s++) { for (int r_s = WARP_SIZE * w_s; r_s < nrows && r_s < WARP_SIZE * (w_s+1); r_s++) { int r = nnz_r[r_s].first; int rowsize = nnz_r[r_s].second; for(int i = 0; i < rowsize; i++) { reorder_rows[rows[r] + i] = warp_offsets[w_s] + (r_s % WARP_SIZE) * max_nz_rows[w_s] + i; // undid reodering } } } } /******* scan no reorder coalesce no sort ****/ template <typename ValueType, typename IndexType> void scan_nocoalesced_nosort(uint & nz, uint & nrows, ValueType * A, IndexType * rows, IndexType *colinds, uint & nwarps, uint & allocate_nz, std::vector<uint> & reorder_rows, // new_values[reorder_rows[i]] = A[i] std::vector<int> & warp_offsets, std::vector<uint> & max_nz_rows, std::vector<int> &row_map_, std::vector<int> &row_map_inv_) { std::vector<std::pair<uint,uint> > nnz_r; // non-zeros per row nwarps = (nrows + WARP_SIZE-1)/(WARP_SIZE); // Re-arrange rows to reach our assumptions for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = rows[r+1] - rows[r]; if (rowsize > 0) nnz_r.push_back(std::make_pair(r,rowsize)); } } std::vector<int> row_map(nrows); for(int r = 0; r < nrows; r++) row_map[r] = nnz_r[r].first; row_map_ = row_map; std::vector<int> row_map_inv(nrows); for(int i=0;i<nrows;i++) { row_map_inv[row_map[i]] = i; } row_map_inv_ = row_map_inv; std::vector<uint> A_w(nwarps); // max non-zeros per row std::vector<uint> nnz_imin(nwarps,nrows); // minimum non-zeros per row std::vector<uint> nnz_imax(nwarps); // maximum non-zeros per row // Use sorted row-sizes to calculate A_w, nnz_w, etc. 
for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = nnz_r[r].second; if (rowsize < nnz_imin[w]) nnz_imin[w] = rowsize; // min if (rowsize > nnz_imax[w]) nnz_imax[w] = rowsize; // max } A_w[w] = nnz_imax[w]; } max_nz_rows = A_w; // set warp_offsets and allocate_nz; warp_offsets.resize(nwarps+1); warp_offsets[0] = 0; for(int w = 0; w < nwarps; w++) { warp_offsets[w+1] = warp_offsets[w] + A_w[w] * WARP_SIZE; } allocate_nz = warp_offsets[nwarps]; // Generate reordering map for future use reorder_rows.resize(nz); for (int w_s = 0; w_s < nwarps; w_s++) { for (int r_s = WARP_SIZE * w_s; r_s < nrows && r_s < WARP_SIZE * (w_s+1); r_s++) { int r = nnz_r[r_s].first; int rowsize = nnz_r[r_s].second; for(int i = 0; i < rowsize; i++) { reorder_rows[rows[r] + i] = warp_offsets[w_s] + (r_s % WARP_SIZE) * max_nz_rows[w_s] + i; // undid reodering } } } } /******* scan no sort ****/ template <typename ValueType, typename IndexType> void scan_nosort(uint & nz, uint & nrows, ValueType * A, IndexType * rows, IndexType *colinds, uint & nwarps, uint & allocate_nz, std::vector<uint> & reorder_rows, // new_values[reorder_rows[i]] = A[i] std::vector<int> & warp_offsets, std::vector<uint> & max_nz_rows, std::vector<int> &row_map_, std::vector<int> &row_map_inv_) { std::vector<std::pair<uint,uint> > nnz_r; // non-zeros per row nwarps = (nrows + WARP_SIZE-1)/(WARP_SIZE); // Re-arrange rows to reach our assumptions for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = rows[r+1] - rows[r]; if (rowsize > 0) nnz_r.push_back(std::make_pair(r,rowsize)); } } std::vector<int> row_map(nrows); for(int r = 0; r < nrows; r++) row_map[r] = nnz_r[r].first; row_map_ = row_map; std::vector<int> row_map_inv(nrows); for(int i=0;i<nrows;i++) { row_map_inv[row_map[i]] = i; } row_map_inv_ = row_map_inv; std::vector<uint> A_w(nwarps); // max non-zeros per row std::vector<uint> nnz_imin(nwarps,nrows); // minimum non-zeros per row std::vector<uint> nnz_imax(nwarps); // maximum non-zeros per row // Use sorted row-sizes to calculate A_w, nnz_w, etc. 
for (int w = 0; w < nwarps; w++) { for (int r = WARP_SIZE * w; r < nrows && r < WARP_SIZE*(w+1); r++) { uint rowsize = nnz_r[r].second; if (rowsize < nnz_imin[w]) nnz_imin[w] = rowsize; // min if (rowsize > nnz_imax[w]) nnz_imax[w] = rowsize; // max } A_w[w] = nnz_imax[w]; } max_nz_rows = A_w; // set warp_offsets and allocate_nz; warp_offsets.resize(nwarps+1); warp_offsets[0] = 0; for(int w = 0; w < nwarps; w++) { warp_offsets[w+1] = warp_offsets[w] + A_w[w] * WARP_SIZE; } allocate_nz = warp_offsets[nwarps]; // Generate reordering map for future use reorder_rows.resize(nz); for (int w_s = 0; w_s < nwarps; w_s++) { for (int r_s = WARP_SIZE * w_s; r_s < nrows && r_s < WARP_SIZE * (w_s+1); r_s++) { int r = nnz_r[r_s].first; int rowsize = nnz_r[r_s].second; for(int i = 0; i < rowsize; i++) { reorder_rows[rows[r] + i] = warp_offsets[w_s] + (r_s % WARP_SIZE) + i*WARP_SIZE; } } } } /********* Reorder */ template<typename IndexType, typename ValueType> void reorder(ValueType *A, IndexType * colinds, cusp::array1d<ValueType, DeviceSpace> &device_values, // allocate nz cusp::array1d<IndexType, DeviceSpace> &device_colinds, // allocate nz uint nz, uint allocate_nz, std::vector<uint> reorder_rows) { cusp::array1d<ValueType, CPUSpace> new_values(allocate_nz,0); cusp::array1d<IndexType, CPUSpace> new_colinds(allocate_nz,0); device_values.resize(allocate_nz); device_colinds.resize(allocate_nz); for(int i=0; i< nz; i++) { new_values[reorder_rows[i]] = A[i]; new_colinds[reorder_rows[i]] = colinds[i]; } device_values = new_values; device_colinds = new_colinds; } /* warpkernel1 process */ // preform the reordering template<typename IndexType, typename ValueType> void process(ValueType *A, IndexType * colinds, cusp::array1d<ValueType, DeviceSpace> &device_values, // nz cusp::array1d<IndexType, DeviceSpace> &device_colinds, // nz cusp::array1d<IndexType, DeviceSpace> &device_row_map, // nrows cusp::array1d<uint, DeviceSpace> &device_max_nz_per_row, // nwarps cusp::array1d<IndexType, DeviceSpace> &device_warp_offsets, // nwarps uint nz, uint allocate_nz, std::vector<uint> reorder_rows, std::vector<int> warp_offsets, std::vector<uint> max_nz_rows, std::vector<int> row_map) { reorder(A,colinds, device_values, device_colinds, nz, allocate_nz, reorder_rows); device_row_map = row_map; device_max_nz_per_row = max_nz_rows; device_warp_offsets = warp_offsets; } /******** Purpose of this executable is to examine different effects of optimization *******/ #define ValueType double #define IndexType int int main(int argc, char *argv[]) { std::string matrixfilename = argv[1]; int ntests = 1; if (argc == 3) ntests = atoi(argv[2]); cusp::coo_matrix<IndexType, ValueType, CPUSpace> B; cusp::io::read_matrix_market_file(B, matrixfilename.c_str()); cusp::csr_matrix<IndexType, ValueType, CPUSpace> A = B; uint N = A.num_cols; uint nz = A.num_entries; // open up data file std::string filename; size_t pos = matrixfilename.find_last_of("/"); std::string matrixname; if (pos != std::string::npos ) matrixname.assign(matrixfilename.begin()+pos+1, matrixfilename.end()); else matrixname = matrixfilename; std::string datapath = "./data/" + matrixname + "_optimize.txt"; std::cout << "Starting data file = " << datapath << std::endl; std::ofstream datafile(datapath.c_str()); warpkernel::startDatafile(datafile, N, nz,ntests); cusp::array1d<ValueType, CPUSpace> x(N, 1.0); // thrust::generate(x.begin(),x.end(), rand_float()); cusp::array1d<ValueType, CPUSpace> y(N); { boost::accumulators::accumulator_set<ValueType, 
boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; // cusp hyb multiplication for (int i=0;i<ntests;i++) { cusp::hyb_matrix<IndexType, ValueType, DeviceSpace> A1 = A; cusp::array1d<ValueType, DeviceSpace> dx = x; cusp::array1d<ValueType, DeviceSpace> dy = y; cusp::detail::timer cusptimer; cusptimer.start(); cusp::multiply(A1,dx,dy); ValueType measuredtime = cusptimer.seconds_elapsed(); statstime(measuredtime); y = dy; } std::cout << "cusp gpu time " << std::scientific << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "cusp-csr", boost::accumulators::mean(statstime), -1, -1, -1, -1); } // setup for warpkernel1 cusp::array1d<ValueType, DeviceSpace> dx = x; warpkernel::structure kernel1( N, nz, 0); std::vector<uint> reorder_rows; std::vector<int> warp_offsets; std::vector<uint> max_nz_rows; std::vector<int> row_map; std::vector<int> row_map_inv; uint nznrows; scan(kernel1.nz, kernel1.nrows, &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), kernel1.nwarps, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map, row_map_inv, nznrows); uint warps_per_block = 6; uint nblocks = (kernel1.nwarps + warps_per_block -1)/warps_per_block; uint blocksize = warps_per_block * WARP_SIZE; // original warpkernel1 with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) cudaBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); warpkernel::warpKernel<cache> <<< nblocks, blocksize >>> (kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_cache time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache" << std::endl; } // warpkernel.hpp { cusp::array1d<ValueType, DeviceSpace> dx = x; warpkernel::structure kernel1; kernel1.scan(nz, N, A); uint warps_per_block = 6; uint nblocks = (kernel1.nwarps + warps_per_block-1)/warps_per_block; uint blocksize = warps_per_block * WARP_SIZE; warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng(kernel1, &(A.values[0]), 
&(A.column_indices[0])); cusp::array1d<ValueType, DeviceSpace> dy(N,0); ValueType measuretime = 0; for (int t = 0; t < ntests; t++) { measuretime += eng.run<true>(nblocks, blocksize, thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (eng.verify(y,ycheck)) { std::cout << "warpkernel (" << nblocks << "," << blocksize <<") time = " << std::scientific << measuretime/ntests << std::endl; } else { std::cout << "Failed original warpkernel.hpp" << std::endl; } } // original warpkernel1 no cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) cudaBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); warpkernel::warpKernel<cache> <<< nblocks, blocksize >>> (kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache" << std::endl; } // original warpkernel1 no reordering with cache { // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps scan_nocoalesced(kernel1.nz, kernel1.nrows, &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), kernel1.nwarps, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map, row_map_inv); process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); // original warpkernel1 not coalesced with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = true; 
boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) cudaBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); warpKernel_nocoalesced<cache> <<< nblocks, blocksize >>> (kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_cache_nocoalesced time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_nocoalesced", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_nocoalesced" << std::endl; } // original warpkernel1 not coalesced with no cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) cudaBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); warpKernel_nocoalesced<cache> <<< nblocks, blocksize >>> (kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache_nocoalesced time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_nocoalesced", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocahce_nocoalesced" << std::endl; } } // original warpkernel1 no reordering with cache no sorting { // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps scan_nocoalesced_nosort(kernel1.nz, kernel1.nrows, &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), kernel1.nwarps, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map, row_map_inv); process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); // original warpkernel1 not coalesced with cache no sort { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = true; 
boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) cudaBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); warpKernel_nocoalesced<cache> <<< nblocks, blocksize >>> (kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_cache_nocoalesced_nosort time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_nocoalesced_nosort", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_nocoalesced_nosort" << std::endl; } // original warpkernel1 not coalesced with no cache no sort { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) cudaBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); warpKernel_nocoalesced<cache> <<< nblocks, blocksize >>> (kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache_nocoalesced_nosort time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_nocoalesced_nosort", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_nocache_nocoalesced_nosort" << std::endl; } // original warpkernel1 no reordering with cache { // allocate arrays cusp::array1d<ValueType, DeviceSpace> device_values; // nz cusp::array1d<IndexType, DeviceSpace> device_colinds; // nz cusp::array1d<IndexType, DeviceSpace> device_row_map; // nrows cusp::array1d<uint, DeviceSpace> device_max_nz_per_row; // nwarps cusp::array1d<IndexType, DeviceSpace> device_warp_offsets; // nwarps cusp::array1d<uint, DeviceSpace> device_threads_per_row; // offsets - nwarps cusp::array1d<uint, DeviceSpace> device_row_offset_warp; // rows - nwarps scan_nosort(kernel1.nz, kernel1.nrows, &(A.values[0]), &(A.row_offsets[0]), &(A.column_indices[0]), kernel1.nwarps, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map, row_map_inv); process(&(A.values[0]), &(A.column_indices[0]), device_values, device_colinds, device_row_map, device_max_nz_per_row, device_warp_offsets, kernel1.nz, kernel1.allocate_nz, reorder_rows, warp_offsets, max_nz_rows, row_map); // original warpkernel1 not coalesced with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache 
= true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) cudaBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); warpkernel::warpKernel<cache> <<< nblocks, blocksize >>> (kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_cache_nosort time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_nosort", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_nosort" << std::endl; } // original warpkernel1 not coalesced with no cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; if (cache) cudaBindTexture(0, x_tex, thrust::raw_pointer_cast(&dx[0])); for (int i=0; i < ntests; i++) { cusp::detail::timer t; t.start(); warpkernel::warpKernel<cache> <<< nblocks, blocksize >>> (kernel1.nrows, thrust::raw_pointer_cast(&device_values[0]), thrust::raw_pointer_cast(&device_colinds[0]), thrust::raw_pointer_cast(&device_row_map[0]), thrust::raw_pointer_cast(&device_max_nz_per_row[0]), thrust::raw_pointer_cast(&device_warp_offsets[0]), thrust::raw_pointer_cast(&dx[0]), thrust::raw_pointer_cast(&dy[0])); ValueType measuretime = t.seconds_elapsed(); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache_nosort time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_nosort", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache_nosort" << std::endl; } } // warpkernel1 with reorderedx with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; { cusp::detail::timer reordercolstimer; reordercolstimer.start(); kernel1.reorder_columns_coalesced(reorder_cols); ValueType reordertime = reordercolstimer.seconds_elapsed(); std::cout << "reorder column time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_col_time_cache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, DeviceSpace> dreordered_x; { cusp::detail::timer reorderxtimer; reorderxtimer.start(); kernel1.reorder_x(x, dreordered_x); ValueType reordertime = reorderxtimer.seconds_elapsed(); std::cout << "reorder x time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_x_time_cache_rex", reordertime, -1, -1, -1, -1); } warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &(A.values[0]), &(A.column_indices[0])); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; eng1.device_colinds 
= reorder_cols; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_cache_rex time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_rex", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_rex" << std::endl; } // warpkernel1 with reorderedx no cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; { cusp::detail::timer reordercolstimer; reordercolstimer.start(); kernel1.reorder_columns_coalesced(reorder_cols); ValueType reordertime = reordercolstimer.seconds_elapsed(); std::cout << "reorder column time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_col_time_nocache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, DeviceSpace> dreordered_x; { cusp::detail::timer reorderxtimer; reorderxtimer.start(); kernel1.reorder_x(x, dreordered_x); ValueType reordertime = reorderxtimer.seconds_elapsed(); std::cout << "reorder x time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_x_time_nocache_rex", reordertime, -1, -1, -1, -1); } warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &(A.values[0]), &(A.column_indices[0])); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; eng1.device_colinds = reorder_cols; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_nocache_rex time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_rex", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache_rex" << std::endl; } // warpkernel1 with reorderedx and reordered A with cache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; { cusp::detail::timer reordercolstimer; reordercolstimer.start(); kernel1.reorder_columns_rowsort(reorder_cols, A.row_offsets); ValueType reordertime = reordercolstimer.seconds_elapsed(); std::cout << "reorder column time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_col_time_cache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, DeviceSpace> dreordered_x; { cusp::detail::timer reorderxtimer; reorderxtimer.start(); kernel1.reorder_x(x, dreordered_x); ValueType reordertime = reorderxtimer.seconds_elapsed(); std::cout << "reorder x time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_x_time_cache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, CPUSpace> A_new_values(nz); { cusp::detail::timer reorderAtimer; reorderAtimer.start(); for(int i=0;i< nz; i++) { A_new_values[i] = A.values[kernel1.reorder_A_rows[i]]; } 
ValueType reordertime = reorderAtimer.seconds_elapsed(); std::cout << "reorder A time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_A_time_cache_rex", reordertime, -1, -1, -1, -1); } warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &A_new_values[0], &reorder_cols[0]); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_cache_rex_reA time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_rex_reA", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_rex_reA" << std::endl; } // warpkernel1 with reorderedx and reordered A with nocache { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; { cusp::detail::timer reordercolstimer; reordercolstimer.start(); kernel1.reorder_columns_rowsort(reorder_cols, A.row_offsets); ValueType reordertime = reordercolstimer.seconds_elapsed(); std::cout << "reorder column time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_col_time_nocache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, DeviceSpace> dreordered_x; { cusp::detail::timer reorderxtimer; reorderxtimer.start(); kernel1.reorder_x(x, dreordered_x); ValueType reordertime = reorderxtimer.seconds_elapsed(); std::cout << "reorder x time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_x_time_nocache_rex", reordertime, -1, -1, -1, -1); } cusp::array1d<ValueType, CPUSpace> A_new_values(nz); { cusp::detail::timer reorderAtimer; reorderAtimer.start(); for(int i=0;i< nz; i++) { A_new_values[i] = A.values[kernel1.reorder_A_rows[i]]; } ValueType reordertime = reorderAtimer.seconds_elapsed(); std::cout << "reorder A time " << reordertime << std::endl; warpkernel::addData(datafile, "reorder_A_time_nocache_rex", reordertime, -1, -1, -1, -1); } warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &A_new_values[0], &reorder_cols[0]); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_nocache_rex_reA time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_rex_reA", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache_rex_reA" << std::endl; } // warpkernel1 with reorderedx with cache remap { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; 
kernel1.reorder_columns_coalesced(reorder_cols); cusp::array1d<ValueType, DeviceSpace> dreordered_x; kernel1.reorder_x(x, dreordered_x); warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &(A.values[0]), &(A.column_indices[0])); const bool cache = true; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; eng1.device_colinds = reorder_cols; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run_x<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify_x(y,ycheck,N, &(kernel1.row_map[0]))) { std::cout << "current warpkernel_cache_rexmap time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_cache_rexmap", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_cache_rexmap" << std::endl; } // warpkernel1 with reorderedx no cache remap { cusp::array1d<ValueType, DeviceSpace> dy(N, 0); kernel1.scan(nz, N, A); cusp::array1d<IndexType, CPUSpace> reorder_cols = A.column_indices; kernel1.reorder_columns_coalesced(reorder_cols); cusp::array1d<ValueType, DeviceSpace> dreordered_x; kernel1.reorder_x(x, dreordered_x); warpkernel::engine<ValueType, IndexType, warpkernel::structure> eng1(kernel1, &(A.values[0]), &(A.column_indices[0])); const bool cache = false; boost::accumulators::accumulator_set<ValueType, boost::accumulators::stats<boost::accumulators::tag::mean> > statstime; eng1.device_colinds = reorder_cols; for(int i=0;i< ntests;i++) { ValueType measuretime = eng1.run<cache>(nblocks, blocksize, thrust::raw_pointer_cast(&dreordered_x[0]), thrust::raw_pointer_cast(&dy[0])); statstime(measuretime); } cusp::array1d<ValueType, CPUSpace> ycheck = dy; if (verify(y,ycheck,N)) { std::cout << "current warpkernel_nocache_rexmap time =" << boost::accumulators::mean(statstime) << std::endl; warpkernel::addData(datafile, "warpkernel_nocache_rexmap", boost::accumulators::mean(statstime), kernel1.allocate_nz, kernel1.nwarps, nblocks, blocksize); } else std::cout << "failed to verify warp_kernel_nocache_rexmap" << std::endl; } } }
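For reference, the coalesced layout produced by scan() and reorder() above places the i-th non-zero of the row handled by lane r_s % WARP_SIZE of warp w_s at warp_offsets[w_s] + (r_s % WARP_SIZE) + i*WARP_SIZE, with every row padded up to the warp's max_nz_rows entry. The kernel that consumes this layout, warpkernel::warpKernel, lives in warpkernel.hpp and is not reproduced here, so the sketch below is only an assumed reconstruction of its structure (texture-cache path omitted), not the actual implementation:

// Assumed sketch of a warp-per-row SpMV kernel over the coalesced layout
// built by scan()/reorder(); the real warpkernel::warpKernel may differ.
__global__ void warpKernel_sketch(int nrows,
                                  const double *values,               // padded, warp-coalesced
                                  const int *colinds,                 // same layout as values
                                  const int *row_map,                 // sorted row id -> original row id
                                  const unsigned int *max_nz_per_row, // one entry per warp
                                  const int *warp_offsets,            // one entry per warp (+1)
                                  const double *x,
                                  double *y)
{
  int row = blockIdx.x * blockDim.x + threadIdx.x; // one thread per sorted row
  if (row >= nrows) return;

  int lane = row & (WARP_SIZE - 1);
  int warp = row / WARP_SIZE;
  int base = warp_offsets[warp] + lane;            // first non-zero of this row
  unsigned int maxnz = max_nz_per_row[warp];       // rows in this warp are padded to this length

  double sum = 0.0;
  for (unsigned int i = 0; i < maxnz; i++) {
    int idx = base + i * WARP_SIZE;                // stride-WARP_SIZE => coalesced within the warp
    sum += values[idx] * x[colinds[idx]];          // padded entries hold value 0.0 and add nothing
  }
  y[row_map[row]] = sum;                           // scatter back to the original row order
}

Because reorder() zero-initialises new_values and new_colinds, the padding entries contribute 0.0 * x[0] and leave each row sum unchanged.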
6015a01a0a671700a29bbc6fa538f45e8ff833b8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <cstdlib>
#include <hiprand/hiprand_kernel.h>
#include <thrust/random.h>

// add two arrays
template<typename T>
__global__ void add(T *output, T *inputA, T *inputB)
{
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  output[idx] = inputA[idx] + inputB[idx];
}

template<typename T>
__global__ void initRandom(T *arr, float minValue, float maxValue)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  thrust::default_random_engine rng;
  thrust::uniform_real_distribution<float> dist(minValue, maxValue);
  rng.discard(idx);
  arr[idx] = dist(rng);
}

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
  if (code != hipSuccess)
  {
    fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

int main ()
{
  int N = 8000 * 8000; // 800px x 800px image
  int iterations = 10;
  int size = N*sizeof(float);
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;

  float *x, *y, *output;

  // Allocate Unified Memory accessible from CPU or GPU
  hipMallocManaged(&x, size);
  hipMallocManaged(&y, size);
  hipMallocManaged(&output, size);

  // initialize arrays
  hipLaunchKernelGGL(( initRandom), dim3(numBlocks), dim3(blockSize), 0, 0, x, 0., 1.);
  hipLaunchKernelGGL(( initRandom), dim3(numBlocks), dim3(blockSize), 0, 0, y, 0., 1.);
  hipDeviceSynchronize();

  for (int blerp = 0; blerp < iterations; blerp++) {
    hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, output, x, y);
    // Wait for GPU to finish before accessing on host
    hipDeviceSynchronize();
  }

  // Free memory
  hipFree(x);
  hipFree(y);
  hipFree(output);

  return 0;
}
6015a01a0a671700a29bbc6fa538f45e8ff833b8.cu
#include <iostream>
#include <math.h>
#include <cstdlib>
#include <curand_kernel.h>
#include <thrust/random.h>

// add two arrays
template<typename T>
__global__ void add(T *output, T *inputA, T *inputB)
{
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  output[idx] = inputA[idx] + inputB[idx];
}

template<typename T>
__global__ void initRandom(T *arr, float minValue, float maxValue)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  thrust::default_random_engine rng;
  thrust::uniform_real_distribution<float> dist(minValue, maxValue);
  rng.discard(idx);
  arr[idx] = dist(rng);
}

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
  if (code != cudaSuccess)
  {
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

int main ()
{
  int N = 8000 * 8000; // 800px x 800px image
  int iterations = 10;
  int size = N*sizeof(float);
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;

  float *x, *y, *output;

  // Allocate Unified Memory – accessible from CPU or GPU
  cudaMallocManaged(&x, size);
  cudaMallocManaged(&y, size);
  cudaMallocManaged(&output, size);

  // initialize arrays
  initRandom<<<numBlocks, blockSize>>>(x, 0., 1.);
  initRandom<<<numBlocks, blockSize>>>(y, 0., 1.);
  cudaDeviceSynchronize();

  for (int blerp = 0; blerp < iterations; blerp++) {
    add<<<numBlocks, blockSize>>>(output, x, y);
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
  }

  // Free memory
  cudaFree(x);
  cudaFree(y);
  cudaFree(output);

  return 0;
}
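Both versions of this file define a gpuErrchk/gpuAssert helper but never call it, so a failed cudaMallocManaged/hipMallocManaged or kernel launch goes unreported. A minimal way to wire it in, shown for the CUDA variant (the HIP variant is identical apart from the hip-prefixed calls); this is a suggested usage, not part of the original file:

// Suggested usage of the gpuErrchk macro already defined above (not in the original file):
gpuErrchk(cudaMallocManaged(&x, size));
gpuErrchk(cudaMallocManaged(&y, size));
gpuErrchk(cudaMallocManaged(&output, size));

initRandom<<<numBlocks, blockSize>>>(x, 0.f, 1.f);
initRandom<<<numBlocks, blockSize>>>(y, 0.f, 1.f);
gpuErrchk(cudaGetLastError());        // reports invalid launch configurations
gpuErrchk(cudaDeviceSynchronize());   // reports errors raised while the kernels ran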
28e8d9e308018c5dbeff30089d0d3ab0be89dadf.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>

__global__ void print_parameters()
{
	printf("blockIdx_x: %d, blockIdx_y: %d, blockIdx_z: %d, blockDim_x: %d, blockDim_y: %d, GridDim_x: %d, GridDim_y: %d \n",
		blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}

int main()
{
	int nx, ny;
	nx = 16;
	ny = 16;

	dim3 block(8, 8);
	dim3 grid(nx / block.x, ny / block.y);

	// launch converted to the hipLaunchKernelGGL form used elsewhere in these files;
	// the spaced "<< < ... >> >" chevrons were left behind by the automatic conversion
	hipLaunchKernelGGL((print_parameters), dim3(grid), dim3(block), 0, 0);

	hipDeviceSynchronize();
	hipDeviceReset();
	return 0;
}
28e8d9e308018c5dbeff30089d0d3ab0be89dadf.cu
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>

__global__ void print_parameters()
{
	printf("blockIdx_x: %d, blockIdx_y: %d, blockIdx_z: %d, blockDim_x: %d, blockDim_y: %d, GridDim_x: %d, GridDim_y: %d \n",
		blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}

int main()
{
	int nx, ny;
	nx = 16;
	ny = 16;

	dim3 block(8, 8);
	dim3 grid(nx / block.x, ny / block.y);

	print_parameters << <grid, block >> > ();

	cudaDeviceSynchronize();
	cudaDeviceReset();
	return 0;
}
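The kernel above only prints the raw built-in variables. A hypothetical companion kernel (not part of the original pair) that combines those same built-ins into per-thread global coordinates and a row-major linear index, which is usually the next step after inspecting them:

// Hypothetical companion kernel: derive global 2D coordinates and a linear
// index from the same built-ins printed above.
__global__ void print_global_index()
{
	int gx = blockIdx.x * blockDim.x + threadIdx.x;   // global column
	int gy = blockIdx.y * blockDim.y + threadIdx.y;   // global row
	int gid = gy * (gridDim.x * blockDim.x) + gx;     // row-major linear id, 0..255 for this 16x16 launch
	printf("gx: %d, gy: %d, gid: %d \n", gx, gy, gid);
}

// launched exactly like print_parameters:
// print_global_index<<<grid, block>>>();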
d7f713e36a85e564c2acab3518e23584fd6bb8ad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kunet.h"

__global__ void _adagrad32(int n, float eps, float *dw2, float *dw) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < n) {
    dw2[i] += dw[i] * dw[i];
    dw[i] /= (eps + sqrt(dw2[i]));
    i += blockDim.x * gridDim.x;
  }
}

__global__ void _l1reg32(int n, float l1, float *w, float *dw) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < n) {
    if (w[i] > 0) dw[i] += l1;
    else if (w[i] < 0) dw[i] -= l1;
    i += blockDim.x * gridDim.x;
  }
}

__global__ void _adagrad64(int n, double eps, double *dw2, double *dw) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < n) {
    dw2[i] += dw[i] * dw[i];
    dw[i] /= (eps + sqrt(dw2[i]));
    i += blockDim.x * gridDim.x;
  }
}

__global__ void _l1reg64(int n, double l1, double *w, double *dw) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < n) {
    if (w[i] > 0) dw[i] += l1;
    else if (w[i] < 0) dw[i] -= l1;
    i += blockDim.x * gridDim.x;
  }
}

extern "C" {
  void l1reg32(int n, float l1, float *w, float *dw) KCALL(_l1reg32,n,l1,w,dw);
  void l1reg64(int n, double l1, double *w, double *dw) KCALL(_l1reg64,n,l1,w,dw);
  void adagrad32(int n, float eps, float *dw2, float *dw) KCALL(_adagrad32,n,eps,dw2,dw);
  void adagrad64(int n, double eps, double *dw2, double *dw) KCALL(_adagrad64,n,eps,dw2,dw);
}
d7f713e36a85e564c2acab3518e23584fd6bb8ad.cu
#include "kunet.h" __global__ void _adagrad32(int n, float eps, float *dw2, float *dw) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dw2[i] += dw[i] * dw[i]; dw[i] /= (eps + sqrt(dw2[i])); i += blockDim.x * gridDim.x; } } __global__ void _l1reg32(int n, float l1, float *w, float *dw) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { if (w[i] > 0) dw[i] += l1; else if (w[i] < 0) dw[i] -= l1; i += blockDim.x * gridDim.x; } } __global__ void _adagrad64(int n, double eps, double *dw2, double *dw) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { dw2[i] += dw[i] * dw[i]; dw[i] /= (eps + sqrt(dw2[i])); i += blockDim.x * gridDim.x; } } __global__ void _l1reg64(int n, double l1, double *w, double *dw) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { if (w[i] > 0) dw[i] += l1; else if (w[i] < 0) dw[i] -= l1; i += blockDim.x * gridDim.x; } } extern "C" { void l1reg32(int n, float l1, float *w, float *dw) KCALL(_l1reg32,n,l1,w,dw); void l1reg64(int n, double l1, double *w, double *dw) KCALL(_l1reg64,n,l1,w,dw); void adagrad32(int n, float eps, float *dw2, float *dw) KCALL(_adagrad32,n,eps,dw2,dw); void adagrad64(int n, double eps, double *dw2, double *dw) KCALL(_adagrad64,n,eps,dw2,dw); }
d2a4e42c98d1c94acbbed74699aee85875d63b1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "cuda/dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> THCState *state = at::globalContext().lazyInitCUDA(); // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu // [batch gemm] // https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu __global__ void createBatchGemmBuffer(const float **input_b, float **output_b, float **columns_b, const float **ones_b, const float **weight_b, const float **bias_b, float *input, float *output, float *columns, float *ones, float *weight, float *bias, const int input_stride, const int output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { input_b[idx] = input + idx * input_stride; output_b[idx] = output + idx * output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; bias_b[idx] = bias; } } at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { using scalar_t = float; // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({batch, height_out, width_out}, input.options()); auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); // prepare for batch-wise computing, which is significantly faster than instance-wise computing // when batch size is large. 
// launch batch threads int matrices_size = batch * sizeof(float *); auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); const int block = 128; const int grid = (batch + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_b, output_b, columns_b, ones_b, weight_b, bias_b, input.data<scalar_t>(), output.data<scalar_t>(), columns.data<scalar_t>(), ones.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), channels * width * height, channels_out * width_out * height_out, channels * kernel_h * kernel_w * height_out * width_out, height_out * width_out, batch); long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; THCudaBlas_SgemmBatched(state, 't', 'n', n_, m_, k_, 1.0f, ones_b, k_, bias_b, k_, 0.0f, output_b, n_, batch); modulated_deformable_im2col_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data<scalar_t>(), offset.data<scalar_t>(), mask.data<scalar_t>(), batch, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; THCudaBlas_SgemmBatched(state, 'n', 'n', n, m, k, 1.0f, (const float **)columns_b, n, weight_b, k, 1.0f, output_b, n, batch); THCudaFree(state, input_b); THCudaFree(state, output_b); THCudaFree(state, columns_b); THCudaFree(state, ones_b); THCudaFree(state, weight_b); THCudaFree(state, bias_b); return output; } __global__ void createBatchGemmBufferBackward( float **grad_output_b, float **columns_b, float **ones_b, float **weight_b, float **grad_weight_b, float **grad_bias_b, float *grad_output, float *columns, float *ones, float *weight, float *grad_weight, float *grad_bias, const int grad_output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { grad_output_b[idx] = grad_output + idx * grad_output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; grad_weight_b[idx] = grad_weight; grad_bias_b[idx] = grad_bias; } } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask 
must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. input data modulated_deformable_col2im_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. 
bias // long m_ = channels_out; // long k__ = height_out * width_out; // THCudaBlas_Sgemv(state, // 't', // k_, m_, 1.0f, // grad_output_n.data<scalar_t>(), k_, // ones.data<scalar_t>(), 1, 1.0f, // grad_bias.data<scalar_t>(), 1); // } THCudaBlas_Sgemm(state, 'N', 'N', 1, m_, k_, 1.0f, ones.data<scalar_t>(), 1, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
d2a4e42c98d1c94acbbed74699aee85875d63b1f.cu
#include <vector> #include "cuda/dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> THCState *state = at::globalContext().lazyInitCUDA(); // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu // [batch gemm] // https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu __global__ void createBatchGemmBuffer(const float **input_b, float **output_b, float **columns_b, const float **ones_b, const float **weight_b, const float **bias_b, float *input, float *output, float *columns, float *ones, float *weight, float *bias, const int input_stride, const int output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { input_b[idx] = input + idx * input_stride; output_b[idx] = output + idx * output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; bias_b[idx] = bias; } } at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { using scalar_t = float; // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({batch, height_out, width_out}, input.options()); auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); // prepare for batch-wise computing, which is significantly faster than instance-wise computing // when batch size is large. 
// launch batch threads int matrices_size = batch * sizeof(float *); auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); const int block = 128; const int grid = (batch + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( input_b, output_b, columns_b, ones_b, weight_b, bias_b, input.data<scalar_t>(), output.data<scalar_t>(), columns.data<scalar_t>(), ones.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), channels * width * height, channels_out * width_out * height_out, channels * kernel_h * kernel_w * height_out * width_out, height_out * width_out, batch); long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; THCudaBlas_SgemmBatched(state, 't', 'n', n_, m_, k_, 1.0f, ones_b, k_, bias_b, k_, 0.0f, output_b, n_, batch); modulated_deformable_im2col_cuda(c10::cuda::getCurrentCUDAStream(), input.data<scalar_t>(), offset.data<scalar_t>(), mask.data<scalar_t>(), batch, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; THCudaBlas_SgemmBatched(state, 'n', 'n', n, m, k, 1.0f, (const float **)columns_b, n, weight_b, k, 1.0f, output_b, n, batch); THCudaFree(state, input_b); THCudaFree(state, output_b); THCudaFree(state, columns_b); THCudaFree(state, ones_b); THCudaFree(state, weight_b); THCudaFree(state, bias_b); return output; } __global__ void createBatchGemmBufferBackward( float **grad_output_b, float **columns_b, float **ones_b, float **weight_b, float **grad_weight_b, float **grad_bias_b, float *grad_output, float *columns, float *ones, float *weight, float *grad_weight, float *grad_bias, const int grad_output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { grad_output_b[idx] = grad_output + idx * grad_output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; grad_weight_b[idx] = grad_weight; grad_bias_b[idx] = grad_bias; } } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const 
int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(c10::cuda::getCurrentCUDAStream(), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. input data modulated_deformable_col2im_cuda(c10::cuda::getCurrentCUDAStream(), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(c10::cuda::getCurrentCUDAStream(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. 
bias // long m_ = channels_out; // long k__ = height_out * width_out; // THCudaBlas_Sgemv(state, // 't', // k_, m_, 1.0f, // grad_output_n.data<scalar_t>(), k_, // ones.data<scalar_t>(), 1, 1.0f, // grad_bias.data<scalar_t>(), 1); // } THCudaBlas_Sgemm(state, 'N', 'N', 1, m_, k_, 1.0f, ones.data<scalar_t>(), 1, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
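A note on the final 'N','N' SGEMM above: it folds the bias gradient into a matrix product. grad_output_n (channels_out x height_out*width_out) is multiplied by the ones vector, which simply sums each channel's spatial gradients and accumulates them into grad_bias (the commented-out Sgemv would compute the same thing). A naive kernel spelling out that reduction, purely for illustration and not part of this source:

__global__ void naive_bias_grad(const float* grad_output,  // one sample: [channels_out, height_out * width_out]
                                float* grad_bias,           // [channels_out], accumulated across the batch
                                int channels_out, int spatial) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (c >= channels_out) return;
  float acc = 0.f;
  for (int p = 0; p < spatial; ++p) {
    acc += grad_output[(size_t)c * spatial + p];  // row of grad_output dotted with a ones vector
  }
  grad_bias[c] += acc;  // beta = 1.0f in the SGEMM call, so results accumulate over samples
}

Launching this once per sample (e.g. 256 threads per block, enough blocks to cover channels_out) reproduces what the GEMM computes, just far more slowly.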
4bea669211813bf71398296353fe390f640603d6.hip
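The FORMA-generated file below implements a 13-point 3D Jacobi-style stencil (j3d13pt) with temporal blocking: each z-sweep appears to chain four stencil applications through the shared-memory stages __tilevar_2__ .. __tilevar_5__, exchanging halo planes through the __copy_arr_* buffers. As a reading aid, here is a plain single-application reference kernel reconstructed from the coefficients visible in the generated code (0.083f on the twelve +/-1 and +/-2 neighbours along each axis, -0.996f on the centre); it is illustrative only and not part of the source:

__global__ void j3d13pt_reference(const float* in, float* out, int L, int M, int N) {
  // One stencil application over interior points (2 .. dim-3), matching the bounds
  // used by the generated kernels. Launch with a 3D grid covering (N, M, L).
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // x, fastest varying
  int j = blockIdx.y * blockDim.y + threadIdx.y;  // y
  int k = blockIdx.z * blockDim.z + threadIdx.z;  // z
  if (i < 2 || i > N - 3 || j < 2 || j > M - 3 || k < 2 || k > L - 3) return;
  size_t xy = (size_t)N * M;
  size_t c = (size_t)k * xy + (size_t)j * N + i;   // linear index of the centre point
  float nbr = in[c + 1]  + in[c + 2]      + in[c - 1]  + in[c - 2]        // x +/- 1, 2
            + in[c + N]  + in[c + 2 * N]  + in[c - N]  + in[c - 2 * N]    // y +/- 1, 2
            + in[c + xy] + in[c + 2 * xy] + in[c - xy] + in[c - 2 * xy];  // z +/- 1, 2
  out[c] = 0.083000f * nbr - 0.996000f * in[c];
}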
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif #define GAPX (12) #define GAPY (12) #define EXTENT (10) template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-16); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY); float t2_0=0.0f, t3_0=0.0f, t4_0=0.0f, t5_0=0.0f, t2_1=0.0f, t3_1=0.0f, t4_1=0.0f, t5_1=0.0f; float b2_0=0.0f, b3_0=0.0f, b4_0=0.0f, b5_0=0.0f, b2_1=0.0f, b3_1=0.0f, b4_1=0.0f, b5_1=0.0f; // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } // Rest of the computation for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 
t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3)) ){ float __temp_3__ = (__tilevar_2__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_2__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t2_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t2_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b2_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b2_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__+2),2)+4) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3))-4))) { __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3_1; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){ float __temp_3__ = (__tilevar_3__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_3__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ 
+ 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t3_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t3_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b3_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b3_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b4_1 = b4_0; b4_0 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t4_0; t4_0 = t4_1; t4_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__+4),2)+4) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3))-4))) { __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4_1; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(M-3))) { if( __iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3)) ){ float __temp_3__ = (__tilevar_4__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_4__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t4_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t4_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b4_0; float 
__temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b4_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b5_1 = b5_0; b5_0 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t5_0; t5_0 = t5_1; t5_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__+6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__+6),2)+4) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(M-3))-4))) { __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5_1; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(N-3))){ float __temp_3__ = (__tilevar_5__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_5__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t5_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t5_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b5_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b5_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-6,0))] = __temp_63__; } } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /* X, Y+GAP, Z */ __global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int 
FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y); float t2_0=0.0f, t3_0=0.0f, t4_0=0.0f, t5_0=0.0f, t2_1=0.0f, t3_1=0.0f, t4_1=0.0f, t5_1=0.0f; float b2_0=0.0f, b3_0=0.0f, b4_0=0.0f, b5_0=0.0f, b2_1=0.0f, b3_1=0.0f, b4_1=0.0f, b5_1=0.0f; int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } // Rest of the computation for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3)) ){ float __temp_3__ = (__tilevar_2__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_2__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = 
(__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t2_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t2_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b2_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b2_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))) & (__iter_5__ < (FORMA_MAX((__iter_0__+2),2)+4) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))-4))) { __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3_1; } if ((__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))) & (__iter_4__ < FORMA_MAX((__iter_1__-2),2) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3)))) { b3_1 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b3_0 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t3_0 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t3_1 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){ float __temp_3__ = (__tilevar_3__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_3__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float 
__temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t3_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t3_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b3_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b3_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b4_1 = b4_0; b4_0 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t4_0; t4_0 = t4_1; t4_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3))) & (__iter_5__ < (FORMA_MAX((__iter_0__+4),2)+4) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3))-4))) { __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4_1; } if ((__iter_4__ >= FORMA_MAX((__iter_1__-8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+8)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3))) & (__iter_4__ < FORMA_MAX((__iter_1__-4),2) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3)))) { b4_1 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b4_0 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t4_0 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t4_1 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3)) ){ float __temp_3__ = (__tilevar_4__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_4__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t4_1; float __temp_43__ = 
(__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t4_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b4_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b4_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b5_1 = b5_0; b5_0 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t5_0; t5_0 = t5_1; t5_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))) & (__iter_5__ < (FORMA_MAX((__iter_0__+6),2)+4) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))-4))) { __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5_1; } if ((__iter_4__ >= FORMA_MAX((__iter_1__-10),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+10)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))) & (__iter_4__ < FORMA_MAX((__iter_1__-6),2) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3)))) { b5_1 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b5_0 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t5_0 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t5_1 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+8)-1),(M-3))) { if( __iter_5__ >= FORMA_MAX((__iter_0__+8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(N-3)) ){ float __temp_3__ = (__tilevar_5__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_5__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t5_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t5_0; float __temp_48__ = (__temp_43__ + 
0.083000f * __temp_47__); float __temp_52__ = b5_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b5_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-6,0))] = __temp_63__; } } } } /* X+GAP, Y+GAP, Z */ __global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y); float t2_0=0.0f, t3_0=0.0f, t4_0=0.0f, t5_0=0.0f, t2_1=0.0f, t3_1=0.0f, t4_1=0.0f, t5_1=0.0f; float b2_0=0.0f, b3_0=0.0f, b4_0=0.0f, b5_0=0.0f, b2_1=0.0f, b3_1=0.0f, b4_1=0.0f, b5_1=0.0f; int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-1))) { b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__-2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-3)) ){ float __temp_3__ = (__tilevar_2__[__iter_5__+2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_2__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_2__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * 
__temp_12__); float __temp_17__ = (__tilevar_2__[__iter_5__-2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t2_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t2_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b2_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b2_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__-6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+6)-1),(N-3))) & (__iter_4__ < FORMA_MAX((__iter_1__-2),2) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3)) | __iter_5__ < FORMA_MAX((__iter_0__-2),2) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-3)))) { b3_1 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b3_0 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t3_0 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t3_1 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__-4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-3)) ){ float __temp_3__ = (__tilevar_3__[__iter_5__+2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_3__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_3__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_3__[__iter_5__-2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 
0.083000f * __temp_22__); float __temp_27__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t3_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t3_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b3_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b3_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b4_1 = b4_0; b4_0 = __tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t4_0; t4_0 = t4_1; t4_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+8)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__-8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+8)-1),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__-4),2)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3))) | __iter_5__ < FORMA_MAX((__iter_0__-4),2) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-3)))) { b4_1 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b4_0 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t4_0 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t4_1 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) { if( __iter_5__ >= FORMA_MAX((__iter_0__-6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+6)-1),(N-3)) ){ float __temp_3__ = (__tilevar_4__[__iter_5__+2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_4__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_4__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_4__[__iter_5__-2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = 
(__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t4_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t4_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b4_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b4_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b5_1 = b5_0; b5_0 = __tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t5_0; t5_0 = t5_1; t5_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-10),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+10)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__-10),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+10)-1),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__-6),2)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) | __iter_5__ < FORMA_MAX((__iter_0__-6),2) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+6)-1),(N-3)))) { b5_1 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b5_0 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t5_0 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t5_1 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+8)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__-8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+8)-1),(N-3)) ){ float __temp_3__ = (__tilevar_5__[__iter_5__+2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_5__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_5__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_5__[__iter_5__-2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t5_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t5_0; float 
__temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b5_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b5_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-6,0))] = __temp_63__; } } } } /*Device code End */ /* Host Code Begin */ extern "C" void j3d13pt(float * h_input, int L, int M, int N, float * __var_0__){ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __copy_arr_0__; hipMalloc(&__copy_arr_0__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n"); float * __copy_arr_1__; hipMalloc(&__copy_arr_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n"); float * __copy_arr_2__; hipMalloc(&__copy_arr_2__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = N; int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0t__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0t__(__grid_0___kernel___forma_kernel__0t__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, 
__blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0t__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipLaunchKernelGGL(( __kernel___forma_kernel__2__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n"); hipLaunchKernelGGL(( __kernel___forma_kernel__3__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); hipFree(__copy_arr_0__); hipFree(__copy_arr_1__); hipFree(__copy_arr_2__); } /*Host Free End*/
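For completeness, a minimal host driver for the generated entry point is sketched below (an assumed test harness, not part of the generated source). Check_CUDA_Error is only declared in the generated file, so the caller has to supply a definition; the one here is a guess based on common practice, written against the CUDA runtime to match the .cu twin that follows (the .hip variant would use the hip* equivalents). Plain pageable host buffers are fine, since the wrapper inspects the pointers and performs the device copies itself:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

extern "C" void j3d13pt(float* h_input, int L, int M, int N, float* h_output);

// Assumed definition: the generated code only declares this symbol.
void Check_CUDA_Error(const char* message) {
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "%s: %s\n", message, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
}

int main() {
  const int L = 64, M = 64, N = 64;                // assumed problem size
  const size_t elems = (size_t)L * M * N;
  float* in  = (float*)malloc(elems * sizeof(float));
  float* out = (float*)malloc(elems * sizeof(float));
  for (size_t i = 0; i < elems; ++i) in[i] = (float)(i % 7);  // arbitrary test data
  j3d13pt(in, L, M, N, out);                       // copies in, runs the pipelined kernels, copies back
  const size_t c = ((size_t)(L / 2) * M + M / 2) * N + N / 2;
  printf("out at the centre point: %f\n", out[c]);
  free(in);
  free(out);
  return 0;
}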
4bea669211813bf71398296353fe390f640603d6.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif #define GAPX (12) #define GAPY (12) #define EXTENT (10) template<typename T> __global__ void __kernel_init__(T* input, T value) { int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x); input[loc] = value; } template<typename T> void initialize_array(T* d_input, int size, T value) { dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0)); dim3 init_block(FORMA_MAX_BLOCKDIM_0); __kernel_init__<<<init_grid,init_block>>>(d_input,value); } void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ /* X, Y, Z */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-16); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY); float t2_0=0.0f, t3_0=0.0f, t4_0=0.0f, t5_0=0.0f, t2_1=0.0f, t3_1=0.0f, t4_1=0.0f, t5_1=0.0f; float b2_0=0.0f, b3_0=0.0f, b4_0=0.0f, b5_0=0.0f, b2_1=0.0f, b3_1=0.0f, b4_1=0.0f, b5_1=0.0f; // Initialize the values int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } // Rest of the computation for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){ b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= 
FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3)) ){ float __temp_3__ = (__tilevar_2__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_2__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t2_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t2_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b2_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b2_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__+2),2)+4) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3))-4))) { __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3_1; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){ float __temp_3__ = (__tilevar_3__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_3__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = 
(__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t3_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t3_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b3_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b3_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b4_1 = b4_0; b4_0 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t4_0; t4_0 = t4_1; t4_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__+4),2)+4) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3))-4))) { __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4_1; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(M-3))) { if( __iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3)) ){ float __temp_3__ = (__tilevar_4__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_4__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t4_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t4_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b4_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); 
float __temp_57__ = b4_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b5_1 = b5_0; b5_0 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]; __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t5_0; t5_0 = t5_1; t5_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__+6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__+6),2)+4) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(M-3))-4))) { __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5_1; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__+8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(N-3))){ float __temp_3__ = (__tilevar_5__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_7__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_5__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t5_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t5_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b5_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b5_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-6,0))] = __temp_63__; } } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_Y = (int)(blockDim.y); int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); return SMemSize; } /* X, Y+GAP, Z */ __global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * 
__restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y); float t2_0=0.0f, t3_0=0.0f, t4_0=0.0f, t5_0=0.0f, t2_1=0.0f, t3_1=0.0f, t4_1=0.0f, t5_1=0.0f; float b2_0=0.0f, b3_0=0.0f, b4_0=0.0f, b5_0=0.0f, b2_1=0.0f, b3_1=0.0f, b4_1=0.0f, b5_1=0.0f; int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } // Rest of the computation for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) { b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3)) ){ float __temp_3__ = (__tilevar_2__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_2__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_2__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_2__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = 
(__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t2_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t2_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b2_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b2_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_2__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))) & (__iter_5__ < (FORMA_MAX((__iter_0__+2),2)+4) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))-4))) { __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t3_1; } if ((__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))) & (__iter_4__ < FORMA_MAX((__iter_1__-2),2) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3)))) { b3_1 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b3_0 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t3_0 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t3_1 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){ float __temp_3__ = (__tilevar_3__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_3__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_3__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_3__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float 
__temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t3_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t3_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b3_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b3_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_3__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b4_1 = b4_0; b4_0 = __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t4_0; t4_0 = t4_1; t4_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3))) & (__iter_5__ < (FORMA_MAX((__iter_0__+4),2)+4) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3))-4))) { __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t4_1; } if ((__iter_4__ >= FORMA_MAX((__iter_1__-8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+8)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3))) & (__iter_4__ < FORMA_MAX((__iter_1__-4),2) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3)))) { b4_1 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b4_0 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t4_0 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t4_1 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3)) ){ float __temp_3__ = (__tilevar_4__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_4__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_4__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_4__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t4_1; float __temp_43__ = 
(__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t4_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b4_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b4_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_4__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b5_1 = b5_0; b5_0 = __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t5_0; t5_0 = t5_1; t5_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))) & (__iter_5__ < (FORMA_MAX((__iter_0__+6),2)+4) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))-4))) { __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))] = t5_1; } if ((__iter_4__ >= FORMA_MAX((__iter_1__-10),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+10)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))) & (__iter_4__ < FORMA_MAX((__iter_1__-6),2) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3)))) { b5_1 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b5_0 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t5_0 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t5_1 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+8)-1),(M-3))) { if( __iter_5__ >= FORMA_MAX((__iter_0__+8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(N-3)) ){ float __temp_3__ = (__tilevar_5__[__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_5__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_5__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_5__[__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t5_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t5_0; float __temp_48__ = (__temp_43__ + 
0.083000f * __temp_47__); float __temp_52__ = b5_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b5_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_5__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-6,0))] = __temp_63__; } } } } /* X+GAP, Y+GAP, Z */ __global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X); int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X); int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y); float t2_0=0.0f, t3_0=0.0f, t4_0=0.0f, t5_0=0.0f, t2_1=0.0f, t3_1=0.0f, t4_1=0.0f, t5_1=0.0f; float b2_0=0.0f, b3_0=0.0f, b4_0=0.0f, b5_0=0.0f, b2_1=0.0f, b3_1=0.0f, b4_1=0.0f, b5_1=0.0f; int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ; int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ; if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-1))) { b2_0 = input[__iter_5__+N*(__iter_4__+M*(0))]; __tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))]; t2_0 = input[__iter_5__+N*(__iter_4__+M*(2))]; t2_1 = input[__iter_5__+N*(__iter_4__+M*(3))]; } for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) { if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-1))) { b2_1 = b2_0; b2_0 = __tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t2_0; t2_0 = t2_1; t2_1 = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__-2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-3)) ){ float __temp_3__ = (__tilevar_2__[__iter_5__+2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_2__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_2__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * 
__temp_12__); float __temp_17__ = (__tilevar_2__[__iter_5__-2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t2_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t2_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b2_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b2_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_2__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b3_1 = b3_0; b3_0 = __tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t3_0; t3_0 = t3_1; t3_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__-6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+6)-1),(N-3))) & (__iter_4__ < FORMA_MAX((__iter_1__-2),2) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-3)) | __iter_5__ < FORMA_MAX((__iter_0__-2),2) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-3)))) { b3_1 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b3_0 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_3__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t3_0 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t3_1 = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__-4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-3)) ){ float __temp_3__ = (__tilevar_3__[__iter_5__+2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_3__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_3__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_3__[__iter_5__-2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 
0.083000f * __temp_22__); float __temp_27__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t3_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t3_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b3_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b3_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_3__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b4_1 = b4_0; b4_0 = __tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t4_0; t4_0 = t4_1; t4_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+8)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__-8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+8)-1),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__-4),2)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-3))) | __iter_5__ < FORMA_MAX((__iter_0__-4),2) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-3)))) { b4_1 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b4_0 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_4__[__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t4_0 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t4_1 = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) { if( __iter_5__ >= FORMA_MAX((__iter_0__-6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+6)-1),(N-3)) ){ float __temp_3__ = (__tilevar_4__[__iter_5__+2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_4__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_4__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_4__[__iter_5__-2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = 
(__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t4_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t4_0; float __temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b4_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b4_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_4__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); b5_1 = b5_0; b5_0 = __tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]; __tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = t5_0; t5_0 = t5_1; t5_1 = __temp_63__; } } if ((__iter_4__ >= FORMA_MAX((__iter_1__-10),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+10)-1),(M-3))) & (__iter_5__ >= FORMA_MAX((__iter_0__-10),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+10)-1),(N-3))) & (__iter_4__ < (FORMA_MAX((__iter_1__-6),2)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+6)-1),(M-3))) | __iter_5__ < FORMA_MAX((__iter_0__-6),2) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+6)-1),(N-3)))) { b5_1 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-4))]; b5_0 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-3))]; __tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-2))]; t5_0 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__-1))]; t5_1 = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]; } __syncthreads (); if(__iter_4__ >= FORMA_MAX((__iter_1__-8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+8)-1),(M-3))) { if(__iter_5__ >= FORMA_MAX((__iter_0__-8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+8)-1),(N-3)) ){ float __temp_3__ = (__tilevar_5__[__iter_5__+2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_7__ = (__tilevar_5__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__); float __temp_12__ = (__tilevar_5__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__); float __temp_17__ = (__tilevar_5__[__iter_5__-2+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__); float __temp_22__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2+EXTENT-__iter_1__)]); float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__); float __temp_27__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]); float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__); float __temp_32__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]); float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__); float __temp_37__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2+EXTENT-__iter_1__)]); float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__); float __temp_42__ = t5_1; float __temp_43__ = (__temp_38__ + 0.083000f * __temp_42__); float __temp_47__ = t5_0; float 
__temp_48__ = (__temp_43__ + 0.083000f * __temp_47__); float __temp_52__ = b5_0; float __temp_53__ = (__temp_48__ + 0.083000f * __temp_52__); float __temp_57__ = b5_1; float __temp_58__ = (__temp_53__ + 0.083000f * __temp_57__); float __temp_62__ = (__tilevar_5__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]); float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__); __var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-6,0))] = __temp_63__; } } } } /*Device code End */ /* Host Code Begin */ extern "C" void j3d13pt(float * h_input, int L, int M, int N, float * __var_0__){ /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __var_1__\n"); float * __copy_arr_0__; cudaMalloc(&__copy_arr_0__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n"); float * __copy_arr_1__; cudaMalloc(&__copy_arr_1__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n"); float * __copy_arr_2__; cudaMalloc(&__copy_arr_2__,sizeof(float)*(L*M*N)); Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = N; int __size_1___kernel___forma_kernel__0__ = M; int __block_0___kernel___forma_kernel__0__ = 32; int __block_1___kernel___forma_kernel__0__ = 32; int __block_2___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0t__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX); int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY); int __grid_2___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0t__(__grid_0___kernel___forma_kernel__0t__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__); dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, 
__blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0t__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); __kernel___forma_kernel__2__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n"); __kernel___forma_kernel__3__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); cudaFree(__copy_arr_0__); cudaFree(__copy_arr_1__); cudaFree(__copy_arr_2__); } /*Host Free End*/
23f6240a3083bddf976dc8f53e539138e7e9619e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"

void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    hipError_t err = hipGetLastError();
    if (hipSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
}

namespace StreamCompaction {
namespace Common {

/**
 * Maps an array to an array of 0s and 1s for stream compaction. Elements
 * which map to 0 will be removed, and elements which map to 1 will be kept.
 */
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    // TODO
    int k = threadIdx.x;
    if (idata[k] == 0)bools[k] = 0;
    else bools[k] = 1;
    /*for (int i = 0; i < n; i++){
        if (idata[i] == 0)
            bools[i] = 0;
        else {
            bools[i] = 1;
        }
    }*/
}

/**
 * Performs scatter on an array. That is, for each element in idata,
 * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
 */
__global__ void kernScatter(int n, int *odata,const int *idata,
        const int *bools, const int *indices) {
    //last, dev_odata, dev_idata, dev_bool, dev_boolb
    //indices[i]={0,1,2,3,4},n is the muber of indices
    // TODO
    /*for (int i = 0; i < n; i++){
        if (bools[i] == 1) {
            odata[indices[i]] = idata[i];
        }
    }*/
    //for (int i = 0; i < n; i++){ odata[i] = 0; }
    int k = threadIdx.x;
    if (bools[k] == 1){
        int t = indices[k];//
        odata[t] = idata[k];
    }
}

}
}
23f6240a3083bddf976dc8f53e539138e7e9619e.cu
#include "common.h"

void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

namespace StreamCompaction {
namespace Common {

/**
 * Maps an array to an array of 0s and 1s for stream compaction. Elements
 * which map to 0 will be removed, and elements which map to 1 will be kept.
 */
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    // TODO
    int k = threadIdx.x;
    if (idata[k] == 0)bools[k] = 0;
    else bools[k] = 1;
    /*for (int i = 0; i < n; i++){
        if (idata[i] == 0)
            bools[i] = 0;
        else {
            bools[i] = 1;
        }
    }*/
}

/**
 * Performs scatter on an array. That is, for each element in idata,
 * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
 */
__global__ void kernScatter(int n, int *odata,const int *idata,
        const int *bools, const int *indices) {
    //last, dev_odata, dev_idata, dev_bool, dev_boolb
    //indices[i]={0,1,2,3,4},n is the muber of indices
    // TODO
    /*for (int i = 0; i < n; i++){
        if (bools[i] == 1) {
            odata[indices[i]] = idata[i];
        }
    }*/
    //for (int i = 0; i < n; i++){ odata[i] = 0; }
    int k = threadIdx.x;
    if (bools[k] == 1){
        int t = indices[k];//
        odata[t] = idata[k];
    }
}

}
}
c913bcb8b2828df73abacc8ee1f9765037cadb8a.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Authors: Giovanni Balduzzi ([email protected])
//
// This file implements the device methods of G0Interpolation<GPU>.

#include "dca/phys/dca_step/cluster_solver/ctint/walker/tools/g0_interpolation_gpu.hpp"

#include <hip/hip_runtime.h>

#include "dca/linalg/util/error_cuda.hpp"
#include "dca/util/cuda_blocks.hpp"

namespace dca {
namespace phys {
namespace solver {
namespace ctint {
// dca::phys::solver::ctint::

__device__ double DeviceInterpolationData::operator()(double tau, int lindex) const {
  assert(tau >= -beta_ && tau <= beta_);

  if (tau == 0)  // returns G0(tau = 0+)
    return g0_minus_[lindex];

  short int factor = 1;
  if (tau < 0) {
    tau += beta_;
    factor = -1;
  }

  // Scale tau in [0, n_time_slices). Assume even spacing in time.
  const double scaled_tau = tau * n_div_beta_;
  const int tau_index(scaled_tau);
  const double delta_tau = scaled_tau - tau_index;

  // Get the pointer to the first akima coeff.
  const double* coeff_ptr = &values_[tau_index * coeff_size_ + lindex * stride_];

  // Return akima interpolation.
  return factor * (coeff_ptr[0] +
                   delta_tau * (coeff_ptr[1] + delta_tau * (coeff_ptr[2] + delta_tau * coeff_ptr[3])));
}

__global__ void g0InterpolationTestKernel(double tau, const int lindex, DeviceInterpolationData g0,
                                          double* result) {
  *result = g0(tau, lindex);
}

double G0Interpolation<linalg::GPU>::operator()(double tau, int lindex) const {
  double* d_result;
  double result;
  hipMalloc((void**)&d_result, sizeof(double));

  hipLaunchKernelGGL(( g0InterpolationTestKernel), dim3(1), dim3(1), 0, 0, tau, lindex, *this, d_result);
  assert(hipSuccess == hipPeekAtLastError());

  hipMemcpy(&result, d_result, sizeof(double), hipMemcpyDeviceToHost);
  hipFree(d_result);
  return result;
}

}  // namespace ctint
}  // namespace solver
}  // namespace phys
}  // namespace dca
c913bcb8b2828df73abacc8ee1f9765037cadb8a.cu
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Authors: Giovanni Balduzzi ([email protected])
//
// This file implements the device methods of G0Interpolation<GPU>.

#include "dca/phys/dca_step/cluster_solver/ctint/walker/tools/g0_interpolation_gpu.hpp"

#include <cuda_runtime.h>

#include "dca/linalg/util/error_cuda.hpp"
#include "dca/util/cuda_blocks.hpp"

namespace dca {
namespace phys {
namespace solver {
namespace ctint {
// dca::phys::solver::ctint::

__device__ double DeviceInterpolationData::operator()(double tau, int lindex) const {
  assert(tau >= -beta_ && tau <= beta_);

  if (tau == 0)  // returns G0(tau = 0+)
    return g0_minus_[lindex];

  short int factor = 1;
  if (tau < 0) {
    tau += beta_;
    factor = -1;
  }

  // Scale tau in [0, n_time_slices). Assume even spacing in time.
  const double scaled_tau = tau * n_div_beta_;
  const int tau_index(scaled_tau);
  const double delta_tau = scaled_tau - tau_index;

  // Get the pointer to the first akima coeff.
  const double* coeff_ptr = &values_[tau_index * coeff_size_ + lindex * stride_];

  // Return akima interpolation.
  return factor * (coeff_ptr[0] +
                   delta_tau * (coeff_ptr[1] + delta_tau * (coeff_ptr[2] + delta_tau * coeff_ptr[3])));
}

__global__ void g0InterpolationTestKernel(double tau, const int lindex, DeviceInterpolationData g0,
                                          double* result) {
  *result = g0(tau, lindex);
}

double G0Interpolation<linalg::GPU>::operator()(double tau, int lindex) const {
  double* d_result;
  double result;
  cudaMalloc((void**)&d_result, sizeof(double));

  g0InterpolationTestKernel<<<1, 1>>>(tau, lindex, *this, d_result);
  assert(cudaSuccess == cudaPeekAtLastError());

  cudaMemcpy(&result, d_result, sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(d_result);
  return result;
}

}  // namespace ctint
}  // namespace solver
}  // namespace phys
}  // namespace dca
cb9b2279d6b98b534daa93a6faa11ecd31570a66.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "maxpool_layer.h"
#include "hip/hip_runtime.h"

__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
    int h = (in_h + pad - size)/stride + 1;
    int w = (in_w + pad - size)/stride + 1;
    int c = in_c;

    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= n) return;

    int j = id % w;
    id /= w;
    int i = id % h;
    id /= h;
    int k = id % c;
    id /= c;
    int b = id;

    int w_offset = -pad/2;
    int h_offset = -pad/2;

    int out_index = j + w*(i + h*(k + c*b));
    float max = -INFINITY;
    int max_i = -1;
    int l, m;
    for(l = 0; l < size; ++l){
        for(m = 0; m < size; ++m){
            int cur_h = h_offset + i*stride + l;
            int cur_w = w_offset + j*stride + m;
            int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
            int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w);
            float val = (valid != 0) ? input[index] : -INFINITY;
            max_i = (val > max) ? index : max_i;
            max = (val > max) ? val : max;
        }
    }
    output[out_index] = max;
    indexes[out_index] = max_i;
}

__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
    int h = (in_h + pad - size)/stride + 1;
    int w = (in_w + pad - size)/stride + 1;
    int c = in_c;
    int area = (size-1)/stride;

    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= n) return;

    int index = id;
    int j = id % in_w;
    id /= in_w;
    int i = id % in_h;
    id /= in_h;
    int k = id % in_c;
    id /= in_c;
    int b = id;

    int w_offset = -pad/2;
    int h_offset = -pad/2;

    float d = 0;
    int l, m;
    for(l = -area; l < area+1; ++l){
        for(m = -area; m < area+1; ++m){
            int out_w = (j-w_offset)/stride + m;
            int out_h = (i-h_offset)/stride + l;
            int out_index = out_w + w*(out_h + h*(k + c*b));
            int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h);
            d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
        }
    }
    prev_delta[index] += d;
}

void forward_maxpool_layer_gpu(maxpool_layer l, network net)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;

    size_t n = h*w*c*l.batch;

    hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, l.h, l.w, l.c, l.stride, l.size, l.pad, net.input_gpu, l.output_gpu, l.indexes_gpu);
    check_error(hipPeekAtLastError());

    if(net.train && l.activ_quant_flag){
        cuda_pull_array(l.output_gpu, l.output, l.out_c*l.out_w*l.out_h);
        uint8_t input_fake_quant = 0;
        FakeQuantWithMinMaxChannel(1, l.output, &input_fake_quant, l.out_c*l.out_w*l.out_h,
            l.min_activ_value, l.max_activ_value, l.activ_data_int8_scales, l.activ_data_int8_zero_point, ACTIV_QUANT, 0.999);
        cuda_push_array(l.output_gpu, l.output, l.out_c*l.out_w*l.out_h);
        // printf("scale is %f\n", *l.activ_data_int8_scales);
        cuda_push_array(l.activ_data_int8_scales_gpu, l.activ_data_int8_scales, 1);
        cuda_push_array_int8(l.activ_data_int8_zero_point_gpu, l.activ_data_int8_zero_point, 1);
    }
}

void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
    size_t n = layer.h*layer.w*layer.c*layer.batch;

    hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
    check_error(hipPeekAtLastError());
}
cb9b2279d6b98b534daa93a6faa11ecd31570a66.cu
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "maxpool_layer.h"
#include "cuda.h"

__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
    int h = (in_h + pad - size)/stride + 1;
    int w = (in_w + pad - size)/stride + 1;
    int c = in_c;

    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= n) return;

    int j = id % w;
    id /= w;
    int i = id % h;
    id /= h;
    int k = id % c;
    id /= c;
    int b = id;

    int w_offset = -pad/2;
    int h_offset = -pad/2;

    int out_index = j + w*(i + h*(k + c*b));
    float max = -INFINITY;
    int max_i = -1;
    int l, m;
    for(l = 0; l < size; ++l){
        for(m = 0; m < size; ++m){
            int cur_h = h_offset + i*stride + l;
            int cur_w = w_offset + j*stride + m;
            int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
            int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w);
            float val = (valid != 0) ? input[index] : -INFINITY;
            max_i = (val > max) ? index : max_i;
            max = (val > max) ? val : max;
        }
    }
    output[out_index] = max;
    indexes[out_index] = max_i;
}

__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
    int h = (in_h + pad - size)/stride + 1;
    int w = (in_w + pad - size)/stride + 1;
    int c = in_c;
    int area = (size-1)/stride;

    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= n) return;

    int index = id;
    int j = id % in_w;
    id /= in_w;
    int i = id % in_h;
    id /= in_h;
    int k = id % in_c;
    id /= in_c;
    int b = id;

    int w_offset = -pad/2;
    int h_offset = -pad/2;

    float d = 0;
    int l, m;
    for(l = -area; l < area+1; ++l){
        for(m = -area; m < area+1; ++m){
            int out_w = (j-w_offset)/stride + m;
            int out_h = (i-h_offset)/stride + l;
            int out_index = out_w + w*(out_h + h*(k + c*b));
            int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h);
            d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
        }
    }
    prev_delta[index] += d;
}

void forward_maxpool_layer_gpu(maxpool_layer l, network net)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;

    size_t n = h*w*c*l.batch;

    forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, l.h, l.w, l.c, l.stride, l.size, l.pad, net.input_gpu, l.output_gpu, l.indexes_gpu);
    check_error(cudaPeekAtLastError());

    if(net.train && l.activ_quant_flag){
        cuda_pull_array(l.output_gpu, l.output, l.out_c*l.out_w*l.out_h);
        uint8_t input_fake_quant = 0;
        FakeQuantWithMinMaxChannel(1, l.output, &input_fake_quant, l.out_c*l.out_w*l.out_h,
            l.min_activ_value, l.max_activ_value, l.activ_data_int8_scales, l.activ_data_int8_zero_point, ACTIV_QUANT, 0.999);
        cuda_push_array(l.output_gpu, l.output, l.out_c*l.out_w*l.out_h);
        // printf("scale is %f\n", *l.activ_data_int8_scales);
        cuda_push_array(l.activ_data_int8_scales_gpu, l.activ_data_int8_scales, 1);
        cuda_push_array_int8(l.activ_data_int8_zero_point_gpu, l.activ_data_int8_zero_point, 1);
    }
}

void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
    size_t n = layer.h*layer.w*layer.c*layer.batch;

    backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
    check_error(cudaPeekAtLastError());
}
da3fd024ff778e04c51a48b537b4f2db8a5bf916.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"

void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    hipError_t err = hipGetLastError();
    if (hipSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
}

namespace StreamCompaction {
namespace Common {

/**
 * Maps an array to an array of 0s and 1s for stream compaction. Elements
 * which map to 0 will be removed, and elements which map to 1 will be kept.
 */
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= n) return;
    if (idata[index]) bools[index] = 1;
    else bools[index] = 0;
}

/**
 * Performs scatter on an array. That is, for each element in idata,
 * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
 */
__global__ void kernScatter(int n, int *odata,
        const int *idata, const int *bools, const int *indices) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= n) return;
    if (bools[index]) odata[indices[index]] = idata[index];
}

}
}
da3fd024ff778e04c51a48b537b4f2db8a5bf916.cu
#include "common.h"

void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

namespace StreamCompaction {
namespace Common {

/**
 * Maps an array to an array of 0s and 1s for stream compaction. Elements
 * which map to 0 will be removed, and elements which map to 1 will be kept.
 */
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= n) return;
    if (idata[index]) bools[index] = 1;
    else bools[index] = 0;
}

/**
 * Performs scatter on an array. That is, for each element in idata,
 * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
 */
__global__ void kernScatter(int n, int *odata,
        const int *idata, const int *bools, const int *indices) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= n) return;
    if (bools[index]) odata[indices[index]] = idata[index];
}

}
}
01e3cc8c955d2d1b53b542b965f4668ab701f9b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <solvers/block_jacobi_solver.h> #include <solvers/block_common_solver.h> #include <gaussian_elimination.h> #include <basic_types.h> #include <cutil.h> #include <util.h> #include <string> #include <miscmath.h> #include <texture.h> #include <amgx_cusparse.h> #include <ostream> #include <amgx_types/util.h> namespace amgx { namespace block_jacobi_solver { template <typename ValueTypeA, typename ValueTypeB> struct jacobi_presmooth_functor { double omega; jacobi_presmooth_functor( double omega ) : omega( omega ) {} __host__ __device__ ValueTypeB operator()( const ValueTypeB &b, const ValueTypeA &d ) const { return isNotCloseToZero(d) ? b * omega / d : b * omega / epsilon(d); } }; template <typename ValueTypeA, typename ValueTypeB> struct jacobi_postsmooth_functor { double omega; jacobi_postsmooth_functor( double omega ) : omega( omega ) {} template<typename Tuple> __host__ __device__ ValueTypeB operator( )( const Tuple &t ) const { ValueTypeB x = thrust::get<0>(t); ValueTypeA d = thrust::get<1>(t); ValueTypeB b = thrust::get<2>(t); ValueTypeB y = thrust::get<3>(t); // return x + omega * (b - y) / d. d = isNotCloseToZero(d) ? d : epsilon(d); d = types::util<ValueTypeA>::get_one() / d; b = b - y; b = b * omega; return b * d + x; } }; template <typename ValueTypeB> struct add_functor { __host__ __device__ ValueTypeB operator()( const ValueTypeB &x, const ValueTypeB &y )const { return x + y; } }; template<typename T> __device__ __forceinline__ T fmnaOp (T a, T b, T c) { return -(a * b) + c; } template<typename T> __device__ __forceinline__ T mulOp (T a, T b) { return a * b; } template<typename T> __device__ __forceinline__ T rcpOp (T a) { return 1.0 / (isNotCloseToZero(a) ? 
a : epsilon(a)); } template<typename T> __device__ __forceinline__ T absOp (T a) { return fabs(a); } // ----------------------------------- // KERNELS // ----------------------------------- template<typename T1, typename T2, int N> __global__ void matinv_matrix_per_thread_pivot (const T1 *A, T2 *Ainv, int batch) { #define A(row,col) A[(col)*N+(row)] #define AA(row,col) AA[(col)*N+(row)] #define Ainv(row,col) Ainv[(col)*N+(row)] const int blkNum = blockIdx.x * blockDim.x + threadIdx.x; int perm0, perm1, perm2, perm3; int icol0, icol1, icol2, icol3; T2 AA00, AA01, AA02, AA03, AA10, AA11, AA12, AA13; T2 AA20, AA21, AA22, AA23, AA30, AA31, AA32, AA33; T2 p, t; int i, pvt; A += blkNum * N * N; Ainv += blkNum * N * N; if (blkNum < batch) { AA00 = A[0]; AA10 = A[1]; AA20 = A[2]; AA30 = A[3]; AA01 = A[4]; AA11 = A[5]; AA21 = A[6]; AA31 = A[7]; AA02 = A[8]; AA12 = A[9]; AA22 = A[10]; AA32 = A[11]; AA03 = A[12]; AA13 = A[13]; AA23 = A[14]; AA33 = A[15]; perm0 = 0; perm1 = 1; perm2 = 2; perm3 = 3; /****************** iteration 0 ***********/ /* search pivot row */ p = absOp (AA00); pvt = 0; t = absOp (AA10); if (t > p) { p = t; pvt = 1; } t = absOp (AA20); if (t > p) { p = t; pvt = 2; } t = absOp (AA30); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 0 */ if (pvt == 1) { t = AA00; AA00 = AA10; AA10 = t; t = AA01; AA01 = AA11; AA11 = t; t = AA02; AA02 = AA12; AA12 = t; t = AA03; AA03 = AA13; AA13 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm1; perm1 = i; } if (pvt == 2) { t = AA00; AA00 = AA20; AA20 = t; t = AA01; AA01 = AA21; AA21 = t; t = AA02; AA02 = AA22; AA22 = t; t = AA03; AA03 = AA23; AA23 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm2; perm2 = i; } if (pvt == 3) { t = AA00; AA00 = AA30; AA30 = t; t = AA01; AA01 = AA31; AA31 = t; t = AA02; AA02 = AA32; AA32 = t; t = AA03; AA03 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA00); icol0 = perm0; AA00 = t; AA01 = mulOp (t, AA01); AA02 = mulOp (t, AA02); AA03 = mulOp (t, AA03); /* eliminate above and below current row */ t = AA10; AA10 = mulOp (-t, AA00); AA11 = fmnaOp (t, AA01, AA11); AA12 = fmnaOp (t, AA02, AA12); AA13 = fmnaOp (t, AA03, AA13); t = AA20; AA20 = mulOp (-t, AA00); AA21 = fmnaOp (t, AA01, AA21); AA22 = fmnaOp (t, AA02, AA22); AA23 = fmnaOp (t, AA03, AA23); t = AA30; AA30 = mulOp (-t, AA00); AA31 = fmnaOp (t, AA01, AA31); AA32 = fmnaOp (t, AA02, AA32); AA33 = fmnaOp (t, AA03, AA33); /****************** iteration 1 ***********/ /* search pivot row */ p = absOp (AA11); pvt = 1; t = absOp (AA21); if (t > p) { p = t; pvt = 2; } t = absOp (AA31); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 1 */ if (pvt == 2) { t = AA10; AA10 = AA20; AA20 = t; t = AA11; AA11 = AA21; AA21 = t; t = AA12; AA12 = AA22; AA22 = t; t = AA13; AA13 = AA23; AA23 = t; /* update permutation vector based on row swap */ i = perm1; perm1 = perm2; perm2 = i; } else if (pvt == 3) { t = AA10; AA10 = AA30; AA30 = t; t = AA11; AA11 = AA31; AA31 = t; t = AA12; AA12 = AA32; AA32 = t; t = AA13; AA13 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm1; perm1 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA11); icol1 = perm1; AA10 = mulOp (t, AA10); AA11 = t; AA12 = mulOp (t, AA12); AA13 = mulOp (t, AA13); /* eliminate above and below current row */ t = AA01; AA00 = fmnaOp (t, AA10, AA00); AA01 = mulOp (-t, AA11); AA02 = fmnaOp (t, AA12, AA02); AA03 = fmnaOp 
(t, AA13, AA03); t = AA21; AA20 = fmnaOp (t, AA10, AA20); AA21 = mulOp (-t, AA11); AA22 = fmnaOp (t, AA12, AA22); AA23 = fmnaOp (t, AA13, AA23); t = AA31; AA30 = fmnaOp (t, AA10, AA30); AA31 = mulOp (-t, AA11); AA32 = fmnaOp (t, AA12, AA32); AA33 = fmnaOp (t, AA13, AA33); /****************** iteration 2 ****************/ /* search pivot row */ p = absOp (AA22); pvt = 2; t = absOp (AA32); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 2 */ if (pvt == 3) { t = AA20; AA20 = AA30; AA30 = t; t = AA21; AA21 = AA31; AA31 = t; t = AA22; AA22 = AA32; AA32 = t; t = AA23; AA23 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm2; perm2 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA22); icol2 = perm2; AA20 = mulOp (t, AA20); AA21 = mulOp (t, AA21); AA22 = t; AA23 = mulOp (t, AA23); /* eliminate above and below current row */ t = AA02; AA00 = fmnaOp (t, AA20, AA00); AA01 = fmnaOp (t, AA21, AA01); AA02 = mulOp (-t, AA22); AA03 = fmnaOp (t, AA23, AA03); t = AA12; AA10 = fmnaOp (t, AA20, AA10); AA11 = fmnaOp (t, AA21, AA11); AA12 = mulOp (-t, AA22); AA13 = fmnaOp (t, AA23, AA13); t = AA32; AA30 = fmnaOp (t, AA20, AA30); AA31 = fmnaOp (t, AA21, AA31); AA32 = mulOp (-t, AA22); AA33 = fmnaOp (t, AA23, AA33); /****************** iteration 3 ****************/ /* scale current row */ t = rcpOp (AA33); icol3 = perm3; AA30 = mulOp (t, AA30); AA31 = mulOp (t, AA31); AA32 = mulOp (t, AA32); AA33 = t; /* eliminate above and below current row */ t = AA03; AA00 = fmnaOp (t, AA30, AA00); AA01 = fmnaOp (t, AA31, AA01); AA02 = fmnaOp (t, AA32, AA02); AA03 = mulOp (-t, AA33); t = AA13; AA10 = fmnaOp (t, AA30, AA10); AA11 = fmnaOp (t, AA31, AA11); AA12 = fmnaOp (t, AA32, AA12); AA13 = mulOp (-t, AA33); t = AA23; AA20 = fmnaOp (t, AA30, AA20); AA21 = fmnaOp (t, AA31, AA21); AA22 = fmnaOp (t, AA32, AA22); AA23 = mulOp (-t, AA33); /* sort columns into the correct order */ Ainv(0, icol0) = AA00; Ainv(1, icol0) = AA10; Ainv(2, icol0) = AA20; Ainv(3, icol0) = AA30; Ainv(0, icol1) = AA01; Ainv(1, icol1) = AA11; Ainv(2, icol1) = AA21; Ainv(3, icol1) = AA31; Ainv(0, icol2) = AA02; Ainv(1, icol2) = AA12; Ainv(2, icol2) = AA22; Ainv(3, icol2) = AA32; Ainv(0, icol3) = AA03; Ainv(1, icol3) = AA13; Ainv(2, icol3) = AA23; Ainv(3, icol3) = AA33; } } template<typename T, int N> __global__ void matinv_matrix_per_thread_no_pivot (const T *A, T *Ainv, int batch) { #define A(row,col) A[(col)*N+(row)] #define AA(row,col) AA[(col)*N+(row)] #define Ainv(row,col) Ainv[(col)*N+(row)] const int blkNum = blockIdx.x * blockDim.x + threadIdx.x; T AA00, AA01, AA02, AA03, AA10, AA11, AA12, AA13; T AA20, AA21, AA22, AA23, AA30, AA31, AA32, AA33; T t; A += blkNum * N * N; Ainv += blkNum * N * N; if (blkNum < batch) { AA00 = A[0]; AA10 = A[1]; AA20 = A[2]; AA30 = A[3]; AA01 = A[4]; AA11 = A[5]; AA21 = A[6]; AA31 = A[7]; AA02 = A[8]; AA12 = A[9]; AA22 = A[10]; AA32 = A[11]; AA03 = A[12]; AA13 = A[13]; AA23 = A[14]; AA33 = A[15]; /****************** iteration 0 ***********/ /* search pivot row */ t = 1.0 / (AA00); AA00 = t; AA01 = t * AA01; AA02 = t * AA02; AA03 = t * AA03; /* eliminate above and below current row */ t = AA10; AA10 = -t * AA00; AA11 = fmnaOp (t, AA01, AA11); AA12 = fmnaOp (t, AA02, AA12); AA13 = fmnaOp (t, AA03, AA13); t = AA20; AA20 = -t * AA00; AA21 = fmnaOp (t, AA01, AA21); AA22 = fmnaOp (t, AA02, AA22); AA23 = fmnaOp (t, AA03, AA23); t = AA30; AA30 = -t * AA00; AA31 = fmnaOp (t, AA01, AA31); AA32 = fmnaOp (t, AA02, AA32); AA33 = fmnaOp (t, AA03, AA33); /****************** iteration 1 
***********/ /* scale current row */ t = 1.0 / (AA11); AA10 = t * AA10; AA11 = t; AA12 = t * AA12; AA13 = t * AA13; /* eliminate above and below current row */ t = AA01; AA00 = fmnaOp (t, AA10, AA00); AA01 = -t * AA11; AA02 = fmnaOp (t, AA12, AA02); AA03 = fmnaOp (t, AA13, AA03); t = AA21; AA20 = fmnaOp (t, AA10, AA20); AA21 = -t * AA11; AA22 = fmnaOp (t, AA12, AA22); AA23 = fmnaOp (t, AA13, AA23); t = AA31; AA30 = fmnaOp (t, AA10, AA30); AA31 = -t * AA11; AA32 = fmnaOp (t, AA12, AA32); AA33 = fmnaOp (t, AA13, AA33); /****************** iteration 2 ****************/ /* scale current row */ t = 1.0 / (AA22); AA20 = t * AA20; AA21 = t * AA21; AA22 = t; AA23 = t * AA23; /* eliminate above and below current row */ t = AA02; AA00 = fmnaOp (t, AA20, AA00); AA01 = fmnaOp (t, AA21, AA01); AA02 = -t * AA22; AA03 = fmnaOp (t, AA23, AA03); t = AA12; AA10 = fmnaOp (t, AA20, AA10); AA11 = fmnaOp (t, AA21, AA11); AA12 = -t * AA22; AA13 = fmnaOp (t, AA23, AA13); t = AA32; AA30 = fmnaOp (t, AA20, AA30); AA31 = fmnaOp (t, AA21, AA31); AA32 = -t * AA22; AA33 = fmnaOp (t, AA23, AA33); /****************** iteration 3 ****************/ /* scale current row */ t = 1.0 / (AA33); AA30 = t * AA30; AA31 = t * AA31; AA32 = t * AA32; AA33 = t; /* eliminate above and below current row */ t = AA03; AA00 = fmnaOp (t, AA30, AA00); AA01 = fmnaOp (t, AA31, AA01); AA02 = fmnaOp (t, AA32, AA02); AA03 = -t * AA33; t = AA13; AA10 = fmnaOp (t, AA30, AA10); AA11 = fmnaOp (t, AA31, AA11); AA12 = fmnaOp (t, AA32, AA12); AA13 = -t * AA33; t = AA23; AA20 = fmnaOp (t, AA30, AA20); AA21 = fmnaOp (t, AA31, AA21); AA22 = fmnaOp (t, AA32, AA22); AA23 = -t * AA33; /* sort columns into the correct order */ Ainv(0, 0) = AA00; Ainv(1, 0) = AA10; Ainv(2, 0) = AA20; Ainv(3, 0) = AA30; Ainv(0, 1) = AA01; Ainv(1, 1) = AA11; Ainv(2, 1) = AA21; Ainv(3, 1) = AA31; Ainv(0, 2) = AA02; Ainv(1, 2) = AA12; Ainv(2, 2) = AA22; Ainv(3, 2) = AA32; Ainv(0, 3) = AA03; Ainv(1, 3) = AA13; Ainv(2, 3) = AA23; Ainv(3, 3) = AA33; } } template<typename IndexType, typename ValueTypeA, int threads_per_block, int halfwarps_per_block> __global__ void setupBlockJacobiSmoothbBigBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows, int bsize, int bsize_sq, ValueTypeA *temp1) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; int halfwarp_id = tid >> 4; const int block_halfwarp_id = threadIdx.x >> 4; const int mat_entry_index = threadIdx.x & (16 - 1); const int i_ind = mat_entry_index >> 2; const int j_ind = mat_entry_index & 3; extern __shared__ volatile char schar[]; volatile ValueTypeA *s_Amat; s_Amat = (ValueTypeA *)&schar[0]; int tile_num = (bsize - 1) / 4 + 1; ValueTypeA *e_out = &temp1[(blockIdx.x * blockDim.x + threadIdx.x) * tile_num * tile_num]; while (halfwarp_id < num_block_rows) { int offset = halfwarp_id * bsize_sq + i_ind * bsize + j_ind; int s_offset = block_halfwarp_id * bsize_sq; // Store the diagonal for (int t1 = 0; t1 < tile_num; t1++) for (int t2 = 0; t2 < tile_num; t2++) if ((t1 * 4 + i_ind) < bsize && (t2 * 4 + j_ind) < bsize) { e_out[t1 * tile_num + t2] = values[bsize_sq * dia_indices[halfwarp_id] + (t1 * 4 + i_ind) * bsize + t2 * 4 + j_ind]; } // Each thread stores its entry in s_Amat for (int t1 = 0; t1 < tile_num; t1++) for (int t2 = 0; t2 < tile_num; t2++) if ((t1 * 4 + i_ind) < bsize && (t2 * 4 + j_ind) < bsize) { types::util<ValueTypeA>::volcast( e_out[t1 * tile_num + t2], s_Amat + (s_offset + (t1 * 4 + i_ind) * 
bsize + t2 * 4 + j_ind) ); } compute_block_inverse2<IndexType, ValueTypeA, halfwarps_per_block> ( s_Amat, s_offset, offset, i_ind, j_ind, Dinv, tile_num, bsize, bsize_sq ); halfwarp_id += gridDim.x * halfwarps_per_block; } } template<typename IndexType, typename ValueTypeA, int blockrows_per_cta, int blockrows_per_warp, int bsize, int bsize_sq> __global__ void setupBlockJacobiSmoothBbyBBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows) { int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x & 31; // padding row blocks to fit in a single warp if ( warp_thread_id >= blockrows_per_warp * bsize_sq ) { return; } // new thread id with padding int tid = warp_id * blockrows_per_warp * bsize_sq + warp_thread_id; int cta_blockrow_id = tid / bsize_sq; int blockrow_id = blockIdx.x * blockrows_per_cta + cta_blockrow_id; const int mat_entry_index = tid - cta_blockrow_id * bsize_sq; const int i_ind = mat_entry_index / bsize; const int j_ind = mat_entry_index - i_ind * bsize; volatile __shared__ ValueTypeA s_Amat[bsize_sq * blockrows_per_cta ]; ValueTypeA e_out; while (blockrow_id < num_block_rows) { int offset = blockrow_id * bsize_sq + mat_entry_index; // Store the diagonal e_out = values[bsize_sq * dia_indices[blockrow_id] + mat_entry_index]; // Each thread stores its entry in s_Amat types::util<ValueTypeA>::volcast(e_out, s_Amat + tid); compute_block_inverse_row_major<IndexType, ValueTypeA, blockrows_per_cta, bsize, bsize_sq> ( s_Amat, cta_blockrow_id * bsize_sq, offset, i_ind, j_ind, Dinv ); blockrow_id += gridDim.x * blockrows_per_cta; } } template<typename ValueTypeA, typename ValueTypeB, typename IndexType, int threads_per_block, int halfwarps_per_block, int bsize, int log_bsize, int bsize_sq, int log_bsize_sq> __global__ void setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2(const IndexType *dia_indices, const ValueTypeA *A_values, ValueTypeA *Dinv, const int num_block_rows) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; int halfwarp_id = tid >> log_bsize_sq; const int block_halfwarp_id = threadIdx.x >> log_bsize_sq; const int mat_entry_index = threadIdx.x & (bsize_sq - 1); const int i_ind = mat_entry_index >> log_bsize; const int j_ind = mat_entry_index & (bsize - 1); volatile __shared__ ValueTypeA s_Amat[bsize_sq * halfwarps_per_block ]; int offset; ValueTypeA e_out; while (halfwarp_id < num_block_rows) { // Store the diagonal offset = halfwarp_id * bsize_sq; e_out = A_values[bsize_sq * dia_indices[halfwarp_id] + mat_entry_index]; // Each thread stores its entry in s_Amat types::util<ValueTypeA>::volcast(e_out, s_Amat + threadIdx.x); compute_block_inverse_row_major<int, ValueTypeA, halfwarps_per_block, bsize, bsize_sq> ( s_Amat, block_halfwarp_id * bsize_sq, offset + mat_entry_index, i_ind, j_ind, Dinv ); halfwarp_id += gridDim.x * blockDim.x >> log_bsize_sq; } } // Kernel to smooth with Jacobi smoother, Dinv assumed to be computed template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, int bsize_sq> __global__ void jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *dia_indices, const ValueTypeA *nonzero_values, const ValueTypeA *Dinv, const ValueTypeB *b, const ValueTypeB *x, double weight, const int num_block_rows, ValueTypeB *xout, const int row_offset) { const int tid = blockDim.x 
* blockIdx.x + threadIdx.x; int eighthwarp_id = row_offset + (tid >> log_bsize); const int block_eighthwarp_id = threadIdx.x >> log_bsize; const int vec_entry_index = threadIdx.x & (bsize - 1); volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ]; ValueTypeB bmAx, xin; ValueTypeB temp[bsize]; int offset, i, s_offset; while (eighthwarp_id < num_block_rows) { i = eighthwarp_id; offset = i * bsize + vec_entry_index; // 1. COMPUTING b-Ax bmAx = b[offset]; // Contribution from diagonal xin = x[offset]; types::util<ValueTypeB>::volcast(xin, s_xtemp + threadIdx.x); // Load dia_values and do matrix multiply loadAsVector<bsize>(nonzero_values + bsize_sq * dia_indices[i] + vec_entry_index * bsize, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx - temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } // Contribution from each nonzero column int jmax = row_offsets[i + 1]; for (int jind = row_offsets[i]; jind < jmax; jind++) { IndexType jcol = __cachingLoad(&column_indices[jind]); if (jcol != i) { offset = jcol * bsize + vec_entry_index; types::util<ValueTypeB>::volcast(x[offset], s_xtemp + threadIdx.x); // Load nonzero_values s_offset = block_eighthwarp_id * bsize; offset = jind * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(nonzero_values + offset, temp); #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx - temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } } } types::util<ValueTypeB>::volcast(bmAx, s_xtemp + threadIdx.x); bmAx = types::util<ValueTypeB>::get_zero(); // 2. Multiply by Dinv // Load Dinv and multiply to RHS offset = i * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(Dinv + offset, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } xout[i * bsize + vec_entry_index] = xin + bmAx * weight; eighthwarp_id += gridDim.x * blockDim.x >> log_bsize; } } // Kernel to smooth with jacobi smoother, zero initial guess template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize> __global__ void jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2(const ValueTypeA *Dinv, const ValueTypeB *b, double weight, const int num_block_rows, ValueTypeB *xout, const int row_offset) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int block_eighthwarp_id = threadIdx.x >> log_bsize; const int vec_entry_index = threadIdx.x & (bsize - 1); volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ]; ValueTypeB bmAx; ValueTypeB temp[bsize]; int offset, i, s_offset; for (int eighthwarp_id = row_offset + (tid >> log_bsize); eighthwarp_id < num_block_rows; eighthwarp_id += (gridDim.x * blockDim.x >> log_bsize)) { i = eighthwarp_id; offset = i * bsize + vec_entry_index; types::util<ValueTypeB>::volcast(b[offset], s_xtemp + threadIdx.x); bmAx = types::util<ValueTypeB>::get_zero(); // Load Dinv and multiply to RHS offset = i * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(Dinv + offset, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } xout[i * bsize + vec_entry_index] = bmAx * weight; } } //-------------------------------- // Methods //-------------------------------- // Constructor template<class 
T_Config> BlockJacobiSolver_Base<T_Config>::BlockJacobiSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope) { weight = cfg.AMG_Config::template getParameter<double>("relaxation_factor", cfg_scope); if (weight == 0) { weight = 1.; amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Block Jacobi smoother\n"); } } // Destructor template<class T_Config> BlockJacobiSolver_Base<T_Config>::~BlockJacobiSolver_Base() { this->Dinv.resize(0); } template<class T_Config> void BlockJacobiSolver_Base<T_Config>::printSolverParameters() const { std::cout << "relaxation_factor= " << this->weight << std::endl; } // Solver setup template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure) { Matrix<T_Config> *A_as_matrix = dynamic_cast<Matrix<T_Config>*>(this->m_A); if (!A_as_matrix) { FatalError("BlockJacobiSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } computeDinv( *A_as_matrix ); if ( A_as_matrix->getBlockFormat() != ROW_MAJOR ) { FatalError("Block Jacobi solver only supports row major format", AMGX_ERR_CONFIGURATION); } } // template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) { } // Solve one iteration template<class T_Config> bool BlockJacobiSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { //bool done = false; Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A; if (xIsZero) { x.dirtybit = 0; } ViewType oldView = A_as_matrix->currentView(); A_as_matrix->setViewExterior(); ViewType flags = (ViewType)(A_as_matrix->getViewInterior() | A_as_matrix->getViewExterior()); if (A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1) { if (xIsZero) { smooth_with_0_initial_guess_1x1(*A_as_matrix, b, x, flags); } else { smooth_1x1(*A_as_matrix, b, x, flags); } } else if (A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4) { if (xIsZero) { smooth_with_0_initial_guess_4x4(*A_as_matrix, b, x, flags); } else { smooth_4x4(*A_as_matrix, b, x, flags); } } else if (A_as_matrix->get_block_dimx() == A_as_matrix->get_block_dimy()) { if (xIsZero) { thrust::fill(x.begin(), x.end(), types::util<ValueTypeB>::get_zero()); cudaCheckError(); } smooth_BxB(*A_as_matrix, b, x, true, flags); } else { FatalError("Unsupported block size for BlockJacobi_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if (A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4) { if (!xIsZero) // we write to t_res vector to avoid race condition in the kernel { x.swap(this->t_res); } } x.dirtybit = 1; A_as_matrix->setView(oldView); return this->converged( b, x ); } template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x ) {} template<class T_Config> void BlockJacobiSolver_Base<T_Config>::computeDinv( Matrix<T_Config> &A) { Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A; ViewType oldView = A.currentView(); A.setView(A_as_matrix->getViewExterior()); if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1) { this->computeDinv_1x1(A); } else if (A.get_block_dimx() == 2 && A.get_block_dimy() == 2) { this->computeDinv_bxb<2>(A); } else if (A.get_block_dimx() == 3 && A.get_block_dimy() == 3) { this->computeDinv_3x3(A); } else if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4) { this->computeDinv_4x4(A); } else if (A.get_block_dimx() == 5 && A.get_block_dimy() == 5) { this->computeDinv_bxb<5>(A); } else if 
(A.get_block_dimx() == A.get_block_dimy() && A.get_block_dimy() > 5) { this->computeDinv_Big(A, A.get_block_dimx()); } A.setView(oldView); } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_d &A) { Matrix_d *A_as_matrix = (Matrix_d *) this->m_A; // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); if ( A_as_matrix->hasProps(DIAG) ) { const int num_values = A_as_matrix->diagOffset() * A_as_matrix->get_block_size(); thrust::copy( A_as_matrix->values.begin() + num_values, A_as_matrix->values.begin() + num_values + A_as_matrix->get_num_rows()*A_as_matrix->get_block_size(), this->Dinv.begin() ); cudaCheckError(); } else { find_diag( *A_as_matrix ); } } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_h &A) { // Do nothing } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_4x4(const Matrix_h &A) { //FatalError("Block Jacobi smoother not implemented with this format, exiting"); //std::cout << "Warning, CPU version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_3x3(const Matrix_h &A) { FatalError("3*3 Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); //std::cout << "Warning, CPU version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_Big(const Matrix_h &A, const int bsize) { FatalError("Big Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); //std::cout << "Warning, CPU version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } // Finding diag on device, CSR format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::find_diag( const Matrix_h &A ) { //for each row for (int i = 0; i < A.get_num_rows(); i++) { //for each column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { if (A.col_indices[j] == i) { this->Dinv[i] = A.values[j]; break; } if (j == A.row_offsets[i + 1] - 1) { FatalError("Could not find a diagonal value", AMGX_ERR_BAD_PARAMETERS); } } } } // Finding diag on device, CSR format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::find_diag( const Matrix_d &A ) { AMGX_CPU_PROFILER( "JacobiSolver::find_diag " ); const size_t THREADS_PER_BLOCK = 128; const 
size_t NUM_BLOCKS = ::min(AMGX_GRID_MAX_SIZE, (int)ceil((float)(A.get_num_rows()) / (float)(THREADS_PER_BLOCK))); hipLaunchKernelGGL(( find_diag_kernel_indexed_dia) , dim3((unsigned int)NUM_BLOCKS), dim3((unsigned int)THREADS_PER_BLOCK), 0, 0, A.get_num_rows(), A.diag.raw(), A.values.raw(), this->Dinv.raw()); cudaCheckError(); } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_4x4(const Matrix_d &A) { // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_indices_ptr = A.diag.raw(); const ValueTypeA *A_values = A.values.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); #if 1 const int threads_per_block = 512; const int halfwarps_per_block = threads_per_block / 16; const int num_blocks = ::min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / halfwarps_per_block + 1); hipFuncSetCacheConfig(setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2<ValueTypeA, ValueTypeB, IndexType, threads_per_block, halfwarps_per_block, 4, 2, 16, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2<ValueTypeA, ValueTypeB, IndexType, threads_per_block, halfwarps_per_block, 4, 2, 16, 4>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_dia_indices_ptr, A_values, Dinv_ptr, A.get_num_rows()); cudaCheckError(); #else hipFuncSetCacheConfig(matinv_matrix_per_thread_pivot<ValueTypeA, ValueTypeB, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( matinv_matrix_per_thread_pivot<ValueTypeA, ValueTypeB, 4>) , dim3((A.num_block_rows + 127) / 128), dim3(128) , 0, 0, A_dia_values_ptr, Dinv_ptr, A.num_block_rows); cudaCheckError(); #endif } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_3x3(const Matrix_d &A) { const int bsize = 3; // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 256; const int blockrows_per_warp = 32 / (bsize * bsize); // blockrows per cta = blockrows_per_warp * number_of_warps_per_cta const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp ; const int num_blocks = ::min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1); hipFuncSetCacheConfig(setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize>, hipFuncCachePreferL1); hipLaunchKernelGGL(( setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows()); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision 
t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_Big(const Matrix_d &A, const int bsize) { //both DIAG supported this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); MVector temp(AMGX_GRID_MAX_SIZE * ((bsize - 1) / 4 + 1) * ((bsize - 1) / 4 + 1)); ValueTypeA *temp_ptr = temp.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 512; const int halfwarps_per_block = threads_per_block / 16; const int num_blocks = ::min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / halfwarps_per_block + 1); hipFuncSetCacheConfig(setupBlockJacobiSmoothbBigBlockDiaCsrKernel<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block>, hipFuncCachePreferL1); hipLaunchKernelGGL(( setupBlockJacobiSmoothbBigBlockDiaCsrKernel<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block>) , dim3(num_blocks), dim3(threads_per_block), sizeof(ValueTypeA)*bsize *bsize *halfwarps_per_block, 0, A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows(), bsize, bsize * bsize, temp_ptr); cudaCheckError(); } template<class T_Config> template<int bsize> void BlockJacobiSolver_Base<T_Config>::computeDinv_bxb(const Matrix<T_Config> &A) { if (TConfig::memSpace == AMGX_host) { FatalError("BlockJacobiSmooth Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 256; const int blockrows_per_warp = 32 / (bsize * bsize); const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp; const int num_blocks = ::min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1); hipFuncSetCacheConfig(setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize>, hipFuncCachePreferL1); hipLaunchKernelGGL(( setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows()); cudaCheckError(); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_h &A, VVector &b, VVector &x, bool firstStep, ViewType separation_flags) { FatalError("M*M Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const Matrix_h &A, const VVector &b, VVector &x, ViewType 
separation_flags) { VVector newx(x.size()); int bsize = A.get_block_dimx(); // Allocate space for block_matrix ValueTypeA **E = new ValueTypeA* [bsize]; for ( int i = 0; i < bsize; i++) { E[i] = new ValueTypeA[bsize]; } ValueTypeB *bmAx = new ValueTypeB[bsize]; ValueTypeB *temp = new ValueTypeB[bsize]; //for each block row for (int i = 0; i < A.get_num_rows(); i++) { // Compute b - sum_j A_j x_j (denoted by bmAx) for block_row i // Load diagonal for (int m = 0; m < bsize; m++) { for (int n = 0; n < bsize; n++) { E[m][n] = A.values[A.diag[i * bsize * bsize + bsize * m + n]]; } bmAx[m] = types::util<ValueTypeB>::get_zero(); temp[m] = types::util<ValueTypeB>::get_zero(); } // Contribution from diagonal for (int m = 0; m < bsize; m++) for (int n = 0; n < bsize; n++) { bmAx[m] = bmAx[m] - E[m][n] * x[i * bsize + n]; } // Contribution from each nonzero column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { IndexType jcol = A.col_indices[j]; for (int m = 0; m < bsize; m++) for (int n = 0; n < bsize; n++) { bmAx[m] = bmAx[m] - A.values[j * bsize * bsize + bsize * m + n] * x[jcol * bsize + n]; } } // Add b for (int m = 0; m < bsize; m++) { bmAx[m] = bmAx[m] + b[i * bsize + m]; } gaussianEliminationRowMajor(E, temp, bmAx, bsize); // Compute new value of x for (int m = 0; m < bsize; m++) { newx[i * bsize + m] = x[i * bsize + m] + temp[m] * this->weight; } } x.swap(newx); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_4x4(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { IndexType bsize = A.get_block_dimy(); ValueTypeA **E = new ValueTypeA* [bsize]; for ( int i = 0; i < bsize; i++) { E[i] = new ValueTypeA[bsize]; } ValueTypeB *rhs = new ValueTypeB[bsize]; ValueTypeB *temp = new ValueTypeB[bsize]; //for each block row for (int i = 0; i < A.get_num_rows(); i++) { // Load diagonal for (int m = 0; m < bsize; m++) { for (int n = 0; n < bsize; n++) { E[m][n] = A.values[A.diag[i * bsize * bsize + bsize * m + n]]; } rhs[m] = types::util<ValueTypeB>::get_zero(); } //rhs for (int m = 0; m < bsize; m++) { rhs[m] = rhs[m] + b[i * bsize + m]; } // Solve for temp gaussianEliminationRowMajor(E, temp, rhs, bsize); for (int m = 0; m < bsize; m++) { x[i * bsize + m] = temp[m] * this->weight; } } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_const(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { VVector newx(x.size()); //for each row for (int i = 0; i < A.get_num_rows(); i++) { ValueTypeB Axi = types::util<ValueTypeB>::get_zero(); ValueTypeB d = types::util<ValueTypeB>::get_one() * A.values[A.diag[i]]; ValueTypeB mydiaginv = types::util<ValueTypeB>::get_one() * this->weight / (isNotCloseToZero(d) ? 
d : epsilon(d) ); //for each column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { Axi = Axi + A.values[j] * x[A.col_indices[j]]; } newx[i] = x[i] + (b[i] - Axi) * mydiaginv ; } x.swap(newx); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_h &A, VVector &b, VVector &x, ViewType separation_flags) { this->smooth_1x1_const(A, b, x, separation_flags); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags) { AMGX_CPU_PROFILER( "JacobiSolver::smooth_1x1 " ); if (this->t_res.size() != x.size()) { this->t_res.resize(x.size()); } if (this->y.size() != b.size()) { this->y.resize(b.size()); this->y.tag = this->tag * 100 + 3; this->y.set_block_dimx(b.get_block_dimx()); this->y.set_block_dimy(b.get_block_dimy()); } int num_rows = A.get_num_rows(); int offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); this->y.dirtybit = 0; multiply( A, x, this->y, separation_flags ); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( x.begin() + offset, this->Dinv.begin() + offset, b.begin() + offset, this->y.begin() + offset)), thrust::make_zip_iterator(thrust::make_tuple( x.begin() + A.get_num_rows(), this->Dinv.begin() + A.get_num_rows(), b.begin() + A.get_num_rows(), this->y.begin() + A.get_num_rows())), x.begin() + offset, jacobi_postsmooth_functor<ValueTypeA, ValueTypeB>( this->weight )); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { //for each row for (int i = 0; i < A.get_num_rows(); i++) { ValueTypeB d = types::util<ValueTypeB>::get_one() * A.values[A.diag[i]]; ValueTypeB mydiag = types::util<ValueTypeB>::get_one() * this->weight / (isNotCloseToZero(d) ? 
d : epsilon(d)); x[i] = b[i] * mydiag; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { AMGX_CPU_PROFILER( "JacobiSolver::smooth_with_0_initial_guess_1x1 " ); int num_rows = A.get_num_rows(); int offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); thrust::transform( b.begin( ) + offset, b.begin( ) + A.get_num_rows(), this->Dinv.begin( ) + offset, x.begin( ) + offset, jacobi_presmooth_functor<ValueTypeA, ValueTypeB>( this->weight )); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_d &A, VVector &b, VVector &x, bool firstStep, ViewType separation_flags) { IndexType num_rows; IndexType offset; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); // aux vector initialization if (this->y.size() != b.size()) { this->y.resize(b.size()); this->y.tag = this->tag * 100 + 3; this->y.set_block_dimx(b.get_block_dimx()); this->y.set_block_dimy(b.get_block_dimy()); } thrust::copy(b.begin(), b.end(), this->y.begin()); // copy of vector b cudaCheckError(); Cusparse::bsrmv(types::util<ValueTypeB>::get_minus_one(), A, x, types::util<ValueTypeB>::get_one(), this->y, separation_flags); // y= -1.0f*(A.x) + y cudaCheckError(); Cusparse::bsrmv(types::util<ValueTypeB>::get_one() * this->weight, A, this->Dinv, this->y, types::util<ValueTypeB>::get_one(), x, separation_flags); // t_res = t_res + w*(Dinv.y) @ view cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { if (this->t_res.size() != x.size()) { this->t_res.resize(x.size()); } const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); const ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); const ValueTypeB *b_ptr = b.raw(); ValueTypeB *x_ptr = x.raw(); ValueTypeB *xout_ptr = this->t_res.raw(); // always store original x IndexType num_rows = A.get_num_rows(); IndexType offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); const int threads_per_block = 512; const int eightwarps_per_block = threads_per_block / 4; const int num_blocks = ::min( AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / eightwarps_per_block + 1); // XXX We need to add latency hiding here if (!A.is_matrix_singleGPU()) { A.manager->exchange_halo(x, x.tag); } hipFuncSetCacheConfig(jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16>, hipFuncCachePreferL1); hipLaunchKernelGGL(( jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, Dinv_ptr, b_ptr, x_ptr, this->weight, offset + num_rows, xout_ptr, offset); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, 
AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_4x4(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { cudaCheckError(); const ValueTypeA *A_values_ptr = A.values.raw(); const ValueTypeB *b_ptr = b.raw(); const ValueTypeA *Dinv_ptr = this->Dinv.raw(); ValueTypeB *x_ptr = x.raw(); IndexType num_rows = A.get_num_rows(); IndexType offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); const int threads_per_block = 512; const int eightwarps_per_block = threads_per_block / 4; const int num_blocks = ::min( AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / eightwarps_per_block + 1); hipFuncSetCacheConfig(jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2>, hipFuncCachePreferL1); hipLaunchKernelGGL(( jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, Dinv_ptr, b_ptr, this->weight, offset + num_rows, x_ptr, offset); cudaCheckError(); } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class BlockJacobiSolver_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class BlockJacobiSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace block_jacobi } // namespace amgx
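The jacobi_postsmooth_functor in the file above applies the weighted Jacobi update x <- x + omega*(b - A*x)/d element-wise once y = A*x has been formed. As a plain-CPU point of reference only (not taken from AMGX, and ignoring the block-matrix and multi-GPU machinery), a scalar CSR version of one such sweep could look like the sketch below; the matrix and vectors in main are made up for illustration.

#include <cstdio>
#include <vector>

// Hypothetical scalar (1x1-block) reference of one weighted Jacobi sweep,
// mirroring jacobi_postsmooth_functor: x_new = x + omega * (b - A*x) / diag.
// Not AMGX code; CSR arrays row_ptr/col_idx/val and diag are assumed inputs.
void jacobi_sweep(int n, const std::vector<int> &row_ptr, const std::vector<int> &col_idx,
                  const std::vector<double> &val, const std::vector<double> &diag,
                  const std::vector<double> &b, std::vector<double> &x, double omega) {
    std::vector<double> y(n, 0.0);
    for (int i = 0; i < n; ++i)                       // y = A * x
        for (int j = row_ptr[i]; j < row_ptr[i + 1]; ++j)
            y[i] += val[j] * x[col_idx[j]];
    for (int i = 0; i < n; ++i)                       // x += omega * (b - y) / d
        x[i] += omega * (b[i] - y[i]) / diag[i];
}

int main() {
    // 2x2 example: A = [[4,1],[1,3]], b = [1,2], x0 = [0,0], omega = 2/3.
    std::vector<int> row_ptr = {0, 2, 4}, col_idx = {0, 1, 0, 1};
    std::vector<double> val = {4, 1, 1, 3}, diag = {4, 3}, b = {1, 2}, x = {0, 0};
    for (int s = 0; s < 25; ++s) jacobi_sweep(2, row_ptr, col_idx, val, diag, b, x, 2.0 / 3.0);
    std::printf("x = (%f, %f)\n", x[0], x[1]);        // approaches the solution (1/11, 7/11)
    return 0;
}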
01e3cc8c955d2d1b53b542b965f4668ab701f9b7.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <solvers/block_jacobi_solver.h> #include <solvers/block_common_solver.h> #include <gaussian_elimination.h> #include <basic_types.h> #include <cutil.h> #include <util.h> #include <string> #include <miscmath.h> #include <texture.h> #include <amgx_cusparse.h> #include <ostream> #include <amgx_types/util.h> namespace amgx { namespace block_jacobi_solver { template <typename ValueTypeA, typename ValueTypeB> struct jacobi_presmooth_functor { double omega; jacobi_presmooth_functor( double omega ) : omega( omega ) {} __host__ __device__ ValueTypeB operator()( const ValueTypeB &b, const ValueTypeA &d ) const { return isNotCloseToZero(d) ? b * omega / d : b * omega / epsilon(d); } }; template <typename ValueTypeA, typename ValueTypeB> struct jacobi_postsmooth_functor { double omega; jacobi_postsmooth_functor( double omega ) : omega( omega ) {} template<typename Tuple> __host__ __device__ ValueTypeB operator( )( const Tuple &t ) const { ValueTypeB x = thrust::get<0>(t); ValueTypeA d = thrust::get<1>(t); ValueTypeB b = thrust::get<2>(t); ValueTypeB y = thrust::get<3>(t); // return x + omega * (b - y) / d. d = isNotCloseToZero(d) ? d : epsilon(d); d = types::util<ValueTypeA>::get_one() / d; b = b - y; b = b * omega; return b * d + x; } }; template <typename ValueTypeB> struct add_functor { __host__ __device__ ValueTypeB operator()( const ValueTypeB &x, const ValueTypeB &y )const { return x + y; } }; template<typename T> __device__ __forceinline__ T fmnaOp (T a, T b, T c) { return -(a * b) + c; } template<typename T> __device__ __forceinline__ T mulOp (T a, T b) { return a * b; } template<typename T> __device__ __forceinline__ T rcpOp (T a) { return 1.0 / (isNotCloseToZero(a) ? 
a : epsilon(a)); } template<typename T> __device__ __forceinline__ T absOp (T a) { return fabs(a); } // ----------------------------------- // KERNELS // ----------------------------------- template<typename T1, typename T2, int N> __global__ void matinv_matrix_per_thread_pivot (const T1 *A, T2 *Ainv, int batch) { #define A(row,col) A[(col)*N+(row)] #define AA(row,col) AA[(col)*N+(row)] #define Ainv(row,col) Ainv[(col)*N+(row)] const int blkNum = blockIdx.x * blockDim.x + threadIdx.x; int perm0, perm1, perm2, perm3; int icol0, icol1, icol2, icol3; T2 AA00, AA01, AA02, AA03, AA10, AA11, AA12, AA13; T2 AA20, AA21, AA22, AA23, AA30, AA31, AA32, AA33; T2 p, t; int i, pvt; A += blkNum * N * N; Ainv += blkNum * N * N; if (blkNum < batch) { AA00 = A[0]; AA10 = A[1]; AA20 = A[2]; AA30 = A[3]; AA01 = A[4]; AA11 = A[5]; AA21 = A[6]; AA31 = A[7]; AA02 = A[8]; AA12 = A[9]; AA22 = A[10]; AA32 = A[11]; AA03 = A[12]; AA13 = A[13]; AA23 = A[14]; AA33 = A[15]; perm0 = 0; perm1 = 1; perm2 = 2; perm3 = 3; /****************** iteration 0 ***********/ /* search pivot row */ p = absOp (AA00); pvt = 0; t = absOp (AA10); if (t > p) { p = t; pvt = 1; } t = absOp (AA20); if (t > p) { p = t; pvt = 2; } t = absOp (AA30); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 0 */ if (pvt == 1) { t = AA00; AA00 = AA10; AA10 = t; t = AA01; AA01 = AA11; AA11 = t; t = AA02; AA02 = AA12; AA12 = t; t = AA03; AA03 = AA13; AA13 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm1; perm1 = i; } if (pvt == 2) { t = AA00; AA00 = AA20; AA20 = t; t = AA01; AA01 = AA21; AA21 = t; t = AA02; AA02 = AA22; AA22 = t; t = AA03; AA03 = AA23; AA23 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm2; perm2 = i; } if (pvt == 3) { t = AA00; AA00 = AA30; AA30 = t; t = AA01; AA01 = AA31; AA31 = t; t = AA02; AA02 = AA32; AA32 = t; t = AA03; AA03 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm0; perm0 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA00); icol0 = perm0; AA00 = t; AA01 = mulOp (t, AA01); AA02 = mulOp (t, AA02); AA03 = mulOp (t, AA03); /* eliminate above and below current row */ t = AA10; AA10 = mulOp (-t, AA00); AA11 = fmnaOp (t, AA01, AA11); AA12 = fmnaOp (t, AA02, AA12); AA13 = fmnaOp (t, AA03, AA13); t = AA20; AA20 = mulOp (-t, AA00); AA21 = fmnaOp (t, AA01, AA21); AA22 = fmnaOp (t, AA02, AA22); AA23 = fmnaOp (t, AA03, AA23); t = AA30; AA30 = mulOp (-t, AA00); AA31 = fmnaOp (t, AA01, AA31); AA32 = fmnaOp (t, AA02, AA32); AA33 = fmnaOp (t, AA03, AA33); /****************** iteration 1 ***********/ /* search pivot row */ p = absOp (AA11); pvt = 1; t = absOp (AA21); if (t > p) { p = t; pvt = 2; } t = absOp (AA31); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 1 */ if (pvt == 2) { t = AA10; AA10 = AA20; AA20 = t; t = AA11; AA11 = AA21; AA21 = t; t = AA12; AA12 = AA22; AA22 = t; t = AA13; AA13 = AA23; AA23 = t; /* update permutation vector based on row swap */ i = perm1; perm1 = perm2; perm2 = i; } else if (pvt == 3) { t = AA10; AA10 = AA30; AA30 = t; t = AA11; AA11 = AA31; AA31 = t; t = AA12; AA12 = AA32; AA32 = t; t = AA13; AA13 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm1; perm1 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA11); icol1 = perm1; AA10 = mulOp (t, AA10); AA11 = t; AA12 = mulOp (t, AA12); AA13 = mulOp (t, AA13); /* eliminate above and below current row */ t = AA01; AA00 = fmnaOp (t, AA10, AA00); AA01 = mulOp (-t, AA11); AA02 = fmnaOp (t, AA12, AA02); AA03 = fmnaOp 
(t, AA13, AA03); t = AA21; AA20 = fmnaOp (t, AA10, AA20); AA21 = mulOp (-t, AA11); AA22 = fmnaOp (t, AA12, AA22); AA23 = fmnaOp (t, AA13, AA23); t = AA31; AA30 = fmnaOp (t, AA10, AA30); AA31 = mulOp (-t, AA11); AA32 = fmnaOp (t, AA12, AA32); AA33 = fmnaOp (t, AA13, AA33); /****************** iteration 2 ****************/ /* search pivot row */ p = absOp (AA22); pvt = 2; t = absOp (AA32); if (t > p) { p = t; pvt = 3; } /* swap pivot row with row 2 */ if (pvt == 3) { t = AA20; AA20 = AA30; AA30 = t; t = AA21; AA21 = AA31; AA31 = t; t = AA22; AA22 = AA32; AA32 = t; t = AA23; AA23 = AA33; AA33 = t; /* update permutation vector based on row swap */ i = perm2; perm2 = perm3; perm3 = i; } /* scale current row */ t = rcpOp (AA22); icol2 = perm2; AA20 = mulOp (t, AA20); AA21 = mulOp (t, AA21); AA22 = t; AA23 = mulOp (t, AA23); /* eliminate above and below current row */ t = AA02; AA00 = fmnaOp (t, AA20, AA00); AA01 = fmnaOp (t, AA21, AA01); AA02 = mulOp (-t, AA22); AA03 = fmnaOp (t, AA23, AA03); t = AA12; AA10 = fmnaOp (t, AA20, AA10); AA11 = fmnaOp (t, AA21, AA11); AA12 = mulOp (-t, AA22); AA13 = fmnaOp (t, AA23, AA13); t = AA32; AA30 = fmnaOp (t, AA20, AA30); AA31 = fmnaOp (t, AA21, AA31); AA32 = mulOp (-t, AA22); AA33 = fmnaOp (t, AA23, AA33); /****************** iteration 3 ****************/ /* scale current row */ t = rcpOp (AA33); icol3 = perm3; AA30 = mulOp (t, AA30); AA31 = mulOp (t, AA31); AA32 = mulOp (t, AA32); AA33 = t; /* eliminate above and below current row */ t = AA03; AA00 = fmnaOp (t, AA30, AA00); AA01 = fmnaOp (t, AA31, AA01); AA02 = fmnaOp (t, AA32, AA02); AA03 = mulOp (-t, AA33); t = AA13; AA10 = fmnaOp (t, AA30, AA10); AA11 = fmnaOp (t, AA31, AA11); AA12 = fmnaOp (t, AA32, AA12); AA13 = mulOp (-t, AA33); t = AA23; AA20 = fmnaOp (t, AA30, AA20); AA21 = fmnaOp (t, AA31, AA21); AA22 = fmnaOp (t, AA32, AA22); AA23 = mulOp (-t, AA33); /* sort columns into the correct order */ Ainv(0, icol0) = AA00; Ainv(1, icol0) = AA10; Ainv(2, icol0) = AA20; Ainv(3, icol0) = AA30; Ainv(0, icol1) = AA01; Ainv(1, icol1) = AA11; Ainv(2, icol1) = AA21; Ainv(3, icol1) = AA31; Ainv(0, icol2) = AA02; Ainv(1, icol2) = AA12; Ainv(2, icol2) = AA22; Ainv(3, icol2) = AA32; Ainv(0, icol3) = AA03; Ainv(1, icol3) = AA13; Ainv(2, icol3) = AA23; Ainv(3, icol3) = AA33; } } template<typename T, int N> __global__ void matinv_matrix_per_thread_no_pivot (const T *A, T *Ainv, int batch) { #define A(row,col) A[(col)*N+(row)] #define AA(row,col) AA[(col)*N+(row)] #define Ainv(row,col) Ainv[(col)*N+(row)] const int blkNum = blockIdx.x * blockDim.x + threadIdx.x; T AA00, AA01, AA02, AA03, AA10, AA11, AA12, AA13; T AA20, AA21, AA22, AA23, AA30, AA31, AA32, AA33; T t; A += blkNum * N * N; Ainv += blkNum * N * N; if (blkNum < batch) { AA00 = A[0]; AA10 = A[1]; AA20 = A[2]; AA30 = A[3]; AA01 = A[4]; AA11 = A[5]; AA21 = A[6]; AA31 = A[7]; AA02 = A[8]; AA12 = A[9]; AA22 = A[10]; AA32 = A[11]; AA03 = A[12]; AA13 = A[13]; AA23 = A[14]; AA33 = A[15]; /****************** iteration 0 ***********/ /* search pivot row */ t = 1.0 / (AA00); AA00 = t; AA01 = t * AA01; AA02 = t * AA02; AA03 = t * AA03; /* eliminate above and below current row */ t = AA10; AA10 = -t * AA00; AA11 = fmnaOp (t, AA01, AA11); AA12 = fmnaOp (t, AA02, AA12); AA13 = fmnaOp (t, AA03, AA13); t = AA20; AA20 = -t * AA00; AA21 = fmnaOp (t, AA01, AA21); AA22 = fmnaOp (t, AA02, AA22); AA23 = fmnaOp (t, AA03, AA23); t = AA30; AA30 = -t * AA00; AA31 = fmnaOp (t, AA01, AA31); AA32 = fmnaOp (t, AA02, AA32); AA33 = fmnaOp (t, AA03, AA33); /****************** iteration 1 
***********/ /* scale current row */ t = 1.0 / (AA11); AA10 = t * AA10; AA11 = t; AA12 = t * AA12; AA13 = t * AA13; /* eliminate above and below current row */ t = AA01; AA00 = fmnaOp (t, AA10, AA00); AA01 = -t * AA11; AA02 = fmnaOp (t, AA12, AA02); AA03 = fmnaOp (t, AA13, AA03); t = AA21; AA20 = fmnaOp (t, AA10, AA20); AA21 = -t * AA11; AA22 = fmnaOp (t, AA12, AA22); AA23 = fmnaOp (t, AA13, AA23); t = AA31; AA30 = fmnaOp (t, AA10, AA30); AA31 = -t * AA11; AA32 = fmnaOp (t, AA12, AA32); AA33 = fmnaOp (t, AA13, AA33); /****************** iteration 2 ****************/ /* scale current row */ t = 1.0 / (AA22); AA20 = t * AA20; AA21 = t * AA21; AA22 = t; AA23 = t * AA23; /* eliminate above and below current row */ t = AA02; AA00 = fmnaOp (t, AA20, AA00); AA01 = fmnaOp (t, AA21, AA01); AA02 = -t * AA22; AA03 = fmnaOp (t, AA23, AA03); t = AA12; AA10 = fmnaOp (t, AA20, AA10); AA11 = fmnaOp (t, AA21, AA11); AA12 = -t * AA22; AA13 = fmnaOp (t, AA23, AA13); t = AA32; AA30 = fmnaOp (t, AA20, AA30); AA31 = fmnaOp (t, AA21, AA31); AA32 = -t * AA22; AA33 = fmnaOp (t, AA23, AA33); /****************** iteration 3 ****************/ /* scale current row */ t = 1.0 / (AA33); AA30 = t * AA30; AA31 = t * AA31; AA32 = t * AA32; AA33 = t; /* eliminate above and below current row */ t = AA03; AA00 = fmnaOp (t, AA30, AA00); AA01 = fmnaOp (t, AA31, AA01); AA02 = fmnaOp (t, AA32, AA02); AA03 = -t * AA33; t = AA13; AA10 = fmnaOp (t, AA30, AA10); AA11 = fmnaOp (t, AA31, AA11); AA12 = fmnaOp (t, AA32, AA12); AA13 = -t * AA33; t = AA23; AA20 = fmnaOp (t, AA30, AA20); AA21 = fmnaOp (t, AA31, AA21); AA22 = fmnaOp (t, AA32, AA22); AA23 = -t * AA33; /* sort columns into the correct order */ Ainv(0, 0) = AA00; Ainv(1, 0) = AA10; Ainv(2, 0) = AA20; Ainv(3, 0) = AA30; Ainv(0, 1) = AA01; Ainv(1, 1) = AA11; Ainv(2, 1) = AA21; Ainv(3, 1) = AA31; Ainv(0, 2) = AA02; Ainv(1, 2) = AA12; Ainv(2, 2) = AA22; Ainv(3, 2) = AA32; Ainv(0, 3) = AA03; Ainv(1, 3) = AA13; Ainv(2, 3) = AA23; Ainv(3, 3) = AA33; } } template<typename IndexType, typename ValueTypeA, int threads_per_block, int halfwarps_per_block> __global__ void setupBlockJacobiSmoothbBigBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows, int bsize, int bsize_sq, ValueTypeA *temp1) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; int halfwarp_id = tid >> 4; const int block_halfwarp_id = threadIdx.x >> 4; const int mat_entry_index = threadIdx.x & (16 - 1); const int i_ind = mat_entry_index >> 2; const int j_ind = mat_entry_index & 3; extern __shared__ volatile char schar[]; volatile ValueTypeA *s_Amat; s_Amat = (ValueTypeA *)&schar[0]; int tile_num = (bsize - 1) / 4 + 1; ValueTypeA *e_out = &temp1[(blockIdx.x * blockDim.x + threadIdx.x) * tile_num * tile_num]; while (halfwarp_id < num_block_rows) { int offset = halfwarp_id * bsize_sq + i_ind * bsize + j_ind; int s_offset = block_halfwarp_id * bsize_sq; // Store the diagonal for (int t1 = 0; t1 < tile_num; t1++) for (int t2 = 0; t2 < tile_num; t2++) if ((t1 * 4 + i_ind) < bsize && (t2 * 4 + j_ind) < bsize) { e_out[t1 * tile_num + t2] = values[bsize_sq * dia_indices[halfwarp_id] + (t1 * 4 + i_ind) * bsize + t2 * 4 + j_ind]; } // Each thread stores its entry in s_Amat for (int t1 = 0; t1 < tile_num; t1++) for (int t2 = 0; t2 < tile_num; t2++) if ((t1 * 4 + i_ind) < bsize && (t2 * 4 + j_ind) < bsize) { types::util<ValueTypeA>::volcast( e_out[t1 * tile_num + t2], s_Amat + (s_offset + (t1 * 4 + i_ind) * 
bsize + t2 * 4 + j_ind) ); } compute_block_inverse2<IndexType, ValueTypeA, halfwarps_per_block> ( s_Amat, s_offset, offset, i_ind, j_ind, Dinv, tile_num, bsize, bsize_sq ); halfwarp_id += gridDim.x * halfwarps_per_block; } } template<typename IndexType, typename ValueTypeA, int blockrows_per_cta, int blockrows_per_warp, int bsize, int bsize_sq> __global__ void setupBlockJacobiSmoothBbyBBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows) { int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x & 31; // padding row blocks to fit in a single warp if ( warp_thread_id >= blockrows_per_warp * bsize_sq ) { return; } // new thread id with padding int tid = warp_id * blockrows_per_warp * bsize_sq + warp_thread_id; int cta_blockrow_id = tid / bsize_sq; int blockrow_id = blockIdx.x * blockrows_per_cta + cta_blockrow_id; const int mat_entry_index = tid - cta_blockrow_id * bsize_sq; const int i_ind = mat_entry_index / bsize; const int j_ind = mat_entry_index - i_ind * bsize; volatile __shared__ ValueTypeA s_Amat[bsize_sq * blockrows_per_cta ]; ValueTypeA e_out; while (blockrow_id < num_block_rows) { int offset = blockrow_id * bsize_sq + mat_entry_index; // Store the diagonal e_out = values[bsize_sq * dia_indices[blockrow_id] + mat_entry_index]; // Each thread stores its entry in s_Amat types::util<ValueTypeA>::volcast(e_out, s_Amat + tid); compute_block_inverse_row_major<IndexType, ValueTypeA, blockrows_per_cta, bsize, bsize_sq> ( s_Amat, cta_blockrow_id * bsize_sq, offset, i_ind, j_ind, Dinv ); blockrow_id += gridDim.x * blockrows_per_cta; } } template<typename ValueTypeA, typename ValueTypeB, typename IndexType, int threads_per_block, int halfwarps_per_block, int bsize, int log_bsize, int bsize_sq, int log_bsize_sq> __global__ void setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2(const IndexType *dia_indices, const ValueTypeA *A_values, ValueTypeA *Dinv, const int num_block_rows) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; int halfwarp_id = tid >> log_bsize_sq; const int block_halfwarp_id = threadIdx.x >> log_bsize_sq; const int mat_entry_index = threadIdx.x & (bsize_sq - 1); const int i_ind = mat_entry_index >> log_bsize; const int j_ind = mat_entry_index & (bsize - 1); volatile __shared__ ValueTypeA s_Amat[bsize_sq * halfwarps_per_block ]; int offset; ValueTypeA e_out; while (halfwarp_id < num_block_rows) { // Store the diagonal offset = halfwarp_id * bsize_sq; e_out = A_values[bsize_sq * dia_indices[halfwarp_id] + mat_entry_index]; // Each thread stores its entry in s_Amat types::util<ValueTypeA>::volcast(e_out, s_Amat + threadIdx.x); compute_block_inverse_row_major<int, ValueTypeA, halfwarps_per_block, bsize, bsize_sq> ( s_Amat, block_halfwarp_id * bsize_sq, offset + mat_entry_index, i_ind, j_ind, Dinv ); halfwarp_id += gridDim.x * blockDim.x >> log_bsize_sq; } } // Kernel to smooth with Jacobi smoother, Dinv assumed to be computed template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, int bsize_sq> __global__ void jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *dia_indices, const ValueTypeA *nonzero_values, const ValueTypeA *Dinv, const ValueTypeB *b, const ValueTypeB *x, double weight, const int num_block_rows, ValueTypeB *xout, const int row_offset) { const int tid = blockDim.x 
* blockIdx.x + threadIdx.x; int eighthwarp_id = row_offset + (tid >> log_bsize); const int block_eighthwarp_id = threadIdx.x >> log_bsize; const int vec_entry_index = threadIdx.x & (bsize - 1); volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ]; ValueTypeB bmAx, xin; ValueTypeB temp[bsize]; int offset, i, s_offset; while (eighthwarp_id < num_block_rows) { i = eighthwarp_id; offset = i * bsize + vec_entry_index; // 1. COMPUTING b-Ax bmAx = b[offset]; // Contribution from diagonal xin = x[offset]; types::util<ValueTypeB>::volcast(xin, s_xtemp + threadIdx.x); // Load dia_values and do matrix multiply loadAsVector<bsize>(nonzero_values + bsize_sq * dia_indices[i] + vec_entry_index * bsize, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx - temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } // Contribution from each nonzero column int jmax = row_offsets[i + 1]; for (int jind = row_offsets[i]; jind < jmax; jind++) { IndexType jcol = __cachingLoad(&column_indices[jind]); if (jcol != i) { offset = jcol * bsize + vec_entry_index; types::util<ValueTypeB>::volcast(x[offset], s_xtemp + threadIdx.x); // Load nonzero_values s_offset = block_eighthwarp_id * bsize; offset = jind * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(nonzero_values + offset, temp); #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx - temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } } } types::util<ValueTypeB>::volcast(bmAx, s_xtemp + threadIdx.x); bmAx = types::util<ValueTypeB>::get_zero(); // 2. Multiply by Dinv // Load Dinv and multiply to RHS offset = i * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(Dinv + offset, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } xout[i * bsize + vec_entry_index] = xin + bmAx * weight; eighthwarp_id += gridDim.x * blockDim.x >> log_bsize; } } // Kernel to smooth with jacobi smoother, zero initial guess template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize> __global__ void jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2(const ValueTypeA *Dinv, const ValueTypeB *b, double weight, const int num_block_rows, ValueTypeB *xout, const int row_offset) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int block_eighthwarp_id = threadIdx.x >> log_bsize; const int vec_entry_index = threadIdx.x & (bsize - 1); volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ]; ValueTypeB bmAx; ValueTypeB temp[bsize]; int offset, i, s_offset; for (int eighthwarp_id = row_offset + (tid >> log_bsize); eighthwarp_id < num_block_rows; eighthwarp_id += (gridDim.x * blockDim.x >> log_bsize)) { i = eighthwarp_id; offset = i * bsize + vec_entry_index; types::util<ValueTypeB>::volcast(b[offset], s_xtemp + threadIdx.x); bmAx = types::util<ValueTypeB>::get_zero(); // Load Dinv and multiply to RHS offset = i * bsize * bsize + vec_entry_index * bsize; loadAsVector<bsize>(Dinv + offset, temp); s_offset = block_eighthwarp_id * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { bmAx = bmAx + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]); } xout[i * bsize + vec_entry_index] = bmAx * weight; } } //-------------------------------- // Methods //-------------------------------- // Constructor template<class 
T_Config> BlockJacobiSolver_Base<T_Config>::BlockJacobiSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope) { weight = cfg.AMG_Config::template getParameter<double>("relaxation_factor", cfg_scope); if (weight == 0) { weight = 1.; amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Block Jacobi smoother\n"); } } // Destructor template<class T_Config> BlockJacobiSolver_Base<T_Config>::~BlockJacobiSolver_Base() { this->Dinv.resize(0); } template<class T_Config> void BlockJacobiSolver_Base<T_Config>::printSolverParameters() const { std::cout << "relaxation_factor= " << this->weight << std::endl; } // Solver setup template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure) { Matrix<T_Config> *A_as_matrix = dynamic_cast<Matrix<T_Config>*>(this->m_A); if (!A_as_matrix) { FatalError("BlockJacobiSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } computeDinv( *A_as_matrix ); if ( A_as_matrix->getBlockFormat() != ROW_MAJOR ) { FatalError("Block Jacobi solver only supports row major format", AMGX_ERR_CONFIGURATION); } } // template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) { } // Solve one iteration template<class T_Config> bool BlockJacobiSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { //bool done = false; Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A; if (xIsZero) { x.dirtybit = 0; } ViewType oldView = A_as_matrix->currentView(); A_as_matrix->setViewExterior(); ViewType flags = (ViewType)(A_as_matrix->getViewInterior() | A_as_matrix->getViewExterior()); if (A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1) { if (xIsZero) { smooth_with_0_initial_guess_1x1(*A_as_matrix, b, x, flags); } else { smooth_1x1(*A_as_matrix, b, x, flags); } } else if (A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4) { if (xIsZero) { smooth_with_0_initial_guess_4x4(*A_as_matrix, b, x, flags); } else { smooth_4x4(*A_as_matrix, b, x, flags); } } else if (A_as_matrix->get_block_dimx() == A_as_matrix->get_block_dimy()) { if (xIsZero) { thrust::fill(x.begin(), x.end(), types::util<ValueTypeB>::get_zero()); cudaCheckError(); } smooth_BxB(*A_as_matrix, b, x, true, flags); } else { FatalError("Unsupported block size for BlockJacobi_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if (A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4) { if (!xIsZero) // we write to t_res vector to avoid race condition in the kernel { x.swap(this->t_res); } } x.dirtybit = 1; A_as_matrix->setView(oldView); return this->converged( b, x ); } template<class T_Config> void BlockJacobiSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x ) {} template<class T_Config> void BlockJacobiSolver_Base<T_Config>::computeDinv( Matrix<T_Config> &A) { Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A; ViewType oldView = A.currentView(); A.setView(A_as_matrix->getViewExterior()); if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1) { this->computeDinv_1x1(A); } else if (A.get_block_dimx() == 2 && A.get_block_dimy() == 2) { this->computeDinv_bxb<2>(A); } else if (A.get_block_dimx() == 3 && A.get_block_dimy() == 3) { this->computeDinv_3x3(A); } else if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4) { this->computeDinv_4x4(A); } else if (A.get_block_dimx() == 5 && A.get_block_dimy() == 5) { this->computeDinv_bxb<5>(A); } else if 
(A.get_block_dimx() == A.get_block_dimy() && A.get_block_dimy() > 5) { this->computeDinv_Big(A, A.get_block_dimx()); } A.setView(oldView); } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_d &A) { Matrix_d *A_as_matrix = (Matrix_d *) this->m_A; // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); if ( A_as_matrix->hasProps(DIAG) ) { const int num_values = A_as_matrix->diagOffset() * A_as_matrix->get_block_size(); thrust::copy( A_as_matrix->values.begin() + num_values, A_as_matrix->values.begin() + num_values + A_as_matrix->get_num_rows()*A_as_matrix->get_block_size(), this->Dinv.begin() ); cudaCheckError(); } else { find_diag( *A_as_matrix ); } } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_h &A) { // Do nothing } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_4x4(const Matrix_h &A) { //FatalError("Block Jacobi smoother not implemented with this format, exiting"); //std::cout << "Warning, CPU version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_3x3(const Matrix_h &A) { FatalError("3*3 Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); //std::cout << "Warning, CPU version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_Big(const Matrix_h &A, const int bsize) { FatalError("Big Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); //std::cout << "Warning, CPU version doesn't store the inverse of the blocks, like the GPU version" << std::endl; } // Finding diag on device, CSR format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::find_diag( const Matrix_h &A ) { //for each row for (int i = 0; i < A.get_num_rows(); i++) { //for each column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { if (A.col_indices[j] == i) { this->Dinv[i] = A.values[j]; break; } if (j == A.row_offsets[i + 1] - 1) { FatalError("Could not find a diagonal value", AMGX_ERR_BAD_PARAMETERS); } } } } // Finding diag on device, CSR format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::find_diag( const Matrix_d &A ) { AMGX_CPU_PROFILER( "JacobiSolver::find_diag " ); const size_t THREADS_PER_BLOCK = 128; const 
size_t NUM_BLOCKS = std::min(AMGX_GRID_MAX_SIZE, (int)ceil((float)(A.get_num_rows()) / (float)(THREADS_PER_BLOCK))); find_diag_kernel_indexed_dia <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>( A.get_num_rows(), A.diag.raw(), A.values.raw(), this->Dinv.raw()); cudaCheckError(); } // Method to compute the inverse of the diagonal blocks template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_4x4(const Matrix_d &A) { // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_indices_ptr = A.diag.raw(); const ValueTypeA *A_values = A.values.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); #if 1 const int threads_per_block = 512; const int halfwarps_per_block = threads_per_block / 16; const int num_blocks = std::min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / halfwarps_per_block + 1); cudaFuncSetCacheConfig(setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2<ValueTypeA, ValueTypeB, IndexType, threads_per_block, halfwarps_per_block, 4, 2, 16, 4>, cudaFuncCachePreferL1); setupBlockJacobiSmooth4by4BlockDiaCsrKernel_V2<ValueTypeA, ValueTypeB, IndexType, threads_per_block, halfwarps_per_block, 4, 2, 16, 4> <<< num_blocks, threads_per_block>>> (A_dia_indices_ptr, A_values, Dinv_ptr, A.get_num_rows()); cudaCheckError(); #else cudaFuncSetCacheConfig(matinv_matrix_per_thread_pivot<ValueTypeA, ValueTypeB, 4>, cudaFuncCachePreferL1); matinv_matrix_per_thread_pivot<ValueTypeA, ValueTypeB, 4> <<< (A.num_block_rows + 127) / 128, 128 >>> (A_dia_values_ptr, Dinv_ptr, A.num_block_rows); cudaCheckError(); #endif } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_3x3(const Matrix_d &A) { const int bsize = 3; // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 256; const int blockrows_per_warp = 32 / (bsize * bsize); // blockrows per cta = blockrows_per_warp * number_of_warps_per_cta const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp ; const int num_blocks = std::min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1); cudaFuncSetCacheConfig(setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize>, cudaFuncCachePreferL1); setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize> <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows()); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> 
>::computeDinv_Big(const Matrix_d &A, const int bsize) { //both DIAG supported this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); MVector temp(AMGX_GRID_MAX_SIZE * ((bsize - 1) / 4 + 1) * ((bsize - 1) / 4 + 1)); ValueTypeA *temp_ptr = temp.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 512; const int halfwarps_per_block = threads_per_block / 16; const int num_blocks = std::min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / halfwarps_per_block + 1); cudaFuncSetCacheConfig(setupBlockJacobiSmoothbBigBlockDiaCsrKernel<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block>, cudaFuncCachePreferL1); setupBlockJacobiSmoothbBigBlockDiaCsrKernel<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block> <<< num_blocks, threads_per_block, sizeof(ValueTypeA)*bsize *bsize *halfwarps_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows(), bsize, bsize * bsize, temp_ptr); cudaCheckError(); } template<class T_Config> template<int bsize> void BlockJacobiSolver_Base<T_Config>::computeDinv_bxb(const Matrix<T_Config> &A) { if (TConfig::memSpace == AMGX_host) { FatalError("BlockJacobiSmooth Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { // supports both diag this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), types::util<ValueTypeA>::get_zero()); const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); // MUST BE MULTIPLE OF 16 const int threads_per_block = 256; const int blockrows_per_warp = 32 / (bsize * bsize); const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp; const int num_blocks = std::min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1); cudaFuncSetCacheConfig(setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize>, cudaFuncCachePreferL1); setupBlockJacobiSmoothBbyBBlockDiaCsrKernel<IndexType, ValueTypeA, blockrows_per_cta, blockrows_per_warp, bsize, bsize *bsize> <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows()); cudaCheckError(); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_h &A, VVector &b, VVector &x, bool firstStep, ViewType separation_flags) { FatalError("M*M Block Jacobi smoother not implemented with host format, exiting", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { VVector newx(x.size()); int bsize = A.get_block_dimx(); // Allocate space for block_matrix ValueTypeA **E = new ValueTypeA* [bsize]; for ( int i = 0; i < bsize; 
i++) { E[i] = new ValueTypeA[bsize]; } ValueTypeB *bmAx = new ValueTypeB[bsize]; ValueTypeB *temp = new ValueTypeB[bsize]; //for each block row for (int i = 0; i < A.get_num_rows(); i++) { // Compute b - sum_j A_j x_j (denoted by bmAx) for block_row i // Load diagonal for (int m = 0; m < bsize; m++) { for (int n = 0; n < bsize; n++) { E[m][n] = A.values[A.diag[i * bsize * bsize + bsize * m + n]]; } bmAx[m] = types::util<ValueTypeB>::get_zero(); temp[m] = types::util<ValueTypeB>::get_zero(); } // Contribution from diagonal for (int m = 0; m < bsize; m++) for (int n = 0; n < bsize; n++) { bmAx[m] = bmAx[m] - E[m][n] * x[i * bsize + n]; } // Contribution from each nonzero column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { IndexType jcol = A.col_indices[j]; for (int m = 0; m < bsize; m++) for (int n = 0; n < bsize; n++) { bmAx[m] = bmAx[m] - A.values[j * bsize * bsize + bsize * m + n] * x[jcol * bsize + n]; } } // Add b for (int m = 0; m < bsize; m++) { bmAx[m] = bmAx[m] + b[i * bsize + m]; } gaussianEliminationRowMajor(E, temp, bmAx, bsize); // Compute new value of x for (int m = 0; m < bsize; m++) { newx[i * bsize + m] = x[i * bsize + m] + temp[m] * this->weight; } } x.swap(newx); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_4x4(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { IndexType bsize = A.get_block_dimy(); ValueTypeA **E = new ValueTypeA* [bsize]; for ( int i = 0; i < bsize; i++) { E[i] = new ValueTypeA[bsize]; } ValueTypeB *rhs = new ValueTypeB[bsize]; ValueTypeB *temp = new ValueTypeB[bsize]; //for each block row for (int i = 0; i < A.get_num_rows(); i++) { // Load diagonal for (int m = 0; m < bsize; m++) { for (int n = 0; n < bsize; n++) { E[m][n] = A.values[A.diag[i * bsize * bsize + bsize * m + n]]; } rhs[m] = types::util<ValueTypeB>::get_zero(); } //rhs for (int m = 0; m < bsize; m++) { rhs[m] = rhs[m] + b[i * bsize + m]; } // Solve for temp gaussianEliminationRowMajor(E, temp, rhs, bsize); for (int m = 0; m < bsize; m++) { x[i * bsize + m] = temp[m] * this->weight; } } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_const(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { VVector newx(x.size()); //for each row for (int i = 0; i < A.get_num_rows(); i++) { ValueTypeB Axi = types::util<ValueTypeB>::get_zero(); ValueTypeB d = types::util<ValueTypeB>::get_one() * A.values[A.diag[i]]; ValueTypeB mydiaginv = types::util<ValueTypeB>::get_one() * this->weight / (isNotCloseToZero(d) ? 
d : epsilon(d) ); //for each column for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++) { Axi = Axi + A.values[j] * x[A.col_indices[j]]; } newx[i] = x[i] + (b[i] - Axi) * mydiaginv ; } x.swap(newx); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_h &A, VVector &b, VVector &x, ViewType separation_flags) { this->smooth_1x1_const(A, b, x, separation_flags); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags) { AMGX_CPU_PROFILER( "JacobiSolver::smooth_1x1 " ); if (this->t_res.size() != x.size()) { this->t_res.resize(x.size()); } if (this->y.size() != b.size()) { this->y.resize(b.size()); this->y.tag = this->tag * 100 + 3; this->y.set_block_dimx(b.get_block_dimx()); this->y.set_block_dimy(b.get_block_dimy()); } int num_rows = A.get_num_rows(); int offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); this->y.dirtybit = 0; multiply( A, x, this->y, separation_flags ); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple( x.begin() + offset, this->Dinv.begin() + offset, b.begin() + offset, this->y.begin() + offset)), thrust::make_zip_iterator(thrust::make_tuple( x.begin() + A.get_num_rows(), this->Dinv.begin() + A.get_num_rows(), b.begin() + A.get_num_rows(), this->y.begin() + A.get_num_rows())), x.begin() + offset, jacobi_postsmooth_functor<ValueTypeA, ValueTypeB>( this->weight )); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flags) { //for each row for (int i = 0; i < A.get_num_rows(); i++) { ValueTypeB d = types::util<ValueTypeB>::get_one() * A.values[A.diag[i]]; ValueTypeB mydiag = types::util<ValueTypeB>::get_one() * this->weight / (isNotCloseToZero(d) ? 
d : epsilon(d)); x[i] = b[i] * mydiag; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { AMGX_CPU_PROFILER( "JacobiSolver::smooth_with_0_initial_guess_1x1 " ); int num_rows = A.get_num_rows(); int offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); thrust::transform( b.begin( ) + offset, b.begin( ) + A.get_num_rows(), this->Dinv.begin( ) + offset, x.begin( ) + offset, jacobi_presmooth_functor<ValueTypeA, ValueTypeB>( this->weight )); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_d &A, VVector &b, VVector &x, bool firstStep, ViewType separation_flags) { IndexType num_rows; IndexType offset; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); // aux vector initialization if (this->y.size() != b.size()) { this->y.resize(b.size()); this->y.tag = this->tag * 100 + 3; this->y.set_block_dimx(b.get_block_dimx()); this->y.set_block_dimy(b.get_block_dimy()); } thrust::copy(b.begin(), b.end(), this->y.begin()); // copy of vector b cudaCheckError(); Cusparse::bsrmv(types::util<ValueTypeB>::get_minus_one(), A, x, types::util<ValueTypeB>::get_one(), this->y, separation_flags); // y= -1.0f*(A.x) + y cudaCheckError(); Cusparse::bsrmv(types::util<ValueTypeB>::get_one() * this->weight, A, this->Dinv, this->y, types::util<ValueTypeB>::get_one(), x, separation_flags); // t_res = t_res + w*(Dinv.y) @ view cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { if (this->t_res.size() != x.size()) { this->t_res.resize(x.size()); } const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); const ValueTypeA *Dinv_ptr = this->Dinv.raw(); const ValueTypeA *A_nonzero_values_ptr = A.values.raw(); const ValueTypeB *b_ptr = b.raw(); ValueTypeB *x_ptr = x.raw(); ValueTypeB *xout_ptr = this->t_res.raw(); // always store original x IndexType num_rows = A.get_num_rows(); IndexType offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); const int threads_per_block = 512; const int eightwarps_per_block = threads_per_block / 4; const int num_blocks = std::min( AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / eightwarps_per_block + 1); // XXX We need to add latency hiding here if (!A.is_matrix_singleGPU()) { A.manager->exchange_halo(x, x.tag); } cudaFuncSetCacheConfig(jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16>, cudaFuncCachePreferL1); jacobiSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16> <<< num_blocks, threads_per_block>>> (A_row_offsets_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, Dinv_ptr, b_ptr, x_ptr, this->weight, offset + num_rows, xout_ptr, offset); cudaCheckError(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, 
AMGX_IndPrecision t_indPrec> void BlockJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_4x4(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flags) { cudaCheckError(); const ValueTypeA *A_values_ptr = A.values.raw(); const ValueTypeB *b_ptr = b.raw(); const ValueTypeA *Dinv_ptr = this->Dinv.raw(); ValueTypeB *x_ptr = x.raw(); IndexType num_rows = A.get_num_rows(); IndexType offset = 0; A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows); const int threads_per_block = 512; const int eightwarps_per_block = threads_per_block / 4; const int num_blocks = std::min( AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / eightwarps_per_block + 1); cudaFuncSetCacheConfig(jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2>, cudaFuncCachePreferL1); jacobiSmooth4by4ZeroBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2> <<< num_blocks, threads_per_block>>> (Dinv_ptr, b_ptr, this->weight, offset + num_rows, x_ptr, offset); cudaCheckError(); } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class BlockJacobiSolver_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class BlockJacobiSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace block_jacobi } // namespace amgx
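// ---------------------------------------------------------------------------
// Editor's note: the kernels and host routines above all realize one damped
// (block-)Jacobi sweep, x_out = x + weight * Dinv * (b - A * x), where Dinv
// holds the inverted diagonal blocks. The sketch below restates that update
// for a plain CSR matrix with scalar (1x1) blocks so the data flow is easier
// to follow. It is illustrative only and is not part of AMGX; the names
// CsrMatrix and jacobi_sweep_reference are hypothetical.
// ---------------------------------------------------------------------------
#include <vector>

struct CsrMatrix
{
    int n_rows = 0;
    std::vector<int> row_offsets;    // size n_rows + 1
    std::vector<int> col_indices;    // size nnz
    std::vector<double> values;      // size nnz
};

// One damped Jacobi iteration: x_new[i] = x[i] + weight * (b[i] - (A*x)[i]) / A(i,i).
inline std::vector<double> jacobi_sweep_reference(const CsrMatrix &A,
                                                  const std::vector<double> &b,
                                                  const std::vector<double> &x,
                                                  double weight)
{
    std::vector<double> x_new(x.size());
    for (int i = 0; i < A.n_rows; ++i)
    {
        double Axi  = 0.0;
        double diag = 1.0;  // fallback only; the real solver guards against near-zero diagonals
        for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; ++j)
        {
            Axi += A.values[j] * x[A.col_indices[j]];
            if (A.col_indices[j] == i) { diag = A.values[j]; }
        }
        x_new[i] = x[i] + weight * (b[i] - Axi) / diag;
    }
    return x_new;
}
// For block sizes larger than 1 the same structure applies, except that diag
// becomes a bsize x bsize block whose inverse is precomputed into Dinv by the
// computeDinv_* routines above.
// ---------------------------------------------------------------------------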
93fe13539dbee2beb7076281336fe49210a64448.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/cuda/cu_inc/common.cuh"
#include "image_scaler_impl.h"

using namespace onnxruntime::cuda;

namespace onnxruntime {
namespace contrib {
namespace cuda {

template <typename T>
__global__ void _CropKernel(
    const T* input_data,
    const int src_start_x,
    const int src_start_y,
    const int src_w,
    const int src_hw,
    const fast_divmod fdm_dst_w,
    const fast_divmod fdm_dst_hw,
    T* output_data,
    const CUDA_LONG N) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
  int dst_xy, dst_nc;
  fdm_dst_hw.divmod(id, dst_nc, dst_xy);
  int dst_x, dst_y;
  fdm_dst_w.divmod(dst_xy, dst_y, dst_x);
  output_data[id] = input_data[dst_nc * src_hw + (dst_y + src_start_y) * src_w + (dst_x + src_start_x)];
}

template <typename T>
void CropImpl(
    const T* input_data,
    const int src_start_x,
    const int src_start_y,
    const int src_w,
    const int src_hw,
    const fast_divmod& fdm_dst_w,
    const fast_divmod& fdm_dst_hw,
    T* output_data,
    const size_t N) {
  int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
  hipLaunchKernelGGL(( _CropKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
      input_data, src_start_x, src_start_y, src_w, src_hw, fdm_dst_w, fdm_dst_hw, output_data, (CUDA_LONG)N);
}

#define SPECIALIZED_IMPL(T) \
  template void CropImpl<T>(const T* input_data, const int src_start_x, const int src_start_y, const int src_w, const int src_hw, const fast_divmod& fdm_dst_w, const fast_divmod& fdm_dst_hw, T* output_data, const size_t N);

SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
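// ---------------------------------------------------------------------------
// Editor's note: a host-side restatement of the index arithmetic _CropKernel
// performs per output element, using plain division/modulo where the kernel
// uses fast_divmod. Given a flat NCHW output index `id`, it recovers the
// (batch*channel, y, x) coordinates and maps them into the larger source
// image. Illustrative only; crop_reference_index is a hypothetical helper,
// not part of onnxruntime.
// ---------------------------------------------------------------------------
#include <cstddef>

// Returns the flat index into the source buffer that feeds output element `id`.
inline std::size_t crop_reference_index(std::size_t id,
                                        int dst_w, int dst_h,
                                        int src_w, int src_h,
                                        int src_start_x, int src_start_y)
{
    const std::size_t dst_hw = static_cast<std::size_t>(dst_w) * dst_h;  // role of fdm_dst_hw
    const std::size_t src_hw = static_cast<std::size_t>(src_w) * src_h;

    const std::size_t dst_nc = id / dst_hw;   // which (batch, channel) plane
    const std::size_t dst_xy = id % dst_hw;   // offset inside that plane
    const std::size_t dst_y  = dst_xy / dst_w;
    const std::size_t dst_x  = dst_xy % dst_w;

    return dst_nc * src_hw
         + (dst_y + src_start_y) * src_w
         + (dst_x + src_start_x);
}
// ---------------------------------------------------------------------------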
93fe13539dbee2beb7076281336fe49210a64448.cu
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/cuda/cu_inc/common.cuh"
#include "image_scaler_impl.h"

using namespace onnxruntime::cuda;

namespace onnxruntime {
namespace contrib {
namespace cuda {

template <typename T>
__global__ void _CropKernel(
    const T* input_data,
    const int src_start_x,
    const int src_start_y,
    const int src_w,
    const int src_hw,
    const fast_divmod fdm_dst_w,
    const fast_divmod fdm_dst_hw,
    T* output_data,
    const CUDA_LONG N) {
  CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
  int dst_xy, dst_nc;
  fdm_dst_hw.divmod(id, dst_nc, dst_xy);
  int dst_x, dst_y;
  fdm_dst_w.divmod(dst_xy, dst_y, dst_x);
  output_data[id] = input_data[dst_nc * src_hw + (dst_y + src_start_y) * src_w + (dst_x + src_start_x)];
}

template <typename T>
void CropImpl(
    const T* input_data,
    const int src_start_x,
    const int src_start_y,
    const int src_w,
    const int src_hw,
    const fast_divmod& fdm_dst_w,
    const fast_divmod& fdm_dst_hw,
    T* output_data,
    const size_t N) {
  int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
  _CropKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
      input_data, src_start_x, src_start_y, src_w, src_hw, fdm_dst_w, fdm_dst_hw, output_data, (CUDA_LONG)N);
}

#define SPECIALIZED_IMPL(T) \
  template void CropImpl<T>(const T* input_data, const int src_start_x, const int src_start_y, const int src_w, const int src_hw, const fast_divmod& fdm_dst_w, const fast_divmod& fdm_dst_hw, T* output_data, const size_t N);

SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)

}  // namespace cuda
}  // namespace contrib
}  // namespace onnxruntime
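// ---------------------------------------------------------------------------
// Editor's note: CropImpl sizes its grid as ceil(N / maxThreadsPerBlock) and
// relies on the per-thread bounds check (CALCULATE_ELEMENTWISE_INDEX_OR_EXIT)
// to retire the surplus threads of the last block. A minimal standalone
// version of that elementwise launch pattern is sketched below; the kernel
// and helper names are hypothetical, and the block size of 256 is an
// assumption standing in for GridDim::maxThreadsPerBlock.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstddef>

__global__ void copy_kernel(const float* in, float* out, std::size_t n)
{
    const std::size_t id = static_cast<std::size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    if (id >= n) return;  // the last block is usually only partially full
    out[id] = in[id];
}

inline void launch_copy(const float* in, float* out, std::size_t n, cudaStream_t stream)
{
    constexpr int threads_per_block = 256;
    const int blocks = static_cast<int>((n + threads_per_block - 1) / threads_per_block);
    copy_kernel<<<blocks, threads_per_block, 0, stream>>>(in, out, n);
}
// ---------------------------------------------------------------------------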
779882d4a880272e3f1a101090cd3a04401072e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpufit.h" #include "cuda_kernels.cuh" #include "definitions.h" #include "linear_1d.cuh" #include "gauss_1d_hip.cuh" #include "gauss_2d.cuh" #include "gauss_2d_elliptic.cuh" #include "gauss_2d_rotated.cuh" #include "cauchy_2d_elliptic.cuh" #include "lse.cuh" #include "mle_hip.cuh" /* Description of the cuda_calc_curve_values function * =================================================== * * This function calls one of the fitting curve functions depending on the input * parameter model_id. The fitting curve function calculates the values of * the fitting curves and its partial derivatives with respect to the fitting * curve parameters. Multiple fits are calculated in parallel. * * Parameters: * * parameters: An input vector of concatenated sets of model parameters. * * n_fits: The number of fits. * * n_points: The number of data points per fit. * * n_parameters: The number of curve parameters. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * values: An output vector of concatenated sets of model function values. * * derivatives: An output vector of concatenated sets of model function partial * derivatives. * * n_fits_per_block: The number of fits calculated by each thread block. * * n_blocks_per_fit: The number of thread blocks used to calculate one fit. * * model_id: The fitting model ID. * * chunk_index: The data chunk index. * * user_info: An input vector containing user information. * * user_info_size: The size of user_info in bytes. * * Calling the cuda_calc_curve_values function * =========================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_points * n_fits_per_block / n_blocks_per_fit; * blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit; * * cuda_calc_curve_values<<< blocks, threads >>>( * parameters, * n_fits, * n_points, * n_parameters, * finished, * values, * derivatives, * n_fits_per_block, * n_blocks_per_fit, * model_id, * chunk_index, * user_info, * user_info_size); * */ __global__ void cuda_calc_curve_values( float const * parameters, int const n_fits, int const n_points, int const n_parameters, int const * finished, float * values, float * derivatives, int const n_fits_per_block, int const n_blocks_per_fit, int const model_id, int const chunk_index, char * user_info, std::size_t const user_info_size) { int const fit_in_block = threadIdx.x / n_points; int const fit_index = blockIdx.x * n_fits_per_block / n_blocks_per_fit + fit_in_block; int const fit_piece = blockIdx.x % n_blocks_per_fit; int const point_index = threadIdx.x - fit_in_block * n_points + fit_piece * blockDim.x; int const first_point = fit_index * n_points; float * current_values = values + first_point; float * current_derivatives = derivatives + first_point * n_parameters; float const * current_parameters = parameters + fit_index * n_parameters; if (finished[fit_index]) return; if (point_index >= n_points) return; if (model_id == GAUSS_1D) calculate_gauss1d(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == GAUSS_2D) calculate_gauss2d(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == GAUSS_2D_ELLIPTIC) calculate_gauss2delliptic(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == GAUSS_2D_ROTATED) calculate_gauss2drotated(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == CAUCHY_2D_ELLIPTIC) calculate_cauchy2delliptic(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == LINEAR_1D) calculate_linear1d(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); } /* Description of the sum_up_floats function * ========================================== * * This function sums up a vector of float values and stores the result at the * first place of the vector. * * Parameters: * * shared_array: An input vector of float values. The vector must be stored * on the shared memory of the GPU. The size of this vector must be a * power of two. Use zero padding to extend it to the next highest * power of 2 greater than the number of elements. * * size: The number of elements in the input vector considering zero padding. * * Calling the sum_up_floats function * ================================== * * This __device__ function can be only called from a __global__ function or * an other __device__ function. When calling the function, the blocks and threads * of the __global__ function must be set up correctly, as shown in the following * example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = size * vectors_per_block; * blocks.x = n_vectors / vectors_per_block; * * global_function<<< blocks, threads >>>(parameter1, ...); * */ __device__ void sum_up_floats(volatile float* shared_array, int const size) { int const fit_in_block = threadIdx.x / size; int const point_index = threadIdx.x - (fit_in_block*size); int current_n_points = size >> 1; __syncthreads(); while (current_n_points) { if (point_index < current_n_points) { shared_array[point_index] += shared_array[point_index + current_n_points]; } current_n_points >>= 1; __syncthreads(); } } /* Description of the cuda_sum_chi_square_subtotals function * ========================================================== * * This function sums up chi_square subtotals in place. * * Parameters: * * chi_squares: A vector of chi-square values for multiple fits. * in: subtotals * out: totals * * n_blocks_per_fit: The number of blocks used to calculate one fit. It is * equivalent to the number of subtotals per fit. * * n_fits: The number of fits. * * finished: An input vector which allows the calculation to be skipped * for single fits. * * Calling the cuda_sum_chi_square_subtotals function * ================================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_sum_chi_square_subtotals<<< blocks, threads >>>( * chi_squares, * n_blocks_per_fit, * n_fits, * finished); * */ __global__ void cuda_sum_chi_square_subtotals( float * chi_squares, int const n_blocks_per_fit, int const n_fits, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_fits || finished[index]) return; float * chi_square = chi_squares + index; double sum = 0.0; for (int i = 0; i < n_blocks_per_fit; i++) sum += chi_square[i * n_fits]; chi_square[0] = sum; } /* Description of the cuda_check_fit_improvement function * ======================================================= * * This function checks after each calculation of chi-square values whether the * currently calculated chi-square values are lower than chi-square values calculated * in the previous iteration and sets the iteration_failed flags. * * Parameters: * * iteration_failed: An output vector of flags which indicate whether the fitting * process improved the fit in the last iteration. If yes it is set * to 0 otherwise to 1. * * chi_squares: An input vector of chi-square values for multiple fits. * * prev_chi_squares: An input vector of chi-square values for multiple fits calculated * in the previous iteration. * * n_fits: The number of fits. * * finished: An input vector which allows the calculation to be skipped * for single fits. * * Calling the cuda_check_fit_improvement function * =============================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_check_fit_improvement <<< blocks, threads >>>( * iteration_failed, * chi_squares, * prev_chi_squares, * n_fits, * finished); * */ __global__ void cuda_check_fit_improvement( int * iteration_failed, float const * chi_squares, float const * prev_chi_squares, int const n_fits, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_fits || finished[index]) return; bool const prev_chi_squares_initialized = prev_chi_squares[index] != 0.f; bool const chi_square_increased = (chi_squares[index] >= prev_chi_squares[index]); if (prev_chi_squares_initialized && chi_square_increased) { iteration_failed[index] = 1; } else { iteration_failed[index] = 0; } } /* Description of the cuda_calculate_chi_squares function * ======================================================== * * This function calls one of the estimator funktions depending on the input * parameter estimator_id. The estimator function calculates the chi-square values. * The calcluation is performed for multiple fits in parallel. * * Parameters: * * chi_squares: An output vector of concatenated chi-square values. * * states: An output vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. In this function * it is only used for MLE. It is set to 3 if a fitting curve value is * negative. This vector includes the states for multiple fits. * * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. * * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_points: The number of data points per fit. * * n_fits: The number of fits. * * estimator_id: The estimator ID. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * user_info: An input vector containing user information. * * user_info_size: The size of user_info in bytes. * * Calling the cuda_calculate_chi_squares function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit; * blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit; * * int const shared_size = sizeof(float) * threads.x; * * cuda_calculate_chi_squares<<< blocks, threads, shared_size >>>( * chi_squares, * states, * data, * values, * weights, * n_points, * n_fits, * estimator_id, * finished, * n_fits_per_block, * user_info, * user_info_size); * */ __global__ void cuda_calculate_chi_squares( float * chi_squares, int * states, float const * data, float const * values, float const * weights, int const n_points, int const n_fits, int const estimator_id, int const * finished, int const n_fits_per_block, char * user_info, std::size_t const user_info_size) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const fit_piece = blockIdx.x / n_fits; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits; int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size; int const first_point = fit_index * n_points; if (finished[fit_index]) { return; } float const * current_data = &data[first_point]; float const * current_weight = weights ? &weights[first_point] : NULL; float const * current_value = &values[first_point]; int * current_state = &states[fit_index]; extern __shared__ float extern_array[]; volatile float * shared_chi_square = extern_array + (fit_in_block - fit_piece) * shared_size; if (point_index >= n_points) { shared_chi_square[point_index] = 0.f; } if (point_index < n_points) { if (estimator_id == LSE) { calculate_chi_square_lse( shared_chi_square, point_index, current_data, current_value, current_weight, current_state, user_info, user_info_size); } else if (estimator_id == MLE) { calculate_chi_square_mle( shared_chi_square, point_index, current_data, current_value, current_weight, current_state, user_info, user_info_size); } } shared_chi_square += fit_piece * shared_size; sum_up_floats(shared_chi_square, shared_size); chi_squares[fit_index + fit_piece * n_fits] = shared_chi_square[0]; } /* Description of the cuda_sum_gradient_subtotals function * ======================================================== * * This function sums up the chi-square gradient subtotals in place. * * Parameters: * * gradients: A vector of gradient values for multiple fits. * in: subtotals * out: totals * * n_blocks_per_fit: The number of blocks used to calculate one fit * * n_fits: The number of fits. * * n_parameters_to_fit: The number of model parameters, that are not held fixed. * * skip: An input vector which allows the calculation to be skipped for single fits. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * Calling the cuda_sum_gradient_subtotals function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_sum_gradient_subtotals<<< blocks,threads >>>( * gradients, * n_blocks_per_fit, * n_fits, * n_parameters_to_fit, * skip, * finished); * */ __global__ void cuda_sum_gradient_subtotals( float * gradients, int const n_blocks_per_fit, int const n_fits, int const n_parameters, int const * skip, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; int const fit_index = index / n_parameters; if (fit_index >= n_fits || finished[fit_index] || skip[fit_index]) return; float * gradient = gradients + index; double sum = 0.0; for (int i = 0; i < n_blocks_per_fit; i++) sum += gradient[i * n_fits * n_parameters]; gradient[0] = sum; } /* Description of the cuda_calculate_gradients function * ===================================================== * * This function calls one of the gradient functions depending on the input * parameter estimator_id. The gradient function calculates the gradient values * of the chi-square function calling a __device__ function. The calcluation is * performed for multiple fits in parallel. * * Parameters: * * gradients: An output vector of concatenated sets of gradient vector values. * * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. * * derivatives: An input vector of concatenated sets of model function partial * derivatives. * * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_points: The number of data points per fit. * * n_fits: The number of fits. * * n_parameters: The number of fitting curve parameters. * * n_parameters_to_fit: The number of fitting curve parameters, that are not held * fixed. * * parameters_to_fit_indices: An input vector of indices of fitting curve parameters, * that are not held fixed. * * estimator_id: The estimator ID. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * skip: An input vector which allows the calculation to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * user_info: An input vector containing user information. * * user_info_size: The number of elements in user_info. * * Calling the cuda_calculate_gradients function * ============================================= * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit; * blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit; * * int const shared_size = sizeof(float) * threads.x; * * cuda_calculate_gradients<<< blocks, threads, shared_size >>>( * gradients, * data, * values, * derivatives, * weight, * n_points, * n_fits, * n_parameters, * n_parameters_to_fit, * parameters_to_fit_indices, * estimator_id, * finished, * skip, * n_fits_per_block, * user_info, * user_info_size); * */ __global__ void cuda_calculate_gradients( float * gradients, float const * data, float const * values, float const * derivatives, float const * weights, int const n_points, int const n_fits, int const n_parameters, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const estimator_id, int const * finished, int const * skip, int const n_fits_per_block, char * user_info, std::size_t const user_info_size) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const fit_piece = blockIdx.x / n_fits; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits; int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size; int const first_point = fit_index * n_points; if (finished[fit_index] || skip[fit_index]) { return; } float const * current_data = &data[first_point]; float const * current_weight = weights ? &weights[first_point] : NULL; float const * current_derivative = &derivatives[first_point * n_parameters]; float const * current_value = &values[first_point]; extern __shared__ float extern_array[]; volatile float * shared_gradient = extern_array + (fit_in_block - fit_piece) * shared_size; if (point_index >= n_points) { shared_gradient[point_index] = 0.f; } for (int parameter_index = 0; parameter_index < n_parameters_to_fit; parameter_index++) { if (point_index < n_points) { int const derivative_index = parameters_to_fit_indices[parameter_index] * n_points + point_index; if (estimator_id == LSE) { calculate_gradient_lse( shared_gradient, point_index, derivative_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } else if (estimator_id == MLE) { calculate_gradient_mle( shared_gradient, point_index, derivative_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } } sum_up_floats(shared_gradient + fit_piece * shared_size, shared_size); gradients[(fit_index * n_parameters_to_fit + parameter_index) + fit_piece * n_fits * n_parameters_to_fit] = shared_gradient[fit_piece * shared_size]; } } /* Description of the cuda_calculate_hessians function * ==================================================== * * This function calls one of the hessian function depending on the input * parameter estimator_id. The hessian funcion calculates the hessian matrix * values of the chi-square function calling a __device__ functions. The * calcluation is performed for multiple fits in parallel. * * Parameters: * * hessians: An output vector of concatenated sets of hessian matrix values. * * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. * * derivatives: An input vector of concatenated sets of model function partial * derivatives. 
* * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_points: The number of data points per fit. * * n_parameters: The number of fitting curve parameters. * * n_parameters_to_fit: The number of fitting curve parameters, that are not held * fixed. * * parameters_to_fit_indices: An input vector of indices of fitting curve parameters, * that are not held fixed. * * estimator_id: The estimator ID. * * skip: An input vector which allows the calculation to be skipped for single fits. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * user_info: An input vector containing user information. * * user_info_size: The size of user_info in bytes. * * Calling the cuda_calculate_hessians function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_parameters_to_fit; * threads.y = n_parameters_to_fit; * blocks.x = n_fits; * * cuda_calculate_hessians<<< blocks, threads >>>( * hessians, * data, * values, * derivatives, * weight, * n_points, * n_parameters, * n_parameters_to_fit, * parameters_to_fit_indices, * estimator_id, * skip, * finished, * user_info, * user_info_size); * */ __global__ void cuda_calculate_hessians( float * hessians, float const * data, float const * values, float const * derivatives, float const * weights, int const n_points, int const n_parameters, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const estimator_id, int const * skip, int const * finished, char * user_info, std::size_t const user_info_size) { int const fit_index = blockIdx.x; int const first_point = fit_index * n_points; int const parameter_index_i = threadIdx.x; int const parameter_index_j = threadIdx.y; if (finished[fit_index] || skip[fit_index]) { return; } float * current_hessian = &hessians[fit_index * n_parameters_to_fit * n_parameters_to_fit]; float const * current_data = &data[first_point]; float const * current_weight = weights ? &weights[first_point] : NULL; float const * current_derivative = &derivatives[first_point*n_parameters]; float const * current_value = &values[first_point]; int const hessian_index_ij = parameter_index_i * n_parameters_to_fit + parameter_index_j; int const derivative_index_i = parameters_to_fit_indices[parameter_index_i] * n_points; int const derivative_index_j = parameters_to_fit_indices[parameter_index_j] * n_points; double sum = 0.0; for (int point_index = 0; point_index < n_points; point_index++) { if (estimator_id == LSE) { calculate_hessian_lse( &sum, point_index, derivative_index_i + point_index, derivative_index_j + point_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } else if (estimator_id == MLE) { calculate_hessian_mle( &sum, point_index, derivative_index_i + point_index, derivative_index_j + point_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } } current_hessian[hessian_index_ij] = sum; } /* Description of the cuda_modify_step_widths function * ==================================================== * * This function midifies the diagonal elements of the hessian matrices by multiplying * them by the factor (1+ lambda). This operation controls the step widths of the * iteration. 
If the last iteration failed, befor modifying the hessian, the diagonal * elements of the hessian are calculated back to represent unmodified values. * * hessians: An input and output vector of hessian matrices, which are modified by * the lambda values. * * lambdas: An input vector of values for modifying the hessians. * * n_parameters: The number of fitting curve parameters. * * iteration_failed: An input vector which indicates whether the previous iteration * failed. * * finished: An input vector which allows the calculation to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * Calling the cuda_modify_step_widths function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_parameters_to_fit * n_fits_per_block; * blocks.x = n_fits / n_fits_per_block; * * cuda_modify_step_width<<< blocks, threads >>>( * hessians, * lambdas, * n_parameters, * iteration_failed, * finished, * n_fits_per_block); * */ __global__ void cuda_modify_step_widths( float * hessians, float const * lambdas, unsigned int const n_parameters, int const * iteration_failed, int const * finished, int const n_fits_per_block) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const parameter_index = threadIdx.x - fit_in_block * shared_size; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block; if (finished[fit_index]) { return; } float * current_hessian = &hessians[fit_index * n_parameters * n_parameters]; if (iteration_failed[fit_index]) { current_hessian[parameter_index * n_parameters + parameter_index] = current_hessian[parameter_index * n_parameters + parameter_index] / (1.0f + lambdas[fit_index] / 10.f); } current_hessian[parameter_index * n_parameters + parameter_index] = current_hessian[parameter_index * n_parameters + parameter_index] * (1.0f + lambdas[fit_index]); } /* Description of the cuda_update_parameters function * =================================================== * * This function stores the fitting curve parameter values in prev_parameters and * updates them after each iteration. * * Parameters: * * parameters: An input and output vector of concatenated sets of model * parameters. * * prev_parameters: An input and output vector of concatenated sets of model * parameters calculated by the previous iteration. * * deltas: An input vector of concatenated delta values, which are added to the * model parameters. * * n_parameters_to_fit: The number of fitted curve parameters. * * parameters_to_fit_indices: The indices of fitted curve parameters. * * finished: An input vector which allows the parameter update to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each threadblock. * * Calling the cuda_update_parameters function * =========================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_parameters * n_fits_per_block; * blocks.x = n_fits / n_fits_per_block; * * cuda_update_parameters<<< blocks, threads >>>( * parameters, * prev_parameters, * deltas, * n_parameters_to_fit, * parameters_to_fit_indices, * finished, * n_fits_per_block); * */ __global__ void cuda_update_parameters( float * parameters, float * prev_parameters, float const * deltas, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const * finished, int const n_fits_per_block) { int const n_parameters = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / n_parameters; int const parameter_index = threadIdx.x - fit_in_block * n_parameters; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block; float * current_parameters = &parameters[fit_index * n_parameters]; float * current_prev_parameters = &prev_parameters[fit_index * n_parameters]; current_prev_parameters[parameter_index] = current_parameters[parameter_index]; if (finished[fit_index]) { return; } if (parameter_index >= n_parameters_to_fit) { return; } float const * current_deltas = &deltas[fit_index * n_parameters_to_fit]; current_parameters[parameters_to_fit_indices[parameter_index]] += current_deltas[parameter_index]; } /* Description of the cuda_update_state_after_gaussjordan function * ================================================================ * * This function interprets the singular flag vector of the Gauss Jordan function * according to this LM implementation. * * Parameters: * * n_fits: The number of fits. * * singular_checks: An input vector used to report whether a fit is singular. * * states: An output vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. If a hessian * matrix of a fit is singular, it is set to 2. * * Calling the cuda_update_state_after_gaussjordan function * ======================================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_update_state_after_gaussjordan<<< blocks, threads >>>( * n_fits, * singular_checks, * states); * */ __global__ void cuda_update_state_after_gaussjordan( int const n_fits, int const * singular_checks, int * states) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (singular_checks[fit_index] == 1) { states[fit_index] = STATE_SINGULAR_HESSIAN; } } /* Description of the cuda_check_for_convergence function * ======================================================= * * This function checks after each iteration whether the fits are converged or not. * It also checks whether the set maximum number of iterations is reached. * * Parameters: * * finished: An input and output vector which allows the calculation to be skipped * for single fits. * * tolerance: The tolerance value for the convergence set by user. * * states: An output vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. If the maximum * number of iterations is reached without converging, it is set to 1. If * the fit converged it keeps its initial value of 0. * * chi_squares: An input vector of chi-square values for multiple fits. 
Used for the * convergence check. * * prev_chi_squares: An input vector of chi-square values for multiple fits calculated * in the previous iteration. Used for the convergence check. * * iteration: The value of the current iteration. It is compared to the value * of the maximum number of iteration set by user. * * max_n_iterations: The maximum number of iterations set by user. * * n_fits: The number of fits. * * Calling the cuda_check_for_convergence function * =============================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_check_for_convergence<<< blocks, threads >>>( * finished, * tolerance, * states, * chi_squares, * prev_chi_squares, * iteration, * max_n_iterations, * n_fits); * */ __global__ void cuda_check_for_convergence( int * finished, float const tolerance, int * states, float const * chi_squares, float const * prev_chi_squares, int const iteration, int const max_n_iterations, int const n_fits) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (finished[fit_index]) { return; } int const fit_found = abs(chi_squares[fit_index] - prev_chi_squares[fit_index]) < tolerance * fmaxf(1, chi_squares[fit_index]); int const max_n_iterations_reached = iteration == max_n_iterations - 1; if (fit_found) { finished[fit_index] = 1; } else if (max_n_iterations_reached) { states[fit_index] = STATE_MAX_ITERATION; } } /* Description of the cuda_evaluate_iteration function * ==================================================== * * This function evaluates the current iteration. * - It marks a fit as finished if a problem occured. * - It saves the needed number of iterations if a fit finished. * - It checks if all fits finished * * Parameters: * * all_finished: An output flag, that indicates whether all fits finished. * * n_iterations: An output vector of needed iterations for each fit. * * finished: An input and output vector which allows the evaluation to be skipped * for single fits * * iteration: The values of the current iteration. * * states: An input vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. * * n_fits: The number of fits. * * Calling the cuda_evaluate_iteration function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_evaluate_iteration<<< blocks, threads >>>( * all_finished, * n_iterations, * finished, * iteration, * states, * n_fits); * */ __global__ void cuda_evaluate_iteration( int * all_finished, int * n_iterations, int * finished, int const iteration, int const * states, int const n_fits) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (states[fit_index] != STATE_CONVERGED) { finished[fit_index] = 1; } if (finished[fit_index] && n_iterations[fit_index] == 0) { n_iterations[fit_index] = iteration + 1; } if (!finished[fit_index]) { * all_finished = 0; } } /* Description of the cuda_prepare_next_iteration function * ======================================================== * * This function prepares the next iteration. It either updates previous * chi-square values or sets currently calculated chi-square values and * parameters to values calculated by the previous iteration. This function also * updates lambda values. * * Parameters: * * lambdas: An output vector of values which control the step width by modifying * the diagonal elements of the hessian matrices. * * chi_squares: An input and output vector of chi-square values for multiple fits. * * prev_chi_squares: An input and output vector of chi-square values for multiple * fits calculated in the previous iteration. * * parameters: An output vector of concatenated sets of model parameters. * * prev_parameters: An input vector of concatenated sets of model parameters * calculated in the previous iteration. * * n_fits: The number of fits. * * n_parameters: The number of fitting curve parameters. * * Calling the cuda_prepare_next_iteration function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_prepare_next_iteration<<< blocks, threads >>>( * lambdas, * chi_squares, * prev_chi_squares, * parameters, * prev_parameters, * n_fits, * n_parameters); * */ __global__ void cuda_prepare_next_iteration( float * lambdas, float * chi_squares, float * prev_chi_squares, float * parameters, float const * prev_parameters, int const n_fits, int const n_parameters) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (chi_squares[fit_index] < prev_chi_squares[fit_index]) { lambdas[fit_index] *= 0.1f; prev_chi_squares[fit_index] = chi_squares[fit_index]; } else { lambdas[fit_index] *= 10.f; chi_squares[fit_index] = prev_chi_squares[fit_index]; for (int iparameter = 0; iparameter < n_parameters; iparameter++) { parameters[fit_index * n_parameters + iparameter] = prev_parameters[fit_index * n_parameters + iparameter]; } } }
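/* Editor's illustrative sketch (not part of the original file)
 * ============================================================
 * The kernels above split one Levenberg-Marquardt iteration across several
 * launches. The host-side fragment below mirrors the per-fit control flow of
 * cuda_check_for_convergence and cuda_prepare_next_iteration serially, so the
 * lambda schedule and the convergence test are easier to follow. The names
 * FitControl and lm_control_step are assumptions made for this sketch only
 * and do not occur in the library.
 */

#include <cmath>

struct FitControl
{
    float lambda;            // step-width damping factor
    float chi_square;        // chi-square of the current iteration
    float prev_chi_square;   // chi-square of the previous iteration
    int finished;
    int state;               // stays 0 if converged, set to 1 on max iterations
};

void lm_control_step(
    FitControl & fit,
    float const tolerance,
    int const iteration,
    int const max_n_iterations)
{
    if (fit.finished)
        return;

    // convergence test used by cuda_check_for_convergence:
    // relative chi-square change smaller than the tolerance
    bool const fit_found
        = std::abs(fit.chi_square - fit.prev_chi_square)
        < tolerance * std::fmax(1.f, fit.chi_square);
    bool const max_n_iterations_reached = (iteration == max_n_iterations - 1);

    if (fit_found)
        fit.finished = 1;
    else if (max_n_iterations_reached)
        fit.state = 1;

    // lambda schedule of cuda_prepare_next_iteration: shrink the damping after
    // an improved chi-square, otherwise enlarge it and keep the previous value
    // (the parameter roll-back done by that kernel is omitted here)
    if (fit.chi_square < fit.prev_chi_square)
    {
        fit.lambda *= 0.1f;
        fit.prev_chi_square = fit.chi_square;
    }
    else
    {
        fit.lambda *= 10.f;
        fit.chi_square = fit.prev_chi_square;
    }
}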
779882d4a880272e3f1a101090cd3a04401072e0.cu
#include "gpufit.h" #include "cuda_kernels.cuh" #include "definitions.h" #include "linear_1d.cuh" #include "gauss_1d.cuh" #include "gauss_2d.cuh" #include "gauss_2d_elliptic.cuh" #include "gauss_2d_rotated.cuh" #include "cauchy_2d_elliptic.cuh" #include "lse.cuh" #include "mle.cuh" /* Description of the cuda_calc_curve_values function * =================================================== * * This function calls one of the fitting curve functions depending on the input * parameter model_id. The fitting curve function calculates the values of * the fitting curves and its partial derivatives with respect to the fitting * curve parameters. Multiple fits are calculated in parallel. * * Parameters: * * parameters: An input vector of concatenated sets of model parameters. * * n_fits: The number of fits. * * n_points: The number of data points per fit. * * n_parameters: The number of curve parameters. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * values: An output vector of concatenated sets of model function values. * * derivatives: An output vector of concatenated sets of model function partial * derivatives. * * n_fits_per_block: The number of fits calculated by each thread block. * * n_blocks_per_fit: The number of thread blocks used to calculate one fit. * * model_id: The fitting model ID. * * chunk_index: The data chunk index. * * user_info: An input vector containing user information. * * user_info_size: The size of user_info in bytes. * * Calling the cuda_calc_curve_values function * =========================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_points * n_fits_per_block / n_blocks_per_fit; * blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit; * * cuda_calc_curve_values<<< blocks, threads >>>( * parameters, * n_fits, * n_points, * n_parameters, * finished, * values, * derivatives, * n_fits_per_block, * n_blocks_per_fit, * model_id, * chunk_index, * user_info, * user_info_size); * */ __global__ void cuda_calc_curve_values( float const * parameters, int const n_fits, int const n_points, int const n_parameters, int const * finished, float * values, float * derivatives, int const n_fits_per_block, int const n_blocks_per_fit, int const model_id, int const chunk_index, char * user_info, std::size_t const user_info_size) { int const fit_in_block = threadIdx.x / n_points; int const fit_index = blockIdx.x * n_fits_per_block / n_blocks_per_fit + fit_in_block; int const fit_piece = blockIdx.x % n_blocks_per_fit; int const point_index = threadIdx.x - fit_in_block * n_points + fit_piece * blockDim.x; int const first_point = fit_index * n_points; float * current_values = values + first_point; float * current_derivatives = derivatives + first_point * n_parameters; float const * current_parameters = parameters + fit_index * n_parameters; if (finished[fit_index]) return; if (point_index >= n_points) return; if (model_id == GAUSS_1D) calculate_gauss1d(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == GAUSS_2D) calculate_gauss2d(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == GAUSS_2D_ELLIPTIC) calculate_gauss2delliptic(current_parameters, n_fits, n_points, 
current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == GAUSS_2D_ROTATED) calculate_gauss2drotated(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == CAUCHY_2D_ELLIPTIC) calculate_cauchy2delliptic(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); else if (model_id == LINEAR_1D) calculate_linear1d(current_parameters, n_fits, n_points, current_values, current_derivatives, point_index, fit_index, chunk_index, user_info, user_info_size); } /* Description of the sum_up_floats function * ========================================== * * This function sums up a vector of float values and stores the result at the * first place of the vector. * * Parameters: * * shared_array: An input vector of float values. The vector must be stored * on the shared memory of the GPU. The size of this vector must be a * power of two. Use zero padding to extend it to the next highest * power of 2 greater than the number of elements. * * size: The number of elements in the input vector considering zero padding. * * Calling the sum_up_floats function * ================================== * * This __device__ function can be only called from a __global__ function or * an other __device__ function. When calling the function, the blocks and threads * of the __global__ function must be set up correctly, as shown in the following * example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = size * vectors_per_block; * blocks.x = n_vectors / vectors_per_block; * * global_function<<< blocks, threads >>>(parameter1, ...); * */ __device__ void sum_up_floats(volatile float* shared_array, int const size) { int const fit_in_block = threadIdx.x / size; int const point_index = threadIdx.x - (fit_in_block*size); int current_n_points = size >> 1; __syncthreads(); while (current_n_points) { if (point_index < current_n_points) { shared_array[point_index] += shared_array[point_index + current_n_points]; } current_n_points >>= 1; __syncthreads(); } } /* Description of the cuda_sum_chi_square_subtotals function * ========================================================== * * This function sums up chi_square subtotals in place. * * Parameters: * * chi_squares: A vector of chi-square values for multiple fits. * in: subtotals * out: totals * * n_blocks_per_fit: The number of blocks used to calculate one fit. It is * equivalent to the number of subtotals per fit. * * n_fits: The number of fits. * * finished: An input vector which allows the calculation to be skipped * for single fits. * * Calling the cuda_sum_chi_square_subtotals function * ================================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_sum_chi_square_subtotals<<< blocks, threads >>>( * chi_squares, * n_blocks_per_fit, * n_fits, * finished); * */ __global__ void cuda_sum_chi_square_subtotals( float * chi_squares, int const n_blocks_per_fit, int const n_fits, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_fits || finished[index]) return; float * chi_square = chi_squares + index; double sum = 0.0; for (int i = 0; i < n_blocks_per_fit; i++) sum += chi_square[i * n_fits]; chi_square[0] = sum; } /* Description of the cuda_check_fit_improvement function * ======================================================= * * This function checks after each calculation of chi-square values whether the * currently calculated chi-square values are lower than chi-square values calculated * in the previous iteration and sets the iteration_failed flags. * * Parameters: * * iteration_failed: An output vector of flags which indicate whether the fitting * process improved the fit in the last iteration. If yes it is set * to 0 otherwise to 1. * * chi_squares: An input vector of chi-square values for multiple fits. * * prev_chi_squares: An input vector of chi-square values for multiple fits calculated * in the previous iteration. * * n_fits: The number of fits. * * finished: An input vector which allows the calculation to be skipped * for single fits. * * Calling the cuda_check_fit_improvement function * =============================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_check_fit_improvement <<< blocks, threads >>>( * iteration_failed, * chi_squares, * prev_chi_squares, * n_fits, * finished); * */ __global__ void cuda_check_fit_improvement( int * iteration_failed, float const * chi_squares, float const * prev_chi_squares, int const n_fits, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_fits || finished[index]) return; bool const prev_chi_squares_initialized = prev_chi_squares[index] != 0.f; bool const chi_square_increased = (chi_squares[index] >= prev_chi_squares[index]); if (prev_chi_squares_initialized && chi_square_increased) { iteration_failed[index] = 1; } else { iteration_failed[index] = 0; } } /* Description of the cuda_calculate_chi_squares function * ======================================================== * * This function calls one of the estimator funktions depending on the input * parameter estimator_id. The estimator function calculates the chi-square values. * The calcluation is performed for multiple fits in parallel. * * Parameters: * * chi_squares: An output vector of concatenated chi-square values. * * states: An output vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. In this function * it is only used for MLE. It is set to 3 if a fitting curve value is * negative. This vector includes the states for multiple fits. * * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. 
* * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_points: The number of data points per fit. * * n_fits: The number of fits. * * estimator_id: The estimator ID. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * user_info: An input vector containing user information. * * user_info_size: The size of user_info in bytes. * * Calling the cuda_calculate_chi_squares function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit; * blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit; * * int const shared_size = sizeof(float) * threads.x; * * cuda_calculate_chi_squares<<< blocks, threads, shared_size >>>( * chi_squares, * states, * data, * values, * weights, * n_points, * n_fits, * estimator_id, * finished, * n_fits_per_block, * user_info, * user_info_size); * */ __global__ void cuda_calculate_chi_squares( float * chi_squares, int * states, float const * data, float const * values, float const * weights, int const n_points, int const n_fits, int const estimator_id, int const * finished, int const n_fits_per_block, char * user_info, std::size_t const user_info_size) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const fit_piece = blockIdx.x / n_fits; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits; int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size; int const first_point = fit_index * n_points; if (finished[fit_index]) { return; } float const * current_data = &data[first_point]; float const * current_weight = weights ? &weights[first_point] : NULL; float const * current_value = &values[first_point]; int * current_state = &states[fit_index]; extern __shared__ float extern_array[]; volatile float * shared_chi_square = extern_array + (fit_in_block - fit_piece) * shared_size; if (point_index >= n_points) { shared_chi_square[point_index] = 0.f; } if (point_index < n_points) { if (estimator_id == LSE) { calculate_chi_square_lse( shared_chi_square, point_index, current_data, current_value, current_weight, current_state, user_info, user_info_size); } else if (estimator_id == MLE) { calculate_chi_square_mle( shared_chi_square, point_index, current_data, current_value, current_weight, current_state, user_info, user_info_size); } } shared_chi_square += fit_piece * shared_size; sum_up_floats(shared_chi_square, shared_size); chi_squares[fit_index + fit_piece * n_fits] = shared_chi_square[0]; } /* Description of the cuda_sum_gradient_subtotals function * ======================================================== * * This function sums up the chi-square gradient subtotals in place. * * Parameters: * * gradients: A vector of gradient values for multiple fits. * in: subtotals * out: totals * * n_blocks_per_fit: The number of blocks used to calculate one fit * * n_fits: The number of fits. * * n_parameters_to_fit: The number of model parameters, that are not held fixed. * * skip: An input vector which allows the calculation to be skipped for single fits. * * finished: An input vector which allows the calculation to be skipped for single * fits. 
* * Calling the cuda_sum_gradient_subtotals function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_sum_gradient_subtotals<<< blocks,threads >>>( * gradients, * n_blocks_per_fit, * n_fits, * n_parameters_to_fit, * skip, * finished); * */ __global__ void cuda_sum_gradient_subtotals( float * gradients, int const n_blocks_per_fit, int const n_fits, int const n_parameters, int const * skip, int const * finished) { int const index = blockIdx.x * blockDim.x + threadIdx.x; int const fit_index = index / n_parameters; if (fit_index >= n_fits || finished[fit_index] || skip[fit_index]) return; float * gradient = gradients + index; double sum = 0.0; for (int i = 0; i < n_blocks_per_fit; i++) sum += gradient[i * n_fits * n_parameters]; gradient[0] = sum; } /* Description of the cuda_calculate_gradients function * ===================================================== * * This function calls one of the gradient functions depending on the input * parameter estimator_id. The gradient function calculates the gradient values * of the chi-square function calling a __device__ function. The calcluation is * performed for multiple fits in parallel. * * Parameters: * * gradients: An output vector of concatenated sets of gradient vector values. * * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. * * derivatives: An input vector of concatenated sets of model function partial * derivatives. * * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_points: The number of data points per fit. * * n_fits: The number of fits. * * n_parameters: The number of fitting curve parameters. * * n_parameters_to_fit: The number of fitting curve parameters, that are not held * fixed. * * parameters_to_fit_indices: An input vector of indices of fitting curve parameters, * that are not held fixed. * * estimator_id: The estimator ID. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * skip: An input vector which allows the calculation to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * user_info: An input vector containing user information. * * user_info_size: The number of elements in user_info. * * Calling the cuda_calculate_gradients function * ============================================= * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit; * blocks.x = n_fits / n_fits_per_block * n_blocks_per_fit; * * int const shared_size = sizeof(float) * threads.x; * * cuda_calculate_gradients<<< blocks, threads, shared_size >>>( * gradients, * data, * values, * derivatives, * weight, * n_points, * n_fits, * n_parameters, * n_parameters_to_fit, * parameters_to_fit_indices, * estimator_id, * finished, * skip, * n_fits_per_block, * user_info, * user_info_size); * */ __global__ void cuda_calculate_gradients( float * gradients, float const * data, float const * values, float const * derivatives, float const * weights, int const n_points, int const n_fits, int const n_parameters, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const estimator_id, int const * finished, int const * skip, int const n_fits_per_block, char * user_info, std::size_t const user_info_size) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const fit_piece = blockIdx.x / n_fits; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block - fit_piece * n_fits; int const point_index = threadIdx.x - fit_in_block * shared_size + fit_piece * shared_size; int const first_point = fit_index * n_points; if (finished[fit_index] || skip[fit_index]) { return; } float const * current_data = &data[first_point]; float const * current_weight = weights ? &weights[first_point] : NULL; float const * current_derivative = &derivatives[first_point * n_parameters]; float const * current_value = &values[first_point]; extern __shared__ float extern_array[]; volatile float * shared_gradient = extern_array + (fit_in_block - fit_piece) * shared_size; if (point_index >= n_points) { shared_gradient[point_index] = 0.f; } for (int parameter_index = 0; parameter_index < n_parameters_to_fit; parameter_index++) { if (point_index < n_points) { int const derivative_index = parameters_to_fit_indices[parameter_index] * n_points + point_index; if (estimator_id == LSE) { calculate_gradient_lse( shared_gradient, point_index, derivative_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } else if (estimator_id == MLE) { calculate_gradient_mle( shared_gradient, point_index, derivative_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } } sum_up_floats(shared_gradient + fit_piece * shared_size, shared_size); gradients[(fit_index * n_parameters_to_fit + parameter_index) + fit_piece * n_fits * n_parameters_to_fit] = shared_gradient[fit_piece * shared_size]; } } /* Description of the cuda_calculate_hessians function * ==================================================== * * This function calls one of the hessian function depending on the input * parameter estimator_id. The hessian funcion calculates the hessian matrix * values of the chi-square function calling a __device__ functions. The * calcluation is performed for multiple fits in parallel. * * Parameters: * * hessians: An output vector of concatenated sets of hessian matrix values. * * data: An input vector of data for multiple fits * * values: An input vector of concatenated sets of model function values. * * derivatives: An input vector of concatenated sets of model function partial * derivatives. 
* * weights: An input vector of values for weighting chi-square, gradient and hessian, * while using LSE * * n_points: The number of data points per fit. * * n_parameters: The number of fitting curve parameters. * * n_parameters_to_fit: The number of fitting curve parameters, that are not held * fixed. * * parameters_to_fit_indices: An input vector of indices of fitting curve parameters, * that are not held fixed. * * estimator_id: The estimator ID. * * skip: An input vector which allows the calculation to be skipped for single fits. * * finished: An input vector which allows the calculation to be skipped for single * fits. * * user_info: An input vector containing user information. * * user_info_size: The size of user_info in bytes. * * Calling the cuda_calculate_hessians function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_parameters_to_fit; * threads.y = n_parameters_to_fit; * blocks.x = n_fits; * * cuda_calculate_hessians<<< blocks, threads >>>( * hessians, * data, * values, * derivatives, * weight, * n_points, * n_parameters, * n_parameters_to_fit, * parameters_to_fit_indices, * estimator_id, * skip, * finished, * user_info, * user_info_size); * */ __global__ void cuda_calculate_hessians( float * hessians, float const * data, float const * values, float const * derivatives, float const * weights, int const n_points, int const n_parameters, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const estimator_id, int const * skip, int const * finished, char * user_info, std::size_t const user_info_size) { int const fit_index = blockIdx.x; int const first_point = fit_index * n_points; int const parameter_index_i = threadIdx.x; int const parameter_index_j = threadIdx.y; if (finished[fit_index] || skip[fit_index]) { return; } float * current_hessian = &hessians[fit_index * n_parameters_to_fit * n_parameters_to_fit]; float const * current_data = &data[first_point]; float const * current_weight = weights ? &weights[first_point] : NULL; float const * current_derivative = &derivatives[first_point*n_parameters]; float const * current_value = &values[first_point]; int const hessian_index_ij = parameter_index_i * n_parameters_to_fit + parameter_index_j; int const derivative_index_i = parameters_to_fit_indices[parameter_index_i] * n_points; int const derivative_index_j = parameters_to_fit_indices[parameter_index_j] * n_points; double sum = 0.0; for (int point_index = 0; point_index < n_points; point_index++) { if (estimator_id == LSE) { calculate_hessian_lse( &sum, point_index, derivative_index_i + point_index, derivative_index_j + point_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } else if (estimator_id == MLE) { calculate_hessian_mle( &sum, point_index, derivative_index_i + point_index, derivative_index_j + point_index, current_data, current_value, current_derivative, current_weight, user_info, user_info_size); } } current_hessian[hessian_index_ij] = sum; } /* Description of the cuda_modify_step_widths function * ==================================================== * * This function midifies the diagonal elements of the hessian matrices by multiplying * them by the factor (1+ lambda). This operation controls the step widths of the * iteration. 
If the last iteration failed, befor modifying the hessian, the diagonal * elements of the hessian are calculated back to represent unmodified values. * * hessians: An input and output vector of hessian matrices, which are modified by * the lambda values. * * lambdas: An input vector of values for modifying the hessians. * * n_parameters: The number of fitting curve parameters. * * iteration_failed: An input vector which indicates whether the previous iteration * failed. * * finished: An input vector which allows the calculation to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each thread block. * * Calling the cuda_modify_step_widths function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_parameters_to_fit * n_fits_per_block; * blocks.x = n_fits / n_fits_per_block; * * cuda_modify_step_width<<< blocks, threads >>>( * hessians, * lambdas, * n_parameters, * iteration_failed, * finished, * n_fits_per_block); * */ __global__ void cuda_modify_step_widths( float * hessians, float const * lambdas, unsigned int const n_parameters, int const * iteration_failed, int const * finished, int const n_fits_per_block) { int const shared_size = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / shared_size; int const parameter_index = threadIdx.x - fit_in_block * shared_size; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block; if (finished[fit_index]) { return; } float * current_hessian = &hessians[fit_index * n_parameters * n_parameters]; if (iteration_failed[fit_index]) { current_hessian[parameter_index * n_parameters + parameter_index] = current_hessian[parameter_index * n_parameters + parameter_index] / (1.0f + lambdas[fit_index] / 10.f); } current_hessian[parameter_index * n_parameters + parameter_index] = current_hessian[parameter_index * n_parameters + parameter_index] * (1.0f + lambdas[fit_index]); } /* Description of the cuda_update_parameters function * =================================================== * * This function stores the fitting curve parameter values in prev_parameters and * updates them after each iteration. * * Parameters: * * parameters: An input and output vector of concatenated sets of model * parameters. * * prev_parameters: An input and output vector of concatenated sets of model * parameters calculated by the previous iteration. * * deltas: An input vector of concatenated delta values, which are added to the * model parameters. * * n_parameters_to_fit: The number of fitted curve parameters. * * parameters_to_fit_indices: The indices of fitted curve parameters. * * finished: An input vector which allows the parameter update to be skipped for single fits. * * n_fits_per_block: The number of fits calculated by each threadblock. * * Calling the cuda_update_parameters function * =========================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * threads.x = n_parameters * n_fits_per_block; * blocks.x = n_fits / n_fits_per_block; * * cuda_update_parameters<<< blocks, threads >>>( * parameters, * prev_parameters, * deltas, * n_parameters_to_fit, * parameters_to_fit_indices, * finished, * n_fits_per_block); * */ __global__ void cuda_update_parameters( float * parameters, float * prev_parameters, float const * deltas, int const n_parameters_to_fit, int const * parameters_to_fit_indices, int const * finished, int const n_fits_per_block) { int const n_parameters = blockDim.x / n_fits_per_block; int const fit_in_block = threadIdx.x / n_parameters; int const parameter_index = threadIdx.x - fit_in_block * n_parameters; int const fit_index = blockIdx.x * n_fits_per_block + fit_in_block; float * current_parameters = &parameters[fit_index * n_parameters]; float * current_prev_parameters = &prev_parameters[fit_index * n_parameters]; current_prev_parameters[parameter_index] = current_parameters[parameter_index]; if (finished[fit_index]) { return; } if (parameter_index >= n_parameters_to_fit) { return; } float const * current_deltas = &deltas[fit_index * n_parameters_to_fit]; current_parameters[parameters_to_fit_indices[parameter_index]] += current_deltas[parameter_index]; } /* Description of the cuda_update_state_after_gaussjordan function * ================================================================ * * This function interprets the singular flag vector of the Gauss Jordan function * according to this LM implementation. * * Parameters: * * n_fits: The number of fits. * * singular_checks: An input vector used to report whether a fit is singular. * * states: An output vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. If a hessian * matrix of a fit is singular, it is set to 2. * * Calling the cuda_update_state_after_gaussjordan function * ======================================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_update_state_after_gaussjordan<<< blocks, threads >>>( * n_fits, * singular_checks, * states); * */ __global__ void cuda_update_state_after_gaussjordan( int const n_fits, int const * singular_checks, int * states) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (singular_checks[fit_index] == 1) { states[fit_index] = STATE_SINGULAR_HESSIAN; } } /* Description of the cuda_check_for_convergence function * ======================================================= * * This function checks after each iteration whether the fits are converged or not. * It also checks whether the set maximum number of iterations is reached. * * Parameters: * * finished: An input and output vector which allows the calculation to be skipped * for single fits. * * tolerance: The tolerance value for the convergence set by user. * * states: An output vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. If the maximum * number of iterations is reached without converging, it is set to 1. If * the fit converged it keeps its initial value of 0. * * chi_squares: An input vector of chi-square values for multiple fits. 
Used for the * convergence check. * * prev_chi_squares: An input vector of chi-square values for multiple fits calculated * in the previous iteration. Used for the convergence check. * * iteration: The value of the current iteration. It is compared to the value * of the maximum number of iteration set by user. * * max_n_iterations: The maximum number of iterations set by user. * * n_fits: The number of fits. * * Calling the cuda_check_for_convergence function * =============================================== * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_check_for_convergence<<< blocks, threads >>>( * finished, * tolerance, * states, * chi_squares, * prev_chi_squares, * iteration, * max_n_iterations, * n_fits); * */ __global__ void cuda_check_for_convergence( int * finished, float const tolerance, int * states, float const * chi_squares, float const * prev_chi_squares, int const iteration, int const max_n_iterations, int const n_fits) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (finished[fit_index]) { return; } int const fit_found = abs(chi_squares[fit_index] - prev_chi_squares[fit_index]) < tolerance * fmaxf(1, chi_squares[fit_index]); int const max_n_iterations_reached = iteration == max_n_iterations - 1; if (fit_found) { finished[fit_index] = 1; } else if (max_n_iterations_reached) { states[fit_index] = STATE_MAX_ITERATION; } } /* Description of the cuda_evaluate_iteration function * ==================================================== * * This function evaluates the current iteration. * - It marks a fit as finished if a problem occured. * - It saves the needed number of iterations if a fit finished. * - It checks if all fits finished * * Parameters: * * all_finished: An output flag, that indicates whether all fits finished. * * n_iterations: An output vector of needed iterations for each fit. * * finished: An input and output vector which allows the evaluation to be skipped * for single fits * * iteration: The values of the current iteration. * * states: An input vector of values which indicate whether the fitting process * was carreid out correctly or which problem occurred. * * n_fits: The number of fits. * * Calling the cuda_evaluate_iteration function * ============================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. 
* * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_evaluate_iteration<<< blocks, threads >>>( * all_finished, * n_iterations, * finished, * iteration, * states, * n_fits); * */ __global__ void cuda_evaluate_iteration( int * all_finished, int * n_iterations, int * finished, int const iteration, int const * states, int const n_fits) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (states[fit_index] != STATE_CONVERGED) { finished[fit_index] = 1; } if (finished[fit_index] && n_iterations[fit_index] == 0) { n_iterations[fit_index] = iteration + 1; } if (!finished[fit_index]) { * all_finished = 0; } } /* Description of the cuda_prepare_next_iteration function * ======================================================== * * This function prepares the next iteration. It either updates previous * chi-square values or sets currently calculated chi-square values and * parameters to values calculated by the previous iteration. This function also * updates lambda values. * * Parameters: * * lambdas: An output vector of values which control the step width by modifying * the diagonal elements of the hessian matrices. * * chi_squares: An input and output vector of chi-square values for multiple fits. * * prev_chi_squares: An input and output vector of chi-square values for multiple * fits calculated in the previous iteration. * * parameters: An output vector of concatenated sets of model parameters. * * prev_parameters: An input vector of concatenated sets of model parameters * calculated in the previous iteration. * * n_fits: The number of fits. * * n_parameters: The number of fitting curve parameters. * * Calling the cuda_prepare_next_iteration function * ================================================ * * When calling the function, the blocks and threads must be set up correctly, * as shown in the following example code. * * dim3 threads(1, 1, 1); * dim3 blocks(1, 1, 1); * * int const example_value = 256; * * threads.x = min(n_fits, example_value); * blocks.x = int(ceil(float(n_fits) / float(threads.x))); * * cuda_prepare_next_iteration<<< blocks, threads >>>( * lambdas, * chi_squares, * prev_chi_squares, * parameters, * prev_parameters, * n_fits, * n_parameters); * */ __global__ void cuda_prepare_next_iteration( float * lambdas, float * chi_squares, float * prev_chi_squares, float * parameters, float const * prev_parameters, int const n_fits, int const n_parameters) { int const fit_index = blockIdx.x * blockDim.x + threadIdx.x; if (fit_index >= n_fits) { return; } if (chi_squares[fit_index] < prev_chi_squares[fit_index]) { lambdas[fit_index] *= 0.1f; prev_chi_squares[fit_index] = chi_squares[fit_index]; } else { lambdas[fit_index] *= 10.f; chi_squares[fit_index] = prev_chi_squares[fit_index]; for (int iparameter = 0; iparameter < n_parameters; iparameter++) { parameters[fit_index * n_parameters + iparameter] = prev_parameters[fit_index * n_parameters + iparameter]; } } }
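/* Editor's illustrative sketch (not part of the original file)
 * ============================================================
 * Several of the doc comments above describe the same launch-configuration
 * pattern, e.g. for cuda_calculate_chi_squares:
 *
 *   threads.x = power_of_two_n_points * n_fits_per_block / n_blocks_per_fit;
 *   blocks.x  = n_fits / n_fits_per_block * n_blocks_per_fit;
 *   shared    = sizeof(float) * threads.x;
 *
 * The helper below shows one way such a configuration could be computed on
 * the host. The function names next_power_of_two and
 * configure_chi_square_launch are assumptions for this sketch; the library's
 * real host code may choose n_fits_per_block and n_blocks_per_fit differently.
 */

#include <cstddef>
#include <cuda_runtime.h>

// smallest power of two that is >= n (assumed helper, expects n > 0)
inline int next_power_of_two(int n)
{
    int p = 1;
    while (p < n)
        p <<= 1;
    return p;
}

inline void configure_chi_square_launch(
    int const n_points,
    int const n_fits,
    int const n_fits_per_block,   // assumed to divide n_fits evenly
    int const n_blocks_per_fit,   // assumed to divide the padded point count
    dim3 & threads,
    dim3 & blocks,
    std::size_t & shared_size)
{
    // pad the per-fit point count so sum_up_floats can reduce it pairwise
    int const power_of_two_n_points = next_power_of_two(n_points);

    threads = dim3(power_of_two_n_points * n_fits_per_block / n_blocks_per_fit, 1, 1);
    blocks = dim3(n_fits / n_fits_per_block * n_blocks_per_fit, 1, 1);

    // one float of shared memory per thread, as in the documented example
    shared_size = sizeof(float) * threads.x;
}

// A call such as
//   configure_chi_square_launch(n_points, n_fits, n_fits_per_block,
//                               n_blocks_per_fit, threads, blocks, shared_size);
//   cuda_calculate_chi_squares<<< blocks, threads, shared_size >>>(...);
// would then reproduce the setup shown in the doc comment above.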
df486a8d12bdfc4c8a45525941495fe66e6681f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_ENTRYWISE_BATCH_NORMALIZATION_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/regularizers/entrywise_batch_normalization.hpp" #include "lbann/weights/weights_helpers.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { /** * On input, sums and sqsums are assumed to be filled with zeros. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (height / bsize) x 1 x 1 */ template <typename TensorDataType> __global__ void row_sums_kernel(size_t height, size_t width, const TensorDataType* __restrict__ vals, size_t vals_ldim, TensorDataType* __restrict__ sums, TensorDataType* __restrict__ sqsums) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t row = gid; row < height; row += nthreads) { auto& sum = sums[row]; auto& sqsum = sqsums[row]; for (size_t col = 0; col < width; ++col) { const auto& x = vals[row + col * vals_ldim]; sum += x; sqsum += x * x; } } } /** * On input, batch_mean and batch_var are assumed to contain sums and * squares of sums, respectively. 
* * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (size / bsize) x 1 x 1 */ template <typename TensorDataType> __global__ void compute_statistics_kernel(size_t size, unsigned long long statistics_count, TensorDataType decay, TensorDataType* __restrict__ batch_mean, TensorDataType* __restrict__ batch_var, TensorDataType* __restrict__ running_mean, TensorDataType* __restrict__ running_var) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t i = gid; i < size; i += nthreads) { auto& mean = batch_mean[i]; auto& var = batch_var[i]; auto& _running_mean = running_mean[i]; auto& _running_var = running_var[i]; const auto sum = batch_mean[i]; const auto sqsum = batch_var[i]; const TensorDataType statistics_count_dt = TensorDataType(statistics_count); mean = sum / statistics_count_dt; const auto sqmean = sqsum / statistics_count_dt; var = (sqmean - mean * mean) * statistics_count_dt / TensorDataType(statistics_count - 1); _running_mean = decay * _running_mean + (TensorDataType{1.f} - decay) * mean; _running_var = decay * _running_var + (TensorDataType{1.f} - decay) * var; } } /** * mean = sum(x_i) / n * * var = ( sum(x_i^2)/n - mean^2 ) * n/(n-1) */ template <typename TensorDataType> void compute_batch_statistics(lbann_comm& comm, TensorDataType decay, const El::AbstractDistMatrix<TensorDataType>& input, El::AbstractDistMatrix<TensorDataType>& batch_statistics, El::AbstractDistMatrix<TensorDataType>& running_mean, El::AbstractDistMatrix<TensorDataType>& running_var) { // Local matrices const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix()); auto& local_batch_statistics = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(batch_statistics.Matrix()); auto local_batch_mean = El::View(local_batch_statistics, El::ALL, El::IR(0)); auto local_batch_var = El::View(local_batch_statistics, El::ALL, El::IR(1)); auto& local_running_mean = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(running_mean.Matrix()); auto& local_running_var = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.Matrix()); // Dimensions const size_t local_height = local_input.Height(); const size_t local_width = local_input.Width(); // Compute local sums El::Zero(batch_statistics); if (local_height > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_batch_statistics), gpu::get_sync_info(local_input)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( row_sums_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_batch_mean.Buffer(), local_batch_var.Buffer()); } // Accumulate sums between processes /// @todo Local statistics /// @todo Arbitrary group sizes comm.allreduce(batch_statistics, batch_statistics.RedundantComm(), El::mpi::SUM); const size_t statistics_count = input.Width(); // Compute mini-batch statistics from sums if (statistics_count <= 1) { // local_mean already has correct values El::Fill(local_batch_var, El::TypeTraits<TensorDataType>::One()); } else { if (local_height > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_batch_statistics), gpu::get_sync_info(local_running_mean), gpu::get_sync_info(local_running_var)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; 
grid_dims.x = (local_height + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( compute_statistics_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, statistics_count, decay, local_batch_mean.Buffer(), local_batch_var.Buffer(), local_running_mean.Buffer(), local_running_var.Buffer()); } } } /** * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (height / bsizex) x (width / bsizey) x 1 */ template <typename TensorDataType> __global__ void batchnorm_kernel(size_t height, size_t width, TensorDataType epsilon, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ mean, const TensorDataType* __restrict__ var) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& _mean = mean[row]; const auto& _var = var[row]; const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon); for (size_t col = gidy; col < width; col += nthreadsy) { const auto& x = input[row + col*input_ldim]; auto& y = output[row + col*output_ldim]; y = (x - _mean) * inv_stdev; } } } /** * y_i = (x_i - mean) / sqrt(var + epsilon) */ template <typename TensorDataType> void apply_batchnorm(DataType epsilon, const El::Matrix<TensorDataType, El::Device::GPU>& local_input, El::Matrix<TensorDataType, El::Device::GPU>& local_output, const El::Matrix<TensorDataType, El::Device::GPU>& local_mean, const El::Matrix<TensorDataType, El::Device::GPU>& local_var) { if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_input), gpu::get_sync_info(local_mean), gpu::get_sync_info(local_var)); const size_t local_height = local_input.Height(); const size_t local_width = local_input.Width(); constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (local_height + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; hydrogen::gpu::LaunchKernel( batchnorm_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, epsilon, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer()); } } template <typename TensorDataType> void fp_impl(lbann_comm& comm, TensorDataType decay, TensorDataType epsilon, bool is_training, const El::AbstractDistMatrix<TensorDataType>& input, El::AbstractDistMatrix<TensorDataType>& output, El::AbstractDistMatrix<TensorDataType>& batch_statistics, El::AbstractDistMatrix<TensorDataType>& running_mean, El::AbstractDistMatrix<TensorDataType>& running_var) { // Local matrices const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix()); auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(output.Matrix()); // Batchnorm has different behavior for training and inference if (is_training) { // For training, normalize with batch statistics compute_batch_statistics<TensorDataType>(comm, decay, input, batch_statistics, running_mean, running_var); const auto& local_batch_statistics = dynamic_cast<const El::Matrix<TensorDataType, 
El::Device::GPU>&>(batch_statistics.LockedMatrix()); const auto local_batch_mean = El::LockedView(local_batch_statistics, El::ALL, El::IR(0)); const auto local_batch_var = El::LockedView(local_batch_statistics, El::ALL, El::IR(1)); apply_batchnorm<TensorDataType>(epsilon, local_input, local_output, local_batch_mean, local_batch_var); } else { // For inference, normalize with running statistics const auto& local_running_mean = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_mean.LockedMatrix()); const auto& local_running_var = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.LockedMatrix()); apply_batchnorm<TensorDataType>(epsilon, local_input, local_output, local_running_mean, local_running_var); } } /** * On input, gradient_wrt_mean and gradient_wrt_var are assumed to be * filled with zeros. * * dL/dmean = - sum(dL/dy_i) / sqrt(var+epsilon) * * dL/dvar = - sum(dL/dy_i * (x_i-mean)) * (var+epsilon)^(-3/2) / 2 * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (height / bsize) x 1 x 1 */ template <typename TensorDataType> __global__ void bp_training_stats_gradient_kernel(size_t height, size_t width, TensorDataType epsilon, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, const TensorDataType* __restrict__ mean, const TensorDataType* __restrict__ var, TensorDataType* __restrict__ gradient_wrt_mean, TensorDataType* __restrict__ gradient_wrt_var) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t row = gid; row < height; row += nthreads) { const auto& _mean = mean[row]; const auto& _var = var[row]; const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon); auto& dmean = gradient_wrt_mean[row]; auto& dvar = gradient_wrt_var[row]; for (size_t col = 0; col < width; ++col) { const auto& x = input[row + col * input_ldim]; const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim]; dmean += - dy * inv_stdev; dvar += - dy * (x - _mean) * inv_stdev*inv_stdev*inv_stdev / TensorDataType(2); } } } /** * dL/dx_i = ( dL/dy_i / sqrt(var+epsilon) * + dL/dmean / n * + dL/dvar * (x_i - mean) * 2/(n-1) ) * * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (height / bsizex) x (width / bsizey) x 1 */ template <typename TensorDataType> __global__ void bp_training_error_signal_kernel(size_t height, size_t width, TensorDataType epsilon, unsigned long long statistics_count, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim, const TensorDataType* __restrict__ mean, const TensorDataType* __restrict__ var, const TensorDataType* __restrict__ gradient_wrt_mean, const TensorDataType* __restrict__ gradient_wrt_var) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& _mean = mean[row]; const auto& _var = var[row]; const auto& dmean = gradient_wrt_mean[row]; const auto& dvar = gradient_wrt_var[row]; const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon); for (size_t col = gidy; col < width; col += nthreadsy) { const auto& x = input[row + col * input_ldim]; const auto& dy 
= gradient_wrt_output[row + col * gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim]; dx = (dy * inv_stdev + dmean / TensorDataType(statistics_count) + dvar * (x - _mean) * TensorDataType(2) / TensorDataType(statistics_count - 1)); } } } /** @brief Backprop for training. * * Assumes forward prop uses mini-batch statistics. In other words, * statistics are dependent on input. */ template <typename TensorDataType> void bp_training_impl(lbann_comm& comm, TensorDataType epsilon, const El::AbstractDistMatrix<TensorDataType>& input, const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input, const El::AbstractDistMatrix<TensorDataType>& statistics, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_statistics) { // Local matrices const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix()); const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_output.LockedMatrix()); auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_input.Matrix()); const auto& local_statistics = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(statistics.LockedMatrix()); const auto local_mean = El::LockedView(local_statistics, El::ALL, El::IR(0)); const auto local_var = El::LockedView(local_statistics, El::ALL, El::IR(1)); auto& local_gradient_wrt_statistics = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_statistics.Matrix()); auto local_gradient_wrt_mean = El::View(local_gradient_wrt_statistics, El::ALL, El::IR(0)); auto local_gradient_wrt_var = El::View(local_gradient_wrt_statistics, El::ALL, El::IR(1)); // Dimensions const size_t local_height = local_gradient_wrt_input.Height(); const size_t local_width = local_gradient_wrt_input.Width(); // Count for statistics // Note: Output is constant if statistics count is <=1, so error // signal is zero. /// @todo Local statistics /// @todo Arbitrary group sizes const size_t statistics_count = input.Width(); if (statistics_count <= 1) { El::Zero(local_gradient_wrt_input); return; } // Compute local gradient w.r.t. batch statistics El::Zero(gradient_wrt_statistics); if (local_height > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_statistics), gpu::get_sync_info(local_statistics), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_input)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( bp_training_stats_gradient_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, epsilon, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), local_gradient_wrt_mean.Buffer(), local_gradient_wrt_var.Buffer()); } // Accumulate gradient w.r.t. statistics across processes /// @todo Local statistics /// @todo Arbitrary group sizes comm.allreduce(gradient_wrt_statistics, gradient_wrt_statistics.RedundantComm(), El::mpi::SUM); // Compute gradient w.r.t. 
input if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_statistics), gpu::get_sync_info(local_statistics), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_input)); const size_t local_height = local_input.Height(); const size_t local_width = local_input.Width(); constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (local_height + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; hydrogen::gpu::LaunchKernel( bp_training_error_signal_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, epsilon, statistics_count, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), local_gradient_wrt_mean.LockedBuffer(), local_gradient_wrt_var.LockedBuffer()); } } /** * dL/dx_i = dL/dy_i / sqrt(var+epsilon) * * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (height / bsizex) x (width / bsizey) x 1 */ template <typename TensorDataType> __global__ void bp_inference_kernel(size_t height, size_t width, TensorDataType epsilon, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim, const TensorDataType* __restrict__ running_var) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& var = running_var[row]; const auto inv_stdev = gpu_lib::rsqrt(var + epsilon); for (size_t col = gidy; col < width; col += nthreadsy) { const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim]; dx = dy * inv_stdev; } } } /** @brief Backprop for inference. * * Assumes forward prop uses running statistics. In other words, * statistics are independent of input. */ template <typename TensorDataType> void bp_inference_impl(DataType epsilon, const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input, const El::AbstractDistMatrix<TensorDataType>& running_var) { // Local matrices const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_output.LockedMatrix()); auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_input.Matrix()); const auto& local_running_var = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.LockedMatrix()); // Compute gradient w.r.t. 
input if (!local_gradient_wrt_output.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_running_var)); const size_t local_height = local_gradient_wrt_output.Height(); const size_t local_width = local_gradient_wrt_output.Width(); constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (local_height + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; hydrogen::gpu::LaunchKernel( bp_inference_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, epsilon, local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(), local_running_var.LockedBuffer()); } } template <typename TensorDataType> void bp_impl(lbann_comm& comm, TensorDataType epsilon, bool is_training, const El::AbstractDistMatrix<TensorDataType>& input, const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input, const El::AbstractDistMatrix<TensorDataType>& batch_statistics, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_batch_statistics, const El::AbstractDistMatrix<TensorDataType>& running_var) { // Batchnorm has different behavior for training and inference if (is_training) { bp_training_impl<TensorDataType>(comm, epsilon, input, gradient_wrt_output, gradient_wrt_input, batch_statistics, gradient_wrt_batch_statistics); } else { bp_inference_impl<TensorDataType>(epsilon, gradient_wrt_output, gradient_wrt_input, running_var); } } } // namespace // Template instantiation template <typename TensorDataType, data_layout T_layout, El::Device Dev> void entrywise_batch_normalization_layer<TensorDataType, T_layout, Dev>::fp_compute() { using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; const auto mode = this->get_model()->get_execution_context().get_execution_mode(); fp_impl(*this->get_comm(), this->m_decay, this->m_epsilon, mode == execution_mode::training, this->get_prev_activations(), this->get_activations(), *this->m_batch_statistics, ValuesGetter::mutable_values(this->get_weights(0)), ValuesGetter::mutable_values(this->get_weights(1))); } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void entrywise_batch_normalization_layer<TensorDataType, T_layout, Dev>::bp_compute() { const auto mode = this->get_model()->get_execution_context().get_execution_mode(); bp_impl(*this->get_comm(), this->m_epsilon, mode == execution_mode::training, this->get_prev_activations(), this->get_prev_error_signals(), this->get_error_signals(), *this->m_batch_statistics, *this->m_batch_statistics_gradient, this->weights_values(1)); } #define PROTO(T) \ template class entrywise_batch_normalization_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU>; \ template class entrywise_batch_normalization_layer< \ T, data_layout::MODEL_PARALLEL, El::Device::GPU> #include "lbann/macros/instantiate.hpp" } // namespace lbann
df486a8d12bdfc4c8a45525941495fe66e6681f5.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_ENTRYWISE_BATCH_NORMALIZATION_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/regularizers/entrywise_batch_normalization.hpp" #include "lbann/weights/weights_helpers.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { /** * On input, sums and sqsums are assumed to be filled with zeros. * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (height / bsize) x 1 x 1 */ template <typename TensorDataType> __global__ void row_sums_kernel(size_t height, size_t width, const TensorDataType* __restrict__ vals, size_t vals_ldim, TensorDataType* __restrict__ sums, TensorDataType* __restrict__ sqsums) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t row = gid; row < height; row += nthreads) { auto& sum = sums[row]; auto& sqsum = sqsums[row]; for (size_t col = 0; col < width; ++col) { const auto& x = vals[row + col * vals_ldim]; sum += x; sqsum += x * x; } } } /** * On input, batch_mean and batch_var are assumed to contain sums and * squares of sums, respectively. 
* * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (size / bsize) x 1 x 1 */ template <typename TensorDataType> __global__ void compute_statistics_kernel(size_t size, unsigned long long statistics_count, TensorDataType decay, TensorDataType* __restrict__ batch_mean, TensorDataType* __restrict__ batch_var, TensorDataType* __restrict__ running_mean, TensorDataType* __restrict__ running_var) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t i = gid; i < size; i += nthreads) { auto& mean = batch_mean[i]; auto& var = batch_var[i]; auto& _running_mean = running_mean[i]; auto& _running_var = running_var[i]; const auto sum = batch_mean[i]; const auto sqsum = batch_var[i]; const TensorDataType statistics_count_dt = TensorDataType(statistics_count); mean = sum / statistics_count_dt; const auto sqmean = sqsum / statistics_count_dt; var = (sqmean - mean * mean) * statistics_count_dt / TensorDataType(statistics_count - 1); _running_mean = decay * _running_mean + (TensorDataType{1.f} - decay) * mean; _running_var = decay * _running_var + (TensorDataType{1.f} - decay) * var; } } /** * mean = sum(x_i) / n * * var = ( sum(x_i^2)/n - mean^2 ) * n/(n-1) */ template <typename TensorDataType> void compute_batch_statistics(lbann_comm& comm, TensorDataType decay, const El::AbstractDistMatrix<TensorDataType>& input, El::AbstractDistMatrix<TensorDataType>& batch_statistics, El::AbstractDistMatrix<TensorDataType>& running_mean, El::AbstractDistMatrix<TensorDataType>& running_var) { // Local matrices const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix()); auto& local_batch_statistics = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(batch_statistics.Matrix()); auto local_batch_mean = El::View(local_batch_statistics, El::ALL, El::IR(0)); auto local_batch_var = El::View(local_batch_statistics, El::ALL, El::IR(1)); auto& local_running_mean = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(running_mean.Matrix()); auto& local_running_var = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.Matrix()); // Dimensions const size_t local_height = local_input.Height(); const size_t local_width = local_input.Width(); // Compute local sums El::Zero(batch_statistics); if (local_height > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_batch_statistics), gpu::get_sync_info(local_input)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( row_sums_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_batch_mean.Buffer(), local_batch_var.Buffer()); } // Accumulate sums between processes /// @todo Local statistics /// @todo Arbitrary group sizes comm.allreduce(batch_statistics, batch_statistics.RedundantComm(), El::mpi::SUM); const size_t statistics_count = input.Width(); // Compute mini-batch statistics from sums if (statistics_count <= 1) { // local_mean already has correct values El::Fill(local_batch_var, El::TypeTraits<TensorDataType>::One()); } else { if (local_height > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_batch_statistics), gpu::get_sync_info(local_running_mean), gpu::get_sync_info(local_running_var)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; 
grid_dims.x = (local_height + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( compute_statistics_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, statistics_count, decay, local_batch_mean.Buffer(), local_batch_var.Buffer(), local_running_mean.Buffer(), local_running_var.Buffer()); } } } /** * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (height / bsizex) x (width / bsizey) x 1 */ template <typename TensorDataType> __global__ void batchnorm_kernel(size_t height, size_t width, TensorDataType epsilon, const TensorDataType* __restrict__ input, size_t input_ldim, TensorDataType* __restrict__ output, size_t output_ldim, const TensorDataType* __restrict__ mean, const TensorDataType* __restrict__ var) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& _mean = mean[row]; const auto& _var = var[row]; const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon); for (size_t col = gidy; col < width; col += nthreadsy) { const auto& x = input[row + col*input_ldim]; auto& y = output[row + col*output_ldim]; y = (x - _mean) * inv_stdev; } } } /** * y_i = (x_i - mean) / sqrt(var + epsilon) */ template <typename TensorDataType> void apply_batchnorm(DataType epsilon, const El::Matrix<TensorDataType, El::Device::GPU>& local_input, El::Matrix<TensorDataType, El::Device::GPU>& local_output, const El::Matrix<TensorDataType, El::Device::GPU>& local_mean, const El::Matrix<TensorDataType, El::Device::GPU>& local_var) { if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output), gpu::get_sync_info(local_input), gpu::get_sync_info(local_mean), gpu::get_sync_info(local_var)); const size_t local_height = local_input.Height(); const size_t local_width = local_input.Width(); constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (local_height + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; hydrogen::gpu::LaunchKernel( batchnorm_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, epsilon, local_input.LockedBuffer(), local_input.LDim(), local_output.Buffer(), local_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer()); } } template <typename TensorDataType> void fp_impl(lbann_comm& comm, TensorDataType decay, TensorDataType epsilon, bool is_training, const El::AbstractDistMatrix<TensorDataType>& input, El::AbstractDistMatrix<TensorDataType>& output, El::AbstractDistMatrix<TensorDataType>& batch_statistics, El::AbstractDistMatrix<TensorDataType>& running_mean, El::AbstractDistMatrix<TensorDataType>& running_var) { // Local matrices const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix()); auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(output.Matrix()); // Batchnorm has different behavior for training and inference if (is_training) { // For training, normalize with batch statistics compute_batch_statistics<TensorDataType>(comm, decay, input, batch_statistics, running_mean, running_var); const auto& local_batch_statistics = dynamic_cast<const El::Matrix<TensorDataType, 
El::Device::GPU>&>(batch_statistics.LockedMatrix()); const auto local_batch_mean = El::LockedView(local_batch_statistics, El::ALL, El::IR(0)); const auto local_batch_var = El::LockedView(local_batch_statistics, El::ALL, El::IR(1)); apply_batchnorm<TensorDataType>(epsilon, local_input, local_output, local_batch_mean, local_batch_var); } else { // For inference, normalize with running statistics const auto& local_running_mean = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_mean.LockedMatrix()); const auto& local_running_var = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.LockedMatrix()); apply_batchnorm<TensorDataType>(epsilon, local_input, local_output, local_running_mean, local_running_var); } } /** * On input, gradient_wrt_mean and gradient_wrt_var are assumed to be * filled with zeros. * * dL/dmean = - sum(dL/dy_i) / sqrt(var+epsilon) * * dL/dvar = - sum(dL/dy_i * (x_i-mean)) * (var+epsilon)^(-3/2) / 2 * * Block dimensions: bsize x 1 x 1 * * Grid dimensions: (height / bsize) x 1 x 1 */ template <typename TensorDataType> __global__ void bp_training_stats_gradient_kernel(size_t height, size_t width, TensorDataType epsilon, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, const TensorDataType* __restrict__ mean, const TensorDataType* __restrict__ var, TensorDataType* __restrict__ gradient_wrt_mean, TensorDataType* __restrict__ gradient_wrt_var) { const size_t gid = threadIdx.x + blockIdx.x * blockDim.x; const size_t nthreads = blockDim.x * gridDim.x; for (size_t row = gid; row < height; row += nthreads) { const auto& _mean = mean[row]; const auto& _var = var[row]; const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon); auto& dmean = gradient_wrt_mean[row]; auto& dvar = gradient_wrt_var[row]; for (size_t col = 0; col < width; ++col) { const auto& x = input[row + col * input_ldim]; const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim]; dmean += - dy * inv_stdev; dvar += - dy * (x - _mean) * inv_stdev*inv_stdev*inv_stdev / TensorDataType(2); } } } /** * dL/dx_i = ( dL/dy_i / sqrt(var+epsilon) * + dL/dmean / n * + dL/dvar * (x_i - mean) * 2/(n-1) ) * * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (height / bsizex) x (width / bsizey) x 1 */ template <typename TensorDataType> __global__ void bp_training_error_signal_kernel(size_t height, size_t width, TensorDataType epsilon, unsigned long long statistics_count, const TensorDataType* __restrict__ input, size_t input_ldim, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim, const TensorDataType* __restrict__ mean, const TensorDataType* __restrict__ var, const TensorDataType* __restrict__ gradient_wrt_mean, const TensorDataType* __restrict__ gradient_wrt_var) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& _mean = mean[row]; const auto& _var = var[row]; const auto& dmean = gradient_wrt_mean[row]; const auto& dvar = gradient_wrt_var[row]; const auto inv_stdev = gpu_lib::rsqrt(_var + epsilon); for (size_t col = gidy; col < width; col += nthreadsy) { const auto& x = input[row + col * input_ldim]; const auto& dy 
= gradient_wrt_output[row + col * gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim]; dx = (dy * inv_stdev + dmean / TensorDataType(statistics_count) + dvar * (x - _mean) * TensorDataType(2) / TensorDataType(statistics_count - 1)); } } } /** @brief Backprop for training. * * Assumes forward prop uses mini-batch statistics. In other words, * statistics are dependent on input. */ template <typename TensorDataType> void bp_training_impl(lbann_comm& comm, TensorDataType epsilon, const El::AbstractDistMatrix<TensorDataType>& input, const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input, const El::AbstractDistMatrix<TensorDataType>& statistics, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_statistics) { // Local matrices const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(input.LockedMatrix()); const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_output.LockedMatrix()); auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_input.Matrix()); const auto& local_statistics = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(statistics.LockedMatrix()); const auto local_mean = El::LockedView(local_statistics, El::ALL, El::IR(0)); const auto local_var = El::LockedView(local_statistics, El::ALL, El::IR(1)); auto& local_gradient_wrt_statistics = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_statistics.Matrix()); auto local_gradient_wrt_mean = El::View(local_gradient_wrt_statistics, El::ALL, El::IR(0)); auto local_gradient_wrt_var = El::View(local_gradient_wrt_statistics, El::ALL, El::IR(1)); // Dimensions const size_t local_height = local_gradient_wrt_input.Height(); const size_t local_width = local_gradient_wrt_input.Width(); // Count for statistics // Note: Output is constant if statistics count is <=1, so error // signal is zero. /// @todo Local statistics /// @todo Arbitrary group sizes const size_t statistics_count = input.Width(); if (statistics_count <= 1) { El::Zero(local_gradient_wrt_input); return; } // Compute local gradient w.r.t. batch statistics El::Zero(gradient_wrt_statistics); if (local_height > 0) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_statistics), gpu::get_sync_info(local_statistics), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_input)); constexpr size_t block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel( bp_training_stats_gradient_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, epsilon, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), local_gradient_wrt_mean.Buffer(), local_gradient_wrt_var.Buffer()); } // Accumulate gradient w.r.t. statistics across processes /// @todo Local statistics /// @todo Arbitrary group sizes comm.allreduce(gradient_wrt_statistics, gradient_wrt_statistics.RedundantComm(), El::mpi::SUM); // Compute gradient w.r.t. 
input if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_statistics), gpu::get_sync_info(local_statistics), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_input)); const size_t local_height = local_input.Height(); const size_t local_width = local_input.Width(); constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (local_height + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; hydrogen::gpu::LaunchKernel( bp_training_error_signal_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, epsilon, statistics_count, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(), local_mean.LockedBuffer(), local_var.LockedBuffer(), local_gradient_wrt_mean.LockedBuffer(), local_gradient_wrt_var.LockedBuffer()); } } /** * dL/dx_i = dL/dy_i / sqrt(var+epsilon) * * Block dimensions: bsizex x bsizey x 1 * * Grid dimensions: (height / bsizex) x (width / bsizey) x 1 */ template <typename TensorDataType> __global__ void bp_inference_kernel(size_t height, size_t width, TensorDataType epsilon, const TensorDataType* __restrict__ gradient_wrt_output, size_t gradient_wrt_output_ldim, TensorDataType* __restrict__ gradient_wrt_input, size_t gradient_wrt_input_ldim, const TensorDataType* __restrict__ running_var) { const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x; const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y; const size_t nthreadsx = blockDim.x * gridDim.x; const size_t nthreadsy = blockDim.y * gridDim.y; for (size_t row = gidx; row < height; row += nthreadsx) { const auto& var = running_var[row]; const auto inv_stdev = gpu_lib::rsqrt(var + epsilon); for (size_t col = gidy; col < width; col += nthreadsy) { const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim]; auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim]; dx = dy * inv_stdev; } } } /** @brief Backprop for inference. * * Assumes forward prop uses running statistics. In other words, * statistics are independent of input. */ template <typename TensorDataType> void bp_inference_impl(DataType epsilon, const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input, const El::AbstractDistMatrix<TensorDataType>& running_var) { // Local matrices const auto& local_gradient_wrt_output = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_output.LockedMatrix()); auto& local_gradient_wrt_input = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(gradient_wrt_input.Matrix()); const auto& local_running_var = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(running_var.LockedMatrix()); // Compute gradient w.r.t. 
input if (!local_gradient_wrt_output.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_running_var)); const size_t local_height = local_gradient_wrt_output.Height(); const size_t local_width = local_gradient_wrt_output.Width(); constexpr size_t block_size_x = 256; constexpr size_t block_size_y = 1; dim3 block_dims, grid_dims; block_dims.x = block_size_x; block_dims.y = block_size_y; grid_dims.x = (local_height + block_size_x - 1) / block_size_x; grid_dims.y = (local_width + block_size_y - 1) / block_size_y; hydrogen::gpu::LaunchKernel( bp_inference_kernel<TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, epsilon, local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(), local_running_var.LockedBuffer()); } } template <typename TensorDataType> void bp_impl(lbann_comm& comm, TensorDataType epsilon, bool is_training, const El::AbstractDistMatrix<TensorDataType>& input, const El::AbstractDistMatrix<TensorDataType>& gradient_wrt_output, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_input, const El::AbstractDistMatrix<TensorDataType>& batch_statistics, El::AbstractDistMatrix<TensorDataType>& gradient_wrt_batch_statistics, const El::AbstractDistMatrix<TensorDataType>& running_var) { // Batchnorm has different behavior for training and inference if (is_training) { bp_training_impl<TensorDataType>(comm, epsilon, input, gradient_wrt_output, gradient_wrt_input, batch_statistics, gradient_wrt_batch_statistics); } else { bp_inference_impl<TensorDataType>(epsilon, gradient_wrt_output, gradient_wrt_input, running_var); } } } // namespace // Template instantiation template <typename TensorDataType, data_layout T_layout, El::Device Dev> void entrywise_batch_normalization_layer<TensorDataType, T_layout, Dev>::fp_compute() { using ValuesGetter = weights_details::SafeWeightsAccessor<TensorDataType>; const auto mode = this->get_model()->get_execution_context().get_execution_mode(); fp_impl(*this->get_comm(), this->m_decay, this->m_epsilon, mode == execution_mode::training, this->get_prev_activations(), this->get_activations(), *this->m_batch_statistics, ValuesGetter::mutable_values(this->get_weights(0)), ValuesGetter::mutable_values(this->get_weights(1))); } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void entrywise_batch_normalization_layer<TensorDataType, T_layout, Dev>::bp_compute() { const auto mode = this->get_model()->get_execution_context().get_execution_mode(); bp_impl(*this->get_comm(), this->m_epsilon, mode == execution_mode::training, this->get_prev_activations(), this->get_prev_error_signals(), this->get_error_signals(), *this->m_batch_statistics, *this->m_batch_statistics_gradient, this->weights_values(1)); } #define PROTO(T) \ template class entrywise_batch_normalization_layer< \ T, data_layout::DATA_PARALLEL, El::Device::GPU>; \ template class entrywise_batch_normalization_layer< \ T, data_layout::MODEL_PARALLEL, El::Device::GPU> #include "lbann/macros/instantiate.hpp" } // namespace lbann
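Illustrative sketch (assumed names, not from the LBANN sources): a host-side reference that evaluates the same dL/dmean, dL/dvar, and dL/dx_i formulas documented in the kernel comments above for one entry (row) across n mini-batch samples. A routine like this can be used to cross-check bp_training_stats_gradient_kernel and bp_training_error_signal_kernel on small inputs.

#include <cmath>
#include <vector>

// Reference batchnorm backward pass for a single entry:
//   dL/dmean = -sum(dL/dy_i) / sqrt(var+epsilon)
//   dL/dvar  = -sum(dL/dy_i * (x_i-mean)) * (var+epsilon)^(-3/2) / 2
//   dL/dx_i  = dL/dy_i / sqrt(var+epsilon) + dL/dmean / n
//              + dL/dvar * (x_i - mean) * 2/(n-1)
void batchnorm_backward_reference(const std::vector<double>& x,   // inputs x_i
                                  const std::vector<double>& dy,  // dL/dy_i
                                  double mean, double var, double epsilon,
                                  std::vector<double>& dx)        // dL/dx_i
{
  const size_t n = x.size();
  const double inv_stdev = 1.0 / std::sqrt(var + epsilon);
  double dmean = 0.0, dvar = 0.0;
  for (size_t i = 0; i < n; ++i) {
    dmean += -dy[i] * inv_stdev;
    dvar  += -dy[i] * (x[i] - mean) * inv_stdev * inv_stdev * inv_stdev / 2.0;
  }
  dx.resize(n);
  for (size_t i = 0; i < n; ++i) {
    dx[i] = dy[i] * inv_stdev
          + dmean / static_cast<double>(n)
          + dvar * (x[i] - mean) * 2.0 / static_cast<double>(n - 1);
  }
}

As with the GPU kernels, the reference assumes n > 1; for a single sample the output is constant in the input and the error signal is zero.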
6fe8a4665ebef6da0a6d531aed3e462432b59c46.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "column_stats.h" #include <math_constants.h> #include <io/utilities/block_utils.cuh> namespace cudf { namespace io { struct stats_state_s { stats_column_desc col; statistics_group group; statistics_chunk ck; volatile statistics_val warp_min[32]; volatile statistics_val warp_max[32]; volatile statistics_val warp_sum[32]; }; struct merge_state_s { stats_column_desc col; statistics_merge_group group; statistics_chunk ck; volatile statistics_val warp_min[32]; volatile statistics_val warp_max[32]; volatile statistics_val warp_sum[32]; volatile uint32_t warp_non_nulls[32]; volatile uint32_t warp_nulls[32]; }; inline __device__ int64_t WarpReduceMinInt(int64_t vmin) { int64_t v = SHFL_XOR(vmin, 1); vmin = min(vmin, v); v = SHFL_XOR(vmin, 2); vmin = min(vmin, v); v = SHFL_XOR(vmin, 4); vmin = min(vmin, v); v = SHFL_XOR(vmin, 8); vmin = min(vmin, v); v = SHFL_XOR(vmin, 16); return min(vmin, v); } inline __device__ int64_t WarpReduceMaxInt(int64_t vmax) { int64_t v = SHFL_XOR(vmax, 1); vmax = max(vmax, v); v = SHFL_XOR(vmax, 2); vmax = max(vmax, v); v = SHFL_XOR(vmax, 4); vmax = max(vmax, v); v = SHFL_XOR(vmax, 8); vmax = max(vmax, v); v = SHFL_XOR(vmax, 16); return max(vmax, v); } inline __device__ double WarpReduceMinFloat(double vmin) { double v = SHFL_XOR(vmin, 1); vmin = fmin(vmin, v); v = SHFL_XOR(vmin, 2); vmin = fmin(vmin, v); v = SHFL_XOR(vmin, 4); vmin = fmin(vmin, v); v = SHFL_XOR(vmin, 8); vmin = fmin(vmin, v); v = SHFL_XOR(vmin, 16); return fmin(vmin, v); } inline __device__ double WarpReduceMaxFloat(double vmax) { double v = SHFL_XOR(vmax, 1); vmax = fmax(vmax, v); v = SHFL_XOR(vmax, 2); vmax = fmax(vmax, v); v = SHFL_XOR(vmax, 4); vmax = fmax(vmax, v); v = SHFL_XOR(vmax, 8); vmax = fmax(vmax, v); v = SHFL_XOR(vmax, 16); return fmax(vmax, v); } inline __device__ double WarpReduceSumFloat(double vsum) { double v = SHFL_XOR(vsum, 1); if (!isnan(v)) vsum += v; v = SHFL_XOR(vsum, 2); if (!isnan(v)) vsum += v; v = SHFL_XOR(vsum, 4); if (!isnan(v)) vsum += v; v = SHFL_XOR(vsum, 8); if (!isnan(v)) vsum += v; v = SHFL_XOR(vsum, 16); if (!isnan(v)) vsum += v; return vsum; } inline __device__ string_stats WarpReduceMinString(const char *smin, uint32_t lmin) { uint32_t len = SHFL_XOR(lmin, 1); const char *ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 1)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } len = SHFL_XOR(lmin, 2); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 2)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } len = SHFL_XOR(lmin, 4); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 4)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } len = SHFL_XOR(lmin, 8); ptr = reinterpret_cast<const 
char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 8)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } len = SHFL_XOR(lmin, 16); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 16)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } return {smin, lmin}; } inline __device__ string_stats WarpReduceMaxString(const char *smax, uint32_t lmax) { uint32_t len = SHFL_XOR(lmax, 1); const char *ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 1)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } len = SHFL_XOR(lmax, 2); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 2)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } len = SHFL_XOR(lmax, 4); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 4)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } len = SHFL_XOR(lmax, 8); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 8)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } len = SHFL_XOR(lmax, 16); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 16)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } return { smax, lmax }; } void __device__ gatherIntColumnStats(stats_state_s *s, statistics_dtype dtype, uint32_t t) { int64_t vmin = INT64_MAX; int64_t vmax = INT64_MIN; int64_t vsum = 0; int64_t v; uint32_t nn_cnt = 0; bool has_minmax; for (uint32_t i = 0; i < s->group.num_rows; i += 1024) { uint32_t r = i + t; uint32_t row = r + s->group.start_row; const uint32_t *valid_map = s->col.valid_map_base; uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_rows) ? (valid_map) ? 
(valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0; if (is_valid) { switch (dtype) { case dtype_int32: case dtype_date32: v = reinterpret_cast<const int32_t *>(s->col.column_data_base)[row]; break; case dtype_int64: case dtype_decimal64: v = reinterpret_cast<const int64_t *>(s->col.column_data_base)[row]; break; case dtype_int16: v = reinterpret_cast<const int16_t *>(s->col.column_data_base)[row]; break; case dtype_timestamp64: v = reinterpret_cast<const int64_t *>(s->col.column_data_base)[row]; if (s->col.ts_scale < -1) { v /= -s->col.ts_scale; } else if (s->col.ts_scale > 1) { v *= s->col.ts_scale; } break; default: v = reinterpret_cast<const int8_t *>(s->col.column_data_base)[row]; break; } vmin = min(vmin, v); vmax = max(vmax, v); vsum += v; } nn_cnt += __syncthreads_count(is_valid); } if (!t) { s->ck.non_nulls = nn_cnt; s->ck.null_count = s->group.num_rows - nn_cnt; } vmin = WarpReduceMinInt(vmin); vmax = WarpReduceMaxInt(vmax); vsum = WarpReduceSum32(vsum); if (!(t & 0x1f)) { s->warp_min[t >> 5].i_val = vmin; s->warp_max[t >> 5].i_val = vmax; s->warp_sum[t >> 5].i_val = vsum; } has_minmax = __syncthreads_or(vmin <= vmax); if (t < 32 * 1) { vmin = WarpReduceMinInt(s->warp_min[t].i_val); if (!(t & 0x1f)) { s->ck.min_value.i_val = vmin; s->ck.has_minmax = (has_minmax); } } else if (t < 32 * 2) { vmax = WarpReduceMaxInt(s->warp_max[t & 0x1f].i_val); if (!(t & 0x1f)) { s->ck.max_value.i_val = vmax; } } else if (t < 32 * 3) { vsum = WarpReduceSum32(s->warp_sum[t & 0x1f].i_val); if (!(t & 0x1f)) { s->ck.sum.i_val = vsum; // TODO: For now, don't set the sum flag with 64-bit values so we don't have to check for 64-bit sum overflow s->ck.has_sum = (dtype <= dtype_int32 && has_minmax); } } } void __device__ gatherFloatColumnStats(stats_state_s *s, statistics_dtype dtype, uint32_t t) { double vmin = CUDART_INF; double vmax = -CUDART_INF; double vsum = 0; double v; uint32_t nn_cnt = 0; bool has_minmax; for (uint32_t i = 0; i < s->group.num_rows; i += 1024) { uint32_t r = i + t; uint32_t row = r + s->group.start_row; const uint32_t *valid_map = s->col.valid_map_base; uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_rows) ? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0; if (is_valid) { if (dtype == dtype_float64) { v = reinterpret_cast<const double *>(s->col.column_data_base)[row]; } else { v = reinterpret_cast<const float *>(s->col.column_data_base)[row]; } if (v < vmin) { vmin = v; } if (v > vmax) { vmax = v; } } nn_cnt += __syncthreads_count(is_valid); } if (!t) { s->ck.non_nulls = nn_cnt; s->ck.null_count = s->group.num_rows - nn_cnt; } vmin = WarpReduceMinFloat(vmin); vmax = WarpReduceMaxFloat(vmax); vsum = WarpReduceSumFloat(vsum); if (!(t & 0x1f)) { s->warp_min[t >> 5].fp_val = vmin; s->warp_max[t >> 5].fp_val = vmax; s->warp_sum[t >> 5].fp_val = vsum; } has_minmax = __syncthreads_or(vmin <= vmax); if (t < 32 * 1) { vmin = WarpReduceMinFloat(s->warp_min[t].fp_val); if (!(t & 0x1f)) { s->ck.min_value.fp_val = (vmin != 0.0) ? vmin : CUDART_NEG_ZERO; s->ck.has_minmax = (has_minmax); } } else if (t < 32 * 2) { vmax = WarpReduceMaxFloat(s->warp_max[t & 0x1f].fp_val); if (!(t & 0x1f)) { s->ck.max_value.fp_val = (vmax != 0.0) ? 
vmax : CUDART_ZERO; } } else if (t < 32 * 3) { vsum = WarpReduceSumFloat(s->warp_sum[t & 0x1f].fp_val); if (!(t & 0x1f)) { s->ck.sum.fp_val = vsum; s->ck.has_sum = (has_minmax); // Implies sum is valid as well } } } // FIXME: Use native libcudf string type struct nvstrdesc_s { const char *ptr; size_t count; }; void __device__ gatherStringColumnStats(stats_state_s *s, uint32_t t) { uint32_t len_sum = 0; const char *smin = nullptr; const char *smax = nullptr; uint32_t lmin = 0; uint32_t lmax = 0; uint32_t nn_cnt = 0; bool has_minmax; string_stats minval, maxval; for (uint32_t i = 0; i < s->group.num_rows; i += 1024) { uint32_t r = i + t; uint32_t row = r + s->group.start_row; const uint32_t *valid_map = s->col.valid_map_base; uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_rows) ? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0; if (is_valid) { const nvstrdesc_s *str_col = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base); uint32_t len = (uint32_t)str_col[row].count; const char *ptr = str_col[row].ptr; len_sum += len; if (!smin || nvstr_is_lesser(ptr, len, smin, lmin)) { lmin = len; smin = ptr; } if (!smax || nvstr_is_greater(ptr, len, smax, lmax)) { lmax = len; smax = ptr; } } nn_cnt += __syncthreads_count(is_valid); } if (!t) { s->ck.non_nulls = nn_cnt; s->ck.null_count = s->group.num_rows - nn_cnt; } minval = WarpReduceMinString(smin, lmin); maxval = WarpReduceMaxString(smax, lmax); len_sum = WarpReduceSum32(len_sum); if (!(t & 0x1f)) { s->warp_min[t >> 5].str_val.ptr = minval.ptr; s->warp_min[t >> 5].str_val.length = minval.length; s->warp_max[t >> 5].str_val.ptr = maxval.ptr; s->warp_max[t >> 5].str_val.length = maxval.length; s->warp_sum[t >> 5].str_val.length = len_sum; } has_minmax = __syncthreads_or(smin != nullptr); if (t < 32 * 1) { minval = WarpReduceMinString(s->warp_min[t].str_val.ptr, s->warp_min[t].str_val.length); if (!(t & 0x1f)) { s->ck.min_value.str_val.ptr = minval.ptr; s->ck.min_value.str_val.length = minval.length; s->ck.has_minmax = has_minmax; } } else if (t < 32 * 2) { maxval = WarpReduceMaxString(s->warp_max[t & 0x1f].str_val.ptr, s->warp_max[t & 0x1f].str_val.length); if (!(t & 0x1f)) { s->ck.max_value.str_val.ptr = maxval.ptr; s->ck.max_value.str_val.length = maxval.length; } } else if (t < 32 * 3) { len_sum = WarpReduceSum32(s->warp_sum[t & 0x1f].str_val.length); if (!(t & 0x1f)) { s->ck.sum.i_val = len_sum; s->ck.has_sum = has_minmax; } } } /** * @brief Gather column chunk statistics (min/max values, sum and null count) * for a group of rows. 
**/ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024, 1) gpuGatherColumnStatistics(statistics_chunk *chunks, const statistics_group *groups) { __shared__ __align__(8) stats_state_s state_g; stats_state_s *const s = &state_g; uint32_t t = threadIdx.x; statistics_dtype dtype; if (t < sizeof(statistics_group) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->group)[t] = reinterpret_cast<const uint32_t *>(&groups[blockIdx.x])[t]; } if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->ck)[t] = 0; } __syncthreads(); if (t < sizeof(stats_column_desc) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->group.col)[t]; } __syncthreads(); dtype = s->col.stats_dtype; if (dtype >= dtype_bool8 && dtype <= dtype_decimal64) { gatherIntColumnStats(s, dtype, t); } else if (dtype >= dtype_float32 && dtype <= dtype_float64) { gatherFloatColumnStats(s, dtype, t); } else if (dtype == dtype_string) { gatherStringColumnStats(s, t); } __syncthreads(); if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&chunks[blockIdx.x])[t] = reinterpret_cast<uint32_t *>(&s->ck)[t]; } } void __device__ mergeIntColumnStats(merge_state_s *s, statistics_dtype dtype, const statistics_chunk *ck_in, uint32_t num_chunks, uint32_t t) { int64_t vmin = INT64_MAX; int64_t vmax = INT64_MIN; int64_t vsum = 0; uint32_t non_nulls = 0; uint32_t null_count = 0; bool has_minmax; for (uint32_t i = t; i < num_chunks; i += 1024) { const statistics_chunk *ck = &ck_in[i]; if (ck->has_minmax) { vmin = min(vmin, ck->min_value.i_val); vmax = max(vmax, ck->max_value.i_val); } if (ck->has_sum) { vsum += ck->sum.i_val; } non_nulls += ck->non_nulls; null_count += ck->null_count; } non_nulls = WarpReduceSum32(non_nulls); null_count = WarpReduceSum32(null_count); vmin = WarpReduceMinInt(vmin); vmax = WarpReduceMaxInt(vmax); vsum = WarpReduceSum32(vsum); if (!(t & 0x1f)) { s->warp_non_nulls[t >> 5] = non_nulls; s->warp_nulls[t >> 5] = null_count; s->warp_min[t >> 5].i_val = vmin; s->warp_max[t >> 5].i_val = vmax; s->warp_sum[t >> 5].i_val = vsum; } has_minmax = __syncthreads_or(vmin <= vmax); if (t < 32 * 1) { vmin = WarpReduceMinInt(s->warp_min[t].i_val); if (!(t & 0x1f)) { s->ck.min_value.i_val = vmin; s->ck.has_minmax = (has_minmax); } } else if (t < 32 * 2) { vmax = WarpReduceMaxInt(s->warp_max[t & 0x1f].i_val); if (!(t & 0x1f)) { s->ck.max_value.i_val = vmax; } } else if (t < 32 * 3) { vsum = WarpReduceSum32(s->warp_sum[t & 0x1f].i_val); if (!(t & 0x1f)) { s->ck.sum.i_val = vsum; // TODO: For now, don't set the sum flag with 64-bit values so we don't have to check for 64-bit sum overflow s->ck.has_sum = (dtype <= dtype_int32 && has_minmax); } } else if (t < 32 * 4) { non_nulls = WarpReduceSum32(s->warp_non_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.non_nulls = non_nulls; } } else if (t < 32 * 5) { null_count = WarpReduceSum32(s->warp_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.null_count = null_count; } } } void __device__ mergeFloatColumnStats(merge_state_s *s, const statistics_chunk *ck_in, uint32_t num_chunks, uint32_t t) { double vmin = CUDART_INF; double vmax = -CUDART_INF; double vsum = 0; uint32_t non_nulls = 0; uint32_t null_count = 0; bool has_minmax; for (uint32_t i = t; i < num_chunks; i += 1024) { const statistics_chunk *ck = &ck_in[i]; if (ck->has_minmax) { double v0 = ck->min_value.fp_val; double v1 = ck->max_value.fp_val; if (v0 < vmin) { vmin = v0; } if (v1 > vmax) { vmax = v1; } } if (ck->has_sum) { vsum += 
ck->sum.fp_val; } non_nulls += ck->non_nulls; null_count += ck->null_count; } non_nulls = WarpReduceSum32(non_nulls); null_count = WarpReduceSum32(null_count); vmin = WarpReduceMinFloat(vmin); vmax = WarpReduceMaxFloat(vmax); vsum = WarpReduceSumFloat(vsum); if (!(t & 0x1f)) { s->warp_non_nulls[t >> 5] = non_nulls; s->warp_nulls[t >> 5] = null_count; s->warp_min[t >> 5].fp_val = vmin; s->warp_max[t >> 5].fp_val = vmax; s->warp_sum[t >> 5].fp_val = vsum; } has_minmax = __syncthreads_or(vmin <= vmax); if (t < 32 * 1) { vmin = WarpReduceMinFloat(s->warp_min[t].fp_val); if (!(t & 0x1f)) { s->ck.min_value.fp_val = (vmin != 0.0) ? vmin : CUDART_NEG_ZERO; s->ck.has_minmax = (has_minmax); } } else if (t < 32 * 2) { vmax = WarpReduceMaxFloat(s->warp_max[t & 0x1f].fp_val); if (!(t & 0x1f)) { s->ck.max_value.fp_val = (vmax != 0.0) ? vmax : CUDART_ZERO; } } else if (t < 32 * 3) { vsum = WarpReduceSumFloat(s->warp_sum[t & 0x1f].fp_val); if (!(t & 0x1f)) { s->ck.sum.fp_val = vsum; s->ck.has_sum = (has_minmax); // Implies sum is valid as well } } else if (t < 32 * 4) { non_nulls = WarpReduceSum32(s->warp_non_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.non_nulls = non_nulls; } } else if (t < 32 * 5) { null_count = WarpReduceSum32(s->warp_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.null_count = null_count; } } } void __device__ mergeStringColumnStats(merge_state_s *s, const statistics_chunk *ck_in, uint32_t num_chunks, uint32_t t) { uint32_t len_sum = 0; const char *smin = nullptr; const char *smax = nullptr; uint32_t lmin = 0; uint32_t lmax = 0; uint32_t non_nulls = 0; uint32_t null_count = 0; bool has_minmax; string_stats minval, maxval; for (uint32_t i = t; i < num_chunks; i += 1024) { const statistics_chunk *ck = &ck_in[i]; if (ck->has_minmax) { uint32_t len0 = ck->min_value.str_val.length; const char *ptr0 = ck->min_value.str_val.ptr; uint32_t len1 = ck->max_value.str_val.length; const char *ptr1 = ck->max_value.str_val.ptr; if (!smin || (ptr0 && nvstr_is_lesser(ptr0, len0, smin, lmin))) { lmin = len0; smin = ptr0; } if (!smax || (ptr1 && nvstr_is_greater(ptr1, len1, smax, lmax))) { lmax = len1; smax = ptr1; } } if (ck->has_sum) { len_sum += (uint32_t)ck->sum.i_val; } non_nulls += ck->non_nulls; null_count += ck->null_count; } non_nulls = WarpReduceSum32(non_nulls); null_count = WarpReduceSum32(null_count); minval = WarpReduceMinString(smin, lmin); maxval = WarpReduceMaxString(smax, lmax); len_sum = WarpReduceSum32(len_sum); if (!(t & 0x1f)) { s->warp_non_nulls[t >> 5] = non_nulls; s->warp_nulls[t >> 5] = null_count; s->warp_min[t >> 5].str_val.ptr = minval.ptr; s->warp_min[t >> 5].str_val.length = minval.length; s->warp_max[t >> 5].str_val.ptr = maxval.ptr; s->warp_max[t >> 5].str_val.length = maxval.length; s->warp_sum[t >> 5].str_val.length = len_sum; } has_minmax = __syncthreads_or(smin != nullptr); if (t < 32 * 1) { minval = WarpReduceMinString(s->warp_min[t].str_val.ptr, s->warp_min[t].str_val.length); if (!(t & 0x1f)) { s->ck.min_value.str_val.ptr = minval.ptr; s->ck.min_value.str_val.length = minval.length; s->ck.has_minmax = has_minmax; } } else if (t < 32 * 2) { maxval = WarpReduceMaxString(s->warp_max[t & 0x1f].str_val.ptr, s->warp_max[t & 0x1f].str_val.length); if (!(t & 0x1f)) { s->ck.max_value.str_val.ptr = maxval.ptr; s->ck.max_value.str_val.length = maxval.length; } } else if (t < 32 * 3) { len_sum = WarpReduceSum32(s->warp_sum[t & 0x1f].str_val.length); if (!(t & 0x1f)) { s->ck.sum.i_val = len_sum; s->ck.has_sum = has_minmax; } } else if (t < 32 * 4) { non_nulls = 
WarpReduceSum32(s->warp_non_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.non_nulls = non_nulls; } } else if (t < 32 * 5) { null_count = WarpReduceSum32(s->warp_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.null_count = null_count; } } } /** * @brief Combine multiple statistics chunk together to form new statistics chunks **/ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024, 1) gpuMergeColumnStatistics(statistics_chunk *chunks_out, const statistics_chunk *chunks_in, const statistics_merge_group *groups) { __shared__ __align__(8) merge_state_s state_g; merge_state_s *const s = &state_g; uint32_t t = threadIdx.x; statistics_dtype dtype; if (t < sizeof(statistics_merge_group) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->group)[t] = reinterpret_cast<const uint32_t *>(&groups[blockIdx.x])[t]; } __syncthreads(); if (t < sizeof(stats_column_desc) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->group.col)[t]; } __syncthreads(); dtype = s->col.stats_dtype; if (dtype >= dtype_bool8 && dtype <= dtype_decimal64) { mergeIntColumnStats(s, dtype, chunks_in + s->group.start_chunk, s->group.num_chunks, t); } else if (dtype >= dtype_float32 && dtype <= dtype_float64) { mergeFloatColumnStats(s, chunks_in + s->group.start_chunk, s->group.num_chunks, t); } else if (dtype == dtype_string) { mergeStringColumnStats(s, chunks_in + s->group.start_chunk, s->group.num_chunks, t); } __syncthreads(); if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&chunks_out[blockIdx.x])[t] = reinterpret_cast<uint32_t *>(&s->ck)[t]; } } /** * @brief Launches kernel to gather column statistics * * @param[out] chunks Statistics results [num_chunks] * @param[in] groups Statistics row groups [num_chunks] * @param[in] num_chunks Number of chunks & rowgroups * @param[in] stream CUDA stream to use, default 0 * * @return hipSuccess if successful, a CUDA error code otherwise **/ hipError_t GatherColumnStatistics(statistics_chunk *chunks, const statistics_group *groups, uint32_t num_chunks, hipStream_t stream) { hipLaunchKernelGGL(( gpuGatherColumnStatistics) , dim3(num_chunks), dim3(1024), 0, stream , chunks, groups); return hipSuccess; } /** * @brief Launches kernel to merge column statistics * * @param[out] chunks_out Statistics results [num_chunks] * @param[out] chunks_in Input statistics * @param[in] groups Statistics groups [num_chunks] * @param[in] num_chunks Number of chunks & groups * @param[in] stream CUDA stream to use, default 0 * * @return hipSuccess if successful, a CUDA error code otherwise **/ hipError_t MergeColumnStatistics(statistics_chunk *chunks_out, const statistics_chunk *chunks_in, const statistics_merge_group *groups, uint32_t num_chunks, hipStream_t stream) { hipLaunchKernelGGL(( gpuMergeColumnStatistics) , dim3(num_chunks), dim3(1024), 0, stream , chunks_out, chunks_in, groups); return hipSuccess; } } // namespace io } // namespace cudf
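Illustrative sketch (assumed names, not part of the cudf file above): the butterfly warp reduction that WarpReduceMinInt/WarpReduceMaxInt implement with the SHFL_XOR helper, written directly against __shfl_xor_sync, plus a grid-stride column-minimum kernel that combines one result per warp with a 64-bit atomicMin (signed 64-bit atomicMin requires compute capability 3.5+).

#include <cstdio>
#include <climits>
#include <vector>
#include <cuda_runtime.h>

// Butterfly reduction over the 32 lanes of a warp; after the five XOR
// exchanges (offsets 1,2,4,8,16) every lane holds the warp minimum.
__device__ long long warp_reduce_min(long long v) {
  for (int offset = 1; offset < 32; offset <<= 1) {
    v = min(v, __shfl_xor_sync(0xffffffffu, v, offset));
  }
  return v;
}

__global__ void column_min_kernel(const long long* __restrict__ data, size_t n,
                                  long long* __restrict__ result) {
  long long v = LLONG_MAX;
  for (size_t i = threadIdx.x + (size_t)blockIdx.x * blockDim.x; i < n;
       i += (size_t)blockDim.x * gridDim.x) {
    v = min(v, data[i]);
  }
  v = warp_reduce_min(v);
  if ((threadIdx.x & 31u) == 0) {
    atomicMin(result, v);  // one atomic per warp leader
  }
}

int main() {
  const size_t n = 1 << 20;
  std::vector<long long> h(n);
  for (size_t i = 0; i < n; ++i) {
    h[i] = (long long)((i * 2654435761u) % 1000003) - 500000;
  }
  long long *d_data, *d_result;
  const long long init = LLONG_MAX;
  cudaMalloc(&d_data, n * sizeof(long long));
  cudaMalloc(&d_result, sizeof(long long));
  cudaMemcpy(d_data, h.data(), n * sizeof(long long), cudaMemcpyHostToDevice);
  cudaMemcpy(d_result, &init, sizeof(long long), cudaMemcpyHostToDevice);
  column_min_kernel<<<64, 256>>>(d_data, n, d_result);
  long long h_result;
  cudaMemcpy(&h_result, d_result, sizeof(long long), cudaMemcpyDeviceToHost);
  printf("min = %lld\n", h_result);
  cudaFree(d_data); cudaFree(d_result);
  return 0;
}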
6fe8a4665ebef6da0a6d531aed3e462432b59c46.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "column_stats.h" #include <math_constants.h> #include <io/utilities/block_utils.cuh> namespace cudf { namespace io { struct stats_state_s { stats_column_desc col; statistics_group group; statistics_chunk ck; volatile statistics_val warp_min[32]; volatile statistics_val warp_max[32]; volatile statistics_val warp_sum[32]; }; struct merge_state_s { stats_column_desc col; statistics_merge_group group; statistics_chunk ck; volatile statistics_val warp_min[32]; volatile statistics_val warp_max[32]; volatile statistics_val warp_sum[32]; volatile uint32_t warp_non_nulls[32]; volatile uint32_t warp_nulls[32]; }; inline __device__ int64_t WarpReduceMinInt(int64_t vmin) { int64_t v = SHFL_XOR(vmin, 1); vmin = min(vmin, v); v = SHFL_XOR(vmin, 2); vmin = min(vmin, v); v = SHFL_XOR(vmin, 4); vmin = min(vmin, v); v = SHFL_XOR(vmin, 8); vmin = min(vmin, v); v = SHFL_XOR(vmin, 16); return min(vmin, v); } inline __device__ int64_t WarpReduceMaxInt(int64_t vmax) { int64_t v = SHFL_XOR(vmax, 1); vmax = max(vmax, v); v = SHFL_XOR(vmax, 2); vmax = max(vmax, v); v = SHFL_XOR(vmax, 4); vmax = max(vmax, v); v = SHFL_XOR(vmax, 8); vmax = max(vmax, v); v = SHFL_XOR(vmax, 16); return max(vmax, v); } inline __device__ double WarpReduceMinFloat(double vmin) { double v = SHFL_XOR(vmin, 1); vmin = fmin(vmin, v); v = SHFL_XOR(vmin, 2); vmin = fmin(vmin, v); v = SHFL_XOR(vmin, 4); vmin = fmin(vmin, v); v = SHFL_XOR(vmin, 8); vmin = fmin(vmin, v); v = SHFL_XOR(vmin, 16); return fmin(vmin, v); } inline __device__ double WarpReduceMaxFloat(double vmax) { double v = SHFL_XOR(vmax, 1); vmax = fmax(vmax, v); v = SHFL_XOR(vmax, 2); vmax = fmax(vmax, v); v = SHFL_XOR(vmax, 4); vmax = fmax(vmax, v); v = SHFL_XOR(vmax, 8); vmax = fmax(vmax, v); v = SHFL_XOR(vmax, 16); return fmax(vmax, v); } inline __device__ double WarpReduceSumFloat(double vsum) { double v = SHFL_XOR(vsum, 1); if (!isnan(v)) vsum += v; v = SHFL_XOR(vsum, 2); if (!isnan(v)) vsum += v; v = SHFL_XOR(vsum, 4); if (!isnan(v)) vsum += v; v = SHFL_XOR(vsum, 8); if (!isnan(v)) vsum += v; v = SHFL_XOR(vsum, 16); if (!isnan(v)) vsum += v; return vsum; } inline __device__ string_stats WarpReduceMinString(const char *smin, uint32_t lmin) { uint32_t len = SHFL_XOR(lmin, 1); const char *ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 1)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } len = SHFL_XOR(lmin, 2); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 2)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } len = SHFL_XOR(lmin, 4); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 4)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } len = SHFL_XOR(lmin, 8); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 8)); if (!smin || (ptr && 
nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } len = SHFL_XOR(lmin, 16); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smin), 16)); if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) { smin = ptr; lmin = len; } return {smin, lmin}; } inline __device__ string_stats WarpReduceMaxString(const char *smax, uint32_t lmax) { uint32_t len = SHFL_XOR(lmax, 1); const char *ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 1)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } len = SHFL_XOR(lmax, 2); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 2)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } len = SHFL_XOR(lmax, 4); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 4)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } len = SHFL_XOR(lmax, 8); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 8)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } len = SHFL_XOR(lmax, 16); ptr = reinterpret_cast<const char *>(SHFL_XOR(reinterpret_cast<uintptr_t>(smax), 16)); if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) { smax = ptr; lmax = len; } return { smax, lmax }; } void __device__ gatherIntColumnStats(stats_state_s *s, statistics_dtype dtype, uint32_t t) { int64_t vmin = INT64_MAX; int64_t vmax = INT64_MIN; int64_t vsum = 0; int64_t v; uint32_t nn_cnt = 0; bool has_minmax; for (uint32_t i = 0; i < s->group.num_rows; i += 1024) { uint32_t r = i + t; uint32_t row = r + s->group.start_row; const uint32_t *valid_map = s->col.valid_map_base; uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_rows) ? (valid_map) ? 
(valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0; if (is_valid) { switch (dtype) { case dtype_int32: case dtype_date32: v = reinterpret_cast<const int32_t *>(s->col.column_data_base)[row]; break; case dtype_int64: case dtype_decimal64: v = reinterpret_cast<const int64_t *>(s->col.column_data_base)[row]; break; case dtype_int16: v = reinterpret_cast<const int16_t *>(s->col.column_data_base)[row]; break; case dtype_timestamp64: v = reinterpret_cast<const int64_t *>(s->col.column_data_base)[row]; if (s->col.ts_scale < -1) { v /= -s->col.ts_scale; } else if (s->col.ts_scale > 1) { v *= s->col.ts_scale; } break; default: v = reinterpret_cast<const int8_t *>(s->col.column_data_base)[row]; break; } vmin = min(vmin, v); vmax = max(vmax, v); vsum += v; } nn_cnt += __syncthreads_count(is_valid); } if (!t) { s->ck.non_nulls = nn_cnt; s->ck.null_count = s->group.num_rows - nn_cnt; } vmin = WarpReduceMinInt(vmin); vmax = WarpReduceMaxInt(vmax); vsum = WarpReduceSum32(vsum); if (!(t & 0x1f)) { s->warp_min[t >> 5].i_val = vmin; s->warp_max[t >> 5].i_val = vmax; s->warp_sum[t >> 5].i_val = vsum; } has_minmax = __syncthreads_or(vmin <= vmax); if (t < 32 * 1) { vmin = WarpReduceMinInt(s->warp_min[t].i_val); if (!(t & 0x1f)) { s->ck.min_value.i_val = vmin; s->ck.has_minmax = (has_minmax); } } else if (t < 32 * 2) { vmax = WarpReduceMaxInt(s->warp_max[t & 0x1f].i_val); if (!(t & 0x1f)) { s->ck.max_value.i_val = vmax; } } else if (t < 32 * 3) { vsum = WarpReduceSum32(s->warp_sum[t & 0x1f].i_val); if (!(t & 0x1f)) { s->ck.sum.i_val = vsum; // TODO: For now, don't set the sum flag with 64-bit values so we don't have to check for 64-bit sum overflow s->ck.has_sum = (dtype <= dtype_int32 && has_minmax); } } } void __device__ gatherFloatColumnStats(stats_state_s *s, statistics_dtype dtype, uint32_t t) { double vmin = CUDART_INF; double vmax = -CUDART_INF; double vsum = 0; double v; uint32_t nn_cnt = 0; bool has_minmax; for (uint32_t i = 0; i < s->group.num_rows; i += 1024) { uint32_t r = i + t; uint32_t row = r + s->group.start_row; const uint32_t *valid_map = s->col.valid_map_base; uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_rows) ? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0; if (is_valid) { if (dtype == dtype_float64) { v = reinterpret_cast<const double *>(s->col.column_data_base)[row]; } else { v = reinterpret_cast<const float *>(s->col.column_data_base)[row]; } if (v < vmin) { vmin = v; } if (v > vmax) { vmax = v; } } nn_cnt += __syncthreads_count(is_valid); } if (!t) { s->ck.non_nulls = nn_cnt; s->ck.null_count = s->group.num_rows - nn_cnt; } vmin = WarpReduceMinFloat(vmin); vmax = WarpReduceMaxFloat(vmax); vsum = WarpReduceSumFloat(vsum); if (!(t & 0x1f)) { s->warp_min[t >> 5].fp_val = vmin; s->warp_max[t >> 5].fp_val = vmax; s->warp_sum[t >> 5].fp_val = vsum; } has_minmax = __syncthreads_or(vmin <= vmax); if (t < 32 * 1) { vmin = WarpReduceMinFloat(s->warp_min[t].fp_val); if (!(t & 0x1f)) { s->ck.min_value.fp_val = (vmin != 0.0) ? vmin : CUDART_NEG_ZERO; s->ck.has_minmax = (has_minmax); } } else if (t < 32 * 2) { vmax = WarpReduceMaxFloat(s->warp_max[t & 0x1f].fp_val); if (!(t & 0x1f)) { s->ck.max_value.fp_val = (vmax != 0.0) ? 
vmax : CUDART_ZERO; } } else if (t < 32 * 3) { vsum = WarpReduceSumFloat(s->warp_sum[t & 0x1f].fp_val); if (!(t & 0x1f)) { s->ck.sum.fp_val = vsum; s->ck.has_sum = (has_minmax); // Implies sum is valid as well } } } // FIXME: Use native libcudf string type struct nvstrdesc_s { const char *ptr; size_t count; }; void __device__ gatherStringColumnStats(stats_state_s *s, uint32_t t) { uint32_t len_sum = 0; const char *smin = nullptr; const char *smax = nullptr; uint32_t lmin = 0; uint32_t lmax = 0; uint32_t nn_cnt = 0; bool has_minmax; string_stats minval, maxval; for (uint32_t i = 0; i < s->group.num_rows; i += 1024) { uint32_t r = i + t; uint32_t row = r + s->group.start_row; const uint32_t *valid_map = s->col.valid_map_base; uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_rows) ? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0; if (is_valid) { const nvstrdesc_s *str_col = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base); uint32_t len = (uint32_t)str_col[row].count; const char *ptr = str_col[row].ptr; len_sum += len; if (!smin || nvstr_is_lesser(ptr, len, smin, lmin)) { lmin = len; smin = ptr; } if (!smax || nvstr_is_greater(ptr, len, smax, lmax)) { lmax = len; smax = ptr; } } nn_cnt += __syncthreads_count(is_valid); } if (!t) { s->ck.non_nulls = nn_cnt; s->ck.null_count = s->group.num_rows - nn_cnt; } minval = WarpReduceMinString(smin, lmin); maxval = WarpReduceMaxString(smax, lmax); len_sum = WarpReduceSum32(len_sum); if (!(t & 0x1f)) { s->warp_min[t >> 5].str_val.ptr = minval.ptr; s->warp_min[t >> 5].str_val.length = minval.length; s->warp_max[t >> 5].str_val.ptr = maxval.ptr; s->warp_max[t >> 5].str_val.length = maxval.length; s->warp_sum[t >> 5].str_val.length = len_sum; } has_minmax = __syncthreads_or(smin != nullptr); if (t < 32 * 1) { minval = WarpReduceMinString(s->warp_min[t].str_val.ptr, s->warp_min[t].str_val.length); if (!(t & 0x1f)) { s->ck.min_value.str_val.ptr = minval.ptr; s->ck.min_value.str_val.length = minval.length; s->ck.has_minmax = has_minmax; } } else if (t < 32 * 2) { maxval = WarpReduceMaxString(s->warp_max[t & 0x1f].str_val.ptr, s->warp_max[t & 0x1f].str_val.length); if (!(t & 0x1f)) { s->ck.max_value.str_val.ptr = maxval.ptr; s->ck.max_value.str_val.length = maxval.length; } } else if (t < 32 * 3) { len_sum = WarpReduceSum32(s->warp_sum[t & 0x1f].str_val.length); if (!(t & 0x1f)) { s->ck.sum.i_val = len_sum; s->ck.has_sum = has_minmax; } } } /** * @brief Gather column chunk statistics (min/max values, sum and null count) * for a group of rows. 
**/ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024, 1) gpuGatherColumnStatistics(statistics_chunk *chunks, const statistics_group *groups) { __shared__ __align__(8) stats_state_s state_g; stats_state_s *const s = &state_g; uint32_t t = threadIdx.x; statistics_dtype dtype; if (t < sizeof(statistics_group) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->group)[t] = reinterpret_cast<const uint32_t *>(&groups[blockIdx.x])[t]; } if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->ck)[t] = 0; } __syncthreads(); if (t < sizeof(stats_column_desc) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->group.col)[t]; } __syncthreads(); dtype = s->col.stats_dtype; if (dtype >= dtype_bool8 && dtype <= dtype_decimal64) { gatherIntColumnStats(s, dtype, t); } else if (dtype >= dtype_float32 && dtype <= dtype_float64) { gatherFloatColumnStats(s, dtype, t); } else if (dtype == dtype_string) { gatherStringColumnStats(s, t); } __syncthreads(); if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&chunks[blockIdx.x])[t] = reinterpret_cast<uint32_t *>(&s->ck)[t]; } } void __device__ mergeIntColumnStats(merge_state_s *s, statistics_dtype dtype, const statistics_chunk *ck_in, uint32_t num_chunks, uint32_t t) { int64_t vmin = INT64_MAX; int64_t vmax = INT64_MIN; int64_t vsum = 0; uint32_t non_nulls = 0; uint32_t null_count = 0; bool has_minmax; for (uint32_t i = t; i < num_chunks; i += 1024) { const statistics_chunk *ck = &ck_in[i]; if (ck->has_minmax) { vmin = min(vmin, ck->min_value.i_val); vmax = max(vmax, ck->max_value.i_val); } if (ck->has_sum) { vsum += ck->sum.i_val; } non_nulls += ck->non_nulls; null_count += ck->null_count; } non_nulls = WarpReduceSum32(non_nulls); null_count = WarpReduceSum32(null_count); vmin = WarpReduceMinInt(vmin); vmax = WarpReduceMaxInt(vmax); vsum = WarpReduceSum32(vsum); if (!(t & 0x1f)) { s->warp_non_nulls[t >> 5] = non_nulls; s->warp_nulls[t >> 5] = null_count; s->warp_min[t >> 5].i_val = vmin; s->warp_max[t >> 5].i_val = vmax; s->warp_sum[t >> 5].i_val = vsum; } has_minmax = __syncthreads_or(vmin <= vmax); if (t < 32 * 1) { vmin = WarpReduceMinInt(s->warp_min[t].i_val); if (!(t & 0x1f)) { s->ck.min_value.i_val = vmin; s->ck.has_minmax = (has_minmax); } } else if (t < 32 * 2) { vmax = WarpReduceMaxInt(s->warp_max[t & 0x1f].i_val); if (!(t & 0x1f)) { s->ck.max_value.i_val = vmax; } } else if (t < 32 * 3) { vsum = WarpReduceSum32(s->warp_sum[t & 0x1f].i_val); if (!(t & 0x1f)) { s->ck.sum.i_val = vsum; // TODO: For now, don't set the sum flag with 64-bit values so we don't have to check for 64-bit sum overflow s->ck.has_sum = (dtype <= dtype_int32 && has_minmax); } } else if (t < 32 * 4) { non_nulls = WarpReduceSum32(s->warp_non_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.non_nulls = non_nulls; } } else if (t < 32 * 5) { null_count = WarpReduceSum32(s->warp_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.null_count = null_count; } } } void __device__ mergeFloatColumnStats(merge_state_s *s, const statistics_chunk *ck_in, uint32_t num_chunks, uint32_t t) { double vmin = CUDART_INF; double vmax = -CUDART_INF; double vsum = 0; uint32_t non_nulls = 0; uint32_t null_count = 0; bool has_minmax; for (uint32_t i = t; i < num_chunks; i += 1024) { const statistics_chunk *ck = &ck_in[i]; if (ck->has_minmax) { double v0 = ck->min_value.fp_val; double v1 = ck->max_value.fp_val; if (v0 < vmin) { vmin = v0; } if (v1 > vmax) { vmax = v1; } } if (ck->has_sum) { vsum += 
ck->sum.fp_val; } non_nulls += ck->non_nulls; null_count += ck->null_count; } non_nulls = WarpReduceSum32(non_nulls); null_count = WarpReduceSum32(null_count); vmin = WarpReduceMinFloat(vmin); vmax = WarpReduceMaxFloat(vmax); vsum = WarpReduceSumFloat(vsum); if (!(t & 0x1f)) { s->warp_non_nulls[t >> 5] = non_nulls; s->warp_nulls[t >> 5] = null_count; s->warp_min[t >> 5].fp_val = vmin; s->warp_max[t >> 5].fp_val = vmax; s->warp_sum[t >> 5].fp_val = vsum; } has_minmax = __syncthreads_or(vmin <= vmax); if (t < 32 * 1) { vmin = WarpReduceMinFloat(s->warp_min[t].fp_val); if (!(t & 0x1f)) { s->ck.min_value.fp_val = (vmin != 0.0) ? vmin : CUDART_NEG_ZERO; s->ck.has_minmax = (has_minmax); } } else if (t < 32 * 2) { vmax = WarpReduceMaxFloat(s->warp_max[t & 0x1f].fp_val); if (!(t & 0x1f)) { s->ck.max_value.fp_val = (vmax != 0.0) ? vmax : CUDART_ZERO; } } else if (t < 32 * 3) { vsum = WarpReduceSumFloat(s->warp_sum[t & 0x1f].fp_val); if (!(t & 0x1f)) { s->ck.sum.fp_val = vsum; s->ck.has_sum = (has_minmax); // Implies sum is valid as well } } else if (t < 32 * 4) { non_nulls = WarpReduceSum32(s->warp_non_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.non_nulls = non_nulls; } } else if (t < 32 * 5) { null_count = WarpReduceSum32(s->warp_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.null_count = null_count; } } } void __device__ mergeStringColumnStats(merge_state_s *s, const statistics_chunk *ck_in, uint32_t num_chunks, uint32_t t) { uint32_t len_sum = 0; const char *smin = nullptr; const char *smax = nullptr; uint32_t lmin = 0; uint32_t lmax = 0; uint32_t non_nulls = 0; uint32_t null_count = 0; bool has_minmax; string_stats minval, maxval; for (uint32_t i = t; i < num_chunks; i += 1024) { const statistics_chunk *ck = &ck_in[i]; if (ck->has_minmax) { uint32_t len0 = ck->min_value.str_val.length; const char *ptr0 = ck->min_value.str_val.ptr; uint32_t len1 = ck->max_value.str_val.length; const char *ptr1 = ck->max_value.str_val.ptr; if (!smin || (ptr0 && nvstr_is_lesser(ptr0, len0, smin, lmin))) { lmin = len0; smin = ptr0; } if (!smax || (ptr1 && nvstr_is_greater(ptr1, len1, smax, lmax))) { lmax = len1; smax = ptr1; } } if (ck->has_sum) { len_sum += (uint32_t)ck->sum.i_val; } non_nulls += ck->non_nulls; null_count += ck->null_count; } non_nulls = WarpReduceSum32(non_nulls); null_count = WarpReduceSum32(null_count); minval = WarpReduceMinString(smin, lmin); maxval = WarpReduceMaxString(smax, lmax); len_sum = WarpReduceSum32(len_sum); if (!(t & 0x1f)) { s->warp_non_nulls[t >> 5] = non_nulls; s->warp_nulls[t >> 5] = null_count; s->warp_min[t >> 5].str_val.ptr = minval.ptr; s->warp_min[t >> 5].str_val.length = minval.length; s->warp_max[t >> 5].str_val.ptr = maxval.ptr; s->warp_max[t >> 5].str_val.length = maxval.length; s->warp_sum[t >> 5].str_val.length = len_sum; } has_minmax = __syncthreads_or(smin != nullptr); if (t < 32 * 1) { minval = WarpReduceMinString(s->warp_min[t].str_val.ptr, s->warp_min[t].str_val.length); if (!(t & 0x1f)) { s->ck.min_value.str_val.ptr = minval.ptr; s->ck.min_value.str_val.length = minval.length; s->ck.has_minmax = has_minmax; } } else if (t < 32 * 2) { maxval = WarpReduceMaxString(s->warp_max[t & 0x1f].str_val.ptr, s->warp_max[t & 0x1f].str_val.length); if (!(t & 0x1f)) { s->ck.max_value.str_val.ptr = maxval.ptr; s->ck.max_value.str_val.length = maxval.length; } } else if (t < 32 * 3) { len_sum = WarpReduceSum32(s->warp_sum[t & 0x1f].str_val.length); if (!(t & 0x1f)) { s->ck.sum.i_val = len_sum; s->ck.has_sum = has_minmax; } } else if (t < 32 * 4) { non_nulls = 
WarpReduceSum32(s->warp_non_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.non_nulls = non_nulls; } } else if (t < 32 * 5) { null_count = WarpReduceSum32(s->warp_nulls[t & 0x1f]); if (!(t & 0x1f)) { s->ck.null_count = null_count; } } } /** * @brief Combine multiple statistics chunk together to form new statistics chunks **/ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024, 1) gpuMergeColumnStatistics(statistics_chunk *chunks_out, const statistics_chunk *chunks_in, const statistics_merge_group *groups) { __shared__ __align__(8) merge_state_s state_g; merge_state_s *const s = &state_g; uint32_t t = threadIdx.x; statistics_dtype dtype; if (t < sizeof(statistics_merge_group) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->group)[t] = reinterpret_cast<const uint32_t *>(&groups[blockIdx.x])[t]; } __syncthreads(); if (t < sizeof(stats_column_desc) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->group.col)[t]; } __syncthreads(); dtype = s->col.stats_dtype; if (dtype >= dtype_bool8 && dtype <= dtype_decimal64) { mergeIntColumnStats(s, dtype, chunks_in + s->group.start_chunk, s->group.num_chunks, t); } else if (dtype >= dtype_float32 && dtype <= dtype_float64) { mergeFloatColumnStats(s, chunks_in + s->group.start_chunk, s->group.num_chunks, t); } else if (dtype == dtype_string) { mergeStringColumnStats(s, chunks_in + s->group.start_chunk, s->group.num_chunks, t); } __syncthreads(); if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) { reinterpret_cast<uint32_t *>(&chunks_out[blockIdx.x])[t] = reinterpret_cast<uint32_t *>(&s->ck)[t]; } } /** * @brief Launches kernel to gather column statistics * * @param[out] chunks Statistics results [num_chunks] * @param[in] groups Statistics row groups [num_chunks] * @param[in] num_chunks Number of chunks & rowgroups * @param[in] stream CUDA stream to use, default 0 * * @return cudaSuccess if successful, a CUDA error code otherwise **/ cudaError_t GatherColumnStatistics(statistics_chunk *chunks, const statistics_group *groups, uint32_t num_chunks, cudaStream_t stream) { gpuGatherColumnStatistics <<< num_chunks, 1024, 0, stream >>> (chunks, groups); return cudaSuccess; } /** * @brief Launches kernel to merge column statistics * * @param[out] chunks_out Statistics results [num_chunks] * @param[out] chunks_in Input statistics * @param[in] groups Statistics groups [num_chunks] * @param[in] num_chunks Number of chunks & groups * @param[in] stream CUDA stream to use, default 0 * * @return cudaSuccess if successful, a CUDA error code otherwise **/ cudaError_t MergeColumnStatistics(statistics_chunk *chunks_out, const statistics_chunk *chunks_in, const statistics_merge_group *groups, uint32_t num_chunks, cudaStream_t stream) { gpuMergeColumnStatistics <<< num_chunks, 1024, 0, stream >>> (chunks_out, chunks_in, groups); return cudaSuccess; } } // namespace io } // namespace cudf
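Both versions of these statistics kernels use the same two-stage reduction shape: each thread accumulates a private min/max/sum over its slice of rows or chunks, the SHFL_XOR-based helpers (WarpReduceMinFloat and friends) fold the value across the 32 lanes of a warp, lane 0 of every warp publishes its partial into the 32-entry warp_min/warp_max/warp_sum shared arrays, and the leading warps of the 1024-thread block then combine those partials, with different warps handling min, max, sum, and null counts. A minimal sketch of that pattern for a block-wide float minimum, written against __shfl_xor_sync instead of the SHFL_XOR macro from block_utils.cuh; it illustrates the technique and is not code from either file:

#include <cfloat>

// Stage 1: butterfly min-reduction across the 32 lanes of a warp.
__device__ float warp_min_f(float v) {
  for (int offset = 1; offset < 32; offset <<= 1)
    v = fminf(v, __shfl_xor_sync(0xffffffffu, v, offset));
  return v;
}

// Stage 2: one partial per warp in shared memory, reduced by the first warp.
// Assumes blockDim.x == 1024 (32 full warps), like the kernels above.
__global__ void block_min(const float *in, float *out, int n) {
  __shared__ float warp_min[32];
  int t = threadIdx.x;
  float v = FLT_MAX;
  for (int i = blockIdx.x * blockDim.x + t; i < n; i += blockDim.x * gridDim.x)
    v = fminf(v, in[i]);
  v = warp_min_f(v);
  if ((t & 0x1f) == 0) warp_min[t >> 5] = v;   // lane 0 publishes the warp partial
  __syncthreads();
  if (t < 32) {
    v = warp_min_f(warp_min[t]);
    if (t == 0) out[blockIdx.x] = v;
  }
}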
78b1dec34cbb702948863f68ab15c988d86c8479.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kernel_push1_start_stochastic(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int *d_counter, bool *d_finish) { int x1 = threadIdx.x; int y1 = threadIdx.y; int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; int thid = __umul24(y, width1) + x; __shared__ int height_fn[356]; int temp_mult = __umul24(y1 + 1, 34) + x1 + 1, temp_mult1 = __umul24(y1, 32) + x1; height_fn[temp_mult] = g_graph_height[thid]; (threadIdx.x == 31 && x < width1 - 1) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0; (threadIdx.x == 0 && x > 0) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0; (threadIdx.y == 7 && y < rows1 - 1) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0; (threadIdx.y == 0 && y > 0) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0; __syncthreads(); int flow_push = 0, min_flow_pushed = 0; flow_push = g_push_reser[thid]; if (thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0) { int temp_weight = 0; temp_weight = g_sink_weight[thid]; min_flow_pushed = flow_push; if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1) { (temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0; temp_weight = temp_weight - min_flow_pushed; g_sink_weight[thid] = temp_weight; atomicSub(&g_push_reser[thid], min_flow_pushed); flow_push = flow_push - min_flow_pushed; } } __syncthreads(); min_flow_pushed = g_left_weight[thid]; (flow_push > 0 && (((height_fn[temp_mult] == height_fn[temp_mult - 1] + 1) && min_flow_pushed > 0) || ((height_fn[temp_mult] == height_fn[temp_mult + 1] + 1) && g_right_weight[thid] > 0) || ((height_fn[temp_mult] == height_fn[temp_mult + 34] + 1) && g_down_weight[thid] > 0) || ((height_fn[temp_mult] == height_fn[temp_mult - 34] + 1) && g_up_weight[thid] > 0) || (height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0))) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0; if (thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0) { if (g_sink_weight[thid] > 0) { g_height_write[thid] = 1; } else { int min_height = graph_size; (min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1]) ? min_height = height_fn[temp_mult - 1] : 0; (g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0; (g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34]) ? min_height = height_fn[temp_mult + 34] : 0; (g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34]) ? min_height = height_fn[temp_mult - 34] : 0; g_height_write[thid] = min_height + 1; } } }
78b1dec34cbb702948863f68ab15c988d86c8479.cu
#include "includes.h" __global__ void kernel_push1_start_stochastic(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_sink_weight, int *g_push_reser, int *g_relabel_mask, int *g_graph_height, int *g_height_write, int graph_size, int width, int rows, int graph_size1, int width1, int rows1, int *d_relabel, int *d_stochastic, int *d_counter, bool *d_finish) { int x1 = threadIdx.x; int y1 = threadIdx.y; int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; int thid = __umul24(y, width1) + x; __shared__ int height_fn[356]; int temp_mult = __umul24(y1 + 1, 34) + x1 + 1, temp_mult1 = __umul24(y1, 32) + x1; height_fn[temp_mult] = g_graph_height[thid]; (threadIdx.x == 31 && x < width1 - 1) ? height_fn[temp_mult + 1] = (g_graph_height[thid + 1]) : 0; (threadIdx.x == 0 && x > 0) ? height_fn[temp_mult - 1] = (g_graph_height[thid - 1]) : 0; (threadIdx.y == 7 && y < rows1 - 1) ? height_fn[temp_mult + 34] = (g_graph_height[thid + width1]) : 0; (threadIdx.y == 0 && y > 0) ? height_fn[temp_mult - 34] = (g_graph_height[thid - width1]) : 0; __syncthreads(); int flow_push = 0, min_flow_pushed = 0; flow_push = g_push_reser[thid]; if (thid < graph_size1 && g_relabel_mask[thid] == 1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0) { int temp_weight = 0; temp_weight = g_sink_weight[thid]; min_flow_pushed = flow_push; if (temp_weight > 0 && flow_push > 0 && height_fn[temp_mult] == 1) { (temp_weight < flow_push) ? min_flow_pushed = temp_weight : 0; temp_weight = temp_weight - min_flow_pushed; g_sink_weight[thid] = temp_weight; atomicSub(&g_push_reser[thid], min_flow_pushed); flow_push = flow_push - min_flow_pushed; } } __syncthreads(); min_flow_pushed = g_left_weight[thid]; (flow_push > 0 && (((height_fn[temp_mult] == height_fn[temp_mult - 1] + 1) && min_flow_pushed > 0) || ((height_fn[temp_mult] == height_fn[temp_mult + 1] + 1) && g_right_weight[thid] > 0) || ((height_fn[temp_mult] == height_fn[temp_mult + 34] + 1) && g_down_weight[thid] > 0) || ((height_fn[temp_mult] == height_fn[temp_mult - 34] + 1) && g_up_weight[thid] > 0) || (height_fn[temp_mult] == 1 && g_sink_weight[thid] > 0))) ? g_relabel_mask[thid] = 1 : g_relabel_mask[thid] = 0; if (thid < graph_size1 && x < width - 1 && x > 0 && y < rows - 1 && y > 0) { if (g_sink_weight[thid] > 0) { g_height_write[thid] = 1; } else { int min_height = graph_size; (min_flow_pushed > 0 && min_height > height_fn[temp_mult - 1]) ? min_height = height_fn[temp_mult - 1] : 0; (g_right_weight[thid] > 0 && min_height > height_fn[temp_mult + 1]) ? min_height = height_fn[temp_mult + 1] : 0; (g_down_weight[thid] > 0 && min_height > height_fn[temp_mult + 34]) ? min_height = height_fn[temp_mult + 34] : 0; (g_up_weight[thid] > 0 && min_height > height_fn[temp_mult - 34]) ? min_height = height_fn[temp_mult - 34] : 0; g_height_write[thid] = min_height + 1; } } }
9c155c21fb53db00ba6ab946c802dadf7f3aea74.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <fstream> #include <Eigen/Dense> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "mpm_solver.h" #define IN_GRID(POS) (0 <= POS(0) && POS(0) < GRID_BOUND_X && \ 0 <= POS(1) && POS(1) < GRID_BOUND_Y && \ 0 <= POS(2) && POS(2) < GRID_BOUND_Z) __device__ float NX(const float& x) { if (x < 1.0f) { return 0.5f * (x * x * x) - (x * x) + (2.0f / 3.0f); } else if (x < 2.0f) { return (-1.0f / 6.0f) * (x * x * x) + (x * x) - (2.0f * x) + (4.0f / 3.0f); } else { return 0.0f; } } __device__ float dNX(const float& x) { float abs_x = fabs(x); if (abs_x < 1.0f) { return (1.5f * abs_x * x) - (2.0f * x); } else if (abs_x < 2.0f) { return -0.5f * (abs_x * x) + (2.0f * x) - (2.0f * x / abs_x); } else { return 0.0f; } } __device__ float weight(const Eigen::Vector3f& xpgp_diff) { return NX(xpgp_diff(0)) * NX(xpgp_diff(1)) * NX(xpgp_diff(2)); } __device__ Eigen::Vector3f gradientWeight(const Eigen::Vector3f& xpgp_diff) { const auto& v = xpgp_diff; return (1.0f / PARTICLE_DIAM) * Eigen::Vector3f(dNX(v(0)) * NX(fabs(v(1))) * NX(fabs(v(2))), NX(fabs(v(0))) * dNX(v(1)) * NX(fabs(v(2))), NX(fabs(v(0))) * NX(fabs(v(1))) * dNX(v(2))); /* return Eigen::Vector3f(dNX(v(0)) * NX(fabs(v(1))) * NX(fabs(v(2))), NX(fabs(v(0))) * dNX(v(1)) * NX(fabs(v(2))), NX(fabs(v(0))) * NX(fabs(v(1))) * dNX(v(2))); */ } __device__ int getGridIndex(const Eigen::Vector3i& pos) { return (pos(2) * GRID_BOUND_Y * GRID_BOUND_X) + (pos(1) * GRID_BOUND_X) + pos(0); } __device__ Eigen::Vector3f applyBoundaryCollision(const Eigen::Vector3f& position, const Eigen::Vector3f& velocity) { float vn; Eigen::Vector3f vt, normal, ret(velocity); bool collision; for (int i = 0; i < 3; i++) { collision = false; normal.setZero(); if (position(i) <= BOX_BOUNDARY_1) { collision = true; normal(i) = 1.0f; } else if (position(i) >= BOX_BOUNDARY_2) { collision = true; normal(i) = -1.0f; } if (collision) { vn = ret.dot(normal); if (vn >= 0.0f) continue; for (int j = 0; j < 3; j++) { if (j != i) { ret(j) *= STICKY_WALL; } } vt = ret - vn * normal; if (vt.norm() <= -FRICTION * vn) { ret.setZero(); return ret; } ret = vt + FRICTION * vn * vt.normalized(); } } return ret; } struct f { __host__ __device__ Grid operator()(const int& idx) { return Grid(Eigen::Vector3i(idx % GRID_BOUND_X, idx % (GRID_BOUND_X * GRID_BOUND_Y) / GRID_BOUND_X, idx / (GRID_BOUND_X * GRID_BOUND_Y))); } }; __host__ MPMSolver::MPMSolver(const std::vector<Particle>& _particles) { particles.resize(_particles.size()); thrust::copy(_particles.begin(), _particles.end(), particles.begin()); grids.resize(GRID_BOUND_X * GRID_BOUND_Y * GRID_BOUND_Z); thrust::tabulate( thrust::device, grids.begin(), grids.end(), f() ); } __host__ MPMSolver::MPMSolver(const std::vector<Particle>& _particles, const std::vector<Grid>& _grids) { particles.resize(_particles.size()); grids.resize(_grids.size()); thrust::copy(_particles.begin(), _particles.end(), particles.begin()); thrust::copy(_grids.begin(), _grids.end(), grids.begin()); } __host__ void MPMSolver::initialTransfer() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto ff = [=] __device__ (Particle& p) { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if 
(!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; int grid_idx = getGridIndex(_pos); float mi = p.mass * weight(diff.cwiseAbs()); atomicAdd(&(grid_ptr[grid_idx].mass), mi); } } } }; thrust::for_each(thrust::device, particles.begin(), particles.end(), ff); } __host__ void MPMSolver::resetGrid() { thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.reset(); } ); } __host__ void MPMSolver::transferData() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto ff = [=] __device__ (Particle& p) { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); Eigen::Matrix3f volume_stress = -1.0f * p.energyDerivative(); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; auto gw = gradientWeight(diff); int grid_idx = getGridIndex(_pos); Eigen::Vector3f f = volume_stress * gw; float mi = p.mass * weight(diff.cwiseAbs()); atomicAdd(&(grid_ptr[grid_idx].mass), mi); atomicAdd(&(grid_ptr[grid_idx].velocity(0)), p.velocity(0) * mi); atomicAdd(&(grid_ptr[grid_idx].velocity(1)), p.velocity(1) * mi); atomicAdd(&(grid_ptr[grid_idx].velocity(2)), p.velocity(2) * mi); atomicAdd(&(grid_ptr[grid_idx].force(0)), f(0)); atomicAdd(&(grid_ptr[grid_idx].force(1)), f(1)); atomicAdd(&(grid_ptr[grid_idx].force(2)), f(2)); } } } }; thrust::for_each(thrust::device, particles.begin(), particles.end(), ff); } __host__ void MPMSolver::computeVolumes() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto ff = [=] __device__ (Particle& p) { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); float p_density = 0.0f; float inv_grid_volume = h_inv * h_inv * h_inv; for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; int grid_idx = getGridIndex(_pos); p_density += grid_ptr[grid_idx].mass * inv_grid_volume * weight(diff.cwiseAbs()); } } } p.volume = p.mass / p_density; }; thrust::for_each(thrust::device, particles.begin(), particles.end(), ff); } __host__ void MPMSolver::updateVelocities() { thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.updateVelocity(); } ); } __host__ void MPMSolver::bodyCollisions() { thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.velocity_star = applyBoundaryCollision((g.idx.cast<float>() * PARTICLE_DIAM) + (TIMESTEP * g.velocity_star), g.velocity_star); } ); } #if ENABLE_IMPLICIT __host__ void MPMSolver::computeAr() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto computeDeltaElastic = [=] __device__ (const Particle& p) -> Eigen::Matrix3f { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); Eigen::Matrix3f f(Eigen::Matrix3f::Zero()); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; Eigen::Vector3f gw = gradientWeight(diff); int grid_idx = getGridIndex(_pos); f 
+= TIMESTEP * grid_ptr[grid_idx].r * gw.transpose(); } } } return f * p.def_elastic; }; auto updateGridDeltaForce = [=] __device__ (const Particle& p, const Eigen::Matrix3f& delta_force) { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; Eigen::Vector3f gw = gradientWeight(diff); int grid_idx = getGridIndex(_pos); auto fw = delta_force * gw; atomicAdd(&(grid_ptr[grid_idx].delta_force(0)), fw(0)); atomicAdd(&(grid_ptr[grid_idx].delta_force(1)), fw(1)); atomicAdd(&(grid_ptr[grid_idx].delta_force(2)), fw(2)); } } } }; thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { auto delta_elastic = computeDeltaElastic(p); auto delta_force = p.computeDeltaForce(delta_elastic); updateGridDeltaForce(p, delta_force); } ); thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.ar = g.r - BETA * TIMESTEP * g.delta_force * ((g.mass > 1e-8)? (1.0 / g.mass): 0.0f); g.delta_force.setZero(); } ); } __host__ void MPMSolver::integrateImplicit() { // http://alexey.stomakhin.com/research/siggraph2013_tech_report.pdf // https://nccastaff.bournemouth.ac.uk/jmacey/MastersProjects/MSc15/05Esther/thesisEMdeJong.pdf // initialize some variables thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.v = g.velocity_star; g.r = g.v; } ); computeAr(); thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.r = g.v - g.ar; g.p = g.r; } ); computeAr(); thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.ap = g.ar; } ); // run conjugate residual method for (int i = 0; i < SOLVE_MAX_ITERATIONS; i++) { thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { float rar = (g.r).cwiseProduct(g.ar).sum(), apap = (g.ap).cwiseProduct(g.ap).sum(); float alpha = (apap > 1e-8)? rar / apap: 0.0f; g.v += alpha * g.p; g.r += (-alpha * g.ap); g.rar_tmp = rar; } ); computeAr(); thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { float beta = (g.rar_tmp > 1e-8)? 
(g.r).cwiseProduct(g.ar).sum() / g.rar_tmp: 0.0f; g.p = g.r + beta * g.p; g.ap = g.ar + beta * g.ap; } ); } // copy back thrust::for_each( grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.velocity_star = g.v; } ); } #endif __host__ void MPMSolver::updateDeformationGradient() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto computeVelocityGradient = [=] __device__ (const Particle& p) -> Eigen::Matrix3f { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); Eigen::Matrix3f velocity_gradient(Eigen::Matrix3f::Zero()); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; Eigen::Vector3f gw = gradientWeight(diff); int grid_idx = getGridIndex(_pos); velocity_gradient += grid_ptr[grid_idx].velocity_star * gw.transpose(); } } } return velocity_gradient; }; thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { auto velocity_gradient = computeVelocityGradient(p); p.updateDeformationGradient(velocity_gradient); } ); } __host__ void MPMSolver::updateParticleVelocities() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto computeVelocity = [=] __device__ (const Particle& p) -> thrust::pair<Eigen::Vector3f, Eigen::Vector3f> { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); Eigen::Vector3f velocity_pic(Eigen::Vector3f::Zero()), velocity_flip(p.velocity); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; int grid_idx = getGridIndex(_pos); float w = weight(diff.cwiseAbs()); auto grid = grid_ptr[grid_idx]; velocity_pic += grid.velocity_star * w; velocity_flip += (grid.velocity_star - grid.velocity) * w; } } } return thrust::make_pair(velocity_pic, velocity_flip); }; thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { auto velocity_result = computeVelocity(p); p.updateVelocity(velocity_result.first, velocity_result.second); } ); } __host__ void MPMSolver::particleBodyCollisions() { thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { p.velocity = applyBoundaryCollision(p.position + TIMESTEP * p.velocity, p.velocity); } ); } __host__ void MPMSolver::updateParticlePositions() { thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { p.updatePosition(); } ); } __host__ void MPMSolver::simulate() { resetGrid(); if (initial_transfer) { initialTransfer(); computeVolumes(); initial_transfer = false; } else { transferData(); } updateVelocities(); bodyCollisions(); #if ENABLE_IMPLICIT integrateImplicit(); #endif updateDeformationGradient(); updateParticleVelocities(); particleBodyCollisions(); updateParticlePositions(); } __host__ void MPMSolver::bindGLBuffer(const GLuint buffer) { hipError_t ret; ret = hipGraphicsGLRegisterBuffer(&vbo_resource, buffer, hipGraphicsMapFlagsWriteDiscard); assert(ret == hipSuccess); } __host__ void MPMSolver::writeGLBuffer() { hipError_t ret; float4 *bufptr; size_t size; ret = hipGraphicsMapResources(1, &vbo_resource, NULL); assert(ret == hipSuccess); ret = 
hipGraphicsResourceGetMappedPointer((void **)&bufptr, &size, vbo_resource); assert(ret == hipSuccess); assert(bufptr != nullptr && size >= particles.size() * sizeof(float4)); thrust::transform( thrust::device, particles.begin(), particles.end(), bufptr, [=] __device__ (Particle& p) -> float4 { return make_float4(5.0 * p.position(0) - 2.5, 5.0 * p.position(1), 5.0 * p.position(2) - 2.5, 1.0); } ); ret = hipGraphicsUnmapResources(1, &vbo_resource, NULL); assert(ret == hipSuccess); } __host__ void MPMSolver::writeToFile(const std::string& filename) { std::ofstream output(filename, std::ios::binary | std::ios::out); int num_particles = particles.size(); float min_bound_x = 0, max_bound_x = GRID_BOUND_X; float min_bound_y = 0, max_bound_y = GRID_BOUND_Y; float min_bound_z = 0, max_bound_z = GRID_BOUND_Z; output.write(reinterpret_cast<char *>(&num_particles), sizeof(int)); output.write(reinterpret_cast<char *>(&min_bound_x), sizeof(float)); output.write(reinterpret_cast<char *>(&max_bound_x), sizeof(float)); output.write(reinterpret_cast<char *>(&min_bound_y), sizeof(float)); output.write(reinterpret_cast<char *>(&max_bound_y), sizeof(float)); output.write(reinterpret_cast<char *>(&min_bound_z), sizeof(float)); output.write(reinterpret_cast<char *>(&max_bound_z), sizeof(float)); thrust::copy( particles.begin(), particles.end(), std::ostream_iterator<Particle>(output) ); output.close(); }
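Both versions of the MPM solver flatten the 3-D grid the same way: getGridIndex packs (x, y, z) as (z * Y * X) + (y * X) + x, and the tabulate functor f inverts it with idx % X, idx % (X * Y) / X, and idx / (X * Y). A small host-side round-trip check of that mapping, using placeholder bounds in place of GRID_BOUND_X/Y/Z; illustrative only:

#include <cassert>

constexpr int BX = 4, BY = 3, BZ = 2;   // placeholders standing in for GRID_BOUND_X/Y/Z

// Forward map, same layout as getGridIndex(): x fastest, then y, then z.
constexpr int to_index(int x, int y, int z) { return (z * BY * BX) + (y * BX) + x; }

// Inverse map, same arithmetic as the tabulate functor f in the constructors.
struct Cell { int x, y, z; };
constexpr Cell to_cell(int idx) {
  return { idx % BX, (idx % (BX * BY)) / BX, idx / (BX * BY) };
}

int main() {
  for (int z = 0; z < BZ; ++z)
    for (int y = 0; y < BY; ++y)
      for (int x = 0; x < BX; ++x) {
        Cell c = to_cell(to_index(x, y, z));
        assert(c.x == x && c.y == y && c.z == z);   // the round trip is exact
      }
  return 0;
}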
9c155c21fb53db00ba6ab946c802dadf7f3aea74.cu
#include <cassert> #include <fstream> #include <Eigen/Dense> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <cuda.h> #include <cuda_runtime.h> #include "mpm_solver.h" #define IN_GRID(POS) (0 <= POS(0) && POS(0) < GRID_BOUND_X && \ 0 <= POS(1) && POS(1) < GRID_BOUND_Y && \ 0 <= POS(2) && POS(2) < GRID_BOUND_Z) __device__ float NX(const float& x) { if (x < 1.0f) { return 0.5f * (x * x * x) - (x * x) + (2.0f / 3.0f); } else if (x < 2.0f) { return (-1.0f / 6.0f) * (x * x * x) + (x * x) - (2.0f * x) + (4.0f / 3.0f); } else { return 0.0f; } } __device__ float dNX(const float& x) { float abs_x = fabs(x); if (abs_x < 1.0f) { return (1.5f * abs_x * x) - (2.0f * x); } else if (abs_x < 2.0f) { return -0.5f * (abs_x * x) + (2.0f * x) - (2.0f * x / abs_x); } else { return 0.0f; } } __device__ float weight(const Eigen::Vector3f& xpgp_diff) { return NX(xpgp_diff(0)) * NX(xpgp_diff(1)) * NX(xpgp_diff(2)); } __device__ Eigen::Vector3f gradientWeight(const Eigen::Vector3f& xpgp_diff) { const auto& v = xpgp_diff; return (1.0f / PARTICLE_DIAM) * Eigen::Vector3f(dNX(v(0)) * NX(fabs(v(1))) * NX(fabs(v(2))), NX(fabs(v(0))) * dNX(v(1)) * NX(fabs(v(2))), NX(fabs(v(0))) * NX(fabs(v(1))) * dNX(v(2))); /* return Eigen::Vector3f(dNX(v(0)) * NX(fabs(v(1))) * NX(fabs(v(2))), NX(fabs(v(0))) * dNX(v(1)) * NX(fabs(v(2))), NX(fabs(v(0))) * NX(fabs(v(1))) * dNX(v(2))); */ } __device__ int getGridIndex(const Eigen::Vector3i& pos) { return (pos(2) * GRID_BOUND_Y * GRID_BOUND_X) + (pos(1) * GRID_BOUND_X) + pos(0); } __device__ Eigen::Vector3f applyBoundaryCollision(const Eigen::Vector3f& position, const Eigen::Vector3f& velocity) { float vn; Eigen::Vector3f vt, normal, ret(velocity); bool collision; for (int i = 0; i < 3; i++) { collision = false; normal.setZero(); if (position(i) <= BOX_BOUNDARY_1) { collision = true; normal(i) = 1.0f; } else if (position(i) >= BOX_BOUNDARY_2) { collision = true; normal(i) = -1.0f; } if (collision) { vn = ret.dot(normal); if (vn >= 0.0f) continue; for (int j = 0; j < 3; j++) { if (j != i) { ret(j) *= STICKY_WALL; } } vt = ret - vn * normal; if (vt.norm() <= -FRICTION * vn) { ret.setZero(); return ret; } ret = vt + FRICTION * vn * vt.normalized(); } } return ret; } struct f { __host__ __device__ Grid operator()(const int& idx) { return Grid(Eigen::Vector3i(idx % GRID_BOUND_X, idx % (GRID_BOUND_X * GRID_BOUND_Y) / GRID_BOUND_X, idx / (GRID_BOUND_X * GRID_BOUND_Y))); } }; __host__ MPMSolver::MPMSolver(const std::vector<Particle>& _particles) { particles.resize(_particles.size()); thrust::copy(_particles.begin(), _particles.end(), particles.begin()); grids.resize(GRID_BOUND_X * GRID_BOUND_Y * GRID_BOUND_Z); thrust::tabulate( thrust::device, grids.begin(), grids.end(), f() ); } __host__ MPMSolver::MPMSolver(const std::vector<Particle>& _particles, const std::vector<Grid>& _grids) { particles.resize(_particles.size()); grids.resize(_grids.size()); thrust::copy(_particles.begin(), _particles.end(), particles.begin()); thrust::copy(_grids.begin(), _grids.end(), grids.begin()); } __host__ void MPMSolver::initialTransfer() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto ff = [=] __device__ (Particle& p) { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * 
PARTICLE_DIAM)) * h_inv; int grid_idx = getGridIndex(_pos); float mi = p.mass * weight(diff.cwiseAbs()); atomicAdd(&(grid_ptr[grid_idx].mass), mi); } } } }; thrust::for_each(thrust::device, particles.begin(), particles.end(), ff); } __host__ void MPMSolver::resetGrid() { thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.reset(); } ); } __host__ void MPMSolver::transferData() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto ff = [=] __device__ (Particle& p) { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); Eigen::Matrix3f volume_stress = -1.0f * p.energyDerivative(); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; auto gw = gradientWeight(diff); int grid_idx = getGridIndex(_pos); Eigen::Vector3f f = volume_stress * gw; float mi = p.mass * weight(diff.cwiseAbs()); atomicAdd(&(grid_ptr[grid_idx].mass), mi); atomicAdd(&(grid_ptr[grid_idx].velocity(0)), p.velocity(0) * mi); atomicAdd(&(grid_ptr[grid_idx].velocity(1)), p.velocity(1) * mi); atomicAdd(&(grid_ptr[grid_idx].velocity(2)), p.velocity(2) * mi); atomicAdd(&(grid_ptr[grid_idx].force(0)), f(0)); atomicAdd(&(grid_ptr[grid_idx].force(1)), f(1)); atomicAdd(&(grid_ptr[grid_idx].force(2)), f(2)); } } } }; thrust::for_each(thrust::device, particles.begin(), particles.end(), ff); } __host__ void MPMSolver::computeVolumes() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto ff = [=] __device__ (Particle& p) { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); float p_density = 0.0f; float inv_grid_volume = h_inv * h_inv * h_inv; for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; int grid_idx = getGridIndex(_pos); p_density += grid_ptr[grid_idx].mass * inv_grid_volume * weight(diff.cwiseAbs()); } } } p.volume = p.mass / p_density; }; thrust::for_each(thrust::device, particles.begin(), particles.end(), ff); } __host__ void MPMSolver::updateVelocities() { thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.updateVelocity(); } ); } __host__ void MPMSolver::bodyCollisions() { thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.velocity_star = applyBoundaryCollision((g.idx.cast<float>() * PARTICLE_DIAM) + (TIMESTEP * g.velocity_star), g.velocity_star); } ); } #if ENABLE_IMPLICIT __host__ void MPMSolver::computeAr() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto computeDeltaElastic = [=] __device__ (const Particle& p) -> Eigen::Matrix3f { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); Eigen::Matrix3f f(Eigen::Matrix3f::Zero()); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; Eigen::Vector3f gw = gradientWeight(diff); int grid_idx = getGridIndex(_pos); f += TIMESTEP * grid_ptr[grid_idx].r * gw.transpose(); } } } return f * p.def_elastic; 
}; auto updateGridDeltaForce = [=] __device__ (const Particle& p, const Eigen::Matrix3f& delta_force) { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; Eigen::Vector3f gw = gradientWeight(diff); int grid_idx = getGridIndex(_pos); auto fw = delta_force * gw; atomicAdd(&(grid_ptr[grid_idx].delta_force(0)), fw(0)); atomicAdd(&(grid_ptr[grid_idx].delta_force(1)), fw(1)); atomicAdd(&(grid_ptr[grid_idx].delta_force(2)), fw(2)); } } } }; thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { auto delta_elastic = computeDeltaElastic(p); auto delta_force = p.computeDeltaForce(delta_elastic); updateGridDeltaForce(p, delta_force); } ); thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.ar = g.r - BETA * TIMESTEP * g.delta_force * ((g.mass > 1e-8)? (1.0 / g.mass): 0.0f); g.delta_force.setZero(); } ); } __host__ void MPMSolver::integrateImplicit() { // http://alexey.stomakhin.com/research/siggraph2013_tech_report.pdf // https://nccastaff.bournemouth.ac.uk/jmacey/MastersProjects/MSc15/05Esther/thesisEMdeJong.pdf // initialize some variables thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.v = g.velocity_star; g.r = g.v; } ); computeAr(); thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.r = g.v - g.ar; g.p = g.r; } ); computeAr(); thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.ap = g.ar; } ); // run conjugate residual method for (int i = 0; i < SOLVE_MAX_ITERATIONS; i++) { thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { float rar = (g.r).cwiseProduct(g.ar).sum(), apap = (g.ap).cwiseProduct(g.ap).sum(); float alpha = (apap > 1e-8)? rar / apap: 0.0f; g.v += alpha * g.p; g.r += (-alpha * g.ap); g.rar_tmp = rar; } ); computeAr(); thrust::for_each( thrust::device, grids.begin(), grids.end(), [=] __device__ (Grid& g) { float beta = (g.rar_tmp > 1e-8)? 
(g.r).cwiseProduct(g.ar).sum() / g.rar_tmp: 0.0f; g.p = g.r + beta * g.p; g.ap = g.ar + beta * g.ap; } ); } // copy back thrust::for_each( grids.begin(), grids.end(), [=] __device__ (Grid& g) { g.velocity_star = g.v; } ); } #endif __host__ void MPMSolver::updateDeformationGradient() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto computeVelocityGradient = [=] __device__ (const Particle& p) -> Eigen::Matrix3f { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); Eigen::Matrix3f velocity_gradient(Eigen::Matrix3f::Zero()); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; Eigen::Vector3f gw = gradientWeight(diff); int grid_idx = getGridIndex(_pos); velocity_gradient += grid_ptr[grid_idx].velocity_star * gw.transpose(); } } } return velocity_gradient; }; thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { auto velocity_gradient = computeVelocityGradient(p); p.updateDeformationGradient(velocity_gradient); } ); } __host__ void MPMSolver::updateParticleVelocities() { Grid *grid_ptr = thrust::raw_pointer_cast(&grids[0]); auto computeVelocity = [=] __device__ (const Particle& p) -> thrust::pair<Eigen::Vector3f, Eigen::Vector3f> { float h_inv = 1.0f / PARTICLE_DIAM; Eigen::Vector3i pos((p.position * h_inv).cast<int>()); Eigen::Vector3f velocity_pic(Eigen::Vector3f::Zero()), velocity_flip(p.velocity); for (int z = -G2P; z <= G2P; z++) { for (int y = -G2P; y <= G2P; y++) { for (int x = -G2P; x <= G2P; x++) { auto _pos = pos + Eigen::Vector3i(x, y, z); if (!IN_GRID(_pos)) continue; Eigen::Vector3f diff = (p.position - (_pos.cast<float>() * PARTICLE_DIAM)) * h_inv; int grid_idx = getGridIndex(_pos); float w = weight(diff.cwiseAbs()); auto grid = grid_ptr[grid_idx]; velocity_pic += grid.velocity_star * w; velocity_flip += (grid.velocity_star - grid.velocity) * w; } } } return thrust::make_pair(velocity_pic, velocity_flip); }; thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { auto velocity_result = computeVelocity(p); p.updateVelocity(velocity_result.first, velocity_result.second); } ); } __host__ void MPMSolver::particleBodyCollisions() { thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { p.velocity = applyBoundaryCollision(p.position + TIMESTEP * p.velocity, p.velocity); } ); } __host__ void MPMSolver::updateParticlePositions() { thrust::for_each( thrust::device, particles.begin(), particles.end(), [=] __device__ (Particle& p) { p.updatePosition(); } ); } __host__ void MPMSolver::simulate() { resetGrid(); if (initial_transfer) { initialTransfer(); computeVolumes(); initial_transfer = false; } else { transferData(); } updateVelocities(); bodyCollisions(); #if ENABLE_IMPLICIT integrateImplicit(); #endif updateDeformationGradient(); updateParticleVelocities(); particleBodyCollisions(); updateParticlePositions(); } __host__ void MPMSolver::bindGLBuffer(const GLuint buffer) { cudaError_t ret; ret = cudaGraphicsGLRegisterBuffer(&vbo_resource, buffer, cudaGraphicsMapFlagsWriteDiscard); assert(ret == cudaSuccess); } __host__ void MPMSolver::writeGLBuffer() { cudaError_t ret; float4 *bufptr; size_t size; ret = cudaGraphicsMapResources(1, &vbo_resource, NULL); assert(ret == cudaSuccess); ret = 
cudaGraphicsResourceGetMappedPointer((void **)&bufptr, &size, vbo_resource); assert(ret == cudaSuccess); assert(bufptr != nullptr && size >= particles.size() * sizeof(float4)); thrust::transform( thrust::device, particles.begin(), particles.end(), bufptr, [=] __device__ (Particle& p) -> float4 { return make_float4(5.0 * p.position(0) - 2.5, 5.0 * p.position(1), 5.0 * p.position(2) - 2.5, 1.0); } ); ret = cudaGraphicsUnmapResources(1, &vbo_resource, NULL); assert(ret == cudaSuccess); } __host__ void MPMSolver::writeToFile(const std::string& filename) { std::ofstream output(filename, std::ios::binary | std::ios::out); int num_particles = particles.size(); float min_bound_x = 0, max_bound_x = GRID_BOUND_X; float min_bound_y = 0, max_bound_y = GRID_BOUND_Y; float min_bound_z = 0, max_bound_z = GRID_BOUND_Z; output.write(reinterpret_cast<char *>(&num_particles), sizeof(int)); output.write(reinterpret_cast<char *>(&min_bound_x), sizeof(float)); output.write(reinterpret_cast<char *>(&max_bound_x), sizeof(float)); output.write(reinterpret_cast<char *>(&min_bound_y), sizeof(float)); output.write(reinterpret_cast<char *>(&max_bound_y), sizeof(float)); output.write(reinterpret_cast<char *>(&min_bound_z), sizeof(float)); output.write(reinterpret_cast<char *>(&max_bound_z), sizeof(float)); thrust::copy( particles.begin(), particles.end(), std::ostream_iterator<Particle>(output) ); output.close(); }
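Every stage of the MPM solver runs as a thrust::for_each over a thrust::device_vector with a [=] __device__ lambda, scattering into the grid through the raw pointer obtained from thrust::raw_pointer_cast. A minimal sketch of that pattern with a hypothetical Particle struct and a 1-D mass grid; compiling device lambdas like this requires nvcc's --extended-lambda flag, and the update itself is a toy stand-in, not the solver's:

#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>

struct Particle { float3 position, velocity; };   // hypothetical, for illustration only

void step(thrust::device_vector<Particle>& particles,
          thrust::device_vector<float>& grid_mass, float dt, float cell_size) {
  // Raw pointer so the device lambda can scatter into the grid buffer.
  float *grid_ptr = thrust::raw_pointer_cast(grid_mass.data());
  int    n_cells  = (int)grid_mass.size();

  thrust::for_each(
      thrust::device, particles.begin(), particles.end(),
      [=] __device__ (Particle& p) {
        p.position.x += dt * p.velocity.x;              // toy advection step
        p.position.y += dt * p.velocity.y;
        p.position.z += dt * p.velocity.z;
        int cell = (int)(p.position.x / cell_size);     // toy 1-D cell index
        if (cell >= 0 && cell < n_cells)
          atomicAdd(&grid_ptr[cell], 1.0f);             // scatter, as transferData() does with atomicAdd
      });
}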
ca2230ca0264ad1f4c6d1da3bdb5d4c5d8f4dd98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S2_11.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real 
sv_sst[]={-86.5878770431026,0.00128467644044377,0.780190776060102,0.780031768712007,0.000174269347293292,0.485294334096334,0.00293619530930145,0.999998354577283,1.92718333358183e-08,1.88612615371809e-05,0.999770487779485,1.00715530958520,0.999996174757918,4.37641258651731e-05,0.481810864796698,10.5215306150078,139.090426708925}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters 
real parameters []={14.4067656911232,0.000251423214660846,0.000138644400006808,0.000171348168255836,0.271363539920663,0.152533735596316,0.167802952974848,4.50982141647208,0.0182925907891570,1.32742805103830,1087.64330176885,0.000521118477931967,0.130358693810526,0.0198787620687159,0.00477679600041959,4.82656795411010e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); 
CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + 
dt*(-sItot);

    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
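/* A small, hypothetical sketch (not part of the model file above) of the pitched
   state-variable layout it uses: equation `eq` of cell `cell` lives at byte offset
   pitch*eq plus cell*sizeof(real).  `real` is assumed to be float here, NEQ and the
   device-side `pitch` symbol mirror the file's conventions, and get_state/set_state
   and copy_voltage are illustrative helpers, not part of the model. */
typedef float real;
#define NEQ 17
__constant__ size_t pitch;

__device__ inline real get_state(const real *sv, int eq, int cell) {
    return *((const real *)((const char *)sv + pitch * eq) + cell);
}

__device__ inline void set_state(real *sv, int eq, int cell, real value) {
    *((real *)((char *)sv + pitch * eq) + cell) = value;
}

__global__ void copy_voltage(const real *sv, real *v_out, int num_cells) {
    int cell = blockIdx.x * blockDim.x + threadIdx.x;
    if (cell < num_cells) v_out[cell] = get_state(sv, 0, cell);  // equation 0 is the membrane voltage
}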
ca2230ca0264ad1f4c6d1da3bdb5d4c5d8f4dd98.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S2_11.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5878770431026,0.00128467644044377,0.780190776060102,0.780031768712007,0.000174269347293292,0.485294334096334,0.00293619530930145,0.999998354577283,1.92718333358183e-08,1.88612615371809e-05,0.999770487779485,1.00715530958520,0.999996174757918,4.37641258651731e-05,0.481810864796698,10.5215306150078,139.090426708925}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue 
matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.4067656911232,0.000251423214660846,0.000138644400006808,0.000171348168255836,0.271363539920663,0.152533735596316,0.167802952974848,4.50982141647208,0.0182925907891570,1.32742805103830,1087.64330176885,0.000521118477931967,0.130358693810526,0.0198787620687159,0.00477679600041959,4.82656795411010e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; 
GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; 
Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
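/* A brief illustration in code form (an addition, not part of the model sources) of
   the two update rules the solver above mixes: the gating variables use the
   Rush-Larsen exponential step y_inf - (y_inf - y)*exp(-dt/tau), while the membrane
   voltage uses an explicit Euler step v + dt*(-Itot). */
__device__ inline float rush_larsen_step(float y, float y_inf, float tau, float dt) {
    return y_inf - (y_inf - y) * expf(-dt / tau);   // exact for a linear gate ODE over one step
}

__device__ inline float euler_step(float v, float total_current, float dt) {
    return v + dt * (-total_current);               // forward Euler for the voltage equation
}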
aadfc94d65e58dc33f3d324f84597eea9277c209.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2015 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "../debug.h" #define N ( 1024 * 1024 * 2 * 2 ) #define RADIUS 7 #define THREADS_PER_BLOCK 256 __global__ void stencil_1d(int n, double *in, double *out) { /* calculate global index in the array */ /* insert code to calculate global index in the array using block and thread built-in variables */ int globalIndex = threadIdx.x + blockIdx.x * blockDim.x; /* return if my global index is larger than the array size */ if( globalIndex >= n ) return; /* code to handle the boundary conditions */ if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) ) { out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ; return; } /* end if */ double result = 0.0; for( int i = globalIndex-(RADIUS); i <= globalIndex+(RADIUS); i++ ) { /* add the required elements from the array "in" to the temporary variable "result */ result += in[i]; } out[globalIndex] = result; return; } int main() { /* get GPU device number and name */ int dev; hipDeviceProp_t deviceProp; checkCUDA( hipGetDevice( &dev ) ); checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); double *in, *out; double *d_in, *d_out; int size = N * sizeof( double ); /* allocate space for device copies of in, out */ checkCUDA( hipMalloc( (void **) &d_in, size ) ); checkCUDA( hipMalloc( (void **) &d_out, size ) ); /* allocate space for host copies of in, out and setup input values */ in = (double *)malloc( size ); out = (double *)malloc( size ); for( int i = 0; i < N; i++ ) { in[i] = (double) i; out[i] = 0; } /* copy inputs to device */ checkCUDA( hipMemcpy( d_in, in, size, hipMemcpyHostToDevice ) ); checkCUDA( hipMemset( d_out, 0, size ) ); /* calculate block and grid sizes */ dim3 threads( THREADS_PER_BLOCK, 1, 1); /* insert code for proper number of blocks in X dimension */ dim3 blocks( N / THREADS_PER_BLOCK + 1, 1, 1); /* start the timers */ hipEvent_t start, stop; checkCUDA( hipEventCreate( &start ) ); checkCUDA( hipEventCreate( &stop ) ); checkCUDA( hipEventRecord( start, 0 ) ); /* launch the kernel on the GPU */ hipLaunchKernelGGL(( stencil_1d), dim3(blocks), dim3(threads) , 0, 0, N, d_in, d_out ); checkKERNEL() /* stop the timers */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); float elapsedTime; checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); printf("Total time for %d elements was %f ms\n", N, elapsedTime ); /* copy result back to host */ checkCUDA( hipMemcpy( out, d_out, size, hipMemcpyDeviceToHost ) ); int success = 1; for( int i = 0; i < N; i++ ) { if( in[i]*( (double)RADIUS*2+1 ) != out[i] ) { printf("error in element %d in = %f out %f\n",i,in[i],out[i] ); success = 0; break; } /* end if */ } /* end for */ if( success == 1 ) printf("PASS\n"); else printf("FAIL\n"); /* clean up */ free(in); free(out); checkCUDA( hipFree( d_in ) ); checkCUDA( 
hipFree( d_out ) );
  checkCUDA( hipDeviceReset() );

  return 0;
} /* end main */
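/* A minimal note in code form (an addition, not from the original exercise): the
   launch above sizes the grid as N/THREADS_PER_BLOCK + 1, which allocates one extra
   empty block whenever N is an exact multiple of the block size.  The bounds check
   `if (globalIndex >= n) return;` keeps that harmless; the usual exact formula is the
   ceiling division shown here. */
#include <assert.h>

static inline int ceil_div(int n, int block) {
    return (n + block - 1) / block;
}

static inline void ceil_div_check(void) {
    assert(ceil_div(1000, 256) == 4);
    assert(ceil_div(1024, 256) == 4);   /* not 5, unlike 1024/256 + 1 */
}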
aadfc94d65e58dc33f3d324f84597eea9277c209.cu
/* * Copyright 2015 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "../debug.h" #define N ( 1024 * 1024 * 2 * 2 ) #define RADIUS 7 #define THREADS_PER_BLOCK 256 __global__ void stencil_1d(int n, double *in, double *out) { /* calculate global index in the array */ /* insert code to calculate global index in the array using block and thread built-in variables */ int globalIndex = threadIdx.x + blockIdx.x * blockDim.x; /* return if my global index is larger than the array size */ if( globalIndex >= n ) return; /* code to handle the boundary conditions */ if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) ) { out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ; return; } /* end if */ double result = 0.0; for( int i = globalIndex-(RADIUS); i <= globalIndex+(RADIUS); i++ ) { /* add the required elements from the array "in" to the temporary variable "result */ result += in[i]; } out[globalIndex] = result; return; } int main() { /* get GPU device number and name */ int dev; cudaDeviceProp deviceProp; checkCUDA( cudaGetDevice( &dev ) ); checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); double *in, *out; double *d_in, *d_out; int size = N * sizeof( double ); /* allocate space for device copies of in, out */ checkCUDA( cudaMalloc( (void **) &d_in, size ) ); checkCUDA( cudaMalloc( (void **) &d_out, size ) ); /* allocate space for host copies of in, out and setup input values */ in = (double *)malloc( size ); out = (double *)malloc( size ); for( int i = 0; i < N; i++ ) { in[i] = (double) i; out[i] = 0; } /* copy inputs to device */ checkCUDA( cudaMemcpy( d_in, in, size, cudaMemcpyHostToDevice ) ); checkCUDA( cudaMemset( d_out, 0, size ) ); /* calculate block and grid sizes */ dim3 threads( THREADS_PER_BLOCK, 1, 1); /* insert code for proper number of blocks in X dimension */ dim3 blocks( N / THREADS_PER_BLOCK + 1, 1, 1); /* start the timers */ cudaEvent_t start, stop; checkCUDA( cudaEventCreate( &start ) ); checkCUDA( cudaEventCreate( &stop ) ); checkCUDA( cudaEventRecord( start, 0 ) ); /* launch the kernel on the GPU */ stencil_1d<<< blocks, threads >>>( N, d_in, d_out ); checkKERNEL() /* stop the timers */ checkCUDA( cudaEventRecord( stop, 0 ) ); checkCUDA( cudaEventSynchronize( stop ) ); float elapsedTime; checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf("Total time for %d elements was %f ms\n", N, elapsedTime ); /* copy result back to host */ checkCUDA( cudaMemcpy( out, d_out, size, cudaMemcpyDeviceToHost ) ); int success = 1; for( int i = 0; i < N; i++ ) { if( in[i]*( (double)RADIUS*2+1 ) != out[i] ) { printf("error in element %d in = %f out %f\n",i,in[i],out[i] ); success = 0; break; } /* end if */ } /* end for */ if( success == 1 ) printf("PASS\n"); else printf("FAIL\n"); /* clean up */ free(in); free(out); checkCUDA( cudaFree( d_in ) ); checkCUDA( cudaFree( d_out ) ); checkCUDA( cudaDeviceReset() ); return 0; } /* end main */
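/* A small, hypothetical helper (not part of the exercise code) wrapping the
   cudaEvent timing pattern used in main() above: create two events, record around
   the region of interest, synchronize, and read the elapsed time in milliseconds. */
#include <cuda_runtime.h>

struct GpuTimer {
    cudaEvent_t start_, stop_;
    GpuTimer()  { cudaEventCreate(&start_); cudaEventCreate(&stop_); }
    ~GpuTimer() { cudaEventDestroy(start_); cudaEventDestroy(stop_); }
    void start() { cudaEventRecord(start_, 0); }
    float stop() {                         // returns elapsed milliseconds
        cudaEventRecord(stop_, 0);
        cudaEventSynchronize(stop_);
        float ms = 0.f;
        cudaEventElapsedTime(&ms, start_, stop_);
        return ms;
    }
};

/* usage sketch:
   GpuTimer t; t.start();
   stencil_1d<<<blocks, threads>>>(N, d_in, d_out);
   printf("kernel took %f ms\n", t.stop());                                  */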
f9c49ab5010066cc0d5e55eb6cae7188cddd610b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/zgetf2.cu normal z -> s, Tue Feb 9 16:05:36 2016 */ #include "magma_internal.h" #define sger_bs 512 // 512 is max threads for 1.x cards void magma_sgetf2_swap( magma_int_t n, float *x, magma_int_t i, magma_int_t j, magma_int_t incx, magma_queue_t queue ); void magma_sscal_sger( magma_int_t m, magma_int_t n, float *dA, magma_int_t ldda, magma_queue_t ); // TODO: this function could be in .cpp file -- it has no CUDA code in it. /** SGETF2 computes an LU factorization of a general m-by-n matrix A using partial pivoting with row interchanges. The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 2 BLAS version of the algorithm. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 and N <= 1024. On CUDA architecture 1.x cards, N <= 512. @param[in,out] dA REAL array, dimension (LDDA,N) On entry, the m by n matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] ipiv INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[in] queue magma_queue_t Queue to execute in. @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, U(k,k) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @ingroup magma_sgesv_aux ********************************************************************/ extern "C" magma_int_t magma_sgetf2_gpu( magma_int_t m, magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda, magma_int_t *ipiv, magma_queue_t queue, magma_int_t *info ) { #define dA(i, j) (dA + (i) + (j)*ldda) *info = 0; if (m < 0) { *info = -1; } else if (n < 0 || n > sger_bs) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } magma_int_t min_mn = min(m, n); magma_int_t j, jp; for (j=0; j < min_mn; j++) { hipDeviceSetCacheConfig( hipFuncCachePreferShared ); // Find pivot and test for singularity. jp = j - 1 + magma_isamax( m-j, dA(j,j), 1, queue ); ipiv[j] = jp + 1; // ipiv uses Fortran one-based index // Can't check value of dA since it is on GPU //if ( dA(jp, j) != 0.0) { hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); // Apply the interchange to columns 1:N. if (jp != j) { magma_sgetf2_swap( n, dA, j, jp, ldda, queue ); } // Compute elements J+1:M of J-th column. if (j < m) { magma_sscal_sger( m-j, n-j, dA(j, j), ldda, queue ); } //} //else if (*info == 0) { // *info = j; //} } return *info; } // =========================================================================== // TODO: use standard BLAS magma_sswap? 
#define sswap_bs 64 __global__ void kernel_sswap(int n, float *x, int i, int j, int incx) { int id = blockIdx.x * sswap_bs + threadIdx.x; if (id < n) { float tmp = x[i + incx*id]; x[i + incx*id] = x[j + incx*id]; x[j + incx*id] = tmp; } } void magma_sgetf2_swap( magma_int_t n, float *x, magma_int_t i, magma_int_t j, magma_int_t incx, magma_queue_t queue ) { /* sswap two row vectors: ith and jth */ dim3 threads( sswap_bs ); dim3 grid( magma_ceildiv( n, sswap_bs ) ); hipLaunchKernelGGL(( kernel_sswap) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, x, i, j, incx); } // =========================================================================== // dynamically allocated shared memory, set to size n when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ float shared_data[]; __global__ void kernel_sscal_sger(int m, int n, float *A, int lda) { float *shared_y = shared_data; int tid = blockIdx.x * sger_bs + threadIdx.x; float reg = MAGMA_S_ZERO; if (threadIdx.x < n) { shared_y[threadIdx.x] = A[lda * threadIdx.x]; } __syncthreads(); if (tid < m && tid > 0) { reg = A[tid]; reg *= MAGMA_S_DIV(MAGMA_S_ONE, shared_y[0]); A[tid] = reg; #pragma unroll for (int i=1; i < n; i++) { A[tid + i*lda] += (MAGMA_S_NEG_ONE) * shared_y[i] * reg; } } } void magma_sscal_sger( magma_int_t m, magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda, magma_queue_t queue ) { /* Specialized kernel that merges sscal and sger 1) sscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a sger Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ dim3 threads( sger_bs ); dim3 grid( magma_ceildiv( m, sger_bs ) ); size_t shared_size = sizeof(float)*(n); hipLaunchKernelGGL(( kernel_sscal_sger) , dim3(grid), dim3(threads), shared_size, queue->cuda_stream() , m, n, dA, ldda); }
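/* A hypothetical CPU reference (not MAGMA code) for what kernel_sswap above does on
   the device: swap rows i and j of a column-major matrix with leading dimension
   `incx`, across n columns.  Useful for checking the pivot-row interchange in
   isolation. */
#include <utility>

static void sswap_host(int n, float *x, int i, int j, int incx) {
    for (int col = 0; col < n; ++col)
        std::swap(x[i + (long)incx * col], x[j + (long)incx * col]);
}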
f9c49ab5010066cc0d5e55eb6cae7188cddd610b.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/zgetf2.cu normal z -> s, Tue Feb 9 16:05:36 2016 */ #include "magma_internal.h" #define sger_bs 512 // 512 is max threads for 1.x cards void magma_sgetf2_swap( magma_int_t n, float *x, magma_int_t i, magma_int_t j, magma_int_t incx, magma_queue_t queue ); void magma_sscal_sger( magma_int_t m, magma_int_t n, float *dA, magma_int_t ldda, magma_queue_t ); // TODO: this function could be in .cpp file -- it has no CUDA code in it. /** SGETF2 computes an LU factorization of a general m-by-n matrix A using partial pivoting with row interchanges. The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 2 BLAS version of the algorithm. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 and N <= 1024. On CUDA architecture 1.x cards, N <= 512. @param[in,out] dA REAL array, dimension (LDDA,N) On entry, the m by n matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] ipiv INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[in] queue magma_queue_t Queue to execute in. @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, U(k,k) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @ingroup magma_sgesv_aux ********************************************************************/ extern "C" magma_int_t magma_sgetf2_gpu( magma_int_t m, magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda, magma_int_t *ipiv, magma_queue_t queue, magma_int_t *info ) { #define dA(i, j) (dA + (i) + (j)*ldda) *info = 0; if (m < 0) { *info = -1; } else if (n < 0 || n > sger_bs) { *info = -2; } else if (ldda < max(1,m)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } magma_int_t min_mn = min(m, n); magma_int_t j, jp; for (j=0; j < min_mn; j++) { cudaDeviceSetCacheConfig( cudaFuncCachePreferShared ); // Find pivot and test for singularity. jp = j - 1 + magma_isamax( m-j, dA(j,j), 1, queue ); ipiv[j] = jp + 1; // ipiv uses Fortran one-based index // Can't check value of dA since it is on GPU //if ( dA(jp, j) != 0.0) { cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); // Apply the interchange to columns 1:N. if (jp != j) { magma_sgetf2_swap( n, dA, j, jp, ldda, queue ); } // Compute elements J+1:M of J-th column. if (j < m) { magma_sscal_sger( m-j, n-j, dA(j, j), ldda, queue ); } //} //else if (*info == 0) { // *info = j; //} } return *info; } // =========================================================================== // TODO: use standard BLAS magma_sswap? 
#define sswap_bs 64 __global__ void kernel_sswap(int n, float *x, int i, int j, int incx) { int id = blockIdx.x * sswap_bs + threadIdx.x; if (id < n) { float tmp = x[i + incx*id]; x[i + incx*id] = x[j + incx*id]; x[j + incx*id] = tmp; } } void magma_sgetf2_swap( magma_int_t n, float *x, magma_int_t i, magma_int_t j, magma_int_t incx, magma_queue_t queue ) { /* sswap two row vectors: ith and jth */ dim3 threads( sswap_bs ); dim3 grid( magma_ceildiv( n, sswap_bs ) ); kernel_sswap <<< grid, threads, 0, queue->cuda_stream() >>> (n, x, i, j, incx); } // =========================================================================== // dynamically allocated shared memory, set to size n when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ float shared_data[]; __global__ void kernel_sscal_sger(int m, int n, float *A, int lda) { float *shared_y = shared_data; int tid = blockIdx.x * sger_bs + threadIdx.x; float reg = MAGMA_S_ZERO; if (threadIdx.x < n) { shared_y[threadIdx.x] = A[lda * threadIdx.x]; } __syncthreads(); if (tid < m && tid > 0) { reg = A[tid]; reg *= MAGMA_S_DIV(MAGMA_S_ONE, shared_y[0]); A[tid] = reg; #pragma unroll for (int i=1; i < n; i++) { A[tid + i*lda] += (MAGMA_S_NEG_ONE) * shared_y[i] * reg; } } } void magma_sscal_sger( magma_int_t m, magma_int_t n, magmaFloat_ptr dA, magma_int_t ldda, magma_queue_t queue ) { /* Specialized kernel that merges sscal and sger 1) sscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a sger Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ dim3 threads( sger_bs ); dim3 grid( magma_ceildiv( m, sger_bs ) ); size_t shared_size = sizeof(float)*(n); kernel_sscal_sger <<< grid, threads, shared_size, queue->cuda_stream() >>> (m, n, dA, ldda); }
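/* A minimal, hypothetical example (not MAGMA code) of the dynamically sized
   shared-memory pattern kernel_sscal_sger above relies on: declare an
   `extern __shared__` array and pass the byte count as the third launch
   configuration parameter.  The kernel and names here are illustrative only. */
#include <cuda_runtime.h>

extern __shared__ float dyn_smem[];

__global__ void scale_by_first_element(float *A, int n) {
    if (threadIdx.x < n) dyn_smem[threadIdx.x] = A[threadIdx.x];
    __syncthreads();
    if (threadIdx.x < n && dyn_smem[0] != 0.f)
        A[threadIdx.x] = dyn_smem[threadIdx.x] / dyn_smem[0];
}

/* launch sketch: one block, n <= blockDim.x, n floats of dynamic shared memory
   scale_by_first_element<<<1, 256, n * sizeof(float), stream>>>(dA, n);      */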
1d839c5bca7f3e330737df3ab63cd678fec29f4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <blas_magma.h> #include <string.h> #include <vector> #include <algorithm> #include <util_quda.h> #include <quda_internal.h> #ifndef MAX #define MAX(a, b) (a > b) ? a : b; #endif #define MAGMA_17 //default version version of the MAGMA library #ifdef MAGMA_LIB #include <magma.h> #ifdef MAGMA_14 #define _cV 'V' #define _cU 'U' #define _cR 'R' #define _cL 'L' #define _cC 'C' #define _cN 'N' #define _cNV 'N' #else #define _cV MagmaVec #define _cU MagmaUpper #define _cR MagmaRight #define _cL MagmaLeft #define _cC MagmaConjTrans #define _cN MagmaNoTrans #define _cNV MagmaNoVec #endif #endif //Column major format: Big matrix times Little matrix. #ifdef MAGMA_LIB //Simplified version for the above: #define BLOCK_SIZE 16 __global__ void SMatCMatCuda_16x16(cuFloatComplex *outBuff, const int bldm, cuFloatComplex *sMat, const int sldm, hipDoubleComplex *cMat, const int cldm, const int scols) { //block coords: int by = blockIdx.x; int bx = blockIdx.y; //local coords: int ty = threadIdx.x; int tx = threadIdx.y; int sBegin = BLOCK_SIZE * by;//global offset in Y-direction for the Big matrix int sEnd = sBegin + sldm*scols - 1;//loop limit in X-direction for the Big matrix int sStep = sldm * BLOCK_SIZE;//step in X-direction for the Big matrix int cBegin = cldm * BLOCK_SIZE * bx;//global offset in X-direction for the Little matrix int cStep = BLOCK_SIZE;//step in Y-direction for the Little matrix hipDoubleComplex accum = make_cuDoubleComplex (0.0, 0.0); cuFloatComplex ftmp; hipDoubleComplex dtmp; for (int s = sBegin, c = cBegin; s <= sEnd; s += sStep, c += cStep) { __shared__ float reSmat[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float imSmat[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double reCmat[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double imCmat[BLOCK_SIZE][BLOCK_SIZE]; ftmp = sMat[s + sldm * tx + ty]; reSmat[ty][tx] = cuCrealf(ftmp); imSmat[ty][tx] = cuCimagf(ftmp); dtmp = cMat[c + cldm * tx + ty]; reCmat[ty][tx] = cuCreal(dtmp); imCmat[ty][tx] = cuCimag(dtmp); __syncthreads(); #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { ftmp = make_cuFloatComplex(reSmat[ty][k], imSmat[ty][k]); dtmp = make_cuDoubleComplex(reCmat[k][tx], imCmat[k][tx]); hipDoubleComplex dtmp2 = cuComplexFloatToDouble( ftmp ); accum = cuCfma(dtmp2, dtmp, accum); } __syncthreads(); } int idx = BLOCK_SIZE * by + bldm * BLOCK_SIZE * bx; outBuff[idx + bldm * tx + ty] = cuComplexDoubleToFloat( accum ); return; } #endif void sMM_v2(void *outBuff, const int bldm, void *sMat, const int srows, const int scols, const int sldm, void *cMat, const int crows, const int ccols, const int cldm) { #ifdef MAGMA_LIB // for test only: if(scols != crows) errorQuda("\nError: wrong dimensions\n"); const int block_size = 16; if (ccols % block_size != 0) errorQuda("\nError: wrong dimensions\n"); // Setup execution parameters (column-major format): dim3 threads(block_size, block_size); dim3 grid((srows+15) / threads.x, ccols / threads.y);//both ccols and srows must be multiple of block_size... 
hipFuncSetCacheConfig( SMatCMatCuda_16x16, hipFuncCachePreferShared ); hipLaunchKernelGGL(( SMatCMatCuda_16x16), dim3(grid), dim3(threads) , 0, 0, (cuFloatComplex*)outBuff, bldm, (cuFloatComplex*)sMat, sldm, (hipDoubleComplex*)cMat, cldm, scols); #endif } #undef BLOCK_SIZE void BlasMagmaArgs::OpenMagma(){ #ifdef MAGMA_LIB magma_int_t err = magma_init(); if(err != MAGMA_SUCCESS) errorQuda("\nError: cannot initialize MAGMA library\n"); int major, minor, micro; magma_version( &major, &minor, &micro); printfQuda("\nMAGMA library version: %d.%d\n\n", major, minor); #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } void BlasMagmaArgs::CloseMagma(){ #ifdef MAGMA_LIB if(magma_finalize() != MAGMA_SUCCESS) errorQuda("\nError: cannot close MAGMA library\n"); #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } BlasMagmaArgs::BlasMagmaArgs(const int prec) : m(0), max_nev(0), prec(prec), ldm(0), info(-1), llwork(0), lrwork(0), liwork(0), sideLR(0), htsize(0), dtsize(0), lwork_max(0), W(0), W2(0), hTau(0), dTau(0), lwork(0), rwork(0), iwork(0) { #ifdef MAGMA_LIB magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized... if(dev_info == 0) exit(-1); printfQuda("\nMAGMA will use device architecture %d.\n", dev_info); alloc = false; init = true; #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } BlasMagmaArgs::BlasMagmaArgs(const int m, const int ldm, const int prec) : m(m), max_nev(0), prec(prec), ldm(ldm), info(-1), sideLR(0), htsize(0), dtsize(0), W(0), hTau(0), dTau(0) { #ifdef MAGMA_LIB magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized... if(dev_info == 0) exit(-1); printfQuda("\nMAGMA will use device architecture %d.\n", dev_info); const int complex_prec = 2*prec; magma_int_t nbtrd = prec == 4 ? magma_get_chetrd_nb(m) : magma_get_zhetrd_nb(m);//ldm llwork = MAX(m + m*nbtrd, 2*m + m*m);//ldm lrwork = 1 + 5*m + 2*m*m;//ldm liwork = 3 + 5*m;//ldm magma_malloc_pinned((void**)&W2, ldm*m*complex_prec); magma_malloc_pinned((void**)&lwork, llwork*complex_prec); magma_malloc_cpu((void**)&rwork, lrwork*prec); magma_malloc_cpu((void**)&iwork, liwork*sizeof(magma_int_t)); init = true; alloc = true; #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } BlasMagmaArgs::BlasMagmaArgs(const int m, const int max_nev, const int ldm, const int prec) : m(m), max_nev(max_nev), prec(prec), ldm(ldm), info(-1) { #ifdef MAGMA_LIB magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized... if(dev_info == 0) exit(-1); printfQuda("\nMAGMA will use device architecture %d.\n", dev_info); const int complex_prec = 2*prec; magma_int_t nbtrd = prec == 4 ? magma_get_chetrd_nb(ldm) : magma_get_zhetrd_nb(ldm);//ldm<-m magma_int_t nbqrf = prec == 4 ? magma_get_cgeqrf_nb(ldm) : magma_get_zgeqrf_nb(ldm);//ldm htsize = max_nev;//MIN(l,k)-number of Householder vectors, but we always have k <= MIN(m,n) dtsize = ( 2*htsize + ((htsize + 31)/32)*32 )*nbqrf;//in general: MIN(m,k) for side = 'L' and MIN(n,k) for side = 'R' magma_malloc_pinned((void**)&hTau, htsize*complex_prec); magma_malloc((void**)&dTau, dtsize*complex_prec); //these are needed for the eigCG solver only. 
sideLR = (m - max_nev + nbqrf)*(m + nbqrf) + m*nbqrf;//ldm magma_malloc_pinned((void**)&W, sideLR*complex_prec); magma_malloc_pinned((void**)&W2, ldm*m*complex_prec); llwork = MAX(m + m*nbtrd, 2*m + m*m);//ldm lrwork = 1 + 5*m + 2*m*m;//ldm liwork = 3 + 5*m;//ldm magma_malloc_pinned((void**)&lwork, llwork*complex_prec); magma_malloc_cpu((void**)&rwork, lrwork*prec); magma_malloc_cpu((void**)&iwork, liwork*sizeof(magma_int_t)); init = true; alloc = true; #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } BlasMagmaArgs::~BlasMagmaArgs() { #ifdef MAGMA_LIB if(alloc == true) { if(dTau) magma_free(dTau); if(hTau) magma_free_pinned(hTau); if(W) magma_free_pinned(W); if(W2) magma_free_pinned(W2); if(lwork) magma_free_pinned(lwork); if(rwork) magma_free_cpu(rwork); if(iwork) magma_free_cpu(iwork); alloc = false; } init = false; #endif return; } void BlasMagmaArgs::MagmaHEEVD(void *dTvecm, void *hTvalm, const int prob_size, bool host) { #ifdef MAGMA_LIB if(prob_size > m) errorQuda("\nError in MagmaHEEVD (problem size cannot exceed given search space %d), exit ...\n", m); hipPointerAttribute_t ptr_attr; if(!host) { //check if dTvecm is a device pointer.. hipPointerGetAttributes(&ptr_attr, dTvecm); if(ptr_attr.memoryType != hipMemoryTypeDevice || ptr_attr.devicePointer == NULL ) errorQuda("Error in MagmaHEEVD, no device pointer found."); if(prec == 4) { magma_cheevd_gpu(_cV, _cU, prob_size, (magmaFloatComplex*)dTvecm, ldm, (float*)hTvalm, (magmaFloatComplex*)W2, ldm, (magmaFloatComplex*)lwork, llwork, (float*)rwork, lrwork, iwork, liwork, &info); if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_cheevd_gpu), exit ...\n"); } else { magma_zheevd_gpu(_cV, _cU, prob_size, (magmaDoubleComplex*)dTvecm, ldm, (double*)hTvalm, (magmaDoubleComplex*)W2, ldm, (magmaDoubleComplex*)lwork, llwork, (double*)rwork, lrwork, iwork, liwork, &info); if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_zheevd_gpu), exit ...\n"); } } else { //check if dTvecm is a device pointer.. 
hipPointerGetAttributes(&ptr_attr, dTvecm); if(ptr_attr.memoryType != hipMemoryTypeHost || ptr_attr.hostPointer == NULL ) errorQuda("Error in MagmaHEEVD, no host pointer found."); if(prec == 4) { magma_cheevd(_cV, _cU, prob_size, (magmaFloatComplex*)dTvecm, ldm, (float*)hTvalm, (magmaFloatComplex*)lwork, llwork, (float*)rwork, lrwork, iwork, liwork, &info); if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_cheevd_gpu), exit ...\n"); } else { magma_zheevd(_cV, _cU, prob_size, (magmaDoubleComplex*)dTvecm, ldm, (double*)hTvalm, (magmaDoubleComplex*)lwork, llwork, (double*)rwork, lrwork, iwork, liwork, &info); if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_zheevd_gpu), exit ...\n"); } } #endif return; } int BlasMagmaArgs::MagmaORTH_2nev(void *dTvecm, void *dTm) { const int l = max_nev; #ifdef MAGMA_LIB if(prec == 4) { magma_int_t nb = magma_get_cgeqrf_nb(m);//ldm magma_cgeqrf_gpu(m, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTau, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cgeqrf_gpu), exit ...\n"); //compute dTevecm0=QHTmQ //get TQ product: magma_cunmqr_gpu(_cR, _cN, m, m, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTm, ldm, (magmaFloatComplex *)W, sideLR, (magmaFloatComplex *)dTau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cunmqr_gpu), exit ...\n"); //get QHT product: magma_cunmqr_gpu(_cL, _cC, m, l, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTm, ldm, (magmaFloatComplex *)W, sideLR, (magmaFloatComplex *)dTau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cunmqr_gpu), exit ...\n"); } else { magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm magma_zgeqrf_gpu(m, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTau, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zgeqrf_gpu), exit ...\n"); //compute dTevecm0=QHTmQ //get TQ product: magma_zunmqr_gpu(_cR, _cN, m, m, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTm, ldm, (magmaDoubleComplex *)W, sideLR, (magmaDoubleComplex *)dTau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n"); //get QHT product: magma_zunmqr_gpu(_cL, _cC, m, l, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTm, ldm, (magmaDoubleComplex *)W, sideLR, (magmaDoubleComplex *)dTau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n"); } #endif return l; } void BlasMagmaArgs::RestartV(void *dV, const int vld, const int vlen, const int vprec, void *dTevecm, void *dTm) { #ifdef MAGMA_LIB if( (vld % 32) != 0) errorQuda("\nError: leading dimension must be multiple of the warp size\n"); const int cvprec = 2*vprec; const int l = max_nev; //int bufferSize = 2*vld+l*l; //int bufferBlock = bufferSize / l; int bufferBlock = (2*vld) / l; bufferBlock = (bufferBlock / 32) * 32;//corrected bufferBlock to be multiple of the warp size int bufferSize = (bufferBlock * l); void *buffer = 0; magma_malloc(&buffer, bufferSize*cvprec); hipMemset(buffer, 0, bufferSize*cvprec); if(prec == 4) { magma_int_t nb = magma_get_cgeqrf_nb(m);//ldm magma_cunmqr_gpu(_cL, _cN, m, l, l, (magmaFloatComplex*)dTevecm, ldm, (magmaFloatComplex*)hTau, (magmaFloatComplex*)dTm, ldm, (magmaFloatComplex*)W, sideLR, (magmaFloatComplex*)dTau, nb, &info); if(info != 0) errorQuda("\nError in RestartV 
(magma_cunmqr_gpu), exit ...\n"); } else { magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm magma_zunmqr_gpu(_cL, _cN, m, l, l, (magmaDoubleComplex*)dTevecm, ldm, (magmaDoubleComplex*)hTau, (magmaDoubleComplex*)dTm, ldm, (magmaDoubleComplex*)W, sideLR, (magmaDoubleComplex*)dTau, nb, &info); if(info != 0) errorQuda("\nError in RestartV (magma_zunmqr_gpu), exit ...\n"); } if(vprec == 4) { if(prec == vprec) errorQuda("\nError: option is not currently supported, exit ...\n"); for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock) { if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset); magmaFloatComplex *ptrV = &(((magmaFloatComplex*)dV)[blockOffset]); sMM_v2(buffer, bufferBlock, ptrV, bufferBlock, m, vld, dTm, m, l, ldm); hipMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, hipMemcpyDefault); } } else { for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock) { if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset); magmaDoubleComplex *ptrV = &(((magmaDoubleComplex*)dV)[blockOffset]); magmablas_zgemm(_cN, _cN, bufferBlock, l, m, MAGMA_Z_ONE, ptrV, vld, (magmaDoubleComplex*)dTm, ldm, MAGMA_Z_ZERO, (magmaDoubleComplex*)buffer, bufferBlock); hipMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, hipMemcpyDefault); } } magma_free(buffer); #endif return; } void BlasMagmaArgs::SolveProjMatrix(void* rhs, const int ldn, const int n, void* H, const int ldH) { #ifdef MAGMA_LIB const int complex_prec = 2*prec; void *tmp; magma_int_t *ipiv; magma_int_t err; magma_malloc_pinned((void**)&tmp, ldH*n*complex_prec); magma_malloc_pinned((void**)&ipiv, n*sizeof(magma_int_t)); memcpy(tmp, H, ldH*n*complex_prec); if (prec == 4) { err = magma_cgesv(n, 1, (magmaFloatComplex*)tmp, ldH, ipiv, (magmaFloatComplex*)rhs, ldn, &info); if(err != 0) errorQuda("\nError in SolveProjMatrix (magma_cgesv), exit ...\n"); } else { err = magma_zgesv(n, 1, (magmaDoubleComplex*)tmp, ldH, ipiv, (magmaDoubleComplex*)rhs, ldn, &info); if(err != 0) errorQuda("\nError in SolveProjMatrix (magma_zgesv), exit ...\n"); } magma_free_pinned(tmp); magma_free_pinned(ipiv); #endif return; } void BlasMagmaArgs::SolveGPUProjMatrix(void* rhs, const int ldn, const int n, void* H, const int ldH) { #ifdef MAGMA_LIB const int complex_prec = 2*prec; void *tmp; magma_int_t *ipiv; magma_int_t err; magma_malloc((void**)&tmp, ldH*n*complex_prec); magma_malloc_pinned((void**)&ipiv, n*sizeof(magma_int_t)); qudaMemcpy(tmp, H, ldH*n*complex_prec, hipMemcpyDefault); if (prec == 4) { err = magma_cgesv_gpu(n, 1, (magmaFloatComplex*)tmp, ldH, ipiv, (magmaFloatComplex*)rhs, ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_cgesv), exit ...\n"); } else { err = magma_zgesv_gpu(n, 1, (magmaDoubleComplex*)tmp, ldH, ipiv, (magmaDoubleComplex*)rhs, ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_zgesv), exit ...\n"); } magma_free(tmp); magma_free_pinned(ipiv); #endif return; } void BlasMagmaArgs::SpinorMatVec (void *spinorOut, const void *spinorSetIn, const int sld, const int slen, const void *vec, const int vlen) { #ifdef MAGMA_LIB if (prec == 4) { magmaFloatComplex *spmat = (magmaFloatComplex*)spinorSetIn; magmaFloatComplex *spout = (magmaFloatComplex*)spinorOut; magmablas_cgemv(_cN, slen, vlen, MAGMA_C_ONE, spmat, sld, (magmaFloatComplex*)vec, 1, MAGMA_C_ZERO, spout, 1);//in colour-major format } else { magmaDoubleComplex *spmat = (magmaDoubleComplex*)spinorSetIn; magmaDoubleComplex *spout = 
(magmaDoubleComplex*)spinorOut; magmablas_zgemv(_cN, slen, vlen, MAGMA_Z_ONE, spmat, sld, (magmaDoubleComplex*)vec, 1, MAGMA_Z_ZERO, spout, 1);//in colour-major format } #endif return; } void BlasMagmaArgs::MagmaRightNotrUNMQR(const int clen, const int qrlen, const int nrefls, void *QR, const int ldqr, void *Vm, const int cldn) { #ifdef MAGMA_LIB magma_int_t m = clen; magma_int_t n = qrlen; magma_int_t k = nrefls; magma_int_t lwork = -1; if(prec == 4) { } else { magmaDoubleComplex *dQR = NULL; magmaDoubleComplex *dtau = NULL; magmaDoubleComplex *htau = NULL; magmaDoubleComplex *hW = NULL; magmaDoubleComplex qW; magma_malloc_pinned((void**)&dQR, ldqr*k*sizeof(magmaDoubleComplex)); magma_malloc_pinned((void**)&htau, k*sizeof(magmaDoubleComplex)); // magma_malloc((void**)&dTau, k*sizeof(magmaDoubleComplex)); qudaMemcpy(dQR, QR, ldqr*k*sizeof(magmaDoubleComplex), hipMemcpyDefault); magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm // magma_zgeqrf_gpu(n, k, (magmaDoubleComplex *)dQR, ldqr, (magmaDoubleComplex *)htau, (magmaDoubleComplex *)dtau, &info);//identical to zgeqrf? magma_zunmqr_gpu(_cR, _cN, m, n, k, dQR, ldqr, htau, (magmaDoubleComplex *)Vm, cldn, &qW, lwork, dtau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n"); lwork = (magma_int_t) MAGMA_Z_REAL(qW); magma_malloc_cpu((void**)&hW, lwork*sizeof(magmaDoubleComplex)); //get TQ product: magma_zunmqr_gpu(_cR, _cN, m, n, k, dQR, ldqr, htau, (magmaDoubleComplex *)Vm, cldn, hW, lwork, dtau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n"); magma_free_cpu(hW); magma_free(dtau); magma_free_pinned(htau); magma_free_pinned(dQR); } #endif return; } //STL based version: // struct SortedEval{ double eval_nrm; int eval_idx; SortedEval(double val, int idx) : eval_nrm(val), eval_idx(idx) {}; }; bool cmp_eigen_nrms (SortedEval v1, SortedEval v2) { return (v1.eval_nrm < v2.eval_nrm); } void BlasMagmaArgs::Sort(const int m, const int ldm, void *eVecs, const int nev, void *unsorted_eVecs, void *eVals) { if (prec == 4) errorQuda("\nSingle precision is currently not supported.\n"); std::vector<SortedEval> sorted_evals_cntr; for(int e = 0; e < m; e++) sorted_evals_cntr.push_back( SortedEval( abs(((std::complex<double>*)eVals)[e]), e )); std::stable_sort(sorted_evals_cntr.begin(), sorted_evals_cntr.end(), cmp_eigen_nrms); for(int e = 0; e < nev; e++) { memcpy(&(((std::complex<double>*)eVecs)[ldm*e]), &(((std::complex<double>*)unsorted_eVecs)[ldm*( sorted_evals_cntr[e].eval_idx)]), (ldm)*sizeof(std::complex<double>)); //set zero in m+1 element: ((std::complex<double>*)eVecs)[ldm*e+m] = std::complex<double>(0.0, 0.0); } return; } ///NEW STUFF: void BlasMagmaArgs::ComputeQR(const int nev, Complex * evmat, const int m, const int ldm, Complex *tau) { #ifdef MAGMA_LIB magma_int_t _m = m;//matrix size magma_int_t _nev = nev;//matrix size magma_int_t _ldm = ldm; //Lapack parameters: magma_int_t info = 0; magma_int_t lwork = -1; magmaDoubleComplex *work = NULL; magmaDoubleComplex qwork; //parameter to extract optimal size of work magma_zgeqrf(_m, _nev, (magmaDoubleComplex *)evmat, _ldm, (magmaDoubleComplex *)tau, &qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: MAGMA_ZGEQRF, info %d\n",info); lwork = (magma_int_t) MAGMA_Z_REAL(qwork); magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex)); magma_zgeqrf(_m, _nev, (magmaDoubleComplex *)evmat, _ldm, (magmaDoubleComplex *)tau, work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZGEQRF, info %d\n",info); 
if(work) magma_free_cpu(work); #endif return; } void BlasMagmaArgs::LeftConjZUNMQR(const int k /*number of reflectors*/, const int n /*number of columns of H*/, Complex *H, const int dh /*number of rows*/, const int ldh, Complex * QR, const int ldqr, Complex *tau)//for vectors: n =1 { #ifdef MAGMA_LIB //Note: # rows of QR = # rows of H. magma_int_t _h = dh;//matrix size magma_int_t _n = n;//vector size magma_int_t _k = k; magma_int_t _ldh = ldh; magma_int_t _ldqr = ldqr; //Lapack parameters: magma_side_t _s = _cL;//apply QR-matrix from the left magma_trans_t _t = _cC;//conjugate magma_int_t info = 0; magma_int_t lwork = -1; magmaDoubleComplex *work = NULL; magmaDoubleComplex qwork; //parameter to extract optimal size of work //Pdagger_{k+1} PrevRes magma_zunmqr(_s, _t, _h, _n, _k, (magmaDoubleComplex *)QR, _ldqr, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldh, &qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); lwork = (magma_int_t) MAGMA_Z_REAL(qwork); magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex)); magma_zunmqr(_s, _t, _h, _n, _k, (magmaDoubleComplex *)QR, _ldqr, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldh, work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); if(work) magma_free_cpu(work); #endif return; } void BlasMagmaArgs::Construct_harmonic_matrix(Complex * const harmH, Complex * const conjH, const double beta2, const int m, const int ldH) { #ifdef MAGMA_LIB //Lapack parameters: magma_int_t _m = m; // magma_int_t _ldH = ldH; // magma_int_t info = 0; // magma_int_t I_ONE = 1; // magma_int_t *ipiv; magma_malloc_cpu((void**)&ipiv, ldH*sizeof(magma_int_t)); // //Construct H + beta*H^{-H} e_m*e_m^{T} // 1. need to solve H^{H}y = e_m; Complex *em = new Complex[m]; em[m-1] = beta2;//in fact, we construct beta*em, magma_zgesv(_m, I_ONE, (magmaDoubleComplex *)conjH, _ldH, ipiv, (magmaDoubleComplex *)em, _ldH, &info); if( (info != 0 ) ) errorQuda( "Error: DGESV, info %d\n",info); //make this cleaner! //check solution: for (int j = 0; j < m; j++) { Complex accum = 0.0; for (int i = 0; i < m; i++) accum = (accum + harmH[ldH*j+i]*em[(ipiv[i])-1]); } // 2. 
Construct matrix for harmonic Ritz vectors: // Adjust last column with KroneckerProd((H^{-H}*beta*em)=em, em^{T}=[0,....,1]): for(int i = 0; i < m; i++) harmH[ldH*(m-1)+i] += em[i]; magma_free_cpu(ipiv); // delete [] em; #endif return; } void BlasMagmaArgs::Compute_harmonic_matrix_eigenpairs(Complex *harmH, const int m, const int ldH, Complex *vr, Complex *evalues, const int ldv) { #ifdef MAGMA_LIB magma_int_t _m = m;//matrix size magma_int_t _ldH = ldH; magma_int_t _ldv = ldv; //Lapack parameters: magma_int_t info = 0; // magma_vec_t _r = _cV; magma_vec_t _l = _cNV;//no left eigenvectors magma_int_t lwork = -1; magmaDoubleComplex *work = NULL; magmaDoubleComplex qwork; //parameter to extract optimal size of work double *rwork = NULL; magma_malloc_cpu((void**)&rwork, 2*_m*sizeof(double)); //Get optimal work: magma_zgeev(_l, _r, _m, (magmaDoubleComplex *)harmH, _ldH, (magmaDoubleComplex *)evalues, NULL, _ldv, (magmaDoubleComplex *)vr, _ldv, &qwork, lwork, rwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZGEEVX, info %d\n",info); lwork = (magma_int_t) MAGMA_Z_REAL(qwork); magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex)); //now get eigenpairs: magma_zgeev(_l, _r, _m, (magmaDoubleComplex *)harmH, _ldH, (magmaDoubleComplex *)evalues, NULL, _ldv, (magmaDoubleComplex *)vr, _ldv, work, lwork, rwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZGEEVX, info %d\n",info); if(rwork) magma_free_cpu(rwork); // if(work) magma_free_cpu(work); // #endif return; } void BlasMagmaArgs::RestartVH(void *dV, const int vlen, const int vld, const int vprec, void *sortedHarVecs, void *H, const int ldh) { #ifdef MAGMA_LIB if(prec == 4) { errorQuda("\nError: single precision is not currently supported\n"); } if( (vld % 32) != 0) errorQuda("\nError: leading dimension must be multiple of the warp size\n"); int nev = (max_nev - 1); //(nev+1) - 1 for GMRESDR int _m = m;//matrix size int _k = nev; int _kp1 = max_nev; int _mp1 = (m+1); int _ldm = ldh; magma_side_t _s = _cR;//apply P-matrix from the right magma_trans_t _t = _cN;//no left eigenvectors int info = 0; int lwork = -1; Complex *work = NULL; Complex qwork; //parameter to extract optimal size of work const int cprec = 2*prec; //currently: sizeof(Complex) const int cvprec = 2*vprec; const int l = max_nev; int lbsize = 2*((nev / 16)*16); //const int bufferSize = 2*vld+lbsize*lbsize; //int bufferBlock = bufferSize / lbsize;//or: lbsize = (nev+1) int bufferBlock = (2*vld) / lbsize; bufferBlock = (bufferBlock / 32) * 32;//corrected bufferBlock to be multiple of the warp size int bufferSize = (bufferBlock * lbsize); void *buffer = NULL; void *dQmat = NULL; magma_malloc(&buffer, bufferSize*cvprec); hipMemset(buffer, 0, bufferSize*cvprec); magma_malloc(&dQmat, l*ldh*cprec); //GPU code: Complex *tau = new Complex[l];//nev+1 =>max_nev Complex *Qmat = new Complex[ldh*_mp1];//need (m+1)x(m+1) matrix on input... 
ComputeQR(l, (Complex*)sortedHarVecs, _mp1, ldh, tau);//lapack version //max_nev vectors are stored in Qmat (output): //restoreOrthVectors(Qmat, max_nev, (Complex*)sortedHarVecs, (m+1), ldh, tau); //Load diagonal units for(int d = 0; d < (m+1); d++) Qmat[ldh*d+d] = Complex(1.0, 0.0); magma_zunmqr(_s, _t, _mp1, _mp1, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)Qmat, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); lwork = (int) qwork.real(); work = new Complex[lwork]; magma_zunmqr(_s, _t, _mp1, _mp1, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)Qmat, _ldm, (magmaDoubleComplex *)work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); //Copy (nev+1) vectors on the device: qudaMemcpy(dQmat, Qmat, (max_nev)*ldh*cprec, hipMemcpyDefault); if(cvprec == sizeof(magmaDoubleComplex)) { for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock) { if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset); //printfQuda("\nBuffer block : %d\n", bufferBlock); magmaDoubleComplex *ptrV = &(((magmaDoubleComplex*)dV)[blockOffset]); magmablas_zgemm(_cN, _cN, bufferBlock, l, _mp1, MAGMA_Z_ONE, ptrV, vld, (magmaDoubleComplex*)dQmat, ldh, MAGMA_Z_ZERO, (magmaDoubleComplex*)buffer, bufferBlock); hipMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, hipMemcpyDefault);//make this async! } hipMemset(&(((magmaDoubleComplex*)dV)[vld*max_nev]), 0, (m+1-max_nev)*vld*sizeof(magmaDoubleComplex));//= m - nev } else // low precision field { for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock) { if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset); magmaFloatComplex *ptrV = &(((magmaFloatComplex*)dV)[blockOffset]); sMM_v2(buffer, bufferBlock, ptrV, bufferBlock, _mp1, vld, dQmat, _mp1, l, ldh); hipMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, hipMemcpyDefault); } hipMemset(&(((magmaFloatComplex*)dV)[vld*max_nev]), 0, (m+1-max_nev)*vld*sizeof(magmaFloatComplex));//= m - nev } //Construct H_new = Pdagger_{k+1} \bar{H}_{m} P_{k} //bar{H}_{m} P_{k} lwork = -1; magma_zunmqr(_s, _t, _mp1, _m, _k, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); delete[] work; lwork = (int) qwork.real(); work = new Complex[lwork]; magma_zunmqr(_s, _t, _mp1, _m, _k, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); //Pdagger_{k+1} PrevRes lwork = -1; _s = _cL; _t = _cC; magma_zunmqr(_s, _t, _mp1, _k, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); delete [] work; lwork = (int) qwork.real(); work = new Complex[lwork]; magma_zunmqr(_s, _t, _mp1, _k, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); const int len = ldh - nev-1; for(int i = 0; i < nev; i++) memset(&(((Complex*)H)[ldh*i+nev+1]), 
0, len*sizeof(Complex) ); // memset(&(((Complex*)H)[ldh*(nev)]), 0, (m-nev)*ldh*sizeof(Complex)); delete [] work; magma_free(buffer); magma_free(dQmat); delete [] Qmat; delete [] tau ; #endif return; } #define FMULS_GETRF(m_, n_) ( ((m_) < (n_)) \ ? (0.5 * (m_) * ((m_) * ((n_) - (1./3.) * (m_) - 1. ) + (n_)) + (2. / 3.) * (m_)) \ : (0.5 * (n_) * ((n_) * ((m_) - (1./3.) * (n_) - 1. ) + (m_)) + (2. / 3.) * (n_)) ) #define FADDS_GETRF(m_, n_) ( ((m_) < (n_)) \ ? (0.5 * (m_) * ((m_) * ((n_) - (1./3.) * (m_) ) - (n_)) + (1. / 6.) * (m_)) \ : (0.5 * (n_) * ((n_) * ((m_) - (1./3.) * (n_) ) - (m_)) + (1. / 6.) * (n_)) ) #define FLOPS_ZGETRF(m_, n_) (6. * FMULS_GETRF((double)(m_), (double)(n_)) + 2.0 * FADDS_GETRF((double)(m_), (double)(n_)) ) #define FLOPS_CGETRF(m_, n_) (6. * FMULS_GETRF((double)(m_), (double)(n_)) + 2.0 * FADDS_GETRF((double)(m_), (double)(n_)) ) #define FMULS_GETRI(n_) ( (n_) * ((5. / 6.) + (n_) * ((2. / 3.) * (n_) + 0.5)) ) #define FADDS_GETRI(n_) ( (n_) * ((5. / 6.) + (n_) * ((2. / 3.) * (n_) - 1.5)) ) #define FLOPS_ZGETRI(n_) (6. * FMULS_GETRI((double)(n_)) + 2.0 * FADDS_GETRI((double)(n_)) ) #define FLOPS_CGETRI(n_) (6. * FMULS_GETRI((double)(n_)) + 2.0 * FADDS_GETRI((double)(n_)) ) void BlasMagmaArgs::BatchInvertMatrix(void *Ainv_h, void* A_h, const int n, const int batch) { #ifdef MAGMA_LIB printfQuda("%s with n=%d and batch=%d\n", __func__, n, batch); magma_queue_t queue = 0; size_t size = 2*n*n*prec*batch; void *A_d = device_malloc(size); void *Ainv_d = device_malloc(size); qudaMemcpy(A_d, A_h, size, hipMemcpyHostToDevice); magma_int_t **dipiv_array = static_cast<magma_int_t**>(device_malloc(batch*sizeof(magma_int_t*))); magma_int_t *dipiv_tmp = static_cast<magma_int_t*>(device_malloc(batch*n*sizeof(magma_int_t))); set_ipointer(dipiv_array, dipiv_tmp, 1, 0, 0, n, batch, queue); magma_int_t *dinfo_array = static_cast<magma_int_t*>(device_malloc(batch*sizeof(magma_int_t))); magma_int_t *info_array = static_cast<magma_int_t*>(safe_malloc(batch*sizeof(magma_int_t))); magma_int_t err; // FIXME do this in pipelined fashion to reduce memory overhead. if (prec == 4) { magmaFloatComplex **A_array = static_cast<magmaFloatComplex**>(device_malloc(batch*sizeof(magmaFloatComplex*))); magmaFloatComplex **Ainv_array = static_cast<magmaFloatComplex**>(device_malloc(batch*sizeof(magmaFloatComplex*))); cset_pointer(A_array, static_cast<magmaFloatComplex*>(A_d), n, 0, 0, n*n, batch, queue); cset_pointer(Ainv_array, static_cast<magmaFloatComplex*>(Ainv_d), n, 0, 0, n*n, batch, queue); double magma_time = magma_sync_wtime(queue); err = magma_cgetrf_batched(n, n, A_array, n, dipiv_array, dinfo_array, batch, queue); //err = magma_cgetrf_nopiv_batched(n, n, A_array, n, dinfo_array, batch, queue); (no getri support for nopiv?) 
magma_time = magma_sync_wtime(queue) - magma_time; printfQuda("LU factorization completed in %f seconds with GFLOPS = %f\n", magma_time, 1e-9 * batch * FLOPS_CGETRF(n,n) / magma_time); if(err != 0) errorQuda("\nError in LU decomposition (magma_cgetrf), error code = %d\n", err); qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), hipMemcpyDeviceToHost); for (int i=0; i<batch; i++) { if (info_array[i] < 0) { errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i); } else if (info_array[i] > 0) { errorQuda("%d factorization completed but the factor U is exactly singular", i); } } magma_time = magma_sync_wtime(queue); err = magma_cgetri_outofplace_batched(n, A_array, n, dipiv_array, Ainv_array, n, dinfo_array, batch, queue); magma_time = magma_sync_wtime(queue) - magma_time; printfQuda("Matrix inversion completed in %f seconds with GFLOPS = %f\n", magma_time, 1e-9 * batch * FLOPS_CGETRI(n) / magma_time); if(err != 0) errorQuda("\nError in matrix inversion (magma_cgetri), error code = %d\n", err); qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), hipMemcpyDeviceToHost); for (int i=0; i<batch; i++) { if (info_array[i] < 0) { errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i); } else if (info_array[i] > 0) { errorQuda("%d factorization completed but the factor U is exactly singular", i); } } device_free(Ainv_array); device_free(A_array); } else if (prec == 8) { magmaDoubleComplex **A_array = static_cast<magmaDoubleComplex**>(device_malloc(batch*sizeof(magmaDoubleComplex*))); zset_pointer(A_array, static_cast<magmaDoubleComplex*>(A_d), n, 0, 0, n*n, batch, queue); magmaDoubleComplex **Ainv_array = static_cast<magmaDoubleComplex**>(device_malloc(batch*sizeof(magmaDoubleComplex*))); zset_pointer(Ainv_array, static_cast<magmaDoubleComplex*>(Ainv_d), n, 0, 0, n*n, batch, queue); double magma_time = magma_sync_wtime(queue); err = magma_zgetrf_batched(n, n, A_array, n, dipiv_array, dinfo_array, batch, queue); magma_time = magma_sync_wtime(queue) - magma_time; printfQuda("LU factorization completed in %f seconds with GFLOPS = %f\n", magma_time, 1e-9 * batch * FLOPS_ZGETRF(n,n) / magma_time); if(err != 0) errorQuda("\nError in LU decomposition (magma_zgetrf), error code = %d\n", err); qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), hipMemcpyDeviceToHost); for (int i=0; i<batch; i++) { if (info_array[i] < 0) { errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i); } else if (info_array[i] > 0) { errorQuda("%d factorization completed but the factor U is exactly singular", i); } } magma_time = magma_sync_wtime(queue); err = magma_zgetri_outofplace_batched(n, A_array, n, dipiv_array, Ainv_array, n, dinfo_array, batch, queue); magma_time = magma_sync_wtime(queue) - magma_time; printfQuda("Matrix inversion completed in %f seconds with GFLOPS = %f\n", magma_time, 1e-9 * batch * FLOPS_ZGETRI(n) / magma_time); if(err != 0) errorQuda("\nError in matrix inversion (magma_cgetri), error code = %d\n", err); qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), hipMemcpyDeviceToHost); for (int i=0; i<batch; i++) { if (info_array[i] < 0) { errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i); } else if (info_array[i] > 0) { errorQuda("%d factorization completed but the factor U is exactly singular", i); } } device_free(Ainv_array); device_free(A_array); } else 
{ errorQuda("%s not implemented for precision=%d", __func__, prec); } qudaMemcpy(Ainv_h, Ainv_d, size, hipMemcpyDeviceToHost); device_free(dipiv_tmp); device_free(dipiv_array); device_free(dinfo_array); host_free(info_array); device_free(Ainv_d); device_free(A_d); #endif return; } #ifdef MAGMA_LIB #undef _cV #undef _cU #undef _cR #undef _cL #undef _cC #undef _cN #undef _cNV #endif
1d839c5bca7f3e330737df3ab63cd678fec29f4b.cu
#include <blas_magma.h> #include <string.h> #include <vector> #include <algorithm> #include <util_quda.h> #include <quda_internal.h> #ifndef MAX #define MAX(a, b) (a > b) ? a : b; #endif #define MAGMA_17 //default version version of the MAGMA library #ifdef MAGMA_LIB #include <magma.h> #ifdef MAGMA_14 #define _cV 'V' #define _cU 'U' #define _cR 'R' #define _cL 'L' #define _cC 'C' #define _cN 'N' #define _cNV 'N' #else #define _cV MagmaVec #define _cU MagmaUpper #define _cR MagmaRight #define _cL MagmaLeft #define _cC MagmaConjTrans #define _cN MagmaNoTrans #define _cNV MagmaNoVec #endif #endif //Column major format: Big matrix times Little matrix. #ifdef MAGMA_LIB //Simplified version for the above: #define BLOCK_SIZE 16 __global__ void SMatCMatCuda_16x16(cuFloatComplex *outBuff, const int bldm, cuFloatComplex *sMat, const int sldm, cuDoubleComplex *cMat, const int cldm, const int scols) { //block coords: int by = blockIdx.x; int bx = blockIdx.y; //local coords: int ty = threadIdx.x; int tx = threadIdx.y; int sBegin = BLOCK_SIZE * by;//global offset in Y-direction for the Big matrix int sEnd = sBegin + sldm*scols - 1;//loop limit in X-direction for the Big matrix int sStep = sldm * BLOCK_SIZE;//step in X-direction for the Big matrix int cBegin = cldm * BLOCK_SIZE * bx;//global offset in X-direction for the Little matrix int cStep = BLOCK_SIZE;//step in Y-direction for the Little matrix cuDoubleComplex accum = make_cuDoubleComplex (0.0, 0.0); cuFloatComplex ftmp; cuDoubleComplex dtmp; for (int s = sBegin, c = cBegin; s <= sEnd; s += sStep, c += cStep) { __shared__ float reSmat[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float imSmat[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double reCmat[BLOCK_SIZE][BLOCK_SIZE]; __shared__ double imCmat[BLOCK_SIZE][BLOCK_SIZE]; ftmp = sMat[s + sldm * tx + ty]; reSmat[ty][tx] = cuCrealf(ftmp); imSmat[ty][tx] = cuCimagf(ftmp); dtmp = cMat[c + cldm * tx + ty]; reCmat[ty][tx] = cuCreal(dtmp); imCmat[ty][tx] = cuCimag(dtmp); __syncthreads(); #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { ftmp = make_cuFloatComplex(reSmat[ty][k], imSmat[ty][k]); dtmp = make_cuDoubleComplex(reCmat[k][tx], imCmat[k][tx]); cuDoubleComplex dtmp2 = cuComplexFloatToDouble( ftmp ); accum = cuCfma(dtmp2, dtmp, accum); } __syncthreads(); } int idx = BLOCK_SIZE * by + bldm * BLOCK_SIZE * bx; outBuff[idx + bldm * tx + ty] = cuComplexDoubleToFloat( accum ); return; } #endif void sMM_v2(void *outBuff, const int bldm, void *sMat, const int srows, const int scols, const int sldm, void *cMat, const int crows, const int ccols, const int cldm) { #ifdef MAGMA_LIB // for test only: if(scols != crows) errorQuda("\nError: wrong dimensions\n"); const int block_size = 16; if (ccols % block_size != 0) errorQuda("\nError: wrong dimensions\n"); // Setup execution parameters (column-major format): dim3 threads(block_size, block_size); dim3 grid((srows+15) / threads.x, ccols / threads.y);//both ccols and srows must be multiple of block_size... 
cudaFuncSetCacheConfig( SMatCMatCuda_16x16, cudaFuncCachePreferShared ); SMatCMatCuda_16x16<<< grid, threads >>>((cuFloatComplex*)outBuff, bldm, (cuFloatComplex*)sMat, sldm, (cuDoubleComplex*)cMat, cldm, scols); #endif } #undef BLOCK_SIZE void BlasMagmaArgs::OpenMagma(){ #ifdef MAGMA_LIB magma_int_t err = magma_init(); if(err != MAGMA_SUCCESS) errorQuda("\nError: cannot initialize MAGMA library\n"); int major, minor, micro; magma_version( &major, &minor, &micro); printfQuda("\nMAGMA library version: %d.%d\n\n", major, minor); #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } void BlasMagmaArgs::CloseMagma(){ #ifdef MAGMA_LIB if(magma_finalize() != MAGMA_SUCCESS) errorQuda("\nError: cannot close MAGMA library\n"); #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } BlasMagmaArgs::BlasMagmaArgs(const int prec) : m(0), max_nev(0), prec(prec), ldm(0), info(-1), llwork(0), lrwork(0), liwork(0), sideLR(0), htsize(0), dtsize(0), lwork_max(0), W(0), W2(0), hTau(0), dTau(0), lwork(0), rwork(0), iwork(0) { #ifdef MAGMA_LIB magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized... if(dev_info == 0) exit(-1); printfQuda("\nMAGMA will use device architecture %d.\n", dev_info); alloc = false; init = true; #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } BlasMagmaArgs::BlasMagmaArgs(const int m, const int ldm, const int prec) : m(m), max_nev(0), prec(prec), ldm(ldm), info(-1), sideLR(0), htsize(0), dtsize(0), W(0), hTau(0), dTau(0) { #ifdef MAGMA_LIB magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized... if(dev_info == 0) exit(-1); printfQuda("\nMAGMA will use device architecture %d.\n", dev_info); const int complex_prec = 2*prec; magma_int_t nbtrd = prec == 4 ? magma_get_chetrd_nb(m) : magma_get_zhetrd_nb(m);//ldm llwork = MAX(m + m*nbtrd, 2*m + m*m);//ldm lrwork = 1 + 5*m + 2*m*m;//ldm liwork = 3 + 5*m;//ldm magma_malloc_pinned((void**)&W2, ldm*m*complex_prec); magma_malloc_pinned((void**)&lwork, llwork*complex_prec); magma_malloc_cpu((void**)&rwork, lrwork*prec); magma_malloc_cpu((void**)&iwork, liwork*sizeof(magma_int_t)); init = true; alloc = true; #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } BlasMagmaArgs::BlasMagmaArgs(const int m, const int max_nev, const int ldm, const int prec) : m(m), max_nev(max_nev), prec(prec), ldm(ldm), info(-1) { #ifdef MAGMA_LIB magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized... if(dev_info == 0) exit(-1); printfQuda("\nMAGMA will use device architecture %d.\n", dev_info); const int complex_prec = 2*prec; magma_int_t nbtrd = prec == 4 ? magma_get_chetrd_nb(ldm) : magma_get_zhetrd_nb(ldm);//ldm<-m magma_int_t nbqrf = prec == 4 ? magma_get_cgeqrf_nb(ldm) : magma_get_zgeqrf_nb(ldm);//ldm htsize = max_nev;//MIN(l,k)-number of Householder vectors, but we always have k <= MIN(m,n) dtsize = ( 2*htsize + ((htsize + 31)/32)*32 )*nbqrf;//in general: MIN(m,k) for side = 'L' and MIN(n,k) for side = 'R' magma_malloc_pinned((void**)&hTau, htsize*complex_prec); magma_malloc((void**)&dTau, dtsize*complex_prec); //these are needed for the eigCG solver only. 
sideLR = (m - max_nev + nbqrf)*(m + nbqrf) + m*nbqrf;//ldm magma_malloc_pinned((void**)&W, sideLR*complex_prec); magma_malloc_pinned((void**)&W2, ldm*m*complex_prec); llwork = MAX(m + m*nbtrd, 2*m + m*m);//ldm lrwork = 1 + 5*m + 2*m*m;//ldm liwork = 3 + 5*m;//ldm magma_malloc_pinned((void**)&lwork, llwork*complex_prec); magma_malloc_cpu((void**)&rwork, lrwork*prec); magma_malloc_cpu((void**)&iwork, liwork*sizeof(magma_int_t)); init = true; alloc = true; #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } BlasMagmaArgs::~BlasMagmaArgs() { #ifdef MAGMA_LIB if(alloc == true) { if(dTau) magma_free(dTau); if(hTau) magma_free_pinned(hTau); if(W) magma_free_pinned(W); if(W2) magma_free_pinned(W2); if(lwork) magma_free_pinned(lwork); if(rwork) magma_free_cpu(rwork); if(iwork) magma_free_cpu(iwork); alloc = false; } init = false; #endif return; } void BlasMagmaArgs::MagmaHEEVD(void *dTvecm, void *hTvalm, const int prob_size, bool host) { #ifdef MAGMA_LIB if(prob_size > m) errorQuda("\nError in MagmaHEEVD (problem size cannot exceed given search space %d), exit ...\n", m); cudaPointerAttributes ptr_attr; if(!host) { //check if dTvecm is a device pointer.. cudaPointerGetAttributes(&ptr_attr, dTvecm); if(ptr_attr.memoryType != cudaMemoryTypeDevice || ptr_attr.devicePointer == NULL ) errorQuda("Error in MagmaHEEVD, no device pointer found."); if(prec == 4) { magma_cheevd_gpu(_cV, _cU, prob_size, (magmaFloatComplex*)dTvecm, ldm, (float*)hTvalm, (magmaFloatComplex*)W2, ldm, (magmaFloatComplex*)lwork, llwork, (float*)rwork, lrwork, iwork, liwork, &info); if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_cheevd_gpu), exit ...\n"); } else { magma_zheevd_gpu(_cV, _cU, prob_size, (magmaDoubleComplex*)dTvecm, ldm, (double*)hTvalm, (magmaDoubleComplex*)W2, ldm, (magmaDoubleComplex*)lwork, llwork, (double*)rwork, lrwork, iwork, liwork, &info); if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_zheevd_gpu), exit ...\n"); } } else { //check if dTvecm is a device pointer.. 
cudaPointerGetAttributes(&ptr_attr, dTvecm); if(ptr_attr.memoryType != cudaMemoryTypeHost || ptr_attr.hostPointer == NULL ) errorQuda("Error in MagmaHEEVD, no host pointer found."); if(prec == 4) { magma_cheevd(_cV, _cU, prob_size, (magmaFloatComplex*)dTvecm, ldm, (float*)hTvalm, (magmaFloatComplex*)lwork, llwork, (float*)rwork, lrwork, iwork, liwork, &info); if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_cheevd_gpu), exit ...\n"); } else { magma_zheevd(_cV, _cU, prob_size, (magmaDoubleComplex*)dTvecm, ldm, (double*)hTvalm, (magmaDoubleComplex*)lwork, llwork, (double*)rwork, lrwork, iwork, liwork, &info); if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_zheevd_gpu), exit ...\n"); } } #endif return; } int BlasMagmaArgs::MagmaORTH_2nev(void *dTvecm, void *dTm) { const int l = max_nev; #ifdef MAGMA_LIB if(prec == 4) { magma_int_t nb = magma_get_cgeqrf_nb(m);//ldm magma_cgeqrf_gpu(m, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTau, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cgeqrf_gpu), exit ...\n"); //compute dTevecm0=QHTmQ //get TQ product: magma_cunmqr_gpu(_cR, _cN, m, m, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTm, ldm, (magmaFloatComplex *)W, sideLR, (magmaFloatComplex *)dTau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cunmqr_gpu), exit ...\n"); //get QHT product: magma_cunmqr_gpu(_cL, _cC, m, l, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTm, ldm, (magmaFloatComplex *)W, sideLR, (magmaFloatComplex *)dTau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cunmqr_gpu), exit ...\n"); } else { magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm magma_zgeqrf_gpu(m, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTau, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zgeqrf_gpu), exit ...\n"); //compute dTevecm0=QHTmQ //get TQ product: magma_zunmqr_gpu(_cR, _cN, m, m, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTm, ldm, (magmaDoubleComplex *)W, sideLR, (magmaDoubleComplex *)dTau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n"); //get QHT product: magma_zunmqr_gpu(_cL, _cC, m, l, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTm, ldm, (magmaDoubleComplex *)W, sideLR, (magmaDoubleComplex *)dTau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n"); } #endif return l; } void BlasMagmaArgs::RestartV(void *dV, const int vld, const int vlen, const int vprec, void *dTevecm, void *dTm) { #ifdef MAGMA_LIB if( (vld % 32) != 0) errorQuda("\nError: leading dimension must be multiple of the warp size\n"); const int cvprec = 2*vprec; const int l = max_nev; //int bufferSize = 2*vld+l*l; //int bufferBlock = bufferSize / l; int bufferBlock = (2*vld) / l; bufferBlock = (bufferBlock / 32) * 32;//corrected bufferBlock to be multiple of the warp size int bufferSize = (bufferBlock * l); void *buffer = 0; magma_malloc(&buffer, bufferSize*cvprec); cudaMemset(buffer, 0, bufferSize*cvprec); if(prec == 4) { magma_int_t nb = magma_get_cgeqrf_nb(m);//ldm magma_cunmqr_gpu(_cL, _cN, m, l, l, (magmaFloatComplex*)dTevecm, ldm, (magmaFloatComplex*)hTau, (magmaFloatComplex*)dTm, ldm, (magmaFloatComplex*)W, sideLR, (magmaFloatComplex*)dTau, nb, &info); if(info != 0) errorQuda("\nError in RestartV 
(magma_cunmqr_gpu), exit ...\n"); } else { magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm magma_zunmqr_gpu(_cL, _cN, m, l, l, (magmaDoubleComplex*)dTevecm, ldm, (magmaDoubleComplex*)hTau, (magmaDoubleComplex*)dTm, ldm, (magmaDoubleComplex*)W, sideLR, (magmaDoubleComplex*)dTau, nb, &info); if(info != 0) errorQuda("\nError in RestartV (magma_zunmqr_gpu), exit ...\n"); } if(vprec == 4) { if(prec == vprec) errorQuda("\nError: option is not currently supported, exit ...\n"); for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock) { if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset); magmaFloatComplex *ptrV = &(((magmaFloatComplex*)dV)[blockOffset]); sMM_v2(buffer, bufferBlock, ptrV, bufferBlock, m, vld, dTm, m, l, ldm); cudaMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, cudaMemcpyDefault); } } else { for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock) { if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset); magmaDoubleComplex *ptrV = &(((magmaDoubleComplex*)dV)[blockOffset]); magmablas_zgemm(_cN, _cN, bufferBlock, l, m, MAGMA_Z_ONE, ptrV, vld, (magmaDoubleComplex*)dTm, ldm, MAGMA_Z_ZERO, (magmaDoubleComplex*)buffer, bufferBlock); cudaMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, cudaMemcpyDefault); } } magma_free(buffer); #endif return; } void BlasMagmaArgs::SolveProjMatrix(void* rhs, const int ldn, const int n, void* H, const int ldH) { #ifdef MAGMA_LIB const int complex_prec = 2*prec; void *tmp; magma_int_t *ipiv; magma_int_t err; magma_malloc_pinned((void**)&tmp, ldH*n*complex_prec); magma_malloc_pinned((void**)&ipiv, n*sizeof(magma_int_t)); memcpy(tmp, H, ldH*n*complex_prec); if (prec == 4) { err = magma_cgesv(n, 1, (magmaFloatComplex*)tmp, ldH, ipiv, (magmaFloatComplex*)rhs, ldn, &info); if(err != 0) errorQuda("\nError in SolveProjMatrix (magma_cgesv), exit ...\n"); } else { err = magma_zgesv(n, 1, (magmaDoubleComplex*)tmp, ldH, ipiv, (magmaDoubleComplex*)rhs, ldn, &info); if(err != 0) errorQuda("\nError in SolveProjMatrix (magma_zgesv), exit ...\n"); } magma_free_pinned(tmp); magma_free_pinned(ipiv); #endif return; } void BlasMagmaArgs::SolveGPUProjMatrix(void* rhs, const int ldn, const int n, void* H, const int ldH) { #ifdef MAGMA_LIB const int complex_prec = 2*prec; void *tmp; magma_int_t *ipiv; magma_int_t err; magma_malloc((void**)&tmp, ldH*n*complex_prec); magma_malloc_pinned((void**)&ipiv, n*sizeof(magma_int_t)); qudaMemcpy(tmp, H, ldH*n*complex_prec, cudaMemcpyDefault); if (prec == 4) { err = magma_cgesv_gpu(n, 1, (magmaFloatComplex*)tmp, ldH, ipiv, (magmaFloatComplex*)rhs, ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_cgesv), exit ...\n"); } else { err = magma_zgesv_gpu(n, 1, (magmaDoubleComplex*)tmp, ldH, ipiv, (magmaDoubleComplex*)rhs, ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_zgesv), exit ...\n"); } magma_free(tmp); magma_free_pinned(ipiv); #endif return; } void BlasMagmaArgs::SpinorMatVec (void *spinorOut, const void *spinorSetIn, const int sld, const int slen, const void *vec, const int vlen) { #ifdef MAGMA_LIB if (prec == 4) { magmaFloatComplex *spmat = (magmaFloatComplex*)spinorSetIn; magmaFloatComplex *spout = (magmaFloatComplex*)spinorOut; magmablas_cgemv(_cN, slen, vlen, MAGMA_C_ONE, spmat, sld, (magmaFloatComplex*)vec, 1, MAGMA_C_ZERO, spout, 1);//in colour-major format } else { magmaDoubleComplex *spmat = (magmaDoubleComplex*)spinorSetIn; magmaDoubleComplex *spout = 
(magmaDoubleComplex*)spinorOut; magmablas_zgemv(_cN, slen, vlen, MAGMA_Z_ONE, spmat, sld, (magmaDoubleComplex*)vec, 1, MAGMA_Z_ZERO, spout, 1);//in colour-major format } #endif return; } void BlasMagmaArgs::MagmaRightNotrUNMQR(const int clen, const int qrlen, const int nrefls, void *QR, const int ldqr, void *Vm, const int cldn) { #ifdef MAGMA_LIB magma_int_t m = clen; magma_int_t n = qrlen; magma_int_t k = nrefls; magma_int_t lwork = -1; if(prec == 4) { } else { magmaDoubleComplex *dQR = NULL; magmaDoubleComplex *dtau = NULL; magmaDoubleComplex *htau = NULL; magmaDoubleComplex *hW = NULL; magmaDoubleComplex qW; magma_malloc_pinned((void**)&dQR, ldqr*k*sizeof(magmaDoubleComplex)); magma_malloc_pinned((void**)&htau, k*sizeof(magmaDoubleComplex)); // magma_malloc((void**)&dTau, k*sizeof(magmaDoubleComplex)); qudaMemcpy(dQR, QR, ldqr*k*sizeof(magmaDoubleComplex), cudaMemcpyDefault); magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm // magma_zgeqrf_gpu(n, k, (magmaDoubleComplex *)dQR, ldqr, (magmaDoubleComplex *)htau, (magmaDoubleComplex *)dtau, &info);//identical to zgeqrf? magma_zunmqr_gpu(_cR, _cN, m, n, k, dQR, ldqr, htau, (magmaDoubleComplex *)Vm, cldn, &qW, lwork, dtau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n"); lwork = (magma_int_t) MAGMA_Z_REAL(qW); magma_malloc_cpu((void**)&hW, lwork*sizeof(magmaDoubleComplex)); //get TQ product: magma_zunmqr_gpu(_cR, _cN, m, n, k, dQR, ldqr, htau, (magmaDoubleComplex *)Vm, cldn, hW, lwork, dtau, nb, &info); if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n"); magma_free_cpu(hW); magma_free(dtau); magma_free_pinned(htau); magma_free_pinned(dQR); } #endif return; } //STL based version: // struct SortedEval{ double eval_nrm; int eval_idx; SortedEval(double val, int idx) : eval_nrm(val), eval_idx(idx) {}; }; bool cmp_eigen_nrms (SortedEval v1, SortedEval v2) { return (v1.eval_nrm < v2.eval_nrm); } void BlasMagmaArgs::Sort(const int m, const int ldm, void *eVecs, const int nev, void *unsorted_eVecs, void *eVals) { if (prec == 4) errorQuda("\nSingle precision is currently not supported.\n"); std::vector<SortedEval> sorted_evals_cntr; for(int e = 0; e < m; e++) sorted_evals_cntr.push_back( SortedEval( abs(((std::complex<double>*)eVals)[e]), e )); std::stable_sort(sorted_evals_cntr.begin(), sorted_evals_cntr.end(), cmp_eigen_nrms); for(int e = 0; e < nev; e++) { memcpy(&(((std::complex<double>*)eVecs)[ldm*e]), &(((std::complex<double>*)unsorted_eVecs)[ldm*( sorted_evals_cntr[e].eval_idx)]), (ldm)*sizeof(std::complex<double>)); //set zero in m+1 element: ((std::complex<double>*)eVecs)[ldm*e+m] = std::complex<double>(0.0, 0.0); } return; } ///NEW STUFF: void BlasMagmaArgs::ComputeQR(const int nev, Complex * evmat, const int m, const int ldm, Complex *tau) { #ifdef MAGMA_LIB magma_int_t _m = m;//matrix size magma_int_t _nev = nev;//matrix size magma_int_t _ldm = ldm; //Lapack parameters: magma_int_t info = 0; magma_int_t lwork = -1; magmaDoubleComplex *work = NULL; magmaDoubleComplex qwork; //parameter to extract optimal size of work magma_zgeqrf(_m, _nev, (magmaDoubleComplex *)evmat, _ldm, (magmaDoubleComplex *)tau, &qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: MAGMA_ZGEQRF, info %d\n",info); lwork = (magma_int_t) MAGMA_Z_REAL(qwork); magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex)); magma_zgeqrf(_m, _nev, (magmaDoubleComplex *)evmat, _ldm, (magmaDoubleComplex *)tau, work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZGEQRF, info 
%d\n",info); if(work) magma_free_cpu(work); #endif return; } void BlasMagmaArgs::LeftConjZUNMQR(const int k /*number of reflectors*/, const int n /*number of columns of H*/, Complex *H, const int dh /*number of rows*/, const int ldh, Complex * QR, const int ldqr, Complex *tau)//for vectors: n =1 { #ifdef MAGMA_LIB //Note: # rows of QR = # rows of H. magma_int_t _h = dh;//matrix size magma_int_t _n = n;//vector size magma_int_t _k = k; magma_int_t _ldh = ldh; magma_int_t _ldqr = ldqr; //Lapack parameters: magma_side_t _s = _cL;//apply QR-matrix from the left magma_trans_t _t = _cC;//conjugate magma_int_t info = 0; magma_int_t lwork = -1; magmaDoubleComplex *work = NULL; magmaDoubleComplex qwork; //parameter to extract optimal size of work //Pdagger_{k+1} PrevRes magma_zunmqr(_s, _t, _h, _n, _k, (magmaDoubleComplex *)QR, _ldqr, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldh, &qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); lwork = (magma_int_t) MAGMA_Z_REAL(qwork); magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex)); magma_zunmqr(_s, _t, _h, _n, _k, (magmaDoubleComplex *)QR, _ldqr, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldh, work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); if(work) magma_free_cpu(work); #endif return; } void BlasMagmaArgs::Construct_harmonic_matrix(Complex * const harmH, Complex * const conjH, const double beta2, const int m, const int ldH) { #ifdef MAGMA_LIB //Lapack parameters: magma_int_t _m = m; // magma_int_t _ldH = ldH; // magma_int_t info = 0; // magma_int_t I_ONE = 1; // magma_int_t *ipiv; magma_malloc_cpu((void**)&ipiv, ldH*sizeof(magma_int_t)); // //Construct H + beta*H^{-H} e_m*e_m^{T} // 1. need to solve H^{H}y = e_m; Complex *em = new Complex[m]; em[m-1] = beta2;//in fact, we construct beta*em, magma_zgesv(_m, I_ONE, (magmaDoubleComplex *)conjH, _ldH, ipiv, (magmaDoubleComplex *)em, _ldH, &info); if( (info != 0 ) ) errorQuda( "Error: DGESV, info %d\n",info); //make this cleaner! //check solution: for (int j = 0; j < m; j++) { Complex accum = 0.0; for (int i = 0; i < m; i++) accum = (accum + harmH[ldH*j+i]*em[(ipiv[i])-1]); } // 2. 
Construct matrix for harmonic Ritz vectors: // Adjust last column with KroneckerProd((H^{-H}*beta*em)=em, em^{T}=[0,....,1]): for(int i = 0; i < m; i++) harmH[ldH*(m-1)+i] += em[i]; magma_free_cpu(ipiv); // delete [] em; #endif return; } void BlasMagmaArgs::Compute_harmonic_matrix_eigenpairs(Complex *harmH, const int m, const int ldH, Complex *vr, Complex *evalues, const int ldv) { #ifdef MAGMA_LIB magma_int_t _m = m;//matrix size magma_int_t _ldH = ldH; magma_int_t _ldv = ldv; //Lapack parameters: magma_int_t info = 0; // magma_vec_t _r = _cV; magma_vec_t _l = _cNV;//no left eigenvectors magma_int_t lwork = -1; magmaDoubleComplex *work = NULL; magmaDoubleComplex qwork; //parameter to extract optimal size of work double *rwork = NULL; magma_malloc_cpu((void**)&rwork, 2*_m*sizeof(double)); //Get optimal work: magma_zgeev(_l, _r, _m, (magmaDoubleComplex *)harmH, _ldH, (magmaDoubleComplex *)evalues, NULL, _ldv, (magmaDoubleComplex *)vr, _ldv, &qwork, lwork, rwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZGEEVX, info %d\n",info); lwork = (magma_int_t) MAGMA_Z_REAL(qwork); magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex)); //now get eigenpairs: magma_zgeev(_l, _r, _m, (magmaDoubleComplex *)harmH, _ldH, (magmaDoubleComplex *)evalues, NULL, _ldv, (magmaDoubleComplex *)vr, _ldv, work, lwork, rwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZGEEVX, info %d\n",info); if(rwork) magma_free_cpu(rwork); // if(work) magma_free_cpu(work); // #endif return; } void BlasMagmaArgs::RestartVH(void *dV, const int vlen, const int vld, const int vprec, void *sortedHarVecs, void *H, const int ldh) { #ifdef MAGMA_LIB if(prec == 4) { errorQuda("\nError: single precision is not currently supported\n"); } if( (vld % 32) != 0) errorQuda("\nError: leading dimension must be multiple of the warp size\n"); int nev = (max_nev - 1); //(nev+1) - 1 for GMRESDR int _m = m;//matrix size int _k = nev; int _kp1 = max_nev; int _mp1 = (m+1); int _ldm = ldh; magma_side_t _s = _cR;//apply P-matrix from the right magma_trans_t _t = _cN;//no left eigenvectors int info = 0; int lwork = -1; Complex *work = NULL; Complex qwork; //parameter to extract optimal size of work const int cprec = 2*prec; //currently: sizeof(Complex) const int cvprec = 2*vprec; const int l = max_nev; int lbsize = 2*((nev / 16)*16); //const int bufferSize = 2*vld+lbsize*lbsize; //int bufferBlock = bufferSize / lbsize;//or: lbsize = (nev+1) int bufferBlock = (2*vld) / lbsize; bufferBlock = (bufferBlock / 32) * 32;//corrected bufferBlock to be multiple of the warp size int bufferSize = (bufferBlock * lbsize); void *buffer = NULL; void *dQmat = NULL; magma_malloc(&buffer, bufferSize*cvprec); cudaMemset(buffer, 0, bufferSize*cvprec); magma_malloc(&dQmat, l*ldh*cprec); //GPU code: Complex *tau = new Complex[l];//nev+1 =>max_nev Complex *Qmat = new Complex[ldh*_mp1];//need (m+1)x(m+1) matrix on input... 
ComputeQR(l, (Complex*)sortedHarVecs, _mp1, ldh, tau);//lapack version //max_nev vectors are stored in Qmat (output): //restoreOrthVectors(Qmat, max_nev, (Complex*)sortedHarVecs, (m+1), ldh, tau); //Load diagonal units for(int d = 0; d < (m+1); d++) Qmat[ldh*d+d] = Complex(1.0, 0.0); magma_zunmqr(_s, _t, _mp1, _mp1, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)Qmat, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); lwork = (int) qwork.real(); work = new Complex[lwork]; magma_zunmqr(_s, _t, _mp1, _mp1, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)Qmat, _ldm, (magmaDoubleComplex *)work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); //Copy (nev+1) vectors on the device: qudaMemcpy(dQmat, Qmat, (max_nev)*ldh*cprec, cudaMemcpyDefault); if(cvprec == sizeof(magmaDoubleComplex)) { for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock) { if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset); //printfQuda("\nBuffer block : %d\n", bufferBlock); magmaDoubleComplex *ptrV = &(((magmaDoubleComplex*)dV)[blockOffset]); magmablas_zgemm(_cN, _cN, bufferBlock, l, _mp1, MAGMA_Z_ONE, ptrV, vld, (magmaDoubleComplex*)dQmat, ldh, MAGMA_Z_ZERO, (magmaDoubleComplex*)buffer, bufferBlock); cudaMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, cudaMemcpyDefault);//make this async! } cudaMemset(&(((magmaDoubleComplex*)dV)[vld*max_nev]), 0, (m+1-max_nev)*vld*sizeof(magmaDoubleComplex));//= m - nev } else // low precision field { for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock) { if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset); magmaFloatComplex *ptrV = &(((magmaFloatComplex*)dV)[blockOffset]); sMM_v2(buffer, bufferBlock, ptrV, bufferBlock, _mp1, vld, dQmat, _mp1, l, ldh); cudaMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, cudaMemcpyDefault); } cudaMemset(&(((magmaFloatComplex*)dV)[vld*max_nev]), 0, (m+1-max_nev)*vld*sizeof(magmaFloatComplex));//= m - nev } //Construct H_new = Pdagger_{k+1} \bar{H}_{m} P_{k} //bar{H}_{m} P_{k} lwork = -1; magma_zunmqr(_s, _t, _mp1, _m, _k, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); delete[] work; lwork = (int) qwork.real(); work = new Complex[lwork]; magma_zunmqr(_s, _t, _mp1, _m, _k, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); //Pdagger_{k+1} PrevRes lwork = -1; _s = _cL; _t = _cC; magma_zunmqr(_s, _t, _mp1, _k, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); delete [] work; lwork = (int) qwork.real(); work = new Complex[lwork]; magma_zunmqr(_s, _t, _mp1, _k, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)work, lwork, &info); if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info); const int len = ldh - nev-1; for(int i = 0; i < nev; i++) 
memset(&(((Complex*)H)[ldh*i+nev+1]), 0, len*sizeof(Complex) ); // memset(&(((Complex*)H)[ldh*(nev)]), 0, (m-nev)*ldh*sizeof(Complex)); delete [] work; magma_free(buffer); magma_free(dQmat); delete [] Qmat; delete [] tau ; #endif return; } #define FMULS_GETRF(m_, n_) ( ((m_) < (n_)) \ ? (0.5 * (m_) * ((m_) * ((n_) - (1./3.) * (m_) - 1. ) + (n_)) + (2. / 3.) * (m_)) \ : (0.5 * (n_) * ((n_) * ((m_) - (1./3.) * (n_) - 1. ) + (m_)) + (2. / 3.) * (n_)) ) #define FADDS_GETRF(m_, n_) ( ((m_) < (n_)) \ ? (0.5 * (m_) * ((m_) * ((n_) - (1./3.) * (m_) ) - (n_)) + (1. / 6.) * (m_)) \ : (0.5 * (n_) * ((n_) * ((m_) - (1./3.) * (n_) ) - (m_)) + (1. / 6.) * (n_)) ) #define FLOPS_ZGETRF(m_, n_) (6. * FMULS_GETRF((double)(m_), (double)(n_)) + 2.0 * FADDS_GETRF((double)(m_), (double)(n_)) ) #define FLOPS_CGETRF(m_, n_) (6. * FMULS_GETRF((double)(m_), (double)(n_)) + 2.0 * FADDS_GETRF((double)(m_), (double)(n_)) ) #define FMULS_GETRI(n_) ( (n_) * ((5. / 6.) + (n_) * ((2. / 3.) * (n_) + 0.5)) ) #define FADDS_GETRI(n_) ( (n_) * ((5. / 6.) + (n_) * ((2. / 3.) * (n_) - 1.5)) ) #define FLOPS_ZGETRI(n_) (6. * FMULS_GETRI((double)(n_)) + 2.0 * FADDS_GETRI((double)(n_)) ) #define FLOPS_CGETRI(n_) (6. * FMULS_GETRI((double)(n_)) + 2.0 * FADDS_GETRI((double)(n_)) ) void BlasMagmaArgs::BatchInvertMatrix(void *Ainv_h, void* A_h, const int n, const int batch) { #ifdef MAGMA_LIB printfQuda("%s with n=%d and batch=%d\n", __func__, n, batch); magma_queue_t queue = 0; size_t size = 2*n*n*prec*batch; void *A_d = device_malloc(size); void *Ainv_d = device_malloc(size); qudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice); magma_int_t **dipiv_array = static_cast<magma_int_t**>(device_malloc(batch*sizeof(magma_int_t*))); magma_int_t *dipiv_tmp = static_cast<magma_int_t*>(device_malloc(batch*n*sizeof(magma_int_t))); set_ipointer(dipiv_array, dipiv_tmp, 1, 0, 0, n, batch, queue); magma_int_t *dinfo_array = static_cast<magma_int_t*>(device_malloc(batch*sizeof(magma_int_t))); magma_int_t *info_array = static_cast<magma_int_t*>(safe_malloc(batch*sizeof(magma_int_t))); magma_int_t err; // FIXME do this in pipelined fashion to reduce memory overhead. if (prec == 4) { magmaFloatComplex **A_array = static_cast<magmaFloatComplex**>(device_malloc(batch*sizeof(magmaFloatComplex*))); magmaFloatComplex **Ainv_array = static_cast<magmaFloatComplex**>(device_malloc(batch*sizeof(magmaFloatComplex*))); cset_pointer(A_array, static_cast<magmaFloatComplex*>(A_d), n, 0, 0, n*n, batch, queue); cset_pointer(Ainv_array, static_cast<magmaFloatComplex*>(Ainv_d), n, 0, 0, n*n, batch, queue); double magma_time = magma_sync_wtime(queue); err = magma_cgetrf_batched(n, n, A_array, n, dipiv_array, dinfo_array, batch, queue); //err = magma_cgetrf_nopiv_batched(n, n, A_array, n, dinfo_array, batch, queue); (no getri support for nopiv?) 
magma_time = magma_sync_wtime(queue) - magma_time; printfQuda("LU factorization completed in %f seconds with GFLOPS = %f\n", magma_time, 1e-9 * batch * FLOPS_CGETRF(n,n) / magma_time); if(err != 0) errorQuda("\nError in LU decomposition (magma_cgetrf), error code = %d\n", err); qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), cudaMemcpyDeviceToHost); for (int i=0; i<batch; i++) { if (info_array[i] < 0) { errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i); } else if (info_array[i] > 0) { errorQuda("%d factorization completed but the factor U is exactly singular", i); } } magma_time = magma_sync_wtime(queue); err = magma_cgetri_outofplace_batched(n, A_array, n, dipiv_array, Ainv_array, n, dinfo_array, batch, queue); magma_time = magma_sync_wtime(queue) - magma_time; printfQuda("Matrix inversion completed in %f seconds with GFLOPS = %f\n", magma_time, 1e-9 * batch * FLOPS_CGETRI(n) / magma_time); if(err != 0) errorQuda("\nError in matrix inversion (magma_cgetri), error code = %d\n", err); qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), cudaMemcpyDeviceToHost); for (int i=0; i<batch; i++) { if (info_array[i] < 0) { errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i); } else if (info_array[i] > 0) { errorQuda("%d factorization completed but the factor U is exactly singular", i); } } device_free(Ainv_array); device_free(A_array); } else if (prec == 8) { magmaDoubleComplex **A_array = static_cast<magmaDoubleComplex**>(device_malloc(batch*sizeof(magmaDoubleComplex*))); zset_pointer(A_array, static_cast<magmaDoubleComplex*>(A_d), n, 0, 0, n*n, batch, queue); magmaDoubleComplex **Ainv_array = static_cast<magmaDoubleComplex**>(device_malloc(batch*sizeof(magmaDoubleComplex*))); zset_pointer(Ainv_array, static_cast<magmaDoubleComplex*>(Ainv_d), n, 0, 0, n*n, batch, queue); double magma_time = magma_sync_wtime(queue); err = magma_zgetrf_batched(n, n, A_array, n, dipiv_array, dinfo_array, batch, queue); magma_time = magma_sync_wtime(queue) - magma_time; printfQuda("LU factorization completed in %f seconds with GFLOPS = %f\n", magma_time, 1e-9 * batch * FLOPS_ZGETRF(n,n) / magma_time); if(err != 0) errorQuda("\nError in LU decomposition (magma_zgetrf), error code = %d\n", err); qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), cudaMemcpyDeviceToHost); for (int i=0; i<batch; i++) { if (info_array[i] < 0) { errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i); } else if (info_array[i] > 0) { errorQuda("%d factorization completed but the factor U is exactly singular", i); } } magma_time = magma_sync_wtime(queue); err = magma_zgetri_outofplace_batched(n, A_array, n, dipiv_array, Ainv_array, n, dinfo_array, batch, queue); magma_time = magma_sync_wtime(queue) - magma_time; printfQuda("Matrix inversion completed in %f seconds with GFLOPS = %f\n", magma_time, 1e-9 * batch * FLOPS_ZGETRI(n) / magma_time); if(err != 0) errorQuda("\nError in matrix inversion (magma_cgetri), error code = %d\n", err); qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), cudaMemcpyDeviceToHost); for (int i=0; i<batch; i++) { if (info_array[i] < 0) { errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i); } else if (info_array[i] > 0) { errorQuda("%d factorization completed but the factor U is exactly singular", i); } } device_free(Ainv_array); device_free(A_array); } 
else { errorQuda("%s not implemented for precision=%d", __func__, prec); } qudaMemcpy(Ainv_h, Ainv_d, size, cudaMemcpyDeviceToHost); device_free(dipiv_tmp); device_free(dipiv_array); device_free(dinfo_array); host_free(info_array); device_free(Ainv_d); device_free(A_d); #endif return; } #ifdef MAGMA_LIB #undef _cV #undef _cU #undef _cR #undef _cL #undef _cC #undef _cN #undef _cNV #endif
083eeb7e69a27f3371cf84a039f2f3715e6e841f.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <random>
#include <thread>  // NOLINT
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/fluid/memory/allocation/best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#include "paddle/fluid/memory/allocation/locked_allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace memory {
namespace allocation {

struct ForEachFill {
  size_t* ptr_;
  explicit ForEachFill(size_t* ptr) : ptr_(ptr) {}

  __device__ void operator()(size_t i) { ptr_[i] = i; }
};

TEST(BestFitAllocator, concurrent_cuda) {
  HIPAllocator allocator(platform::CUDAPlace(0));
  // 256 MB
  auto cuda_allocation = allocator.Allocate(256U * 1024 * 1024);
  LockedAllocator concurrent_allocator(
      std::unique_ptr<Allocator>(new BestFitAllocator(cuda_allocation.get())));

  platform::CUDAPlace gpu(0);
  phi::GPUContext dev_ctx(gpu);
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(gpu, dev_ctx.stream())
                           .get());
  dev_ctx.PartialInitWithAllocator();

  auto th_main = [&](std::random_device::result_type seed) {
    std::default_random_engine engine(seed);
    std::uniform_int_distribution<size_t> dist(1U, 1024U);
    std::array<size_t, 1024> buf;

    for (size_t i = 0; i < 128; ++i) {
      size_t allocate_size = dist(engine);
      auto allocation =
          concurrent_allocator.Allocate(sizeof(size_t) * allocate_size);

      size_t* data = reinterpret_cast<size_t*>(allocation->ptr());

      ForEachFill fill(data);
      platform::ForRange<phi::GPUContext> for_range(dev_ctx, allocate_size);
      for_range(fill);

      memory::Copy(platform::CPUPlace(), buf.data(), gpu, data,
                   sizeof(size_t) * allocate_size, dev_ctx.stream());

      dev_ctx.Wait();
      for (size_t j = 0; j < allocate_size; ++j) {
        ASSERT_EQ(buf[j], j);
      }
      allocation = nullptr;
    }
  };

  {
    std::vector<std::thread> threads;
    for (size_t i = 0; i < 1024; ++i) {
      std::random_device dev;
      threads.emplace_back(th_main, dev());
    }
    for (auto& th : threads) {
      th.join();
    }
  }
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
083eeb7e69a27f3371cf84a039f2f3715e6e841f.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <random>
#include <thread>  // NOLINT
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/fluid/memory/allocation/best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#include "paddle/fluid/memory/allocation/locked_allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/for_range.h"

namespace paddle {
namespace memory {
namespace allocation {

struct ForEachFill {
  size_t* ptr_;
  explicit ForEachFill(size_t* ptr) : ptr_(ptr) {}

  __device__ void operator()(size_t i) { ptr_[i] = i; }
};

TEST(BestFitAllocator, concurrent_cuda) {
  CUDAAllocator allocator(platform::CUDAPlace(0));
  // 256 MB
  auto cuda_allocation = allocator.Allocate(256U * 1024 * 1024);
  LockedAllocator concurrent_allocator(
      std::unique_ptr<Allocator>(new BestFitAllocator(cuda_allocation.get())));

  platform::CUDAPlace gpu(0);
  phi::GPUContext dev_ctx(gpu);
  dev_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(gpu, dev_ctx.stream())
                           .get());
  dev_ctx.PartialInitWithAllocator();

  auto th_main = [&](std::random_device::result_type seed) {
    std::default_random_engine engine(seed);
    std::uniform_int_distribution<size_t> dist(1U, 1024U);
    std::array<size_t, 1024> buf;

    for (size_t i = 0; i < 128; ++i) {
      size_t allocate_size = dist(engine);
      auto allocation =
          concurrent_allocator.Allocate(sizeof(size_t) * allocate_size);

      size_t* data = reinterpret_cast<size_t*>(allocation->ptr());

      ForEachFill fill(data);
      platform::ForRange<phi::GPUContext> for_range(dev_ctx, allocate_size);
      for_range(fill);

      memory::Copy(platform::CPUPlace(), buf.data(), gpu, data,
                   sizeof(size_t) * allocate_size, dev_ctx.stream());

      dev_ctx.Wait();
      for (size_t j = 0; j < allocate_size; ++j) {
        ASSERT_EQ(buf[j], j);
      }
      allocation = nullptr;
    }
  };

  {
    std::vector<std::thread> threads;
    for (size_t i = 0; i < 1024; ++i) {
      std::random_device dev;
      threads.emplace_back(th_main, dev());
    }
    for (auto& th : threads) {
      th.join();
    }
  }
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle
f9aedd499c971a73b1aa50dcd06432a7eaab9a5f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/datetime.hpp> #include <cudf/types.hpp> #include <cudf/wrappers/timestamps.hpp> #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/column_utilities.hpp> #include <tests/utilities/column_wrapper.hpp> #include <tests/utilities/timestamp_utilities.cuh> #include <tests/utilities/type_lists.hpp> #include <gmock/gmock.h> template <typename T> struct NonTimestampTest : public cudf::test::BaseFixture { cudf::data_type type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; using NonTimestampTypes = cudf::test::Concat<cudf::test::NumericTypes, cudf::test::StringTypes>; TYPED_TEST_CASE(NonTimestampTest, NonTimestampTypes); TYPED_TEST(NonTimestampTest, TestThrowsOnNonTimestamp) { using T = TypeParam; using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; cudf::data_type dtype{cudf::experimental::type_to_id<T>()}; cudf::column col{dtype, 0, rmm::device_buffer{0}}; EXPECT_THROW(extract_year(col), cudf::logic_error); EXPECT_THROW(extract_month(col), cudf::logic_error); EXPECT_THROW(extract_day(col), cudf::logic_error); EXPECT_THROW(extract_weekday(col), cudf::logic_error); EXPECT_THROW(extract_hour(col), cudf::logic_error); EXPECT_THROW(extract_minute(col), cudf::logic_error); EXPECT_THROW(extract_second(col), cudf::logic_error); } struct BasicDatetimeOpsTest : public cudf::test::BaseFixture {}; TEST_F(BasicDatetimeOpsTest, TestExtractingDatetimeComponents) { using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; auto timestamps_D = fixed_width_column_wrapper<cudf::timestamp_D>{ -1528, // 1965-10-26 GMT 17716, // 2018-07-04 GMT 19382, // 2023-01-25 GMT }; auto timestamps_s = fixed_width_column_wrapper<cudf::timestamp_s>{ -131968728, // 1965-10-26 14:01:12 GMT 1530705600, // 2018-07-04 12:00:00 GMT 1674631932, // 2023-01-25 07:32:12 GMT }; auto timestamps_ms = fixed_width_column_wrapper<cudf::timestamp_ms>{ -131968727238, // 1965-10-26 14:01:12.762 GMT 1530705600000, // 2018-07-04 12:00:00.000 GMT 1674631932929, // 2023-01-25 07:32:12.929 GMT }; expect_columns_equal(*extract_year(timestamps_D), fixed_width_column_wrapper<int16_t>{1965, 2018, 2023}); expect_columns_equal(*extract_year(timestamps_s), fixed_width_column_wrapper<int16_t>{1965, 2018, 2023}); expect_columns_equal(*extract_year(timestamps_ms), fixed_width_column_wrapper<int16_t>{1965, 2018, 2023}); expect_columns_equal(*extract_month(timestamps_D), fixed_width_column_wrapper<int16_t>{10, 7, 1}); expect_columns_equal(*extract_month(timestamps_s), fixed_width_column_wrapper<int16_t>{10, 7, 1}); expect_columns_equal(*extract_month(timestamps_ms), fixed_width_column_wrapper<int16_t>{10, 7, 1}); expect_columns_equal(*extract_day(timestamps_D), fixed_width_column_wrapper<int16_t>{26, 4, 25}); 
expect_columns_equal(*extract_day(timestamps_s), fixed_width_column_wrapper<int16_t>{26, 4, 25}); expect_columns_equal(*extract_day(timestamps_ms), fixed_width_column_wrapper<int16_t>{26, 4, 25}); expect_columns_equal(*extract_weekday(timestamps_D), fixed_width_column_wrapper<int16_t>{2, 3, 3}); expect_columns_equal(*extract_weekday(timestamps_s), fixed_width_column_wrapper<int16_t>{2, 3, 3}); expect_columns_equal(*extract_weekday(timestamps_ms), fixed_width_column_wrapper<int16_t>{2, 3, 3}); expect_columns_equal(*extract_hour(timestamps_D), fixed_width_column_wrapper<int16_t>{0, 0, 0}); expect_columns_equal(*extract_hour(timestamps_s), fixed_width_column_wrapper<int16_t>{14, 12, 7}); expect_columns_equal(*extract_hour(timestamps_ms), fixed_width_column_wrapper<int16_t>{14, 12, 7}); expect_columns_equal(*extract_minute(timestamps_D), fixed_width_column_wrapper<int16_t>{0, 0, 0}); expect_columns_equal(*extract_minute(timestamps_s), fixed_width_column_wrapper<int16_t>{1, 0, 32}); expect_columns_equal(*extract_minute(timestamps_ms), fixed_width_column_wrapper<int16_t>{1, 0, 32}); expect_columns_equal(*extract_second(timestamps_D), fixed_width_column_wrapper<int16_t>{0, 0, 0}); expect_columns_equal(*extract_second(timestamps_s), fixed_width_column_wrapper<int16_t>{12, 0, 12}); expect_columns_equal(*extract_second(timestamps_ms), fixed_width_column_wrapper<int16_t>{12, 0, 12}); } template <typename T> struct TypedDatetimeOpsTest : public cudf::test::BaseFixture { hipStream_t stream() { return hipStream_t(0); } cudf::size_type size() { return cudf::size_type(10); } cudf::data_type type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; TYPED_TEST_CASE(TypedDatetimeOpsTest, cudf::test::TimestampTypes); TYPED_TEST(TypedDatetimeOpsTest, TestEmptyColumns) { using T = TypeParam; using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; auto int16s_dtype = cudf::data_type{cudf::experimental::type_to_id<int16_t>()}; auto timestamps_dtype = cudf::data_type{cudf::experimental::type_to_id<T>()}; cudf::column int16s{int16s_dtype, 0, rmm::device_buffer{0}}; cudf::column timestamps{timestamps_dtype, 0, rmm::device_buffer{0}}; expect_columns_equal(*extract_year(timestamps), int16s); expect_columns_equal(*extract_month(timestamps), int16s); expect_columns_equal(*extract_day(timestamps), int16s); expect_columns_equal(*extract_weekday(timestamps), int16s); expect_columns_equal(*extract_hour(timestamps), int16s); expect_columns_equal(*extract_minute(timestamps), int16s); expect_columns_equal(*extract_second(timestamps), int16s); } TYPED_TEST(TypedDatetimeOpsTest, TestExtractingGeneratedDatetimeComponents) { using T = TypeParam; using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT auto timestamps = generate_timestamps<T>(this->size(), time_point_ms(start), time_point_ms(stop_)); auto expected_years = fixed_width_column_wrapper<int16_t>{ 1890, 1906, 1922, 1938, 1954, 1970, 1985, 2001, 2017, 2033}; auto expected_months = fixed_width_column_wrapper<int16_t>{10, 8, 6, 4, 2, 1, 11, 9, 7, 5}; auto expected_days = fixed_width_column_wrapper<int16_t>{11, 16, 20, 24, 26, 1, 5, 9, 14, 18}; auto expected_weekdays = fixed_width_column_wrapper<int16_t>{6, 4, 2, 7, 5, 4, 2, 7, 5, 3}; auto expected_hours = fixed_width_column_wrapper<int16_t>{19, 20, 21, 22, 23, 0, 0, 1, 2, 3}; auto 
expected_minutes = fixed_width_column_wrapper<int16_t>{33, 26, 20, 13, 6, 0, 53, 46, 40, 33}; auto expected_seconds = fixed_width_column_wrapper<int16_t>{20, 40, 0, 20, 40, 0, 20, 40, 0, 20}; // Special cases for timestamp_D: zero out the expected hh/mm/ss cols if (std::is_same<TypeParam, cudf::timestamp_D>::value) { expected_hours = fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; expected_minutes = fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; expected_seconds = fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; } expect_columns_equal(*extract_year(timestamps), expected_years); expect_columns_equal(*extract_month(timestamps), expected_months); expect_columns_equal(*extract_day(timestamps), expected_days); expect_columns_equal(*extract_weekday(timestamps), expected_weekdays); expect_columns_equal(*extract_hour(timestamps), expected_hours); expect_columns_equal(*extract_minute(timestamps), expected_minutes); expect_columns_equal(*extract_second(timestamps), expected_seconds); } TYPED_TEST(TypedDatetimeOpsTest, TestExtractingGeneratedNullableDatetimeComponents) { using T = TypeParam; using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT auto timestamps = generate_timestamps<T, true>( this->size(), time_point_ms(start), time_point_ms(stop_)); auto expected_years = fixed_width_column_wrapper<int16_t>{ {1890, 1906, 1922, 1938, 1954, 1970, 1985, 2001, 2017, 2033}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_months = fixed_width_column_wrapper<int16_t>{ {10, 8, 6, 4, 2, 1, 11, 9, 7, 5}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_days = fixed_width_column_wrapper<int16_t>{ {11, 16, 20, 24, 26, 1, 5, 9, 14, 18}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_weekdays = fixed_width_column_wrapper<int16_t>{ {6, 4, 2, 7, 5, 4, 2, 7, 5, 3}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_hours = fixed_width_column_wrapper<int16_t>{ {19, 20, 21, 22, 23, 0, 0, 1, 2, 3}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_minutes = fixed_width_column_wrapper<int16_t>{ {33, 26, 20, 13, 6, 0, 53, 46, 40, 33}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_seconds = fixed_width_column_wrapper<int16_t>{ {20, 40, 0, 20, 40, 0, 20, 40, 0, 20}, {true, false, true, false, true, false, true, false, true, false}}; // Special cases for timestamp_D: zero out the expected hh/mm/ss cols if (std::is_same<TypeParam, cudf::timestamp_D>::value) { expected_hours = fixed_width_column_wrapper<int16_t>{ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {true, false, true, false, true, false, true, false, true, false}}; expected_minutes = fixed_width_column_wrapper<int16_t>{ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {true, false, true, false, true, false, true, false, true, false}}; expected_seconds = fixed_width_column_wrapper<int16_t>{ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {true, false, true, false, true, false, true, false, true, false}}; } expect_columns_equal(*extract_year(timestamps), expected_years); expect_columns_equal(*extract_month(timestamps), expected_months); expect_columns_equal(*extract_day(timestamps), expected_days); expect_columns_equal(*extract_weekday(timestamps), expected_weekdays); 
expect_columns_equal(*extract_hour(timestamps), expected_hours); expect_columns_equal(*extract_minute(timestamps), expected_minutes); expect_columns_equal(*extract_second(timestamps), expected_seconds); }
f9aedd499c971a73b1aa50dcd06432a7eaab9a5f.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/datetime.hpp> #include <cudf/types.hpp> #include <cudf/wrappers/timestamps.hpp> #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/column_utilities.hpp> #include <tests/utilities/column_wrapper.hpp> #include <tests/utilities/timestamp_utilities.cuh> #include <tests/utilities/type_lists.hpp> #include <gmock/gmock.h> template <typename T> struct NonTimestampTest : public cudf::test::BaseFixture { cudf::data_type type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; using NonTimestampTypes = cudf::test::Concat<cudf::test::NumericTypes, cudf::test::StringTypes>; TYPED_TEST_CASE(NonTimestampTest, NonTimestampTypes); TYPED_TEST(NonTimestampTest, TestThrowsOnNonTimestamp) { using T = TypeParam; using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; cudf::data_type dtype{cudf::experimental::type_to_id<T>()}; cudf::column col{dtype, 0, rmm::device_buffer{0}}; EXPECT_THROW(extract_year(col), cudf::logic_error); EXPECT_THROW(extract_month(col), cudf::logic_error); EXPECT_THROW(extract_day(col), cudf::logic_error); EXPECT_THROW(extract_weekday(col), cudf::logic_error); EXPECT_THROW(extract_hour(col), cudf::logic_error); EXPECT_THROW(extract_minute(col), cudf::logic_error); EXPECT_THROW(extract_second(col), cudf::logic_error); } struct BasicDatetimeOpsTest : public cudf::test::BaseFixture {}; TEST_F(BasicDatetimeOpsTest, TestExtractingDatetimeComponents) { using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; auto timestamps_D = fixed_width_column_wrapper<cudf::timestamp_D>{ -1528, // 1965-10-26 GMT 17716, // 2018-07-04 GMT 19382, // 2023-01-25 GMT }; auto timestamps_s = fixed_width_column_wrapper<cudf::timestamp_s>{ -131968728, // 1965-10-26 14:01:12 GMT 1530705600, // 2018-07-04 12:00:00 GMT 1674631932, // 2023-01-25 07:32:12 GMT }; auto timestamps_ms = fixed_width_column_wrapper<cudf::timestamp_ms>{ -131968727238, // 1965-10-26 14:01:12.762 GMT 1530705600000, // 2018-07-04 12:00:00.000 GMT 1674631932929, // 2023-01-25 07:32:12.929 GMT }; expect_columns_equal(*extract_year(timestamps_D), fixed_width_column_wrapper<int16_t>{1965, 2018, 2023}); expect_columns_equal(*extract_year(timestamps_s), fixed_width_column_wrapper<int16_t>{1965, 2018, 2023}); expect_columns_equal(*extract_year(timestamps_ms), fixed_width_column_wrapper<int16_t>{1965, 2018, 2023}); expect_columns_equal(*extract_month(timestamps_D), fixed_width_column_wrapper<int16_t>{10, 7, 1}); expect_columns_equal(*extract_month(timestamps_s), fixed_width_column_wrapper<int16_t>{10, 7, 1}); expect_columns_equal(*extract_month(timestamps_ms), fixed_width_column_wrapper<int16_t>{10, 7, 1}); expect_columns_equal(*extract_day(timestamps_D), fixed_width_column_wrapper<int16_t>{26, 4, 25}); expect_columns_equal(*extract_day(timestamps_s), 
fixed_width_column_wrapper<int16_t>{26, 4, 25}); expect_columns_equal(*extract_day(timestamps_ms), fixed_width_column_wrapper<int16_t>{26, 4, 25}); expect_columns_equal(*extract_weekday(timestamps_D), fixed_width_column_wrapper<int16_t>{2, 3, 3}); expect_columns_equal(*extract_weekday(timestamps_s), fixed_width_column_wrapper<int16_t>{2, 3, 3}); expect_columns_equal(*extract_weekday(timestamps_ms), fixed_width_column_wrapper<int16_t>{2, 3, 3}); expect_columns_equal(*extract_hour(timestamps_D), fixed_width_column_wrapper<int16_t>{0, 0, 0}); expect_columns_equal(*extract_hour(timestamps_s), fixed_width_column_wrapper<int16_t>{14, 12, 7}); expect_columns_equal(*extract_hour(timestamps_ms), fixed_width_column_wrapper<int16_t>{14, 12, 7}); expect_columns_equal(*extract_minute(timestamps_D), fixed_width_column_wrapper<int16_t>{0, 0, 0}); expect_columns_equal(*extract_minute(timestamps_s), fixed_width_column_wrapper<int16_t>{1, 0, 32}); expect_columns_equal(*extract_minute(timestamps_ms), fixed_width_column_wrapper<int16_t>{1, 0, 32}); expect_columns_equal(*extract_second(timestamps_D), fixed_width_column_wrapper<int16_t>{0, 0, 0}); expect_columns_equal(*extract_second(timestamps_s), fixed_width_column_wrapper<int16_t>{12, 0, 12}); expect_columns_equal(*extract_second(timestamps_ms), fixed_width_column_wrapper<int16_t>{12, 0, 12}); } template <typename T> struct TypedDatetimeOpsTest : public cudf::test::BaseFixture { cudaStream_t stream() { return cudaStream_t(0); } cudf::size_type size() { return cudf::size_type(10); } cudf::data_type type() { return cudf::data_type{cudf::experimental::type_to_id<T>()}; } }; TYPED_TEST_CASE(TypedDatetimeOpsTest, cudf::test::TimestampTypes); TYPED_TEST(TypedDatetimeOpsTest, TestEmptyColumns) { using T = TypeParam; using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; auto int16s_dtype = cudf::data_type{cudf::experimental::type_to_id<int16_t>()}; auto timestamps_dtype = cudf::data_type{cudf::experimental::type_to_id<T>()}; cudf::column int16s{int16s_dtype, 0, rmm::device_buffer{0}}; cudf::column timestamps{timestamps_dtype, 0, rmm::device_buffer{0}}; expect_columns_equal(*extract_year(timestamps), int16s); expect_columns_equal(*extract_month(timestamps), int16s); expect_columns_equal(*extract_day(timestamps), int16s); expect_columns_equal(*extract_weekday(timestamps), int16s); expect_columns_equal(*extract_hour(timestamps), int16s); expect_columns_equal(*extract_minute(timestamps), int16s); expect_columns_equal(*extract_second(timestamps), int16s); } TYPED_TEST(TypedDatetimeOpsTest, TestExtractingGeneratedDatetimeComponents) { using T = TypeParam; using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT auto timestamps = generate_timestamps<T>(this->size(), time_point_ms(start), time_point_ms(stop_)); auto expected_years = fixed_width_column_wrapper<int16_t>{ 1890, 1906, 1922, 1938, 1954, 1970, 1985, 2001, 2017, 2033}; auto expected_months = fixed_width_column_wrapper<int16_t>{10, 8, 6, 4, 2, 1, 11, 9, 7, 5}; auto expected_days = fixed_width_column_wrapper<int16_t>{11, 16, 20, 24, 26, 1, 5, 9, 14, 18}; auto expected_weekdays = fixed_width_column_wrapper<int16_t>{6, 4, 2, 7, 5, 4, 2, 7, 5, 3}; auto expected_hours = fixed_width_column_wrapper<int16_t>{19, 20, 21, 22, 23, 0, 0, 1, 2, 3}; auto expected_minutes = 
fixed_width_column_wrapper<int16_t>{33, 26, 20, 13, 6, 0, 53, 46, 40, 33}; auto expected_seconds = fixed_width_column_wrapper<int16_t>{20, 40, 0, 20, 40, 0, 20, 40, 0, 20}; // Special cases for timestamp_D: zero out the expected hh/mm/ss cols if (std::is_same<TypeParam, cudf::timestamp_D>::value) { expected_hours = fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; expected_minutes = fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; expected_seconds = fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; } expect_columns_equal(*extract_year(timestamps), expected_years); expect_columns_equal(*extract_month(timestamps), expected_months); expect_columns_equal(*extract_day(timestamps), expected_days); expect_columns_equal(*extract_weekday(timestamps), expected_weekdays); expect_columns_equal(*extract_hour(timestamps), expected_hours); expect_columns_equal(*extract_minute(timestamps), expected_minutes); expect_columns_equal(*extract_second(timestamps), expected_seconds); } TYPED_TEST(TypedDatetimeOpsTest, TestExtractingGeneratedNullableDatetimeComponents) { using T = TypeParam; using namespace cudf::test; using namespace cudf::datetime; using namespace simt::std::chrono; auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT auto timestamps = generate_timestamps<T, true>( this->size(), time_point_ms(start), time_point_ms(stop_)); auto expected_years = fixed_width_column_wrapper<int16_t>{ {1890, 1906, 1922, 1938, 1954, 1970, 1985, 2001, 2017, 2033}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_months = fixed_width_column_wrapper<int16_t>{ {10, 8, 6, 4, 2, 1, 11, 9, 7, 5}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_days = fixed_width_column_wrapper<int16_t>{ {11, 16, 20, 24, 26, 1, 5, 9, 14, 18}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_weekdays = fixed_width_column_wrapper<int16_t>{ {6, 4, 2, 7, 5, 4, 2, 7, 5, 3}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_hours = fixed_width_column_wrapper<int16_t>{ {19, 20, 21, 22, 23, 0, 0, 1, 2, 3}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_minutes = fixed_width_column_wrapper<int16_t>{ {33, 26, 20, 13, 6, 0, 53, 46, 40, 33}, {true, false, true, false, true, false, true, false, true, false}}; auto expected_seconds = fixed_width_column_wrapper<int16_t>{ {20, 40, 0, 20, 40, 0, 20, 40, 0, 20}, {true, false, true, false, true, false, true, false, true, false}}; // Special cases for timestamp_D: zero out the expected hh/mm/ss cols if (std::is_same<TypeParam, cudf::timestamp_D>::value) { expected_hours = fixed_width_column_wrapper<int16_t>{ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {true, false, true, false, true, false, true, false, true, false}}; expected_minutes = fixed_width_column_wrapper<int16_t>{ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {true, false, true, false, true, false, true, false, true, false}}; expected_seconds = fixed_width_column_wrapper<int16_t>{ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {true, false, true, false, true, false, true, false, true, false}}; } expect_columns_equal(*extract_year(timestamps), expected_years); expect_columns_equal(*extract_month(timestamps), expected_months); expect_columns_equal(*extract_day(timestamps), expected_days); expect_columns_equal(*extract_weekday(timestamps), expected_weekdays); 
expect_columns_equal(*extract_hour(timestamps), expected_hours); expect_columns_equal(*extract_minute(timestamps), expected_minutes); expect_columns_equal(*extract_second(timestamps), expected_seconds); }
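The expected component values in these tests come from the GMT timestamps noted in the inline comments (e.g. -131968728 s is 1965-10-26 14:01:12 GMT). One such row can be cross-checked on the host with a plain C++ program, assuming a POSIX gmtime_r; this is only a sanity check, not cudf code.

#include <cstdio>
#include <ctime>

int main() {
  std::time_t t = -131968728;  // first timestamp_s input in the test above
  std::tm utc{};
  gmtime_r(&t, &utc);          // POSIX; glibc handles negative (pre-epoch) values
  std::printf("%04d-%02d-%02d %02d:%02d:%02d UTC (tm_wday=%d)\n",
              utc.tm_year + 1900, utc.tm_mon + 1, utc.tm_mday,
              utc.tm_hour, utc.tm_min, utc.tm_sec, utc.tm_wday);
  return 0;  // prints: 1965-10-26 14:01:12 UTC (tm_wday=2, a Tuesday)
}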
753008b0d6ef2f17d8ed9c3bc118d33af5f0e125.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernelBFRSMultishared.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *g_uquery = NULL; hipMalloc(&g_uquery, XSIZE*YSIZE); const float *g_vpointset = NULL; hipMalloc(&g_vpointset, XSIZE*YSIZE); int *g_npoints = NULL; hipMalloc(&g_npoints, XSIZE*YSIZE); int pointdim = 1; int triallength = 1; int signallength = 1; int exclude = 1; const float *vecradius = NULL; hipMalloc(&vecradius, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernelBFRSMultishared), dim3(gridBlock),dim3(threadBlock), 0, 0, g_uquery,g_vpointset,g_npoints,pointdim,triallength,signallength,exclude,vecradius); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernelBFRSMultishared), dim3(gridBlock),dim3(threadBlock), 0, 0, g_uquery,g_vpointset,g_npoints,pointdim,triallength,signallength,exclude,vecradius); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernelBFRSMultishared), dim3(gridBlock),dim3(threadBlock), 0, 0, g_uquery,g_vpointset,g_npoints,pointdim,triallength,signallength,exclude,vecradius); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
753008b0d6ef2f17d8ed9c3bc118d33af5f0e125.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernelBFRSMultishared.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *g_uquery = NULL; cudaMalloc(&g_uquery, XSIZE*YSIZE); const float *g_vpointset = NULL; cudaMalloc(&g_vpointset, XSIZE*YSIZE); int *g_npoints = NULL; cudaMalloc(&g_npoints, XSIZE*YSIZE); int pointdim = 1; int triallength = 1; int signallength = 1; int exclude = 1; const float *vecradius = NULL; cudaMalloc(&vecradius, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernelBFRSMultishared<<<gridBlock,threadBlock>>>(g_uquery,g_vpointset,g_npoints,pointdim,triallength,signallength,exclude,vecradius); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernelBFRSMultishared<<<gridBlock,threadBlock>>>(g_uquery,g_vpointset,g_npoints,pointdim,triallength,signallength,exclude,vecradius); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernelBFRSMultishared<<<gridBlock,threadBlock>>>(g_uquery,g_vpointset,g_npoints,pointdim,triallength,signallength,exclude,vecradius); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
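Aside from the hipify banner, the substantive rewrites between this .cu/.hip pair are the header mapping (cuda.h to hip/hip_runtime.h, curand_kernel.h to hiprand/hiprand_kernel.h), the cudaXxx runtime calls becoming hipXxx, and the kernel-launch syntax. Reduced to a toy kernel, the launch translation looks like this; `scale` is a made-up kernel used only for illustration, not one of the kernels in the files above.

#include <cuda_runtime.h>

__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n) {
  dim3 grid((n + 255) / 256), block(256);
  scale<<<grid, block>>>(d_x, a, n);  // CUDA triple-chevron form, as in the .cu file
  // The .hip file above uses the equivalent hipLaunchKernelGGL form:
  //   hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_x, a, n);
  // where the two extra arguments are shared-memory bytes and the stream (0 = default).
}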
d703a999fb18bd7f9c45747baf2e77437186fb36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { template <typename InputT, typename OutputT, int D> __global__ void _ChannelNormalize( const int N, const int axis, const int num_dims, const SimpleArray<int, D> X_strides, const SimpleArray<int, D> Y_dims, const InputT* x, const float* mean, const float* std, OutputT* y) { CUDA_1D_KERNEL_LOOP(yi, N) { int xi = 0, wi, tmp = yi; for (int d = num_dims - 1; d >= 0; --d) { int r; FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], tmp, &tmp, &r); xi += r * X_strides.data[d]; if (d == axis) wi = r; } y[yi] = convert::To<OutputT>( (convert::To<float>(x[xi]) - __ldg(mean + wi)) / __ldg(std + wi)); } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DEFINE_KERNEL_LAUNCHER(InputT, OutputT) \ template <> \ void ChannelNormalize<InputT, OutputT, CUDAContext>( \ const int axis, \ const int num_dims, \ const int64_t* x_strides, \ const int64_t* y_dims, \ const InputT* x, \ const float* mean, \ const float* std, \ OutputT* y, \ CUDAContext* ctx) { \ CUDA_TENSOR_DIMS_CHECK(num_dims); \ SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_strides, Y_dims; \ const auto N = std::accumulate( \ y_dims, y_dims + num_dims, 1, std::multiplies<int64_t>()); \ for (int i = 0; i < num_dims; ++i) { \ X_strides.data[i] = x_strides[i]; \ Y_dims.data[i] = y_dims[i]; \ } \ hipLaunchKernelGGL(( _ChannelNormalize), \ CUDA_BLOCKS(N), \ CUDA_THREADS, \ 0, \ ctx->cuda_stream(), \ N, axis, num_dims, X_strides, Y_dims, x, mean, std, y); \ } DEFINE_KERNEL_LAUNCHER(uint8_t, float16); DEFINE_KERNEL_LAUNCHER(uint8_t, float); DEFINE_KERNEL_LAUNCHER(uint8_t, double); DEFINE_KERNEL_LAUNCHER(int8_t, float16); DEFINE_KERNEL_LAUNCHER(int8_t, float); DEFINE_KERNEL_LAUNCHER(int8_t, double); DEFINE_KERNEL_LAUNCHER(int, float16); DEFINE_KERNEL_LAUNCHER(int, float); DEFINE_KERNEL_LAUNCHER(int, double); DEFINE_KERNEL_LAUNCHER(int64_t, float16); DEFINE_KERNEL_LAUNCHER(int64_t, float); DEFINE_KERNEL_LAUNCHER(int64_t, double); DEFINE_KERNEL_LAUNCHER(float16, float16); DEFINE_KERNEL_LAUNCHER(float16, float); DEFINE_KERNEL_LAUNCHER(float16, double); DEFINE_KERNEL_LAUNCHER(float, float16); DEFINE_KERNEL_LAUNCHER(float, float); DEFINE_KERNEL_LAUNCHER(float, double); DEFINE_KERNEL_LAUNCHER(double, float16); DEFINE_KERNEL_LAUNCHER(double, float); DEFINE_KERNEL_LAUNCHER(double, double); #undef DEFINE_KERNEL_LAUNCHER } // namespace kernels } // namespace dragon #endif // USE_ROCM
d703a999fb18bd7f9c45747baf2e77437186fb36.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/utils/math_functions.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernels { namespace { template <typename InputT, typename OutputT, int D> __global__ void _ChannelNormalize( const int N, const int axis, const int num_dims, const SimpleArray<int, D> X_strides, const SimpleArray<int, D> Y_dims, const InputT* x, const float* mean, const float* std, OutputT* y) { CUDA_1D_KERNEL_LOOP(yi, N) { int xi = 0, wi, tmp = yi; for (int d = num_dims - 1; d >= 0; --d) { int r; FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], tmp, &tmp, &r); xi += r * X_strides.data[d]; if (d == axis) wi = r; } y[yi] = convert::To<OutputT>( (convert::To<float>(x[xi]) - __ldg(mean + wi)) / __ldg(std + wi)); } } } // namespace /* ------------------- Launcher Separator ------------------- */ #define DEFINE_KERNEL_LAUNCHER(InputT, OutputT) \ template <> \ void ChannelNormalize<InputT, OutputT, CUDAContext>( \ const int axis, \ const int num_dims, \ const int64_t* x_strides, \ const int64_t* y_dims, \ const InputT* x, \ const float* mean, \ const float* std, \ OutputT* y, \ CUDAContext* ctx) { \ CUDA_TENSOR_DIMS_CHECK(num_dims); \ SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_strides, Y_dims; \ const auto N = std::accumulate( \ y_dims, y_dims + num_dims, 1, std::multiplies<int64_t>()); \ for (int i = 0; i < num_dims; ++i) { \ X_strides.data[i] = x_strides[i]; \ Y_dims.data[i] = y_dims[i]; \ } \ _ChannelNormalize<<< \ CUDA_BLOCKS(N), \ CUDA_THREADS, \ 0, \ ctx->cuda_stream()>>>( \ N, axis, num_dims, X_strides, Y_dims, x, mean, std, y); \ } DEFINE_KERNEL_LAUNCHER(uint8_t, float16); DEFINE_KERNEL_LAUNCHER(uint8_t, float); DEFINE_KERNEL_LAUNCHER(uint8_t, double); DEFINE_KERNEL_LAUNCHER(int8_t, float16); DEFINE_KERNEL_LAUNCHER(int8_t, float); DEFINE_KERNEL_LAUNCHER(int8_t, double); DEFINE_KERNEL_LAUNCHER(int, float16); DEFINE_KERNEL_LAUNCHER(int, float); DEFINE_KERNEL_LAUNCHER(int, double); DEFINE_KERNEL_LAUNCHER(int64_t, float16); DEFINE_KERNEL_LAUNCHER(int64_t, float); DEFINE_KERNEL_LAUNCHER(int64_t, double); DEFINE_KERNEL_LAUNCHER(float16, float16); DEFINE_KERNEL_LAUNCHER(float16, float); DEFINE_KERNEL_LAUNCHER(float16, double); DEFINE_KERNEL_LAUNCHER(float, float16); DEFINE_KERNEL_LAUNCHER(float, float); DEFINE_KERNEL_LAUNCHER(float, double); DEFINE_KERNEL_LAUNCHER(double, float16); DEFINE_KERNEL_LAUNCHER(double, float); DEFINE_KERNEL_LAUNCHER(double, double); #undef DEFINE_KERNEL_LAUNCHER } // namespace kernels } // namespace dragon #endif // USE_CUDA
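Apart from the USE_CUDA/USE_ROCM guard and the launch macro, both versions of _ChannelNormalize share the same per-element index arithmetic: each flat output index is peeled into an nd-index against Y_dims while the matching input offset is accumulated from X_strides, and the coordinate along `axis` selects which mean/std entry to use. A host-side sketch of that loop with plain div/mod (FIXED_DIVISOR_DIV_MOD in the kernel is an optimized equivalent); the function and variable names below are illustrative only.

#include <cstdio>

int input_offset(int yi, const int* y_dims, const int* x_strides, int num_dims,
                 int axis, int* channel /* out: coordinate along `axis` */) {
  int xi = 0, tmp = yi;
  for (int d = num_dims - 1; d >= 0; --d) {  // innermost to outermost, as in the kernel
    int r = tmp % y_dims[d];
    tmp /= y_dims[d];
    xi += r * x_strides[d];
    if (d == axis) *channel = r;  // selects mean[channel] / std[channel]
  }
  return xi;
}

int main() {
  // e.g. an NCHW tensor of shape {2,3,4,5} with contiguous strides {60,20,5,1}, channel axis 1;
  // with contiguous strides the offset reproduces yi, it differs when X_strides encode a transpose.
  int dims[4] = {2, 3, 4, 5}, strides[4] = {60, 20, 5, 1}, c = -1;
  int xi = input_offset(37, dims, strides, 4, 1, &c);
  std::printf("flat 37 -> input offset %d, channel %d\n", xi, c);  // 37, channel 1
  return 0;
}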
b89ec5daf59039bd14630dc98d988a84ef8c4f12.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "CopyConnectionsCoordinatesKernel.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *connectionMatrix = NULL; hipMalloc(&connectionMatrix, XSIZE*YSIZE); float *pointsCoordinates = NULL; hipMalloc(&pointsCoordinates, XSIZE*YSIZE); float *vertexData = NULL; hipMalloc(&vertexData, XSIZE*YSIZE); int *connectionCount = NULL; hipMalloc(&connectionCount, XSIZE*YSIZE); int maxCells = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( CopyConnectionsCoordinatesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, connectionMatrix,pointsCoordinates,vertexData,connectionCount,maxCells); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( CopyConnectionsCoordinatesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, connectionMatrix,pointsCoordinates,vertexData,connectionCount,maxCells); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( CopyConnectionsCoordinatesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, connectionMatrix,pointsCoordinates,vertexData,connectionCount,maxCells); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b89ec5daf59039bd14630dc98d988a84ef8c4f12.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "CopyConnectionsCoordinatesKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *connectionMatrix = NULL; cudaMalloc(&connectionMatrix, XSIZE*YSIZE); float *pointsCoordinates = NULL; cudaMalloc(&pointsCoordinates, XSIZE*YSIZE); float *vertexData = NULL; cudaMalloc(&vertexData, XSIZE*YSIZE); int *connectionCount = NULL; cudaMalloc(&connectionCount, XSIZE*YSIZE); int maxCells = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); CopyConnectionsCoordinatesKernel<<<gridBlock,threadBlock>>>(connectionMatrix,pointsCoordinates,vertexData,connectionCount,maxCells); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { CopyConnectionsCoordinatesKernel<<<gridBlock,threadBlock>>>(connectionMatrix,pointsCoordinates,vertexData,connectionCount,maxCells); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { CopyConnectionsCoordinatesKernel<<<gridBlock,threadBlock>>>(connectionMatrix,pointsCoordinates,vertexData,connectionCount,maxCells); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
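One property shared by the timing loops in these generated benchmarks: kernel launches are asynchronous and there is no device synchronization between the last launch and the second steady_clock::now() call, so the reported microseconds can largely reflect launch/enqueue overhead rather than kernel execution time unless the launch queue fills. A sketch of event-based timing for the same 1000-launch loop follows; dummy_kernel stands in for the benchmarked kernel and the function name is hypothetical, so this is illustrative rather than a drop-in patch.

#include <cuda_runtime.h>

__global__ void dummy_kernel() {}  // stand-in for CopyConnectionsCoordinatesKernel

float time_1000_launches(dim3 gridBlock, dim3 threadBlock) {
  cudaEvent_t start_ev, stop_ev;
  cudaEventCreate(&start_ev);
  cudaEventCreate(&stop_ev);
  cudaEventRecord(start_ev);
  for (int i = 0; i < 1000; ++i) {
    dummy_kernel<<<gridBlock, threadBlock>>>();
  }
  cudaEventRecord(stop_ev);
  cudaEventSynchronize(stop_ev);  // wait until all 1000 launches have finished on the GPU
  float ms = 0.f;
  cudaEventElapsedTime(&ms, start_ev, stop_ev);
  cudaEventDestroy(start_ev);
  cudaEventDestroy(stop_ev);
  return ms;  // elapsed device time in milliseconds
}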
6f4650d979d392f7f24b6ccf2eba01e1473910a7.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <cstdlib> #include <helper_cuda.h> #include <helper_functions.h> #include <device_atomic_functions.h> #include <helper_math.h> #include <stdio.h> #include <thrust/device_ptr.h> #include <thrust/for_each.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/sort.h> #include <hip/hip_cooperative_groups.h> #include "cuda_simulation.cuh" #include "sph_kernel.cuh" #include <chrono> #include "imgui/imgui.h" namespace cg = cooperative_groups; // calculate position in uniform grid inline __device__ int3 calcGridPos(float3 p) { int3 gridPos; gridPos.x = floor((p.x - params.world_origin.x) / params.cell_size.x); gridPos.y = floor((p.y - params.world_origin.y) / params.cell_size.y); gridPos.z = floor((p.z - params.world_origin.z) / params.cell_size.z); return gridPos; } // calculate address in grid from position (clamping to edges) inline __device__ uint calcGridHash(int3 gridPos) { gridPos.x = gridPos.x & (params.grid_size.x - 1); // wrap grid, assumes size is power of 2 gridPos.y = gridPos.y & (params.grid_size.y - 1); gridPos.z = gridPos.z & (params.grid_size.z - 1); return __umul24(__umul24(gridPos.z, params.grid_size.y), params.grid_size.x) + __umul24(gridPos.y, params.grid_size.x) + gridPos.x; } // collide two spheres using DEM method inline __device__ float3 collideSpheres( float3 posA, float3 posB, float3 velA, float3 velB, float radiusA, float radiusB, float attraction) { // calculate relative position float3 relPos = posB - posA; float dist = length(relPos); float collideDist = radiusA + radiusB; float3 force = make_float3(0.0f); //printf("dist: %f\ncollideDist: %f", dist, collideDist); if (dist < collideDist) { float3 norm = relPos / (dist+0.00001f); // relative velocity float3 relVel = velB - velA; // relative tangential velocity float3 tanVel = relVel - (dot(relVel, norm) * norm); // spring force force = -params.spring * (collideDist - dist) * norm; // dashpot (damping) force force += params.damping * relVel; // tangential shear force force += params.shear * tanVel; // attraction force += attraction * relPos; //printf("%f %f %f\n", force.x, force.y, force.z); } return force; } inline __device__ float3 collideCell( int3 gridPos, uint index, float3 pos, float3 vel, float3* oldPos, float3* oldVel, uint* cellStart, uint* cellEnd) { uint gridHash = calcGridHash(gridPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; float3 force = make_float3(0.0f); if (startIndex != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) // check not colliding with self { float3 pos2 = oldPos[j]; float3 vel2 = oldVel[j]; // collide two spheres force += collideSpheres( pos, pos2, vel, vel2, params.particle_radius, params.particle_radius, params.attraction); } } } return force; } inline __device__ float sph_boundary_volume( int3 grid_pos, uint index, float3 pos1, float* mass, CellData data ) { uint grid_hash = calcGridHash(grid_pos); uint start_index = data.cellStart[grid_hash]; float rho = 0.f; if (start_index != 0xffffffff) { uint end_index = data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; ++j) { if (j != index) { uint original_index = data.grid_index[j]; float3 pos2 = data.sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); rho += mass[original_index] * Poly6_W_CUDA(dist, params.effective_radius); } } } return rho; } __global__ 
void calcHashD( CellData cell_data, // output float3* pos, // input: positions uint num_particles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= num_particles) return; volatile float3 p = pos[index]; // get address in grid int3 gridPos = calcGridPos(make_float3(p.x, p.y, p.z)); uint hash = calcGridHash(gridPos); // store grid hash and particle index cell_data.grid_hash[index] = hash; cell_data.grid_index[index] = index; } __global__ void calcHash_boundary_D( CellData cell_data, float3* pos, // input: positions uint num_particles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= num_particles) return; //printf("%u \n", index); volatile float3 p = pos[index]; // get address in grid int3 gridPos = calcGridPos(make_float3(p.x, p.y, p.z)); uint hash = calcGridHash(gridPos); // store grid hash and particle index cell_data.grid_hash[index] = hash; cell_data.grid_index[index] = index; } /* * Reorder data to find cell start and end (for neighbor searching) */ __global__ void reorderDataAndFindCellStartD( CellData cell_data, float3* oldPos, // input: sorted position array uint numParticles) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = cell_data.grid_hash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = cell_data.grid_hash[index - 1]; } } cg::sync(cta); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. // As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cell_data.cellStart[hash] = index; if (index > 0) cell_data.cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cell_data.cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = cell_data.grid_index[index]; float3 pos = oldPos[sortedIndex]; cell_data.sorted_pos[index] = pos; } } /* __global__ void reorderData_boundary_D( CellData cell_data, float3* oldPos, // input: sorted position array uint numParticles) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. 
of particles not multiple of block size if (index < numParticles) { hash = cell_data.grid_hash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = cell_data.grid_hash[index - 1]; } } cg::sync(cta); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. // As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cell_data.cellStart[hash] = index; if (index > 0) cell_data.cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cell_data.cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos data uint sortedIndex = cell_data.grid_index[index]; float3 pos = oldPos[sortedIndex]; cell_data.sorted_pos[index] = pos; } } */ __global__ void compute_boundary_volume_d( CellData data, float* mass, float* volume, uint numParticles) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint originalIndex = data.grid_index[index]; // read particle data from sorted arrays float3 pos = data.sorted_pos[index]; // initial volume float rho = mass[originalIndex] * Poly6_W_CUDA(0, params.effective_radius); // get address in grid int3 gridPos = calcGridPos(pos); // traverse 27 neighbors for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); rho += sph_boundary_volume( neighbor_pos, index, pos, mass, data ); } } } // Update volume volume[originalIndex] = mass[originalIndex] / rho; //printf("rho = %f\n", rho); //printf("C[%u]: %f\n", originalIndex, C[originalIndex]); } void compute_grid_size(uint n, uint block_size, uint& num_blocks, uint& num_threads) { num_threads = min(block_size, n); num_blocks = (n % num_threads != 0) ? 
(n / num_threads + 1) : (n / num_threads); } void calculate_hash( CellData cell_data, float3* pos, uint num_particles) { uint num_blocks, num_threads; compute_grid_size(num_particles, MAX_THREAD_NUM, num_blocks, num_threads); calcHashD << < num_blocks, num_threads >> > ( cell_data, pos, num_particles); getLastCudaError("Kernel execution failed: calc_hash"); } void reorder_data( CellData cell_data, float3* oldPos, uint numParticles, uint numCells) { uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); // set all cells to empty checkCudaErrors(hipMemset(cell_data.cellStart, 0xffffffff, numCells * sizeof(uint))); uint smemSize = sizeof(uint) * (numThreads + 1); reorderDataAndFindCellStartD << < numBlocks, numThreads, smemSize >> > ( cell_data, oldPos, numParticles); getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD"); } /* void reorderData_boundary( CellData cell_data, float3* oldPos, uint numParticles, uint numCells) { uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); // set all cells to empty checkCudaErrors(hipMemset(cell_data.cellStart, 0xffffffff, numCells * sizeof(uint))); uint smemSize = sizeof(uint) * (numThreads + 1); reorderData_boundary_D << < numBlocks, numThreads, smemSize >> > ( cell_data, oldPos, numParticles); getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD"); } */ void compute_boundary_volume(CellData data, float* mass, float* volume, uint numParticles) { uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); compute_boundary_volume_d << <numBlocks, numThreads >> > ( data, mass, volume, numParticles); getLastCudaError("Kernel execution failed: copmute_boundary_volume"); } __global__ void test_offset(float3* positions) { int i = blockIdx.x * blockDim.x + threadIdx.x; /* if (i == 0) printf("particles[0]: %f, %f, %f\n", positions[i].x , positions[i].y, positions[i].z); */ positions[i].x = positions[i].x + 0.001f; positions[i].y = positions[i].y + 0.001f; positions[i].z = positions[i].z + 0.001f; } __global__ void integrate_pbd_d( float3* pos, float3* vel, float3* force, float* massInv, float3* predict_pos, float3* new_pos, float dt, uint numParticles) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; float3 t_vel = vel[index] + dt * params.gravity; t_vel = t_vel * params.global_damping; float3 t_pos = pos[index] + dt * t_vel; if (t_pos.x >= 1.0f) { t_pos.x = 1.f; t_vel.x = -abs(t_vel.x); t_vel *= params.boundary_damping; } if (t_pos.x <= -1.0f) { t_pos.x = -1.f; t_vel.x = abs(t_vel.x); t_vel *= params.boundary_damping; } if (t_pos.z >= 1.0f) { t_pos.z = 1.f; t_vel.z = -abs(t_vel.z); t_vel *= params.boundary_damping; } if (t_pos.z <= -1.0f) { t_pos.z = -1.f; t_vel.z = abs(t_vel.z); t_vel *= params.boundary_damping; } if (t_pos.y <= 0.f) { t_pos.y = 0.f; t_vel.y = abs(t_vel.y); t_vel *= params.boundary_damping; } /* Velocity limitation if (length(t_vel) > 5.f) { t_vel = (5.f / length(t_vel)) * t_vel ; } */ predict_pos[index] = t_pos;// pos[index] + dt * t_vel; vel[index] = t_vel; new_pos[index] = predict_pos[index]; } // collide a particle against all other particles in a given cell /* Collision device code */ __global__ void collideD( float3* newVel, // output: new velocity float3* oldPos, // input: sorted positions float3* oldVel, // input: sorted velocities uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, uint numParticles, float dt) { uint 
index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; // read particle data from sorted arrays float3 pos = oldPos[index]; float3 vel = oldVel[index]; // get address in grid int3 gridPos = calcGridPos(pos); // examine neighbouring cells float3 force = make_float3(0.0f); // traverse 27 neighbors for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); force += collideCell(neighbor_pos, index, pos, vel, oldPos, oldVel, cellStart, cellEnd); } } } // write new velocity back to original unsorted location uint originalIndex = gridParticleIndex[index]; newVel[originalIndex] = vel + force * dt; // + force/mass * dt ? } inline __device__ float pbf_density_0( int3 grid_pos, uint index, float3 pos, float3* sorted_pos, float* mass, float* rest_density, uint* cell_start, uint* cell_end, uint* gridParticleIndex ) // type: 0->fluid fluid 1->boundary boundary { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float density = 0.0f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float rho = 0.f; rho = mass[original_index] * Poly6_W_CUDA(dist, params.effective_radius); density += rho; } } } return density; } inline __device__ float pbf_density_1( int3 grid_pos, uint index, float3 pos, float3* sorted_pos, float* mass, float* rest_density, uint* cell_start, uint* cell_end, uint* gridParticleIndex, float* b_volume = nullptr) // type: 0->fluid fluid 1->boundary boundary { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float density = 0.0f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float rho = 0.f; rho = (*rest_density) * b_volume[original_index] * Poly6_W_CUDA(dist, params.effective_radius); density += rho; } } } return density; } inline __device__ float pbf_density_boundary( int3 grid_pos, float3 pos1, float* rest_density, float* volume, CellData cell_data ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_data.cellStart[grid_hash]; float density = 0.0f; // if cell of boundary cell data is not empty if (start_index != 0xffffffff) { // iterate over particles in this cell uint end_index = cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { // no need to check collision (boundary cell data is not the same as fluid cell data) uint original_index = cell_data.grid_index[j]; float3 pos2 = cell_data.sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); float rho = (*rest_density) * volume[original_index] * Poly6_W_CUDA(dist, params.effective_radius); density += rho; } } // return contributions of boundary paritcles return density; } // boundary - fluid inline __device__ float pbf_boundary_density( // boundary int3 grid_pos, // searching grid pos float3 
pos1, // position of boundary particle // fluid float* mass, float3* sorted_pos, uint* cell_start, uint* cell_end, uint* gridParticleIndex ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float density = 0.0f; // if cell of boundary cell data is not empty if (start_index != 0xffffffff) { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { // no need to check collision (boundary cell data is not the same as fluid cell data) uint original_index = gridParticleIndex[j]; float3 pos2 = sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); float rho = mass[original_index] * Poly6_W_CUDA(dist, params.effective_radius); density += rho; } } // return contributions of boundary paritcles return density; } inline __device__ float pbf_lambda_0( int3 grid_pos, uint index, float3 pos, float* rest_density, float* mass, float3* sorted_pos, uint* cell_start, uint* cell_end, uint* gridParticleIndex ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float gradientC_sum = 0.f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; //float particle_mass = mass[original_index]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float3 gradientC_j; gradientC_j = (1.f / (*rest_density)) * Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float dot_val = dot(gradientC_j, gradientC_j); gradientC_sum += dot_val; } } } return gradientC_sum; } inline __device__ float pbf_lambda_1( int3 grid_pos, uint index, float3 pos, float* rest_density, float* mass, float3* sorted_pos, uint* cell_start, uint* cell_end, uint* gridParticleIndex, float* b_volume = nullptr) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float gradientC_sum = 0.f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; float particle_mass = mass[original_index]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float3 gradientC_j; float vol = b_volume[original_index]; gradientC_j = (1.f / (*rest_density)) * ((*rest_density) * vol / particle_mass) * Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float dot_val = dot(gradientC_j, gradientC_j); gradientC_sum += dot_val; } } } return gradientC_sum; } // fluid - boundary inline __device__ float pbf_lambda_boundary( int3 grid_pos, // searching grid pos float3 pos1, // position of fluid particle float* rest_density, float particle_mass, CellData cell_data, // cell data of boundary particle, float* volume ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_data.cellStart[grid_hash]; float gradientC_sum = 0.f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { uint original_index = cell_data.grid_index[j]; float 
vol = volume[original_index]; float3 pos2 = cell_data.sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); float3 gradientC_j = (1.f / (*rest_density)) * ((*rest_density) * vol / particle_mass) * Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float dot_val = dot(gradientC_j, gradientC_j); gradientC_sum += dot_val; } } return gradientC_sum; } // Boundary - fluid inline __device__ float pbf_boundary_lambda( // boundary int3 grid_pos, // searching grid pos float3 pos1, // position of boundary particle float* rest_density, float particle_mass, float volume, // fluid float3* sorted_pos, uint* cell_start, uint* cell_end, uint* gridParticleIndex ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float gradientC_sum = 0.f; // search in fluid cell if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { float3 pos2 = sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); float3 gradientC_j = (1.f / (*rest_density)) * Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float dot_val = dot(gradientC_j, gradientC_j); gradientC_sum += dot_val; } } return gradientC_sum; } inline __device__ float3 pbf_correction( int3 grid_pos, uint index, float3 pos, float lambda_i, float* rest_density, float3* sorted_pos, float* lambda, uint* cell_start, uint* cell_end, uint* gridParticleIndex, float dt) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float3 correction = make_float3(0, 0, 0); if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float3 gradient = Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float scorr = -0.1f; float x = Poly6_W_CUDA(dist, params.effective_radius) / Poly6_W_CUDA(0.3f * params.effective_radius, params.effective_radius); x = pow(x, 4); scorr = scorr * x * dt * dt * dt; //printf("scorr: %f\n", scorr); float3 res = //(1.f / (*rest_density)) * (lambda_i + lambda[original_index] +scorr)* gradient; correction += res; } } //printf("Num neighbors: %u\n", end_index - start_index); } return correction; } // compute correction from boundary particles inline __device__ float3 pbf_correction_boundary( int3 grid_pos, uint index, float3 pos, float lambda_i, float* rest_density, // boundary CellData b_cell_data, float* b_lambda, float dt) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = b_cell_data.cellStart[grid_hash]; float3 correction = make_float3(0, 0, 0); if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = b_cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = b_cell_data.grid_index[j]; float lambda_j = b_lambda[original_index]; float3 pos2 = b_cell_data.sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float3 gradient = Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float scorr = -0.1f; float x = Poly6_W_CUDA(dist, params.effective_radius) / 
Poly6_W_CUDA(0.3f * params.effective_radius, params.effective_radius); x = pow(x, 4); scorr = scorr * x * dt * dt; //printf("scorr: %f\n", scorr); float3 res = //(1.f / (*rest_density)) * (lambda_i + lambda_j) *// +scorr)* gradient; correction += res; } } //printf("Num neighbors: %u\n", end_index - start_index); } return correction; } __global__ void compute_density_d( float* density, // output: computed density float* rest_density, // input: rest density float3* sorted_pos, // input: sorted mass float* mass, // input: mass float* C, // input: contraint uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, //boundary CellData cell_data, float* b_volume, uint numParticles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint originalIndex = gridParticleIndex[index]; // read particle data from sorted arrays float3 pos = sorted_pos[index]; // initial density float rho = mass[originalIndex] * Poly6_W_CUDA(0, params.effective_radius); // get address in grid int3 gridPos = calcGridPos(pos); // traverse 27 neighbors (fluid - fluid) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); rho += pbf_density_0( neighbor_pos, index, pos, sorted_pos, mass, rest_density, cellStart, cellEnd, gridParticleIndex ); } } } // use gridPos to traverse 27 surrounding grids (fluid - boundary) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_gridPos = gridPos + make_int3(x, y, z); rho += pbf_density_boundary( // fluid neighbor_gridPos, pos, rest_density, // boundary b_volume, cell_data ); } } } // Update date density and constraint value density[originalIndex] = rho; C[originalIndex] = (rho / (*rest_density)) - 1.f; //printf("rho = %f\n", rho); //printf("C[%u]: %f\n", originalIndex, C[originalIndex]); } __global__ void compute_boundary_density_d( // fluid float* rest_density, // input: rest density float3* sorted_pos, // input: sorted pos of fluid particle float* mass, // input: mass of fluid paritcle uint* cellStart, uint* cellEnd, uint* gridParticleIndex, // input: sorted particle indices (for original_index of fluid particles) // boundary CellData b_cell_data, float* b_mass, float* b_volume, float* b_C, float* b_density, // output: boundary density uint b_numParticles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= b_numParticles) return; // original index of boundary particle uint originalIndex = b_cell_data.grid_index[index]; // read position from sorted arrays float3 pos = b_cell_data.sorted_pos[index]; // initial density float rho = (*rest_density) * b_volume[originalIndex] * Poly6_W_CUDA(0, params.effective_radius); // get address in grid of boundary particles (basically the same as fluid particle) int3 gridPos = calcGridPos(pos); // use gridPos to traverse 27 surrounding grids (boundary - boundary) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_gridPos = gridPos + make_int3(x, y, z); rho += pbf_density_1( neighbor_gridPos, index, pos, b_cell_data.sorted_pos, b_mass, rest_density, b_cell_data.cellStart, b_cell_data.cellEnd, b_cell_data.grid_index, b_volume ); } } } // use gridPos to traverse 27 surrounding grids (boundary - fluid) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_gridPos = gridPos + make_int3(x, y, z); rho 
+= pbf_boundary_density( // boundary neighbor_gridPos, pos, // fluid mass, sorted_pos, cellStart, cellEnd, gridParticleIndex ); } } } // Update density of fluid particle b_density[originalIndex] = rho; // **repeated code** // Recompute constraint value of fluid particle b_C[originalIndex] = (b_density[originalIndex] / (*rest_density)) - 1.f; } /* fluid - boundary */ __global__ void compute_lambdas_d( float* lambda, // output: computed density float* rest_density, // input: rest density float3* sorted_pos, // input: sorted mass float* C, // input: contraint float* mass, uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, CellData cell_data, float* b_volume, uint numParticles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint originalIndex = gridParticleIndex[index]; // read particle data from sorted arrays float3 pos = sorted_pos[index]; // initial density lambda[originalIndex] = -C[originalIndex]; // get address in grid int3 gridPos = calcGridPos(pos); float3 gradientC_i = make_float3(0); //-(1.f / (*rest_density)) * //Poly6_W_Gradient_CUDA(make_float3(0, 0, 0), 0, params.effective_radius); float gradientC_sum = dot(gradientC_i, gradientC_i); // traverse 27 neighbors for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); float res = pbf_lambda_0( neighbor_pos, index, pos, rest_density, mass, sorted_pos, cellStart, cellEnd, gridParticleIndex ); gradientC_sum += res; } } } // traverse 27 neighbors in "boundary cells" for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); float res = pbf_lambda_boundary( neighbor_pos, pos, rest_density, mass[originalIndex], // paritcle_mass cell_data, b_volume ); gradientC_sum += res; } } } //printf("gradientC_sum: %f\n", gradientC_sum); lambda[originalIndex] /= gradientC_sum + params.epsilon; //lambda[originalIndex] = lambda_res; } __global__ void compute_boundary_lambdas_d( float* b_lambda, // lambda of boundary particles float* b_vol, float3* b_pos, float* b_C, float* b_mass, CellData b_cell_data, // Cell data of fluid particles float3* sorted_pos, uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, float* rest_density, uint b_numParticles // number of boundary particles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= b_numParticles) return; uint originalIndex = b_cell_data.grid_index[index]; // read particle data from sorted arrays float3 pos = b_cell_data.sorted_pos[index]; // initial density b_lambda[originalIndex] = -b_C[originalIndex]; float particle_mass = b_mass[originalIndex]; // get address in grid int3 gridPos = calcGridPos(pos); float3 gradientC_i = make_float3(0); //-(1.f / (*rest_density)) * //Poly6_W_Gradient_CUDA(make_float3(0, 0, 0), 0, params.effective_radius); float gradientC_sum = dot(gradientC_i, gradientC_i); // traverse 27 neighbors in boundary cells (boundary - boundary) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); float res = pbf_lambda_1( neighbor_pos, index, pos, rest_density, b_mass, b_cell_data.sorted_pos, b_cell_data.cellStart, b_cell_data.cellEnd, b_cell_data.grid_index, b_vol ); gradientC_sum += res; } } } // traverse 27 neighbors in "fluid cells" (boundary - fluid) for (int z = -1; z <= 1; 
z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); float res = pbf_boundary_lambda( // boundary neighbor_pos, pos, rest_density, particle_mass, // paritcle_mass b_vol[originalIndex], // volume // fluid sorted_pos, cellStart, cellEnd, gridParticleIndex ); gradientC_sum += res; } } } //printf("gradientC_sum: %f\n", gradientC_sum); b_lambda[originalIndex] /= gradientC_sum + params.epsilon; //lambda[originalIndex] = lambda_res; } __global__ void compute_position_correction( float* lambda, // output: computed density float* rest_density, // input: rest density float3* sorted_pos, // input: sorted mass //float3* new_pos, // output: new_pos float3* correction, // output: accumulated correction uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, // boundary CellData b_cell_data, float* b_lambda, uint numParticles, float dt ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint originalIndex = gridParticleIndex[index]; // read particle data from sorted arrays float3 pos = sorted_pos[index]; // initial density float lambda_i = lambda[originalIndex]; // get address in grid int3 gridPos = calcGridPos(pos); float3 corr = make_float3(0, 0, 0); // traverse 27 neighbors for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbf_correction( neighbor_pos, index, pos, lambda_i, rest_density, sorted_pos, lambda, cellStart, cellEnd, gridParticleIndex, dt ); } } } for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbf_correction_boundary( neighbor_pos, index, pos, lambda_i, rest_density, b_cell_data, b_lambda, dt ); } } } corr = (1.f / (*rest_density)) * corr; correction[originalIndex] = corr; //compute new position //new_pos[originalIndex] = pos + corr; } __global__ void apply_correction( float3* new_pos, float3* predict_pos, float3* correction, CellData cell_data, uint numParticles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; //predict_pos[index] = new_pos[index]; uint original_index = cell_data.grid_index[index]; new_pos[original_index] = cell_data.sorted_pos[index] + correction[original_index]; predict_pos[original_index] = new_pos[original_index]; // write back to sorted_pos for next iteration cell_data.sorted_pos[index] = new_pos[original_index]; correction[original_index] = make_float3(0, 0, 0); } __global__ void finalize_correction( float3* pos, float3* new_pos, float3* predict_pos, float3* velocity, uint numParticles, float dt ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; //float3 res = new_pos[index]; //float3 vel = (res - pos[index]) / dt; float3 t_pos = new_pos[index]; float3 t_vel = (t_pos - pos[index]) / dt; velocity[index] = t_vel; //predict_pos[index] = t_pos; pos[index] = t_pos; } void allocateArray(void** devPtr, size_t size) { checkCudaErrors(hipMalloc(devPtr, size)); } void setParams(SimParams* param_in) { checkCudaErrors(hipMemcpyToSymbol(params, param_in, sizeof(SimParams))); } /* Integration for Position based Dynamics */ void integratePBD( float3* pos, float3* vel, float3* force, float* massInv, float3* predict_pos, float3* new_pos, float deltaTime, uint numParticles ) { uint numThreads, numBlocks; compute_grid_size(numParticles, 
MAX_THREAD_NUM, numBlocks, numThreads); integrate_pbd_d << <numBlocks, numThreads >> > ( pos, vel, force, massInv, predict_pos, new_pos, deltaTime, numParticles ); getLastCudaError("Kernel execution failed: integrate_pbd_d "); } void sort_particles(CellData cell_data, uint numParticles) { uint* grid_hash = cell_data.grid_hash; uint* grid_index = cell_data.grid_index; thrust::sort_by_key( thrust::device_ptr<uint>(grid_hash), thrust::device_ptr<uint>(grid_hash + numParticles), thrust::device_ptr<uint>(grid_index) ); } void solve_dem_collision( float3* newVel, float3* sortedPos, float3* sortedVel, uint* gridParticleIndex, uint* cellStart, uint* cellEnd, uint numParticles, uint numCells, float dt) { // thread per particle uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); // execute the kernel collideD << < numBlocks, numThreads >> > ( newVel, sortedPos, sortedVel, gridParticleIndex, cellStart, cellEnd, numParticles, dt ); // check if kernel invocation generated an error getLastCudaError("Kernel execution failed"); } void solve_sph_fluid( float* rest_density, ParticleSet* sph_particles, CellData sph_cell_data, uint numParticles, ParticleSet* boundary_particles, CellData b_cell_data, uint b_num_particles, float dt, int iterations ) { std::chrono::steady_clock::time_point t1, t2, t3, t4, t5; uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); for (int i = 0; i < iterations; ++i) { // CUDA SPH Kernel // compute density t1 = std::chrono::high_resolution_clock::now(); compute_density_d << <numBlocks, numThreads >> > ( sph_particles->m_d_density, rest_density, sph_cell_data.sorted_pos, sph_particles->m_d_mass, sph_particles->m_d_C, sph_cell_data.grid_index, sph_cell_data.cellStart, sph_cell_data.cellEnd, b_cell_data, boundary_particles->m_d_volume, numParticles ); getLastCudaError("Kernel execution failed: compute_density_d "); // compute density contributed by boundary particles compute_boundary_density_d << <numBlocks, numThreads >> > ( rest_density, sph_cell_data.sorted_pos, sph_particles->m_d_mass, sph_cell_data.cellStart, sph_cell_data.cellEnd, sph_cell_data.grid_index, b_cell_data, boundary_particles->m_d_mass, boundary_particles->m_d_volume, boundary_particles->m_d_C, boundary_particles->m_d_density, b_num_particles ); // compute density of bounary particles // compute_boundary_density_d(); getLastCudaError("Kernel execution failed: compute_density_boundary_d "); t2 = std::chrono::high_resolution_clock::now(); // compute lambda compute_lambdas_d << <numBlocks, numThreads >> > ( sph_particles->m_d_lambda, rest_density, sph_cell_data.sorted_pos, sph_particles->m_d_C, sph_particles->m_d_mass, sph_cell_data.grid_index, sph_cell_data.cellStart, sph_cell_data.cellEnd, b_cell_data, boundary_particles->m_d_volume, numParticles ); getLastCudaError("Kernel execution failed: compute_lambdas_d "); compute_boundary_lambdas_d << <numBlocks, numThreads >> > ( boundary_particles->m_d_lambda, boundary_particles->m_d_volume, boundary_particles->m_d_positions, boundary_particles->m_d_C, boundary_particles->m_d_mass, b_cell_data, sph_cell_data.sorted_pos, sph_cell_data.grid_index, sph_cell_data.cellStart, sph_cell_data.cellEnd, rest_density, b_num_particles ); getLastCudaError("Kernel execution failed: compute_boundary_lambdas_d "); t3 = std::chrono::high_resolution_clock::now(); // compute new position compute_position_correction << <numBlocks, numThreads >> > ( sph_particles->m_d_lambda, rest_density, 
sph_cell_data.sorted_pos, //sph_particles->m_d_new_positions, sph_particles->m_d_correction, sph_cell_data.grid_index, sph_cell_data.cellStart, sph_cell_data.cellEnd, b_cell_data, boundary_particles->m_d_lambda, numParticles, dt ); getLastCudaError("Kernel execution failed: compute_position_correction "); // correct this iteration apply_correction << <numBlocks, numThreads >> > ( sph_particles->m_d_new_positions, sph_particles->m_d_predict_positions, sph_particles->m_d_correction, sph_cell_data, numParticles ); getLastCudaError("Kernel execution failed: apply_correction "); t4 = std::chrono::high_resolution_clock::now(); } // finalize correction finalize_correction << <numBlocks, numThreads >> > ( sph_particles->m_d_positions, sph_particles->m_d_new_positions, sph_particles->m_d_predict_positions, sph_particles->m_d_velocity, numParticles, dt ); getLastCudaError("Kernel execution failed: finalize_correction "); /* t5 = std::chrono::high_resolution_clock::now(); { ImGui::Begin("CUDA Performance"); ImGui::Text("Density: %.5lf (ms)", (t2 - t1).count() / 1000000.0f); ImGui::Text("Lambda: %.5lf (ms)", (t3 - t2).count() / 1000000.0f); ImGui::Text("Correction: %.5lf (ms)", (t4 - t3).count() / 1000000.0f); ImGui::Text("Finalize: %.5lf (ms)", (t5 - t4).count() / 1000000.0f); ImGui::End(); } */ } __device__ float3 pbd_distance_correction( int3 grid_pos, uint index, float3 pos, float w0, float* invMass, CellData cell_data ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_data.cellStart[grid_hash]; float3 correction = make_float3(0, 0, 0); if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_data.cellEnd[grid_hash]; // reuse C in searching float C = 0; for (uint j = start_index; j < end_index; j++) { float3 correction_j = make_float3(0, 0, 0); if (j != index) // check not colliding with self { uint original_index_j = cell_data.grid_index[j]; float3 pos2 = cell_data.sorted_pos[j]; float3 v = pos - pos2; float dist = length(v); // correct if distance is close if (dist <= 2.f * params.particle_radius) { // Non-penetration correction const float w1 = invMass[original_index_j]; float w_sum = w0 + w1; C = dist - 2.f * params.particle_radius; // normalize v + 0.000001f for vanish problem float3 n = v / (dist);// +0.000001f); correction_j = -w0 * (1.f / w_sum) * C * n; /* // Tangential correction // project on tangential direction float penetration = abs(C); float3 correction_j_t = correction_j - (dot(correction_j, n) * n); float threshold = params.static_friction * penetration; float len = length(correction_j_t); //printf("penetration: %f\n", penetration); //printf("Correction: %f, %f, %f\n", correction_j_t.x, correction_j_t.y, correction_j_t.z); // use kinematic friction model if (length(correction_j_t) < threshold) { float coeff = min(params.kinematic_friction * penetration / len, 1.f); correction_j_t = coeff * correction_j_t; } correction_j_t = (w0 / w_sum) * correction_j_t; correction_j += correction_j_t; */ } } correction += correction_j; } //printf("Num neighbors: %u\n", end_index - start_index); } return correction; } __device__ float3 pbd_distance_correction_boundary( int3 grid_pos, uint index, float3 pos, float w0, float* b_invMass, // invMass of boundary particles CellData b_cell_data // cell_data of boundary particles ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = b_cell_data.cellStart[grid_hash]; float3 correction = 
make_float3(0, 0, 0); if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = b_cell_data.cellEnd[grid_hash]; // reuse C in searching float C = 0; for (uint j = start_index; j < end_index; j++) { float3 correction_j = make_float3(0, 0, 0); uint original_index_j = b_cell_data.grid_index[j]; float3 pos2 = b_cell_data.sorted_pos[j]; float3 v = pos - pos2; float dist = length(v); // correct if distance is close if (dist <= 2.f * params.particle_radius) { // Non-penetration correction const float w1 = b_invMass[original_index_j]; float w_sum = w0 + w1; C = dist - 2.f * params.particle_radius; // normalize v + 0.000001f for vanish problem float3 n = v / (dist);// +0.000001f); correction_j = -w0 * (1.f / w_sum) * C * n; } correction += correction_j; } //printf("Num neighbors: %u\n", end_index - start_index); } return correction; } __global__ void compute_distance_correction( float3* correction, // output: corrected pos float* invMass, // input: mass float* b_invMass, CellData cell_data, // input: cell data of dem particles CellData b_cell_data, uint numParticles // input: number of DEM particles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint original_index = cell_data.grid_index[index]; // read particle data from sorted arrays float3 pos = cell_data.sorted_pos[index]; float w0 = invMass[original_index]; // get address in grid int3 gridPos = calcGridPos(pos); float3 corr = make_float3(0, 0, 0); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbd_distance_correction( neighbor_pos, index, pos, w0, invMass, cell_data ); } } } for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbd_distance_correction_boundary( neighbor_pos, index, pos, w0, b_invMass, b_cell_data ); } } } correction[original_index] = corr; } __device__ float3 pbd_friction_correction( int3 grid_pos, uint index, float3 predict_pos0, float3 original_pos0, float w0, float3* predict_pos, float3* original_pos, float* invMass, CellData cell_data ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_data.cellStart[grid_hash]; float3 result = make_float3(0,0,0);// correction_i; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { float3 correction_j = make_float3(0, 0, 0); if (j != index) // check not colliding with self { uint original_index_j = cell_data.grid_index[j]; float3 original_pos1 = original_pos[original_index_j]; //float3 predict_pos1 = predict_pos[original_index_j]; float3 predict_pos1 = cell_data.sorted_pos[j]; float3 v = predict_pos0 - predict_pos1; float dist = length(v); // correct if distance is close if (dist <= 2.f * params.particle_radius) { // Non-penetration correction const float w1 = invMass[original_index_j]; float w_sum = w0 + w1; // normalize v + 0.000001f for vanish problem float3 n = v / (dist);// +0.000001f); float penetration = 2.f * params.particle_radius - dist; float3 dx = (predict_pos0 - original_pos0) + (predict_pos1 - original_pos1); float3 dx_t = dx - (dot(dx, n) * n); //printf("dx: %f, %f, %f\n", dx_t.x, dx_t.y, dx_t.z); //printf("penetration: %f\n", penetration); float threshold = params.static_friction * 
penetration; float len = length(dx_t); // use kinematic friction model if (length(dx_t) > threshold) { float coeff = min(params.kinematic_friction * penetration / len, 1.f); dx_t = coeff * dx_t; }/* else { printf("static\n"); } */ dx_t = -(w0 / w_sum) * dx_t; correction_j += dx_t; //printf("dx: %f, %f, %f\n", dx_t.x, dx_t.y, dx_t.z); } } result += correction_j; } //printf("Num neighbors: %u\n", end_index - start_index); } return result; } __device__ float3 pbd_friction_correction_boundary( int3 grid_pos, uint index, float3 predict_pos0, float3 original_pos0, float w0, float* b_invMass, CellData b_cell_data ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = b_cell_data.cellStart[grid_hash]; float3 result = make_float3(0, 0, 0);// correction_i; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = b_cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { float3 correction_j = make_float3(0, 0, 0); uint original_index_j = b_cell_data.grid_index[j]; float3 pos1 = b_cell_data.sorted_pos[j]; float3 v = predict_pos0 - pos1; float dist = length(v); // correct if distance is close if (dist <= 2.f * params.particle_radius) { // Non-penetration correction const float w1 = b_invMass[original_index_j]; float w_sum = w0 + w1; // normalize v + 0.000001f for vanish problem float3 n = v / (dist);// +0.000001f); float penetration = 2.f * params.particle_radius - dist; float3 dx = (predict_pos0 - original_pos0); float3 dx_t = dx - (dot(dx, n) * n); //printf("dx: %f, %f, %f\n", dx.x, dx.y, dx.z); float threshold = params.static_friction * penetration; float len = length(dx_t); // if exceed threshold use kinematic friction model if (length(dx_t) > threshold) { float coeff = min(params.kinematic_friction * penetration / len, 1.f); dx_t = coeff * dx_t; } dx_t = -(w0 / w_sum) * dx_t; correction_j += dx_t; } result += correction_j; } //printf("Num neighbors: %u\n", end_index - start_index); } return result; } __global__ void compute_friction_correction( float3* correction, float3* new_pos, // output: corrected pos float3* original_pos, // input: position at the start of this time step float* invMass, // input: mass float* b_invMass, CellData cell_data, // input: cell data of dem particles CellData b_cell_data, uint numParticles // input: number of DEM particles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint original_index = cell_data.grid_index[index]; // read particle data from sorted arrays float3 pos = cell_data.sorted_pos[index]; float3 new_pos0 = new_pos[original_index]; float3 original_pos0 = original_pos[original_index]; float w0 = invMass[original_index]; //float3 correction_i = correction[original_index]; // get address in grid int3 gridPos = calcGridPos(pos); float3 corr = make_float3(0, 0, 0); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbd_friction_correction( neighbor_pos, index, pos, original_pos0,w0, new_pos, original_pos, invMass, cell_data ); } } } for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbd_friction_correction_boundary( neighbor_pos, index, pos, original_pos0, w0, b_invMass, b_cell_data ); } } } //printf("corr: %f %f %f\n", corr.x, corr.y, corr.z); //corr = 0.5f * (correction_i + corr); 
	correction[original_index] = corr;
	//new_pos[original_index] = pos + corr;
}

void solve_pbd_dem(
	ParticleSet* dem_particles,
	ParticleSet* boundary_particles,
	CellData cell_data,
	CellData b_cell_data,
	uint numParticles,
	uint b_numParticles,
	float dt,
	int iteration
)
{
	uint numThreads, numBlocks;
	compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads);

	for (int i = 0; i < iteration; ++i)
	{
		compute_distance_correction << <numBlocks, numThreads >> > (
			dem_particles->m_d_correction,
			dem_particles->m_d_massInv,
			boundary_particles->m_d_massInv,
			cell_data,
			b_cell_data,
			numParticles
			);
		getLastCudaError("Kernel execution failed: compute_dem_correction ");

		apply_correction << <numBlocks, numThreads >> > (
			dem_particles->m_d_new_positions,
			dem_particles->m_d_predict_positions,
			dem_particles->m_d_correction,
			cell_data,
			numParticles
			);
		getLastCudaError("Kernel execution failed: apply_correction ");
	}

	compute_friction_correction << <numBlocks, numThreads >> > (
		dem_particles->m_d_correction,
		dem_particles->m_d_new_positions,
		dem_particles->m_d_positions,
		dem_particles->m_d_massInv,
		boundary_particles->m_d_massInv,
		cell_data,
		b_cell_data,
		numParticles
		);
	getLastCudaError("Kernel execution failed: compute_friction_correction ");

	apply_correction << <numBlocks, numThreads >> > (
		dem_particles->m_d_new_positions,
		dem_particles->m_d_predict_positions,
		dem_particles->m_d_correction,
		cell_data,
		numParticles
		);
	getLastCudaError("Kernel execution failed: apply_correction ");

	// finalize correction
	finalize_correction << <numBlocks, numThreads >> > (
		dem_particles->m_d_positions,
		dem_particles->m_d_new_positions,
		dem_particles->m_d_predict_positions,
		dem_particles->m_d_velocity,
		numParticles,
		dt
		);
	getLastCudaError("Kernel execution failed: finalize_correction ");
}
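/*
 * Editor's hedged usage sketch (not part of the original sources).
 * The file above only defines the host-side entry points (setParams,
 * calculate_hash, sort_particles, reorder_data, integratePBD,
 * solve_sph_fluid, solve_pbd_dem); the driver below illustrates one
 * plausible per-frame call order under these assumptions:
 *   - ParticleSet exposes an m_d_force buffer alongside the m_d_* buffers
 *     referenced above (m_d_force is an assumption, not shown in this file);
 *   - the ParticleSets, their CellData, and d_rest_density (a device
 *     pointer, since the kernels dereference it) were allocated earlier,
 *     e.g. with allocateArray, and setParams() was called once;
 *   - boundary_cells and boundary->m_d_volume were built once at startup
 *     (calculate_hash / sort_particles / reorder_data + compute_boundary_volume).
 * Iteration counts and the function/parameter names introduced here are
 * illustrative only.
 */
void step_simulation_sketch(
	ParticleSet* fluid, CellData fluid_cells, uint n_fluid,
	ParticleSet* granular, CellData dem_cells, uint n_dem,
	ParticleSet* boundary, CellData boundary_cells, uint n_boundary,
	float* d_rest_density,
	uint num_cells, float dt)
{
	// 1. Predict fluid positions from gravity and damping.
	integratePBD(fluid->m_d_positions, fluid->m_d_velocity, fluid->m_d_force,
		fluid->m_d_massInv, fluid->m_d_predict_positions,
		fluid->m_d_new_positions, dt, n_fluid);

	// 2. Rebuild the uniform grid around the predicted positions so the
	//    constraint kernels can run their 27-cell neighbor searches.
	calculate_hash(fluid_cells, fluid->m_d_predict_positions, n_fluid);
	sort_particles(fluid_cells, n_fluid);
	reorder_data(fluid_cells, fluid->m_d_predict_positions, n_fluid, num_cells);

	// 3. Iteratively project the density (PBF) constraints against fluid
	//    and boundary data, then write corrected positions/velocities back.
	solve_sph_fluid(d_rest_density,
		fluid, fluid_cells, n_fluid,
		boundary, boundary_cells, n_boundary,
		dt, /*iterations=*/4);

	// 4. Same pattern for the granular (PBD/DEM) phase: predict, rebuild
	//    the grid, then solve distance and friction constraints.
	integratePBD(granular->m_d_positions, granular->m_d_velocity,
		granular->m_d_force, granular->m_d_massInv,
		granular->m_d_predict_positions, granular->m_d_new_positions,
		dt, n_dem);
	calculate_hash(dem_cells, granular->m_d_predict_positions, n_dem);
	sort_particles(dem_cells, n_dem);
	reorder_data(dem_cells, granular->m_d_predict_positions, n_dem, num_cells);
	solve_pbd_dem(granular, boundary, dem_cells, boundary_cells,
		n_dem, n_boundary, dt, /*iteration=*/2);
}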
6f4650d979d392f7f24b6ccf2eba01e1473910a7.cu
#include <cuda_runtime.h> #include <cstdlib> #include <helper_cuda.h> #include <helper_functions.h> #include <device_atomic_functions.h> #include <helper_math.h> #include <stdio.h> #include <thrust/device_ptr.h> #include <thrust/for_each.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/sort.h> #include <cooperative_groups.h> #include "cuda_simulation.cuh" #include "sph_kernel.cuh" #include <chrono> #include "imgui/imgui.h" namespace cg = cooperative_groups; // calculate position in uniform grid inline __device__ int3 calcGridPos(float3 p) { int3 gridPos; gridPos.x = floor((p.x - params.world_origin.x) / params.cell_size.x); gridPos.y = floor((p.y - params.world_origin.y) / params.cell_size.y); gridPos.z = floor((p.z - params.world_origin.z) / params.cell_size.z); return gridPos; } // calculate address in grid from position (clamping to edges) inline __device__ uint calcGridHash(int3 gridPos) { gridPos.x = gridPos.x & (params.grid_size.x - 1); // wrap grid, assumes size is power of 2 gridPos.y = gridPos.y & (params.grid_size.y - 1); gridPos.z = gridPos.z & (params.grid_size.z - 1); return __umul24(__umul24(gridPos.z, params.grid_size.y), params.grid_size.x) + __umul24(gridPos.y, params.grid_size.x) + gridPos.x; } // collide two spheres using DEM method inline __device__ float3 collideSpheres( float3 posA, float3 posB, float3 velA, float3 velB, float radiusA, float radiusB, float attraction) { // calculate relative position float3 relPos = posB - posA; float dist = length(relPos); float collideDist = radiusA + radiusB; float3 force = make_float3(0.0f); //printf("dist: %f\ncollideDist: %f", dist, collideDist); if (dist < collideDist) { float3 norm = relPos / (dist+0.00001f); // relative velocity float3 relVel = velB - velA; // relative tangential velocity float3 tanVel = relVel - (dot(relVel, norm) * norm); // spring force force = -params.spring * (collideDist - dist) * norm; // dashpot (damping) force force += params.damping * relVel; // tangential shear force force += params.shear * tanVel; // attraction force += attraction * relPos; //printf("%f %f %f\n", force.x, force.y, force.z); } return force; } inline __device__ float3 collideCell( int3 gridPos, uint index, float3 pos, float3 vel, float3* oldPos, float3* oldVel, uint* cellStart, uint* cellEnd) { uint gridHash = calcGridHash(gridPos); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; float3 force = make_float3(0.0f); if (startIndex != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { if (j != index) // check not colliding with self { float3 pos2 = oldPos[j]; float3 vel2 = oldVel[j]; // collide two spheres force += collideSpheres( pos, pos2, vel, vel2, params.particle_radius, params.particle_radius, params.attraction); } } } return force; } inline __device__ float sph_boundary_volume( int3 grid_pos, uint index, float3 pos1, float* mass, CellData data ) { uint grid_hash = calcGridHash(grid_pos); uint start_index = data.cellStart[grid_hash]; float rho = 0.f; if (start_index != 0xffffffff) { uint end_index = data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; ++j) { if (j != index) { uint original_index = data.grid_index[j]; float3 pos2 = data.sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); rho += mass[original_index] * Poly6_W_CUDA(dist, params.effective_radius); } } } return rho; } __global__ void calcHashD( CellData cell_data, // output float3* pos, // input: 
positions uint num_particles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= num_particles) return; volatile float3 p = pos[index]; // get address in grid int3 gridPos = calcGridPos(make_float3(p.x, p.y, p.z)); uint hash = calcGridHash(gridPos); // store grid hash and particle index cell_data.grid_hash[index] = hash; cell_data.grid_index[index] = index; } __global__ void calcHash_boundary_D( CellData cell_data, float3* pos, // input: positions uint num_particles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= num_particles) return; //printf("%u \n", index); volatile float3 p = pos[index]; // get address in grid int3 gridPos = calcGridPos(make_float3(p.x, p.y, p.z)); uint hash = calcGridHash(gridPos); // store grid hash and particle index cell_data.grid_hash[index] = hash; cell_data.grid_index[index] = index; } /* * Reorder data to find cell start and end (for neighbor searching) */ __global__ void reorderDataAndFindCellStartD( CellData cell_data, float3* oldPos, // input: sorted position array uint numParticles) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = cell_data.grid_hash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = cell_data.grid_hash[index - 1]; } } cg::sync(cta); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. // As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cell_data.cellStart[hash] = index; if (index > 0) cell_data.cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cell_data.cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = cell_data.grid_index[index]; float3 pos = oldPos[sortedIndex]; cell_data.sorted_pos[index] = pos; } } /* __global__ void reorderData_boundary_D( CellData cell_data, float3* oldPos, // input: sorted position array uint numParticles) { // Handle to thread block group cg::thread_block cta = cg::this_thread_block(); extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = cell_data.grid_hash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = cell_data.grid_hash[index - 1]; } } cg::sync(cta); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cell_data.cellStart[hash] = index; if (index > 0) cell_data.cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cell_data.cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos data uint sortedIndex = cell_data.grid_index[index]; float3 pos = oldPos[sortedIndex]; cell_data.sorted_pos[index] = pos; } } */ __global__ void compute_boundary_volume_d( CellData data, float* mass, float* volume, uint numParticles) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint originalIndex = data.grid_index[index]; // read particle data from sorted arrays float3 pos = data.sorted_pos[index]; // initial volume float rho = mass[originalIndex] * Poly6_W_CUDA(0, params.effective_radius); // get address in grid int3 gridPos = calcGridPos(pos); // traverse 27 neighbors for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); rho += sph_boundary_volume( neighbor_pos, index, pos, mass, data ); } } } // Update volume volume[originalIndex] = mass[originalIndex] / rho; //printf("rho = %f\n", rho); //printf("C[%u]: %f\n", originalIndex, C[originalIndex]); } void compute_grid_size(uint n, uint block_size, uint& num_blocks, uint& num_threads) { num_threads = min(block_size, n); num_blocks = (n % num_threads != 0) ? (n / num_threads + 1) : (n / num_threads); } void calculate_hash( CellData cell_data, float3* pos, uint num_particles) { uint num_blocks, num_threads; compute_grid_size(num_particles, MAX_THREAD_NUM, num_blocks, num_threads); calcHashD << < num_blocks, num_threads >> > ( cell_data, pos, num_particles); getLastCudaError("Kernel execution failed: calc_hash"); } void reorder_data( CellData cell_data, float3* oldPos, uint numParticles, uint numCells) { uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); // set all cells to empty checkCudaErrors(cudaMemset(cell_data.cellStart, 0xffffffff, numCells * sizeof(uint))); uint smemSize = sizeof(uint) * (numThreads + 1); reorderDataAndFindCellStartD << < numBlocks, numThreads, smemSize >> > ( cell_data, oldPos, numParticles); getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD"); } /* void reorderData_boundary( CellData cell_data, float3* oldPos, uint numParticles, uint numCells) { uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); // set all cells to empty checkCudaErrors(cudaMemset(cell_data.cellStart, 0xffffffff, numCells * sizeof(uint))); uint smemSize = sizeof(uint) * (numThreads + 1); reorderData_boundary_D << < numBlocks, numThreads, smemSize >> > ( cell_data, oldPos, numParticles); getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD"); } */ void compute_boundary_volume(CellData data, float* mass, float* volume, uint numParticles) { uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); compute_boundary_volume_d << <numBlocks, numThreads >> > ( data, mass, volume, numParticles); getLastCudaError("Kernel execution failed: copmute_boundary_volume"); } __global__ void test_offset(float3* positions) { int i = blockIdx.x * blockDim.x + threadIdx.x; /* if (i == 0) printf("particles[0]: %f, %f, %f\n", positions[i].x , positions[i].y, positions[i].z); */ 
positions[i].x = positions[i].x + 0.001f; positions[i].y = positions[i].y + 0.001f; positions[i].z = positions[i].z + 0.001f; } __global__ void integrate_pbd_d( float3* pos, float3* vel, float3* force, float* massInv, float3* predict_pos, float3* new_pos, float dt, uint numParticles) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; float3 t_vel = vel[index] + dt * params.gravity; t_vel = t_vel * params.global_damping; float3 t_pos = pos[index] + dt * t_vel; if (t_pos.x >= 1.0f) { t_pos.x = 1.f; t_vel.x = -abs(t_vel.x); t_vel *= params.boundary_damping; } if (t_pos.x <= -1.0f) { t_pos.x = -1.f; t_vel.x = abs(t_vel.x); t_vel *= params.boundary_damping; } if (t_pos.z >= 1.0f) { t_pos.z = 1.f; t_vel.z = -abs(t_vel.z); t_vel *= params.boundary_damping; } if (t_pos.z <= -1.0f) { t_pos.z = -1.f; t_vel.z = abs(t_vel.z); t_vel *= params.boundary_damping; } if (t_pos.y <= 0.f) { t_pos.y = 0.f; t_vel.y = abs(t_vel.y); t_vel *= params.boundary_damping; } /* Velocity limitation if (length(t_vel) > 5.f) { t_vel = (5.f / length(t_vel)) * t_vel ; } */ predict_pos[index] = t_pos;// pos[index] + dt * t_vel; vel[index] = t_vel; new_pos[index] = predict_pos[index]; } // collide a particle against all other particles in a given cell /* Collision device code */ __global__ void collideD( float3* newVel, // output: new velocity float3* oldPos, // input: sorted positions float3* oldVel, // input: sorted velocities uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, uint numParticles, float dt) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; // read particle data from sorted arrays float3 pos = oldPos[index]; float3 vel = oldVel[index]; // get address in grid int3 gridPos = calcGridPos(pos); // examine neighbouring cells float3 force = make_float3(0.0f); // traverse 27 neighbors for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); force += collideCell(neighbor_pos, index, pos, vel, oldPos, oldVel, cellStart, cellEnd); } } } // write new velocity back to original unsorted location uint originalIndex = gridParticleIndex[index]; newVel[originalIndex] = vel + force * dt; // + force/mass * dt ? 
} inline __device__ float pbf_density_0( int3 grid_pos, uint index, float3 pos, float3* sorted_pos, float* mass, float* rest_density, uint* cell_start, uint* cell_end, uint* gridParticleIndex ) // type: 0->fluid fluid 1->boundary boundary { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float density = 0.0f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float rho = 0.f; rho = mass[original_index] * Poly6_W_CUDA(dist, params.effective_radius); density += rho; } } } return density; } inline __device__ float pbf_density_1( int3 grid_pos, uint index, float3 pos, float3* sorted_pos, float* mass, float* rest_density, uint* cell_start, uint* cell_end, uint* gridParticleIndex, float* b_volume = nullptr) // type: 0->fluid fluid 1->boundary boundary { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float density = 0.0f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float rho = 0.f; rho = (*rest_density) * b_volume[original_index] * Poly6_W_CUDA(dist, params.effective_radius); density += rho; } } } return density; } inline __device__ float pbf_density_boundary( int3 grid_pos, float3 pos1, float* rest_density, float* volume, CellData cell_data ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_data.cellStart[grid_hash]; float density = 0.0f; // if cell of boundary cell data is not empty if (start_index != 0xffffffff) { // iterate over particles in this cell uint end_index = cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { // no need to check collision (boundary cell data is not the same as fluid cell data) uint original_index = cell_data.grid_index[j]; float3 pos2 = cell_data.sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); float rho = (*rest_density) * volume[original_index] * Poly6_W_CUDA(dist, params.effective_radius); density += rho; } } // return contributions of boundary paritcles return density; } // boundary - fluid inline __device__ float pbf_boundary_density( // boundary int3 grid_pos, // searching grid pos float3 pos1, // position of boundary particle // fluid float* mass, float3* sorted_pos, uint* cell_start, uint* cell_end, uint* gridParticleIndex ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float density = 0.0f; // if cell of boundary cell data is not empty if (start_index != 0xffffffff) { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { // no need to check collision (boundary cell data is not the same as fluid cell data) uint original_index = gridParticleIndex[j]; float3 pos2 = sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); float rho = mass[original_index] * 
Poly6_W_CUDA(dist, params.effective_radius); density += rho; } } // return contributions of boundary paritcles return density; } inline __device__ float pbf_lambda_0( int3 grid_pos, uint index, float3 pos, float* rest_density, float* mass, float3* sorted_pos, uint* cell_start, uint* cell_end, uint* gridParticleIndex ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float gradientC_sum = 0.f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; //float particle_mass = mass[original_index]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float3 gradientC_j; gradientC_j = (1.f / (*rest_density)) * Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float dot_val = dot(gradientC_j, gradientC_j); gradientC_sum += dot_val; } } } return gradientC_sum; } inline __device__ float pbf_lambda_1( int3 grid_pos, uint index, float3 pos, float* rest_density, float* mass, float3* sorted_pos, uint* cell_start, uint* cell_end, uint* gridParticleIndex, float* b_volume = nullptr) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float gradientC_sum = 0.f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; float particle_mass = mass[original_index]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float3 gradientC_j; float vol = b_volume[original_index]; gradientC_j = (1.f / (*rest_density)) * ((*rest_density) * vol / particle_mass) * Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float dot_val = dot(gradientC_j, gradientC_j); gradientC_sum += dot_val; } } } return gradientC_sum; } // fluid - boundary inline __device__ float pbf_lambda_boundary( int3 grid_pos, // searching grid pos float3 pos1, // position of fluid particle float* rest_density, float particle_mass, CellData cell_data, // cell data of boundary particle, float* volume ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_data.cellStart[grid_hash]; float gradientC_sum = 0.f; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { uint original_index = cell_data.grid_index[j]; float vol = volume[original_index]; float3 pos2 = cell_data.sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); float3 gradientC_j = (1.f / (*rest_density)) * ((*rest_density) * vol / particle_mass) * Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float dot_val = dot(gradientC_j, gradientC_j); gradientC_sum += dot_val; } } return gradientC_sum; } // Boundary - fluid inline __device__ float pbf_boundary_lambda( // boundary int3 grid_pos, // searching grid pos float3 pos1, // position of boundary particle float* rest_density, float particle_mass, float volume, // fluid float3* sorted_pos, uint* cell_start, uint* cell_end, uint* gridParticleIndex ) { uint grid_hash = calcGridHash(grid_pos); // get start of 
bucket for this cell uint start_index = cell_start[grid_hash]; float gradientC_sum = 0.f; // search in fluid cell if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { float3 pos2 = sorted_pos[j]; float3 vec = pos1 - pos2; float dist = length(vec); float3 gradientC_j = (1.f / (*rest_density)) * Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float dot_val = dot(gradientC_j, gradientC_j); gradientC_sum += dot_val; } } return gradientC_sum; } inline __device__ float3 pbf_correction( int3 grid_pos, uint index, float3 pos, float lambda_i, float* rest_density, float3* sorted_pos, float* lambda, uint* cell_start, uint* cell_end, uint* gridParticleIndex, float dt) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_start[grid_hash]; float3 correction = make_float3(0, 0, 0); if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_end[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = gridParticleIndex[j]; float3 pos2 = sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float3 gradient = Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float scorr = -0.1f; float x = Poly6_W_CUDA(dist, params.effective_radius) / Poly6_W_CUDA(0.3f * params.effective_radius, params.effective_radius); x = pow(x, 4); scorr = scorr * x * dt * dt * dt; //printf("scorr: %f\n", scorr); float3 res = //(1.f / (*rest_density)) * (lambda_i + lambda[original_index] +scorr)* gradient; correction += res; } } //printf("Num neighbors: %u\n", end_index - start_index); } return correction; } // compute correction from boundary particles inline __device__ float3 pbf_correction_boundary( int3 grid_pos, uint index, float3 pos, float lambda_i, float* rest_density, // boundary CellData b_cell_data, float* b_lambda, float dt) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = b_cell_data.cellStart[grid_hash]; float3 correction = make_float3(0, 0, 0); if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = b_cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { if (j != index) // check not colliding with self { uint original_index = b_cell_data.grid_index[j]; float lambda_j = b_lambda[original_index]; float3 pos2 = b_cell_data.sorted_pos[j]; float3 vec = pos - pos2; float dist = length(vec); float3 gradient = Poly6_W_Gradient_CUDA(vec, dist, params.effective_radius); float scorr = -0.1f; float x = Poly6_W_CUDA(dist, params.effective_radius) / Poly6_W_CUDA(0.3f * params.effective_radius, params.effective_radius); x = pow(x, 4); scorr = scorr * x * dt * dt; //printf("scorr: %f\n", scorr); float3 res = //(1.f / (*rest_density)) * (lambda_i + lambda_j) *// +scorr)* gradient; correction += res; } } //printf("Num neighbors: %u\n", end_index - start_index); } return correction; } __global__ void compute_density_d( float* density, // output: computed density float* rest_density, // input: rest density float3* sorted_pos, // input: sorted mass float* mass, // input: mass float* C, // input: contraint uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, //boundary CellData cell_data, float* b_volume, uint numParticles ) { uint index = 
__mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint originalIndex = gridParticleIndex[index]; // read particle data from sorted arrays float3 pos = sorted_pos[index]; // initial density float rho = mass[originalIndex] * Poly6_W_CUDA(0, params.effective_radius); // get address in grid int3 gridPos = calcGridPos(pos); // traverse 27 neighbors (fluid - fluid) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); rho += pbf_density_0( neighbor_pos, index, pos, sorted_pos, mass, rest_density, cellStart, cellEnd, gridParticleIndex ); } } } // use gridPos to traverse 27 surrounding grids (fluid - boundary) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_gridPos = gridPos + make_int3(x, y, z); rho += pbf_density_boundary( // fluid neighbor_gridPos, pos, rest_density, // boundary b_volume, cell_data ); } } } // Update date density and constraint value density[originalIndex] = rho; C[originalIndex] = (rho / (*rest_density)) - 1.f; //printf("rho = %f\n", rho); //printf("C[%u]: %f\n", originalIndex, C[originalIndex]); } __global__ void compute_boundary_density_d( // fluid float* rest_density, // input: rest density float3* sorted_pos, // input: sorted pos of fluid particle float* mass, // input: mass of fluid paritcle uint* cellStart, uint* cellEnd, uint* gridParticleIndex, // input: sorted particle indices (for original_index of fluid particles) // boundary CellData b_cell_data, float* b_mass, float* b_volume, float* b_C, float* b_density, // output: boundary density uint b_numParticles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= b_numParticles) return; // original index of boundary particle uint originalIndex = b_cell_data.grid_index[index]; // read position from sorted arrays float3 pos = b_cell_data.sorted_pos[index]; // initial density float rho = (*rest_density) * b_volume[originalIndex] * Poly6_W_CUDA(0, params.effective_radius); // get address in grid of boundary particles (basically the same as fluid particle) int3 gridPos = calcGridPos(pos); // use gridPos to traverse 27 surrounding grids (boundary - boundary) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_gridPos = gridPos + make_int3(x, y, z); rho += pbf_density_1( neighbor_gridPos, index, pos, b_cell_data.sorted_pos, b_mass, rest_density, b_cell_data.cellStart, b_cell_data.cellEnd, b_cell_data.grid_index, b_volume ); } } } // use gridPos to traverse 27 surrounding grids (boundary - fluid) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_gridPos = gridPos + make_int3(x, y, z); rho += pbf_boundary_density( // boundary neighbor_gridPos, pos, // fluid mass, sorted_pos, cellStart, cellEnd, gridParticleIndex ); } } } // Update density of fluid particle b_density[originalIndex] = rho; // **repeated code** // Recompute constraint value of fluid particle b_C[originalIndex] = (b_density[originalIndex] / (*rest_density)) - 1.f; } /* fluid - boundary */ __global__ void compute_lambdas_d( float* lambda, // output: computed density float* rest_density, // input: rest density float3* sorted_pos, // input: sorted mass float* C, // input: contraint float* mass, uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, CellData cell_data, float* b_volume, uint numParticles ) { uint 
index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint originalIndex = gridParticleIndex[index]; // read particle data from sorted arrays float3 pos = sorted_pos[index]; // initial density lambda[originalIndex] = -C[originalIndex]; // get address in grid int3 gridPos = calcGridPos(pos); float3 gradientC_i = make_float3(0); //-(1.f / (*rest_density)) * //Poly6_W_Gradient_CUDA(make_float3(0, 0, 0), 0, params.effective_radius); float gradientC_sum = dot(gradientC_i, gradientC_i); // traverse 27 neighbors for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); float res = pbf_lambda_0( neighbor_pos, index, pos, rest_density, mass, sorted_pos, cellStart, cellEnd, gridParticleIndex ); gradientC_sum += res; } } } // traverse 27 neighbors in "boundary cells" for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); float res = pbf_lambda_boundary( neighbor_pos, pos, rest_density, mass[originalIndex], // paritcle_mass cell_data, b_volume ); gradientC_sum += res; } } } //printf("gradientC_sum: %f\n", gradientC_sum); lambda[originalIndex] /= gradientC_sum + params.epsilon; //lambda[originalIndex] = lambda_res; } __global__ void compute_boundary_lambdas_d( float* b_lambda, // lambda of boundary particles float* b_vol, float3* b_pos, float* b_C, float* b_mass, CellData b_cell_data, // Cell data of fluid particles float3* sorted_pos, uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, float* rest_density, uint b_numParticles // number of boundary particles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= b_numParticles) return; uint originalIndex = b_cell_data.grid_index[index]; // read particle data from sorted arrays float3 pos = b_cell_data.sorted_pos[index]; // initial density b_lambda[originalIndex] = -b_C[originalIndex]; float particle_mass = b_mass[originalIndex]; // get address in grid int3 gridPos = calcGridPos(pos); float3 gradientC_i = make_float3(0); //-(1.f / (*rest_density)) * //Poly6_W_Gradient_CUDA(make_float3(0, 0, 0), 0, params.effective_radius); float gradientC_sum = dot(gradientC_i, gradientC_i); // traverse 27 neighbors in boundary cells (boundary - boundary) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); float res = pbf_lambda_1( neighbor_pos, index, pos, rest_density, b_mass, b_cell_data.sorted_pos, b_cell_data.cellStart, b_cell_data.cellEnd, b_cell_data.grid_index, b_vol ); gradientC_sum += res; } } } // traverse 27 neighbors in "fluid cells" (boundary - fluid) for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); float res = pbf_boundary_lambda( // boundary neighbor_pos, pos, rest_density, particle_mass, // paritcle_mass b_vol[originalIndex], // volume // fluid sorted_pos, cellStart, cellEnd, gridParticleIndex ); gradientC_sum += res; } } } //printf("gradientC_sum: %f\n", gradientC_sum); b_lambda[originalIndex] /= gradientC_sum + params.epsilon; //lambda[originalIndex] = lambda_res; } __global__ void compute_position_correction( float* lambda, // output: computed density float* rest_density, // input: rest density float3* sorted_pos, // input: sorted mass //float3* new_pos, // output: new_pos float3* correction, 
// output: accumulated correction uint* gridParticleIndex, // input: sorted particle indices uint* cellStart, uint* cellEnd, // boundary CellData b_cell_data, float* b_lambda, uint numParticles, float dt ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint originalIndex = gridParticleIndex[index]; // read particle data from sorted arrays float3 pos = sorted_pos[index]; // initial density float lambda_i = lambda[originalIndex]; // get address in grid int3 gridPos = calcGridPos(pos); float3 corr = make_float3(0, 0, 0); // traverse 27 neighbors for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbf_correction( neighbor_pos, index, pos, lambda_i, rest_density, sorted_pos, lambda, cellStart, cellEnd, gridParticleIndex, dt ); } } } for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbf_correction_boundary( neighbor_pos, index, pos, lambda_i, rest_density, b_cell_data, b_lambda, dt ); } } } corr = (1.f / (*rest_density)) * corr; correction[originalIndex] = corr; //compute new position //new_pos[originalIndex] = pos + corr; } __global__ void apply_correction( float3* new_pos, float3* predict_pos, float3* correction, CellData cell_data, uint numParticles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; //predict_pos[index] = new_pos[index]; uint original_index = cell_data.grid_index[index]; new_pos[original_index] = cell_data.sorted_pos[index] + correction[original_index]; predict_pos[original_index] = new_pos[original_index]; // write back to sorted_pos for next iteration cell_data.sorted_pos[index] = new_pos[original_index]; correction[original_index] = make_float3(0, 0, 0); } __global__ void finalize_correction( float3* pos, float3* new_pos, float3* predict_pos, float3* velocity, uint numParticles, float dt ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; //float3 res = new_pos[index]; //float3 vel = (res - pos[index]) / dt; float3 t_pos = new_pos[index]; float3 t_vel = (t_pos - pos[index]) / dt; velocity[index] = t_vel; //predict_pos[index] = t_pos; pos[index] = t_pos; } void allocateArray(void** devPtr, size_t size) { checkCudaErrors(cudaMalloc(devPtr, size)); } void setParams(SimParams* param_in) { checkCudaErrors(cudaMemcpyToSymbol(params, param_in, sizeof(SimParams))); } /* Integration for Position based Dynamics */ void integratePBD( float3* pos, float3* vel, float3* force, float* massInv, float3* predict_pos, float3* new_pos, float deltaTime, uint numParticles ) { uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); integrate_pbd_d << <numBlocks, numThreads >> > ( pos, vel, force, massInv, predict_pos, new_pos, deltaTime, numParticles ); getLastCudaError("Kernel execution failed: integrate_pbd_d "); } void sort_particles(CellData cell_data, uint numParticles) { uint* grid_hash = cell_data.grid_hash; uint* grid_index = cell_data.grid_index; thrust::sort_by_key( thrust::device_ptr<uint>(grid_hash), thrust::device_ptr<uint>(grid_hash + numParticles), thrust::device_ptr<uint>(grid_index) ); } void solve_dem_collision( float3* newVel, float3* sortedPos, float3* sortedVel, uint* gridParticleIndex, uint* cellStart, uint* cellEnd, uint numParticles, uint numCells, float dt) { // thread per particle 
uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); // execute the kernel collideD << < numBlocks, numThreads >> > ( newVel, sortedPos, sortedVel, gridParticleIndex, cellStart, cellEnd, numParticles, dt ); // check if kernel invocation generated an error getLastCudaError("Kernel execution failed"); } void solve_sph_fluid( float* rest_density, ParticleSet* sph_particles, CellData sph_cell_data, uint numParticles, ParticleSet* boundary_particles, CellData b_cell_data, uint b_num_particles, float dt, int iterations ) { std::chrono::steady_clock::time_point t1, t2, t3, t4, t5; uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); for (int i = 0; i < iterations; ++i) { // CUDA SPH Kernel // compute density t1 = std::chrono::high_resolution_clock::now(); compute_density_d << <numBlocks, numThreads >> > ( sph_particles->m_d_density, rest_density, sph_cell_data.sorted_pos, sph_particles->m_d_mass, sph_particles->m_d_C, sph_cell_data.grid_index, sph_cell_data.cellStart, sph_cell_data.cellEnd, b_cell_data, boundary_particles->m_d_volume, numParticles ); getLastCudaError("Kernel execution failed: compute_density_d "); // compute density contributed by boundary particles compute_boundary_density_d << <numBlocks, numThreads >> > ( rest_density, sph_cell_data.sorted_pos, sph_particles->m_d_mass, sph_cell_data.cellStart, sph_cell_data.cellEnd, sph_cell_data.grid_index, b_cell_data, boundary_particles->m_d_mass, boundary_particles->m_d_volume, boundary_particles->m_d_C, boundary_particles->m_d_density, b_num_particles ); // compute density of bounary particles // compute_boundary_density_d(); getLastCudaError("Kernel execution failed: compute_density_boundary_d "); t2 = std::chrono::high_resolution_clock::now(); // compute lambda compute_lambdas_d << <numBlocks, numThreads >> > ( sph_particles->m_d_lambda, rest_density, sph_cell_data.sorted_pos, sph_particles->m_d_C, sph_particles->m_d_mass, sph_cell_data.grid_index, sph_cell_data.cellStart, sph_cell_data.cellEnd, b_cell_data, boundary_particles->m_d_volume, numParticles ); getLastCudaError("Kernel execution failed: compute_lambdas_d "); compute_boundary_lambdas_d << <numBlocks, numThreads >> > ( boundary_particles->m_d_lambda, boundary_particles->m_d_volume, boundary_particles->m_d_positions, boundary_particles->m_d_C, boundary_particles->m_d_mass, b_cell_data, sph_cell_data.sorted_pos, sph_cell_data.grid_index, sph_cell_data.cellStart, sph_cell_data.cellEnd, rest_density, b_num_particles ); getLastCudaError("Kernel execution failed: compute_boundary_lambdas_d "); t3 = std::chrono::high_resolution_clock::now(); // compute new position compute_position_correction << <numBlocks, numThreads >> > ( sph_particles->m_d_lambda, rest_density, sph_cell_data.sorted_pos, //sph_particles->m_d_new_positions, sph_particles->m_d_correction, sph_cell_data.grid_index, sph_cell_data.cellStart, sph_cell_data.cellEnd, b_cell_data, boundary_particles->m_d_lambda, numParticles, dt ); getLastCudaError("Kernel execution failed: compute_position_correction "); // correct this iteration apply_correction << <numBlocks, numThreads >> > ( sph_particles->m_d_new_positions, sph_particles->m_d_predict_positions, sph_particles->m_d_correction, sph_cell_data, numParticles ); getLastCudaError("Kernel execution failed: apply_correction "); t4 = std::chrono::high_resolution_clock::now(); } // finalize correction finalize_correction << <numBlocks, numThreads >> > ( sph_particles->m_d_positions, 
sph_particles->m_d_new_positions, sph_particles->m_d_predict_positions, sph_particles->m_d_velocity, numParticles, dt ); getLastCudaError("Kernel execution failed: finalize_correction "); /* t5 = std::chrono::high_resolution_clock::now(); { ImGui::Begin("CUDA Performance"); ImGui::Text("Density: %.5lf (ms)", (t2 - t1).count() / 1000000.0f); ImGui::Text("Lambda: %.5lf (ms)", (t3 - t2).count() / 1000000.0f); ImGui::Text("Correction: %.5lf (ms)", (t4 - t3).count() / 1000000.0f); ImGui::Text("Finalize: %.5lf (ms)", (t5 - t4).count() / 1000000.0f); ImGui::End(); } */ } __device__ float3 pbd_distance_correction( int3 grid_pos, uint index, float3 pos, float w0, float* invMass, CellData cell_data ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_data.cellStart[grid_hash]; float3 correction = make_float3(0, 0, 0); if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_data.cellEnd[grid_hash]; // reuse C in searching float C = 0; for (uint j = start_index; j < end_index; j++) { float3 correction_j = make_float3(0, 0, 0); if (j != index) // check not colliding with self { uint original_index_j = cell_data.grid_index[j]; float3 pos2 = cell_data.sorted_pos[j]; float3 v = pos - pos2; float dist = length(v); // correct if distance is close if (dist <= 2.f * params.particle_radius) { // Non-penetration correction const float w1 = invMass[original_index_j]; float w_sum = w0 + w1; C = dist - 2.f * params.particle_radius; // normalize v + 0.000001f for vanish problem float3 n = v / (dist);// +0.000001f); correction_j = -w0 * (1.f / w_sum) * C * n; /* // Tangential correction // project on tangential direction float penetration = abs(C); float3 correction_j_t = correction_j - (dot(correction_j, n) * n); float threshold = params.static_friction * penetration; float len = length(correction_j_t); //printf("penetration: %f\n", penetration); //printf("Correction: %f, %f, %f\n", correction_j_t.x, correction_j_t.y, correction_j_t.z); // use kinematic friction model if (length(correction_j_t) < threshold) { float coeff = min(params.kinematic_friction * penetration / len, 1.f); correction_j_t = coeff * correction_j_t; } correction_j_t = (w0 / w_sum) * correction_j_t; correction_j += correction_j_t; */ } } correction += correction_j; } //printf("Num neighbors: %u\n", end_index - start_index); } return correction; } __device__ float3 pbd_distance_correction_boundary( int3 grid_pos, uint index, float3 pos, float w0, float* b_invMass, // invMass of boundary particles CellData b_cell_data // cell_data of boundary particles ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = b_cell_data.cellStart[grid_hash]; float3 correction = make_float3(0, 0, 0); if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = b_cell_data.cellEnd[grid_hash]; // reuse C in searching float C = 0; for (uint j = start_index; j < end_index; j++) { float3 correction_j = make_float3(0, 0, 0); uint original_index_j = b_cell_data.grid_index[j]; float3 pos2 = b_cell_data.sorted_pos[j]; float3 v = pos - pos2; float dist = length(v); // correct if distance is close if (dist <= 2.f * params.particle_radius) { // Non-penetration correction const float w1 = b_invMass[original_index_j]; float w_sum = w0 + w1; C = dist - 2.f * params.particle_radius; // normalize v + 0.000001f for vanish problem float3 n = v / (dist);// +0.000001f); correction_j = 
-w0 * (1.f / w_sum) * C * n; } correction += correction_j; } //printf("Num neighbors: %u\n", end_index - start_index); } return correction; } __global__ void compute_distance_correction( float3* correction, // output: corrected pos float* invMass, // input: mass float* b_invMass, CellData cell_data, // input: cell data of dem particles CellData b_cell_data, uint numParticles // input: number of DEM particles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint original_index = cell_data.grid_index[index]; // read particle data from sorted arrays float3 pos = cell_data.sorted_pos[index]; float w0 = invMass[original_index]; // get address in grid int3 gridPos = calcGridPos(pos); float3 corr = make_float3(0, 0, 0); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbd_distance_correction( neighbor_pos, index, pos, w0, invMass, cell_data ); } } } for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbd_distance_correction_boundary( neighbor_pos, index, pos, w0, b_invMass, b_cell_data ); } } } correction[original_index] = corr; } __device__ float3 pbd_friction_correction( int3 grid_pos, uint index, float3 predict_pos0, float3 original_pos0, float w0, float3* predict_pos, float3* original_pos, float* invMass, CellData cell_data ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = cell_data.cellStart[grid_hash]; float3 result = make_float3(0,0,0);// correction_i; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { float3 correction_j = make_float3(0, 0, 0); if (j != index) // check not colliding with self { uint original_index_j = cell_data.grid_index[j]; float3 original_pos1 = original_pos[original_index_j]; //float3 predict_pos1 = predict_pos[original_index_j]; float3 predict_pos1 = cell_data.sorted_pos[j]; float3 v = predict_pos0 - predict_pos1; float dist = length(v); // correct if distance is close if (dist <= 2.f * params.particle_radius) { // Non-penetration correction const float w1 = invMass[original_index_j]; float w_sum = w0 + w1; // normalize v + 0.000001f for vanish problem float3 n = v / (dist);// +0.000001f); float penetration = 2.f * params.particle_radius - dist; float3 dx = (predict_pos0 - original_pos0) + (predict_pos1 - original_pos1); float3 dx_t = dx - (dot(dx, n) * n); //printf("dx: %f, %f, %f\n", dx_t.x, dx_t.y, dx_t.z); //printf("penetration: %f\n", penetration); float threshold = params.static_friction * penetration; float len = length(dx_t); // use kinematic friction model if (length(dx_t) > threshold) { float coeff = min(params.kinematic_friction * penetration / len, 1.f); dx_t = coeff * dx_t; }/* else { printf("static\n"); } */ dx_t = -(w0 / w_sum) * dx_t; correction_j += dx_t; //printf("dx: %f, %f, %f\n", dx_t.x, dx_t.y, dx_t.z); } } result += correction_j; } //printf("Num neighbors: %u\n", end_index - start_index); } return result; } __device__ float3 pbd_friction_correction_boundary( int3 grid_pos, uint index, float3 predict_pos0, float3 original_pos0, float w0, float* b_invMass, CellData b_cell_data ) { uint grid_hash = calcGridHash(grid_pos); // get start of bucket for this cell uint start_index = b_cell_data.cellStart[grid_hash]; 
float3 result = make_float3(0, 0, 0);// correction_i; if (start_index != 0xffffffff) // cell is not empty { // iterate over particles in this cell uint end_index = b_cell_data.cellEnd[grid_hash]; for (uint j = start_index; j < end_index; j++) { float3 correction_j = make_float3(0, 0, 0); uint original_index_j = b_cell_data.grid_index[j]; float3 pos1 = b_cell_data.sorted_pos[j]; float3 v = predict_pos0 - pos1; float dist = length(v); // correct if distance is close if (dist <= 2.f * params.particle_radius) { // Non-penetration correction const float w1 = b_invMass[original_index_j]; float w_sum = w0 + w1; // normalize v + 0.000001f for vanish problem float3 n = v / (dist);// +0.000001f); float penetration = 2.f * params.particle_radius - dist; float3 dx = (predict_pos0 - original_pos0); float3 dx_t = dx - (dot(dx, n) * n); //printf("dx: %f, %f, %f\n", dx.x, dx.y, dx.z); float threshold = params.static_friction * penetration; float len = length(dx_t); // if exceed threshold use kinematic friction model if (length(dx_t) > threshold) { float coeff = min(params.kinematic_friction * penetration / len, 1.f); dx_t = coeff * dx_t; } dx_t = -(w0 / w_sum) * dx_t; correction_j += dx_t; } result += correction_j; } //printf("Num neighbors: %u\n", end_index - start_index); } return result; } __global__ void compute_friction_correction( float3* correction, float3* new_pos, // output: corrected pos float3* original_pos, // input: position at the start of this time step float* invMass, // input: mass float* b_invMass, CellData cell_data, // input: cell data of dem particles CellData b_cell_data, uint numParticles // input: number of DEM particles ) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; uint original_index = cell_data.grid_index[index]; // read particle data from sorted arrays float3 pos = cell_data.sorted_pos[index]; float3 new_pos0 = new_pos[original_index]; float3 original_pos0 = original_pos[original_index]; float w0 = invMass[original_index]; //float3 correction_i = correction[original_index]; // get address in grid int3 gridPos = calcGridPos(pos); float3 corr = make_float3(0, 0, 0); for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbd_friction_correction( neighbor_pos, index, pos, original_pos0,w0, new_pos, original_pos, invMass, cell_data ); } } } for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbor_pos = gridPos + make_int3(x, y, z); corr += pbd_friction_correction_boundary( neighbor_pos, index, pos, original_pos0, w0, b_invMass, b_cell_data ); } } } //printf("corr: %f %f %f\n", corr.x, corr.y, corr.z); //corr = 0.5f * (correction_i + corr); correction[original_index] = corr; //new_pos[original_index] = pos + corr; } void solve_pbd_dem( ParticleSet* dem_particles, ParticleSet* boundary_particles, CellData cell_data, CellData b_cell_data, uint numParticles, uint b_numParticles, float dt, int iteration ) { uint numThreads, numBlocks; compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads); for (int i = 0; i < iteration; ++i) { compute_distance_correction << <numBlocks, numThreads >> > ( dem_particles->m_d_correction, dem_particles->m_d_massInv, boundary_particles->m_d_massInv, cell_data, b_cell_data, numParticles ); getLastCudaError("Kernel execution failed: compute_dem_correction "); apply_correction << <numBlocks, numThreads >> > ( dem_particles->m_d_new_positions, 
dem_particles->m_d_predict_positions, dem_particles->m_d_correction, cell_data, numParticles ); getLastCudaError("Kernel execution failed: apply_correction "); } compute_friction_correction << <numBlocks, numThreads >> > ( dem_particles->m_d_correction, dem_particles->m_d_new_positions, dem_particles->m_d_positions, dem_particles->m_d_massInv, boundary_particles->m_d_massInv, cell_data, b_cell_data, numParticles ); getLastCudaError("Kernel execution failed: compute_friction_correction "); apply_correction << <numBlocks, numThreads >> > ( dem_particles->m_d_new_positions, dem_particles->m_d_predict_positions, dem_particles->m_d_correction, cell_data, numParticles ); getLastCudaError("Kernel execution failed: apply_correction "); // finalize correction finalize_correction << <numBlocks, numThreads >> > ( dem_particles->m_d_positions, dem_particles->m_d_new_positions, dem_particles->m_d_predict_positions, dem_particles->m_d_velocity, numParticles, dt ); getLastCudaError("Kernel execution failed: finalize_correction "); }
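The solver entry points above (integratePBD, solve_sph_fluid, solve_pbd_dem) all size their launches through compute_grid_size(numParticles, MAX_THREAD_NUM, numBlocks, numThreads) before the kernel calls. A minimal sketch of that helper is given below, assuming the ceiling-division pattern used by the CUDA SDK particles sample; the signature is taken from the call sites, the body is an assumption, and any definition the file itself provides elsewhere takes precedence.

#include <algorithm>

typedef unsigned int uint;

// Assumed helper: round the launch up to full blocks so every particle gets a thread.
// Assumes n > 0.
inline uint iDivUp(uint a, uint b) { return (a + b - 1) / b; }   // ceiling division

inline void compute_grid_size(uint n, uint block_size, uint& num_blocks, uint& num_threads)
{
    num_threads = std::min(block_size, n);   // never request more threads per block than particles
    num_blocks  = iDivUp(n, num_threads);    // enough blocks to cover all n particles
}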
ee4287ba89e6b74b1204937b8b137c2c112c2ac4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <Environment.h> #include <loops/transform_bool.h> #include <types/types.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformBoolSimple(void *x, Nd4jLong *xShapeInfo, int xRank, void *params, void *z, Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { functions::transform::TransformBool<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformBool<X,Y>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_BOOL_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformBool<X,Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto params = static_cast<X*>(vparams); auto reductionPointer = static_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) { auto xOffset = shape::getIndexOffset(i, xShapeInfo, length); z[xOffset] = OpType::op(x[xOffset], params); } } else { 
for (Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) { auto xOffset = shape::getIndexOffset(i, xShapeInfo, length); auto zOffset = shape::getIndexOffset(i, zShapeInfo, length); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformBool<X,Z>::intermediateShaped(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( transformBoolSimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); nd4j::DebugHelper::checkErrorCode(stream, "transformBool(...) failed"); } template<typename X, typename Z> void TransformBool<X,Z>::exec(int opNum, void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { } template<typename X, typename Z> template <typename OpType> void TransformBool<X,Z>::exec(void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformBool, , LIBND4J_TYPES, BOOL_TYPES); } }
ee4287ba89e6b74b1204937b8b137c2c112c2ac4.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <Environment.h> #include <loops/transform_bool.h> #include <types/types.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename Z, typename OpType> __global__ void transformBoolSimple(void *x, Nd4jLong *xShapeInfo, int xRank, void *params, void *z, Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { functions::transform::TransformBool<X,Z>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer,tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X, typename Y> _CUDA_H void TransformBool<X,Y>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_BOOL_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> template <typename OpType> __device__ void TransformBool<X,Z>::transformCuda(void *vx, Nd4jLong *xShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto params = static_cast<X*>(vparams); auto reductionPointer = static_cast<Z*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) { auto xOffset = shape::getIndexOffset(i, xShapeInfo, length); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= gridDim.x * blockDim.x) { auto xOffset = 
shape::getIndexOffset(i, xShapeInfo, length); auto zOffset = shape::getIndexOffset(i, zShapeInfo, length); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X, typename Z> template <typename OpType> _CUDA_H void TransformBool<X,Z>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { transformBoolSimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); nd4j::DebugHelper::checkErrorCode(stream, "transformBool(...) failed"); } template<typename X, typename Z> void TransformBool<X,Z>::exec(int opNum, void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { } template<typename X, typename Z> template <typename OpType> void TransformBool<X,Z>::exec(void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformBool, , LIBND4J_TYPES, BOOL_TYPES); } }
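This transform_bool pair shows the one mechanical change hipify makes at every launch site in these files: the CUDA triple-chevron syntax becomes an explicit hipLaunchKernelGGL call carrying the grid, block, dynamic shared-memory size, and stream as ordinary arguments. A minimal sketch of the correspondence with a placeholder kernel (fill is not part of either file above):

#include <hip/hip_runtime.h>

__global__ void fill(float* out, float value, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

void launch_both_ways(float* d_out, int n, hipStream_t stream)
{
    dim3 grid((n + 255) / 256), block(256);

    // CUDA source form, as in the .cu files of these pairs:
    //   fill<<<grid, block, 0, stream>>>(d_out, 1.0f, n);

    // hipify output form, as in the .hip files: same kernel, same arguments,
    // with the launch configuration passed explicitly to the macro.
    hipLaunchKernelGGL(fill, grid, block, 0 /*sharedMemBytes*/, stream, d_out, 1.0f, n);
}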
15f4de825ce792f13de01a9aa5f9a79c06954840.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> __global__ void srt(int* a, int n){ int idx=threadIdx.x; for(int i=0; i<n-1; i++){ if(i%2==0 and idx%2==0 and idx+1<n){ if(a[idx]>a[idx+1]){ int t=a[idx]; a[idx]=a[idx+1]; a[idx+1]=t; } } else if(i%2==1 and idx%2==1 and idx+1<n){ if(a[idx]>a[idx+1]){ int t=a[idx]; a[idx]=a[idx+1]; a[idx+1]=t; } } } } int main() { int n; scanf("%d",&n); int *a_h; a_h=(int*)malloc(n*sizeof(int)); for(int i=0; i<n; i++) a_h[i]=rand()%1000; printf("Unsorted:\n"); for(int i=0; i<n; i++) printf("%d ",a_h[i]); printf("\n"); int *a_d; hipMalloc((void**)&a_d,n*sizeof(int)); hipMemcpy(a_d,a_h,n*sizeof(int),hipMemcpyHostToDevice); dim3 blockdim=n; dim3 griddim=1; hipLaunchKernelGGL(( srt), dim3(griddim),dim3(blockdim), 0, 0, a_d,n); hipMemcpy(a_h,a_d,n*sizeof(int),hipMemcpyDeviceToHost); printf("sorted\n"); for(int i=0; i<n; i++) printf("%d ",a_h[i]); }
15f4de825ce792f13de01a9aa5f9a79c06954840.cu
#include<stdio.h> #include<stdlib.h> __global__ void srt(int* a, int n){ int idx=threadIdx.x; for(int i=0; i<n-1; i++){ if(i%2==0 and idx%2==0 and idx+1<n){ if(a[idx]>a[idx+1]){ int t=a[idx]; a[idx]=a[idx+1]; a[idx+1]=t; } } else if(i%2==1 and idx%2==1 and idx+1<n){ if(a[idx]>a[idx+1]){ int t=a[idx]; a[idx]=a[idx+1]; a[idx+1]=t; } } } } int main() { int n; scanf("%d",&n); int *a_h; a_h=(int*)malloc(n*sizeof(int)); for(int i=0; i<n; i++) a_h[i]=rand()%1000; printf("Unsorted:\n"); for(int i=0; i<n; i++) printf("%d ",a_h[i]); printf("\n"); int *a_d; cudaMalloc((void**)&a_d,n*sizeof(int)); cudaMemcpy(a_d,a_h,n*sizeof(int),cudaMemcpyHostToDevice); dim3 blockdim=n; dim3 griddim=1; srt<<<griddim,blockdim>>>(a_d,n); cudaMemcpy(a_h,a_d,n*sizeof(int),cudaMemcpyDeviceToHost); printf("sorted\n"); for(int i=0; i<n; i++) printf("%d ",a_h[i]); }
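In both versions of this program the srt kernel runs every phase of an odd-even transposition sort with no barrier between phases, so nothing guarantees that one phase's swaps are visible before the next phase reads the same elements. A hedged corrected sketch is below; it keeps the single-block <<<1, n>>> launch of the original (so n is limited to the maximum block size, typically 1024) and only adds the missing synchronization. It is an illustrative variant, not the file's kernel.

// Odd-even transposition sort, one element per thread, single thread block.
__global__ void srt_synced(int* a, int n)
{
    int idx = threadIdx.x;
    for (int i = 0; i < n; i++) {                 // n phases are enough to fully sort n keys
        if ((idx % 2) == (i % 2) && idx + 1 < n) {
            if (a[idx] > a[idx + 1]) {
                int t = a[idx]; a[idx] = a[idx + 1]; a[idx + 1] = t;
            }
        }
        __syncthreads();                          // outside the if: every thread reaches the barrier
    }
}
// Usage mirrors the original launch: srt_synced<<<1, n>>>(a_d, n);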
0883938d897a74e6f50cf0b2eef3b3bb2dd83e39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #ifdef DEBUG #define CUDA_CALL(F) if( (F) != hipSuccess ) \ {printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \ __FILE__,__LINE__); exit(-1);} #define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \ {printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \ __FILE__,__LINE__-1); exit(-1);} #else #define CUDA_CALL(F) (F) #define CUDA_CHECK() #endif /* definitions of threadblock size in X and Y directions */ #define THREADS_PER_BLOCK_X 16 #define THREADS_PER_BLOCK_Y 16 /* definition of matrix linear dimension */ #define SIZE 1024 /* macro to index a 1D memory array with 2D indices in column-major order */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* CUDA kernel for naive matrix transpose */ __global__ void naive_cuda_transpose( const int m, double const * const a, double *c ) { const int myRow = blockDim.x * blockIdx.x + threadIdx.x; const int myCol = blockDim.y * blockIdx.y + threadIdx.y; if( myRow < m && myCol < m ) { c[INDX( myRow, myCol, m )] = a[INDX( myCol, myRow, m )]; } /* end if */ return; } /* end naive_cuda_transpose */ void host_transpose( const int m, double const * const a, double *c ) { /* * naive matrix transpose goes here. 
*/ for( int j = 0; j < m; j++ ) { for( int i = 0; i < m; i++ ) { c[INDX(i,j,m)] = a[INDX(j,i,m)]; } /* end for i */ } /* end for j */ } /* end host_dgemm */ int main( int argc, char *argv[] ) { int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); /* declaring pointers for array */ double *h_a, *h_c; double *d_a, *d_c; size_t numbytes = (size_t) size * (size_t) size * sizeof( double ); /* allocating host memory */ h_a = (double *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc h_a\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc h_c\n"); return 911; } /* allocating device memory */ CUDA_CALL( hipMalloc( (void**) &d_a, numbytes ) ); CUDA_CALL( hipMalloc( (void**) &d_c, numbytes ) ); /* set result matrices to zero */ memset( h_c, 0, numbytes ); CUDA_CALL( hipMemset( d_c, 0, numbytes ) ); fprintf( stdout, "Total memory required per matrix is %lf MB\n", (double) numbytes / 1000000.0 ); /* initialize input matrix with random value */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* copy input matrix from host to device */ CUDA_CALL( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) ); /* create and start timer */ hipEvent_t start, stop; CUDA_CALL( hipEventCreate( &start ) ); CUDA_CALL( hipEventCreate( &stop ) ); CUDA_CALL( hipEventRecord( start, 0 ) ); /* call naive cpu transpose function */ host_transpose( size, h_a, h_c ); /* stop CPU timer */ CUDA_CALL( hipEventRecord( stop, 0 ) ); CUDA_CALL( hipEventSynchronize( stop ) ); float elapsedTime; CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print CPU timing information */ fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GB/s\n", 8.0 * 2.0 * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* setup threadblock size and grid sizes */ dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 ); dim3 blocks( ( size / THREADS_PER_BLOCK_X ) + 1, ( size / THREADS_PER_BLOCK_Y ) + 1, 1 ); /* start timers */ CUDA_CALL( hipEventRecord( start, 0 ) ); /* call naive GPU transpose kernel */ hipLaunchKernelGGL(( naive_cuda_transpose), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_c ); CUDA_CHECK() CUDA_CALL( hipDeviceSynchronize() ); /* stop the timers */ CUDA_CALL( hipEventRecord( stop, 0 ) ); CUDA_CALL( hipEventSynchronize( stop ) ); CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU timing information */ fprintf(stdout, "Total time GPU is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GB/s\n", 8.0 * 2.0 * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy data from device to host */ CUDA_CALL( hipMemset( d_a, 0, numbytes ) ); CUDA_CALL( hipMemcpy( h_a, d_c, numbytes, hipMemcpyDeviceToHost ) ); /* compare GPU to CPU for correctness */ for( int j = 0; j < size; j++ ) { for( int i = 0; i < size; i++ ) { if( h_c[INDX(i,j,size)] != h_a[INDX(i,j,size)] ) { printf("Error in element %d,%d\n", i,j ); printf("Host %f, device %f\n",h_c[INDX(i,j,size)], h_a[INDX(i,j,size)]); } /* end fi */ } /* end for i */ } /* end for j */ /* free the memory */ free( h_a ); free( h_c ); CUDA_CALL( hipFree( d_a ) ); CUDA_CALL( hipFree( d_c ) ); CUDA_CALL( hipDeviceReset() ); return 0; } /* end main */
0883938d897a74e6f50cf0b2eef3b3bb2dd83e39.cu
/* * Copyright 2014 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #ifdef DEBUG #define CUDA_CALL(F) if( (F) != cudaSuccess ) \ {printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \ __FILE__,__LINE__); exit(-1);} #define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \ {printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \ __FILE__,__LINE__-1); exit(-1);} #else #define CUDA_CALL(F) (F) #define CUDA_CHECK() #endif /* definitions of threadblock size in X and Y directions */ #define THREADS_PER_BLOCK_X 16 #define THREADS_PER_BLOCK_Y 16 /* definition of matrix linear dimension */ #define SIZE 1024 /* macro to index a 1D memory array with 2D indices in column-major order */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* CUDA kernel for naive matrix transpose */ __global__ void naive_cuda_transpose( const int m, double const * const a, double *c ) { const int myRow = blockDim.x * blockIdx.x + threadIdx.x; const int myCol = blockDim.y * blockIdx.y + threadIdx.y; if( myRow < m && myCol < m ) { c[INDX( myRow, myCol, m )] = a[INDX( myCol, myRow, m )]; } /* end if */ return; } /* end naive_cuda_transpose */ void host_transpose( const int m, double const * const a, double *c ) { /* * naive matrix transpose goes here. 
*/ for( int j = 0; j < m; j++ ) { for( int i = 0; i < m; i++ ) { c[INDX(i,j,m)] = a[INDX(j,i,m)]; } /* end for i */ } /* end for j */ } /* end host_dgemm */ int main( int argc, char *argv[] ) { int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); /* declaring pointers for array */ double *h_a, *h_c; double *d_a, *d_c; size_t numbytes = (size_t) size * (size_t) size * sizeof( double ); /* allocating host memory */ h_a = (double *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc h_a\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc h_c\n"); return 911; } /* allocating device memory */ CUDA_CALL( cudaMalloc( (void**) &d_a, numbytes ) ); CUDA_CALL( cudaMalloc( (void**) &d_c, numbytes ) ); /* set result matrices to zero */ memset( h_c, 0, numbytes ); CUDA_CALL( cudaMemset( d_c, 0, numbytes ) ); fprintf( stdout, "Total memory required per matrix is %lf MB\n", (double) numbytes / 1000000.0 ); /* initialize input matrix with random value */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* copy input matrix from host to device */ CUDA_CALL( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) ); /* create and start timer */ cudaEvent_t start, stop; CUDA_CALL( cudaEventCreate( &start ) ); CUDA_CALL( cudaEventCreate( &stop ) ); CUDA_CALL( cudaEventRecord( start, 0 ) ); /* call naive cpu transpose function */ host_transpose( size, h_a, h_c ); /* stop CPU timer */ CUDA_CALL( cudaEventRecord( stop, 0 ) ); CUDA_CALL( cudaEventSynchronize( stop ) ); float elapsedTime; CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print CPU timing information */ fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GB/s\n", 8.0 * 2.0 * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* setup threadblock size and grid sizes */ dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 ); dim3 blocks( ( size / THREADS_PER_BLOCK_X ) + 1, ( size / THREADS_PER_BLOCK_Y ) + 1, 1 ); /* start timers */ CUDA_CALL( cudaEventRecord( start, 0 ) ); /* call naive GPU transpose kernel */ naive_cuda_transpose<<< blocks, threads >>>( size, d_a, d_c ); CUDA_CHECK() CUDA_CALL( cudaDeviceSynchronize() ); /* stop the timers */ CUDA_CALL( cudaEventRecord( stop, 0 ) ); CUDA_CALL( cudaEventSynchronize( stop ) ); CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU timing information */ fprintf(stdout, "Total time GPU is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GB/s\n", 8.0 * 2.0 * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy data from device to host */ CUDA_CALL( cudaMemset( d_a, 0, numbytes ) ); CUDA_CALL( cudaMemcpy( h_a, d_c, numbytes, cudaMemcpyDeviceToHost ) ); /* compare GPU to CPU for correctness */ for( int j = 0; j < size; j++ ) { for( int i = 0; i < size; i++ ) { if( h_c[INDX(i,j,size)] != h_a[INDX(i,j,size)] ) { printf("Error in element %d,%d\n", i,j ); printf("Host %f, device %f\n",h_c[INDX(i,j,size)], h_a[INDX(i,j,size)]); } /* end fi */ } /* end for i */ } /* end for j */ /* free the memory */ free( h_a ); free( h_c ); CUDA_CALL( cudaFree( d_a ) ); CUDA_CALL( cudaFree( d_c ) ); CUDA_CALL( cudaDeviceReset() ); return 0; } /* end main */
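The kernel in this pair is intentionally the naive one-thread-per-element transpose: its writes to c are coalesced, but each warp's reads from a stride through memory by the matrix dimension. For comparison only, here is a hedged sketch of the standard shared-memory tiled transpose in the same column-major convention (the +1 padding sidesteps shared-memory bank conflicts); it reuses the 16x16 block shape and the grid computed in main(), but it is not part of the file above.

#define TILE_DIM 16   // matches THREADS_PER_BLOCK_X/Y above

// Each block stages one TILE_DIM x TILE_DIM tile in shared memory so that both
// the global read and the global write are coalesced along the fast (row) index.
__global__ void tiled_transpose( const int m, double const * const a, double *c )
{
    __shared__ double tile[TILE_DIM][TILE_DIM + 1];     // +1 column of padding

    int row = blockIdx.x * TILE_DIM + threadIdx.x;      // column-major: element (row, col) is a[col*m + row]
    int col = blockIdx.y * TILE_DIM + threadIdx.y;
    if( row < m && col < m )
        tile[threadIdx.y][threadIdx.x] = a[col * m + row];    // coalesced read
    __syncthreads();

    row = blockIdx.y * TILE_DIM + threadIdx.x;          // swap block indices for the output tile
    col = blockIdx.x * TILE_DIM + threadIdx.y;
    if( row < m && col < m )
        c[col * m + row] = tile[threadIdx.x][threadIdx.y];    // coalesced write of the transpose
}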
0557564bae8af7e5403ee5bd6ac265dabd38e9cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <omp.h> #include <stdio.h> #include <math.h> #include <cv.h> #include <highgui.h> #define BLUE 0 #define GREEN 1 #define RED 2 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __device__ void loadShared(uchar shared[5][260][3], uchar * data, int i, int j, int x, int height, int step, int channels) { int k; for(k = 0; k < 5; k++) { int pos = i - 2 + k; if ( pos > 0 && pos < height) { shared[k][x][0] = data[pos * step + j * channels]; shared[k][x][1] = data[pos * step + j * channels + 1]; shared[k][x][2] = data[pos * step + j * channels + 2]; } } } __global__ void gpuSmooth(uchar * target, uchar * data, int width, int height, int step, int channels) { int i = blockIdx.x; int j = threadIdx.x + blockIdx.y * blockDim.x; int x = threadIdx.x + 2; int value[3]; __shared__ uchar mem[5][260][3]; int total; int k, l, m; // Spill treatment if((i > 0) && (i < height) && (j > 0) && (j < width)) { // Load values to shared memory loadShared(mem, data, i, j, x, height, step, channels); if (x == 2) { if (j > 1) { loadShared(mem, data, i, j-1, x-1, height, step, channels); loadShared(mem, data, i, j-2, x-2, height, step, channels); } } else if (x == 257) { if (j + 1 < width) { loadShared(mem, data, i, j+1, x+1, height, step, channels); } if (j + 2 < width) { loadShared(mem, data, i, j+2, x+2, height, step, channels); } } } __syncthreads(); if((i > 0) && (i < height) && (j > 0) && (j < width)) { total = value[0] = value[1] = value[2] = 0; for(k = 0; k < 5; k++) if ((i - 2 + k > 0) && (i - 2 + k < height)) for(l = x - 2, m = j - 2; l < x+3; l++, m++) if((m > 0) && (m < width)) { value[0] += mem[k][l][0]; value[1] += mem[k][l][1]; value[2] += mem[k][l][2]; ++total; } target[i * step + j * channels] = value[0] / total; target[i * step + j * channels + 1] = value[1] / total; target[i * step + j * channels + 2] = value[2] / total; } } int main(int argc, char *argv[]) { // original image IplImage* img = 0; if(argc<2){ printf("Usage: main <image-file-name>\n\7"); exit(0); } // load an image img=cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR); if(!img){ printf("Could not load image file: %s\n",argv[1]); exit(0); } int image_size = img->height*img->widthStep; uchar * gpu_data, *gpu_target; gpuErrchk(hipMalloc(&gpu_data, image_size)); gpuErrchk(hipMalloc(&gpu_target, image_size)); gpuErrchk(hipMemcpy(gpu_data, img->imageData, image_size, hipMemcpyHostToDevice)); dim3 grid(img->height, (img->width / 256) + (img->width % 256 != 0), 1); hipLaunchKernelGGL(( gpuSmooth), dim3(grid), dim3(256), 0, 0, gpu_target, gpu_data, img->width, img->height, img->widthStep, img->nChannels); gpuErrchk(hipMemcpy(img->imageData, gpu_target, image_size, hipMemcpyDeviceToHost)); cvSaveImage("result/result.jpg", img, 0); // release the image cvReleaseImage(&img); hipFree(gpu_data); hipFree(gpu_target); return 0; }
0557564bae8af7e5403ee5bd6ac265dabd38e9cf.cu
#include <stdlib.h> #include <omp.h> #include <stdio.h> #include <math.h> #include <cv.h> #include <highgui.h> #define BLUE 0 #define GREEN 1 #define RED 2 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __device__ void loadShared(uchar shared[5][260][3], uchar * data, int i, int j, int x, int height, int step, int channels) { int k; for(k = 0; k < 5; k++) { int pos = i - 2 + k; if ( pos > 0 && pos < height) { shared[k][x][0] = data[pos * step + j * channels]; shared[k][x][1] = data[pos * step + j * channels + 1]; shared[k][x][2] = data[pos * step + j * channels + 2]; } } } __global__ void gpuSmooth(uchar * target, uchar * data, int width, int height, int step, int channels) { int i = blockIdx.x; int j = threadIdx.x + blockIdx.y * blockDim.x; int x = threadIdx.x + 2; int value[3]; __shared__ uchar mem[5][260][3]; int total; int k, l, m; // Spill treatment if((i > 0) && (i < height) && (j > 0) && (j < width)) { // Load values to shared memory loadShared(mem, data, i, j, x, height, step, channels); if (x == 2) { if (j > 1) { loadShared(mem, data, i, j-1, x-1, height, step, channels); loadShared(mem, data, i, j-2, x-2, height, step, channels); } } else if (x == 257) { if (j + 1 < width) { loadShared(mem, data, i, j+1, x+1, height, step, channels); } if (j + 2 < width) { loadShared(mem, data, i, j+2, x+2, height, step, channels); } } } __syncthreads(); if((i > 0) && (i < height) && (j > 0) && (j < width)) { total = value[0] = value[1] = value[2] = 0; for(k = 0; k < 5; k++) if ((i - 2 + k > 0) && (i - 2 + k < height)) for(l = x - 2, m = j - 2; l < x+3; l++, m++) if((m > 0) && (m < width)) { value[0] += mem[k][l][0]; value[1] += mem[k][l][1]; value[2] += mem[k][l][2]; ++total; } target[i * step + j * channels] = value[0] / total; target[i * step + j * channels + 1] = value[1] / total; target[i * step + j * channels + 2] = value[2] / total; } } int main(int argc, char *argv[]) { // original image IplImage* img = 0; if(argc<2){ printf("Usage: main <image-file-name>\n\7"); exit(0); } // load an image img=cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR); if(!img){ printf("Could not load image file: %s\n",argv[1]); exit(0); } int image_size = img->height*img->widthStep; uchar * gpu_data, *gpu_target; gpuErrchk(cudaMalloc(&gpu_data, image_size)); gpuErrchk(cudaMalloc(&gpu_target, image_size)); gpuErrchk(cudaMemcpy(gpu_data, img->imageData, image_size, cudaMemcpyHostToDevice)); dim3 grid(img->height, (img->width / 256) + (img->width % 256 != 0), 1); gpuSmooth<<<grid, 256>>>(gpu_target, gpu_data, img->width, img->height, img->widthStep, img->nChannels); gpuErrchk(cudaMemcpy(img->imageData, gpu_target, image_size, cudaMemcpyDeviceToHost)); cvSaveImage("result/result.jpg", img, 0); // release the image cvReleaseImage(&img); cudaFree(gpu_data); cudaFree(gpu_target); return 0; }
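Both versions of this smoothing program define the gpuErrchk/gpuAssert wrapper but apply it only to the memory calls; the gpuSmooth launch itself is never checked. A common pattern, sketched below against the launch in main() above, is to check the launch configuration and the kernel execution explicitly (this is an illustrative addition, not code from the file):

// Immediately after the launch in main():
gpuSmooth<<<grid, 256>>>(gpu_target, gpu_data, img->width, img->height,
                         img->widthStep, img->nChannels);
gpuErrchk(cudaPeekAtLastError());      // reports invalid launch configurations
gpuErrchk(cudaDeviceSynchronize());    // surfaces errors raised while the kernel runs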
457654d4de7bac8a96ebabac11c5a3819fbdbee3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -------------------------------------------------------- // Fast R-CNN // Copyright (c) Microsoft. All rights reserved. // Written by Ross Girshick, 2015. // Licensed under the BSD 2-clause "Simplified" license. // See LICENSE in the Fast R-CNN project root for license // information. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/fast_rcnn_layers.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SmoothL1ForwardGPU(const int n, const Dtype* in, Dtype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); if ( count < 1 ){ top[ 0 ]->mutable_cpu_data()[ 0 ] = Dtype(0); return; } caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w * (b0 - b1) } SmoothL1ForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, diff_.gpu_data(), errors_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype loss; caffe_gpu_asum(count, errors_.gpu_data(), &loss); int spatial_dim = diff_.height() * diff_.width(); Dtype pre_fixed_normalizer = this->layer_param_.loss_param().pre_fixed_normalizer(); if ( has_weights_ ) top[ 0 ]->mutable_cpu_data()[ 0 ] = loss / ( valid_num_ + 1e-6 ); else top[0]->mutable_cpu_data()[0] = loss / (get_normalizer(normalization_, pre_fixed_normalizer)+1e-6); // Output per-instance loss if (top.size() >= 2) { kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS >> > (outer_num_, bottom[0]->channels(), inner_num_, errors_.gpu_data(), top[1]->mutable_gpu_data()); } } template <typename Dtype> __global__ void SmoothL1BackwardGPU(const int n, const Dtype* in, Dtype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = diff_.count(); if ( has_weights_ ){ Dtype asum; caffe_gpu_asum<Dtype>(count, bottom[ 2 ]->gpu_data(), &asum); if ( asum < 1 ) return; } if ( count < 1 ){ return; } SmoothL1BackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, diff_.gpu_data(), diff_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype 
sign = (i == 0) ? 1 : -1; int spatial_dim = diff_.height() * diff_.width(); Dtype pre_fixed_normalizer = this->layer_param_.loss_param().pre_fixed_normalizer(); Dtype normalizer = get_normalizer(normalization_, pre_fixed_normalizer); Dtype alpha = sign * top[0]->cpu_diff()[0] / normalizer; caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe
457654d4de7bac8a96ebabac11c5a3819fbdbee3.cu
// -------------------------------------------------------- // Fast R-CNN // Copyright (c) Microsoft. All rights reserved. // Written by Ross Girshick, 2015. // Licensed under the BSD 2-clause "Simplified" license. // See LICENSE in the Fast R-CNN project root for license // information. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/fast_rcnn_layers.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SmoothL1ForwardGPU(const int n, const Dtype* in, Dtype* out) { // f(x) = 0.5 * x^2 if |x| < 1 // |x| - 0.5 otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = 0.5 * val * val; } else { out[index] = abs_val - 0.5; } } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); if ( count < 1 ){ top[ 0 ]->mutable_cpu_data()[ 0 ] = Dtype(0); return; } caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_.mutable_gpu_data()); // d := b0 - b1 if (has_weights_) { caffe_gpu_mul( count, bottom[2]->gpu_data(), diff_.gpu_data(), diff_.mutable_gpu_data()); // d := w * (b0 - b1) } SmoothL1ForwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, diff_.gpu_data(), errors_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; Dtype loss; caffe_gpu_asum(count, errors_.gpu_data(), &loss); int spatial_dim = diff_.height() * diff_.width(); Dtype pre_fixed_normalizer = this->layer_param_.loss_param().pre_fixed_normalizer(); if ( has_weights_ ) top[ 0 ]->mutable_cpu_data()[ 0 ] = loss / ( valid_num_ + 1e-6 ); else top[0]->mutable_cpu_data()[0] = loss / (get_normalizer(normalization_, pre_fixed_normalizer)+1e-6); // Output per-instance loss if (top.size() >= 2) { kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS >> > (outer_num_, bottom[0]->channels(), inner_num_, errors_.gpu_data(), top[1]->mutable_gpu_data()); } } template <typename Dtype> __global__ void SmoothL1BackwardGPU(const int n, const Dtype* in, Dtype* out) { // f'(x) = x if |x| < 1 // = sign(x) otherwise CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); if (abs_val < 1) { out[index] = val; } else { out[index] = (Dtype(0) < val) - (val < Dtype(0)); } } } template <typename Dtype> void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = diff_.count(); if ( has_weights_ ){ Dtype asum; caffe_gpu_asum<Dtype>(count, bottom[ 2 ]->gpu_data(), &asum); if ( asum < 1 ) return; } if ( count < 1 ){ return; } SmoothL1BackwardGPU<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, diff_.gpu_data(), diff_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 
1 : -1; int spatial_dim = diff_.height() * diff_.width(); Dtype pre_fixed_normalizer = this->layer_param_.loss_param().pre_fixed_normalizer(); Dtype normalizer = get_normalizer(normalization_, pre_fixed_normalizer); Dtype alpha = sign * top[0]->cpu_diff()[0] / normalizer; caffe_gpu_axpby( bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // x Dtype(0), // beta bottom[i]->mutable_gpu_diff()); // y } } } INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); } // namespace caffe
ef5c32d5853a74b5e34a2aec981eb9623d7cbe1c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "binarize_weights_mean_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *weights = NULL; hipMalloc(&weights, XSIZE*YSIZE); int n = XSIZE*YSIZE; int size = XSIZE*YSIZE; float *binary = NULL; hipMalloc(&binary, XSIZE*YSIZE); float *mean_arr_gpu = NULL; hipMalloc(&mean_arr_gpu, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( binarize_weights_mean_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weights,n,size,binary,mean_arr_gpu); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( binarize_weights_mean_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weights,n,size,binary,mean_arr_gpu); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( binarize_weights_mean_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weights,n,size,binary,mean_arr_gpu); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ef5c32d5853a74b5e34a2aec981eb9623d7cbe1c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "binarize_weights_mean_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *weights = NULL; cudaMalloc(&weights, XSIZE*YSIZE); int n = XSIZE*YSIZE; int size = XSIZE*YSIZE; float *binary = NULL; cudaMalloc(&binary, XSIZE*YSIZE); float *mean_arr_gpu = NULL; cudaMalloc(&mean_arr_gpu, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); binarize_weights_mean_kernel<<<gridBlock,threadBlock>>>(weights,n,size,binary,mean_arr_gpu); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { binarize_weights_mean_kernel<<<gridBlock,threadBlock>>>(weights,n,size,binary,mean_arr_gpu); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { binarize_weights_mean_kernel<<<gridBlock,threadBlock>>>(weights,n,size,binary,mean_arr_gpu); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
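The pair above (.hip vs. .cu) shows the mechanical rewrite hipify applies to this benchmark harness: headers and runtime calls are renamed one-for-one (cuda.h → hip/hip_runtime.h, curand_kernel.h → hiprand/hiprand_kernel.h, cudaMalloc → hipMalloc, cudaDeviceSynchronize → hipDeviceSynchronize) and the triple-chevron launch becomes hipLaunchKernelGGL with explicit shared-memory and stream arguments. A minimal sketch of the same mapping on a hypothetical scale kernel (kernel name and sizes are illustrative, not taken from the entry):

#include <hip/hip_runtime.h>
#include <cstdio>

// Hypothetical kernel, present only to illustrate the launch-syntax rewrite.
__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 1 << 20;
    float *d_x = nullptr;
    hipMalloc(&d_x, n * sizeof(float));      // CUDA: cudaMalloc(&d_x, ...)
    hipMemset(d_x, 0, n * sizeof(float));    // CUDA: cudaMemset(...)

    dim3 block(256), grid((n + block.x - 1) / block.x);
    // CUDA: scale<<<grid, block>>>(d_x, 2.0f, n);
    hipLaunchKernelGGL(scale, grid, block, 0 /*shared mem*/, 0 /*stream*/, d_x, 2.0f, n);

    hipDeviceSynchronize();                  // CUDA: cudaDeviceSynchronize()
    hipFree(d_x);                            // CUDA: cudaFree(d_x)
    std::printf("done\n");
    return 0;
}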
2e87b756d9be8883226e3b5aae45dfd8b04c0a23.hip
// !!! This is a file automatically generated by hipify!!! #include "rt_cuda.h" #include "rt_functions.h" // CUDA headers. #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "hiprand/hiprand_kernel.h" #include "cutil_math.h" #include "device_launch_parameters.h" #include <cassert> #include <ctime> #include <iostream> #include <vector> namespace ray_tracing_cuda { namespace { uint32_t constexpr kThreadsInRow = 8; std::vector<void *> delayedFreeMemory; template<typename T> class GPUPtr { public: explicit GPUPtr(uint32_t size, bool delayedFree = false) : m_size(size * sizeof(T)), m_delayedFree(delayedFree) { if (hipMalloc(&m_ptr, m_size) != hipSuccess) m_ptr = nullptr; if (m_delayedFree && m_ptr != nullptr) delayedFreeMemory.push_back(m_ptr); } ~GPUPtr() { if (!m_delayedFree && m_ptr != nullptr) hipFree(m_ptr); } operator bool() { return m_ptr != nullptr; } T * m_ptr = nullptr; uint32_t m_size = 0; bool m_delayedFree = false; }; struct TransferredGPUPtr { void * m_ptr = nullptr; uint32_t m_size = 0; TransferredGPUPtr() = default; TransferredGPUPtr(void * ptr, uint32_t size) : m_ptr(ptr), m_size(size) {} template<typename T> void Set(GPUPtr<T> const &ptr) { m_ptr = ptr.m_ptr; m_size = ptr.m_size; } }; TransferredGPUPtr transferredOutputPtr; } // namespace bool Initialize() { int count; hipError_t cudaStatus = hipGetDeviceCount(&count); if (cudaStatus != hipSuccess || count == 0) { std::cout << "Error call hipGetDeviceCount." << std::endl; return false; } if (hipSetDevice(0) != hipSuccess) { std::cout << "Error call hipSetDevice." << std::endl; return false; } hipDeviceProp_t prop; if (hipGetDeviceProperties(&prop, 0) != hipSuccess) { std::cout << "Error call cudaSetDeviceProperties." << std::endl; return false; } std::cout << "CUDA device: " << prop.name << std::endl; return true; } __global__ void InitRandom(hiprandState_t * randStates, unsigned long long seed) { int rndx = blockIdx.x * blockDim.x + threadIdx.x; int rndy = blockIdx.y * blockDim.y + threadIdx.y; int rndIndex = rndy * gridDim.x * blockDim.x + rndx; hiprand_init(seed, rndIndex, 0, &randStates[rndIndex]); } __global__ void TraceAllRaysGPU(CudaSphere * spheres, uint32_t spheresCount, CudaMaterial * materials, CudaLight * lightSources, uint32_t lightSourcesCount, float3 backgroundColor, float3 origin, float3 forward, float3 up, float3 right, float2 halfScreenSize, float2 cellSize, uint32_t samplesInRowCount, float invSampleCount, float znear, float zfar, uint32_t offsetX, uint32_t offsetY, uint32_t width, uint32_t height, hiprandState_t * randStates, float3 * output) { __shared__ float3 samples[kThreadsInRow][kThreadsInRow]; int x = blockIdx.x + offsetX; int y = blockIdx.y + offsetY; int rndx = blockIdx.x * blockDim.x + threadIdx.x; int rndy = blockIdx.y * blockDim.y + threadIdx.y; int rndIndex = rndy * gridDim.x * blockDim.x + rndx; samples[threadIdx.x][threadIdx.y] = make_float3(0.0f, 0.0f, 0.0f); if (x < width && y < height) { int tx = threadIdx.x; while (tx < samplesInRowCount) { int ty = threadIdx.y; while (ty < samplesInRowCount) { float const dx = (2.0f * x / width - 1.0f) * halfScreenSize.x; float const sdx = dx + cellSize.x * tx / samplesInRowCount; float const dy = (-2.0f * y / height + 1.0f) * halfScreenSize.y; float const sdy = dy - cellSize.y * ty / samplesInRowCount; CudaRay ray; ray.m_origin = origin; ray.m_direction = normalize(forward * znear + up * sdy + right * sdx); float3 outputColor; TraceRayGPU(&ray, spheres, spheresCount, materials, lightSources, lightSourcesCount, backgroundColor, znear, 
zfar, &randStates[rndIndex], &outputColor); samples[threadIdx.x][threadIdx.y] += outputColor; ty += blockDim.y; } tx += blockDim.x; } } // Samples reduction. __syncthreads(); int j = kThreadsInRow / 2; while (j != 0) { if (threadIdx.x < j && threadIdx.x + j < blockDim.x) samples[threadIdx.x][threadIdx.y] += samples[threadIdx.x + j][threadIdx.y]; __syncthreads(); if (threadIdx.y < j && threadIdx.y + j < blockDim.y) samples[threadIdx.x][threadIdx.y] += samples[threadIdx.x][threadIdx.y + j]; __syncthreads(); j /= 2; } if (threadIdx.x == 0 && threadIdx.y == 0 && x < width && y < height) output[x + y * width] = samples[0][0] * invSampleCount; } hipEvent_t RayTrace(CudaSphere * spheres, uint32_t spheresCount, CudaMaterial * materials, uint32_t materialsCount, CudaLight * lightSources, uint32_t lightSourcesCount, uint32_t samplesInRowCount, float3 backgroundColor, float3 cameraPosition, float3 cameraDirection, float fov, float znear, float zfar, uint32_t width, uint32_t height, std::function<bool()> && realtimeHandler) { hipEvent_t completion; if (hipEventCreate(&completion) != hipSuccess) { std::cout << "Error call hipEventCreate." << std::endl; return nullptr; } GPUPtr<CudaSphere> spheresGPU(spheresCount); if (!spheresGPU) { std::cout << "Error allocate GPU memory." << std::endl; return completion; } if (hipMemcpy(spheresGPU.m_ptr, spheres, spheresGPU.m_size, hipMemcpyHostToDevice) != hipSuccess) { std::cout << "Error call hipMemcpy (spheresGPU)." << std::endl; return completion; } GPUPtr<CudaMaterial> materialsGPU(materialsCount); if (!materialsGPU) { std::cout << "Error allocate GPU memory." << std::endl; return completion; } if (hipMemcpy(materialsGPU.m_ptr, materials, materialsGPU.m_size, hipMemcpyHostToDevice) != hipSuccess) { std::cout << "Error call hipMemcpy (materialsGPU)." << std::endl; return completion; } GPUPtr<CudaLight> lightSourcesGPU(lightSourcesCount); if (!lightSourcesGPU) { std::cout << "Error allocate GPU memory." << std::endl; return completion; } if (hipMemcpy(lightSourcesGPU.m_ptr, lightSources, lightSourcesGPU.m_size, hipMemcpyHostToDevice) != hipSuccess) { std::cout << "Error call hipMemcpy (lightSourcesGPU)." << std::endl; return completion; } uint32_t constexpr kPartsCount = 16; dim3 grids((width + kPartsCount - 1) / kPartsCount, (height + kPartsCount - 1) / kPartsCount); dim3 threads(kThreadsInRow, kThreadsInRow); GPUPtr<hiprandState_t> randStatesGPU(grids.x * grids.y * threads.x * threads.y); if (!randStatesGPU) { std::cout << "Error allocate GPU memory." << std::endl; return completion; } GPUPtr<float3> outputGPU(width * height, true /* delayedFree */); if (!outputGPU) { std::cout << "Error allocate GPU memory." 
<< std::endl; return completion; } transferredOutputPtr.Set(outputGPU); hipLaunchKernelGGL(( InitRandom), dim3(grids), dim3(threads), 0, 0, randStatesGPU.m_ptr, static_cast<unsigned long long>(time(nullptr))); static float3 kUp = make_float3(0.0f, 1.0f, 0.0f); auto const aspect = static_cast<float>(height) / width; float3 const right = cross(kUp, cameraDirection); float3 const up = cross(cameraDirection, right); float const dw = znear / tan(0.5f * fov); float2 const halfScreenSize = make_float2(dw, dw * aspect); float2 const cellSize = make_float2(2.0f * halfScreenSize.x / width, 2.0f * halfScreenSize.y / height); float const invSampleCount = 1.0f / (samplesInRowCount * samplesInRowCount); for (uint32_t i = 0; i < kPartsCount; ++i) { bool needInterrupt = false; for (uint32_t j = 0; j < kPartsCount; ++j) { hipLaunchKernelGGL(( TraceAllRaysGPU), dim3(grids), dim3(threads), 0, 0, spheresGPU.m_ptr, spheresCount, materialsGPU.m_ptr, lightSourcesGPU.m_ptr, lightSourcesCount, backgroundColor, cameraPosition, cameraDirection, up, right, halfScreenSize, cellSize, samplesInRowCount, invSampleCount, znear, zfar, i * grids.x, j * grids.y, width, height, randStatesGPU.m_ptr, outputGPU.m_ptr); if (realtimeHandler) needInterrupt = realtimeHandler(); } if (needInterrupt) break; } if (hipEventRecord(completion, 0) != hipSuccess) std::cout << "Error call hipEventRecord." << std::endl; return completion; } bool InProgress(hipEvent_t completion) { if (hipEventQuery(completion) != hipErrorNotReady) { auto err = hipGetLastError(); if (err != hipSuccess) { std::cout << "Error CUDA: " << hipGetErrorString(err) << std::endl; return true; } return false; } return true; } void CopyOutputToBuffer(float * buffer) { if (hipMemcpy(buffer, transferredOutputPtr.m_ptr, transferredOutputPtr.m_size, hipMemcpyDeviceToHost) != hipSuccess) { std::cout << "Error call hipMemcpy (realtimeBuffer)." << std::endl; } } void FinishRayTrace(float * output, hipEvent_t completion) { if (hipMemcpy(output, transferredOutputPtr.m_ptr, transferredOutputPtr.m_size, hipMemcpyDeviceToHost) != hipSuccess) { std::cout << "Error call hipMemcpy (output)." << std::endl; } if (hipDeviceSynchronize() != hipSuccess) std::cout << "Error call hipDeviceSynchronize." << std::endl; auto err = hipGetLastError(); if (err != hipSuccess) std::cout << "Error CUDA: " << hipGetErrorString(err) << std::endl; for (size_t i = 0; i < delayedFreeMemory.size(); ++i) hipFree(delayedFreeMemory[i]); delayedFreeMemory.clear(); if (hipEventDestroy(completion) != hipSuccess) std::cout << "Error call hipEventDestroy." << std::endl; } } // namespace ray_tracing_cuda
2e87b756d9be8883226e3b5aae45dfd8b04c0a23.cu
#include "rt_cuda.h" #include "rt_functions.h" // CUDA headers. #include "cuda.h" #include "cuda_runtime.h" #include "curand_kernel.h" #include "cutil_math.h" #include "device_launch_parameters.h" #include <cassert> #include <ctime> #include <iostream> #include <vector> namespace ray_tracing_cuda { namespace { uint32_t constexpr kThreadsInRow = 8; std::vector<void *> delayedFreeMemory; template<typename T> class GPUPtr { public: explicit GPUPtr(uint32_t size, bool delayedFree = false) : m_size(size * sizeof(T)), m_delayedFree(delayedFree) { if (cudaMalloc(&m_ptr, m_size) != cudaSuccess) m_ptr = nullptr; if (m_delayedFree && m_ptr != nullptr) delayedFreeMemory.push_back(m_ptr); } ~GPUPtr() { if (!m_delayedFree && m_ptr != nullptr) cudaFree(m_ptr); } operator bool() { return m_ptr != nullptr; } T * m_ptr = nullptr; uint32_t m_size = 0; bool m_delayedFree = false; }; struct TransferredGPUPtr { void * m_ptr = nullptr; uint32_t m_size = 0; TransferredGPUPtr() = default; TransferredGPUPtr(void * ptr, uint32_t size) : m_ptr(ptr), m_size(size) {} template<typename T> void Set(GPUPtr<T> const &ptr) { m_ptr = ptr.m_ptr; m_size = ptr.m_size; } }; TransferredGPUPtr transferredOutputPtr; } // namespace bool Initialize() { int count; cudaError_t cudaStatus = cudaGetDeviceCount(&count); if (cudaStatus != cudaSuccess || count == 0) { std::cout << "Error call cudaGetDeviceCount." << std::endl; return false; } if (cudaSetDevice(0) != cudaSuccess) { std::cout << "Error call cudaSetDevice." << std::endl; return false; } cudaDeviceProp prop; if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) { std::cout << "Error call cudaSetDeviceProperties." << std::endl; return false; } std::cout << "CUDA device: " << prop.name << std::endl; return true; } __global__ void InitRandom(curandState * randStates, unsigned long long seed) { int rndx = blockIdx.x * blockDim.x + threadIdx.x; int rndy = blockIdx.y * blockDim.y + threadIdx.y; int rndIndex = rndy * gridDim.x * blockDim.x + rndx; curand_init(seed, rndIndex, 0, &randStates[rndIndex]); } __global__ void TraceAllRaysGPU(CudaSphere * spheres, uint32_t spheresCount, CudaMaterial * materials, CudaLight * lightSources, uint32_t lightSourcesCount, float3 backgroundColor, float3 origin, float3 forward, float3 up, float3 right, float2 halfScreenSize, float2 cellSize, uint32_t samplesInRowCount, float invSampleCount, float znear, float zfar, uint32_t offsetX, uint32_t offsetY, uint32_t width, uint32_t height, curandState * randStates, float3 * output) { __shared__ float3 samples[kThreadsInRow][kThreadsInRow]; int x = blockIdx.x + offsetX; int y = blockIdx.y + offsetY; int rndx = blockIdx.x * blockDim.x + threadIdx.x; int rndy = blockIdx.y * blockDim.y + threadIdx.y; int rndIndex = rndy * gridDim.x * blockDim.x + rndx; samples[threadIdx.x][threadIdx.y] = make_float3(0.0f, 0.0f, 0.0f); if (x < width && y < height) { int tx = threadIdx.x; while (tx < samplesInRowCount) { int ty = threadIdx.y; while (ty < samplesInRowCount) { float const dx = (2.0f * x / width - 1.0f) * halfScreenSize.x; float const sdx = dx + cellSize.x * tx / samplesInRowCount; float const dy = (-2.0f * y / height + 1.0f) * halfScreenSize.y; float const sdy = dy - cellSize.y * ty / samplesInRowCount; CudaRay ray; ray.m_origin = origin; ray.m_direction = normalize(forward * znear + up * sdy + right * sdx); float3 outputColor; TraceRayGPU(&ray, spheres, spheresCount, materials, lightSources, lightSourcesCount, backgroundColor, znear, zfar, &randStates[rndIndex], &outputColor); samples[threadIdx.x][threadIdx.y] += 
outputColor; ty += blockDim.y; } tx += blockDim.x; } } // Samples reduction. __syncthreads(); int j = kThreadsInRow / 2; while (j != 0) { if (threadIdx.x < j && threadIdx.x + j < blockDim.x) samples[threadIdx.x][threadIdx.y] += samples[threadIdx.x + j][threadIdx.y]; __syncthreads(); if (threadIdx.y < j && threadIdx.y + j < blockDim.y) samples[threadIdx.x][threadIdx.y] += samples[threadIdx.x][threadIdx.y + j]; __syncthreads(); j /= 2; } if (threadIdx.x == 0 && threadIdx.y == 0 && x < width && y < height) output[x + y * width] = samples[0][0] * invSampleCount; } cudaEvent_t RayTrace(CudaSphere * spheres, uint32_t spheresCount, CudaMaterial * materials, uint32_t materialsCount, CudaLight * lightSources, uint32_t lightSourcesCount, uint32_t samplesInRowCount, float3 backgroundColor, float3 cameraPosition, float3 cameraDirection, float fov, float znear, float zfar, uint32_t width, uint32_t height, std::function<bool()> && realtimeHandler) { cudaEvent_t completion; if (cudaEventCreate(&completion) != cudaSuccess) { std::cout << "Error call cudaEventCreate." << std::endl; return nullptr; } GPUPtr<CudaSphere> spheresGPU(spheresCount); if (!spheresGPU) { std::cout << "Error allocate GPU memory." << std::endl; return completion; } if (cudaMemcpy(spheresGPU.m_ptr, spheres, spheresGPU.m_size, cudaMemcpyHostToDevice) != cudaSuccess) { std::cout << "Error call cudaMemcpy (spheresGPU)." << std::endl; return completion; } GPUPtr<CudaMaterial> materialsGPU(materialsCount); if (!materialsGPU) { std::cout << "Error allocate GPU memory." << std::endl; return completion; } if (cudaMemcpy(materialsGPU.m_ptr, materials, materialsGPU.m_size, cudaMemcpyHostToDevice) != cudaSuccess) { std::cout << "Error call cudaMemcpy (materialsGPU)." << std::endl; return completion; } GPUPtr<CudaLight> lightSourcesGPU(lightSourcesCount); if (!lightSourcesGPU) { std::cout << "Error allocate GPU memory." << std::endl; return completion; } if (cudaMemcpy(lightSourcesGPU.m_ptr, lightSources, lightSourcesGPU.m_size, cudaMemcpyHostToDevice) != cudaSuccess) { std::cout << "Error call cudaMemcpy (lightSourcesGPU)." << std::endl; return completion; } uint32_t constexpr kPartsCount = 16; dim3 grids((width + kPartsCount - 1) / kPartsCount, (height + kPartsCount - 1) / kPartsCount); dim3 threads(kThreadsInRow, kThreadsInRow); GPUPtr<curandState> randStatesGPU(grids.x * grids.y * threads.x * threads.y); if (!randStatesGPU) { std::cout << "Error allocate GPU memory." << std::endl; return completion; } GPUPtr<float3> outputGPU(width * height, true /* delayedFree */); if (!outputGPU) { std::cout << "Error allocate GPU memory." 
<< std::endl; return completion; } transferredOutputPtr.Set(outputGPU); InitRandom<<<grids, threads>>>(randStatesGPU.m_ptr, static_cast<unsigned long long>(time(nullptr))); static float3 kUp = make_float3(0.0f, 1.0f, 0.0f); auto const aspect = static_cast<float>(height) / width; float3 const right = cross(kUp, cameraDirection); float3 const up = cross(cameraDirection, right); float const dw = znear / tan(0.5f * fov); float2 const halfScreenSize = make_float2(dw, dw * aspect); float2 const cellSize = make_float2(2.0f * halfScreenSize.x / width, 2.0f * halfScreenSize.y / height); float const invSampleCount = 1.0f / (samplesInRowCount * samplesInRowCount); for (uint32_t i = 0; i < kPartsCount; ++i) { bool needInterrupt = false; for (uint32_t j = 0; j < kPartsCount; ++j) { TraceAllRaysGPU<<<grids, threads>>>( spheresGPU.m_ptr, spheresCount, materialsGPU.m_ptr, lightSourcesGPU.m_ptr, lightSourcesCount, backgroundColor, cameraPosition, cameraDirection, up, right, halfScreenSize, cellSize, samplesInRowCount, invSampleCount, znear, zfar, i * grids.x, j * grids.y, width, height, randStatesGPU.m_ptr, outputGPU.m_ptr); if (realtimeHandler) needInterrupt = realtimeHandler(); } if (needInterrupt) break; } if (cudaEventRecord(completion, 0) != cudaSuccess) std::cout << "Error call cudaEventRecord." << std::endl; return completion; } bool InProgress(cudaEvent_t completion) { if (cudaEventQuery(completion) != cudaErrorNotReady) { auto err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << "Error CUDA: " << cudaGetErrorString(err) << std::endl; return true; } return false; } return true; } void CopyOutputToBuffer(float * buffer) { if (cudaMemcpy(buffer, transferredOutputPtr.m_ptr, transferredOutputPtr.m_size, cudaMemcpyDeviceToHost) != cudaSuccess) { std::cout << "Error call cudaMemcpy (realtimeBuffer)." << std::endl; } } void FinishRayTrace(float * output, cudaEvent_t completion) { if (cudaMemcpy(output, transferredOutputPtr.m_ptr, transferredOutputPtr.m_size, cudaMemcpyDeviceToHost) != cudaSuccess) { std::cout << "Error call cudaMemcpy (output)." << std::endl; } if (cudaDeviceSynchronize() != cudaSuccess) std::cout << "Error call cudaDeviceSynchronize." << std::endl; auto err = cudaGetLastError(); if (err != cudaSuccess) std::cout << "Error CUDA: " << cudaGetErrorString(err) << std::endl; for (size_t i = 0; i < delayedFreeMemory.size(); ++i) cudaFree(delayedFreeMemory[i]); delayedFreeMemory.clear(); if (cudaEventDestroy(completion) != cudaSuccess) std::cout << "Error call cudaEventDestroy." << std::endl; } } // namespace ray_tracing_cuda
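Besides the renamed API calls, the ray-tracing pair above relies on an event so the host can poll for kernel completion: RayTrace records a hipEvent_t / cudaEvent_t after the last launch and InProgress checks it with hipEventQuery / cudaEventQuery against hipErrorNotReady. A minimal sketch of that polling pattern, assuming a trivial stand-in kernel named busy:

#include <hip/hip_runtime.h>
#include <cstdio>

// Stand-in for a long-running kernel; it exists only so there is work to wait on.
__global__ void busy(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float v = x[i];
        for (int k = 0; k < 10000; ++k) v = v * 1.0000001f + 1.0f;
        x[i] = v;
    }
}

int main() {
    const int n = 1 << 20;
    float *d = nullptr;
    hipMalloc(&d, n * sizeof(float));

    hipEvent_t done;
    hipEventCreate(&done);

    hipLaunchKernelGGL(busy, dim3((n + 255) / 256), dim3(256), 0, 0, d, n);
    hipEventRecord(done, 0);                 // CUDA: cudaEventRecord(done, 0)

    // The host keeps control while the GPU works; hipEventQuery keeps returning
    // hipErrorNotReady until all work recorded before the event has finished.
    while (hipEventQuery(done) == hipErrorNotReady) {
        // e.g. refresh a preview buffer, as CopyOutputToBuffer does in the entry above
    }

    hipEventDestroy(done);
    hipFree(d);
    std::printf("kernel finished\n");
    return 0;
}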
b78a1942ba0548cded15b436d4dcb53faee83aa5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <legacy/NativeOps.h> #include <ops/declarable/helpers/nth_element.h> #include "execution/cuda/LaunchDims.h" namespace sd { namespace ops { namespace helpers { template <typename T> static SD_KERNEL void fillUpElementKernel(void* outputBuffer, sd::LongType const* outputShapeInfo, void* inputBuffer, sd::LongType const* inputShapeInfo, sd::LongType const* pTadShape, sd::LongType const* pTadOffsets, sd::LongType n) { __shared__ sd::LongType bufferLength; auto z = reinterpret_cast<T*>(outputBuffer); auto x = reinterpret_cast<T*>(inputBuffer); if (threadIdx.x == 0) bufferLength = shape::length(outputShapeInfo); __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (int t = tid; t < bufferLength; t += step) { auto tX = x + pTadOffsets[t]; z[shape::getIndexOffset(t, outputShapeInfo)] = tX[shape::getIndexOffset(n, pTadShape)]; // tX]; } } template <typename T> void nthElementFunctor_(sd::LaunchContext* context, NDArray* input, sd::LongType n, NDArray* output, bool reverse) { NDArray::prepareSpecialUse({output}, {input}); NDArray sortedVals(*input); sd::Pointer params[2]; params[0] = context; params[1] = context->getCudaStream(); // Nth element in sorted sequence : basic algorithm sort and retrieve nth element in sorted if (input->isVector()) { sort(params, nullptr, sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), reverse); hipMemcpy(reinterpret_cast<T*>(output->specialBuffer()), reinterpret_cast<T*>(sortedVals.specialBuffer()) + n, sizeof(T), hipMemcpyDeviceToDevice); } else { // rank greater than 1 std::vector<sd::LongType> lastDims( {input->rankOf() - 1}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(sortedVals.shapeInfo(), &lastDims); auto pTadShape = packX->specialShapeInfo(); auto pTadShapeH = packX->primaryShapeInfo(); auto pTadOffsets = packX->specialOffsets(); sortTad(params, sortedVals.buffer(), sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), lastDims.data(), lastDims.size(), pTadShape, pTadOffsets, reverse); sortedVals.tickWriteDevice(); sortedVals.syncToHost(); auto stream = context->getCudaStream(); dim3 launchDims = getLaunchDims("nth_element_fill"); hipLaunchKernelGGL(( fillUpElementKernel<T>), dim3(launchDims.y), dim3(launchDims.x), launchDims.z, *stream, output->specialBuffer(), output->specialShapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), 
pTadShape, pTadOffsets, n); } NDArray::registerSpecialUse({output}, {input}); } void nthElementFunctor(sd::LaunchContext* context, NDArray* input, sd::LongType n, NDArray* output, bool reverse) { BUILD_SINGLE_SELECTOR(input->dataType(), nthElementFunctor_, (context, input, n, output, reverse), SD_COMMON_TYPES); } } // namespace helpers } // namespace ops } // namespace sd
b78a1942ba0548cded15b436d4dcb53faee83aa5.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <legacy/NativeOps.h> #include <ops/declarable/helpers/nth_element.h> #include "execution/cuda/LaunchDims.h" namespace sd { namespace ops { namespace helpers { template <typename T> static SD_KERNEL void fillUpElementKernel(void* outputBuffer, sd::LongType const* outputShapeInfo, void* inputBuffer, sd::LongType const* inputShapeInfo, sd::LongType const* pTadShape, sd::LongType const* pTadOffsets, sd::LongType n) { __shared__ sd::LongType bufferLength; auto z = reinterpret_cast<T*>(outputBuffer); auto x = reinterpret_cast<T*>(inputBuffer); if (threadIdx.x == 0) bufferLength = shape::length(outputShapeInfo); __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (int t = tid; t < bufferLength; t += step) { auto tX = x + pTadOffsets[t]; z[shape::getIndexOffset(t, outputShapeInfo)] = tX[shape::getIndexOffset(n, pTadShape)]; // tX]; } } template <typename T> void nthElementFunctor_(sd::LaunchContext* context, NDArray* input, sd::LongType n, NDArray* output, bool reverse) { NDArray::prepareSpecialUse({output}, {input}); NDArray sortedVals(*input); sd::Pointer params[2]; params[0] = context; params[1] = context->getCudaStream(); // Nth element in sorted sequence : basic algorithm sort and retrieve nth element in sorted if (input->isVector()) { sort(params, nullptr, sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), reverse); cudaMemcpy(reinterpret_cast<T*>(output->specialBuffer()), reinterpret_cast<T*>(sortedVals.specialBuffer()) + n, sizeof(T), cudaMemcpyDeviceToDevice); } else { // rank greater than 1 std::vector<sd::LongType> lastDims( {input->rankOf() - 1}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(sortedVals.shapeInfo(), &lastDims); auto pTadShape = packX->specialShapeInfo(); auto pTadShapeH = packX->primaryShapeInfo(); auto pTadOffsets = packX->specialOffsets(); sortTad(params, sortedVals.buffer(), sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), lastDims.data(), lastDims.size(), pTadShape, pTadOffsets, reverse); sortedVals.tickWriteDevice(); sortedVals.syncToHost(); auto stream = context->getCudaStream(); dim3 launchDims = getLaunchDims("nth_element_fill"); fillUpElementKernel<T><<<launchDims.y, launchDims.x, launchDims.z, *stream>>>(output->specialBuffer(), output->specialShapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), pTadShape, pTadOffsets, n); } NDArray::registerSpecialUse({output}, {input}); } void 
nthElementFunctor(sd::LaunchContext* context, NDArray* input, sd::LongType n, NDArray* output, bool reverse) { BUILD_SINGLE_SELECTOR(input->dataType(), nthElementFunctor_, (context, input, n, output, reverse), SD_COMMON_TYPES); } } // namespace helpers } // namespace ops } // namespace sd
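fillUpElementKernel in the pair above walks the output with a grid-stride loop: each thread starts at its global id and advances by gridDim.x * blockDim.x, so the launch geometry chosen by getLaunchDims never has to match the element count exactly. A minimal sketch of that pattern on a plain array copy (the kernel name copy_strided and the sizes are illustrative):

#include <hip/hip_runtime.h>

// Grid-stride loop: each thread handles indices tid, tid+step, tid+2*step, ...
// so any grid/block configuration covers any n.
__global__ void copy_strided(const float *in, float *out, long long n) {
    long long tid  = blockIdx.x * blockDim.x + threadIdx.x;
    long long step = (long long)gridDim.x * blockDim.x;
    for (long long i = tid; i < n; i += step) {
        out[i] = in[i];
    }
}

int main() {
    const long long n = 1 << 20;
    float *d_in = nullptr, *d_out = nullptr;
    hipMalloc(&d_in,  n * sizeof(float));
    hipMalloc(&d_out, n * sizeof(float));
    hipMemset(d_in, 0, n * sizeof(float));

    // A deliberately small grid still covers all n elements thanks to the stride.
    hipLaunchKernelGGL(copy_strided, dim3(64), dim3(256), 0, 0, d_in, d_out, n);
    hipDeviceSynchronize();

    hipFree(d_in);
    hipFree(d_out);
    return 0;
}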
a7b484be90a0ac4f77f0684d048d6c18e3206948.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "header.h" #include "util.h" #include "mapper.cuh" #include "reducer.cuh" #include "wtime.h" #include "barrier.cuh" #include "gpu_graph.cuh" #include "meta_data.cuh" #include "mapper_enactor.cuh" #include "cpu_bfs.hpp" /*user defined vertex behavior function*/ __inline__ __host__ __device__ feature_t user_mapper_push ( vertex_t src, vertex_t dest, feature_t level, index_t* beg_pos, weight_t edge_weight, feature_t* vert_status, feature_t* vert_status_prev) { feature_t feature_end = vert_status[dest]; return (feature_end == INFTY ? level+1 : feature_end); } /*user defined vertex behavior function*/ __inline__ __host__ __device__ bool vertex_selector_push ( vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos, feature_t *vert_status, feature_t *vert_status_prev) { //if(vert_status[vert_id]==level) return true; //else return false; return (vert_status[vert_id]==level); } /*user defined vertex behavior function*/ __inline__ __host__ __device__ feature_t user_mapper_pull ( vertex_t src, vertex_t dest, feature_t level, index_t* beg_pos, weight_t edge_weight, feature_t* vert_status, feature_t* vert_status_prev) { return vert_status[src]; } /*user defined vertex behavior function*/ __inline__ __host__ __device__ bool vertex_selector_pull ( vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos, feature_t *vert_status, feature_t *vert_status_prev) { //if(vert_status[vert_id]==INFTY) return true; //else return false; return (vert_status[vert_id]==INFTY); } __device__ cb_reducer vert_selector_push_d = vertex_selector_push; __device__ cb_reducer vert_selector_pull_d = vertex_selector_pull; __device__ cb_mapper vert_behave_push_d = user_mapper_push; __device__ cb_mapper vert_behave_pull_d = user_mapper_pull; /*init traversal*/ /*init traversal*/ __global__ void init(vertex_t src_v, vertex_t vert_count, meta_data mdata) { // ////status //mdata.vert_status[src_v] = 0; index_t tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < vert_count) { if(tid != src_v) mdata.vert_status[tid] = INFTY; else { mdata.vert_status[tid] = 0; mdata.worklist_mid[0] = src_v; mdata.worklist_sz_sml[0] = 0; mdata.worklist_sz_mid[0] = 1; mdata.worklist_sz_lrg[0] = 0; mdata.bitmap[src_v>>3] |= (1<<(src_v & 7)); } tid += blockDim.x * gridDim.x; } } int main(int args, char **argv) { std::cout<<"Input: /path/to/exe /path/to/beg_pos /path/to/adj_list /path/weight_list src blk_size swith_iter\n"; if(args<5){std::cout<<"Wrong input\n";exit(-1);} for(int i = 0; i < args; i++) std::cout<<argv[i]<<" "; std::cout<<"\n"; double tm_map,tm_red,tm_scan; char *file_beg_pos = argv[1]; char *file_adj_list = argv[2]; char *file_weight_list = argv[3]; vertex_t src_v = (vertex_t)atol(argv[4]); int blk_size = atoi(argv[5]); int switch_iter = atoi(argv[6]); H_ERR(hipSetDevice(0)); //Read graph to CPU graph<long, long, long,vertex_t, index_t, weight_t> *ginst=new graph<long, long, long,vertex_t, index_t, weight_t> (file_beg_pos, file_adj_list, file_weight_list); feature_t *level, *level_h; hipMalloc((void **)&level, sizeof(feature_t)); hipHostMalloc((void **)&level_h, sizeof(feature_t)); hipMemset(level, 0, sizeof(feature_t)); cb_reducer vert_selector_push_h; cb_reducer vert_selector_pull_h; hipMemcpyFromSymbol(&vert_selector_push_h,vert_selector_push_d,sizeof(cb_reducer)); hipMemcpyFromSymbol(&vert_selector_pull_h,vert_selector_pull_d,sizeof(cb_reducer)); cb_mapper vert_behave_push_h; cb_mapper vert_behave_pull_h; 
hipMemcpyFromSymbol(&vert_behave_push_h,vert_behave_push_d,sizeof(cb_reducer)); hipMemcpyFromSymbol(&vert_behave_pull_h,vert_behave_pull_d,sizeof(cb_reducer)); //Init three data structures gpu_graph ggraph(ginst); meta_data mdata(ginst->vert_count, ginst->edge_count); Barrier global_barrier(BLKS_NUM); hipLaunchKernelGGL(( init), dim3(256),dim3(256), 0, 0, src_v, ginst->vert_count, mdata); mapper compute_mapper(ggraph, mdata, vert_behave_push_h, vert_behave_pull_h); reducer worklist_gather(ggraph, mdata, vert_selector_push_h, vert_selector_pull_h); H_ERR(hipDeviceSynchronize()); //mapper_hybrid_push_merge(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); hipLaunchKernelGGL(( init), dim3(256),dim3(256), 0, 0, src_v, ginst->vert_count, mdata); hipMemset(level, 0, sizeof(feature_t)); //int blk_size = 128; double time = wtime(); balanced_push(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); //push_pull_opt(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); //mapper_merge_push(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); //mapper_hybrid_push_merge(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); //time = wtime() - time; //std::cout<<"Total time: "<<time<<" second(s).\n"; // //time=wtime(); if(switch_iter!=0) { mapper_merge_pull(blk_size,switch_iter,level, ggraph,mdata,compute_mapper, worklist_gather, global_barrier); //time = wtime() - time; //std::cout<<"Total time: "<<time<<" second(s).\n"; // //time=wtime(); //balanced_push(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); mapper_hybrid_push_merge(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); } time = wtime() - time; hipMemcpy(level_h, level, sizeof(feature_t), hipMemcpyDeviceToHost); std::cout<<"Total iteration: "<<level_h[0]<<"\n"; std::cout<<"Total time: "<<time<<" second(s).\n"; feature_t *gpu_dist = new feature_t[ginst->vert_count]; hipMemcpy(gpu_dist, mdata.vert_status, sizeof(feature_t) * ginst->vert_count, hipMemcpyDeviceToHost); feature_t *cpu_dist; cpu_bfs<index_t, vertex_t, feature_t> (cpu_dist, src_v, ginst->vert_count, ginst->edge_count, ginst->beg_pos, ginst->adj_list); if (memcmp(cpu_dist, gpu_dist, sizeof(feature_t) * ginst->vert_count) == 0) printf("Result correct\n"); else { printf("Result wrong! Below are top 10 wrongs\n"); int wrongcount = 0; for(index_t i = 0; i < ginst->vert_count; i ++) { if(cpu_dist[i] != gpu_dist[i]) { printf("vertex %d CPU-vs-GPU: %d %d\n", i, cpu_dist[i], gpu_dist[i]); wrongcount ++; if(wrongcount == 10) break; } } } }
a7b484be90a0ac4f77f0684d048d6c18e3206948.cu
#include "header.h" #include "util.h" #include "mapper.cuh" #include "reducer.cuh" #include "wtime.h" #include "barrier.cuh" #include "gpu_graph.cuh" #include "meta_data.cuh" #include "mapper_enactor.cuh" #include "cpu_bfs.hpp" /*user defined vertex behavior function*/ __inline__ __host__ __device__ feature_t user_mapper_push ( vertex_t src, vertex_t dest, feature_t level, index_t* beg_pos, weight_t edge_weight, feature_t* vert_status, feature_t* vert_status_prev) { feature_t feature_end = vert_status[dest]; return (feature_end == INFTY ? level+1 : feature_end); } /*user defined vertex behavior function*/ __inline__ __host__ __device__ bool vertex_selector_push ( vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos, feature_t *vert_status, feature_t *vert_status_prev) { //if(vert_status[vert_id]==level) return true; //else return false; return (vert_status[vert_id]==level); } /*user defined vertex behavior function*/ __inline__ __host__ __device__ feature_t user_mapper_pull ( vertex_t src, vertex_t dest, feature_t level, index_t* beg_pos, weight_t edge_weight, feature_t* vert_status, feature_t* vert_status_prev) { return vert_status[src]; } /*user defined vertex behavior function*/ __inline__ __host__ __device__ bool vertex_selector_pull ( vertex_t vert_id, feature_t level, vertex_t *adj_list, index_t *beg_pos, feature_t *vert_status, feature_t *vert_status_prev) { //if(vert_status[vert_id]==INFTY) return true; //else return false; return (vert_status[vert_id]==INFTY); } __device__ cb_reducer vert_selector_push_d = vertex_selector_push; __device__ cb_reducer vert_selector_pull_d = vertex_selector_pull; __device__ cb_mapper vert_behave_push_d = user_mapper_push; __device__ cb_mapper vert_behave_pull_d = user_mapper_pull; /*init traversal*/ /*init traversal*/ __global__ void init(vertex_t src_v, vertex_t vert_count, meta_data mdata) { // ////status //mdata.vert_status[src_v] = 0; index_t tid = threadIdx.x + blockIdx.x * blockDim.x; while(tid < vert_count) { if(tid != src_v) mdata.vert_status[tid] = INFTY; else { mdata.vert_status[tid] = 0; mdata.worklist_mid[0] = src_v; mdata.worklist_sz_sml[0] = 0; mdata.worklist_sz_mid[0] = 1; mdata.worklist_sz_lrg[0] = 0; mdata.bitmap[src_v>>3] |= (1<<(src_v & 7)); } tid += blockDim.x * gridDim.x; } } int main(int args, char **argv) { std::cout<<"Input: /path/to/exe /path/to/beg_pos /path/to/adj_list /path/weight_list src blk_size swith_iter\n"; if(args<5){std::cout<<"Wrong input\n";exit(-1);} for(int i = 0; i < args; i++) std::cout<<argv[i]<<" "; std::cout<<"\n"; double tm_map,tm_red,tm_scan; char *file_beg_pos = argv[1]; char *file_adj_list = argv[2]; char *file_weight_list = argv[3]; vertex_t src_v = (vertex_t)atol(argv[4]); int blk_size = atoi(argv[5]); int switch_iter = atoi(argv[6]); H_ERR(cudaSetDevice(0)); //Read graph to CPU graph<long, long, long,vertex_t, index_t, weight_t> *ginst=new graph<long, long, long,vertex_t, index_t, weight_t> (file_beg_pos, file_adj_list, file_weight_list); feature_t *level, *level_h; cudaMalloc((void **)&level, sizeof(feature_t)); cudaMallocHost((void **)&level_h, sizeof(feature_t)); cudaMemset(level, 0, sizeof(feature_t)); cb_reducer vert_selector_push_h; cb_reducer vert_selector_pull_h; cudaMemcpyFromSymbol(&vert_selector_push_h,vert_selector_push_d,sizeof(cb_reducer)); cudaMemcpyFromSymbol(&vert_selector_pull_h,vert_selector_pull_d,sizeof(cb_reducer)); cb_mapper vert_behave_push_h; cb_mapper vert_behave_pull_h; cudaMemcpyFromSymbol(&vert_behave_push_h,vert_behave_push_d,sizeof(cb_reducer)); 
cudaMemcpyFromSymbol(&vert_behave_pull_h,vert_behave_pull_d,sizeof(cb_reducer)); //Init three data structures gpu_graph ggraph(ginst); meta_data mdata(ginst->vert_count, ginst->edge_count); Barrier global_barrier(BLKS_NUM); init<<<256,256>>>(src_v, ginst->vert_count, mdata); mapper compute_mapper(ggraph, mdata, vert_behave_push_h, vert_behave_pull_h); reducer worklist_gather(ggraph, mdata, vert_selector_push_h, vert_selector_pull_h); H_ERR(cudaThreadSynchronize()); //mapper_hybrid_push_merge(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); init<<<256,256>>>(src_v, ginst->vert_count, mdata); cudaMemset(level, 0, sizeof(feature_t)); //int blk_size = 128; double time = wtime(); balanced_push(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); //push_pull_opt(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); //mapper_merge_push(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); //mapper_hybrid_push_merge(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); //time = wtime() - time; //std::cout<<"Total time: "<<time<<" second(s).\n"; // //time=wtime(); if(switch_iter!=0) { mapper_merge_pull(blk_size,switch_iter,level, ggraph,mdata,compute_mapper, worklist_gather, global_barrier); //time = wtime() - time; //std::cout<<"Total time: "<<time<<" second(s).\n"; // //time=wtime(); //balanced_push(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); mapper_hybrid_push_merge(blk_size, level, ggraph, mdata, compute_mapper, worklist_gather, global_barrier); } time = wtime() - time; cudaMemcpy(level_h, level, sizeof(feature_t), cudaMemcpyDeviceToHost); std::cout<<"Total iteration: "<<level_h[0]<<"\n"; std::cout<<"Total time: "<<time<<" second(s).\n"; feature_t *gpu_dist = new feature_t[ginst->vert_count]; cudaMemcpy(gpu_dist, mdata.vert_status, sizeof(feature_t) * ginst->vert_count, cudaMemcpyDeviceToHost); feature_t *cpu_dist; cpu_bfs<index_t, vertex_t, feature_t> (cpu_dist, src_v, ginst->vert_count, ginst->edge_count, ginst->beg_pos, ginst->adj_list); if (memcmp(cpu_dist, gpu_dist, sizeof(feature_t) * ginst->vert_count) == 0) printf("Result correct\n"); else { printf("Result wrong! Below are top 10 wrongs\n"); int wrongcount = 0; for(index_t i = 0; i < ginst->vert_count; i ++) { if(cpu_dist[i] != gpu_dist[i]) { printf("vertex %d CPU-vs-GPU: %d %d\n", i, cpu_dist[i], gpu_dist[i]); wrongcount ++; if(wrongcount == 10) break; } } } }
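Both versions of the BFS driver above fetch __device__ function pointers (vert_selector_push_d, vert_behave_push_d, ...) to the host with cudaMemcpyFromSymbol / hipMemcpyFromSymbol and then hand them to the mapper and reducer objects, because host code cannot take the address of a __device__ function directly. A minimal sketch of that trick with a hypothetical square op is below; it mirrors the entry in passing the symbol directly, although the portable HIP spelling wraps it in HIP_SYMBOL():

#include <hip/hip_runtime.h>
#include <cstdio>

typedef int (*unary_op)(int);

__device__ int square(int x) { return x * x; }

// Device-side pointer to the device function; the host copies this symbol
// instead of taking square's address itself.
__device__ unary_op square_d = square;

__global__ void apply(unary_op op, int *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = op(i);
}

int main() {
    unary_op op_h;
    hipMemcpyFromSymbol(&op_h, square_d, sizeof(unary_op));

    int *d_out = nullptr;
    hipMalloc(&d_out, 8 * sizeof(int));
    hipLaunchKernelGGL(apply, dim3(1), dim3(8), 0, 0, op_h, d_out, 8);

    int h_out[8];
    hipMemcpy(h_out, d_out, sizeof(h_out), hipMemcpyDeviceToHost);
    for (int i = 0; i < 8; ++i) std::printf("%d ", h_out[i]);
    std::printf("\n");
    hipFree(d_out);
    return 0;
}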
10cfd62eba581558a34433b6ebf443b55e7cbbda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zlange.cu, normal z -> s, Thu Oct 8 23:05:33 2020 @author Mark Gates */ #include "magma_internal.h" #include "magma_templates.h" #define REAL #define NB_X 64 /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf, * where m and n are any size. * Has ceil( m/NB_X ) blocks of NB_X threads. Each thread does one row. * See also slange_max_kernel code, below. */ extern "C" __global__ void slange_inf_kernel( int m, int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { int i = blockIdx.x*NB_X + threadIdx.x; float rsum[4] = {0, 0, 0, 0}; int n_mod_4 = n % 4; n -= n_mod_4; // if beyond last row, skip row if ( i < m ) { A += i; if ( n >= 4 ) { const float *Aend = A + lda*n; float rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] }; A += 4*lda; while( A < Aend ) { rsum[0] += MAGMA_S_ABS( rA[0] ); rA[0] = A[0]; rsum[1] += MAGMA_S_ABS( rA[1] ); rA[1] = A[lda]; rsum[2] += MAGMA_S_ABS( rA[2] ); rA[2] = A[2*lda]; rsum[3] += MAGMA_S_ABS( rA[3] ); rA[3] = A[3*lda]; A += 4*lda; } rsum[0] += MAGMA_S_ABS( rA[0] ); rsum[1] += MAGMA_S_ABS( rA[1] ); rsum[2] += MAGMA_S_ABS( rA[2] ); rsum[3] += MAGMA_S_ABS( rA[3] ); } /* clean up code */ switch( n_mod_4 ) { case 0: break; case 1: rsum[0] += MAGMA_S_ABS( A[0] ); break; case 2: rsum[0] += MAGMA_S_ABS( A[0] ); rsum[1] += MAGMA_S_ABS( A[lda] ); break; case 3: rsum[0] += MAGMA_S_ABS( A[0] ); rsum[1] += MAGMA_S_ABS( A[lda] ); rsum[2] += MAGMA_S_ABS( A[2*lda] ); break; } /* compute final result */ dwork[i] = rsum[0] + rsum[1] + rsum[2] + rsum[3]; } } /* Computes max of row dwork[i] = max( abs( A(i,:) )), i=0:m-1, for || A ||_max, * where m and n are any size. * Has ceil( m/NB_X ) blocks of NB_X threads. Each thread does one row. * Based on slange_inf_kernel code, above. */ extern "C" __global__ void slange_max_kernel( int m, int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { int i = blockIdx.x*NB_X + threadIdx.x; float rmax[4] = {0, 0, 0, 0}; int n_mod_4 = n % 4; n -= n_mod_4; // if beyond last row, skip row if ( i < m ) { A += i; if ( n >= 4 ) { const float *Aend = A + lda*n; float rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] }; A += 4*lda; while( A < Aend ) { rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( rA[0] )); rA[0] = A[0]; rmax[1] = max_nan( rmax[1], MAGMA_S_ABS( rA[1] )); rA[1] = A[lda]; rmax[2] = max_nan( rmax[2], MAGMA_S_ABS( rA[2] )); rA[2] = A[2*lda]; rmax[3] = max_nan( rmax[3], MAGMA_S_ABS( rA[3] )); rA[3] = A[3*lda]; A += 4*lda; } rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( rA[0] )); rmax[1] = max_nan( rmax[1], MAGMA_S_ABS( rA[1] )); rmax[2] = max_nan( rmax[2], MAGMA_S_ABS( rA[2] )); rmax[3] = max_nan( rmax[3], MAGMA_S_ABS( rA[3] )); } /* clean up code */ switch( n_mod_4 ) { case 0: break; case 1: rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( A[0] )); break; case 2: rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( A[ 0] )); rmax[1] = max_nan( rmax[1], MAGMA_S_ABS( A[lda] )); break; case 3: rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( A[ 0] )); rmax[1] = max_nan( rmax[1], MAGMA_S_ABS( A[ lda] )); rmax[2] = max_nan( rmax[2], MAGMA_S_ABS( A[2*lda] )); break; } /* compute final result */ dwork[i] = max_nan( max_nan( max_nan( rmax[0], rmax[1] ), rmax[2] ), rmax[3] ); } } /* Computes col sums dwork[j] = sum( abs( A(:,j) )), j=0:n-1, for || A ||_one, * where m and n are any size. 
* Has n blocks of NB threads each. Block j sums one column, A(:,j) into dwork[j]. * Thread i accumulates A(i,j) + A(i+NB,j) + A(i+2*NB,j) + ... into ssum[i], * then threads collectively do a sum-reduction of ssum, * and finally thread 0 saves to dwork[j]. */ extern "C" __global__ void slange_one_kernel( int m, int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { __shared__ float ssum[NB_X]; int tx = threadIdx.x; A += blockIdx.x*lda; // column j ssum[tx] = 0; for( int i = tx; i < m; i += NB_X ) { ssum[tx] += MAGMA_S_ABS( A[i] ); } magma_sum_reduce< NB_X >( tx, ssum ); if ( tx == 0 ) { dwork[ blockIdx.x ] = ssum[0]; } } /* Based on slange_one_kernel code, above. * Computes col sums dwork[j] = sum( abs( A(:,j) )^2 ), j=0:n-1, for || A ||_F, * where m and n are any size. * Has n blocks of NB threads each. Block j sums one column, A(:,j) into dwork[j]. * Thread i accumulates A(i,j) + A(i+NB,j) + A(i+2*NB,j) + ... into ssum[i], * then threads collectively do a sum-reduction of ssum, * and finally thread 0 saves to dwork[j]. */ extern "C" __global__ void slange_fro_kernel( int m, int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { __shared__ float ssum[NB_X]; int tx = threadIdx.x; A += blockIdx.x*lda; // column j ssum[tx] = 0; for( int i = tx; i < m; i += NB_X ) { #ifdef COMPLEX float a = MAGMA_S_ABS( A[i] ); #else float a = A[i]; #endif ssum[tx] += a*a; } magma_sum_reduce< NB_X >( tx, ssum ); if ( tx == 0 ) { dwork[ blockIdx.x ] = ssum[0]; } } /***************************************************************************//** Purpose ------- SLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real matrix A. Description ----------- SLANGE returns the value SLANGE = ( max(abs(A(i,j))), NORM = MagmaMaxNorm ( ( norm1(A), NORM = MagmaOneNorm ( ( normI(A), NORM = MagmaInfNorm ( ( normF(A), NORM = MagmaFrobeniusNorm where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments --------- @param[in] norm magma_norm_t Specifies the value to be returned in SLANGE as described above. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. When M = 0, SLANGE is set to zero. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. When N = 0, SLANGE is set to zero. @param[in] dA REAL array on the GPU, dimension (LDDA,N) The m by n matrix A. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(M,1). @param dwork (workspace) REAL array on the GPU, dimension (LWORK). @param[in] lwork INTEGER The dimension of the array WORK. If NORM = MagmaInfNorm or MagmaMaxNorm, LWORK >= max( 1, M ). If NORM = MagmaOneNorm, LWORK >= max( 1, N ). Note this is different than LAPACK, which requires WORK only for NORM = MagmaInfNorm, and does not pass LWORK. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lange *******************************************************************************/ extern "C" float magmablas_slange( magma_norm_t norm, magma_int_t m, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dwork, magma_int_t lwork, magma_queue_t queue ) { magma_int_t info = 0; if ( ! 
(norm == MagmaInfNorm || norm == MagmaMaxNorm || norm == MagmaOneNorm || norm == MagmaFrobeniusNorm) ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -5; else if ( ((norm == MagmaInfNorm || norm == MagmaMaxNorm) && (lwork < m)) || ((norm == MagmaOneNorm || norm == MagmaFrobeniusNorm ) && (lwork < n)) ) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( m == 0 || n == 0 ) return 0; //int i; dim3 threads( NB_X ); float result = -1; if ( norm == MagmaInfNorm ) { dim3 grid( magma_ceildiv( m, NB_X ) ); hipLaunchKernelGGL(( slange_inf_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dwork ); hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , m, dwork ); } else if ( norm == MagmaMaxNorm ) { dim3 grid( magma_ceildiv( m, NB_X ) ); hipLaunchKernelGGL(( slange_max_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dwork ); hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , m, dwork ); } else if ( norm == MagmaOneNorm ) { dim3 grid( n ); hipLaunchKernelGGL(( slange_one_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dwork ); hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , n, dwork ); // note n instead of m } else if ( norm == MagmaFrobeniusNorm ) { dim3 grid( n ); hipLaunchKernelGGL(( slange_fro_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dwork ); hipLaunchKernelGGL(( magma_sum_reduce_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , n, dwork ); // note n instead of m } magma_sgetvector( 1, &dwork[0], 1, &result, 1, queue ); if( norm == MagmaFrobeniusNorm ) { result = sqrt(result); // Square root for final result. } return result; }
10cfd62eba581558a34433b6ebf443b55e7cbbda.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zlange.cu, normal z -> s, Thu Oct 8 23:05:33 2020 @author Mark Gates */ #include "magma_internal.h" #include "magma_templates.h" #define REAL #define NB_X 64 /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf, * where m and n are any size. * Has ceil( m/NB_X ) blocks of NB_X threads. Each thread does one row. * See also slange_max_kernel code, below. */ extern "C" __global__ void slange_inf_kernel( int m, int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { int i = blockIdx.x*NB_X + threadIdx.x; float rsum[4] = {0, 0, 0, 0}; int n_mod_4 = n % 4; n -= n_mod_4; // if beyond last row, skip row if ( i < m ) { A += i; if ( n >= 4 ) { const float *Aend = A + lda*n; float rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] }; A += 4*lda; while( A < Aend ) { rsum[0] += MAGMA_S_ABS( rA[0] ); rA[0] = A[0]; rsum[1] += MAGMA_S_ABS( rA[1] ); rA[1] = A[lda]; rsum[2] += MAGMA_S_ABS( rA[2] ); rA[2] = A[2*lda]; rsum[3] += MAGMA_S_ABS( rA[3] ); rA[3] = A[3*lda]; A += 4*lda; } rsum[0] += MAGMA_S_ABS( rA[0] ); rsum[1] += MAGMA_S_ABS( rA[1] ); rsum[2] += MAGMA_S_ABS( rA[2] ); rsum[3] += MAGMA_S_ABS( rA[3] ); } /* clean up code */ switch( n_mod_4 ) { case 0: break; case 1: rsum[0] += MAGMA_S_ABS( A[0] ); break; case 2: rsum[0] += MAGMA_S_ABS( A[0] ); rsum[1] += MAGMA_S_ABS( A[lda] ); break; case 3: rsum[0] += MAGMA_S_ABS( A[0] ); rsum[1] += MAGMA_S_ABS( A[lda] ); rsum[2] += MAGMA_S_ABS( A[2*lda] ); break; } /* compute final result */ dwork[i] = rsum[0] + rsum[1] + rsum[2] + rsum[3]; } } /* Computes max of row dwork[i] = max( abs( A(i,:) )), i=0:m-1, for || A ||_max, * where m and n are any size. * Has ceil( m/NB_X ) blocks of NB_X threads. Each thread does one row. * Based on slange_inf_kernel code, above. */ extern "C" __global__ void slange_max_kernel( int m, int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { int i = blockIdx.x*NB_X + threadIdx.x; float rmax[4] = {0, 0, 0, 0}; int n_mod_4 = n % 4; n -= n_mod_4; // if beyond last row, skip row if ( i < m ) { A += i; if ( n >= 4 ) { const float *Aend = A + lda*n; float rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] }; A += 4*lda; while( A < Aend ) { rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( rA[0] )); rA[0] = A[0]; rmax[1] = max_nan( rmax[1], MAGMA_S_ABS( rA[1] )); rA[1] = A[lda]; rmax[2] = max_nan( rmax[2], MAGMA_S_ABS( rA[2] )); rA[2] = A[2*lda]; rmax[3] = max_nan( rmax[3], MAGMA_S_ABS( rA[3] )); rA[3] = A[3*lda]; A += 4*lda; } rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( rA[0] )); rmax[1] = max_nan( rmax[1], MAGMA_S_ABS( rA[1] )); rmax[2] = max_nan( rmax[2], MAGMA_S_ABS( rA[2] )); rmax[3] = max_nan( rmax[3], MAGMA_S_ABS( rA[3] )); } /* clean up code */ switch( n_mod_4 ) { case 0: break; case 1: rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( A[0] )); break; case 2: rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( A[ 0] )); rmax[1] = max_nan( rmax[1], MAGMA_S_ABS( A[lda] )); break; case 3: rmax[0] = max_nan( rmax[0], MAGMA_S_ABS( A[ 0] )); rmax[1] = max_nan( rmax[1], MAGMA_S_ABS( A[ lda] )); rmax[2] = max_nan( rmax[2], MAGMA_S_ABS( A[2*lda] )); break; } /* compute final result */ dwork[i] = max_nan( max_nan( max_nan( rmax[0], rmax[1] ), rmax[2] ), rmax[3] ); } } /* Computes col sums dwork[j] = sum( abs( A(:,j) )), j=0:n-1, for || A ||_one, * where m and n are any size. * Has n blocks of NB threads each. Block j sums one column, A(:,j) into dwork[j]. 
* Thread i accumulates A(i,j) + A(i+NB,j) + A(i+2*NB,j) + ... into ssum[i], * then threads collectively do a sum-reduction of ssum, * and finally thread 0 saves to dwork[j]. */ extern "C" __global__ void slange_one_kernel( int m, int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { __shared__ float ssum[NB_X]; int tx = threadIdx.x; A += blockIdx.x*lda; // column j ssum[tx] = 0; for( int i = tx; i < m; i += NB_X ) { ssum[tx] += MAGMA_S_ABS( A[i] ); } magma_sum_reduce< NB_X >( tx, ssum ); if ( tx == 0 ) { dwork[ blockIdx.x ] = ssum[0]; } } /* Based on slange_one_kernel code, above. * Computes col sums dwork[j] = sum( abs( A(:,j) )^2 ), j=0:n-1, for || A ||_F, * where m and n are any size. * Has n blocks of NB threads each. Block j sums one column, A(:,j) into dwork[j]. * Thread i accumulates A(i,j) + A(i+NB,j) + A(i+2*NB,j) + ... into ssum[i], * then threads collectively do a sum-reduction of ssum, * and finally thread 0 saves to dwork[j]. */ extern "C" __global__ void slange_fro_kernel( int m, int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { __shared__ float ssum[NB_X]; int tx = threadIdx.x; A += blockIdx.x*lda; // column j ssum[tx] = 0; for( int i = tx; i < m; i += NB_X ) { #ifdef COMPLEX float a = MAGMA_S_ABS( A[i] ); #else float a = A[i]; #endif ssum[tx] += a*a; } magma_sum_reduce< NB_X >( tx, ssum ); if ( tx == 0 ) { dwork[ blockIdx.x ] = ssum[0]; } } /***************************************************************************//** Purpose ------- SLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real matrix A. Description ----------- SLANGE returns the value SLANGE = ( max(abs(A(i,j))), NORM = MagmaMaxNorm ( ( norm1(A), NORM = MagmaOneNorm ( ( normI(A), NORM = MagmaInfNorm ( ( normF(A), NORM = MagmaFrobeniusNorm where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments --------- @param[in] norm magma_norm_t Specifies the value to be returned in SLANGE as described above. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. When M = 0, SLANGE is set to zero. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. When N = 0, SLANGE is set to zero. @param[in] dA REAL array on the GPU, dimension (LDDA,N) The m by n matrix A. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(M,1). @param dwork (workspace) REAL array on the GPU, dimension (LWORK). @param[in] lwork INTEGER The dimension of the array WORK. If NORM = MagmaInfNorm or MagmaMaxNorm, LWORK >= max( 1, M ). If NORM = MagmaOneNorm, LWORK >= max( 1, N ). Note this is different than LAPACK, which requires WORK only for NORM = MagmaInfNorm, and does not pass LWORK. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lange *******************************************************************************/ extern "C" float magmablas_slange( magma_norm_t norm, magma_int_t m, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dwork, magma_int_t lwork, magma_queue_t queue ) { magma_int_t info = 0; if ( ! 
(norm == MagmaInfNorm || norm == MagmaMaxNorm || norm == MagmaOneNorm || norm == MagmaFrobeniusNorm) ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -5; else if ( ((norm == MagmaInfNorm || norm == MagmaMaxNorm) && (lwork < m)) || ((norm == MagmaOneNorm || norm == MagmaFrobeniusNorm ) && (lwork < n)) ) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( m == 0 || n == 0 ) return 0; //int i; dim3 threads( NB_X ); float result = -1; if ( norm == MagmaInfNorm ) { dim3 grid( magma_ceildiv( m, NB_X ) ); slange_inf_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, dA, ldda, dwork ); magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( m, dwork ); } else if ( norm == MagmaMaxNorm ) { dim3 grid( magma_ceildiv( m, NB_X ) ); slange_max_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, dA, ldda, dwork ); magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( m, dwork ); } else if ( norm == MagmaOneNorm ) { dim3 grid( n ); slange_one_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, dA, ldda, dwork ); magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( n, dwork ); // note n instead of m } else if ( norm == MagmaFrobeniusNorm ) { dim3 grid( n ); slange_fro_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( m, n, dA, ldda, dwork ); magma_sum_reduce_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( n, dwork ); // note n instead of m } magma_sgetvector( 1, &dwork[0], 1, &result, 1, queue ); if( norm == MagmaFrobeniusNorm ) { result = sqrt(result); // Square root for final result. } return result; }
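slange_one_kernel and slange_fro_kernel above assign one thread block per column and fold the per-thread partial sums with MAGMA's magma_sum_reduce helper. A stand-alone sketch of the same one-block-per-column reduction, without the MAGMA dependency (the kernel name col_abs_sum and the test sizes are illustrative):

#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

#define NB 64

// One block per column: each thread accumulates a strided partial sum of |A(:,j)|
// into shared memory, the block reduces it in log2(NB) steps, and thread 0
// writes the column result. n == gridDim.x; kept to mirror the MAGMA signature.
__global__ void col_abs_sum(int m, int n, const float *A, int lda, float *out) {
    __shared__ float ssum[NB];
    int tx = threadIdx.x;
    A += (size_t)blockIdx.x * lda;                 // column j

    float s = 0.0f;
    for (int i = tx; i < m; i += NB) s += fabsf(A[i]);
    ssum[tx] = s;
    __syncthreads();

    for (int offset = NB / 2; offset > 0; offset >>= 1) {
        if (tx < offset) ssum[tx] += ssum[tx + offset];
        __syncthreads();
    }
    if (tx == 0) out[blockIdx.x] = ssum[0];
}

int main() {
    const int m = 128, n = 4, lda = m;
    std::vector<float> hA((size_t)lda * n, 1.0f);  // every |a_ij| = 1, so each column sums to m
    float *dA = nullptr, *dout = nullptr;
    hipMalloc(&dA, sizeof(float) * hA.size());
    hipMalloc(&dout, sizeof(float) * n);
    hipMemcpy(dA, hA.data(), sizeof(float) * hA.size(), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(col_abs_sum, dim3(n), dim3(NB), 0, 0, m, n, dA, lda, dout);

    float h[n];
    hipMemcpy(h, dout, sizeof(h), hipMemcpyDeviceToHost);
    for (int j = 0; j < n; ++j) std::printf("col %d sum = %g\n", j, h[j]);
    hipFree(dA);
    hipFree(dout);
    return 0;
}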
bb04854d8f7b8922eb5eda658606e96eefbf9a2e.hip
// !!! This is a file automatically generated by hipify!!! /* @Author: 3sne ( Mukur Panchani ) @FileName: q4MatAddAndMul2D.cu @Task: CUDA program that calculates multiplication and addition of two matrices using 2D Grid & 2D Block. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <hip/hip_runtime.h> __device__ int getTid() { int blockSkip = (blockIdx.y * gridDim.x * blockDim.x * blockDim.y); int rowSkip = (threadIdx.y * gridDim.x * blockDim.x); int rowDisplacement = (blockIdx.x * blockDim.x) + threadIdx.x; int tid = blockSkip + rowSkip + rowDisplacement; return tid; } __global__ void MatAddElementThread(int *a, int *b, int *d) { int tid = getTid(); d[tid] = a[tid] + b[tid]; } __global__ void MatMulElementThread(int *a, int *b, int *c, int n, int q) { int tid = getTid(); int initDisp = tid % q; c[tid] = 0; for (int k = 0; k < n; k++) { c[tid] += a[tid - initDisp + k] * b[k * q + initDisp]; } } int main() { system("clear"); int *matA, *matB, *matProd, *matSum; int *da, *db, *dc, *dd; int m, n, p, q; int willMul = 1, willAdd = 1; printf("[NOTE] Both Matrices should have SAME and EVEN dimensions to successfully compute both the sum and the product (i.e, EVEN SQUARE MATRICES)\n"); printf("\n== Enter Dimension of Matrix A (m x n) ==\n"); printf("m >> "); scanf("%d", &m); printf("n >> "); scanf("%d", &n); matA = (int*)malloc(sizeof(int) * m * n); printf("== Matrix A Elements ==\n"); for(int i = 0; i < m * n; i++) { scanf("%d", &matA[i]); } printf("\n== Enter Dimension of Matrix B (p x q) ==\n"); printf("p >> "); scanf("%d", &p); printf("q >> "); scanf("%d", &q); if ( m % 2 || n % 2 || p % 2 || q % 2) { free(matA); printf("[PROGRAM] You don't listen to me, do you? I told you O N L Y E V E N dimensions.....\n"); sleep(2); printf("[PROGRAM] Why this restriction? Because I use a fixed block size of (2, 2). For odd dimensions, I don't like keeping some threads lonely. Thread lives matter. /\n"); sleep(2); printf("[PROGRAM] But you clearly don't care, so don't I. 
Byeee Noob \\(^.^)\n"); sleep(2); exit(EXIT_FAILURE); } if (n != p) { willMul = 0; printf("[MUL ERROR] n & p must be equal, Skipping Matrix Multiplication...\n"); sleep(1); } if (m != p || n != q) { willAdd = 0; printf("[ADD ERROR] Dimensions of matA and matB are unequal, skipping Matrix Addition...\n"); sleep(1); } matB = (int*)malloc(sizeof(int) * p * q); printf("== Matrix B Elements ==\n"); for(int i = 0; i < p * q; i++) { scanf("%d", &matB[i]); } matProd = (int*)malloc(sizeof(int) * m * q); matSum = (int*)malloc(sizeof(int) * m * n); hipMalloc((void **) &da, sizeof(int) * m * n); hipMalloc((void **) &db, sizeof(int) * p * q); hipMalloc((void **) &dc, sizeof(int) * m * q); hipMalloc((void **) &dd, sizeof(int) * m * n); hipMemcpy(da, matA, sizeof(int) * m * n, hipMemcpyHostToDevice); hipMemcpy(db, matB, sizeof(int) * p * q, hipMemcpyHostToDevice); dim3 grid_conf (q / 2, m / 2); dim3 block_conf (2, 2); if (willMul) { hipLaunchKernelGGL(( MatMulElementThread), dim3(grid_conf), dim3(block_conf), 0, 0, da, db, dc, n, q); hipMemcpy(matProd, dc, sizeof(int) * m * q, hipMemcpyDeviceToHost); printf("\n-=Result of Multiplication=-\n"); printf("----------------------------\n"); for (int i = 0; i < m; i++ ) { for (int j = 0; j < q; j++) { printf("%6d ", matProd[i * q + j]); } printf("\n"); } } if (willAdd) { hipLaunchKernelGGL(( MatAddElementThread), dim3(grid_conf), dim3(block_conf), 0, 0, da, db, dd); hipMemcpy(matSum, dd, sizeof(int) * m * n, hipMemcpyDeviceToHost); printf("\n-=Result of Addition=-\n"); printf("----------------------\n"); for (int i = 0; i < m; i++ ) { for (int j = 0; j < n; j++) { printf("%6d ", matSum[i * n + j]); } printf("\n"); } } if (!willAdd && !willMul) { printf("Bad Matrix dimensions, exiting...\n"); } printf("\n"); hipFree(da); hipFree(db); hipFree(dc); hipFree(dd); free(matA); free(matB); free(matProd); free(matSum); return 0; }
bb04854d8f7b8922eb5eda658606e96eefbf9a2e.cu
/* @Author: 3sne ( Mukur Panchani ) @FileName: q4MatAddAndMul2D.cu @Task: CUDA program that calculates multiplication and addition of two matrices using 2D Grid & 2D Block. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <cuda_runtime.h> __device__ int getTid() { int blockSkip = (blockIdx.y * gridDim.x * blockDim.x * blockDim.y); int rowSkip = (threadIdx.y * gridDim.x * blockDim.x); int rowDisplacement = (blockIdx.x * blockDim.x) + threadIdx.x; int tid = blockSkip + rowSkip + rowDisplacement; return tid; } __global__ void MatAddElementThread(int *a, int *b, int *d) { int tid = getTid(); d[tid] = a[tid] + b[tid]; } __global__ void MatMulElementThread(int *a, int *b, int *c, int n, int q) { int tid = getTid(); int initDisp = tid % q; c[tid] = 0; for (int k = 0; k < n; k++) { c[tid] += a[tid - initDisp + k] * b[k * q + initDisp]; } } int main() { system("clear"); int *matA, *matB, *matProd, *matSum; int *da, *db, *dc, *dd; int m, n, p, q; int willMul = 1, willAdd = 1; printf("[NOTE] Both Matrices should have SAME and EVEN dimensions to successfully compute both the sum and the product (i.e, EVEN SQUARE MATRICES)\n"); printf("\n== Enter Dimension of Matrix A (m x n) ==\n"); printf("m >> "); scanf("%d", &m); printf("n >> "); scanf("%d", &n); matA = (int*)malloc(sizeof(int) * m * n); printf("== Matrix A Elements ==\n"); for(int i = 0; i < m * n; i++) { scanf("%d", &matA[i]); } printf("\n== Enter Dimension of Matrix B (p x q) ==\n"); printf("p >> "); scanf("%d", &p); printf("q >> "); scanf("%d", &q); if ( m % 2 || n % 2 || p % 2 || q % 2) { free(matA); printf("[PROGRAM] You don't listen to me, do you? I told you O N L Y E V E N dimensions.....\n"); sleep(2); printf("[PROGRAM] Why this restriction? Because I use a fixed block size of (2, 2). For odd dimensions, I don't like keeping some threads lonely. Thread lives matter. /\n"); sleep(2); printf("[PROGRAM] But you clearly don't care, so don't I. 
Byeee Noob \\(^.^)\n"); sleep(2); exit(EXIT_FAILURE); } if (n != p) { willMul = 0; printf("[MUL ERROR] n & p must be equal, Skipping Matrix Multiplication...\n"); sleep(1); } if (m != p || n != q) { willAdd = 0; printf("[ADD ERROR] Dimensions of matA and matB are unequal, skipping Matrix Addition...\n"); sleep(1); } matB = (int*)malloc(sizeof(int) * p * q); printf("== Matrix B Elements ==\n"); for(int i = 0; i < p * q; i++) { scanf("%d", &matB[i]); } matProd = (int*)malloc(sizeof(int) * m * q); matSum = (int*)malloc(sizeof(int) * m * n); cudaMalloc((void **) &da, sizeof(int) * m * n); cudaMalloc((void **) &db, sizeof(int) * p * q); cudaMalloc((void **) &dc, sizeof(int) * m * q); cudaMalloc((void **) &dd, sizeof(int) * m * n); cudaMemcpy(da, matA, sizeof(int) * m * n, cudaMemcpyHostToDevice); cudaMemcpy(db, matB, sizeof(int) * p * q, cudaMemcpyHostToDevice); dim3 grid_conf (q / 2, m / 2); dim3 block_conf (2, 2); if (willMul) { MatMulElementThread<<<grid_conf, block_conf>>>(da, db, dc, n, q); cudaMemcpy(matProd, dc, sizeof(int) * m * q, cudaMemcpyDeviceToHost); printf("\n-=Result of Multiplication=-\n"); printf("----------------------------\n"); for (int i = 0; i < m; i++ ) { for (int j = 0; j < q; j++) { printf("%6d ", matProd[i * q + j]); } printf("\n"); } } if (willAdd) { MatAddElementThread<<<grid_conf, block_conf>>>(da, db, dd); cudaMemcpy(matSum, dd, sizeof(int) * m * n, cudaMemcpyDeviceToHost); printf("\n-=Result of Addition=-\n"); printf("----------------------\n"); for (int i = 0; i < m; i++ ) { for (int j = 0; j < n; j++) { printf("%6d ", matSum[i * n + j]); } printf("\n"); } } if (!willAdd && !willMul) { printf("Bad Matrix dimensions, exiting...\n"); } printf("\n"); cudaFree(da); cudaFree(db); cudaFree(dc); cudaFree(dd); free(matA); free(matB); free(matProd); free(matSum); return 0; }
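getTid in the pair above flattens a 2D-grid/2D-block launch into a single row-major index; it reduces to the usual row/column formulation whenever gridDim.x*blockDim.x equals the number of matrix columns, which holds here because the grid is (q/2, m/2) with 2x2 blocks. Note also that MatMulElementThread reads A with a row stride of q, so it implicitly assumes q == n, consistent with the program's own "even square matrices" requirement. The following standalone sketch (hypothetical kernel and sizes, not part of the program above) checks that equivalence:

#include <cstdio>
#include <cuda_runtime.h>

// Each thread computes its index both ways and flags any mismatch.
__global__ void checkIndexing(int *mismatch, int ncols) {
    // getTid-style flattening (same arithmetic as in the file above)
    int tid = blockIdx.y * gridDim.x * blockDim.x * blockDim.y
            + threadIdx.y * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;

    // conventional row/column formulation
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = row * ncols + col;

    if (tid != idx) atomicAdd(mismatch, 1);
}

int main() {
    const int m = 8, n = 6;          // hypothetical even dimensions
    int *d_mismatch, h_mismatch = 0;
    cudaMalloc(&d_mismatch, sizeof(int));
    cudaMemcpy(d_mismatch, &h_mismatch, sizeof(int), cudaMemcpyHostToDevice);

    dim3 block(2, 2);
    dim3 grid(n / 2, m / 2);         // gridDim.x*blockDim.x == n, as in the program above
    checkIndexing<<<grid, block>>>(d_mismatch, n);

    cudaMemcpy(&h_mismatch, d_mismatch, sizeof(int), cudaMemcpyDeviceToHost);
    printf("mismatches: %d\n", h_mismatch);   // expected: 0
    cudaFree(d_mismatch);
    return 0;
}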
c7a4af6f07afed1ff1aec3ae8f4b8b2e1a316a3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel1_t1 [7][1]; static int dims_update_halo_kernel1_t1_h [7][1] = {0}; //user function __device__ inline void update_halo_kernel1_t1_gpu(ACC<double> &density0, ACC<double> &energy0, ACC<double> &energy1, ACC<double> &u, ACC<double> &p, ACC<double> &sd, const int* fields) { if(fields[FIELD_DENSITY] == 1) density0(0,0) = density0(0,-1); if(fields[FIELD_ENERGY0] == 1) energy0(0,0) = energy0(0,-1); if(fields[FIELD_ENERGY1] == 1) energy1(0,0) = energy1(0,-1); if(fields[FIELD_U] == 1) u(0,0) = u(0,-1); if(fields[FIELD_P] == 1) p(0,0) = p(0,-1); if(fields[FIELD_SD] == 1) sd(0,0) = sd(0,-1); } __global__ void ops_update_halo_kernel1_t1( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, const int* __restrict arg6, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[0][0]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[1][0]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[2][0]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[3][0]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[4][0]; arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[5][0]; if (idx_x < size0 && idx_y < size1) { ACC<double> argp0(dims_update_halo_kernel1_t1[0][0], arg0); ACC<double> argp1(dims_update_halo_kernel1_t1[1][0], arg1); ACC<double> argp2(dims_update_halo_kernel1_t1[2][0], arg2); ACC<double> argp3(dims_update_halo_kernel1_t1[3][0], arg3); ACC<double> argp4(dims_update_halo_kernel1_t1[4][0], arg4); ACC<double> argp5(dims_update_halo_kernel1_t1[5][0], arg5); update_halo_kernel1_t1_gpu(argp0, argp1, argp2, argp3, argp4, argp5, arg6); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { #else void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; #endif //Timing double t1,t2,c1,c2; ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,7,range,52)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(52,"update_halo_kernel1_t1"); OPS_kernels[52].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[2]; #endif #ifdef OPS_MPI if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if 
(xdim0 != dims_update_halo_kernel1_t1_h[0][0] || xdim1 != dims_update_halo_kernel1_t1_h[1][0] || xdim2 != dims_update_halo_kernel1_t1_h[2][0] || xdim3 != dims_update_halo_kernel1_t1_h[3][0] || xdim4 != dims_update_halo_kernel1_t1_h[4][0] || xdim5 != dims_update_halo_kernel1_t1_h[5][0]) { dims_update_halo_kernel1_t1_h[0][0] = xdim0; dims_update_halo_kernel1_t1_h[1][0] = xdim1; dims_update_halo_kernel1_t1_h[2][0] = xdim2; dims_update_halo_kernel1_t1_h[3][0] = xdim3; dims_update_halo_kernel1_t1_h[4][0] = xdim4; dims_update_halo_kernel1_t1_h[5][0] = xdim5; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_t1, dims_update_halo_kernel1_t1_h, sizeof(dims_update_halo_kernel1_t1))); } int *arg6h = (int *)arg6.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg6.data = OPS_consts_h + consts_bytes; arg6.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); char *p_a[7]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); p_a[5] = (char *)args[5].data_d + base5; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 7); ops_halo_exchanges(args,7,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[52].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel1_t1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (int 
*)arg6.data_d,x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[52].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 7); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[52].mpi_time += t2-t1; OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg5); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 52; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 52; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 7; desc->args = (ops_arg*)malloc(7*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int)); desc->args[6].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_t1_execute; if (OPS_diags > 1) { ops_timing_realloc(52,"update_halo_kernel1_t1"); } ops_enqueue_kernel(desc); } #endif
c7a4af6f07afed1ff1aec3ae8f4b8b2e1a316a3e.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel1_t1 [7][1]; static int dims_update_halo_kernel1_t1_h [7][1] = {0}; //user function __device__ inline void update_halo_kernel1_t1_gpu(ACC<double> &density0, ACC<double> &energy0, ACC<double> &energy1, ACC<double> &u, ACC<double> &p, ACC<double> &sd, const int* fields) { if(fields[FIELD_DENSITY] == 1) density0(0,0) = density0(0,-1); if(fields[FIELD_ENERGY0] == 1) energy0(0,0) = energy0(0,-1); if(fields[FIELD_ENERGY1] == 1) energy1(0,0) = energy1(0,-1); if(fields[FIELD_U] == 1) u(0,0) = u(0,-1); if(fields[FIELD_P] == 1) p(0,0) = p(0,-1); if(fields[FIELD_SD] == 1) sd(0,0) = sd(0,-1); } __global__ void ops_update_halo_kernel1_t1( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, const int* __restrict arg6, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[0][0]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[1][0]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[2][0]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[3][0]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[4][0]; arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_t1[5][0]; if (idx_x < size0 && idx_y < size1) { ACC<double> argp0(dims_update_halo_kernel1_t1[0][0], arg0); ACC<double> argp1(dims_update_halo_kernel1_t1[1][0], arg1); ACC<double> argp2(dims_update_halo_kernel1_t1[2][0], arg2); ACC<double> argp3(dims_update_halo_kernel1_t1[3][0], arg3); ACC<double> argp4(dims_update_halo_kernel1_t1[4][0], arg4); ACC<double> argp5(dims_update_halo_kernel1_t1[5][0], arg5); update_halo_kernel1_t1_gpu(argp0, argp1, argp2, argp3, argp4, argp5, arg6); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { #else void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; #endif //Timing double t1,t2,c1,c2; ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,7,range,52)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(52,"update_halo_kernel1_t1"); OPS_kernels[52].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[2]; #endif #ifdef OPS_MPI if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if (xdim0 != dims_update_halo_kernel1_t1_h[0][0] || xdim1 != 
dims_update_halo_kernel1_t1_h[1][0] || xdim2 != dims_update_halo_kernel1_t1_h[2][0] || xdim3 != dims_update_halo_kernel1_t1_h[3][0] || xdim4 != dims_update_halo_kernel1_t1_h[4][0] || xdim5 != dims_update_halo_kernel1_t1_h[5][0]) { dims_update_halo_kernel1_t1_h[0][0] = xdim0; dims_update_halo_kernel1_t1_h[1][0] = xdim1; dims_update_halo_kernel1_t1_h[2][0] = xdim2; dims_update_halo_kernel1_t1_h[3][0] = xdim3; dims_update_halo_kernel1_t1_h[4][0] = xdim4; dims_update_halo_kernel1_t1_h[5][0] = xdim5; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_t1, dims_update_halo_kernel1_t1_h, sizeof(dims_update_halo_kernel1_t1))); } int *arg6h = (int *)arg6.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg6.data = OPS_consts_h + consts_bytes; arg6.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); char *p_a[7]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); p_a[5] = (char *)args[5].data_d + base5; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 7); ops_halo_exchanges(args,7,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[52].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) ops_update_halo_kernel1_t1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d,x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { 
cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[52].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 7); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[52].mpi_time += t2-t1; OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg5); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 52; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 52; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 7; desc->args = (ops_arg*)malloc(7*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int)); desc->args[6].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_t1_execute; if (OPS_diags > 1) { ops_timing_realloc(52,"update_halo_kernel1_t1"); } ops_enqueue_kernel(desc); } #endif
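The generated host stub above sizes its grid with the ceiling-division pattern (size-1)/block_size + 1 and relies on the in-kernel guard if (idx_x < size0 && idx_y < size1) to make partial edge blocks harmless. A generic sketch of that pattern, independent of the OPS runtime (all names below are illustrative, not OPS APIs):

#include <cuda_runtime.h>

__global__ void fill2D(double *out, int xdim, int size0, int size1, double value) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < size0 && y < size1)            // guard: threads past the range do nothing
        out[x + y * xdim] = value;         // xdim is the padded row stride, as in the OPS stub
}

static inline int ceildiv(int a, int b) { return (a - 1) / b + 1; }   // same (n-1)/b + 1 as above

void launch_fill2D(double *d_out, int xdim, int size0, int size1, double value) {
    dim3 block(32, 8);                                  // stand-ins for OPS_block_size_x/y
    dim3 grid(ceildiv(size0, 32), ceildiv(size1, 8));
    if (size0 > 0 && size1 > 0)                         // the stub guards empty ranges the same way
        fill2D<<<grid, block>>>(d_out, xdim, size0, size1, value);
}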
3c27771ace79bc18cb34b5315c97a74bd299ca97.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <device_launch_parameters.h> #include <thrust/system_error.h> #include <thrust/system/hip/error.h> #include <sstream> #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) extern "C" __declspec(dllexport) void RCUDA2(double *z, double *noise, double *noisesq, double *variance, double *n, int *count, int *ncol, int *n_out, double *outz, double *outw, double *outwsq, double *outn, double *outvg, double *outneff, double *outvm, double *outvstat); __global__ void gallantSmoothing(const double *A, const double *B, const double *C, const double *D, const double *E, double *outz, double *outw, double *outwsq, double *outvg, double *outn, int x, int ncol, double *outvm, double *outneff, double *outvstat) { int col = (blockIdx.x * blockDim.x + threadIdx.x) * 3; int row = (blockIdx.y * blockDim.y + threadIdx.y) * 3; int id = col + row * (ncol); int colo = (blockIdx.x * blockDim.x + (threadIdx.x)); int rowo = (blockIdx.y * blockDim.y + (threadIdx.y)); int ido = colo + rowo * (ncol / 3); //input id // printf("this is the id %d \n", id); // define focal window processing dimensions if ( ( (colo > 0) && (colo < (ncol / 3) )) && ( (rowo > 0) && (rowo < (ncol / 3) ))) { double zbarWin[] = { A[id], A[id + 1], A[id + 2], A[id + ncol], A[id + 1 + ncol], A[id + 2 + ncol], A[id + (ncol * 2)], A[id + 1 + (ncol * 2)], A[id + 2 + (ncol * 2)] }; double wWin[] = { B[id], B[id + 1], B[id + 2], B[id + ncol], B[id + 1 + ncol], B[id + 2 + ncol], B[id + (ncol * 2)], B[id + 1 + (ncol * 2)], B[id + 2 + (ncol * 2)] }; double wsqWin[] = { C[id], C[id + 1], C[id + 2], C[id + ncol], C[id + 1 + ncol], C[id + 2 + ncol], C[id + (ncol * 2)], C[id + 1 + (ncol * 2)], C[id + 2 + (ncol * 2)] }; double vgWin[] = { D[id], D[id + 1], D[id + 2], D[id + ncol], D[id + 1 + ncol], D[id + 2 + ncol], D[id + (ncol * 2)], D[id + 1 + (ncol * 2)], D[id + 2 + (ncol * 2)] }; double nWin[] = { E[id], E[id + 1], E[id + 2], E[id + ncol], E[id + 1 + ncol], E[id + 2 + ncol], E[id + (ncol * 2)], E[id + 1 + (ncol * 2)], E[id + 2 + (ncol * 2)] }; // printf("this is wwin: %g \n", double(wWin[0]) ); // execute w and wsq double w = 0.0; for (int i = 0; i < 9; i++) { w = w + wWin[i]; } outw[ido] = w; // printf("this is w: %g \n", w); double wsq = 0.0; for (int i = 0; i < 9; i++) { wsq = wsq + wsqWin[i]; } // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outwsq[ido] = wsq; // } // printf("this is wsq: %d \n", wsq); // execute zbar double tempZbar = 0.0; double tempWZ[9]; for (int i = 0; i < 9; i++) { tempWZ[i] = (zbarWin[i] * wWin[i])/w; } for (int i = 0; i < 9; i++) { tempZbar = tempZbar + tempWZ[i]; } double zbar = tempZbar ; // printf("this is zbar: %g \n", zbar); // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outz[ido] = zbar; // } // execute vbg double tempvbgArray[9]; for (int i = 0; i < 9; i++) { double zbarDif= (zbarWin[i] - zbar); tempvbgArray[i] = (wWin[i] * pow(zbarDif, 2) )/w; } double tempvbgNum = 0.0; for (int i = 0; i < 9; i++) { tempvbgNum = (tempvbgNum + tempvbgArray[i]); } double vbg = (tempvbgNum ); /* if (vbg > 20) { printf("this is vbg: %g \n", vbg); } */ // execute vwg double tempvwgArray[9]; for (int i = 0; i < 9; i++) { tempvwgArray[i] = (wWin[i] * vgWin[i]); } double tempvwg = 0; for (int i = 0; 
i < 9; i++) { tempvwg = (tempvwg + tempvwgArray[i])/w; } double vwg = (tempvwg); // printf("this is vwg: %g \n", vwg); // execute vg double vg = (vbg + vwg); // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outvg[ido] = vg; // } // printf("this is vg: %g \n", vg); // execute vm // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outvm[ido] = 1 / w; // } // printf("this is vm: %d \n", vm); // execute n double n = 0.0; for (int i = 0; i < 9; i++) { n = n + nWin[i]; } // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outn[ido] = n; // } // execute neff // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outneff[ido] = (pow(w, 2) / wsq); // } // printf("this is neff: %g \n", neff); // execute mv double mv = n / w; // printf("this is mv: %g \n", mv); // get vstats // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outvstat[ido] = vg / mv; } } inline void __cudaSafeCall(hipError_t err, const char *file, const int line) { #ifdef CUDA_ERROR_CHECK if (hipSuccess != err) { fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); // exit(-1); } #endif return; } inline void __cudaCheckError(const char *file, const int line) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); // exit(-1); } // More careful checking. However, this will affect performance. // Comment away if needed. err = hipDeviceSynchronize(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); // exit(-1); } #endif return; } // call host function void RCUDA2(double *z, double *noise, double *noisesq, double *variance, double *n, int *count, int *ncol, int *n_out, double *outz, double *outw, double *outwsq, double *outn, double *outvg, double *outneff, double *outvm, double *outvstat) { // initialize device memory variables double *d_n, *d_vg, *d_wsq, *d_z, *d_w, *d_outz, *d_outw, *d_outwsq, *d_outvg, *d_outn, *d_vm, *d_neff, *d_vstat, *d_xcrit; //inputs and outputs //define grid and total window size dim3 grid((*ncol / 24), (*ncol / 24)); // grid of 2D blocks //these dimensions should be equal to the window dimensions of a raster // the product of the 2d window dim should not exceed 1024 threads (depending on your GPU) dim3 block(24, 24); // block of 2D threads // allocate device memory for computations // input allocation CudaSafeCall(hipMalloc((void**)&d_z, *count * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_w, *count * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_wsq, *count * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_vg, *count * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_n, *count * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_xcrit, *count * sizeof(double))); //intermediate allocations CudaSafeCall(hipMalloc((void**)&d_vm, *n_out * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_neff, *n_out * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_vstat, *n_out * sizeof(double))); // output allocation CudaSafeCall(hipMalloc((void**)&d_outz, *n_out * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_outw, *n_out * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_outwsq, *n_out * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_outvg, *n_out * sizeof(double))); CudaSafeCall(hipMalloc((void**)&d_outn, *n_out * 
sizeof(double))); // copy host memory to allocated device memory CudaSafeCall(hipMemcpy(d_z, z, *count * sizeof(double), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(d_w, noise, *count * sizeof(double), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(d_wsq, noisesq, *count * sizeof(double), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(d_vg, variance, *count * sizeof(double), hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(d_n, n, *count * sizeof(double), hipMemcpyHostToDevice)); // printf("this string works"); // call some_function which may throw something // launch kernel with predefined block and thread numbers gallantSmoothing << <grid, block >> > (d_z, d_w, d_wsq, d_vg, d_n, d_outz, d_outw, d_outwsq, d_outvg, d_outn, *count, *ncol, d_vm, d_neff, d_vstat); hipDeviceSynchronize(); // gallantSmoothing << <grid, block >> > (d_z, *ncol); CudaCheckError(); // copy device memory to host memory CudaSafeCall(hipMemcpy(outz, d_outz, *n_out * sizeof(double), hipMemcpyDeviceToHost)); CudaSafeCall(hipMemcpy(outw, d_outw, *n_out * sizeof(double), hipMemcpyDeviceToHost)); CudaSafeCall(hipMemcpy(outwsq, d_outwsq, *n_out * sizeof(double), hipMemcpyDeviceToHost)); CudaSafeCall(hipMemcpy(outvg, d_outvg, *n_out * sizeof(double), hipMemcpyDeviceToHost)); CudaSafeCall(hipMemcpy(outn, d_outn, *n_out * sizeof(double), hipMemcpyDeviceToHost)); CudaSafeCall(hipMemcpy(outneff, d_neff, *n_out * sizeof(double), hipMemcpyDeviceToHost)); CudaSafeCall(hipMemcpy(outvm, d_vm, *n_out * sizeof(double), hipMemcpyDeviceToHost)); CudaSafeCall(hipMemcpy(outvstat, d_vstat, *n_out * sizeof(double), hipMemcpyDeviceToHost)); hipFree(d_z); hipFree(d_w); hipFree(d_wsq); hipFree(d_vg); hipFree(d_n); hipFree(d_outz); hipFree(d_outw); hipFree(d_outwsq); hipFree(d_xcrit); hipFree(d_vm); hipFree(d_vstat); hipFree(d_outvg); hipFree(d_outn); hipFree(d_neff); hipDeviceReset(); }
3c27771ace79bc18cb34b5315c97a74bd299ca97.cu
#include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <device_launch_parameters.h> #include <thrust/system_error.h> #include <thrust/system/cuda/error.h> #include <sstream> #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) extern "C" __declspec(dllexport) void RCUDA2(double *z, double *noise, double *noisesq, double *variance, double *n, int *count, int *ncol, int *n_out, double *outz, double *outw, double *outwsq, double *outn, double *outvg, double *outneff, double *outvm, double *outvstat); __global__ void gallantSmoothing(const double *A, const double *B, const double *C, const double *D, const double *E, double *outz, double *outw, double *outwsq, double *outvg, double *outn, int x, int ncol, double *outvm, double *outneff, double *outvstat) { int col = (blockIdx.x * blockDim.x + threadIdx.x) * 3; int row = (blockIdx.y * blockDim.y + threadIdx.y) * 3; int id = col + row * (ncol); int colo = (blockIdx.x * blockDim.x + (threadIdx.x)); int rowo = (blockIdx.y * blockDim.y + (threadIdx.y)); int ido = colo + rowo * (ncol / 3); //input id // printf("this is the id %d \n", id); // define focal window processing dimensions if ( ( (colo > 0) && (colo < (ncol / 3) )) && ( (rowo > 0) && (rowo < (ncol / 3) ))) { double zbarWin[] = { A[id], A[id + 1], A[id + 2], A[id + ncol], A[id + 1 + ncol], A[id + 2 + ncol], A[id + (ncol * 2)], A[id + 1 + (ncol * 2)], A[id + 2 + (ncol * 2)] }; double wWin[] = { B[id], B[id + 1], B[id + 2], B[id + ncol], B[id + 1 + ncol], B[id + 2 + ncol], B[id + (ncol * 2)], B[id + 1 + (ncol * 2)], B[id + 2 + (ncol * 2)] }; double wsqWin[] = { C[id], C[id + 1], C[id + 2], C[id + ncol], C[id + 1 + ncol], C[id + 2 + ncol], C[id + (ncol * 2)], C[id + 1 + (ncol * 2)], C[id + 2 + (ncol * 2)] }; double vgWin[] = { D[id], D[id + 1], D[id + 2], D[id + ncol], D[id + 1 + ncol], D[id + 2 + ncol], D[id + (ncol * 2)], D[id + 1 + (ncol * 2)], D[id + 2 + (ncol * 2)] }; double nWin[] = { E[id], E[id + 1], E[id + 2], E[id + ncol], E[id + 1 + ncol], E[id + 2 + ncol], E[id + (ncol * 2)], E[id + 1 + (ncol * 2)], E[id + 2 + (ncol * 2)] }; // printf("this is wwin: %g \n", double(wWin[0]) ); // execute w and wsq double w = 0.0; for (int i = 0; i < 9; i++) { w = w + wWin[i]; } outw[ido] = w; // printf("this is w: %g \n", w); double wsq = 0.0; for (int i = 0; i < 9; i++) { wsq = wsq + wsqWin[i]; } // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outwsq[ido] = wsq; // } // printf("this is wsq: %d \n", wsq); // execute zbar double tempZbar = 0.0; double tempWZ[9]; for (int i = 0; i < 9; i++) { tempWZ[i] = (zbarWin[i] * wWin[i])/w; } for (int i = 0; i < 9; i++) { tempZbar = tempZbar + tempWZ[i]; } double zbar = tempZbar ; // printf("this is zbar: %g \n", zbar); // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outz[ido] = zbar; // } // execute vbg double tempvbgArray[9]; for (int i = 0; i < 9; i++) { double zbarDif= (zbarWin[i] - zbar); tempvbgArray[i] = (wWin[i] * pow(zbarDif, 2) )/w; } double tempvbgNum = 0.0; for (int i = 0; i < 9; i++) { tempvbgNum = (tempvbgNum + tempvbgArray[i]); } double vbg = (tempvbgNum ); /* if (vbg > 20) { printf("this is vbg: %g \n", vbg); } */ // execute vwg double tempvwgArray[9]; for (int i = 0; i < 9; i++) { tempvwgArray[i] = (wWin[i] * vgWin[i]); } double tempvwg = 0; for (int i = 0; i < 9; i++) { tempvwg = (tempvwg + tempvwgArray[i])/w; } double vwg = 
(tempvwg); // printf("this is vwg: %g \n", vwg); // execute vg double vg = (vbg + vwg); // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outvg[ido] = vg; // } // printf("this is vg: %g \n", vg); // execute vm // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outvm[ido] = 1 / w; // } // printf("this is vm: %d \n", vm); // execute n double n = 0.0; for (int i = 0; i < 9; i++) { n = n + nWin[i]; } // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outn[ido] = n; // } // execute neff // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outneff[ido] = (pow(w, 2) / wsq); // } // printf("this is neff: %g \n", neff); // execute mv double mv = n / w; // printf("this is mv: %g \n", mv); // get vstats // if ((colo > 0 && colo < (ncol / 3)) && (rowo > 0 && rowo < (ncol / 3))) { outvstat[ido] = vg / mv; } } inline void __cudaSafeCall(cudaError err, const char *file, const int line) { #ifdef CUDA_ERROR_CHECK if (cudaSuccess != err) { fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); // exit(-1); } #endif return; } inline void __cudaCheckError(const char *file, const int line) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); // exit(-1); } // More careful checking. However, this will affect performance. // Comment away if needed. err = cudaDeviceSynchronize(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); // exit(-1); } #endif return; } // call host function void RCUDA2(double *z, double *noise, double *noisesq, double *variance, double *n, int *count, int *ncol, int *n_out, double *outz, double *outw, double *outwsq, double *outn, double *outvg, double *outneff, double *outvm, double *outvstat) { // initialize device memory variables double *d_n, *d_vg, *d_wsq, *d_z, *d_w, *d_outz, *d_outw, *d_outwsq, *d_outvg, *d_outn, *d_vm, *d_neff, *d_vstat, *d_xcrit; //inputs and outputs //define grid and total window size dim3 grid((*ncol / 24), (*ncol / 24)); // grid of 2D blocks //these dimensions should be equal to the window dimensions of a raster // the product of the 2d window dim should not exceed 1024 threads (depending on your GPU) dim3 block(24, 24); // block of 2D threads // allocate device memory for computations // input allocation CudaSafeCall(cudaMalloc((void**)&d_z, *count * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_w, *count * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_wsq, *count * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_vg, *count * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_n, *count * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_xcrit, *count * sizeof(double))); //intermediate allocations CudaSafeCall(cudaMalloc((void**)&d_vm, *n_out * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_neff, *n_out * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_vstat, *n_out * sizeof(double))); // output allocation CudaSafeCall(cudaMalloc((void**)&d_outz, *n_out * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_outw, *n_out * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_outwsq, *n_out * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_outvg, *n_out * sizeof(double))); CudaSafeCall(cudaMalloc((void**)&d_outn, *n_out * sizeof(double))); // copy host memory to allocated 
device memory CudaSafeCall(cudaMemcpy(d_z, z, *count * sizeof(double), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(d_w, noise, *count * sizeof(double), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(d_wsq, noisesq, *count * sizeof(double), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(d_vg, variance, *count * sizeof(double), cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(d_n, n, *count * sizeof(double), cudaMemcpyHostToDevice)); // printf("this string works"); // call some_function which may throw something // launch kernel with predefined block and thread numbers gallantSmoothing << <grid, block >> > (d_z, d_w, d_wsq, d_vg, d_n, d_outz, d_outw, d_outwsq, d_outvg, d_outn, *count, *ncol, d_vm, d_neff, d_vstat); cudaDeviceSynchronize(); // gallantSmoothing << <grid, block >> > (d_z, *ncol); CudaCheckError(); // copy device memory to host memory CudaSafeCall(cudaMemcpy(outz, d_outz, *n_out * sizeof(double), cudaMemcpyDeviceToHost)); CudaSafeCall(cudaMemcpy(outw, d_outw, *n_out * sizeof(double), cudaMemcpyDeviceToHost)); CudaSafeCall(cudaMemcpy(outwsq, d_outwsq, *n_out * sizeof(double), cudaMemcpyDeviceToHost)); CudaSafeCall(cudaMemcpy(outvg, d_outvg, *n_out * sizeof(double), cudaMemcpyDeviceToHost)); CudaSafeCall(cudaMemcpy(outn, d_outn, *n_out * sizeof(double), cudaMemcpyDeviceToHost)); CudaSafeCall(cudaMemcpy(outneff, d_neff, *n_out * sizeof(double), cudaMemcpyDeviceToHost)); CudaSafeCall(cudaMemcpy(outvm, d_vm, *n_out * sizeof(double), cudaMemcpyDeviceToHost)); CudaSafeCall(cudaMemcpy(outvstat, d_vstat, *n_out * sizeof(double), cudaMemcpyDeviceToHost)); cudaFree(d_z); cudaFree(d_w); cudaFree(d_wsq); cudaFree(d_vg); cudaFree(d_n); cudaFree(d_outz); cudaFree(d_outw); cudaFree(d_outwsq); cudaFree(d_xcrit); cudaFree(d_vm); cudaFree(d_vstat); cudaFree(d_outvg); cudaFree(d_outn); cudaFree(d_neff); cudaThreadExit(); }
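The gallantSmoothing kernel above aggregates 3x3 windows into weighted means and variance components. One detail worth flagging: in the vwg loop the running total is divided by w on every iteration (tempvwg = (tempvwg + tempvwgArray[i])/w), which is not the same as summing the nine products first and dividing once. If the intended quantity is the usual weighted average of the within-group variances, the accumulation would look like the sketch below; whether that matches the author's intent is an assumption.

// Sketch: weighted mean and variance terms for a 3x3 window, using the conventional
// definitions zbar = sum(w_i*z_i)/W, vbg = sum(w_i*(z_i-zbar)^2)/W, vwg = sum(w_i*v_i)/W
// with W = sum(w_i). Treat these formulas as assumptions, not as the original specification.
__device__ void weightedWindowStats(const double z[9], const double w[9], const double v[9],
                                    double *zbar, double *vbg, double *vwg)
{
    double W = 0.0, wz = 0.0;
    for (int i = 0; i < 9; i++) { W += w[i]; wz += w[i] * z[i]; }
    *zbar = wz / W;

    double sb = 0.0, sw = 0.0;
    for (int i = 0; i < 9; i++) {
        double d = z[i] - *zbar;
        sb += w[i] * d * d;          // between-group term
        sw += w[i] * v[i];           // within-group term
    }
    *vbg = sb / W;                   // divide the completed sums once
    *vwg = sw / W;
}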
3303ab0bac3f5f9a2cfdd72a1737d078b30d4069.hip
// !!! This is a file automatically generated by hipify!!! #include "BvhExtNode.h" #include <cassert> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <thrust/scan.h> #include <thrust/fill.h> #include <thrust/sequence.h> #include "system\CudaDevice\CudaKernelLauncher.cu" #include "utility\CudaThrustUtils.hpp" #include "utility\CudaDeviceUtils.h" namespace mn { BvhExtNodeArray::BvhExtNodeArray() {} BvhExtNodeArray::~BvhExtNodeArray() {} void BvhExtNodeArray::setup(uint primSize, uint extSize) { assert(extSize <= primSize); _primArray.setup(primSize); _bvArray.setup(extSize); _extSize = extSize; /// build attribs checkCudaErrors(hipMalloc((void**)&_attribs[PAR], sizeof(int)*extSize)); checkCudaErrors(hipMalloc((void**)&_attribs[MARK], sizeof(uint)*extSize)); checkCudaErrors(hipMalloc((void**)&_attribs[LCA], sizeof(int)*(extSize + 1))); checkCudaErrors(hipMalloc((void**)&_attribs[RCL], sizeof(int)*(extSize + 1))); checkCudaErrors(hipMalloc((void**)&_attribs[STIDX], sizeof(int)*extSize)); checkCudaErrors(hipMalloc((void**)&_attribs[SEGLEN], sizeof(uint)*extSize)); checkCudaErrors(hipMalloc((void**)&_attribs[SPLIT_METRIC], sizeof(int)*extSize)); /// build ports portptr(COMPLETE) = new BvhExtNodeCompletePort(_bvArray.portobj<0>(), _primArray.portobj<0>()); /// link ports port<COMPLETE>()->link(_attribs, PAR); } void BvhExtNodeArray::cleanup() { /// clean attribs for (int i = 0; i < NUM_ATTRIBS; i++) checkCudaErrors(hipFree(_attribs[i])); /// clean ports delete port<COMPLETE>(); _bvArray.cleanup(); _primArray.cleanup(); } void BvhExtNodeArray::clearExtNodes(int size) { checkCudaErrors(hipMemset(_attribs[PAR], 0xff, sizeof(int)*size)); checkThrustErrors(thrust::fill(thrust::device_ptr<uint>((uint*)_attribs[MARK]), thrust::device_ptr<uint>((uint*)_attribs[MARK]) + size, 7)); checkCudaErrors(hipMemset(_attribs[SEGLEN], 0, sizeof(uint)*size)); checkCudaErrors(hipMemset(_attribs[LCA], 0xff, sizeof(int)*(size + 1))); checkCudaErrors(hipMemset(_attribs[RCL], 0xff, sizeof(int)*(size + 1))); _bvArray.clear(size); } void BvhExtNodeArray::clearExtBvs(int size) { _bvArray.clear(size); } int BvhExtNodeArray::buildExtNodes(int primsize) { uint* primMarks = _primArray.getMarks(); int* extIds = _primArray.getExtIds(); int extSize; /// should use strategy, delegate to //recordLaunch("MarkSplitPostions", (primsize + 255) / 256, 256, 0, markPrimSplitPos, // primsize, _primArray.portobj<0>(), primMarks); //checkThrustErrors(thrust::inclusive_scan(getDevicePtr(primMarks), getDevicePtr(primMarks) + primsize, getDevicePtr(extIds))); /// no primitive collapsing for now Logger::tick<TimerType::GPU>(); checkThrustErrors(thrust::fill(getDevicePtr(primMarks), getDevicePtr(primMarks) + primsize, 1)); checkThrustErrors(thrust::sequence(getDevicePtr(extIds), getDevicePtr(extIds) + primsize, 1)); checkCudaErrors(hipMemcpy(&extSize, extIds + primsize - 1, sizeof(int), hipMemcpyDeviceToHost)); Logger::tock<TimerType::GPU>("PrepareCollapsing"); clearExtNodes(extSize); //recordLaunch("CollapsePrimitives", (primsize + 255) / 256, 256, 0, collapsePrimitives, configuredLaunch({ "CollapsePrimitives", primsize }, collapsePrimitives, primsize, portobj<0>(), extIds); //printf("Collapsing %d primitives into %d leaves\n", primsize, extSize); return extSize; } void BvhExtNodeArray::calcSplitMetrics(int extsize) { //recordLaunch("CalcExtNodeSplitMetrics", (extsize + 255) / 256, 256, 0, calcExtNodeSplitMetrics, configuredLaunch({ "CalcExtNodeSplitMetrics", extsize }, calcExtNodeSplitMetrics, extsize, (const MCSize*)getMtCodes(), 
getMetrics()); } void BvhExtNodeArray::calcRestrSplitMetrics(int extsize, const int * _leafRestrRoots) { //recordLaunch("CalcExtNodeRestrSplitMetrics", (extsize + 255) / 256, 256, 0, calcExtNodeRestrSplitMetrics, configuredLaunch({ "CalcExtNodeRestrSplitMetrics", extsize }, calcExtNodeRestrSplitMetrics, extsize, _leafRestrRoots, (const MCSize*)getMtCodes(), getMetrics()); } void*& BvhExtNodeArray::portptr(EnumBvhExtNodePorts no) { assert(no >= COMPLETE && no < NUM_PORTS); return _ports[no]; } __global__ void markPrimSplitPos(int size, BvhPrimitiveCompletePort _prims, uint* _mark) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; _mark[idx] = 1; ///< should depend on collapsing policy //_mark[idx] = idx ? __clz(_prims.mtcode(idx) ^ _prims.mtcode(idx - 1)) <= MARK_TAG : 1; } __global__ void collapsePrimitives(int primsize, BvhExtNodeCompletePort _lvs, int* _extIds) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= primsize) return; int extId = _extIds[idx] - 1; //BvhBvCompletePort primBvs = _lvs.getPrimBvs(), extBvs = _lvs.getExtBvs(); const BvhBvCompletePort &primBvs = _lvs.primBvs(); auto &extBvs = _lvs.refExtBvs(); atomicAdd(&_lvs.seglen(extId), 1); extBvs.setBV(extId, primBvs, idx); if (_lvs.getPrimMark(idx)) _lvs.stidx(extId) = idx; /* atomicMinD(&extBvs.minx(extId), primBvs.getminx(idx)); atomicMinD(&extBvs.miny(extId), primBvs.getminy(idx)); atomicMinD(&extBvs.minz(extId), primBvs.getminz(idx)); atomicMaxD(&extBvs.maxx(extId), primBvs.getmaxx(idx)); atomicMaxD(&extBvs.maxy(extId), primBvs.getmaxy(idx)); atomicMaxD(&extBvs.maxz(extId), primBvs.getmaxz(idx)); */ } __global__ void calcExtNodeSplitMetrics(int extsize, const MCSize *_codes, int *_metrics) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= extsize) return; _metrics[idx] = idx != extsize - 1 ? 32 - __clz(_codes[idx] ^ _codes[idx + 1]) : 33; //if (idx < 10) // printf("%d-ext node: split metric %d\n", idx, _lvs.metric(idx)); } __global__ void calcExtNodeRestrSplitMetrics(int extsize, const int *_leafRestrRoots, const MCSize *_codes, int *_metrics) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int subrt = _leafRestrRoots[idx]; if (idx >= extsize || !subrt) return; //_lvs.metric(idx) = idx != extsize - 1 ? (_leafRestrRoots[idx + 1] == subrt ? 64 - __clzll(_lvs.getmtcode(idx) ^ _lvs.getmtcode(idx + 1)) : 65) : 65; _metrics[idx] = _leafRestrRoots[idx + 1] == subrt ? 32 - __clz(_codes[idx] ^ _codes[idx + 1]) : 33; //_metrics[idx] = idx != extsize - 1 ? (_leafRestrRoots[idx + 1] == subrt ? 32 - __clz(_codes[idx] ^ _codes[idx + 1]) : 33) : 33; } }
3303ab0bac3f5f9a2cfdd72a1737d078b30d4069.cu
#include "BvhExtNode.h" #include <cassert> #include <cuda_runtime.h> #include <helper_cuda.h> #include <thrust/scan.h> #include <thrust/fill.h> #include <thrust/sequence.h> #include "system\CudaDevice\CudaKernelLauncher.cu" #include "utility\CudaThrustUtils.hpp" #include "utility\CudaDeviceUtils.h" namespace mn { BvhExtNodeArray::BvhExtNodeArray() {} BvhExtNodeArray::~BvhExtNodeArray() {} void BvhExtNodeArray::setup(uint primSize, uint extSize) { assert(extSize <= primSize); _primArray.setup(primSize); _bvArray.setup(extSize); _extSize = extSize; /// build attribs checkCudaErrors(cudaMalloc((void**)&_attribs[PAR], sizeof(int)*extSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[MARK], sizeof(uint)*extSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[LCA], sizeof(int)*(extSize + 1))); checkCudaErrors(cudaMalloc((void**)&_attribs[RCL], sizeof(int)*(extSize + 1))); checkCudaErrors(cudaMalloc((void**)&_attribs[STIDX], sizeof(int)*extSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[SEGLEN], sizeof(uint)*extSize)); checkCudaErrors(cudaMalloc((void**)&_attribs[SPLIT_METRIC], sizeof(int)*extSize)); /// build ports portptr(COMPLETE) = new BvhExtNodeCompletePort(_bvArray.portobj<0>(), _primArray.portobj<0>()); /// link ports port<COMPLETE>()->link(_attribs, PAR); } void BvhExtNodeArray::cleanup() { /// clean attribs for (int i = 0; i < NUM_ATTRIBS; i++) checkCudaErrors(cudaFree(_attribs[i])); /// clean ports delete port<COMPLETE>(); _bvArray.cleanup(); _primArray.cleanup(); } void BvhExtNodeArray::clearExtNodes(int size) { checkCudaErrors(cudaMemset(_attribs[PAR], 0xff, sizeof(int)*size)); checkThrustErrors(thrust::fill(thrust::device_ptr<uint>((uint*)_attribs[MARK]), thrust::device_ptr<uint>((uint*)_attribs[MARK]) + size, 7)); checkCudaErrors(cudaMemset(_attribs[SEGLEN], 0, sizeof(uint)*size)); checkCudaErrors(cudaMemset(_attribs[LCA], 0xff, sizeof(int)*(size + 1))); checkCudaErrors(cudaMemset(_attribs[RCL], 0xff, sizeof(int)*(size + 1))); _bvArray.clear(size); } void BvhExtNodeArray::clearExtBvs(int size) { _bvArray.clear(size); } int BvhExtNodeArray::buildExtNodes(int primsize) { uint* primMarks = _primArray.getMarks(); int* extIds = _primArray.getExtIds(); int extSize; /// should use strategy, delegate to //recordLaunch("MarkSplitPostions", (primsize + 255) / 256, 256, 0, markPrimSplitPos, // primsize, _primArray.portobj<0>(), primMarks); //checkThrustErrors(thrust::inclusive_scan(getDevicePtr(primMarks), getDevicePtr(primMarks) + primsize, getDevicePtr(extIds))); /// no primitive collapsing for now Logger::tick<TimerType::GPU>(); checkThrustErrors(thrust::fill(getDevicePtr(primMarks), getDevicePtr(primMarks) + primsize, 1)); checkThrustErrors(thrust::sequence(getDevicePtr(extIds), getDevicePtr(extIds) + primsize, 1)); checkCudaErrors(cudaMemcpy(&extSize, extIds + primsize - 1, sizeof(int), cudaMemcpyDeviceToHost)); Logger::tock<TimerType::GPU>("PrepareCollapsing"); clearExtNodes(extSize); //recordLaunch("CollapsePrimitives", (primsize + 255) / 256, 256, 0, collapsePrimitives, configuredLaunch({ "CollapsePrimitives", primsize }, collapsePrimitives, primsize, portobj<0>(), extIds); //printf("Collapsing %d primitives into %d leaves\n", primsize, extSize); return extSize; } void BvhExtNodeArray::calcSplitMetrics(int extsize) { //recordLaunch("CalcExtNodeSplitMetrics", (extsize + 255) / 256, 256, 0, calcExtNodeSplitMetrics, configuredLaunch({ "CalcExtNodeSplitMetrics", extsize }, calcExtNodeSplitMetrics, extsize, (const MCSize*)getMtCodes(), getMetrics()); } void 
BvhExtNodeArray::calcRestrSplitMetrics(int extsize, const int * _leafRestrRoots) { //recordLaunch("CalcExtNodeRestrSplitMetrics", (extsize + 255) / 256, 256, 0, calcExtNodeRestrSplitMetrics, configuredLaunch({ "CalcExtNodeRestrSplitMetrics", extsize }, calcExtNodeRestrSplitMetrics, extsize, _leafRestrRoots, (const MCSize*)getMtCodes(), getMetrics()); } void*& BvhExtNodeArray::portptr(EnumBvhExtNodePorts no) { assert(no >= COMPLETE && no < NUM_PORTS); return _ports[no]; } __global__ void markPrimSplitPos(int size, BvhPrimitiveCompletePort _prims, uint* _mark) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= size) return; _mark[idx] = 1; ///< should depend on collapsing policy //_mark[idx] = idx ? __clz(_prims.mtcode(idx) ^ _prims.mtcode(idx - 1)) <= MARK_TAG : 1; } __global__ void collapsePrimitives(int primsize, BvhExtNodeCompletePort _lvs, int* _extIds) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= primsize) return; int extId = _extIds[idx] - 1; //BvhBvCompletePort primBvs = _lvs.getPrimBvs(), extBvs = _lvs.getExtBvs(); const BvhBvCompletePort &primBvs = _lvs.primBvs(); auto &extBvs = _lvs.refExtBvs(); atomicAdd(&_lvs.seglen(extId), 1); extBvs.setBV(extId, primBvs, idx); if (_lvs.getPrimMark(idx)) _lvs.stidx(extId) = idx; /* atomicMinD(&extBvs.minx(extId), primBvs.getminx(idx)); atomicMinD(&extBvs.miny(extId), primBvs.getminy(idx)); atomicMinD(&extBvs.minz(extId), primBvs.getminz(idx)); atomicMaxD(&extBvs.maxx(extId), primBvs.getmaxx(idx)); atomicMaxD(&extBvs.maxy(extId), primBvs.getmaxy(idx)); atomicMaxD(&extBvs.maxz(extId), primBvs.getmaxz(idx)); */ } __global__ void calcExtNodeSplitMetrics(int extsize, const MCSize *_codes, int *_metrics) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= extsize) return; _metrics[idx] = idx != extsize - 1 ? 32 - __clz(_codes[idx] ^ _codes[idx + 1]) : 33; //if (idx < 10) // printf("%d-ext node: split metric %d\n", idx, _lvs.metric(idx)); } __global__ void calcExtNodeRestrSplitMetrics(int extsize, const int *_leafRestrRoots, const MCSize *_codes, int *_metrics) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int subrt = _leafRestrRoots[idx]; if (idx >= extsize || !subrt) return; //_lvs.metric(idx) = idx != extsize - 1 ? (_leafRestrRoots[idx + 1] == subrt ? 64 - __clzll(_lvs.getmtcode(idx) ^ _lvs.getmtcode(idx + 1)) : 65) : 65; _metrics[idx] = _leafRestrRoots[idx + 1] == subrt ? 32 - __clz(_codes[idx] ^ _codes[idx + 1]) : 33; //_metrics[idx] = idx != extsize - 1 ? (_leafRestrRoots[idx + 1] == subrt ? 32 - __clz(_codes[idx] ^ _codes[idx + 1]) : 33) : 33; } }
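calcExtNodeSplitMetrics above ranks adjacent leaves by 32 - __clz(code[i] ^ code[i+1]), i.e. 32 minus the length of the common high-bit prefix of neighbouring Morton codes, with 33 as a sentinel for the last leaf or a restriction boundary. A host-side sketch of the same quantity (assuming MCSize is a 32-bit code, as the 32 - __clz arithmetic suggests) can be handy for checking kernel output; the zero guard matters because __builtin_clz(0) is undefined on the host, whereas CUDA's __clz(0) returns 32.

#include <stdint.h>

// Host-side equivalent of the kernel's split metric:
// 32 minus the length of the common high-bit prefix of two 32-bit Morton codes.
// Adjacent leaves with a smaller metric share a longer prefix.
static int splitMetric(uint32_t a, uint32_t b)
{
    uint32_t x = a ^ b;
    if (x == 0)
        return 0;                     // identical codes: matches 32 - __clz(0) == 0 on the device
    return 32 - __builtin_clz(x);     // __builtin_clz is a GCC/Clang builtin, undefined for 0
}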
59002a2f42ca322a1baff1feb1dda85ed5747751.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "deviceImage.h"
#include "deviceImage.cuh"

namespace D3D_CUDA
{
namespace DeviceImageDeviceCode
{

__global__ void clearKernel(DeviceImage devImg, unsigned char value)
{
    // get position of output
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

    if (x < devImg.getWidth()*devImg.getNumChannels() && y < devImg.getHeight())
    {
        devImg(x,y) = value;
    }
}

}
}

using namespace D3D;
using namespace D3D_CUDA;
using namespace DeviceImageDeviceCode;

DeviceImage::DeviceImage()
{
    addr = 0;
}

void DeviceImage::deallocate()
{
    D3D_CUDA_CHECKED_CALL( hipFree((void *)addr); )
    addr = 0;
}

void DeviceImage::clear(unsigned char value)
{
    dim3 gridDim(getNumTiles(width*numChannels, TILE_WIDTH), getNumTiles(height, TILE_HEIGHT));
    dim3 blockDim(TILE_WIDTH, TILE_HEIGHT);

    hipLaunchKernelGGL(( clearKernel), dim3(gridDim), dim3(blockDim), 0, 0, *this, value);
}
59002a2f42ca322a1baff1feb1dda85ed5747751.cu
#include "deviceImage.h" #include "deviceImage.cuh" namespace D3D_CUDA { namespace DeviceImageDeviceCode { __global__ void clearKernel(DeviceImage devImg, unsigned char value) { // get position of outupt unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < devImg.getWidth()*devImg.getNumChannels() && y < devImg.getHeight()) { devImg(x,y) = value; } } } } using namespace D3D; using namespace D3D_CUDA; using namespace DeviceImageDeviceCode; DeviceImage::DeviceImage() { addr = 0; } void DeviceImage::deallocate() { D3D_CUDA_CHECKED_CALL( cudaFree((void *)addr); ) addr = 0; } void DeviceImage::clear(unsigned char value) { dim3 gridDim(getNumTiles(width*numChannels, TILE_WIDTH), getNumTiles(height, TILE_HEIGHT)); dim3 blockDim(TILE_WIDTH, TILE_HEIGHT); clearKernel<<<gridDim, blockDim>>>(*this, value); }
GPUinfo.hip
// !!! This is a file automatically generated by hipify!!!
// Example for the Supercomputing course
// Created by: Luciano P. Soares (April 10, 2018)

#include <stdio.h>
#include <stdlib.h>
//#include <hip/hip_runtime.h>
//#include <hip/hip_runtime.h>

/* GPU information */
int main() {
    int dev_count;
    hipGetDeviceCount(&dev_count);
    printf("Numero de devices (GPU) = %d\n\n", dev_count );

    hipDeviceProp_t dev_prop;
    for (int i = 0; i < dev_count; i++) {
        printf("\tDevice (%d)\n", i);
        hipGetDeviceProperties(&dev_prop, i);
        printf("\t\tNumero maximo de Bloco\n");
        printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxGridSize[0],dev_prop.maxGridSize[1],dev_prop.maxGridSize[2] );
        printf("\t\tNumero maximo de Threads por Bloco = %d\n", dev_prop.maxThreadsPerBlock );
        printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxThreadsDim[0],dev_prop.maxThreadsDim[1],dev_prop.maxThreadsDim[2] );
        printf("\t\tNumero maximo de Streaming Multiprocessors (SMs) = %d\n", dev_prop.multiProcessorCount );
        printf("\t\tFrequencia de Clock = %d\n", dev_prop.clockRate );
        printf("\t\tTamanho do Warp = %d\n", dev_prop.warpSize );
    }

    return 0;
}
GPUinfo.cu
// Example for the Supercomputing course
// Created by: Luciano P. Soares (April 10, 2018)

#include <stdio.h>
#include <stdlib.h>
//#include <cuda.h>
//#include <cuda_runtime.h>

/* GPU information */
int main() {
    int dev_count;
    cudaGetDeviceCount(&dev_count);
    printf("Numero de devices (GPU) = %d\n\n", dev_count );

    cudaDeviceProp dev_prop;
    for (int i = 0; i < dev_count; i++) {
        printf("\tDevice (%d)\n", i);
        cudaGetDeviceProperties(&dev_prop, i);
        printf("\t\tNumero maximo de Bloco\n");
        printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxGridSize[0],dev_prop.maxGridSize[1],dev_prop.maxGridSize[2] );
        printf("\t\tNumero maximo de Threads por Bloco = %d\n", dev_prop.maxThreadsPerBlock );
        printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxThreadsDim[0],dev_prop.maxThreadsDim[1],dev_prop.maxThreadsDim[2] );
        printf("\t\tNumero maximo de Streaming Multiprocessors (SMs) = %d\n", dev_prop.multiProcessorCount );
        printf("\t\tFrequencia de Clock = %d\n", dev_prop.clockRate );
        printf("\t\tTamanho do Warp = %d\n", dev_prop.warpSize );
    }

    return 0;
}
247c12085aa414300e79ac7cc1aac14172fc97e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include <iostream> #include <string> #include <vector> #include <cmath> #include <limits> #include <rocblas.h> #include <hiprand/hiprand.h> #include <cudnn.h> // #include <opencv2/opencv.hpp> #include <helper_cuda.h> typedef unsigned char uchar; #define FatalError(s) do { \ std::stringstream _where, _message; \ _where << __FILE__ << ':' << __LINE__; \ _message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \ std::cerr << _message.str() << "\nAborting...\n"; \ hipDeviceReset(); \ exit(1); \ } while(0) // #define checkCudaErrors(status) do { \ // std::stringstream _error; \ // if (status != 0) { \ // _error << "Cuda failure: " << status; \ // FatalError(_error.str()); \ // } \ // } while(0) #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } #define checkCUBLAS(expression) \ { \ hipblasStatus_t status = (expression); \ if (status != HIPBLAS_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << _cudaGetErrorEnum(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } #define checkCURAND(expression) \ { \ hiprandStatus_t status = (expression); \ if (status != HIPRAND_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << _cudaGetErrorEnum(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } using namespace std; int N_train = 60000, N_test = 10000; int rows = 28, cols = 28, channels = 1; int BW = 16 * 16; // Block size for GPU kernel // void roundUp(int a, int b) { // } __global__ void fillValue(float *v, int size, int value) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= size) return; v[i] = value; } __global__ void softmaxLossBackProp(float *y, float *SO, float *dSO, int batch_size, int output_size, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; int cur_class = static_cast<int>(y[i]); dSO[i * output_size + cur_class] = -1 / (SO[i * output_size + cur_class] * batch_size + eps); } __global__ void inferClass(float *O, float *IO, int batch_size, int output_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; float max = O[i * output_size]; int index = 0; for (int j = 1; j < output_size; j++) { if (O[i * output_size + j] > max) { max = O[i * output_size + j]; index = j; } } IO[i] = (float)index; } int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int) ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; 
f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *) &n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *) &n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printMatrix(float *M, int r, int c) { for (int i = 0; i < r; i++) { for (int j = 0; j < c; j++) { cout << M[i * c + j] << ' '; } cout << endl; } cout << endl; } class Context { public: int batch_size, channels; int input_rows, input_cols, output_rows, output_cols; float learning_rate; float *X, *W1, *b1, *W2, *b2, *H, *Hrelu, *O, *SO, *dSO, *dO, *dW1, *db1, *dW2, *db2, *dH, *dHrelu; float *IO; float *y; float *onevec; float *h_IO; int input_size; int input_size_fc; int hidden_size; int output_size; int input_feature, output_feature; hipblasHandle_t cublasHandle; // cudnnTensorDescriptor_t batchTensor, W1Tensor, b1Tensor, W2Tensor, b2Tensor, HTensor, OTensor; cudnnTensorDescriptor_t HTensor, OTensor; cudnnActivationDescriptor_t Reludesc; // cudnnOpTensorDescriptor_t Adddesc, Muldesc; cudnnHandle_t cudnnHandle; hiprandGenerator_t curandgen; float *h_W1, *h_W2, *h_b1, *h_b2, *h_SO, *h_y; float eps; // conv cudnnTensorDescriptor_t inputTensor, conv1OTensor, conv1bTensor; cudnnFilterDescriptor_t conv1Tensor; cudnnConvolutionDescriptor_t conv1Desc; cudnnConvolutionFwdAlgo_t conv1fAlgo; cudnnConvolutionBwdFilterAlgo_t conv1bfAlgo; size_t workspace_bytes; float *workspace; float *conv1O, *conv1OA; float *conv1filter, *conv1bias; float *dconv1filter, *dconv1bias; float *dconv1O, *dconv1OA; int filter_height, filter_width; // vdnn int req_algo_count; cudnnConvolutionFwdAlgoPerf_t *conv1fwdperf; cudnnConvolutionBwdFilterAlgoPerf_t *conv1bwdfperf; cudnnConvolutionBwdDataAlgoPerf_t *conv1bwddperf; cudnnPoolingDescriptor_t poolDesc; Context(int input_size, int batch_size, int hidden_size, float learning_rate, int output_size) { this->batch_size = batch_size; this->hidden_size = hidden_size; this->output_size = output_size; // number of classes; this->channels = 1; input_rows = 28; input_cols = 28; input_feature = 256; output_rows = 28; output_cols = 28; output_feature = 128; filter_height = 1, filter_width = 1; this->input_size = input_rows * input_cols * input_feature; cout << "input_size: " << this->input_size << endl; input_size_fc = output_rows * output_cols * output_feature; this->learning_rate = learning_rate; eps = 1e-8; workspace_bytes = 0; workspace = NULL; checkCUBLAS(hipblasCreate(&cublasHandle)); checkCUDNN(cudnnCreate(&cudnnHandle)); 
checkCURAND(hiprandCreateGenerator(&curandgen, HIPRAND_RNG_PSEUDO_DEFAULT)); //vdnn req_algo_count = 10; conv1fwdperf = (cudnnConvolutionFwdAlgoPerf_t *)malloc(req_algo_count * sizeof(cudnnConvolutionFwdAlgoPerf_t)); conv1bwdfperf = (cudnnConvolutionBwdFilterAlgoPerf_t *)malloc(req_algo_count * sizeof(cudnnConvolutionBwdFilterAlgoPerf_t)); conv1bwddperf = (cudnnConvolutionBwdDataAlgoPerf_t *)malloc(req_algo_count * sizeof(cudnnConvolutionBwdDataAlgoPerf_t)); // conv checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1OTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1bTensor)); checkCUDNN(cudnnCreateFilterDescriptor(&conv1Tensor)); checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, input_feature, input_rows, input_cols)); checkCUDNN(cudnnSetTensor4dDescriptor(conv1OTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, output_feature, output_rows, output_cols)); checkCUDNN(cudnnSetTensor4dDescriptor(conv1bTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_feature, 1, 1)); checkCUDNN(cudnnSetFilter4dDescriptor(conv1Tensor, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_feature, input_feature, filter_height, filter_width)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1Desc)); int pad_h = 0, pah_w = 0, u = 1, v = 1, dilation_h = 1, dilation_w = 1; checkCUDNN(cudnnSetConvolution2dDescriptor(conv1Desc, pad_h, pah_w, u, v, dilation_h, dilation_w, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc)); checkCUDNN(cudnnSetPooling2dDescriptor(poolDesc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, filter_height, filter_width, pad_h, pad_w, u, v)); int ret_algo_count; int n; // cout << "waiting..\n"; // cin >> n; checkCUDNN(cudnnFindConvolutionForwardAlgorithm(cudnnHandle, inputTensor, conv1Tensor, conv1Desc, conv1OTensor, req_algo_count, &ret_algo_count, conv1fwdperf)); cout << "Printing forward conv algo perf\n"; for (int i = 0; i < ret_algo_count; i++) { cout << i << endl; cout << "algo: " << conv1fwdperf[i].algo << endl; cout << "status: " << cudnnGetErrorString(conv1fwdperf[i].status) << endl; cout << "time(ms): " << conv1fwdperf[i].time << endl; cout << "memory(bytes): " << conv1fwdperf[i].memory << endl; cout << "mathType: " << conv1fwdperf[i].mathType << endl; cout << endl; } checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle, inputTensor, conv1OTensor, conv1Desc, conv1Tensor, req_algo_count, &ret_algo_count, conv1bwdfperf)); cout << "Printing bwdfilter conv algo perf\n"; for (int i = 0; i < ret_algo_count; i++) { cout << i << endl; cout << "algo: " << conv1bwdfperf[i].algo << endl; cout << "status: " << cudnnGetErrorString(conv1bwdfperf[i].status) << endl; cout << "time(ms): " << conv1bwdfperf[i].time << endl; cout << "memory(bytes): " << conv1bwdfperf[i].memory << endl; cout << "mathType: " << conv1bwdfperf[i].mathType << endl; cout << endl; } checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle, conv1Tensor, conv1OTensor, conv1Desc, inputTensor, req_algo_count, &ret_algo_count, conv1bwddperf)); cout << "Printing bwddata conv algo perf\n"; for (int i = 0; i < ret_algo_count; i++) { cout << i << endl; cout << "algo: " << conv1bwdfperf[i].algo << endl; cout << "status: " << cudnnGetErrorString(conv1bwdfperf[i].status) << endl; cout << "time(ms): " << conv1bwdfperf[i].time << endl; cout << "memory(bytes): " << conv1bwdfperf[i].memory << endl; cout << "mathType: " << conv1bwdfperf[i].mathType << endl; cout << endl; } // 
checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle, inputTensor, conv1Tensor, conv1Desc, conv1OTensor, // CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv1fAlgo)); // size_t fwd_wspace; // checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle, inputTensor, conv1Tensor, conv1Desc, conv1OTensor, // conv1fAlgo, &fwd_wspace)); // cout << "fwd_wspace: " << fwd_wspace << endl; // cout << "algo_used: " << conv1fAlgo << endl; // // exit(0); // workspace_bytes = max(workspace_bytes, fwd_wspace); // checkCudaErrors(hipMalloc((void **)&conv1filter, filter_height * filter_width * input_feature * output_feature * sizeof(float) + sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dconv1filter, filter_height * filter_width * input_feature * output_feature * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&conv1bias, output_feature * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dconv1bias, output_feature * sizeof(float))); // fillValue<<<ceil(1.0 * output_feature / BW), BW>>>(conv1bias, output_feature, 0); // checkCURAND(hiprandGenerateNormal(curandgen, conv1filter, filter_height * filter_width * input_feature * output_feature + 1, // 0, 1 / sqrt(filter_height * filter_width * input_feature))); // checkCudaErrors(hipMalloc((void **)&conv1O, batch_size * input_size_fc * sizeof(float))); checkCudaErrors(hipMalloc((void **)&conv1OA, batch_size * this->input_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dconv1O, batch_size * input_size_fc * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dconv1OA, batch_size * input_size_fc * sizeof(float))); // checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(cudnnHandle, inputTensor, conv1OTensor, conv1Desc, conv1Tensor, // CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &conv1bfAlgo)); // size_t bwd_fwspace; // checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle, inputTensor, conv1OTensor, conv1Desc, conv1Tensor, // conv1bfAlgo, &bwd_fwspace)); // workspace_bytes = max(workspace_bytes, bwd_fwspace); // if (workspace_bytes > 0) // checkCudaErrors(hipMalloc((void **)&workspace, workspace_bytes)); // // allocate memory in device checkCudaErrors(hipMalloc((void **)&X, batch_size * this->input_size * sizeof(float))); checkCudaErrors(hipMalloc((void **)&OP, batch_size * this->input_size_fc * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&W1, input_size_fc * hidden_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dW1, input_size_fc * hidden_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&b1, hidden_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&db1, hidden_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&W2, hidden_size * output_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dW2, hidden_size * output_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&b2, output_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&db2, output_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&H, batch_size * hidden_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dH, batch_size * hidden_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&Hrelu, batch_size * hidden_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dHrelu, batch_size * hidden_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&O, batch_size * output_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&IO, batch_size * sizeof(float))); // checkCudaErrors(hipMalloc((void 
**)&dO, batch_size * output_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&SO, batch_size * output_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&dSO, batch_size * output_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&y, batch_size * sizeof(float))); // checkCudaErrors(hipMalloc((void **)&onevec, batch_size * sizeof(float))); // fillValue<<<ceil(1.0 * batch_size / BW), BW>>>(onevec, batch_size, 1); // // { // // cout << "waiting..\n"; // // int n; // // cin >> n; // // } // // h_IO = (float *)malloc(batch_size * sizeof(float)); // // checkCUDNN(cudnnCreateTensorDescriptor(&batchTensor)); // // checkCUDNN(cudnnCreateTensorDescriptor(&W1Tensor)); // // checkCUDNN(cudnnCreateTensorDescriptor(&b1Tensor)); // // checkCUDNN(cudnnCreateTensorDescriptor(&W2Tensor)); // // checkCUDNN(cudnnCreateTensorDescriptor(&b2Tensor)); // checkCUDNN(cudnnCreateTensorDescriptor(&HTensor)); // checkCUDNN(cudnnCreateTensorDescriptor(&OTensor)); // checkCUDNN(cudnnCreateActivationDescriptor(&Reludesc)); // // checkCUDNN(cudnnCreateOpTensorDescriptor(&Opdesc)); // // checkCUDNN(cudnnSetTensor4dDescriptor(batchTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, rows * cols, 1, 1)); // // // just to be able to multiply properly // // checkCUDNN(cudnnSetTensor4dDescriptor(W1Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, hidden_size, 1, 1)); // // checkCUDNN(cudnnSetTensor4dDescriptor(b1Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, hidden_size, 1, 1)); // checkCUDNN(cudnnSetTensor4dDescriptor(HTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, hidden_size, 1, 1)); // // // just to be able to multiply properly // // checkCUDNN(cudnnSetTensor4dDescriptor(W2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, hidden_size, output_size, 1, 1)); // // checkCUDNN(cudnnSetTensor4dDescriptor(b2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_size, 1, 1)); // checkCUDNN(cudnnSetTensor4dDescriptor(OTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, output_size, 1, 1)); // checkCUDNN(cudnnSetActivationDescriptor(Reludesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0)); // // initialization // fillValue<<<ceil(1.0 * hidden_size / BW), BW>>>(b1, hidden_size, 0); // fillValue<<<ceil(1.0 * output_size / BW), BW>>>(b2, output_size, 0); // checkCURAND(hiprandGenerateNormal(curandgen, W1, input_size_fc * hidden_size, 0, 0.1)); // checkCURAND(hiprandGenerateNormal(curandgen, W2, hidden_size * output_size, 0, 0.1)); checkCURAND(hiprandGenerateNormal(curandgen, conv1OA, batch_size * this->input_size, 0, 0.1)); // h_W1 = (float *)malloc(input_size_fc * hidden_size * sizeof(float)); // h_W2 = (float *)malloc(hidden_size * output_size * sizeof(float)); // h_b1 = (float *)malloc(hidden_size * sizeof(float)); // h_b2 = (float *)malloc(output_size * sizeof(float)); // h_SO = (float *)malloc(batch_size * output_size * sizeof(float)); // h_y = (float *)malloc(batch_size * sizeof(float)); } ~Context() { checkCUBLAS(hipblasDestroy(cublasHandle)); checkCURAND(hiprandDestroyGenerator(curandgen)); // checkCudaErrors(hipFree(X)); // checkCudaErrors(hipFree(W1)); // checkCudaErrors(hipFree(dW1)); // checkCudaErrors(hipFree(b1)); // checkCudaErrors(hipFree(db1)); // checkCudaErrors(hipFree(W2)); // checkCudaErrors(hipFree(dW2)); // checkCudaErrors(hipFree(b2)); // checkCudaErrors(hipFree(db2)); // checkCudaErrors(hipFree(H)); // checkCudaErrors(hipFree(dH)); // checkCudaErrors(hipFree(Hrelu)); // checkCudaErrors(hipFree(dHrelu)); // checkCudaErrors(hipFree(O)); // 
checkCudaErrors(hipFree(dO)); // checkCudaErrors(hipFree(SO)); // checkCudaErrors(hipFree(dSO)); // checkCudaErrors(hipFree(IO)); // free(h_IO); // checkCUDNN(cudnnDestroyActivationDescriptor(Reludesc)); // // checkCUDNN(cudnnDestroyTensorDescriptor(batchTensor)); // // checkCUDNN(cudnnDestroyTensorDescriptor(W1Tensor)); // // checkCUDNN(cudnnDestroyTensorDescriptor(b1Tensor)); // // checkCUDNN(cudnnDestroyTensorDescriptor(W2Tensor)); // // checkCUDNN(cudnnDestroyTensorDescriptor(b2Tensor)); // checkCUDNN(cudnnDestroyTensorDescriptor(HTensor)); // checkCUDNN(cudnnDestroyTensorDescriptor(OTensor)); // checkCUDNN(cudnnDestroy(cudnnHandle)); // free(h_W1); // free(h_W2); // free(h_b1); // free(h_b2); // free(h_SO); // free(h_y); // // conv // checkCUDNN(cudnnDestroyFilterDescriptor(conv1Tensor)); } void forwardPropagate(bool train=true) { // float alpha = 1.0f, beta = 0.0f; // conv // conv forward hipEvent_t start, stop; float milli = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // // checkCudaErrors(hipEventRecord(start)); // // checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, inputTensor, X, conv1Tensor, conv1filter, conv1Desc, // // conv1fAlgo, workspace, workspace_bytes, &beta, conv1OTensor, conv1O)); // // checkCudaErrors(hipEventRecord(stop)); // // checkCudaErrors(hipEventSynchronize(stop)); // // checkCudaErrors(hipEventElapsedTime(&milli, start, stop)); // // cout << "time for conv: " << milli << endl; // // // add bias // // checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv1bTensor, conv1bias, &alpha, conv1OTensor, conv1O)); // // // activation // // checkCUDNN(cudnnActivationForward(cudnnHandle, Reludesc, &alpha, conv1OTensor, conv1O, &beta, conv1OTensor, conv1OA)); float *temp = NULL; checkCudaErrors(hipHostMalloc((void **)&temp, batch_size * input_size * sizeof(float))); checkCudaErrors(hipEventRecord(start)); checkCudaErrors(hipMemcpyAsync(temp, conv1OA, batch_size * input_size * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipEventRecord(stop)); hipEventSynchronize(stop); checkCudaErrors(hipEventElapsedTime(&milli, start, stop)); cout << "transfer time(ms): " << milli << endl; int n; cin >> n; for (int i = 0; i < n; i++) { cout << temp[i]; } checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, inputTensor, X, &beta, conv1OTensor, OP)) // int n; // cout << "waiting..\n"; // cin >> n; exit(0); // // multiply weights to input // checkCudaErrors(hipEventRecord(start)); // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_N, HIPBLAS_OP_N, // hidden_size, batch_size, input_size_fc, // &alpha, // W1, hidden_size, // conv1OA, input_size_fc, // &beta, // H, hidden_size)); // checkCudaErrors(hipEventRecord(stop)); // checkCudaErrors(hipEventSynchronize(stop)); // checkCudaErrors(hipEventElapsedTime(&milli, start, stop)); // cout << "time for mul: " << milli << endl; // // exit(0); // // float *h_X = (float *)malloc(batch_size * input_size * sizeof(float)); // // checkCudaErrors(hipMemcpy(h_X, X, batch_size * input_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "X:\n"; // // printMatrix(h_X, batch_size, input_size); // // int n; // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(hipMemcpy(h_W1, W1, input_size * hidden_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "W1:\n"; // // printMatrix(h_W1, input_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // float *h_H = (float *)malloc(batch_size * hidden_size * sizeof(float)); // // 
checkCudaErrors(hipMemcpy(h_H, H, batch_size * hidden_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "H:\n"; // // printMatrix(h_H, batch_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // add bias to output // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_N, HIPBLAS_OP_N, // hidden_size, batch_size, 1, // &alpha, // b1, hidden_size, // onevec, 1, // &alpha, // H, hidden_size)); // // checkCudaErrors(hipMemcpy(h_b1, b1, hidden_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "b1:\n"; // // printMatrix(h_b1, 1, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(hipMemcpy(h_H, H, batch_size * hidden_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "H+b:\n"; // // printMatrix(h_H, batch_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // apply relu activation // checkCUDNN(cudnnActivationForward(cudnnHandle, Reludesc, &alpha, HTensor, H, &beta, HTensor, Hrelu)); // // checkCudaErrors(hipMemcpy(h_H, Hrelu, batch_size * hidden_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "Hrelu:\n"; // // printMatrix(h_H, batch_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // multiply weights to input // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_N, HIPBLAS_OP_N, // output_size, batch_size, hidden_size, // &alpha, // W2, output_size, // Hrelu, hidden_size, // &beta, // O, output_size)); // // checkCudaErrors(hipMemcpy(h_W2, W2, hidden_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "W2:\n"; // // printMatrix(h_W2, hidden_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // float *h_O = (float *)malloc(batch_size * output_size * sizeof(float)); // // checkCudaErrors(hipMemcpy(h_O, O, batch_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "O:\n"; // // printMatrix(h_O, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // add bias to output // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_N, HIPBLAS_OP_N, // output_size, batch_size, 1, // &alpha, // b2, output_size, // onevec, 1, // &alpha, // O, output_size)); // // checkCudaErrors(hipMemcpy(h_b2, b2, output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "b2:\n"; // // printMatrix(h_b2, 1, output_size); // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(hipMemcpy(h_O, O, batch_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "O+b:\n"; // // printMatrix(h_O, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // if (train == false) { // inferClass<<<ceil(1.0 * batch_size / BW), BW>>>(O, IO, batch_size, output_size); // hipMemcpy(h_IO, IO, batch_size * sizeof(float), hipMemcpyDeviceToHost); // return; // } // // float *sm_test = (float *)malloc(batch_size * output_size * sizeof(float)); // // for (int i = 0; i < batch_size; i++) { // // for (int j = 0; j < output_size; j++) { // // sm_test[i * output_size + j] = i * output_size + j; // // } // // } // // cout << "sm_test:\n"; // // printMatrix(sm_test, batch_size, output_size); // // checkCudaErrors(hipMemcpy(O, sm_test, batch_size * output_size * sizeof(float), hipMemcpyHostToDevice)); // // checkCudaErrors(hipMemcpy(sm_test, O, batch_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "O:\n"; // // printMatrix(sm_test, batch_size, output_size); // // apply softmax // // checkCudaErrors(hipMemcpy(h_SO, O, output_size * sizeof(float), hipMemcpyDeviceToHost)); 
// // printMatrix(h_SO, batch_size, output_size); // checkCudaErrors(hipDeviceSynchronize()); // checkCUDNN(cudnnSoftmaxForward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, OTensor, O, &beta, OTensor, SO)); // checkCudaErrors(hipDeviceSynchronize()); // // checkCudaErrors(hipMemcpy(h_SO, SO, batch_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "SO:\n"; // // printMatrix(h_SO, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(hipMemcpy(h_SO, SO, batch_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // checkCudaErrors(hipMemcpy(h_y, y, batch_size * sizeof(float), hipMemcpyDeviceToHost)); // // float loss = 0; // // for (int i = 0; i < batch_size; i++) // // loss += -log(h_SO[i * output_size + int(h_y[i])]); // // // getSoftmaxLoss<<<ceil(1.0 * batch_size / BW), BW>>>(SO, y, batch_size, output_size, &loss); // // // printf("yay"); // // return loss; } // void backwardPropagate() { // float alpha = 1.0f, beta = 0.0f; // checkCudaErrors(hipMemset(dSO, 0, batch_size * output_size * sizeof(float))); // softmaxLossBackProp<<<ceil(1.0 * batch_size / BW), BW>>>(y, SO, dSO, batch_size, output_size, eps); // // int n; // // checkCudaErrors(hipMemcpy(h_SO, dSO, batch_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "dSO:\n"; // // printMatrix(h_SO, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // softmax backprop // checkCUDNN(cudnnSoftmaxBackward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, // OTensor, SO, OTensor, dSO, // &beta, // OTensor, dO)); // // checkCudaErrors(hipMemcpy(h_SO, dO, batch_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "dO:\n"; // // printMatrix(h_SO, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // gradient w.r.t. b2 // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_N, HIPBLAS_OP_N, // output_size, 1, batch_size, // &alpha, // dO, output_size, // onevec, batch_size, // &beta, // db2, output_size)); // // checkCudaErrors(hipMemcpy(h_b2, db2, output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "db2:\n"; // // printMatrix(h_b2, 1, output_size); // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(hipMemcpy(h_b2, db2, output_size * sizeof(float), hipMemcpyDeviceToHost)); // // printMatrix(h_b2, 1, output_size); // checkCudaErrors(hipDeviceSynchronize()); // // gradient w.r.t. W2 // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_N, HIPBLAS_OP_T, // output_size, hidden_size, batch_size, // &alpha, // dO, output_size, // Hrelu, hidden_size, // &beta, // dW2, output_size)); // // checkCudaErrors(hipMemcpy(h_W2, dW2, hidden_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "dW2:\n"; // // printMatrix(h_W2, hidden_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // gradient w.r.t. 
Hrelu // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_T, HIPBLAS_OP_N, // hidden_size, batch_size, output_size, // &alpha, // W2, output_size, // dO, output_size, // &beta, // dHrelu, hidden_size)); // // float *h_H = (float *)malloc(batch_size * hidden_size * sizeof(float)); // // checkCudaErrors(hipMemcpy(h_H, dHrelu, batch_size * hidden_size * sizeof(float), hipMemcpyDeviceToHost)); // // cout << "dHrelu:\n"; // // printMatrix(h_H, batch_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // gradient w.r.t H // checkCUDNN(cudnnActivationBackward(cudnnHandle, Reludesc, &alpha, HTensor, Hrelu, HTensor, dHrelu, // HTensor, H, &beta, HTensor, dH)); // // gradient w.r.t. b1 // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_N, HIPBLAS_OP_N, // hidden_size, 1, batch_size, // &alpha, // dH, hidden_size, // onevec, batch_size, // &beta, // db1, hidden_size)); // // gradient w.r.t. W1 // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_N, HIPBLAS_OP_T, // hidden_size, input_size_fc, batch_size, // &alpha, // dH, hidden_size, // conv1OA, input_size_fc, // &beta, // dW1, hidden_size)); // // gradient w.r.t. conv1OA // checkCUBLAS(hipblasSgemm(cublasHandle, // HIPBLAS_OP_T, HIPBLAS_OP_N, // input_size_fc, batch_size, hidden_size, // &alpha, // W1, hidden_size, // dH, hidden_size, // &beta, // dconv1OA, input_size_fc)); // // gradient w.r.t conv1O // checkCUDNN(cudnnActivationBackward(cudnnHandle, Reludesc, &alpha, conv1OTensor, conv1OA, conv1OTensor, dconv1OA, // conv1OTensor, conv1O, &beta, conv1OTensor, dconv1O)); // // gradient w.r.t. conv1bias // checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv1OTensor, dconv1O, &beta, conv1bTensor, dconv1bias)); // // gradient w.r.t. conv1filter // checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, inputTensor, X, conv1OTensor, dconv1O, conv1Desc, // conv1bfAlgo, workspace, workspace_bytes, &beta, conv1Tensor, dconv1filter)); // } // void updateWeights() { // float alpha = -learning_rate; // // update W1 // checkCUBLAS(hipblasSaxpy(cublasHandle, input_size * hidden_size, // &alpha, // dW1, 1, // W1, 1)); // //update b1 // checkCUBLAS(hipblasSaxpy(cublasHandle, hidden_size, // &alpha, // db1, 1, // b1, 1)); // // update W2 // checkCUBLAS(hipblasSaxpy(cublasHandle, hidden_size * output_size, // &alpha, // dW2, 1, // W2, 1)); // //update b2 // checkCUBLAS(hipblasSaxpy(cublasHandle, output_size, // &alpha, // db2, 1, // b2, 1)); // // update conv1bias // checkCUBLAS(hipblasSaxpy(cublasHandle, output_feature, // &alpha, // dconv1bias, 1, // conv1bias, 1)); // // update conv1filter // checkCUBLAS(hipblasSaxpy(cublasHandle, output_feature * input_feature * filter_height * filter_width, // &alpha, // dconv1filter, 1, // conv1filter, 1)); // // checkCudaErrors(hipMemcpy(h_W1, W1, hidden_size * sizeof(float), hipMemcpyDeviceToHost)); // // for (int i = 0; i < output_size; i++) { // // cout << h_W2[i] << ' '; // // } // // cout << endl; // // checkCudaErrors(hipDeviceSynchronize()); // } void train(int num_iter, float *train_images, float *train_labels, float *test_images, float *test_labels, int N) { // int image_size = rows * cols * channels; for (int iter = 0; iter < num_iter; iter++) { int image_id = iter % (N / batch_size); // checkCudaErrors(hipMemcpy(h_W1, W1, input_size_fc * hidden_size * sizeof(float), hipMemcpyDeviceToHost)); // checkCudaErrors(hipMemcpy(h_W2, W2, hidden_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // checkCudaErrors(hipMemcpy(h_b1, b1, hidden_size * sizeof(float), 
hipMemcpyDeviceToHost)); // checkCudaErrors(hipMemcpy(h_b2, b2, output_size * sizeof(float), hipMemcpyDeviceToHost)); // checkCudaErrors(hipMemcpy(X, &train_images[image_id * batch_size * input_size], input_size * batch_size * sizeof(float), hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(y, &train_labels[image_id * batch_size], batch_size * sizeof(float), hipMemcpyHostToDevice)); this->forwardPropagate(); // this->backwardPropagate(); // this->updateWeights(); checkCudaErrors(hipDeviceSynchronize()); exit(0); // checkCudaErrors(hipMemcpy(h_W2, W2, hidden_size * output_size * sizeof(float), hipMemcpyDeviceToHost)); // for (int i = 0; i < output_size; i++) { // cout << h_W2[i] << ' '; // } // cout << endl; checkCudaErrors(hipDeviceSynchronize()); } } int test(float *test_images, float *test_labels, int N) { // int image_size = rows * cols * channels; int start = 0; int size = batch_size; int count = 0; while (start < N) { if (start + size >= N) size = N - start; checkCudaErrors(hipMemcpy(X, &test_images[start * input_size], input_size * size * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(y, &test_labels[start], size * sizeof(float), hipMemcpyHostToDevice)); this->forwardPropagate(false); checkCudaErrors(hipDeviceSynchronize()); for (int i = 0; i < size; i++) { if (h_IO[i] == test_labels[start + i]) count++; // cout << h_IO[i] << ' '; } start = start + size; } return count; } }; int main() { vector<vector<uchar> > train_images, test_images; vector<uchar> train_labels, test_labels; readMNIST(train_images, test_images, train_labels, test_labels); float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels; int input_size = rows * cols * channels; f_train_images = (float *)malloc(N_train * input_size * sizeof(float)); f_train_labels = (float *)malloc(N_train * sizeof(float)); f_test_images = (float *)malloc(N_test * input_size * sizeof(float)); f_test_labels = (float *)malloc(N_test * sizeof(float)); // checkCudaErrors(hipHostMalloc((void **)&f_train_images, N_train * input_size * sizeof(float))); // checkCudaErrors(hipHostMalloc((void **)&f_train_labels, N_train * sizeof(float))); // checkCudaErrors(hipHostMalloc((void **)&f_test_images, N_test * input_size * sizeof(float))); // checkCudaErrors(hipHostMalloc((void **)&f_test_labels, N_test * sizeof(float))); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int k = 0; k < N_train; k++) { for (int j = 0; j < rows * cols; j++) { f_train_images[k * input_size + j] = (float)train_images[k][j]; } f_train_labels[k] = (float)train_labels[k]; } for (int k = 0; k < N_test; k++) { for (int j = 0; j < rows * cols; j++) { f_test_images[k * input_size + j] = (float)test_images[k][j]; } f_test_labels[k] = (float)test_labels[k]; } for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < N_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= N_train; } for (int i = 0; i < N_train; i++) { for (int j = 0; j < input_size; j++) f_train_images[i * input_size + j] -= mean_image[j]; } for (int i = 0; i < N_test; i++) { for (int j = 0; j < input_size; j++) f_test_images[i * input_size + j] -= mean_image[j]; } // int toy_input_size = 2; // int toy_hidden_size = 5; // int toy_output_size = 2; // int batch_size = 100; // float *toy_train, *toy_train_labels; // toy_train = (float *)malloc(batch_size * toy_input_size * sizeof(float)); // toy_train_labels = (float *)malloc(batch_size * sizeof(float)); // hiprandGenerator_t curandgen; // 
checkCURAND(hiprandCreateGeneratorHost(&curandgen, HIPRAND_RNG_PSEUDO_DEFAULT)); // printf("toy_train, before init:\n"); // printMatrix(toy_train, batch_size, toy_input_size); // checkCURAND(hiprandGenerateNormal(curandgen, toy_train, batch_size * toy_input_size * sizeof(float), 0, 10)); // printf("toy_train, after init:\n"); // printMatrix(toy_train, batch_size, toy_input_size); // for (int i = 0; i < batch_size; i++) { // cout << float(i % 2) << " p\n"; // toy_train_labels[i] = float(i % 2); // } // printf("toy_train_labels, after init\n"); // printMatrix(toy_train_labels, batch_size, 1); // int n; // cin >> n; // float toy_l_rate = 1e-1; // Context context(toy_input_size, batch_size, toy_hidden_size, toy_l_rate, toy_output_size); // int n_iter = 100; // int n_rep = 10; // for (int i = 0; i < n_rep; i++) { // context.train(n_iter, toy_train, toy_train_labels, toy_train, toy_train_labels, batch_size); // cout << context.test(toy_train, toy_train_labels, batch_size) << endl << flush; // } float l_rate = 1e-3; int hidden_size = 50; int batch_size = 128; int output_size = 10; Context context(input_size, batch_size, hidden_size, l_rate, output_size); int n_iter = 10000; int n_rep = 10; for (int i = 0; i < n_rep; i++) { context.train(n_iter, f_train_images, f_train_labels, f_test_images, f_test_labels, N_train); cout << context.test(f_test_images, f_test_labels, N_test) << endl; } }
247c12085aa414300e79ac7cc1aac14172fc97e0.cu
#include <fstream> #include <iostream> #include <string> #include <vector> #include <cmath> #include <limits> #include <cublas_v2.h> #include <curand.h> #include <cudnn.h> // #include <opencv2/opencv.hpp> #include <helper_cuda.h> typedef unsigned char uchar; #define FatalError(s) do { \ std::stringstream _where, _message; \ _where << __FILE__ << ':' << __LINE__; \ _message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \ std::cerr << _message.str() << "\nAborting...\n"; \ cudaDeviceReset(); \ exit(1); \ } while(0) // #define checkCudaErrors(status) do { \ // std::stringstream _error; \ // if (status != 0) { \ // _error << "Cuda failure: " << status; \ // FatalError(_error.str()); \ // } \ // } while(0) #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } #define checkCUBLAS(expression) \ { \ cublasStatus_t status = (expression); \ if (status != CUBLAS_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << _cudaGetErrorEnum(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } #define checkCURAND(expression) \ { \ curandStatus_t status = (expression); \ if (status != CURAND_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << _cudaGetErrorEnum(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } using namespace std; int N_train = 60000, N_test = 10000; int rows = 28, cols = 28, channels = 1; int BW = 16 * 16; // Block size for GPU kernel // void roundUp(int a, int b) { // } __global__ void fillValue(float *v, int size, int value) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= size) return; v[i] = value; } __global__ void softmaxLossBackProp(float *y, float *SO, float *dSO, int batch_size, int output_size, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; int cur_class = static_cast<int>(y[i]); dSO[i * output_size + cur_class] = -1 / (SO[i * output_size + cur_class] * batch_size + eps); } __global__ void inferClass(float *O, float *IO, int batch_size, int output_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; float max = O[i * output_size]; int index = 0; for (int j = 1; j < output_size; j++) { if (O[i * output_size + j] > max) { max = O[i * output_size + j]; index = j; } } IO[i] = (float)index; } int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int) ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); 
f.read((char *) &n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *) &n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *) &n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printMatrix(float *M, int r, int c) { for (int i = 0; i < r; i++) { for (int j = 0; j < c; j++) { cout << M[i * c + j] << ' '; } cout << endl; } cout << endl; } class Context { public: int batch_size, channels; int input_rows, input_cols, output_rows, output_cols; float learning_rate; float *X, *W1, *b1, *W2, *b2, *H, *Hrelu, *O, *SO, *dSO, *dO, *dW1, *db1, *dW2, *db2, *dH, *dHrelu; float *IO; float *y; float *onevec; float *h_IO; int input_size; int input_size_fc; int hidden_size; int output_size; int input_feature, output_feature; cublasHandle_t cublasHandle; // cudnnTensorDescriptor_t batchTensor, W1Tensor, b1Tensor, W2Tensor, b2Tensor, HTensor, OTensor; cudnnTensorDescriptor_t HTensor, OTensor; cudnnActivationDescriptor_t Reludesc; // cudnnOpTensorDescriptor_t Adddesc, Muldesc; cudnnHandle_t cudnnHandle; curandGenerator_t curandgen; float *h_W1, *h_W2, *h_b1, *h_b2, *h_SO, *h_y; float eps; // conv cudnnTensorDescriptor_t inputTensor, conv1OTensor, conv1bTensor; cudnnFilterDescriptor_t conv1Tensor; cudnnConvolutionDescriptor_t conv1Desc; cudnnConvolutionFwdAlgo_t conv1fAlgo; cudnnConvolutionBwdFilterAlgo_t conv1bfAlgo; size_t workspace_bytes; float *workspace; float *conv1O, *conv1OA; float *conv1filter, *conv1bias; float *dconv1filter, *dconv1bias; float *dconv1O, *dconv1OA; int filter_height, filter_width; // vdnn int req_algo_count; cudnnConvolutionFwdAlgoPerf_t *conv1fwdperf; cudnnConvolutionBwdFilterAlgoPerf_t *conv1bwdfperf; cudnnConvolutionBwdDataAlgoPerf_t *conv1bwddperf; cudnnPoolingDescriptor_t poolDesc; Context(int input_size, int batch_size, int hidden_size, float learning_rate, int output_size) { this->batch_size = batch_size; this->hidden_size = hidden_size; this->output_size = output_size; // number of classes; this->channels = 1; input_rows = 28; input_cols = 28; input_feature = 256; output_rows = 28; output_cols = 28; output_feature = 128; filter_height = 1, filter_width = 1; this->input_size = input_rows * input_cols * input_feature; cout << "input_size: " << this->input_size << endl; input_size_fc = output_rows * output_cols * output_feature; this->learning_rate = learning_rate; eps = 1e-8; workspace_bytes = 0; workspace = NULL; checkCUBLAS(cublasCreate(&cublasHandle)); checkCUDNN(cudnnCreate(&cudnnHandle)); checkCURAND(curandCreateGenerator(&curandgen, CURAND_RNG_PSEUDO_DEFAULT)); //vdnn req_algo_count = 
10; conv1fwdperf = (cudnnConvolutionFwdAlgoPerf_t *)malloc(req_algo_count * sizeof(cudnnConvolutionFwdAlgoPerf_t)); conv1bwdfperf = (cudnnConvolutionBwdFilterAlgoPerf_t *)malloc(req_algo_count * sizeof(cudnnConvolutionBwdFilterAlgoPerf_t)); conv1bwddperf = (cudnnConvolutionBwdDataAlgoPerf_t *)malloc(req_algo_count * sizeof(cudnnConvolutionBwdDataAlgoPerf_t)); // conv checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1OTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1bTensor)); checkCUDNN(cudnnCreateFilterDescriptor(&conv1Tensor)); checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, input_feature, input_rows, input_cols)); checkCUDNN(cudnnSetTensor4dDescriptor(conv1OTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, output_feature, output_rows, output_cols)); checkCUDNN(cudnnSetTensor4dDescriptor(conv1bTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_feature, 1, 1)); checkCUDNN(cudnnSetFilter4dDescriptor(conv1Tensor, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, output_feature, input_feature, filter_height, filter_width)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1Desc)); int pad_h = 0, pah_w = 0, u = 1, v = 1, dilation_h = 1, dilation_w = 1; checkCUDNN(cudnnSetConvolution2dDescriptor(conv1Desc, pad_h, pah_w, u, v, dilation_h, dilation_w, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc)); checkCUDNN(cudnnSetPooling2dDescriptor(poolDesc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, filter_height, filter_width, pad_h, pad_w, u, v)); int ret_algo_count; int n; // cout << "waiting..\n"; // cin >> n; checkCUDNN(cudnnFindConvolutionForwardAlgorithm(cudnnHandle, inputTensor, conv1Tensor, conv1Desc, conv1OTensor, req_algo_count, &ret_algo_count, conv1fwdperf)); cout << "Printing forward conv algo perf\n"; for (int i = 0; i < ret_algo_count; i++) { cout << i << endl; cout << "algo: " << conv1fwdperf[i].algo << endl; cout << "status: " << cudnnGetErrorString(conv1fwdperf[i].status) << endl; cout << "time(ms): " << conv1fwdperf[i].time << endl; cout << "memory(bytes): " << conv1fwdperf[i].memory << endl; cout << "mathType: " << conv1fwdperf[i].mathType << endl; cout << endl; } checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle, inputTensor, conv1OTensor, conv1Desc, conv1Tensor, req_algo_count, &ret_algo_count, conv1bwdfperf)); cout << "Printing bwdfilter conv algo perf\n"; for (int i = 0; i < ret_algo_count; i++) { cout << i << endl; cout << "algo: " << conv1bwdfperf[i].algo << endl; cout << "status: " << cudnnGetErrorString(conv1bwdfperf[i].status) << endl; cout << "time(ms): " << conv1bwdfperf[i].time << endl; cout << "memory(bytes): " << conv1bwdfperf[i].memory << endl; cout << "mathType: " << conv1bwdfperf[i].mathType << endl; cout << endl; } checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle, conv1Tensor, conv1OTensor, conv1Desc, inputTensor, req_algo_count, &ret_algo_count, conv1bwddperf)); cout << "Printing bwddata conv algo perf\n"; for (int i = 0; i < ret_algo_count; i++) { cout << i << endl; cout << "algo: " << conv1bwdfperf[i].algo << endl; cout << "status: " << cudnnGetErrorString(conv1bwdfperf[i].status) << endl; cout << "time(ms): " << conv1bwdfperf[i].time << endl; cout << "memory(bytes): " << conv1bwdfperf[i].memory << endl; cout << "mathType: " << conv1bwdfperf[i].mathType << endl; cout << endl; } // checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle, inputTensor, conv1Tensor, conv1Desc, 
conv1OTensor, // CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv1fAlgo)); // size_t fwd_wspace; // checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle, inputTensor, conv1Tensor, conv1Desc, conv1OTensor, // conv1fAlgo, &fwd_wspace)); // cout << "fwd_wspace: " << fwd_wspace << endl; // cout << "algo_used: " << conv1fAlgo << endl; // // exit(0); // workspace_bytes = max(workspace_bytes, fwd_wspace); // checkCudaErrors(cudaMalloc((void **)&conv1filter, filter_height * filter_width * input_feature * output_feature * sizeof(float) + sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dconv1filter, filter_height * filter_width * input_feature * output_feature * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&conv1bias, output_feature * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dconv1bias, output_feature * sizeof(float))); // fillValue<<<ceil(1.0 * output_feature / BW), BW>>>(conv1bias, output_feature, 0); // checkCURAND(curandGenerateNormal(curandgen, conv1filter, filter_height * filter_width * input_feature * output_feature + 1, // 0, 1 / sqrt(filter_height * filter_width * input_feature))); // checkCudaErrors(cudaMalloc((void **)&conv1O, batch_size * input_size_fc * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&conv1OA, batch_size * this->input_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dconv1O, batch_size * input_size_fc * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dconv1OA, batch_size * input_size_fc * sizeof(float))); // checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm(cudnnHandle, inputTensor, conv1OTensor, conv1Desc, conv1Tensor, // CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &conv1bfAlgo)); // size_t bwd_fwspace; // checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle, inputTensor, conv1OTensor, conv1Desc, conv1Tensor, // conv1bfAlgo, &bwd_fwspace)); // workspace_bytes = max(workspace_bytes, bwd_fwspace); // if (workspace_bytes > 0) // checkCudaErrors(cudaMalloc((void **)&workspace, workspace_bytes)); // // allocate memory in device checkCudaErrors(cudaMalloc((void **)&X, batch_size * this->input_size * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&OP, batch_size * this->input_size_fc * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&W1, input_size_fc * hidden_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dW1, input_size_fc * hidden_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&b1, hidden_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&db1, hidden_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&W2, hidden_size * output_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dW2, hidden_size * output_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&b2, output_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&db2, output_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&H, batch_size * hidden_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dH, batch_size * hidden_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&Hrelu, batch_size * hidden_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dHrelu, batch_size * hidden_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&O, batch_size * output_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&IO, batch_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dO, batch_size * output_size * sizeof(float))); // 
checkCudaErrors(cudaMalloc((void **)&SO, batch_size * output_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&dSO, batch_size * output_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&y, batch_size * sizeof(float))); // checkCudaErrors(cudaMalloc((void **)&onevec, batch_size * sizeof(float))); // fillValue<<<ceil(1.0 * batch_size / BW), BW>>>(onevec, batch_size, 1); // // { // // cout << "waiting..\n"; // // int n; // // cin >> n; // // } // // h_IO = (float *)malloc(batch_size * sizeof(float)); // // checkCUDNN(cudnnCreateTensorDescriptor(&batchTensor)); // // checkCUDNN(cudnnCreateTensorDescriptor(&W1Tensor)); // // checkCUDNN(cudnnCreateTensorDescriptor(&b1Tensor)); // // checkCUDNN(cudnnCreateTensorDescriptor(&W2Tensor)); // // checkCUDNN(cudnnCreateTensorDescriptor(&b2Tensor)); // checkCUDNN(cudnnCreateTensorDescriptor(&HTensor)); // checkCUDNN(cudnnCreateTensorDescriptor(&OTensor)); // checkCUDNN(cudnnCreateActivationDescriptor(&Reludesc)); // // checkCUDNN(cudnnCreateOpTensorDescriptor(&Opdesc)); // // checkCUDNN(cudnnSetTensor4dDescriptor(batchTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, rows * cols, 1, 1)); // // // just to be able to multiply properly // // checkCUDNN(cudnnSetTensor4dDescriptor(W1Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, hidden_size, 1, 1)); // // checkCUDNN(cudnnSetTensor4dDescriptor(b1Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, hidden_size, 1, 1)); // checkCUDNN(cudnnSetTensor4dDescriptor(HTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, hidden_size, 1, 1)); // // // just to be able to multiply properly // // checkCUDNN(cudnnSetTensor4dDescriptor(W2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, hidden_size, output_size, 1, 1)); // // checkCUDNN(cudnnSetTensor4dDescriptor(b2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_size, 1, 1)); // checkCUDNN(cudnnSetTensor4dDescriptor(OTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, output_size, 1, 1)); // checkCUDNN(cudnnSetActivationDescriptor(Reludesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0)); // // initialization // fillValue<<<ceil(1.0 * hidden_size / BW), BW>>>(b1, hidden_size, 0); // fillValue<<<ceil(1.0 * output_size / BW), BW>>>(b2, output_size, 0); // checkCURAND(curandGenerateNormal(curandgen, W1, input_size_fc * hidden_size, 0, 0.1)); // checkCURAND(curandGenerateNormal(curandgen, W2, hidden_size * output_size, 0, 0.1)); checkCURAND(curandGenerateNormal(curandgen, conv1OA, batch_size * this->input_size, 0, 0.1)); // h_W1 = (float *)malloc(input_size_fc * hidden_size * sizeof(float)); // h_W2 = (float *)malloc(hidden_size * output_size * sizeof(float)); // h_b1 = (float *)malloc(hidden_size * sizeof(float)); // h_b2 = (float *)malloc(output_size * sizeof(float)); // h_SO = (float *)malloc(batch_size * output_size * sizeof(float)); // h_y = (float *)malloc(batch_size * sizeof(float)); } ~Context() { checkCUBLAS(cublasDestroy(cublasHandle)); checkCURAND(curandDestroyGenerator(curandgen)); // checkCudaErrors(cudaFree(X)); // checkCudaErrors(cudaFree(W1)); // checkCudaErrors(cudaFree(dW1)); // checkCudaErrors(cudaFree(b1)); // checkCudaErrors(cudaFree(db1)); // checkCudaErrors(cudaFree(W2)); // checkCudaErrors(cudaFree(dW2)); // checkCudaErrors(cudaFree(b2)); // checkCudaErrors(cudaFree(db2)); // checkCudaErrors(cudaFree(H)); // checkCudaErrors(cudaFree(dH)); // checkCudaErrors(cudaFree(Hrelu)); // checkCudaErrors(cudaFree(dHrelu)); // checkCudaErrors(cudaFree(O)); // checkCudaErrors(cudaFree(dO)); // 
checkCudaErrors(cudaFree(SO)); // checkCudaErrors(cudaFree(dSO)); // checkCudaErrors(cudaFree(IO)); // free(h_IO); // checkCUDNN(cudnnDestroyActivationDescriptor(Reludesc)); // // checkCUDNN(cudnnDestroyTensorDescriptor(batchTensor)); // // checkCUDNN(cudnnDestroyTensorDescriptor(W1Tensor)); // // checkCUDNN(cudnnDestroyTensorDescriptor(b1Tensor)); // // checkCUDNN(cudnnDestroyTensorDescriptor(W2Tensor)); // // checkCUDNN(cudnnDestroyTensorDescriptor(b2Tensor)); // checkCUDNN(cudnnDestroyTensorDescriptor(HTensor)); // checkCUDNN(cudnnDestroyTensorDescriptor(OTensor)); // checkCUDNN(cudnnDestroy(cudnnHandle)); // free(h_W1); // free(h_W2); // free(h_b1); // free(h_b2); // free(h_SO); // free(h_y); // // conv // checkCUDNN(cudnnDestroyFilterDescriptor(conv1Tensor)); } void forwardPropagate(bool train=true) { // float alpha = 1.0f, beta = 0.0f; // conv // conv forward cudaEvent_t start, stop; float milli = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // // checkCudaErrors(cudaEventRecord(start)); // // checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, inputTensor, X, conv1Tensor, conv1filter, conv1Desc, // // conv1fAlgo, workspace, workspace_bytes, &beta, conv1OTensor, conv1O)); // // checkCudaErrors(cudaEventRecord(stop)); // // checkCudaErrors(cudaEventSynchronize(stop)); // // checkCudaErrors(cudaEventElapsedTime(&milli, start, stop)); // // cout << "time for conv: " << milli << endl; // // // add bias // // checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv1bTensor, conv1bias, &alpha, conv1OTensor, conv1O)); // // // activation // // checkCUDNN(cudnnActivationForward(cudnnHandle, Reludesc, &alpha, conv1OTensor, conv1O, &beta, conv1OTensor, conv1OA)); float *temp = NULL; checkCudaErrors(cudaMallocHost((void **)&temp, batch_size * input_size * sizeof(float))); checkCudaErrors(cudaEventRecord(start)); checkCudaErrors(cudaMemcpyAsync(temp, conv1OA, batch_size * input_size * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaEventRecord(stop)); cudaEventSynchronize(stop); checkCudaErrors(cudaEventElapsedTime(&milli, start, stop)); cout << "transfer time(ms): " << milli << endl; int n; cin >> n; for (int i = 0; i < n; i++) { cout << temp[i]; } checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, inputTensor, X, &beta, conv1OTensor, OP)) // int n; // cout << "waiting..\n"; // cin >> n; exit(0); // // multiply weights to input // checkCudaErrors(cudaEventRecord(start)); // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_N, CUBLAS_OP_N, // hidden_size, batch_size, input_size_fc, // &alpha, // W1, hidden_size, // conv1OA, input_size_fc, // &beta, // H, hidden_size)); // checkCudaErrors(cudaEventRecord(stop)); // checkCudaErrors(cudaEventSynchronize(stop)); // checkCudaErrors(cudaEventElapsedTime(&milli, start, stop)); // cout << "time for mul: " << milli << endl; // // exit(0); // // float *h_X = (float *)malloc(batch_size * input_size * sizeof(float)); // // checkCudaErrors(cudaMemcpy(h_X, X, batch_size * input_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "X:\n"; // // printMatrix(h_X, batch_size, input_size); // // int n; // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(cudaMemcpy(h_W1, W1, input_size * hidden_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "W1:\n"; // // printMatrix(h_W1, input_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // float *h_H = (float *)malloc(batch_size * hidden_size * sizeof(float)); // // checkCudaErrors(cudaMemcpy(h_H, H, 
batch_size * hidden_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "H:\n"; // // printMatrix(h_H, batch_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // add bias to output // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_N, CUBLAS_OP_N, // hidden_size, batch_size, 1, // &alpha, // b1, hidden_size, // onevec, 1, // &alpha, // H, hidden_size)); // // checkCudaErrors(cudaMemcpy(h_b1, b1, hidden_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "b1:\n"; // // printMatrix(h_b1, 1, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(cudaMemcpy(h_H, H, batch_size * hidden_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "H+b:\n"; // // printMatrix(h_H, batch_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // apply relu activation // checkCUDNN(cudnnActivationForward(cudnnHandle, Reludesc, &alpha, HTensor, H, &beta, HTensor, Hrelu)); // // checkCudaErrors(cudaMemcpy(h_H, Hrelu, batch_size * hidden_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "Hrelu:\n"; // // printMatrix(h_H, batch_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // multiply weights to input // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_N, CUBLAS_OP_N, // output_size, batch_size, hidden_size, // &alpha, // W2, output_size, // Hrelu, hidden_size, // &beta, // O, output_size)); // // checkCudaErrors(cudaMemcpy(h_W2, W2, hidden_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "W2:\n"; // // printMatrix(h_W2, hidden_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // float *h_O = (float *)malloc(batch_size * output_size * sizeof(float)); // // checkCudaErrors(cudaMemcpy(h_O, O, batch_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "O:\n"; // // printMatrix(h_O, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // add bias to output // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_N, CUBLAS_OP_N, // output_size, batch_size, 1, // &alpha, // b2, output_size, // onevec, 1, // &alpha, // O, output_size)); // // checkCudaErrors(cudaMemcpy(h_b2, b2, output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "b2:\n"; // // printMatrix(h_b2, 1, output_size); // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(cudaMemcpy(h_O, O, batch_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "O+b:\n"; // // printMatrix(h_O, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // if (train == false) { // inferClass<<<ceil(1.0 * batch_size / BW), BW>>>(O, IO, batch_size, output_size); // cudaMemcpy(h_IO, IO, batch_size * sizeof(float), cudaMemcpyDeviceToHost); // return; // } // // float *sm_test = (float *)malloc(batch_size * output_size * sizeof(float)); // // for (int i = 0; i < batch_size; i++) { // // for (int j = 0; j < output_size; j++) { // // sm_test[i * output_size + j] = i * output_size + j; // // } // // } // // cout << "sm_test:\n"; // // printMatrix(sm_test, batch_size, output_size); // // checkCudaErrors(cudaMemcpy(O, sm_test, batch_size * output_size * sizeof(float), cudaMemcpyHostToDevice)); // // checkCudaErrors(cudaMemcpy(sm_test, O, batch_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "O:\n"; // // printMatrix(sm_test, batch_size, output_size); // // apply softmax // // checkCudaErrors(cudaMemcpy(h_SO, O, output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // 
printMatrix(h_SO, batch_size, output_size); // checkCudaErrors(cudaDeviceSynchronize()); // checkCUDNN(cudnnSoftmaxForward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, OTensor, O, &beta, OTensor, SO)); // checkCudaErrors(cudaDeviceSynchronize()); // // checkCudaErrors(cudaMemcpy(h_SO, SO, batch_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "SO:\n"; // // printMatrix(h_SO, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(cudaMemcpy(h_SO, SO, batch_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // checkCudaErrors(cudaMemcpy(h_y, y, batch_size * sizeof(float), cudaMemcpyDeviceToHost)); // // float loss = 0; // // for (int i = 0; i < batch_size; i++) // // loss += -log(h_SO[i * output_size + int(h_y[i])]); // // // getSoftmaxLoss<<<ceil(1.0 * batch_size / BW), BW>>>(SO, y, batch_size, output_size, &loss); // // // printf("yay"); // // return loss; } // void backwardPropagate() { // float alpha = 1.0f, beta = 0.0f; // checkCudaErrors(cudaMemset(dSO, 0, batch_size * output_size * sizeof(float))); // softmaxLossBackProp<<<ceil(1.0 * batch_size / BW), BW>>>(y, SO, dSO, batch_size, output_size, eps); // // int n; // // checkCudaErrors(cudaMemcpy(h_SO, dSO, batch_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "dSO:\n"; // // printMatrix(h_SO, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // softmax backprop // checkCUDNN(cudnnSoftmaxBackward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, // OTensor, SO, OTensor, dSO, // &beta, // OTensor, dO)); // // checkCudaErrors(cudaMemcpy(h_SO, dO, batch_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "dO:\n"; // // printMatrix(h_SO, batch_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // gradient w.r.t. b2 // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_N, CUBLAS_OP_N, // output_size, 1, batch_size, // &alpha, // dO, output_size, // onevec, batch_size, // &beta, // db2, output_size)); // // checkCudaErrors(cudaMemcpy(h_b2, db2, output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "db2:\n"; // // printMatrix(h_b2, 1, output_size); // // cout << "waiting..\n"; // // cin >> n; // // checkCudaErrors(cudaMemcpy(h_b2, db2, output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // printMatrix(h_b2, 1, output_size); // checkCudaErrors(cudaDeviceSynchronize()); // // gradient w.r.t. W2 // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_N, CUBLAS_OP_T, // output_size, hidden_size, batch_size, // &alpha, // dO, output_size, // Hrelu, hidden_size, // &beta, // dW2, output_size)); // // checkCudaErrors(cudaMemcpy(h_W2, dW2, hidden_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "dW2:\n"; // // printMatrix(h_W2, hidden_size, output_size); // // cout << "waiting..\n"; // // cin >> n; // // gradient w.r.t. 
Hrelu // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_T, CUBLAS_OP_N, // hidden_size, batch_size, output_size, // &alpha, // W2, output_size, // dO, output_size, // &beta, // dHrelu, hidden_size)); // // float *h_H = (float *)malloc(batch_size * hidden_size * sizeof(float)); // // checkCudaErrors(cudaMemcpy(h_H, dHrelu, batch_size * hidden_size * sizeof(float), cudaMemcpyDeviceToHost)); // // cout << "dHrelu:\n"; // // printMatrix(h_H, batch_size, hidden_size); // // cout << "waiting..\n"; // // cin >> n; // // gradient w.r.t H // checkCUDNN(cudnnActivationBackward(cudnnHandle, Reludesc, &alpha, HTensor, Hrelu, HTensor, dHrelu, // HTensor, H, &beta, HTensor, dH)); // // gradient w.r.t. b1 // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_N, CUBLAS_OP_N, // hidden_size, 1, batch_size, // &alpha, // dH, hidden_size, // onevec, batch_size, // &beta, // db1, hidden_size)); // // gradient w.r.t. W1 // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_N, CUBLAS_OP_T, // hidden_size, input_size_fc, batch_size, // &alpha, // dH, hidden_size, // conv1OA, input_size_fc, // &beta, // dW1, hidden_size)); // // gradient w.r.t. conv1OA // checkCUBLAS(cublasSgemm(cublasHandle, // CUBLAS_OP_T, CUBLAS_OP_N, // input_size_fc, batch_size, hidden_size, // &alpha, // W1, hidden_size, // dH, hidden_size, // &beta, // dconv1OA, input_size_fc)); // // gradient w.r.t conv1O // checkCUDNN(cudnnActivationBackward(cudnnHandle, Reludesc, &alpha, conv1OTensor, conv1OA, conv1OTensor, dconv1OA, // conv1OTensor, conv1O, &beta, conv1OTensor, dconv1O)); // // gradient w.r.t. conv1bias // checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv1OTensor, dconv1O, &beta, conv1bTensor, dconv1bias)); // // gradient w.r.t. conv1filter // checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, inputTensor, X, conv1OTensor, dconv1O, conv1Desc, // conv1bfAlgo, workspace, workspace_bytes, &beta, conv1Tensor, dconv1filter)); // } // void updateWeights() { // float alpha = -learning_rate; // // update W1 // checkCUBLAS(cublasSaxpy(cublasHandle, input_size * hidden_size, // &alpha, // dW1, 1, // W1, 1)); // //update b1 // checkCUBLAS(cublasSaxpy(cublasHandle, hidden_size, // &alpha, // db1, 1, // b1, 1)); // // update W2 // checkCUBLAS(cublasSaxpy(cublasHandle, hidden_size * output_size, // &alpha, // dW2, 1, // W2, 1)); // //update b2 // checkCUBLAS(cublasSaxpy(cublasHandle, output_size, // &alpha, // db2, 1, // b2, 1)); // // update conv1bias // checkCUBLAS(cublasSaxpy(cublasHandle, output_feature, // &alpha, // dconv1bias, 1, // conv1bias, 1)); // // update conv1filter // checkCUBLAS(cublasSaxpy(cublasHandle, output_feature * input_feature * filter_height * filter_width, // &alpha, // dconv1filter, 1, // conv1filter, 1)); // // checkCudaErrors(cudaMemcpy(h_W1, W1, hidden_size * sizeof(float), cudaMemcpyDeviceToHost)); // // for (int i = 0; i < output_size; i++) { // // cout << h_W2[i] << ' '; // // } // // cout << endl; // // checkCudaErrors(cudaDeviceSynchronize()); // } void train(int num_iter, float *train_images, float *train_labels, float *test_images, float *test_labels, int N) { // int image_size = rows * cols * channels; for (int iter = 0; iter < num_iter; iter++) { int image_id = iter % (N / batch_size); // checkCudaErrors(cudaMemcpy(h_W1, W1, input_size_fc * hidden_size * sizeof(float), cudaMemcpyDeviceToHost)); // checkCudaErrors(cudaMemcpy(h_W2, W2, hidden_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // checkCudaErrors(cudaMemcpy(h_b1, b1, hidden_size * sizeof(float), 
cudaMemcpyDeviceToHost)); // checkCudaErrors(cudaMemcpy(h_b2, b2, output_size * sizeof(float), cudaMemcpyDeviceToHost)); // checkCudaErrors(cudaMemcpy(X, &train_images[image_id * batch_size * input_size], input_size * batch_size * sizeof(float), cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(y, &train_labels[image_id * batch_size], batch_size * sizeof(float), cudaMemcpyHostToDevice)); this->forwardPropagate(); // this->backwardPropagate(); // this->updateWeights(); checkCudaErrors(cudaDeviceSynchronize()); exit(0); // checkCudaErrors(cudaMemcpy(h_W2, W2, hidden_size * output_size * sizeof(float), cudaMemcpyDeviceToHost)); // for (int i = 0; i < output_size; i++) { // cout << h_W2[i] << ' '; // } // cout << endl; checkCudaErrors(cudaDeviceSynchronize()); } } int test(float *test_images, float *test_labels, int N) { // int image_size = rows * cols * channels; int start = 0; int size = batch_size; int count = 0; while (start < N) { if (start + size >= N) size = N - start; checkCudaErrors(cudaMemcpy(X, &test_images[start * input_size], input_size * size * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(y, &test_labels[start], size * sizeof(float), cudaMemcpyHostToDevice)); this->forwardPropagate(false); checkCudaErrors(cudaDeviceSynchronize()); for (int i = 0; i < size; i++) { if (h_IO[i] == test_labels[start + i]) count++; // cout << h_IO[i] << ' '; } start = start + size; } return count; } }; int main() { vector<vector<uchar> > train_images, test_images; vector<uchar> train_labels, test_labels; readMNIST(train_images, test_images, train_labels, test_labels); float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels; int input_size = rows * cols * channels; f_train_images = (float *)malloc(N_train * input_size * sizeof(float)); f_train_labels = (float *)malloc(N_train * sizeof(float)); f_test_images = (float *)malloc(N_test * input_size * sizeof(float)); f_test_labels = (float *)malloc(N_test * sizeof(float)); // checkCudaErrors(cudaMallocHost((void **)&f_train_images, N_train * input_size * sizeof(float))); // checkCudaErrors(cudaMallocHost((void **)&f_train_labels, N_train * sizeof(float))); // checkCudaErrors(cudaMallocHost((void **)&f_test_images, N_test * input_size * sizeof(float))); // checkCudaErrors(cudaMallocHost((void **)&f_test_labels, N_test * sizeof(float))); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int k = 0; k < N_train; k++) { for (int j = 0; j < rows * cols; j++) { f_train_images[k * input_size + j] = (float)train_images[k][j]; } f_train_labels[k] = (float)train_labels[k]; } for (int k = 0; k < N_test; k++) { for (int j = 0; j < rows * cols; j++) { f_test_images[k * input_size + j] = (float)test_images[k][j]; } f_test_labels[k] = (float)test_labels[k]; } for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < N_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= N_train; } for (int i = 0; i < N_train; i++) { for (int j = 0; j < input_size; j++) f_train_images[i * input_size + j] -= mean_image[j]; } for (int i = 0; i < N_test; i++) { for (int j = 0; j < input_size; j++) f_test_images[i * input_size + j] -= mean_image[j]; } // int toy_input_size = 2; // int toy_hidden_size = 5; // int toy_output_size = 2; // int batch_size = 100; // float *toy_train, *toy_train_labels; // toy_train = (float *)malloc(batch_size * toy_input_size * sizeof(float)); // toy_train_labels = (float *)malloc(batch_size * sizeof(float)); // curandGenerator_t 
curandgen; // checkCURAND(curandCreateGeneratorHost(&curandgen, CURAND_RNG_PSEUDO_DEFAULT)); // printf("toy_train, before init:\n"); // printMatrix(toy_train, batch_size, toy_input_size); // checkCURAND(curandGenerateNormal(curandgen, toy_train, batch_size * toy_input_size * sizeof(float), 0, 10)); // printf("toy_train, after init:\n"); // printMatrix(toy_train, batch_size, toy_input_size); // for (int i = 0; i < batch_size; i++) { // cout << float(i % 2) << " p\n"; // toy_train_labels[i] = float(i % 2); // } // printf("toy_train_labels, after init\n"); // printMatrix(toy_train_labels, batch_size, 1); // int n; // cin >> n; // float toy_l_rate = 1e-1; // Context context(toy_input_size, batch_size, toy_hidden_size, toy_l_rate, toy_output_size); // int n_iter = 100; // int n_rep = 10; // for (int i = 0; i < n_rep; i++) { // context.train(n_iter, toy_train, toy_train_labels, toy_train, toy_train_labels, batch_size); // cout << context.test(toy_train, toy_train_labels, batch_size) << endl << flush; // } float l_rate = 1e-3; int hidden_size = 50; int batch_size = 128; int output_size = 10; Context context(input_size, batch_size, hidden_size, l_rate, output_size); int n_iter = 10000; int n_rep = 10; for (int i = 0; i < n_rep; i++) { context.train(n_iter, f_train_images, f_train_labels, f_test_images, f_test_labels, N_train); cout << context.test(f_test_images, f_test_labels, N_test) << endl; } }
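For reference, a minimal standalone sketch (hypothetical, with made-up sizes and values; not taken from the file above) of the column-major cublasSgemm pattern that the commented-out fully-connected layers in forwardPropagate follow: H = W1 * X, followed by a rank-1 update with a vector of ones to broadcast the bias over the batch. Compile with nvcc -lcublas.

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
    const int hidden = 2, input = 3, batch = 2;
    // Column-major storage, matching the lda/ldb/ldc arguments below.
    std::vector<float> hW1 = {1, 0,  0, 1,  1, 1};   // hidden x input
    std::vector<float> hX  = {1, 2, 3,  4, 5, 6};    // input  x batch
    std::vector<float> hb1 = {0.5f, -0.5f};          // hidden x 1
    std::vector<float> hOne(batch, 1.0f);            // ones, length batch
    std::vector<float> hH(hidden * batch, 0.0f);

    float *W1, *X, *b1, *onevec, *H;
    cudaMalloc((void **)&W1, hW1.size() * sizeof(float));
    cudaMalloc((void **)&X, hX.size() * sizeof(float));
    cudaMalloc((void **)&b1, hb1.size() * sizeof(float));
    cudaMalloc((void **)&onevec, hOne.size() * sizeof(float));
    cudaMalloc((void **)&H, hH.size() * sizeof(float));
    cudaMemcpy(W1, hW1.data(), hW1.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(X, hX.data(), hX.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b1, hb1.data(), hb1.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(onevec, hOne.data(), hOne.size() * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float one = 1.0f, zero = 0.0f;
    // H (hidden x batch) = W1 (hidden x input) * X (input x batch)
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                hidden, batch, input,
                &one, W1, hidden, X, input, &zero, H, hidden);
    // H += b1 (hidden x 1) * ones (1 x batch): broadcasts the bias over the batch.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                hidden, batch, 1,
                &one, b1, hidden, onevec, 1, &one, H, hidden);

    cudaMemcpy(hH.data(), H, hH.size() * sizeof(float), cudaMemcpyDeviceToHost);
    for (int j = 0; j < batch; ++j)
        printf("column %d: %f %f\n", j, hH[j * hidden], hH[j * hidden + 1]);

    cublasDestroy(handle);
    cudaFree(W1); cudaFree(X); cudaFree(b1); cudaFree(onevec); cudaFree(H);
    return 0;
}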
09f96ba5809a0e2d49025bba07a1082c1624d603.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/hip/ForeachFunctors.cuh> namespace at { namespace native { template<template<class> class Op> std::vector<Tensor> foreach_binary_op(TensorList tensors, Scalar scalar) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(tensors.size()); for (const auto& t: tensors) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(tensors.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalar_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<2>(tensor_lists, BinaryOpScalarFunctor<scalar_t, /* depth */ 2, /* r_args_depth */ 1, /* res_arg_index */ 1>(), Op<opmath_t>(), scalar.to<opmath_t>()); }); return tensor_lists[1]; } template<template<class> class Op> void foreach_binary_op_(TensorList tensors, Scalar scalar) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(tensors.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalar_cuda_", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<1>(tensor_lists, BinaryOpScalarFunctor<scalar_t, /* depth */ 1, /* r_args_depth */ 1, /* res_arg_index */ 0>(), Op<opmath_t>(), scalar.to<opmath_t>()); }); } #define FOREACH_BINARY_OP_SCALAR(NAME, OP) \ void foreach_tensor_##NAME##_scalar_kernel_cuda_(TensorList tensors, Scalar scalar) { \ check_foreach_api_restrictions(tensors); \ if (!can_use_fast_route(tensors, scalar)) { \ return at::native::foreach_tensor_##NAME##_scalar_kernel_slow_(tensors, scalar); \ } \ \ foreach_binary_op_<OP>(tensors, scalar); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_scalar_kernel_cuda(TensorList tensors, Scalar scalar) { \ check_foreach_api_restrictions(tensors); \ if (!can_use_fast_route(tensors, scalar)) { \ return at::native::foreach_tensor_##NAME##_scalar_kernel_slow(tensors, scalar); \ } \ \ return foreach_binary_op<OP>(tensors, scalar); \ } FOREACH_BINARY_OP_SCALAR(add, std::plus); FOREACH_BINARY_OP_SCALAR(sub, std::minus); FOREACH_BINARY_OP_SCALAR(mul, std::multiplies); FOREACH_BINARY_OP_SCALAR(div, std::divides); }} // namespace at::native
09f96ba5809a0e2d49025bba07a1082c1624d603.cu
#include <ATen/Dispatch.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/cuda/ForeachFunctors.cuh> namespace at { namespace native { template<template<class> class Op> std::vector<Tensor> foreach_binary_op(TensorList tensors, Scalar scalar) { std::vector<std::vector<at::Tensor>> tensor_lists; std::vector<at::Tensor> vec_res; vec_res.reserve(tensors.size()); for (const auto& t: tensors) { vec_res.emplace_back(at::native::empty_like(t)); } tensor_lists.emplace_back(tensors.vec()); tensor_lists.emplace_back(std::move(vec_res)); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalar_cuda", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<2>(tensor_lists, BinaryOpScalarFunctor<scalar_t, /* depth */ 2, /* r_args_depth */ 1, /* res_arg_index */ 1>(), Op<opmath_t>(), scalar.to<opmath_t>()); }); return tensor_lists[1]; } template<template<class> class Op> void foreach_binary_op_(TensorList tensors, Scalar scalar) { std::vector<std::vector<at::Tensor>> tensor_lists; tensor_lists.emplace_back(tensors.vec()); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_binary_op_scalar_cuda_", [&]() { using opmath_t = get_opmath_t<scalar_t>::opmath_t; multi_tensor_apply<1>(tensor_lists, BinaryOpScalarFunctor<scalar_t, /* depth */ 1, /* r_args_depth */ 1, /* res_arg_index */ 0>(), Op<opmath_t>(), scalar.to<opmath_t>()); }); } #define FOREACH_BINARY_OP_SCALAR(NAME, OP) \ void foreach_tensor_##NAME##_scalar_kernel_cuda_(TensorList tensors, Scalar scalar) { \ check_foreach_api_restrictions(tensors); \ if (!can_use_fast_route(tensors, scalar)) { \ return at::native::foreach_tensor_##NAME##_scalar_kernel_slow_(tensors, scalar); \ } \ \ foreach_binary_op_<OP>(tensors, scalar); \ } \ \ std::vector<Tensor> foreach_tensor_##NAME##_scalar_kernel_cuda(TensorList tensors, Scalar scalar) { \ check_foreach_api_restrictions(tensors); \ if (!can_use_fast_route(tensors, scalar)) { \ return at::native::foreach_tensor_##NAME##_scalar_kernel_slow(tensors, scalar); \ } \ \ return foreach_binary_op<OP>(tensors, scalar); \ } FOREACH_BINARY_OP_SCALAR(add, std::plus); FOREACH_BINARY_OP_SCALAR(sub, std::minus); FOREACH_BINARY_OP_SCALAR(mul, std::multiplies); FOREACH_BINARY_OP_SCALAR(div, std::divides); }} // namespace at::native
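The kernels above funnel through multi_tensor_apply, which (roughly speaking) packs the addresses of a whole tensor list into kernel arguments so that one launch per chunk covers many tensors. As a self-contained illustration of that idea only (a toy, not the ATen machinery), the sketch below applies x += scalar to a small list of device buffers in a single launch; every name in it is hypothetical.

#include <cstdio>
#include <cuda_runtime.h>

constexpr int kMaxTensors = 8;

struct TensorListMeta {
    float* data[kMaxTensors];
    int numel[kMaxTensors];
    int num_tensors;
};

__global__ void add_scalar_to_list(TensorListMeta meta, float scalar) {
    // blockIdx.y selects the tensor; blockIdx.x/threadIdx.x grid-stride over its elements.
    int t = blockIdx.y;
    if (t >= meta.num_tensors) return;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < meta.numel[t];
         i += gridDim.x * blockDim.x) {
        meta.data[t][i] += scalar;
    }
}

int main() {
    TensorListMeta meta{};
    meta.num_tensors = 2;
    int sizes[2] = {5, 3};
    float host0[5] = {0, 1, 2, 3, 4}, host1[3] = {10, 20, 30};
    float* hosts[2] = {host0, host1};
    for (int t = 0; t < meta.num_tensors; ++t) {
        meta.numel[t] = sizes[t];
        cudaMalloc((void **)&meta.data[t], sizes[t] * sizeof(float));
        cudaMemcpy(meta.data[t], hosts[t], sizes[t] * sizeof(float), cudaMemcpyHostToDevice);
    }
    dim3 grid(4, meta.num_tensors);
    add_scalar_to_list<<<grid, 128>>>(meta, 1.5f);
    for (int t = 0; t < meta.num_tensors; ++t) {
        cudaMemcpy(hosts[t], meta.data[t], sizes[t] * sizeof(float), cudaMemcpyDeviceToHost);
        printf("tensor %d, first element after add: %f\n", t, hosts[t][0]);
        cudaFree(meta.data[t]);
    }
    return 0;
}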
77705c7411924ddce3bc50e000ad5de4f5ab824d.hip
// !!! This is a file automatically generated by hipify!!! #include<iostream> #include<string> #include<cuda.h> using namespace std; int main(){ struct hipDeviceProp_t prop; hipError_t err; err = hipGetDeviceProperties(&prop,0); if(err!=hipSuccess){ cout<<"Get failed. Exiting."<<endl; } else{ cout<<"Name : "<<string(prop.name)<<endl; cout<<"Total global memory : "<<prop.totalGlobalMem/(1024*1024*1024.0)<<" GB"<<endl; cout<<"Shared memmory per block : "<<prop.sharedMemPerBlock/(1024.0)<<" KB"<<endl; cout<<"32 bit registers per block : "<<prop.regsPerBlock<<endl; cout<<"Warp size (in threads) : "<<prop.warpSize<<endl; cout<<"Max pitch allowed by mem copy : "<<prop.memPitch/(1024*1024*1024.0)<<" GB"<<endl; cout<<"Max threads per block : "<<prop.maxThreadsPerBlock<<endl; cout<<"Max thread dimensions : "<<"("<<prop.maxThreadsDim[0]<<","<<prop.maxThreadsDim[1]<<","<<prop.maxThreadsDim[2]<<")"<<endl; cout<<"Max grid dimensions : "<<"("<<prop.maxGridSize[0]<<","<<prop.maxGridSize[1]<<","<<prop.maxGridSize[2]<<")"<<endl; cout<<"Max const memory : "<<prop.totalConstMem/1024.0<<" KB"<<endl; cout<<"Major compute capability : "<<prop.major<<endl; cout<<"Minor compute capability : "<<prop.minor<<endl; cout<<"Clock frequency : "<<prop.clockRate/1000.0<<" MHz"<<endl; cout<<"Alignment requirement for textures : "<<prop.textureAlignment<<endl; cout<<"Device can concurrently copy memory and execute a kernel : "<<(bool)prop.deviceOverlap<<endl; cout<<"Number of multiprocessors on device : "<<prop.multiProcessorCount<<endl; cout<<"Specified whether there is a run time limit on kernels : "<<(bool)prop.kernelExecTimeoutEnabled<<endl; cout<<"Integrated : "<<(bool)prop.integrated<<endl; cout<<"Can map host memory : "<<(bool)prop.canMapHostMemory<<endl; cout<<"Compute Mode : "<<prop.computeMode<<endl; cout<<"Concurrent kernels : "<<(bool)prop.concurrentKernels<<endl; cout<<"ECC support : "<<(bool)prop.ECCEnabled<<endl; cout<<"PCI bus id : "<<prop.pciBusID<<endl; cout<<"PCI device id : "<<prop.pciDeviceID<<endl; cout<<"TCC Driver : "<<(bool)prop.tccDriver<<endl; } return 0; }
77705c7411924ddce3bc50e000ad5de4f5ab824d.cu
#include<iostream> #include<string> #include<cuda.h> using namespace std; int main(){ struct cudaDeviceProp prop; cudaError_t err; err = cudaGetDeviceProperties(&prop,0); if(err!=cudaSuccess){ cout<<"Get failed. Exiting."<<endl; } else{ cout<<"Name : "<<string(prop.name)<<endl; cout<<"Total global memory : "<<prop.totalGlobalMem/(1024*1024*1024.0)<<" GB"<<endl; cout<<"Shared memmory per block : "<<prop.sharedMemPerBlock/(1024.0)<<" KB"<<endl; cout<<"32 bit registers per block : "<<prop.regsPerBlock<<endl; cout<<"Warp size (in threads) : "<<prop.warpSize<<endl; cout<<"Max pitch allowed by mem copy : "<<prop.memPitch/(1024*1024*1024.0)<<" GB"<<endl; cout<<"Max threads per block : "<<prop.maxThreadsPerBlock<<endl; cout<<"Max thread dimensions : "<<"("<<prop.maxThreadsDim[0]<<","<<prop.maxThreadsDim[1]<<","<<prop.maxThreadsDim[2]<<")"<<endl; cout<<"Max grid dimensions : "<<"("<<prop.maxGridSize[0]<<","<<prop.maxGridSize[1]<<","<<prop.maxGridSize[2]<<")"<<endl; cout<<"Max const memory : "<<prop.totalConstMem/1024.0<<" KB"<<endl; cout<<"Major compute capability : "<<prop.major<<endl; cout<<"Minor compute capability : "<<prop.minor<<endl; cout<<"Clock frequency : "<<prop.clockRate/1000.0<<" MHz"<<endl; cout<<"Alignment requirement for textures : "<<prop.textureAlignment<<endl; cout<<"Device can concurrently copy memory and execute a kernel : "<<(bool)prop.deviceOverlap<<endl; cout<<"Number of multiprocessors on device : "<<prop.multiProcessorCount<<endl; cout<<"Specified whether there is a run time limit on kernels : "<<(bool)prop.kernelExecTimeoutEnabled<<endl; cout<<"Integrated : "<<(bool)prop.integrated<<endl; cout<<"Can map host memory : "<<(bool)prop.canMapHostMemory<<endl; cout<<"Compute Mode : "<<prop.computeMode<<endl; cout<<"Concurrent kernels : "<<(bool)prop.concurrentKernels<<endl; cout<<"ECC support : "<<(bool)prop.ECCEnabled<<endl; cout<<"PCI bus id : "<<prop.pciBusID<<endl; cout<<"PCI device id : "<<prop.pciDeviceID<<endl; cout<<"TCC Driver : "<<(bool)prop.tccDriver<<endl; } return 0; }
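A small companion sketch (hypothetical, not part of the file above): the same property query, extended to loop over every visible device and to report cudaGetErrorString on failure instead of a bare failure message.

#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int count = 0;
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess) {
        printf("cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int dev = 0; dev < count; ++dev) {
        cudaDeviceProp prop;
        err = cudaGetDeviceProperties(&prop, dev);
        if (err != cudaSuccess) {
            printf("device %d: query failed: %s\n", dev, cudaGetErrorString(err));
            continue;
        }
        printf("device %d: %s, compute capability %d.%d, %d multiprocessors, %.2f GB global memory\n",
               dev, prop.name, prop.major, prop.minor, prop.multiProcessorCount,
               prop.totalGlobalMem / (1024.0 * 1024.0 * 1024.0));
    }
    return 0;
}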
70762890667f016507e44daba56911bb24a6aaf8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @brief * utils * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); hipLaunchKernelGGL(( FillValuesKernel), dim3(grid_size), dim3(block_size), 0, 0, data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } static int32_t RoundUpToNearestPowerOfTwo(int32_t n) { K2_CHECK_GE(n, 0); n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; return n + 1; } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). 
First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr &c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { if (num_rows <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) { row_ids[i] = -1; }; Eval(c, num_elems + 1, lambda_init_minus_one); auto lambda_phase_one = [=] __host__ __device__(int32_t i) { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same // value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < next_row_split // such that i is the result of rounding up this_row_split to // (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic // in (next_row_split - this_row_split). we can then fill in the gaps // with a logarithmic-time loop, by looking for a value that's not (-1) // by rounding the current index down to successively higher powers // of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j + // (1<<power) < next_row_split" in the loop condition. // Note, we don't want a loop-within-a-loop because of how SIMT // works... 
row_ids[j] = i; } } }; Eval(c, num_elems + 1, lambda_phase_one); auto lambda_phase_two = [=] __host__ __device__(int32_t j) { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }; // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. Eval(c, num_elems, lambda_phase_two); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr &c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { // process corner case first if (num_elems == 0) { auto lambda_set_values = [=] __host__ __device__(int32_t i) { row_splits[i] = 0; }; Eval(c, num_rows + 1, lambda_set_values); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __host__ __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; Eval(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = num_elems * threads_per_elem; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t task_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_task; if (task_idx > num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = threadIdx.x % threads_per_task; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. 
The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (num_items <= 0) { K2_DCHECK_EQ(num_items, 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2 && task_idx < num_tasks) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split / dart_separation - this_row_split / dart_separation); K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; // `job` is the job-index within this task, i.e. the } } void GetTaskRedirect(hipStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. for (int32_t task = 0; task < num_tasks; task++) { // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (next_row_split / dart_separation - this_row_split / dart_separation); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; job_id_this_task++) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task); // 2nd half. redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; // `job` is the job-index within this task, i.e. 
the } } } else { // compare 8 to 2, which is the expected number of jobs per task. having 8 // substantially greater than 2 gives a fairly big safety factor. However // this is still far from ideal in scenarios where the number of tasks might // be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task>) , dim3(block_size), dim3(grid_size), 0, stream, num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { // TODO } } // namespace k2
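A host-only sketch of the row_splits <-> row_ids correspondence that the kernels above convert between, mirroring the CPU branches of RowSplitsToRowIds and RowIdsToRowSplits; the helper names and the example ragged shape are made up for illustration. Here row_splits = {0, 2, 2, 5} describes three rows of sizes 2, 0 and 3, and the matching row_ids over the five elements is {0, 0, 2, 2, 2}.

#include <cstdio>
#include <vector>

// For each row, write its index into every element position it owns.
std::vector<int> SplitsToIds(const std::vector<int>& splits) {
    int num_rows = (int)splits.size() - 1;
    std::vector<int> ids(splits[num_rows]);
    for (int row = 0; row < num_rows; ++row)
        for (int i = splits[row]; i < splits[row + 1]; ++i) ids[i] = row;
    return ids;
}

// Walk the (non-decreasing) row_ids and record where each row starts;
// the inner while also fills in starts for empty rows.
std::vector<int> IdsToSplits(const std::vector<int>& ids, int num_rows) {
    std::vector<int> splits(num_rows + 1, 0);
    int cur_row = -1;
    for (int i = 0; i < (int)ids.size(); ++i)
        while (cur_row < ids[i]) splits[++cur_row] = i;
    while (cur_row < num_rows) splits[++cur_row] = (int)ids.size();
    return splits;
}

int main() {
    std::vector<int> splits = {0, 2, 2, 5};
    std::vector<int> ids = SplitsToIds(splits);    // expect 0 0 2 2 2
    std::vector<int> back = IdsToSplits(ids, 3);   // expect 0 2 2 5
    for (int v : ids) printf("%d ", v);
    printf("| ");
    for (int v : back) printf("%d ", v);
    printf("\n");
    return 0;
}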
70762890667f016507e44daba56911bb24a6aaf8.cu
/** * @brief * utils * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); FillValuesKernel<<<grid_size, block_size>>>(data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } static int32_t RoundUpToNearestPowerOfTwo(int32_t n) { K2_CHECK_GE(n, 0); n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; return n + 1; } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. 
Suppose we're given num_indexes == length(n) == length(t) - 1. Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr &c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { if (num_rows <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) { row_ids[i] = -1; }; Eval(c, num_elems + 1, lambda_init_minus_one); auto lambda_phase_one = [=] __host__ __device__(int32_t i) { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same // value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < next_row_split // such that i is the result of rounding up this_row_split to // (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic // in (next_row_split - this_row_split). we can then fill in the gaps // with a logarithmic-time loop, by looking for a value that's not (-1) // by rounding the current index down to successively higher powers // of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j + // (1<<power) < next_row_split" in the loop condition. // Note, we don't want a loop-within-a-loop because of how SIMT // works... 
row_ids[j] = i; } } }; Eval(c, num_elems + 1, lambda_phase_one); auto lambda_phase_two = [=] __host__ __device__(int32_t j) { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }; // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. Eval(c, num_elems, lambda_phase_two); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr &c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { // process corner case first if (num_elems == 0) { auto lambda_set_values = [=] __host__ __device__(int32_t i) { row_splits[i] = 0; }; Eval(c, num_rows + 1, lambda_set_values); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __host__ __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; Eval(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = num_elems * threads_per_elem; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t task_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_task; if (task_idx > num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = threadIdx.x % threads_per_task; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. 
The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (num_items <= 0) { K2_DCHECK_EQ(num_items, 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2 && task_idx < num_tasks) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split / dart_separation - this_row_split / dart_separation); K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; // `job` is the job-index within this task, i.e. the } } void GetTaskRedirect(cudaStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. for (int32_t task = 0; task < num_tasks; task++) { // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (next_row_split / dart_separation - this_row_split / dart_separation); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; job_id_this_task++) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task); // 2nd half. redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; // `job` is the job-index within this task, i.e. 
the } } } else { // compare 8 to 2, which is the expected number of jobs per task. having 8 // substantially greater than 2 gives a fairly big safety factor. However // this is still far from ideal in scenarios where the number of tasks might // be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task> <<<grid_size, block_size, 0, stream>>>( num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { // TODO } } // namespace k2
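The row_splits/row_ids encoding that RowSplitsToRowIds and RowIdsToRowSplits convert between is easiest to see on a concrete shape. The following host-side sketch is illustrative only (it is not part of k2, which works through ContextPtr and device arrays); it round-trips a ragged shape with an empty row:

// Host-side sketch of the row_splits <-> row_ids round trip handled above.
// Example shape: 4 rows of sizes {2, 0, 3, 1}
//   row_splits = [0, 2, 2, 5, 6]    (length num_rows + 1)
//   row_ids    = [0, 0, 2, 2, 2, 3] (length num_elems)
#include <cassert>
#include <cstdint>
#include <vector>

static std::vector<int32_t> RowSplitsToRowIdsCpu(
    const std::vector<int32_t> &row_splits) {
  std::vector<int32_t> row_ids(row_splits.back());
  for (int32_t row = 0; row + 1 < (int32_t)row_splits.size(); ++row)
    for (int32_t i = row_splits[row]; i < row_splits[row + 1]; ++i)
      row_ids[i] = row;
  return row_ids;
}

static std::vector<int32_t> RowIdsToRowSplitsCpu(
    const std::vector<int32_t> &row_ids, int32_t num_rows) {
  std::vector<int32_t> row_splits(num_rows + 1, 0);
  for (int32_t id : row_ids) ++row_splits[id + 1];  // per-row counts
  for (int32_t r = 0; r < num_rows; ++r)            // exclusive prefix sum
    row_splits[r + 1] += row_splits[r];
  return row_splits;
}

int main() {
  const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6};
  const std::vector<int32_t> row_ids = RowSplitsToRowIdsCpu(row_splits);
  assert((row_ids == std::vector<int32_t>{0, 0, 2, 2, 2, 3}));
  assert(RowIdsToRowSplitsCpu(row_ids, 4) == row_splits);
  return 0;
}

The empty row (row 1) is why the GPU path cannot always assume `no_empty_rows`: consecutive row_ids may jump by more than one, so several row_splits entries have to be written at a single element boundary.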
b2b2afebb592853daad8021f7719c20462997143.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THHUNN/THHUNN.h> #include <TH/THHalf.h> #include <THH/THHNumerics.cuh> #include <THH/THHAtomics.cuh> #include <THHUNN/common.h> #include <THH/THHDeviceTensor.cuh> #include <THH/THHDeviceTensorUtils.cuh> #include <THH/THHDeviceUtils.cuh> #include <THH/THHApply.cuh> #include <c10/macros/Macros.h> #include <ATen/hip/detail/KernelUtils.h> #include <thrust/functional.h> template <typename Dtype> __global__ void SpatialClassNLLCriterion_updateOutput_no_reduce_kernel( int64_t nthreads, THCDeviceTensor<Dtype, 4> input, THCDeviceTensor<THCIndex_t, 3> target, THCDeviceTensor<Dtype, 3> output, Dtype *weights, int64_t ignore_index) { int64_t batch_size = input.getSize(0); int64_t H = input.getSize(2); int64_t W = input.getSize(3); CUDA_KERNEL_LOOP(index, nthreads) { const int64_t b = index % batch_size; const int64_t h = (index / batch_size) % H; const int64_t w = (index / (batch_size * H)) % W; int64_t cur_target = target[b][h][w]; if (cur_target == ignore_index) { output[b][h][w] = ScalarConvert<int, Dtype>::to(0); continue; } Dtype value = input[b][cur_target][h][w]; Dtype weight = weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1); output[b][h][w] = -value * weight; } } template <typename Dtype> __global__ void SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel( int64_t nthreads, THCDeviceTensor<THCIndex_t, 3> target, THCDeviceTensor<Dtype, 3> gradOutput, THCDeviceTensor<Dtype, 4> gradInput, Dtype *weights, int64_t ignore_index) { int64_t batch_size = target.getSize(0); int64_t H = target.getSize(1); int64_t W = target.getSize(2); CUDA_KERNEL_LOOP(index, nthreads) { const int64_t b = index % batch_size; const int64_t h = (index / batch_size) % H; const int64_t w = (index / (batch_size * H)) % W; int64_t cur_target = target[b][h][w]; if (cur_target == ignore_index) { continue; } Dtype value = -(weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1)); gradInput[b][cur_target][h][w] = value * gradOutput[b][h][w]; } } template <typename T, typename AccumT> #if defined(__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( T *output, T *total_weight, T *input, THCIndex_t *target, T *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample, int64_t ignore_index) { __shared__ AccumT partial_sums[CUDA_NUM_THREADS]; int i, t; T cur_weight; AccumT input_sum = 0; AccumT acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i]; if (t != ignore_index) { assert(t >= 0 && t < n_classes); cur_weight = weights ? 
weights[t] : ScalarConvert<int, T>::to(1); input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } } input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<AccumT>(), AccumT(0)); __syncthreads(); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<AccumT>(), AccumT(0)); if (threadIdx.x == 0) { atomicAdd(total_weight, ScalarConvert<AccumT, T>::to(acc_weight)); atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum)); } } template<typename T> __global__ void cunn_SpatialClassNLLCriterion_sizeAverage_kernel( T *output, T *total_weight) { if (*total_weight > 0) *output = THCNumerics<T>::div(*output, *total_weight); } template<typename T> __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( T *gradInput, T *gradOutput, THCIndex_t *target, T *weights, T *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample, int64_t ignore_index) { if (*total_weight <= 0) return; int i, t; T norm = size_average ? (ScalarConvert<int, T>::to(1) / *total_weight) : ScalarConvert<int, T>::to(1); int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i]; if (t != ignore_index) { assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : ScalarConvert<int, T>::to(1)) * norm * gradOutput[0]; } } } #include <THHUNN/generic/SpatialClassNLLCriterion.hip> #include <THH/THHGenerateFloatTypes.h>
b2b2afebb592853daad8021f7719c20462997143.cu
#include <THCUNN/THCUNN.h> #include <TH/THHalf.h> #include <THC/THCNumerics.cuh> #include <THC/THCAtomics.cuh> #include <THCUNN/common.h> #include <THC/THCDeviceTensor.cuh> #include <THC/THCDeviceTensorUtils.cuh> #include <THC/THCDeviceUtils.cuh> #include <THC/THCApply.cuh> #include <c10/macros/Macros.h> #include <ATen/cuda/detail/KernelUtils.h> #include <thrust/functional.h> template <typename Dtype> __global__ void SpatialClassNLLCriterion_updateOutput_no_reduce_kernel( int64_t nthreads, THCDeviceTensor<Dtype, 4> input, THCDeviceTensor<THCIndex_t, 3> target, THCDeviceTensor<Dtype, 3> output, Dtype *weights, int64_t ignore_index) { int64_t batch_size = input.getSize(0); int64_t H = input.getSize(2); int64_t W = input.getSize(3); CUDA_KERNEL_LOOP(index, nthreads) { const int64_t b = index % batch_size; const int64_t h = (index / batch_size) % H; const int64_t w = (index / (batch_size * H)) % W; int64_t cur_target = target[b][h][w]; if (cur_target == ignore_index) { output[b][h][w] = ScalarConvert<int, Dtype>::to(0); continue; } Dtype value = input[b][cur_target][h][w]; Dtype weight = weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1); output[b][h][w] = -value * weight; } } template <typename Dtype> __global__ void SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel( int64_t nthreads, THCDeviceTensor<THCIndex_t, 3> target, THCDeviceTensor<Dtype, 3> gradOutput, THCDeviceTensor<Dtype, 4> gradInput, Dtype *weights, int64_t ignore_index) { int64_t batch_size = target.getSize(0); int64_t H = target.getSize(1); int64_t W = target.getSize(2); CUDA_KERNEL_LOOP(index, nthreads) { const int64_t b = index % batch_size; const int64_t h = (index / batch_size) % H; const int64_t w = (index / (batch_size * H)) % W; int64_t cur_target = target[b][h][w]; if (cur_target == ignore_index) { continue; } Dtype value = -(weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1)); gradInput[b][cur_target][h][w] = value * gradOutput[b][h][w]; } } template <typename T, typename AccumT> #if defined(__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_1(1024) #endif __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( T *output, T *total_weight, T *input, THCIndex_t *target, T *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample, int64_t ignore_index) { __shared__ AccumT partial_sums[CUDA_NUM_THREADS]; int i, t; T cur_weight; AccumT input_sum = 0; AccumT acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i]; if (t != ignore_index) { assert(t >= 0 && t < n_classes); cur_weight = weights ? 
weights[t] : ScalarConvert<int, T>::to(1); input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } } input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<AccumT>(), AccumT(0)); __syncthreads(); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<AccumT>(), AccumT(0)); if (threadIdx.x == 0) { atomicAdd(total_weight, ScalarConvert<AccumT, T>::to(acc_weight)); atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum)); } } template<typename T> __global__ void cunn_SpatialClassNLLCriterion_sizeAverage_kernel( T *output, T *total_weight) { if (*total_weight > 0) *output = THCNumerics<T>::div(*output, *total_weight); } template<typename T> __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( T *gradInput, T *gradOutput, THCIndex_t *target, T *weights, T *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample, int64_t ignore_index) { if (*total_weight <= 0) return; int i, t; T norm = size_average ? (ScalarConvert<int, T>::to(1) / *total_weight) : ScalarConvert<int, T>::to(1); int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i]; if (t != ignore_index) { assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : ScalarConvert<int, T>::to(1)) * norm * gradOutput[0]; } } } #include <THCUNN/generic/SpatialClassNLLCriterion.cu> #include <THC/THCGenerateFloatTypes.h>
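As a plain-CPU reference for what the no-reduce kernels above compute per pixel (the reduction and size_average paths are omitted), a sketch follows; the helper name and flat NCHW/NHW layouts are assumptions for illustration, not THCUNN code:

// Illustrative CPU reference for the "no reduce" spatial NLL kernels above.
// input is N*C*H*W, target/output are N*H*W:
//   output[b][h][w] = -weight[target[b][h][w]] * input[b][target[b][h][w]][h][w]
// and 0 where target == ignore_index.
#include <cstdint>
#include <vector>

static void SpatialClassNLLNoReduceCpu(const std::vector<float> &input,
                                       const std::vector<int64_t> &target,
                                       const std::vector<float> &weights,  // may be empty
                                       std::vector<float> &output,
                                       int64_t N, int64_t C, int64_t H, int64_t W,
                                       int64_t ignore_index) {
  output.assign(N * H * W, 0.0f);
  for (int64_t b = 0; b < N; ++b)
    for (int64_t h = 0; h < H; ++h)
      for (int64_t w = 0; w < W; ++w) {
        const int64_t t = target[(b * H + h) * W + w];
        if (t == ignore_index) continue;
        const float wt = weights.empty() ? 1.0f : weights[t];
        const float val = input[((b * C + t) * H + h) * W + w];
        output[(b * H + h) * W + w] = -val * wt;
      }
}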
agent-4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************* /* ECE 277: GPU Programming 2021 Winter /* Author and Instructor: Cheolhong An /* Copyright 2020 /* University of California, San Diego /*************************************************************************/ #define COLS 4 #define ROWS 4 #define RIGHT 0 #define DOWN 1 #define LEFT 2 #define UP 3 short *d_action; int size = sizeof(short); __global__ void cuda_init() {} __global__ void cuda_agent(int2 *cstate, short *d_action) { int idx = 0; int pos_x = cstate[idx].x, pos_y = cstate[idx].y; short action = RIGHT; if (pos_y == 0) { action = pos_x < COLS - 1 ? RIGHT : DOWN; } if (pos_x == COLS - 1) { action = pos_y < ROWS - 2 ? DOWN : LEFT; } d_action[idx] = action; } void agent_init() { // allocate a short-type global memory, d_action ptr (allocated GPU) hipMalloc((void **)&d_action, size); hipLaunchKernelGGL(( cuda_init) , dim3(1), dim3(1), 0, 0, ); } short* agent_action(int2* cstate) { // invokes a CUDA kernel (cuda_agent), cstate ptr (allocated GPU) hipLaunchKernelGGL(( cuda_agent) , dim3(1), dim3(1), 0, 0, cstate, d_action); return d_action; } // hipMemcpy(&d_action, source, size, hipMemcpyDeviceToHost);
agent-4.cu
/************************************************************************* /* ECE 277: GPU Programming 2021 Winter /* Author and Instructor: Cheolhong An /* Copyright 2020 /* University of California, San Diego /*************************************************************************/ #define COLS 4 #define ROWS 4 #define RIGHT 0 #define DOWN 1 #define LEFT 2 #define UP 3 short *d_action; int size = sizeof(short); __global__ void cuda_init() {} __global__ void cuda_agent(int2 *cstate, short *d_action) { int idx = 0; int pos_x = cstate[idx].x, pos_y = cstate[idx].y; short action = RIGHT; if (pos_y == 0) { action = pos_x < COLS - 1 ? RIGHT : DOWN; } if (pos_x == COLS - 1) { action = pos_y < ROWS - 2 ? DOWN : LEFT; } d_action[idx] = action; } void agent_init() { // allocate a short-type global memory, d_action ptr (allocated GPU) cudaMalloc((void **)&d_action, size); cuda_init <<<1, 1>>> (); } short* agent_action(int2* cstate) { // invokes a CUDA kernel (cuda_agent), cstate ptr (allocated GPU) cuda_agent <<<1, 1>>> (cstate, d_action); return d_action; } // cudaMemcpy(&d_action, source, size, cudaMemcpyDeviceToHost);
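A hypothetical host-side driver for the two entry points above might look as follows; only agent_init and agent_action come from these files, while the state allocation and the copy-back mirror the commented-out cudaMemcpy hint:

// Hypothetical driver for agent_init()/agent_action() (illustration only;
// the real course framework that supplies cstate is not shown in these files).
#include <cstdio>
#include <cuda_runtime.h>

extern void agent_init();
extern short *agent_action(int2 *cstate);

int main() {
  agent_init();
  int2 h_state;
  h_state.x = 0;  // agent starts at the top-left cell of the 4x4 grid
  h_state.y = 0;
  int2 *d_state = nullptr;
  cudaMalloc((void **)&d_state, sizeof(int2));
  cudaMemcpy(d_state, &h_state, sizeof(int2), cudaMemcpyHostToDevice);
  short *d_act = agent_action(d_state);  // returns the device pointer d_action
  short h_act = 0;
  cudaMemcpy(&h_act, d_act, sizeof(short), cudaMemcpyDeviceToHost);
  printf("chosen action = %d\n", (int)h_act);
  cudaFree(d_state);
  return 0;
}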
d4d6b1b23f08cfc40a1d486533f52783872cc514.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cassert> __global__ void init_random_numbers(unsigned int seed) { printf("seed = %u\n", seed); atomicAdd((int *)(12312433432), 123); // writes to an arbitrary (almost certainly invalid) address assert(seed != 0); } int main() { hipLaunchKernelGGL(( init_random_numbers), dim3(1024), dim3(1024), 0, 0, 1); return 0; }
d4d6b1b23f08cfc40a1d486533f52783872cc514.cu
#include <cstdio> #include <cassert> __global__ void init_random_numbers(unsigned int seed) { printf("seed = %u\n", seed); atomicAdd((int *)(12312433432), 123); // writes to an arbitrary (almost certainly invalid) address assert(seed != 0); } int main() { init_random_numbers<<<1024, 1024>>>(1); return 0; }
b8312c536487317c02b7e946ac8f2a93562dfbf8.hip
// !!! This is a file automatically generated by hipify!!! #include "Layer_GPU.cuh" #include "Utilities.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <cmath> #include <random> #include <iostream> #include <limits> #define TILE_DIM 32 #define BLOCK_SIZE 32 Layer_GPU::Layer_GPU(const unsigned nrInputs, const unsigned nrNeurons) : nrInputs(nrInputs), nrNeurons(nrNeurons), sizeW((nrInputs) * (nrNeurons)), trainRate(0.1), momentum(0.1), bias(1.0) { utils::CheckError(hipMalloc((void**)&d_weights, sizeW * sizeof(double)), __FILE__, __LINE__); utils::CheckError(hipMalloc((void**)&d_deltaWeights, sizeW * sizeof(double)), __FILE__, __LINE__); utils::CheckError(hipMalloc((void**)&d_activationResult, nrNeurons * sizeof(double)), __FILE__, __LINE__); utils::CheckError(hipMalloc((void**)&d_gradients, nrNeurons * sizeof(double)), __FILE__, __LINE__); initDeltaWeights(); } Layer_GPU::~Layer_GPU() { hipFree(d_weights); hipFree(d_deltaWeights); hipFree(d_activationResult); hipFree(d_gradients); } void Layer_GPU::SetBias(const double bias) { this->bias = bias; } void Layer_GPU::InitWeights() { auto weights = new double[sizeW]; std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<> dis(-1.0, 1.0); for (auto i = 0; i < static_cast<decltype(i)>(sizeW); ++i) { weights[i] = dis(gen); } utils::CheckError(hipMemcpy(d_weights, weights, sizeW * sizeof(double), hipMemcpyHostToDevice), __FILE__, __LINE__); delete[] weights; } void Layer_GPU::initDeltaWeights() { auto deltaWeights = new double[sizeW]; for (auto i = 0; i < static_cast<decltype(i)>(sizeW); ++i) { deltaWeights[i] = 0; } utils::CheckError(hipMemcpy(d_deltaWeights, deltaWeights, sizeW * sizeof(double), hipMemcpyHostToDevice), __FILE__, __LINE__); delete[] deltaWeights; } void Layer_GPU::SetTrainRate(const double trainRate) { this->trainRate = trainRate; } void Layer_GPU::SetMomentum(const double momentum) { this->momentum = momentum; } const double* Layer_GPU::Output_GPU() const { return d_activationResult; } unsigned Layer_GPU::OutputSize() const { return nrNeurons; } //------------------------------------------------------------------------ // cuda kernels //------------------------------------------------------------------------ __device__ double cuda_activationFunc(double value) { auto res = 1.0 / (1.0 + ::exp(-value)); return res; } __device__ double cuda_activeationFuncD(double value) { auto s = cuda_activationFunc(value); return s * (1.0 - s); } __global__ void matvec_kernel(const double* A, const double* B, double* C, unsigned ACols, unsigned BCols) { double CValue = 0; int Row = blockIdx.y * TILE_DIM + threadIdx.y; int Col = blockIdx.x * TILE_DIM + threadIdx.x; __shared__ float sd_A[TILE_DIM]; __shared__ float sd_B[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + ACols) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < ACols && Row == 0) { sd_A[threadIdx.x] = A[k * TILE_DIM + threadIdx.x]; } else { //sd_A[threadIdx.x] = 0.0; } if (k * TILE_DIM + threadIdx.y < ACols && Col < BCols) { sd_B[threadIdx.y][threadIdx.x] = B[(k * TILE_DIM + threadIdx.y) * BCols + Col]; } else { //sd_B[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += sd_A[n] * sd_B[n][threadIdx.x]; } __syncthreads(); sd_A[threadIdx.x] = 0.0; sd_B[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); } if (Row < 1 && Col < BCols) { C[((blockIdx.y * blockDim.y + threadIdx.y)) + (blockIdx.x*blockDim.x) + threadIdx.x] = cuda_activationFunc(CValue); } } __global__ void cuda_total(double * 
d_input, double * d_output, int len) { // Load a segment of the input vector into shared memory __shared__ float partialSum[2 * BLOCK_SIZE]; int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x*blockDim.x; if ((start + t) < len) { partialSum[t] = d_input[start + t]; } else { partialSum[t] = 0.0; } if ((start + blockDim.x + t) < len) { partialSum[blockDim.x + t] = d_input[start + blockDim.x + t]; } else { partialSum[blockDim.x + t] = 0.0; } // Traverse reduction tree for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) partialSum[t] += partialSum[t + stride]; } __syncthreads(); // Write the computed sum of the block to the output vector at correct index if (t == 0 && (globalThreadId * 2) < len) { d_output[blockIdx.x] = partialSum[t]; } } __global__ void cuda_sumdw(double *d_weights, double *d_gradients, double *d_output, unsigned sizeW, unsigned sizeGrad, unsigned sizeOutput) { int threadId = blockIdx.x*blockDim.x + threadIdx.x; if (threadId < sizeOutput) { for (auto i = 0; i < static_cast<decltype(i)>(sizeGrad); ++i) { d_output[threadId] += d_weights[i * sizeOutput + threadId] * d_gradients[i]; } } } __global__ void cuda_gradientsLastLayer(const double *d_targetVals, const double *d_activationResults, double *d_gradients, unsigned nrNeurons) { unsigned i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nrNeurons) { auto delta = d_targetVals[i] - d_activationResults[i]; d_gradients[i] = delta * cuda_activeationFuncD(d_activationResults[i]); } } __global__ void cuda_gradients(const double *d_deltas, const double *d_activationResults, double *d_gradients, unsigned nrNeurons) { unsigned i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nrNeurons) { d_gradients[i] = d_deltas[i] * cuda_activeationFuncD(d_activationResults[i]); } } __global__ void cuda_updateWeights(double * d_weights, double * d_deltaWeights, double * d_activationResults, double * d_gradients, unsigned sizeW, unsigned nrNeurons, double trainRate, double momentum) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < sizeW) { auto oldDeltaWeight = d_deltaWeights[i]; auto idx = i % (nrNeurons); auto newDeltaWeight = trainRate * d_activationResults[idx] * d_gradients[idx] + momentum * oldDeltaWeight; d_deltaWeights[i] = newDeltaWeight; d_weights[i] += d_deltaWeights[i]; } } __global__ void cuda_min_max(double * d_input, double * d_outputMin, double *d_outputMax, int len, double lowest, double highest) { // Load a segment of the input vector into shared memory __shared__ double partialMin[2 * BLOCK_SIZE]; __shared__ double partialMax[2 * BLOCK_SIZE]; int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x*blockDim.x; if ((start + t) < len) { partialMin[t] = d_input[start + t]; partialMax[t] = d_input[start + t]; } else { partialMin[t] = highest; partialMax[t] = lowest; } if ((start + blockDim.x + t) < len) { partialMin[blockDim.x + t] = d_input[start + blockDim.x + t]; partialMax[blockDim.x + t] = d_input[start + blockDim.x + t]; } else { partialMin[blockDim.x + t] = highest; partialMax[blockDim.x + t] = lowest; } // Traverse reduction tree for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) { if (partialMin[t] > partialMin[t + stride]) { partialMin[t] = partialMin[t + stride]; } if (partialMax[t] < partialMax[t + stride]) { partialMax[t] = partialMax[t + stride]; } } } __syncthreads(); // Write the 
computed sum of the block to the output vector at correct index if (t == 0 && (globalThreadId * 2) < len) { d_outputMin[blockIdx.x] = partialMin[t]; d_outputMax[blockIdx.x] = partialMax[t]; } } __global__ void cuda_normalizeWeights(double * d_weights, unsigned sizeW, double min, double max) { /* const auto a = -1.0; const auto b = 1.0; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < sizeW) { d_weights[i] = a + (b - a) * (d_weights[i] - min) / (max - min); } */ } //------------------------------------------------------------------------ // end of cuda kernels //------------------------------------------------------------------------ double *Layer_GPU::SumDW_GPU() const { double *d_sumdw = nullptr; utils::CheckError(hipMalloc((void**)&d_sumdw, nrInputs * sizeof(double)), __FILE__, __LINE__); dim3 DimGrid(32, 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); cuda_sumdw << <DimGrid, DimBlock >> > (d_weights, d_gradients, d_sumdw, sizeW, nrNeurons, nrInputs); return d_sumdw; } void Layer_GPU::FeedForward_GPU(const double* d_inputs) { dim3 dim_grid(1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); matvec_kernel << <dim_grid, dim_block >> >(d_inputs, d_weights, d_activationResult, nrInputs, nrNeurons); } void Layer_GPU::BackPropagation_GPU(const double *inputs) { calcGradients_GPU(inputs); } void Layer_GPU::BackPropagation_GPU(const std::shared_ptr<Layer_GPU> &prevLayer) { calcGradients_GPU(prevLayer); } //calcualte the gradients for the output layer void Layer_GPU::calcGradients_GPU(const double* targetVals) { dim3 grids(32, 32, 1); dim3 blocks(32, 32, 1); double *d_targetVals = nullptr; utils::CheckError(hipMalloc((void**)&d_targetVals, nrInputs * sizeof(double)), __FILE__, __LINE__); //utils::CheckError(hipMemcpy(d_targetVals, targetVals, sizeW * sizeof(double), hipMemcpyHostToDevice), __FILE__, __LINE__); cuda_gradientsLastLayer << < grids, blocks >> > (targetVals, d_activationResult, d_gradients, nrNeurons); utils::CheckError(hipGetLastError(), __FILE__, __LINE__); } //calculate the gradients for the hidden layer void Layer_GPU::calcGradients_GPU(const std::shared_ptr<Layer_GPU> &prevLayer) { dim3 grids(32, 32, 1); dim3 blocks(32, 32, 1); cuda_gradients << < grids, blocks >> > (prevLayer->SumDW_GPU(), d_activationResult, d_gradients, nrNeurons); utils::CheckError(hipGetLastError(), __FILE__, __LINE__); } void Layer_GPU::UpdateWeights_GPU() { dim3 grids(::ceil(sizeW / 256.0), 1, 1); dim3 blocks(512, 1, 1); cuda_updateWeights << <grids, blocks >> > (d_weights, d_deltaWeights, d_activationResult, d_gradients, sizeW, nrNeurons, trainRate, momentum); utils::CheckError(hipGetLastError(), __FILE__, __LINE__); if (sizeW > 1) { unsigned numOutputElements = sizeW / (BLOCK_SIZE << 1); if (sizeW % (BLOCK_SIZE << 1)) { numOutputElements++; } double *d_outputMin = nullptr; double *d_outputMax = nullptr; utils::CheckError(hipMalloc((void**)&d_outputMin, numOutputElements * sizeof(double)), __FILE__, __LINE__); utils::CheckError(hipMalloc((void**)&d_outputMax, numOutputElements * sizeof(double)), __FILE__, __LINE__); dim3 DimGrid(numOutputElements, 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); cuda_min_max << <DimGrid, DimBlock >> > (d_weights, d_outputMin, d_outputMax, sizeW, std::numeric_limits<double>::min(), std::numeric_limits<double>::max()); double * outputMin = new double[numOutputElements]; double * outputMax = new double[numOutputElements]; utils::CheckError(hipMemcpy(outputMin, d_outputMin, numOutputElements * sizeof(double), hipMemcpyDeviceToHost), __FILE__, __LINE__); utils::CheckError(hipMemcpy(outputMax, 
d_outputMax, numOutputElements * sizeof(double), hipMemcpyDeviceToHost), __FILE__, __LINE__); double min = outputMin[0]; double max = outputMax[0]; for (unsigned i = 1; i < numOutputElements; i++) { if (min > outputMin[i]) { min = outputMin[i]; } if (max < outputMax[i]) { max = outputMax[i]; } } cuda_normalizeWeights << <grids, blocks >> > (d_weights, sizeW, min, max); utils::CheckError(hipGetLastError(), __FILE__, __LINE__); hipFree(d_outputMin); hipFree(d_outputMax); delete[] outputMin; delete[] outputMax; } }
b8312c536487317c02b7e946ac8f2a93562dfbf8.cu
#include "Layer_GPU.cuh" #include "Utilities.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cmath> #include <random> #include <iostream> #include <limits> #define TILE_DIM 32 #define BLOCK_SIZE 32 Layer_GPU::Layer_GPU(const unsigned nrInputs, const unsigned nrNeurons) : nrInputs(nrInputs), nrNeurons(nrNeurons), sizeW((nrInputs) * (nrNeurons)), trainRate(0.1), momentum(0.1), bias(1.0) { utils::CheckError(cudaMalloc((void**)&d_weights, sizeW * sizeof(double)), __FILE__, __LINE__); utils::CheckError(cudaMalloc((void**)&d_deltaWeights, sizeW * sizeof(double)), __FILE__, __LINE__); utils::CheckError(cudaMalloc((void**)&d_activationResult, nrNeurons * sizeof(double)), __FILE__, __LINE__); utils::CheckError(cudaMalloc((void**)&d_gradients, nrNeurons * sizeof(double)), __FILE__, __LINE__); initDeltaWeights(); } Layer_GPU::~Layer_GPU() { cudaFree(d_weights); cudaFree(d_deltaWeights); cudaFree(d_activationResult); cudaFree(d_gradients); } void Layer_GPU::SetBias(const double bias) { this->bias = bias; } void Layer_GPU::InitWeights() { auto weights = new double[sizeW]; std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<> dis(-1.0, 1.0); for (auto i = 0; i < static_cast<decltype(i)>(sizeW); ++i) { weights[i] = dis(gen); } utils::CheckError(cudaMemcpy(d_weights, weights, sizeW * sizeof(double), cudaMemcpyHostToDevice), __FILE__, __LINE__); delete[] weights; } void Layer_GPU::initDeltaWeights() { auto deltaWeights = new double[sizeW]; for (auto i = 0; i < static_cast<decltype(i)>(sizeW); ++i) { deltaWeights[i] = 0; } utils::CheckError(cudaMemcpy(d_deltaWeights, deltaWeights, sizeW * sizeof(double), cudaMemcpyHostToDevice), __FILE__, __LINE__); delete[] deltaWeights; } void Layer_GPU::SetTrainRate(const double trainRate) { this->trainRate = trainRate; } void Layer_GPU::SetMomentum(const double momentum) { this->momentum = momentum; } const double* Layer_GPU::Output_GPU() const { return d_activationResult; } unsigned Layer_GPU::OutputSize() const { return nrNeurons; } //------------------------------------------------------------------------ // cuda kernels //------------------------------------------------------------------------ __device__ double cuda_activationFunc(double value) { auto res = 1.0 / (1.0 + std::exp(-value)); return res; } __device__ double cuda_activeationFuncD(double value) { auto s = cuda_activationFunc(value); return s * (1.0 - s); } __global__ void matvec_kernel(const double* A, const double* B, double* C, unsigned ACols, unsigned BCols) { double CValue = 0; int Row = blockIdx.y * TILE_DIM + threadIdx.y; int Col = blockIdx.x * TILE_DIM + threadIdx.x; __shared__ float sd_A[TILE_DIM]; __shared__ float sd_B[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + ACols) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < ACols && Row == 0) { sd_A[threadIdx.x] = A[k * TILE_DIM + threadIdx.x]; } else { //sd_A[threadIdx.x] = 0.0; } if (k * TILE_DIM + threadIdx.y < ACols && Col < BCols) { sd_B[threadIdx.y][threadIdx.x] = B[(k * TILE_DIM + threadIdx.y) * BCols + Col]; } else { //sd_B[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += sd_A[n] * sd_B[n][threadIdx.x]; } __syncthreads(); sd_A[threadIdx.x] = 0.0; sd_B[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); } if (Row < 1 && Col < BCols) { C[((blockIdx.y * blockDim.y + threadIdx.y)) + (blockIdx.x*blockDim.x) + threadIdx.x] = cuda_activationFunc(CValue); } } __global__ void cuda_total(double * d_input, double * d_output, int len) { // Load a 
segment of the input vector into shared memory __shared__ float partialSum[2 * BLOCK_SIZE]; int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x*blockDim.x; if ((start + t) < len) { partialSum[t] = d_input[start + t]; } else { partialSum[t] = 0.0; } if ((start + blockDim.x + t) < len) { partialSum[blockDim.x + t] = d_input[start + blockDim.x + t]; } else { partialSum[blockDim.x + t] = 0.0; } // Traverse reduction tree for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) partialSum[t] += partialSum[t + stride]; } __syncthreads(); // Write the computed sum of the block to the output vector at correct index if (t == 0 && (globalThreadId * 2) < len) { d_output[blockIdx.x] = partialSum[t]; } } __global__ void cuda_sumdw(double *d_weights, double *d_gradients, double *d_output, unsigned sizeW, unsigned sizeGrad, unsigned sizeOutput) { int threadId = blockIdx.x*blockDim.x + threadIdx.x; if (threadId < sizeOutput) { for (auto i = 0; i < static_cast<decltype(i)>(sizeGrad); ++i) { d_output[threadId] += d_weights[i * sizeOutput + threadId] * d_gradients[i]; } } } __global__ void cuda_gradientsLastLayer(const double *d_targetVals, const double *d_activationResults, double *d_gradients, unsigned nrNeurons) { unsigned i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nrNeurons) { auto delta = d_targetVals[i] - d_activationResults[i]; d_gradients[i] = delta * cuda_activeationFuncD(d_activationResults[i]); } } __global__ void cuda_gradients(const double *d_deltas, const double *d_activationResults, double *d_gradients, unsigned nrNeurons) { unsigned i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nrNeurons) { d_gradients[i] = d_deltas[i] * cuda_activeationFuncD(d_activationResults[i]); } } __global__ void cuda_updateWeights(double * d_weights, double * d_deltaWeights, double * d_activationResults, double * d_gradients, unsigned sizeW, unsigned nrNeurons, double trainRate, double momentum) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < sizeW) { auto oldDeltaWeight = d_deltaWeights[i]; auto idx = i % (nrNeurons); auto newDeltaWeight = trainRate * d_activationResults[idx] * d_gradients[idx] + momentum * oldDeltaWeight; d_deltaWeights[i] = newDeltaWeight; d_weights[i] += d_deltaWeights[i]; } } __global__ void cuda_min_max(double * d_input, double * d_outputMin, double *d_outputMax, int len, double lowest, double highest) { // Load a segment of the input vector into shared memory __shared__ double partialMin[2 * BLOCK_SIZE]; __shared__ double partialMax[2 * BLOCK_SIZE]; int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x*blockDim.x; if ((start + t) < len) { partialMin[t] = d_input[start + t]; partialMax[t] = d_input[start + t]; } else { partialMin[t] = highest; partialMax[t] = lowest; } if ((start + blockDim.x + t) < len) { partialMin[blockDim.x + t] = d_input[start + blockDim.x + t]; partialMax[blockDim.x + t] = d_input[start + blockDim.x + t]; } else { partialMin[blockDim.x + t] = highest; partialMax[blockDim.x + t] = lowest; } // Traverse reduction tree for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) { if (partialMin[t] > partialMin[t + stride]) { partialMin[t] = partialMin[t + stride]; } if (partialMax[t] < partialMax[t + stride]) { partialMax[t] = partialMax[t + stride]; } } } __syncthreads(); // Write the computed sum of the block to the output vector at 
correct index if (t == 0 && (globalThreadId * 2) < len) { d_outputMin[blockIdx.x] = partialMin[t]; d_outputMax[blockIdx.x] = partialMax[t]; } } __global__ void cuda_normalizeWeights(double * d_weights, unsigned sizeW, double min, double max) { /* const auto a = -1.0; const auto b = 1.0; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < sizeW) { d_weights[i] = a + (b - a) * (d_weights[i] - min) / (max - min); } */ } //------------------------------------------------------------------------ // end of cuda kernels //------------------------------------------------------------------------ double *Layer_GPU::SumDW_GPU() const { double *d_sumdw = nullptr; utils::CheckError(cudaMalloc((void**)&d_sumdw, nrInputs * sizeof(double)), __FILE__, __LINE__); dim3 DimGrid(32, 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); cuda_sumdw << <DimGrid, DimBlock >> > (d_weights, d_gradients, d_sumdw, sizeW, nrNeurons, nrInputs); return d_sumdw; } void Layer_GPU::FeedForward_GPU(const double* d_inputs) { dim3 dim_grid(1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); matvec_kernel << <dim_grid, dim_block >> >(d_inputs, d_weights, d_activationResult, nrInputs, nrNeurons); } void Layer_GPU::BackPropagation_GPU(const double *inputs) { calcGradients_GPU(inputs); } void Layer_GPU::BackPropagation_GPU(const std::shared_ptr<Layer_GPU> &prevLayer) { calcGradients_GPU(prevLayer); } //calcualte the gradients for the output layer void Layer_GPU::calcGradients_GPU(const double* targetVals) { dim3 grids(32, 32, 1); dim3 blocks(32, 32, 1); double *d_targetVals = nullptr; utils::CheckError(cudaMalloc((void**)&d_targetVals, nrInputs * sizeof(double)), __FILE__, __LINE__); //utils::CheckError(cudaMemcpy(d_targetVals, targetVals, sizeW * sizeof(double), cudaMemcpyHostToDevice), __FILE__, __LINE__); cuda_gradientsLastLayer << < grids, blocks >> > (targetVals, d_activationResult, d_gradients, nrNeurons); utils::CheckError(cudaGetLastError(), __FILE__, __LINE__); } //calculate the gradients for the hidden layer void Layer_GPU::calcGradients_GPU(const std::shared_ptr<Layer_GPU> &prevLayer) { dim3 grids(32, 32, 1); dim3 blocks(32, 32, 1); cuda_gradients << < grids, blocks >> > (prevLayer->SumDW_GPU(), d_activationResult, d_gradients, nrNeurons); utils::CheckError(cudaGetLastError(), __FILE__, __LINE__); } void Layer_GPU::UpdateWeights_GPU() { dim3 grids(std::ceil(sizeW / 256.0), 1, 1); dim3 blocks(512, 1, 1); cuda_updateWeights << <grids, blocks >> > (d_weights, d_deltaWeights, d_activationResult, d_gradients, sizeW, nrNeurons, trainRate, momentum); utils::CheckError(cudaGetLastError(), __FILE__, __LINE__); if (sizeW > 1) { unsigned numOutputElements = sizeW / (BLOCK_SIZE << 1); if (sizeW % (BLOCK_SIZE << 1)) { numOutputElements++; } double *d_outputMin = nullptr; double *d_outputMax = nullptr; utils::CheckError(cudaMalloc((void**)&d_outputMin, numOutputElements * sizeof(double)), __FILE__, __LINE__); utils::CheckError(cudaMalloc((void**)&d_outputMax, numOutputElements * sizeof(double)), __FILE__, __LINE__); dim3 DimGrid(numOutputElements, 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); cuda_min_max << <DimGrid, DimBlock >> > (d_weights, d_outputMin, d_outputMax, sizeW, std::numeric_limits<double>::min(), std::numeric_limits<double>::max()); double * outputMin = new double[numOutputElements]; double * outputMax = new double[numOutputElements]; utils::CheckError(cudaMemcpy(outputMin, d_outputMin, numOutputElements * sizeof(double), cudaMemcpyDeviceToHost), __FILE__, __LINE__); utils::CheckError(cudaMemcpy(outputMax, d_outputMax, numOutputElements * 
sizeof(double), cudaMemcpyDeviceToHost), __FILE__, __LINE__); double min = outputMin[0]; double max = outputMax[0]; for (unsigned i = 1; i < numOutputElements; i++) { if (min > outputMin[i]) { min = outputMin[i]; } if (max < outputMax[i]) { max = outputMax[i]; } } cuda_normalizeWeights << <grids, blocks >> > (d_weights, sizeW, min, max); utils::CheckError(cudaGetLastError(), __FILE__, __LINE__); cudaFree(d_outputMin); cudaFree(d_outputMax); delete[] outputMin; delete[] outputMax; } }
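For reference, the rescaling that the commented-out cuda_normalizeWeights body above describes (mapping each weight into [-1, 1] using the block-reduced min and max) could be written as the following sketch; it is illustrative only, since the original deliberately ships with the normalization disabled:

// Sketch of the min-max rescaling described in the commented-out
// cuda_normalizeWeights above; illustration only, not enabled in the original.
__global__ void normalize_weights_sketch(double *d_weights, unsigned sizeW,
                                         double min, double max) {
  const double a = -1.0, b = 1.0;
  unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < sizeW && max > min) {  // guard against a degenerate range
    d_weights[i] = a + (b - a) * (d_weights[i] - min) / (max - min);
  }
}

If the normalization is ever enabled, note that the reduction above is seeded with std::numeric_limits<double>::min() (the smallest positive double) as its "lowest" sentinel; std::numeric_limits<double>::lowest() is the appropriate seed when weights can be negative.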
9b465475103c3bf5d0f5098c11f7c86e4f3aede1.hip
// !!! This is a file automatically generated by hipify!!! #include <helper_cuda.h> #include "cudaOpenMP.h" #include "matlabData.h" void CudaOpenMPQMMD::evolution_with_coriolis_with_p2p(const double dt, const int calculate_energy) { if(n_gpus() == 1) return; const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_legs = MatlabData::omega_states()->l_max; const int &n_theta = MatlabData::theta()->n; insist(n_theta >= n_legs); const size_t n = n1*n2*n_legs; for(int i_dev = 0; i_dev < n_gpus(); i_dev++) { OmegaWavepacketsOnSingleDevice * &omega_wpts_i_dev = omega_wavepackets_on_single_device[i_dev]; const Vec<OmegaWavepacket *> &omega_wpts_i = omega_wpts_i_dev->omega_wavepackets; for(int i = 0; i < omega_wpts_i.size(); i++) { const Complex *psi_i = omega_wpts_i[i]->legendre_psi_dev_pointer(); for(int j_dev = 0; j_dev < n_gpus(); j_dev++) { if(i_dev == j_dev) continue; OmegaWavepacketsOnSingleDevice * &omega_wpts_j_dev = omega_wavepackets_on_single_device[j_dev]; omega_wpts_j_dev->zero_work_dev_2(); checkCudaErrors(hipMemcpyPeer(omega_wpts_j_dev->work_dev_2, j_dev, psi_i, i_dev, n*sizeof(Complex))); const int &omega_i = omega_wpts_i[i]->omega_value(); omega_wpts_j_dev->evolution_with_coriolis(dt, omega_i, omega_wpts_j_dev->work_dev_2); if(calculate_energy) omega_wpts_j_dev->calculate_coriolis_energy_for_legendre_psi(omega_i, omega_wpts_j_dev->work_dev_2); } } } } void CudaOpenMPQMMD::evolution_with_coriolis_with_p2p_async(const double dt, const int calculate_energy) { if(n_gpus() == 1) return; const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_legs = MatlabData::omega_states()->l_max; const int &n_theta = MatlabData::theta()->n; insist(n_theta >= n_legs); const size_t n = n1*n2*n_legs; setup_streams_and_events(1, 0); for(int i_dev = 0; i_dev < n_gpus(); i_dev++) { OmegaWavepacketsOnSingleDevice * &omega_wpts_i_dev = omega_wavepackets_on_single_device[i_dev]; const Vec<OmegaWavepacket *> &omega_wpts_i = omega_wpts_i_dev->omega_wavepackets; for(int i = 0; i < omega_wpts_i.size(); i++) { const Complex *psi_i = omega_wpts_i[i]->legendre_psi_dev_pointer(); for(int j_dev = 0; j_dev < n_gpus(); j_dev++) { if(i_dev == j_dev) continue; OmegaWavepacketsOnSingleDevice * &omega_wpts_j_dev = omega_wavepackets_on_single_device[j_dev]; omega_wpts_j_dev->zero_work_dev_2(); checkCudaErrors(hipMemcpyPeer(omega_wpts_j_dev->work_dev_2, j_dev, psi_i, i_dev, n*sizeof(Complex))); omega_wpts_j_dev->evolution_with_coriolis(dt, omega_wpts_i[i]->omega_value(), omega_wpts_j_dev->work_dev_2, &streams[j_dev]); if(calculate_energy) { omega_wpts_j_dev->calculate_coriolis_energy_for_legendre_psi(omega_wpts_i[i]->omega_value(), omega_wpts_j_dev->work_dev_2, &streams[j_dev]); } } } } }
9b465475103c3bf5d0f5098c11f7c86e4f3aede1.cu
#include <helper_cuda.h> #include "cudaOpenMP.h" #include "matlabData.h" void CudaOpenMPQMMD::evolution_with_coriolis_with_p2p(const double dt, const int calculate_energy) { if(n_gpus() == 1) return; const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_legs = MatlabData::omega_states()->l_max; const int &n_theta = MatlabData::theta()->n; insist(n_theta >= n_legs); const size_t n = n1*n2*n_legs; for(int i_dev = 0; i_dev < n_gpus(); i_dev++) { OmegaWavepacketsOnSingleDevice * &omega_wpts_i_dev = omega_wavepackets_on_single_device[i_dev]; const Vec<OmegaWavepacket *> &omega_wpts_i = omega_wpts_i_dev->omega_wavepackets; for(int i = 0; i < omega_wpts_i.size(); i++) { const Complex *psi_i = omega_wpts_i[i]->legendre_psi_dev_pointer(); for(int j_dev = 0; j_dev < n_gpus(); j_dev++) { if(i_dev == j_dev) continue; OmegaWavepacketsOnSingleDevice * &omega_wpts_j_dev = omega_wavepackets_on_single_device[j_dev]; omega_wpts_j_dev->zero_work_dev_2(); checkCudaErrors(cudaMemcpyPeer(omega_wpts_j_dev->work_dev_2, j_dev, psi_i, i_dev, n*sizeof(Complex))); const int &omega_i = omega_wpts_i[i]->omega_value(); omega_wpts_j_dev->evolution_with_coriolis(dt, omega_i, omega_wpts_j_dev->work_dev_2); if(calculate_energy) omega_wpts_j_dev->calculate_coriolis_energy_for_legendre_psi(omega_i, omega_wpts_j_dev->work_dev_2); } } } } void CudaOpenMPQMMD::evolution_with_coriolis_with_p2p_async(const double dt, const int calculate_energy) { if(n_gpus() == 1) return; const int &n1 = MatlabData::r1()->n; const int &n2 = MatlabData::r2()->n; const int &n_legs = MatlabData::omega_states()->l_max; const int &n_theta = MatlabData::theta()->n; insist(n_theta >= n_legs); const size_t n = n1*n2*n_legs; setup_streams_and_events(1, 0); for(int i_dev = 0; i_dev < n_gpus(); i_dev++) { OmegaWavepacketsOnSingleDevice * &omega_wpts_i_dev = omega_wavepackets_on_single_device[i_dev]; const Vec<OmegaWavepacket *> &omega_wpts_i = omega_wpts_i_dev->omega_wavepackets; for(int i = 0; i < omega_wpts_i.size(); i++) { const Complex *psi_i = omega_wpts_i[i]->legendre_psi_dev_pointer(); for(int j_dev = 0; j_dev < n_gpus(); j_dev++) { if(i_dev == j_dev) continue; OmegaWavepacketsOnSingleDevice * &omega_wpts_j_dev = omega_wavepackets_on_single_device[j_dev]; omega_wpts_j_dev->zero_work_dev_2(); checkCudaErrors(cudaMemcpyPeer(omega_wpts_j_dev->work_dev_2, j_dev, psi_i, i_dev, n*sizeof(Complex))); omega_wpts_j_dev->evolution_with_coriolis(dt, omega_wpts_i[i]->omega_value(), omega_wpts_j_dev->work_dev_2, &streams[j_dev]); if(calculate_energy) { omega_wpts_j_dev->calculate_coriolis_energy_for_legendre_psi(omega_wpts_i[i]->omega_value(), omega_wpts_j_dev->work_dev_2, &streams[j_dev]); } } } } }
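The cudaMemcpyPeer/hipMemcpyPeer calls above work whether or not peer access is enabled, but they only take the direct GPU-to-GPU path when it is. A typical setup step (a hypothetical sketch, not part of this file) looks like this:

// Hypothetical initialization sketch: enable direct P2P access between all GPU
// pairs that support it, so the peer copies above avoid staging through host memory.
#include <cuda_runtime.h>

static void enable_peer_access(int n_gpus) {
  for (int i = 0; i < n_gpus; ++i) {
    cudaSetDevice(i);
    for (int j = 0; j < n_gpus; ++j) {
      if (i == j) continue;
      int can_access = 0;
      cudaDeviceCanAccessPeer(&can_access, i, j);
      if (can_access) cudaDeviceEnablePeerAccess(j, 0);  // flags must be 0
    }
  }
}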
6a731b6fa30f6ca3d72feda1c13d284589cb284c.hip
// !!! This is a file automatically generated by hipify!!! /* Fractal code for CS 4380 / CS 5351 Copyright (c) 2020 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source or binary form, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ //Blair Todd //CS4380 #include <cstdio> #include <cmath> #include <hip/hip_runtime.h> static const int ThreadsPerBlock = 1024; static __global__ void fractal(const int width, const int start_frame, const int stop_frame, unsigned char* const pic) { // todo: use the GPU to compute the requested frames (base the code on the previous project) const float Delta = 0.00304; const float xMid = -0.055846456; const float yMid = -0.668311119; // const int pixels = frames * width * width; const int i = threadIdx.x + blockIdx.x * blockDim.x; //+ (start_frame * width * width); const int pixels = (stop_frame + start_frame) * width * width; if(i < pixels){ const int frame = i / (width * width); const int row = (i / width) % width; const int col = i % width; const double delta = Delta * pow(0.975, frame); const double xMin = xMid - delta; const double yMin = yMid - delta; const double dw = 2.0 * delta / width; const double cy = yMin + row * dw; const double cx = xMin + col * dw; double x = cx; double y = cy; double x2, y2; int count = 256; do { x2 = x * x; y2 = y * y; y = 2.0 * x * y + cy; x = x2 - y2 + cx; count--; } while ((count > 0) && ((x2 + y2) <= 5.0)); pic[(frame - start_frame) * width * width + row * width + col] = (unsigned char)count; } } unsigned char* GPU_Init(const int gpu_frames, const int width) { unsigned char* d_pic; if (hipSuccess != hipMalloc((void **)&d_pic, gpu_frames * width * width * sizeof(unsigned char))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);} return d_pic; } void GPU_Exec(const int start_frame, const int stop_frame, const int width, unsigned char* d_pic) { // todo: launch the kernel with just the right number of blocks and ThreadsPerBlock threads per block and do nothing else hipLaunchKernelGGL(( fractal) , dim3(((stop_frame * width * width) + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, start_frame,stop_frame, width,d_pic); } void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* d_pic) { // todo: copy the result from the device to the host and free the device memory const int size = gpu_frames * width * width * sizeof(unsigned char); if(hipSuccess != hipMemcpy(pic, d_pic, size, hipMemcpyDeviceToHost)){fprintf(stderr, "ERROR: could not copy memory\n"); exit(-1);} hipFree(d_pic); //pic or d_pic? }
6a731b6fa30f6ca3d72feda1c13d284589cb284c.cu
/* Fractal code for CS 4380 / CS 5351 Copyright (c) 2020 Texas State University. All rights reserved. Redistribution in source or binary form, with or without modification, is *not* permitted. Use in source or binary form, with or without modification, is only permitted for academic use in CS 4380 or CS 5351 at Texas State University. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Martin Burtscher */ //Blair Todd //CS4380 #include <cstdio> #include <cmath> #include <cuda.h> static const int ThreadsPerBlock = 1024; static __global__ void fractal(const int width, const int start_frame, const int stop_frame, unsigned char* const pic) { // todo: use the GPU to compute the requested frames (base the code on the previous project) const float Delta = 0.00304; const float xMid = -0.055846456; const float yMid = -0.668311119; // const int pixels = frames * width * width; const int i = threadIdx.x + blockIdx.x * blockDim.x; //+ (start_frame * width * width); const int pixels = (stop_frame + start_frame) * width * width; if(i < pixels){ const int frame = i / (width * width); const int row = (i / width) % width; const int col = i % width; const double delta = Delta * pow(0.975, frame); const double xMin = xMid - delta; const double yMin = yMid - delta; const double dw = 2.0 * delta / width; const double cy = yMin + row * dw; const double cx = xMin + col * dw; double x = cx; double y = cy; double x2, y2; int count = 256; do { x2 = x * x; y2 = y * y; y = 2.0 * x * y + cy; x = x2 - y2 + cx; count--; } while ((count > 0) && ((x2 + y2) <= 5.0)); pic[(frame - start_frame) * width * width + row * width + col] = (unsigned char)count; } } unsigned char* GPU_Init(const int gpu_frames, const int width) { unsigned char* d_pic; if (cudaSuccess != cudaMalloc((void **)&d_pic, gpu_frames * width * width * sizeof(unsigned char))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);} return d_pic; } void GPU_Exec(const int start_frame, const int stop_frame, const int width, unsigned char* d_pic) { // todo: launch the kernel with just the right number of blocks and ThreadsPerBlock threads per block and do nothing else fractal <<< ((stop_frame * width * width) + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(start_frame,stop_frame, width,d_pic); } void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* d_pic) { // todo: copy the result from the device to the host and free the device memory const int size = gpu_frames * width * width * sizeof(unsigned char); if(cudaSuccess != cudaMemcpy(pic, d_pic, size, cudaMemcpyDeviceToHost)){fprintf(stderr, "ERROR: could not copy memory\n"); exit(-1);} cudaFree(d_pic); //pic or d_pic? }
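This pair shows the launch-syntax translation most directly: the .cu file's triple-chevron launch becomes hipLaunchKernelGGL with explicit grid, block, shared-memory, and stream arguments in the .hip file. A minimal sketch of the same mapping follows; the scale kernel and block size are illustrative and not taken from the files above.

#include <cuda_runtime.h>

__global__ void scale(float *x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per element
  if (i < n) x[i] *= a;                            // guard the final, partially filled block
}

void launch_scale(float *d_x, float a, int n) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;  // ceiling division, as in GPU_Exec above

  scale<<<blocks, threads>>>(d_x, a, n);           // CUDA spelling

  // HIP spelling emitted by hipify for the same launch:
  // hipLaunchKernelGGL(scale, dim3(blocks), dim3(threads), 0 /*shared mem*/, 0 /*stream*/,
  //                    d_x, a, n);
}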
be1a9dfaff2261429514701bf8f427b797950348.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // The example shows how to use cudaFlow to create a matrix multiplication // of two 2D matrices. #include <taskflow/taskflow.hpp> // Kernel: matmul __global__ void matmul(int *a, int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if(col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } // Matrix multiplication using GPU auto gpu(int M, int N, int K) { std::vector<int> ha, hb, hc; int *da, *db, *dc; tf::Taskflow taskflow("MatrixMultiplication"); tf::Executor executor; // allocate the host and device storage for a auto allocate_a = taskflow.emplace([&](){ ha.resize(M*N, M+N); TF_CHECK_CUDA(hipMalloc(&da, M*N*sizeof(int)), "failed to allocate a"); }).name("allocate_a"); // allocate the host and device storage for b auto allocate_b = taskflow.emplace([&](){ hb.resize(N*K, N+K); TF_CHECK_CUDA(hipMalloc(&db, N*K*sizeof(int)), "failed to allocate b"); }).name("allocate_b"); // allocate the host and device storage for c auto allocate_c = taskflow.emplace([&](){ hc.resize(M*K); TF_CHECK_CUDA(hipMalloc(&dc, M*K*sizeof(int)), "failed to allocate c"); }).name("allocate_c"); // create a cudaFlow to run the matrix multiplication auto cudaFlow = taskflow.emplace([&](tf::cudaFlow& cf){ // copy data to da, db, and dc auto copy_da = cf.copy(da, ha.data(), M*N).name("H2D_a"); auto copy_db = cf.copy(db, hb.data(), N*K).name("H2D_b"); auto copy_hc = cf.copy(hc.data(), dc, M*K).name("D2H_c"); dim3 grid ((K+16-1)/16, (M+16-1)/16); dim3 block (16, 16); auto kmatmul = cf.kernel(grid, block, 0, matmul, da, db, dc, M, N, K) .name("matmul"); kmatmul.succeed(copy_da, copy_db) .precede(copy_hc); }).name("cudaFlow"); auto free = taskflow.emplace([&](){ TF_CHECK_CUDA(hipFree(da), "failed to free da"); TF_CHECK_CUDA(hipFree(db), "failed to free db"); TF_CHECK_CUDA(hipFree(dc), "failed to free dc"); }).name("free"); cudaFlow.succeed(allocate_a, allocate_b, allocate_c) .precede(free); executor.run(taskflow).wait(); // You may uncomment the line below to dump the task graph //taskflow.dump(std::cout); return hc; } // Matrix multiplication using CPU auto cpu(int M, int N, int K) { std::vector<int> a, b, c; tf::Executor executor; tf::Taskflow taskflow; auto ha = taskflow.emplace([&](){ a.resize(M*N, M+N); }).name("allocate_a"); auto hb = taskflow.emplace([&](){ b.resize(N*K, N+K); }).name("allocate_b"); auto hc = taskflow.emplace([&](){ c.resize(M*K, 0); }).name("allocate_c"); auto pf = taskflow.parallel_for(0, M, 1, [&] (int m) { for(int k=0; k<K; k++) { for(int n=0; n<N; n++) { c[m*K+k] += (a[m*N+n]*b[n*K+k]); } } }); pf.succeed(ha, hb, hc); //taskflow.dump(std::cout); executor.run(taskflow).wait(); return c; } // Function: main int main(int argc, char *argv[]) { if(argc != 4) { std::cerr << "usage: matrix-multiplication M N K\n"; std::exit(EXIT_FAILURE); } int M = std::atoi(argv[1]); int N = std::atoi(argv[2]); int K = std::atoi(argv[3]); std::cout << "matrix A: " << M << 'x' << N << '\n' << "matrix B: " << N << 'x' << K << '\n' << "matrix C: " << M << 'x' << K << '\n'; // matrix multiplication using gpu std::cout << "running gpu matrix multiplication ... 
"; auto gbeg = std::chrono::steady_clock::now(); auto gres = gpu(M, N, K); auto gend = std::chrono::steady_clock::now(); std::cout << "completed with " << std::chrono::duration_cast<std::chrono::milliseconds>(gend-gbeg).count() << " ms\n"; // matrix multiplication using cpu std::cout << "running cpu matrix multiplication ... "; auto cbeg = std::chrono::steady_clock::now(); auto cres = cpu(M, N, K); auto cend = std::chrono::steady_clock::now(); std::cout << "completed with " << std::chrono::duration_cast<std::chrono::milliseconds>(cend-cbeg).count() << " ms\n"; // verify the result int64_t error = 0; std::cout << "verifying results ... "; for(int i=0; i<M*K; ++i) { error += abs(gres[i] - cres[i]); } std::cout << "abs-error=" << error << '\n'; return 0; }
be1a9dfaff2261429514701bf8f427b797950348.cu
// The example shows how to use cudaFlow to create a matrix multiplication // of two 2D matrices. #include <taskflow/taskflow.hpp> // Kernel: matmul __global__ void matmul(int *a, int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if(col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } // Matrix multiplication using GPU auto gpu(int M, int N, int K) { std::vector<int> ha, hb, hc; int *da, *db, *dc; tf::Taskflow taskflow("MatrixMultiplication"); tf::Executor executor; // allocate the host and device storage for a auto allocate_a = taskflow.emplace([&](){ ha.resize(M*N, M+N); TF_CHECK_CUDA(cudaMalloc(&da, M*N*sizeof(int)), "failed to allocate a"); }).name("allocate_a"); // allocate the host and device storage for b auto allocate_b = taskflow.emplace([&](){ hb.resize(N*K, N+K); TF_CHECK_CUDA(cudaMalloc(&db, N*K*sizeof(int)), "failed to allocate b"); }).name("allocate_b"); // allocate the host and device storage for c auto allocate_c = taskflow.emplace([&](){ hc.resize(M*K); TF_CHECK_CUDA(cudaMalloc(&dc, M*K*sizeof(int)), "failed to allocate c"); }).name("allocate_c"); // create a cudaFlow to run the matrix multiplication auto cudaFlow = taskflow.emplace([&](tf::cudaFlow& cf){ // copy data to da, db, and dc auto copy_da = cf.copy(da, ha.data(), M*N).name("H2D_a"); auto copy_db = cf.copy(db, hb.data(), N*K).name("H2D_b"); auto copy_hc = cf.copy(hc.data(), dc, M*K).name("D2H_c"); dim3 grid ((K+16-1)/16, (M+16-1)/16); dim3 block (16, 16); auto kmatmul = cf.kernel(grid, block, 0, matmul, da, db, dc, M, N, K) .name("matmul"); kmatmul.succeed(copy_da, copy_db) .precede(copy_hc); }).name("cudaFlow"); auto free = taskflow.emplace([&](){ TF_CHECK_CUDA(cudaFree(da), "failed to free da"); TF_CHECK_CUDA(cudaFree(db), "failed to free db"); TF_CHECK_CUDA(cudaFree(dc), "failed to free dc"); }).name("free"); cudaFlow.succeed(allocate_a, allocate_b, allocate_c) .precede(free); executor.run(taskflow).wait(); // You may uncomment the line below to dump the task graph //taskflow.dump(std::cout); return hc; } // Matrix multiplication using CPU auto cpu(int M, int N, int K) { std::vector<int> a, b, c; tf::Executor executor; tf::Taskflow taskflow; auto ha = taskflow.emplace([&](){ a.resize(M*N, M+N); }).name("allocate_a"); auto hb = taskflow.emplace([&](){ b.resize(N*K, N+K); }).name("allocate_b"); auto hc = taskflow.emplace([&](){ c.resize(M*K, 0); }).name("allocate_c"); auto pf = taskflow.parallel_for(0, M, 1, [&] (int m) { for(int k=0; k<K; k++) { for(int n=0; n<N; n++) { c[m*K+k] += (a[m*N+n]*b[n*K+k]); } } }); pf.succeed(ha, hb, hc); //taskflow.dump(std::cout); executor.run(taskflow).wait(); return c; } // Function: main int main(int argc, char *argv[]) { if(argc != 4) { std::cerr << "usage: matrix-multiplication M N K\n"; std::exit(EXIT_FAILURE); } int M = std::atoi(argv[1]); int N = std::atoi(argv[2]); int K = std::atoi(argv[3]); std::cout << "matrix A: " << M << 'x' << N << '\n' << "matrix B: " << N << 'x' << K << '\n' << "matrix C: " << M << 'x' << K << '\n'; // matrix multiplication using gpu std::cout << "running gpu matrix multiplication ... 
"; auto gbeg = std::chrono::steady_clock::now(); auto gres = gpu(M, N, K); auto gend = std::chrono::steady_clock::now(); std::cout << "completed with " << std::chrono::duration_cast<std::chrono::milliseconds>(gend-gbeg).count() << " ms\n"; // matrix multiplication using cpu std::cout << "running cpu matrix multiplication ... "; auto cbeg = std::chrono::steady_clock::now(); auto cres = cpu(M, N, K); auto cend = std::chrono::steady_clock::now(); std::cout << "completed with " << std::chrono::duration_cast<std::chrono::milliseconds>(cend-cbeg).count() << " ms\n"; // verify the result int64_t error = 0; std::cout << "verifying results ... "; for(int i=0; i<M*K; ++i) { error += abs(gres[i] - cres[i]); } std::cout << "abs-error=" << error << '\n'; return 0; }
faeb41b9eb67e748d1760b0af5cc6f113fb00835.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void one_vector_float(float *vec, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) vec[xIndex]=1.0f; }
faeb41b9eb67e748d1760b0af5cc6f113fb00835.cu
#include "includes.h" __global__ void one_vector_float(float *vec, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) vec[xIndex]=1.0f; }
f5085d9c31539f776742ab65dadc172846c2038a.hip
// !!! This is a file automatically generated by hipify!!! //xfail:NOT_ALL_VERIFIED //--blockDim=2 --gridDim=1 --no-inline #include <hip/hip_runtime.h> __global__ void race_test (unsigned int* i, int* A) { int tid = threadIdx.x; int j = atomicAdd(i,0); A[j] = tid; }
f5085d9c31539f776742ab65dadc172846c2038a.cu
//xfail:NOT_ALL_VERIFIED //--blockDim=2 --gridDim=1 --no-inline #include <cuda.h> __global__ void race_test (unsigned int* i, int* A) { int tid = threadIdx.x; int j = atomicAdd(i,0); A[j] = tid; }
463a40af0f3ff0e09d1b9e6f38567128184451d7.hip
// !!! This is a file automatically generated by hipify!!! // A C++ program for Bellman-Ford's queue-based single source // shortest path algorithm. #include <bits/stdc++.h> #include <chrono> #include <fstream> #include <omp.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> using namespace std; using namespace std::chrono; #define INF 2000000000 const string fin_str = "../matlab/gr_optimal_control_3rd_order.csv"; __global__ void bf(int n, int u, int const* d_weights, int* d_dist, bool* d_has_change, int* came_from) { int v = blockIdx.x * blockDim.x + threadIdx.x; if(v < n) { d_has_change[v] = false; int weight = d_weights[u * n + v]; if (weight < INF) { if (d_dist[v] > d_dist[u] + weight) { d_dist[v] = d_dist[u] + weight; d_has_change[v] = true; came_from[v] = u; } } } } //translate 2-dimension coordinate to 1-dimension int convert_dimension_2D_1D(int x, int y, int n) { return x * n + y; } // The main function that finds shortest distances void BellmanFord(int src, int goal, int n, int h_weights[]) { dim3 threadsPerBlock = 256; dim3 blocksPerGrid = ((n + threadsPerBlock.x - 1) / threadsPerBlock.x); // host int *h_dist = (int *)calloc(sizeof(int), n); int *h_came_from = (int *)calloc(sizeof(int), n); bool *h_has_change = (bool *)calloc(sizeof(bool), n); vector<bool>in_queue(n, false); for (int i=0; i<n; i++) { h_dist[i] = INF; h_came_from[i] = INF; } h_dist[src] = 0; h_came_from[src] = src; in_queue[src] = true; // device int* d_weights; int* d_dist; int* d_came_from; bool* d_has_change; hipMalloc(&d_weights, n * n * sizeof(int)); hipMalloc(&d_dist, n * sizeof(int)); hipMalloc(&d_came_from, n * sizeof(int)); hipMalloc(&d_has_change, n * sizeof(bool)); // copy host to device hipMemcpy(d_weights, h_weights, n * n * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_dist, h_dist, n * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_came_from, h_came_from, n * sizeof(int), hipMemcpyHostToDevice); deque<int> node_queue; node_queue.push_front(src); int counter = 0; // main loop auto start = high_resolution_clock::now(); while(!node_queue.empty()) { int u = node_queue.front(); node_queue.pop_front(); in_queue[u] = false; counter++; // invoke kernel hipLaunchKernelGGL(( bf) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, u, d_weights, d_dist, d_has_change, d_came_from); hipMemcpy(h_has_change, d_has_change, n * sizeof(bool), hipMemcpyDeviceToHost); hipMemcpy(h_dist, d_dist, sizeof(int) * n, hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) { if (h_has_change[i]) { if(!in_queue[i]) { if(node_queue.empty() || h_dist[i] <= h_dist[node_queue.front()]) { node_queue.push_front(i); } else { node_queue.push_back(i); } in_queue[i] = true; } } } } auto stop = high_resolution_clock::now(); cout << "counter: " << counter << "\n"; hipMemcpy(h_came_from, d_came_from, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(d_weights); hipFree(d_dist); hipFree(d_came_from); hipFree(d_has_change); // Print shortest distances stored in dist[] ofstream myfile ("slf.txt"); if (myfile.is_open()) { for (int i = 0; i < n; ++i) myfile << i << "\t\t" << h_dist[i] <<"\n"; myfile.close(); } else cout << "Unable to open file"; ofstream myfile_path ("slf_path.txt"); if (myfile_path.is_open()) { vector<int> path; int current = goal; while(current != src) { path.push_back(current); current = h_came_from[current]; } path.push_back(src); reverse(path.begin(), path.end()); for (vector<int>::iterator i = path.begin(); i < path.end(); ++i) { myfile_path << *i << "\t\t"; } myfile_path.close(); 
int total = 0; for (vector<int>::iterator i = path.begin(); i < path.end()-1;) { int u = *i; int v = *(++i); int weight = h_weights[convert_dimension_2D_1D(u, v, n)]; total += weight; cout << "u: " << u << ", v: " << v << ", weight: " << weight << "\n"; } cout << "total: " << total <<"\n"; } else cout << "Unable to open file"; auto duration = duration_cast<milliseconds>(stop - start); cout << "duration :" << duration.count() << endl; } void create_weights(int weights[], int n) { for (int i = 0; i < n * n; i++) { weights[i] = INF; } fstream fin; fin.open(fin_str, ios::in); vector<int> row; string line, word; getline(fin,line); while (!fin.eof()) { row.clear(); getline(fin, line); stringstream s(line); while (getline(s, word, ',')) { row.push_back(stoi(word)); } weights[convert_dimension_2D_1D(row[0]-1, row[1]-1, n)] = row[2]; weights[convert_dimension_2D_1D(row[1]-1, row[0]-1, n)] = row[2]; } fin.close(); } // Driver program to test above functions int main() { int N = 16456; int* mat = (int *)malloc(N * N * sizeof(int)); create_weights(mat, N); // for (int i=0; i< N*N; i++) // { // cout << mat[i] << " "; // } BellmanFord(0, 2324, N, mat); return 0; }
463a40af0f3ff0e09d1b9e6f38567128184451d7.cu
// A C++ program for Bellman-Ford's queue-based single source // shortest path algorithm. #include <bits/stdc++.h> #include <chrono> #include <fstream> #include <omp.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> using namespace std; using namespace std::chrono; #define INF 2000000000 const string fin_str = "../matlab/gr_optimal_control_3rd_order.csv"; __global__ void bf(int n, int u, int const* d_weights, int* d_dist, bool* d_has_change, int* came_from) { int v = blockIdx.x * blockDim.x + threadIdx.x; if(v < n) { d_has_change[v] = false; int weight = d_weights[u * n + v]; if (weight < INF) { if (d_dist[v] > d_dist[u] + weight) { d_dist[v] = d_dist[u] + weight; d_has_change[v] = true; came_from[v] = u; } } } } //translate 2-dimension coordinate to 1-dimension int convert_dimension_2D_1D(int x, int y, int n) { return x * n + y; } // The main function that finds shortest distances void BellmanFord(int src, int goal, int n, int h_weights[]) { dim3 threadsPerBlock = 256; dim3 blocksPerGrid = ((n + threadsPerBlock.x - 1) / threadsPerBlock.x); // host int *h_dist = (int *)calloc(sizeof(int), n); int *h_came_from = (int *)calloc(sizeof(int), n); bool *h_has_change = (bool *)calloc(sizeof(bool), n); vector<bool>in_queue(n, false); for (int i=0; i<n; i++) { h_dist[i] = INF; h_came_from[i] = INF; } h_dist[src] = 0; h_came_from[src] = src; in_queue[src] = true; // device int* d_weights; int* d_dist; int* d_came_from; bool* d_has_change; cudaMalloc(&d_weights, n * n * sizeof(int)); cudaMalloc(&d_dist, n * sizeof(int)); cudaMalloc(&d_came_from, n * sizeof(int)); cudaMalloc(&d_has_change, n * sizeof(bool)); // copy host to device cudaMemcpy(d_weights, h_weights, n * n * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_dist, h_dist, n * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_came_from, h_came_from, n * sizeof(int), cudaMemcpyHostToDevice); deque<int> node_queue; node_queue.push_front(src); int counter = 0; // main loop auto start = high_resolution_clock::now(); while(!node_queue.empty()) { int u = node_queue.front(); node_queue.pop_front(); in_queue[u] = false; counter++; // invoke kernel bf <<<blocksPerGrid, threadsPerBlock>>>(n, u, d_weights, d_dist, d_has_change, d_came_from); cudaMemcpy(h_has_change, d_has_change, n * sizeof(bool), cudaMemcpyDeviceToHost); cudaMemcpy(h_dist, d_dist, sizeof(int) * n, cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++) { if (h_has_change[i]) { if(!in_queue[i]) { if(node_queue.empty() || h_dist[i] <= h_dist[node_queue.front()]) { node_queue.push_front(i); } else { node_queue.push_back(i); } in_queue[i] = true; } } } } auto stop = high_resolution_clock::now(); cout << "counter: " << counter << "\n"; cudaMemcpy(h_came_from, d_came_from, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_weights); cudaFree(d_dist); cudaFree(d_came_from); cudaFree(d_has_change); // Print shortest distances stored in dist[] ofstream myfile ("slf.txt"); if (myfile.is_open()) { for (int i = 0; i < n; ++i) myfile << i << "\t\t" << h_dist[i] <<"\n"; myfile.close(); } else cout << "Unable to open file"; ofstream myfile_path ("slf_path.txt"); if (myfile_path.is_open()) { vector<int> path; int current = goal; while(current != src) { path.push_back(current); current = h_came_from[current]; } path.push_back(src); reverse(path.begin(), path.end()); for (vector<int>::iterator i = path.begin(); i < path.end(); ++i) { myfile_path << *i << "\t\t"; } myfile_path.close(); int total = 0; for (vector<int>::iterator i = path.begin(); i < path.end()-1;) { 
int u = *i; int v = *(++i); int weight = h_weights[convert_dimension_2D_1D(u, v, n)]; total += weight; cout << "u: " << u << ", v: " << v << ", weight: " << weight << "\n"; } cout << "total: " << total <<"\n"; } else cout << "Unable to open file"; auto duration = duration_cast<milliseconds>(stop - start); cout << "duration :" << duration.count() << endl; } void create_weights(int weights[], int n) { for (int i = 0; i < n * n; i++) { weights[i] = INF; } fstream fin; fin.open(fin_str, ios::in); vector<int> row; string line, word; getline(fin,line); while (!fin.eof()) { row.clear(); getline(fin, line); stringstream s(line); while (getline(s, word, ',')) { row.push_back(stoi(word)); } weights[convert_dimension_2D_1D(row[0]-1, row[1]-1, n)] = row[2]; weights[convert_dimension_2D_1D(row[1]-1, row[0]-1, n)] = row[2]; } fin.close(); } // Driver program to test above functions int main() { int N = 16456; int* mat = (int *)malloc(N * N * sizeof(int)); create_weights(mat, N); // for (int i=0; i< N*N; i++) // { // cout << mat[i] << " "; // } BellmanFord(0, 2324, N, mat); return 0; }
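The Bellman-Ford host loop above launches bf once per queue pop and relies on the subsequent cudaMemcpy calls to synchronize; neither side of the pair checks whether the launch itself succeeded. A small sketch of an explicit launch check is given below; the relax_stub kernel and sizes are stand-ins, not code from the pair.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void relax_stub(int *dist, int n) {       // placeholder for a kernel such as bf
  int v = blockIdx.x * blockDim.x + threadIdx.x;
  if (v < n) dist[v] += 0;                           // no-op body; only the launch pattern matters
}

bool launch_and_check(int *d_dist, int n) {
  const int threads = 256;
  relax_stub<<<(n + threads - 1) / threads, threads>>>(d_dist, n);

  cudaError_t launch_err = cudaGetLastError();       // bad configuration, invalid arguments, etc.
  if (launch_err != cudaSuccess) {
    fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(launch_err));
    return false;
  }
  cudaError_t run_err = cudaDeviceSynchronize();     // faults raised while the kernel ran
  if (run_err != cudaSuccess) {
    fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(run_err));
    return false;
  }
  return true;
}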
490d5039825632913835c77b021ea828304ae9cb.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::Gemm< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, hipStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
490d5039825632913835c77b021ea828304ae9cb.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::Gemm< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
ec60175367a550d04303bd383314552e70c6ad59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "wb.h" #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) #define BLOCK_SIDE 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this lab __shared__ float privateA[BLOCK_SIDE * BLOCK_SIDE]; __shared__ float privateB[BLOCK_SIDE * BLOCK_SIDE]; float cValue; int tile, i; int tx = threadIdx.x; int ty = threadIdx.y; int rowC = tx + (blockDim.x * blockIdx.x); int colC = ty + (blockDim.y * blockIdx.y); if ((rowC < numCRows) && (colC < numCColumns)) { cValue = 0; for (tile = 0; tile < (1 + ((numAColumns - 1) / BLOCK_SIDE)); ++tile) { if (((tile * BLOCK_SIDE) + tx) < numAColumns) { privateA[(tx * BLOCK_SIDE) + ty] = A[(rowC * numAColumns) + (tile * BLOCK_SIDE) + ty]; } else { privateA[(tx * BLOCK_SIDE) + ty] = 0.0f; } if (((tile * BLOCK_SIDE) + ty) < numBRows) { privateB[(tx * BLOCK_SIDE) + ty] = B[(((tile * BLOCK_SIDE) + tx) * numBColumns) + colC]; } else { privateB[(tx * BLOCK_SIDE) + ty] = 0.0f; } __syncthreads(); for (i = 0; i < BLOCK_SIDE; ++i) { cValue += privateA[(tx * BLOCK_SIDE) + i] * privateB[(i * BLOCK_SIDE) + ty]; } __syncthreads(); } C[(rowC * numCColumns) + colC] = cValue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc((size_t)(sizeof(float) * numCRows * numCColumns)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(hipMalloc(&deviceA, numARows * numAColumns * sizeof(float))); wbCheck(hipMalloc(&deviceB, numBRows * numBColumns * sizeof(float))); wbCheck(hipMalloc(&deviceC, numCRows * numCColumns * sizeof(float))); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck(hipMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), hipMemcpyHostToDevice)); wbCheck(hipMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), hipMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ 
Initialize the grid and block dimensions here dim3 gridDim(1 + ((numCRows - 1) / BLOCK_SIDE), 1 + ((numCColumns - 1) / BLOCK_SIDE), 1); dim3 blockDim(BLOCK_SIDE, BLOCK_SIDE, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiplyShared), dim3(gridDim), dim3(blockDim), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck(hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float), hipMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbCheck(hipFree(deviceA)); wbCheck(hipFree(deviceB)); wbCheck(hipFree(deviceC)); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
ec60175367a550d04303bd383314552e70c6ad59.cu
#include "wb.h" #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) #define BLOCK_SIDE 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this lab __shared__ float privateA[BLOCK_SIDE * BLOCK_SIDE]; __shared__ float privateB[BLOCK_SIDE * BLOCK_SIDE]; float cValue; int tile, i; int tx = threadIdx.x; int ty = threadIdx.y; int rowC = tx + (blockDim.x * blockIdx.x); int colC = ty + (blockDim.y * blockIdx.y); if ((rowC < numCRows) && (colC < numCColumns)) { cValue = 0; for (tile = 0; tile < (1 + ((numAColumns - 1) / BLOCK_SIDE)); ++tile) { if (((tile * BLOCK_SIDE) + tx) < numAColumns) { privateA[(tx * BLOCK_SIDE) + ty] = A[(rowC * numAColumns) + (tile * BLOCK_SIDE) + ty]; } else { privateA[(tx * BLOCK_SIDE) + ty] = 0.0f; } if (((tile * BLOCK_SIDE) + ty) < numBRows) { privateB[(tx * BLOCK_SIDE) + ty] = B[(((tile * BLOCK_SIDE) + tx) * numBColumns) + colC]; } else { privateB[(tx * BLOCK_SIDE) + ty] = 0.0f; } __syncthreads(); for (i = 0; i < BLOCK_SIDE; ++i) { cValue += privateA[(tx * BLOCK_SIDE) + i] * privateB[(i * BLOCK_SIDE) + ty]; } __syncthreads(); } C[(rowC * numCColumns) + colC] = cValue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc((size_t)(sizeof(float) * numCRows * numCColumns)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(cudaMalloc(&deviceA, numARows * numAColumns * sizeof(float))); wbCheck(cudaMalloc(&deviceB, numBRows * numBColumns * sizeof(float))); wbCheck(cudaMalloc(&deviceC, numCRows * numCColumns * sizeof(float))); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck(cudaMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), cudaMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 gridDim(1 + ((numCRows - 1) / 
BLOCK_SIDE), 1 + ((numCColumns - 1) / BLOCK_SIDE), 1); dim3 blockDim(BLOCK_SIDE, BLOCK_SIDE, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiplyShared<<<gridDim, blockDim>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck(cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float), cudaMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbCheck(cudaFree(deviceA)); wbCheck(cudaFree(deviceB)); wbCheck(cudaFree(deviceC)); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
fbe4fbc0b81c12231ac52b8272e338e63d7564bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" int BlockDim() { hipDeviceProp_t prop; hipGetDeviceProperties(&prop,0); return prop.maxThreadsPerBlock; } int GridDim() { hipDeviceProp_t prop; hipGetDeviceProperties(&prop,0); return prop.maxGridSize[0]; } int major() { hipDeviceProp_t prop; hipGetDeviceProperties(&prop,0); return prop.major; } int minor() { hipDeviceProp_t prop; hipGetDeviceProperties(&prop,0); return prop.minor; }
fbe4fbc0b81c12231ac52b8272e338e63d7564bb.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" int BlockDim() { cudaDeviceProp prop; cudaGetDeviceProperties(&prop,0); return prop.maxThreadsPerBlock; } int GridDim() { cudaDeviceProp prop; cudaGetDeviceProperties(&prop,0); return prop.maxGridSize[0]; } int major() { cudaDeviceProp prop; cudaGetDeviceProperties(&prop,0); return prop.major; } int minor() { cudaDeviceProp prop; cudaGetDeviceProperties(&prop,0); return prop.minor; }
b3725c3a7f5613d4b6802d1fd57d35138d96a0f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define WIDTH 16 #define TILE_WIDTH 8 __device__ int minimo_multiplo(int num) { int i = 2; while (num % i != 0) { i++; } return i; } __global__ void Stencil(int *a, int *c) { //Se suman todos los numeros adyacentes al de la casilla central. Los adyacentes se ponen a 0 int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; if (a[y*WIDTH + x] == 9) { int Pvalue = a[y*WIDTH + x]; c[(y - 1)*WIDTH + x] = 0; Pvalue += a[(y - 1)*WIDTH + x]; c[(y + 1)*WIDTH + x] = 0; Pvalue += a[(y + 1)*WIDTH + x]; c[y*WIDTH +(x - 1)] = 0; Pvalue += a[y*WIDTH + (x - 1)]; c[y*WIDTH + (x + 1)] = 0; Pvalue += a[y*WIDTH + (x + 1)]; c[y*WIDTH + x] = Pvalue; } else { c[y*WIDTH + x] = a[y*WIDTH + x]; } } __global__ void Scatter(int *a, int *c) { //Si el numero es 11, suma 11 a todos los numeros en su columna int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; if (a[y*WIDTH + x] == 11) { for (int i = 0; (y - i) >= 0 ; i++) { c[(y - i)*WIDTH + x ] = a[(y - i)*WIDTH + x] + 11; } for (int i = 0; (y + i) < WIDTH; i++) { c[(y + i)*WIDTH + x ] = a[(y + i)*WIDTH + x ] + 11; } c[y*WIDTH + x] = 11; } else { c[y*WIDTH + x] = a[y*WIDTH + x]; } } __global__ void Gather(int *a, int *c) { //Si hay una fila con 3 o mas numeros con el minimo comun multiplo, la posicion que mas en el medio esta acumula la suma de los //numeros y los demas asumen valor 0 int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; int comun = minimo_multiplo(a[y*WIDTH + x]); int izq = 0, der = 0; while (comun == minimo_multiplo(a[y*WIDTH + x + der]) ) { der++; } while (x - izq>=0 && comun == minimo_multiplo(a[y*WIDTH + x - izq]) ) { izq++; } izq--; der--; if ( (izq - der) ==1 && izq + der>1 ) { int Pvalue = 0; for (int i = x - izq; i <= x + der; i++) { Pvalue += a[y*WIDTH + i]; c[y*WIDTH + i] = 0; } c[y*WIDTH + x] = Pvalue; } else { c[y*WIDTH + x] = a[y*WIDTH + x]; } } /*__global__ void Gatherf(int *a, int *c) { //Si hay una fila con 3 numeros con un minimo comun multiplo, la posicion que mas en el medio esta acumula la suma de los //numeros y los demas asumen valor 0 int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; int comun = minimo_multiplo(a[y*WIDTH + x]); int izq = 0, der = 0; while ( x + der < WIDTH && comun == minimo_multiplo(a[y*WIDTH + x + der])) { der++; } while (x - izq >= 0 && comun == minimo_multiplo(a[y*WIDTH + x - izq])) { izq++; } izq--; der--; if ( izq + der > 2) { if (( (izq - der) == 1 || (izq - der) == 0)) { int Pvalue = 0; for (int i = 1; i <= der; i++) { Pvalue += a[y*WIDTH + x+der]; } for (int i = 1; i <= izq; i++) { Pvalue += a[y*WIDTH + x -izq]; } c[y*WIDTH + x] = Pvalue + a[y*WIDTH + x]; } else { c[y*WIDTH + x] = 0; } } else { c[y*WIDTH + x] += a[y*WIDTH + x]; } } __global__ void Gatherc(int *a, int *c) { //Si hay una columna con 3 o mas numeros con el minimo comun multiplo, la posicion que mas en el medio esta acumula la suma de los //numeros y los demas asumen valor 0 int Pvalue = 0; int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; int comun = minimo_multiplo(a[y*WIDTH + x]); int abj = 0, ari = 0; while (y + ari < WIDTH && comun == minimo_multiplo(a[(y+ ari+1 )*WIDTH + x])) { ari++; } while (y - abj >= 0 && comun == minimo_multiplo(a[(y- 
abj-1 )*WIDTH + x])) { abj++; } if (abj + ari > 2) { if (((abj - ari) == 1 || (abj - ari) == 0)) { c[y*WIDTH + x] = c[y*WIDTH + x] + a[y*WIDTH + x]; } else { int diff = ari - abj; if ((ari + abj) % 2 != 0 && diff % 2 != 0) { diff += 1; } diff = diff / 2; c[y*WIDTH + x] = 0; c[(y + diff)*WIDTH + x] = c[(y + diff)*WIDTH + x] + a[y*WIDTH + x]; } } else { c[y*WIDTH + x] = a[y*WIDTH + x]; } }*/ int main() { int a[WIDTH][WIDTH] = { 0 }; int c[WIDTH][WIDTH] = { 0 }; int* d_c; int* d_b; int* d_a; int* d_d; dim3 DimGrid(WIDTH / TILE_WIDTH, WIDTH / TILE_WIDTH); dim3 DimBlock(TILE_WIDTH, TILE_WIDTH); size_t size = WIDTH * WIDTH * sizeof(int); hipMalloc(&d_a, size); hipMalloc(&d_c, size); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ //Rellenamos las matrices a y b de numeros aleatorios //1 + rand() % (99) for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { a[i][j] = 1 + rand() % (99); } } //mostramos el resultado por pantalla printf("La matriz inicial es: \n"); for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { printf("%d \t", a[i][j]); } printf("\n"); } printf("\n"); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_c, c, size, hipMemcpyHostToDevice); // Setup the execution configuration+ //Declaramos que habra un solo grid Stencil << < DimGrid, DimBlock >> > (d_a, d_c); hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_c); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ printf("El resultado de stencil es: \n"); for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { printf("%d \t", c[i][j]); } printf("\n"); } printf("\n"); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ int b[WIDTH][WIDTH] = { 0 }; hipMalloc(&d_a, size); hipMalloc(&d_b, size); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); Scatter << < DimGrid, DimBlock >> > (d_a, d_b); hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ printf("El resultado de scatter es: \n"); for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { printf("%d \t", b[i][j]); } printf("\n"); } printf("\n"); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ hipFree(d_a); hipFree(d_b); /*--------------------------------------------------------------*/ int d[WIDTH][WIDTH] = { 0 }; hipMalloc(&d_a, size); hipMalloc(&d_d, size); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_d, d, size, hipMemcpyHostToDevice); Gather << < DimGrid, DimBlock >> > (d_a, d_b); hipMemcpy(d, d_d, size, hipMemcpyDeviceToHost); /*--------------------------------------------------------- 
----------------------------------------------------------- -----------------------------------------------------------*/ printf("El resultado de Gatherc es: \n"); for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { printf("%d \t", d[i][j]); } printf("\n"); } printf("\n"); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ hipFree(d_a); hipFree(d_d); }
b3725c3a7f5613d4b6802d1fd57d35138d96a0f2.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cuda.h> #define WIDTH 16 #define TILE_WIDTH 8 __device__ int minimo_multiplo(int num) { int i = 2; while (num % i != 0) { i++; } return i; } __global__ void Stencil(int *a, int *c) { //Se suman todos los numeros adyacentes al de la casilla central. Los adyacentes se ponen a 0 int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; if (a[y*WIDTH + x] == 9) { int Pvalue = a[y*WIDTH + x]; c[(y - 1)*WIDTH + x] = 0; Pvalue += a[(y - 1)*WIDTH + x]; c[(y + 1)*WIDTH + x] = 0; Pvalue += a[(y + 1)*WIDTH + x]; c[y*WIDTH +(x - 1)] = 0; Pvalue += a[y*WIDTH + (x - 1)]; c[y*WIDTH + (x + 1)] = 0; Pvalue += a[y*WIDTH + (x + 1)]; c[y*WIDTH + x] = Pvalue; } else { c[y*WIDTH + x] = a[y*WIDTH + x]; } } __global__ void Scatter(int *a, int *c) { //Si el numero es 11, suma 11 a todos los numeros en su columna int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; if (a[y*WIDTH + x] == 11) { for (int i = 0; (y - i) >= 0 ; i++) { c[(y - i)*WIDTH + x ] = a[(y - i)*WIDTH + x] + 11; } for (int i = 0; (y + i) < WIDTH; i++) { c[(y + i)*WIDTH + x ] = a[(y + i)*WIDTH + x ] + 11; } c[y*WIDTH + x] = 11; } else { c[y*WIDTH + x] = a[y*WIDTH + x]; } } __global__ void Gather(int *a, int *c) { //Si hay una fila con 3 o mas numeros con el minimo comun multiplo, la posicion que mas en el medio esta acumula la suma de los //numeros y los demas asumen valor 0 int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; int comun = minimo_multiplo(a[y*WIDTH + x]); int izq = 0, der = 0; while (comun == minimo_multiplo(a[y*WIDTH + x + der]) ) { der++; } while (x - izq>=0 && comun == minimo_multiplo(a[y*WIDTH + x - izq]) ) { izq++; } izq--; der--; if ( (izq - der) ==1 && izq + der>1 ) { int Pvalue = 0; for (int i = x - izq; i <= x + der; i++) { Pvalue += a[y*WIDTH + i]; c[y*WIDTH + i] = 0; } c[y*WIDTH + x] = Pvalue; } else { c[y*WIDTH + x] = a[y*WIDTH + x]; } } /*__global__ void Gatherf(int *a, int *c) { //Si hay una fila con 3 numeros con un minimo comun multiplo, la posicion que mas en el medio esta acumula la suma de los //numeros y los demas asumen valor 0 int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; int comun = minimo_multiplo(a[y*WIDTH + x]); int izq = 0, der = 0; while ( x + der < WIDTH && comun == minimo_multiplo(a[y*WIDTH + x + der])) { der++; } while (x - izq >= 0 && comun == minimo_multiplo(a[y*WIDTH + x - izq])) { izq++; } izq--; der--; if ( izq + der > 2) { if (( (izq - der) == 1 || (izq - der) == 0)) { int Pvalue = 0; for (int i = 1; i <= der; i++) { Pvalue += a[y*WIDTH + x+der]; } for (int i = 1; i <= izq; i++) { Pvalue += a[y*WIDTH + x -izq]; } c[y*WIDTH + x] = Pvalue + a[y*WIDTH + x]; } else { c[y*WIDTH + x] = 0; } } else { c[y*WIDTH + x] += a[y*WIDTH + x]; } } __global__ void Gatherc(int *a, int *c) { //Si hay una columna con 3 o mas numeros con el minimo comun multiplo, la posicion que mas en el medio esta acumula la suma de los //numeros y los demas asumen valor 0 int Pvalue = 0; int y = blockIdx.y * TILE_WIDTH + threadIdx.y; int x = blockIdx.x * TILE_WIDTH + threadIdx.x; int comun = minimo_multiplo(a[y*WIDTH + x]); int abj = 0, ari = 0; while (y + ari < WIDTH && comun == minimo_multiplo(a[(y+ ari+1 )*WIDTH + x])) { ari++; } while (y - abj >= 0 && comun == minimo_multiplo(a[(y- abj-1 )*WIDTH + x])) { abj++; } if (abj + ari > 2) { if (((abj - ari) == 
1 || (abj - ari) == 0)) { c[y*WIDTH + x] = c[y*WIDTH + x] + a[y*WIDTH + x]; } else { int diff = ari - abj; if ((ari + abj) % 2 != 0 && diff % 2 != 0) { diff += 1; } diff = diff / 2; c[y*WIDTH + x] = 0; c[(y + diff)*WIDTH + x] = c[(y + diff)*WIDTH + x] + a[y*WIDTH + x]; } } else { c[y*WIDTH + x] = a[y*WIDTH + x]; } }*/ int main() { int a[WIDTH][WIDTH] = { 0 }; int c[WIDTH][WIDTH] = { 0 }; int* d_c; int* d_b; int* d_a; int* d_d; dim3 DimGrid(WIDTH / TILE_WIDTH, WIDTH / TILE_WIDTH); dim3 DimBlock(TILE_WIDTH, TILE_WIDTH); size_t size = WIDTH * WIDTH * sizeof(int); cudaMalloc(&d_a, size); cudaMalloc(&d_c, size); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ //Rellenamos las matrices a y b de numeros aleatorios //1 + rand() % (99) for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { a[i][j] = 1 + rand() % (99); } } //mostramos el resultado por pantalla printf("La matriz inicial es: \n"); for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { printf("%d \t", a[i][j]); } printf("\n"); } printf("\n"); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice); // Setup the execution configuration+ //Declaramos que habra un solo grid Stencil << < DimGrid, DimBlock >> > (d_a, d_c); cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_c); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ printf("El resultado de stencil es: \n"); for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { printf("%d \t", c[i][j]); } printf("\n"); } printf("\n"); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ int b[WIDTH][WIDTH] = { 0 }; cudaMalloc(&d_a, size); cudaMalloc(&d_b, size); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); Scatter << < DimGrid, DimBlock >> > (d_a, d_b); cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ printf("El resultado de scatter es: \n"); for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { printf("%d \t", b[i][j]); } printf("\n"); } printf("\n"); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ cudaFree(d_a); cudaFree(d_b); /*--------------------------------------------------------------*/ int d[WIDTH][WIDTH] = { 0 }; cudaMalloc(&d_a, size); cudaMalloc(&d_d, size); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_d, d, size, cudaMemcpyHostToDevice); Gather << < DimGrid, DimBlock >> > (d_a, d_b); cudaMemcpy(d, d_d, size, cudaMemcpyDeviceToHost); /*--------------------------------------------------------- ----------------------------------------------------------- 
-----------------------------------------------------------*/ printf("El resultado de Gatherc es: \n"); for (int i = 0; i < WIDTH; i++) { for (int j = 0; j < WIDTH; j++) { printf("%d \t", d[i][j]); } printf("\n"); } printf("\n"); /*--------------------------------------------------------- ----------------------------------------------------------- -----------------------------------------------------------*/ cudaFree(d_a); cudaFree(d_d); }
c022189b0639a964ff8dd8cfc3604e5c6b07c7f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void mykernel(void) { }
c022189b0639a964ff8dd8cfc3604e5c6b07c7f1.cu
#include "includes.h" __global__ void mykernel(void) { }
fb71943fa2ea75a9868b21a0ec18af33ddfb6c52.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensor.cu" #include "hip/hip_runtime.h" #else #ifdef CUDA_TEXTURE hipTextureObject_t THCTensor_(getTextureObject)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); hipTextureObject_t texObj; struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeLinear; resDesc.res.linear.devPtr = THCTensor_(data)(state, self); resDesc.res.linear.sizeInBytes = THCTensor_(nElement)(state, self) * 4; resDesc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) { if (THCTensor_(nElement)(state, self) > 2>>27) THError("Failed to create texture object, " "nElement:%ld exceeds 27-bit addressing required for tex1Dfetch. Cuda Error: %s", THCTensor_(nElement)(state, self), hipGetErrorString(errcode)); else THError("Failed to create texture object: %s", hipGetErrorString(errcode)); } return texObj; } #endif THC_API int THCTensor_(getDevice)(THCState* state, const THCTensor* tensor) { if (!tensor->storage) return -1; return THCStorage_(getDevice)(state, tensor->storage); } #endif
fb71943fa2ea75a9868b21a0ec18af33ddfb6c52.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensor.cu" #include "hip/hip_runtime.h" #else #ifdef CUDA_TEXTURE cudaTextureObject_t THCTensor_(getTextureObject)(THCState *state, THCTensor *self) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self)); cudaTextureObject_t texObj; struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = THCTensor_(data)(state, self); resDesc.res.linear.sizeInBytes = THCTensor_(nElement)(state, self) * 4; resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) { if (THCTensor_(nElement)(state, self) > 2>>27) THError("Failed to create texture object, " "nElement:%ld exceeds 27-bit addressing required for tex1Dfetch. Cuda Error: %s", THCTensor_(nElement)(state, self), hipGetErrorString(errcode)); else THError("Failed to create texture object: %s", hipGetErrorString(errcode)); } return texObj; } #endif THC_API int THCTensor_(getDevice)(THCState* state, const THCTensor* tensor) { if (!tensor->storage) return -1; return THCStorage_(getDevice)(state, tensor->storage); } #endif
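The THCTensor pair above wraps a cudaTextureObject_t around a tensor's linear storage; a hedged, self-contained sketch of the same pattern over a plain float buffer follows. The gather kernel, buffer length, and launch shape are illustrative, and tex1Dfetch is used because the error message in the pair refers to tex1Dfetch's addressing limit.

#include <cstdio>
#include <cstring>
#include <cuda_runtime.h>

__global__ void gather(cudaTextureObject_t tex, float *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = tex1Dfetch<float>(tex, i);     // 1D fetch through the texture object
}

int main() {
  const int n = 1024;
  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));

  cudaResourceDesc resDesc;                          // describe the linear buffer, as above
  memset(&resDesc, 0, sizeof(resDesc));
  resDesc.resType = cudaResourceTypeLinear;
  resDesc.res.linear.devPtr = d_in;
  resDesc.res.linear.sizeInBytes = n * sizeof(float);
  resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);

  cudaTextureDesc texDesc;                           // default sampling settings
  memset(&texDesc, 0, sizeof(texDesc));

  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);

  gather<<<(n + 255) / 256, 256>>>(tex, d_out, n);

  cudaDestroyTextureObject(tex);                     // unlike the pair above, clean up the object
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}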
5d5345fed58acad0993e17465df23097fa56d5b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //forward_kernel_PSV.cpp /* * Created by: Min Basnet * 2020.April.16 * Kathmandu, Nepal */ #include <iostream> #include <fstream> #include <cmath> #include <vector> #include "globvar.cuh" #include "util.hip" #include "fd_cpml.cuh" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void kernel_I(int tf, int fwi_dt, int dt, int nzt, int fwi_z1, int fwi_z2, int fwi_x1, int fwi_x2, int fwi_dz, int fwi_dx, int nft, int nfz, int nfx, real_sim* fwi_sxx, real_sim* fwi_szx, real_sim* fwi_szz, real_sim* fwi_vx, real_sim* fwi_vz, real_sim* sxx, real_sim* szx, real_sim* szz, real_sim* vx, real_sim* vz, real_sim* mu, real_sim* lam, real_sim* grad_lam, real_sim* grad_mu, real_sim* grad_rho) { int iz = blockIdx.x * blockDim.x + threadIdx.x; int ix = blockIdx.y * blockDim.y + threadIdx.y; real_sim s1, s2, s3, s4; if (iz >= fwi_z1 && iz < fwi_z2 && iz % fwi_dz == fwi_z1 % fwi_dz) { // storing only a portion and with grid inteval int zf = (iz - fwi_z1) / fwi_dz; // z index for fwi gradient storage if (ix >= fwi_x1 && ix < fwi_x2 && ix % fwi_dx == fwi_x1 % fwi_dx) { int xf = (ix - fwi_x1) / fwi_dx; // x index for fwi gradient storage int offset = tf * nft * nfz + zf * nfz + xf; s1 = (fwi_sxx[offset] + fwi_szz[offset]) * (sxx[iz * nzt + ix] + szz[iz * nzt + ix]) * 0.25 / ((lam[iz * nzt + ix] + mu[iz * nzt + ix]) * (lam[iz * nzt + ix] + mu[iz * nzt + ix])); s2 = (fwi_sxx[offset] - fwi_szz[offset]) * (sxx[iz * nzt + ix] - szz[iz * nzt + ix]) / (mu[iz * nzt + ix] * mu[iz * nzt + ix]); s3 = (fwi_szx[offset] * szx[iz * nzt + ix]) / (mu[iz * nzt + ix] * mu[iz * nzt + ix]); // The time derivatives of the velocity may have to be computed differently s4 = vx[iz * nzt + ix] * fwi_vx[offset] + vz[iz * nzt + ix] * fwi_vz[offset]; grad_lam[zf * nfz + xf] += fwi_dt * dt * s1; grad_mu[zf * nfz + xf] += fwi_dt * dt * (s3 + s1 + s2); grad_rho[zf * nfz + xf] += fwi_dt * dt * s4; } } else { return; } } __global__ void kernel_II(int ishot, int nt, int nzt, int nxt, int fpad, int ppad, real_sim dt, real_sim dx, real_sim dz, int fdorder, real_sim* vx, real_sim* vz, real_sim* sxx, real_sim* szx, real_sim* szz, real_sim* lam, real_sim* mu, real_sim* mu_zx, real_sim* rho_zp, real_sim* rho_xp, int npml, real_sim* a, real_sim* b, real_sim* K, real_sim* a_half, real_sim* b_half, real_sim* K_half, real_sim* mem_vx_x, real_sim* mem_vx_z, real_sim* mem_vz_x, real_sim* mem_vz_z, real_sim* mem_sxx_x, real_sim* mem_szx_x, real_sim* mem_szz_z, real_sim* mem_szx_z, bool fsurf) { //********************************************************************************** real_sim sxx_x, szx_x, szx_z, szz_z; // spatial stress derivatives real_sim vx_x, vx_z, vz_x, vz_z; // spatial velocity derivatives int nz1, nz2, nx1, nx2; // The computational grid boundaries int px, pz; // index for PML arrys int isnap; // to take snapshots for data storage int tf, zf, xf; // Index parameters for fwi data storage real_sim hc[2] = { 1.0, 1.0 }; // Initial calculation of indices //--------------------------------------------- nz1 = fpad; nz2 = nzt - fpad; nx1 = fpad; nx2 = nxt - fpad; // index variables // index to map PML at the right or positive end int pnx, pnz; pnx = nxt - 2 * ppad + fpad - 1; // nx + ppad + npml + 1 and nx = nxt - 2*ppad 
if (fsurf) { pnz = nzt - 2 * ppad - 1; // nz + ppad + npml + 1 and nz = nzt - ppad - fpad } else { pnz = nzt - 2 * ppad + fpad - 1; // nz + ppad + npml + 1 and nz = nzt - 2*ppad } real_sim dxi = 1.0 / dx; real_sim dzi = 1.0 / dz; // inverse of dx and dz //********************************************************************************************************** //int k = blockIdx.x * blockDim.x + threadIdx.x; //int ix = k % nx2; //int iz = k % nz2; int iz = blockIdx.x * blockDim.x + threadIdx.x; int ix = blockIdx.y * blockDim.y + threadIdx.y; //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Debug Params : . .."); ////*********************************************** //switch (fdorder) { //case(2): { // // Calculate spatial velocity derivatives switch (fdorder) { case(2): if (ix < nx2 && ix >= nx1 && iz >= nz1 && iz < nz2) { // Calculate spatial velocity derivatives vx_x = dxi * hc[1] * (vx[iz * nzt + ix] - vx[iz * nzt + (ix - 1)]); vz_x = dxi * hc[1] * (vz[iz * nzt + (ix + 1)] - vz[iz * nzt + ix]); vx_z = dzi * hc[1] * (vx[(iz + 1) * nzt + ix] - vx[iz * nzt + ix]); vz_z = dzi * hc[1] * (vz[iz * nzt + ix] - vz[(iz - 1) * nzt + ix]); // --------------------------------------------------- // CPML layers for stress tensor kernel // --------------------------------------------------- if (npml > 0) { if (ix >= fpad && ix <= ppad) { // left CPML // Mapping the static CPML and memory variables to px = ix - fpad; // the memory array index //std::cout << std::endl << "Fault1 " << ix << std::endl; mem_vx_x[iz * nzt + px] = b[px] * mem_vx_x[iz * nzt + px] + a[px] * vx_x; mem_vz_x[iz * nzt + px] = b_half[px] * mem_vz_x[iz * nzt + px] + a_half[px] * vz_x; vx_x = vx_x / K[px] + mem_vx_x[iz * nzt + px]; vz_x = vz_x / K_half[px] + mem_vz_x[iz * nzt + px]; } // cpml left if (ix >= (nxt - ppad - 1) && ix < nxt - fpad) { // right CPML // Mapping the static CPML and memory variables to px = ix - pnx; // The PML factors index mem_vx_x[iz * nzt + px] = b[px] * mem_vx_x[iz * nzt + px] + a[px] * vx_x; mem_vz_x[iz * nzt + px] = b_half[px] * mem_vz_x[iz * nzt + px] + a_half[px] * vz_x; vx_x = vx_x / K[px] + mem_vx_x[iz * nzt + px]; vz_x = vz_x / K_half[px] + mem_vz_x[iz * nzt + px]; } // cpml right if (iz >= fpad && iz <= ppad && !fsurf) { // top CPML // Mapping the static CPML and memory variables to pz = iz - fpad; // the memory array index mem_vz_z[pz * 2 * (npml + 1) + ix] = b[pz] * mem_vz_z[pz * 2 * (npml + 1) + ix] + a[pz] * vz_z; mem_vx_z[pz * 2 * (npml + 1) + ix] = b_half[pz] * mem_vx_z[pz * 2 * (npml + 1) + ix] + a_half[pz] * vx_z; vz_z = vz_z / K[pz] + mem_vz_z[pz * 2 * (npml + 1) + ix]; vx_z = vx_z / K_half[pz] + mem_vx_z[pz * 2 * (npml + 1) + ix]; //std::cout << pz<< ", "; } // cpml top if (iz >= (nzt - ppad - 1) && iz < nzt - fpad) { // bottom CPML // Mapping the static CPML and memory variables to pz = iz - pnz; // The PML factors index mem_vz_z[pz * 2 * (npml + 1) + ix] = b[pz] * mem_vz_z[pz * 2 * (npml + 1) + ix] + a[pz] * vz_z; mem_vx_z[pz * 2 * (npml + 1) + ix] = b_half[pz] * mem_vx_z[pz * 2 * (npml + 1) + ix] + a_half[pz] * vx_z; vz_z = vz_z / K[pz] + mem_vz_z[pz * 2 * (npml + 1) + ix]; vx_z = vx_z / K_half[pz] + mem_vx_z[pz * 2 * (npml + 1) + ix]; //std::cout << pz<< ", "; } // cpml bottom } // npml>0 __syncthreads(); //// -------------------------------------------------------------------------- //// -------------------------------------------------------------------------- ////// updating stresses szx[iz * nzt + ix] += dt * mu_zx[iz * (nzt - 1) + ix] * (vz_x + vx_z); sxx[iz * nzt + ix] += dt 
* (lam[iz * nzt + ix] * (vx_x + vz_z) + (2.0f * mu[iz * nzt + ix] * vx_x)); szz[iz * nzt + ix] += dt * (lam[iz * nzt + ix] * (vx_x + vz_z) + (2.0f * mu[iz * nzt + ix] * vz_z)); // ----------------------------------------- // ----------------------------------------- // Override stress for free surface implementation if (fsurf && iz == fpad) { // Free surface at z = 0 or nz = fpad // Denise manual page 13 szz[fpad * nzt + ix] = 0.0; szx[fpad * nzt + ix] = 0.0; sxx[fpad * nzt + ix] = 4.0 * dt * vx_x * (lam[fpad * nzt + ix] * mu[fpad * nzt + ix] + mu[fpad * nzt + ix] * mu[fpad * nzt + ix]) / (lam[fpad * nzt + ix] + 2.0 * mu[fpad * nzt + ix]); } } else { return; } default: // std::cout << "FDORDER = " << fdorder << npml << std::endl; // std::cout << "FD order mismatch. <SIMULATION ABORTED>" << std::endl; //exit(0); // printf("FD order mismatch. <SIMULATION ABORTED>\n"); } // end of switch } __global__ void kernel_III(int ishot, int nt, int nzt, int nxt, int fpad, int ppad, real_sim dt, real_sim dx, real_sim dz, int fdorder, real_sim* vx, real_sim* vz, real_sim* sxx, real_sim* szx, real_sim* szz, real_sim* lam, real_sim* mu, real_sim* mu_zx, real_sim* rho_zp, real_sim* rho_xp, int npml, real_sim* a, real_sim* b, real_sim* K, real_sim* a_half, real_sim* b_half, real_sim* K_half, real_sim* mem_vx_x, real_sim* mem_vx_z, real_sim* mem_vz_x, real_sim* mem_vz_z, real_sim* mem_sxx_x, real_sim* mem_szx_x, real_sim* mem_szz_z, real_sim* mem_szx_z, bool fsurf) { real_sim sxx_x, szx_x, szx_z, szz_z; // spatial stress derivatives real_sim vx_x, vx_z, vz_x, vz_z; // spatial velocity derivatives int nz1, nz2, nx1, nx2; // The computational grid boundaries int px, pz; // index for PML arrys int isnap; // to take snapshots for data storage int tf, zf, xf; // Index parameters for fwi data storage real_sim hc[2] = { 1.0, 1.0 }; // Initial calculation of indices //--------------------------------------------- nz1 = fpad; nz2 = nzt - fpad; nx1 = fpad; nx2 = nxt - fpad; // index variables // index to map PML at the right or positive end int pnx, pnz; pnx = nxt - 2 * ppad + fpad - 1; // nx + ppad + npml + 1 and nx = nxt - 2*ppad if (fsurf) { pnz = nzt - 2 * ppad - 1; // nz + ppad + npml + 1 and nz = nzt - ppad - fpad } else { pnz = nzt - 2 * ppad + fpad - 1; // nz + ppad + npml + 1 and nz = nzt - 2*ppad } real_sim dxi = 1.0 / dx; real_sim dzi = 1.0 / dz; // inverse of dx and dz //************************************************* int iz = blockIdx.x * blockDim.x + threadIdx.x; int ix = blockIdx.y * blockDim.y + threadIdx.y; switch (fdorder) { case(2): if (ix < nx2 && ix >= nx1 && iz >= nz1 && iz < nz2) { // printf("abc"); // compute spatial stress derivatives sxx_x = dxi * hc[1] * (sxx[iz * nzt + ix + 1] - sxx[iz * nzt + ix]); szx_z = dxi * hc[1] * (szx[iz * nzt + ix] - szx[(iz - 1) * nzt + ix]); szx_x = dzi * hc[1] * (szx[iz * nzt + ix] - szx[iz * nzt + ix - 1]); szz_z = dzi * hc[1] * (szz[(iz + 1) * nzt + ix] - szz[iz * nzt + ix]); // --------------------------------------------------- // CPML layers for particle velocity kernel // --------------------------------------------------- if (npml > 0) { if (ix >= fpad && ix < ppad) { // left CPML // Mapping the static CPML and memory variables to px = ix - fpad; // the memory array index mem_sxx_x[iz * nzt + px] = b[px] * mem_sxx_x[iz * nzt + px] + a[px] * sxx_x; mem_szx_x[iz * nzt + px] = b_half[px] * mem_szx_x[iz * nzt + px] + a_half[px] * szx_x; sxx_x = sxx_x / K[px] + mem_sxx_x[iz * nzt + px]; szx_x = szx_x / K_half[px] + mem_szx_x[iz * nzt + px]; } // cpml left if 
(ix >= (nxt - ppad - 1) && ix < (nxt - fpad)) { // right CPML // Mapping the static CPML and memory variables to px = ix - pnx; // The PML factors index mem_sxx_x[iz * nzt + px] = b[px] * mem_sxx_x[iz * nzt + px] + a[px] * sxx_x; mem_szx_x[iz * nzt + px] = b_half[px] * mem_szx_x[iz * nzt + px] + a_half[px] * szx_x; sxx_x = sxx_x / K[px] + mem_sxx_x[iz * nzt + px]; szx_x = szx_x / K_half[px] + mem_szx_x[iz * nzt + px]; } // cpml right if (iz >= fpad && iz < ppad && !fsurf) { // top CPML // Mapping the static CPML and memory variables to pz = iz - fpad; // the memory array index mem_szz_z[pz * 2 * (npml + 1) + ix] = b[pz] * mem_szz_z[pz * 2 * (npml + 1) + ix] + a[pz] * szz_z; mem_szx_z[pz * 2 * (npml + 1) + ix] = b_half[pz] * mem_szx_z[pz * 2 * (npml + 1) + ix] + a_half[pz] * szx_z; szz_z = szz_z / K[pz] + mem_szz_z[pz * 2 * (npml + 1) + ix]; szx_z = szx_z / K_half[pz] + mem_szx_z[pz * 2 * (npml + 1) + ix]; } // cpml top if (iz >= (nzt - ppad - 1) && iz < nzt - fpad) { // bottom CPML // Mapping the static CPML and memory variables to pz = iz - pnz; // The PML factors index mem_szz_z[pz * 2 * (npml + 1) + ix] = b[pz] * mem_szz_z[pz * 2 * (npml + 1) + ix] + a[pz] * szz_z; mem_szx_z[pz * 2 * (npml + 1) + ix] = b_half[pz] * mem_szx_z[pz * 2 * (npml + 1) + ix] + a_half[pz] * szx_z; szz_z = szz_z / K[pz] + mem_szz_z[pz * 2 * (npml + 1) + ix]; szx_z = szx_z / K_half[pz] + mem_szx_z[pz * 2 * (npml + 1) + ix]; } // cpml bottom __syncthreads(); } // npml>0 // update particle velocities vx[iz * nzt + ix] += dt * rho_xp[iz * (nzt - 1) + ix] * (sxx_x + szx_z); vz[iz * nzt + ix] += dt * rho_zp[iz * (nzt - 1) + ix] * (szx_x + szz_z); } else { return; } // break; default: // std::cout << "FDORDER = " << fdorder << npml << std::endl; // std::cout << "FD order mismatch. <SIMULATION ABORTED>" << std::endl; //exit(0); // printf("FD order mismatch. 
<SIMULATION ABORTED>\n"); } // end of switch } __global__ void kernel_IV(int nx1, int nx2, int fpad, int nzt, real_sim* szx, real_sim* szz) { int iz = blockIdx.x * blockDim.x + threadIdx.x; int ix = blockIdx.y * blockDim.y + threadIdx.y; int sz = 1; if (ix >= nx1 && ix < nx2 && sz >= 1 && sz <= fpad) { // mirroring szx[(fpad - sz) * nzt + ix] = -szx[(fpad + sz) * nzt + ix]; szz[(fpad - sz) * nzt + ix] = -szz[(fpad + sz) * nzt + ix]; } } void adjoint_kernel_PSV_GPU(int ishot, // shot index // Time and space grid arguments int nt, int nzt, int nxt, int fpad, int ppad, real_sim dt, real_sim dx, real_sim dz, int snap_interval, bool fsurf, // computationsl arguments real_sim* hc, int fdorder, // Wave arguments real_sim** vx, real_sim** vz, real_sim** sxx, real_sim** szx, real_sim** szz, // Medium arguments real_sim** lam, real_sim** mu, real_sim** mu_zx, real_sim** rho_zp, real_sim** rho_xp, //PML arguments int npml, real_sim* a, real_sim* b, real_sim* K, real_sim* a_half, real_sim* b_half, real_sim* K_half, // PML memory arrays real_sim** mem_vx_x, real_sim** mem_vx_z, real_sim** mem_vz_x, real_sim** mem_vz_z, real_sim** mem_sxx_x, real_sim** mem_szx_x, real_sim** mem_szz_z, real_sim** mem_szx_z, // Source arguments int nsrc, ivec src_x, ivec src_z, ivec src_comp, real_sim** src_signal, ivec source_to_fire_shot, // FWI arguments bool fwinv, int fwi_dt, int fwi_dx, int fwi_dz, int fwi_x1, int fwi_x2, int fwi_z1, int fwi_z2, real_sim*** fwi_vx, real_sim*** fwi_vz, real_sim*** fwi_sxx, real_sim*** fwi_szx, real_sim*** fwi_szz, // Gradient of the materials real_sim** grad_lam, real_sim** grad_mu, real_sim** grad_rho, //*****************GPU PARAMS*************** real_sim* d_a, real_sim* d_b, real_sim* d_K, real_sim* d_a_half, real_sim* d_b_half, real_sim* d_K_half, // real_sim* d_vx, real_sim* d_vz, real_sim* d_sxx, real_sim* d_szx, real_sim* d_szz, // real_sim* d_fwi_vx, real_sim* d_fwi_vz, real_sim* d_fwi_sxx, real_sim* d_fwi_szx, real_sim* d_fwi_szz, // real_sim* d_mem_vx_x, real_sim* d_mem_vx_z, real_sim* d_mem_vz_x, real_sim* d_mem_vz_z, real_sim* d_mem_sxx_x, real_sim* d_mem_szx_x, real_sim* d_mem_szz_z, real_sim* d_mem_szx_z, // real_sim* d_grad_lam, real_sim* d_grad_mu, real_sim* d_grad_rho, // real_sim* d_lam, real_sim* d_mu, real_sim* d_mu_zx, real_sim* d_rho_zp, real_sim* d_rho_xp ) { //const bool fwi = 1; // int nt = number of timesteps // int nz1, nz2, nx1, nx2 = start and end grids along z and x directions // int dt, dx, dz = grid spacing in time and space // int* hc = holberg coefficients // real_sim **&vx, **&vz, **&sxx, **&szx, **&szz, // wave parameters (particle velocity and stresses) // real_sim **&lam, **&mu, **&mu_zx, **&rho_zp, **&rho_xp // medium parameters (lam's parameters') // real_sim *a, *b, *K;// CPML parameters // real_sim *a_half, *b_half, *K_half // CPML interpolated parameters // real_sim ** mem_vx_x, ** mem_vx_z, ** mem_vz_x, ** mem_vz_z; // PML velocity derivative memory // real_sim **&mem_sxx_x, **&mem_szx_x, **&mem_szz_z, real_sim **&mem_szx_z // PML stress derivative memory // bool fsurf :: free surface on the top // Source arguments // int nsrc = number of sources // int **src_loc = grid location of source + source parameter type for eg exploxive, vz only etc // real_sim ** src_signal = signal values for the sources //real_sim sxx_x, szx_x, szx_z, szz_z; // spatial stress derivatives //real_sim vx_x, vx_z, vz_x, vz_z; // spatial velocity derivatives int nz1, nz2, nx1, nx2; // The computational grid boundaries int px, pz; // index for PML arrys int isnap; // 
to take snapshots for data storage int tf, zf, xf; // Index parameters for fwi data storage real_sim s1, s2, s3, s4; // Intermediate variables for gradient calculation std::ofstream outFile; // file to print vz arrays // Initial calculation of indices //--------------------------------------------- nz1 = fpad; nz2 = nzt - fpad; nx1 = fpad; nx2 = nxt - fpad; // index variables // index to map PML at the right or positive end int pnx, pnz; pnx = nxt - 2 * ppad + fpad - 1; // nx + ppad + npml + 1 and nx = nxt - 2*ppad if (fsurf) { pnz = nzt - 2 * ppad - 1; // nz + ppad + npml + 1 and nz = nzt - ppad - fpad } else { pnz = nzt - 2 * ppad + fpad - 1; // nz + ppad + npml + 1 and nz = nzt - 2*ppad } real_sim dxi = 1.0 / dx; real_sim dzi = 1.0 / dz; // inverse of dx and dz // ----------------------------------------------------------- real_sim size = nzt * nxt; gpuErrchk(hipMemset(d_vz, 0, size * sizeof(real_sim))); gpuErrchk(hipMemset(d_vx, 0, size * sizeof(real_sim))); gpuErrchk(hipMemset(d_sxx, 0, size * sizeof(real_sim))); gpuErrchk(hipMemset(d_szx, 0, size * sizeof(real_sim))); gpuErrchk(hipMemset(d_szz, 0, size * sizeof(real_sim))); // Gradient kernels //----------------------------- const int nft = 1 + (nt - 1) / fwi_dt; const int nfz = 1 + (fwi_z2 - fwi_z1) / fwi_dz; const int nfx = 1 + (fwi_x2 - fwi_x1) / fwi_dx; int size_grad = nfz * nfx; gpuErrchk(hipMemset(d_grad_lam, 0, size_grad * sizeof(real_sim))); gpuErrchk(hipMemset(d_grad_mu, 0, size_grad * sizeof(real_sim))); gpuErrchk(hipMemset(d_grad_rho, 0, size_grad * sizeof(real_sim))); size = nzt * nxt; gpuErrchk(hipPeekAtLastError()); int box1 = 16, box2 = 16; dim3 threadsPerBlock(box1, box2); dim3 blocksPerGrid((nz2 + box1 - 1) / box1, (nx2 + box2 - 1) / box2); gpuErrchk(hipPeekAtLastError()); size = nzt * nxt; //************************************************************ // Start of time loop isnap = 0; for (int it = nt - 1; it >= 0; it--) { // --------------------------------------------------------- // Computation of gradient kernels gpuErrchk(hipMemcpy(d_vz, vz[0], size * sizeof(real_sim), hipMemcpyHostToDevice)); if (fwinv && !(it % fwi_dt)) { tf = it / fwi_dt; // t index for fwi gradient storage //std::cout<<"fwi time: " << it << ", adjoint simulation" << std::endl; kernel_I << < blocksPerGrid, threadsPerBlock >> > (tf, fwi_dt, dt, nzt, fwi_z1, fwi_z2, fwi_x1, fwi_x2, fwi_dz, fwi_dx, nft, nfz, nfx, d_fwi_sxx, d_fwi_szx, d_fwi_szz, d_fwi_vx, d_fwi_vz, d_sxx, d_szx, d_szz, d_vx, d_vz, d_mu, d_lam, d_grad_lam, d_grad_mu, d_grad_rho); } //****************************************************Kernrl calls GPU************************************* // Calculate spatial velocity derivatives kernel_II << < blocksPerGrid, threadsPerBlock >> > (ishot, nt, nzt, nxt, fpad, ppad, dt, dx, dz, fdorder, d_vx, d_vz, d_sxx, d_szx, d_szz, d_lam, d_mu, d_mu_zx, d_rho_zp, d_rho_xp, npml, d_a, d_b, d_K, d_a_half, d_b_half, d_K_half, d_mem_vx_x, d_mem_vx_z, d_mem_vz_x, d_mem_vz_z, d_mem_sxx_x, d_mem_szx_x, d_mem_szz_z, d_mem_szx_z, fsurf); gpuErrchk(hipPeekAtLastError()); // compute spatial stress derivatives kernel_III << < blocksPerGrid, threadsPerBlock >> > (ishot, nt, nzt, nxt, fpad, ppad, dt, dx, dz, fdorder, d_vx, d_vz, d_sxx, d_szx, d_szz, d_lam, d_mu, d_mu_zx, d_rho_zp, d_rho_xp, npml, d_a, d_b, d_K, d_a_half, d_b_half, d_K_half, d_mem_vx_x, d_mem_vx_z, d_mem_vz_x, d_mem_vz_z, d_mem_sxx_x, d_mem_szx_x, d_mem_szz_z, d_mem_szx_z, fsurf); gpuErrchk(hipPeekAtLastError()); if (fsurf) { // Mirroring stresses for free surface condition kernel_IV << < 
blocksPerGrid, threadsPerBlock >> > (nx1, nx2, fpad, nzt, d_szx, d_szz); gpuErrchk(hipPeekAtLastError()); } gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipMemcpy(vz[0], d_vz, size * sizeof(real_sim), hipMemcpyDeviceToHost)); //**************************************************************************** // Adding Velocity update related sources //---------------------------------------- for (int is = 0; is <= 0 /*nsrc*/; is++) { if (source_to_fire_shot[is] == ishot) { switch (src_comp[is]) {// defines the signal type case(2): // vz component only vz[src_z[is]][src_x[is]] += src_signal[is][it]; } } } // --------------------------------------- // Printing out AASCII data for snap intervals if (!(it % snap_interval || it == 0)) { std::cout << "Time step " << it << " of " << nt << " in adjoint kernel." << std::endl; isnap++; } } // end of time loop } // ***************** CPU********************** void adjoint_kernel_PSV(int ishot, // shot index // Time and space grid arguments int nt, int nzt, int nxt, int fpad, int ppad, real_sim dt, real_sim dx, real_sim dz, int snap_interval, bool fsurf, // computationsl arguments real_sim* hc, int fdorder, // Wave arguments real_sim** vx, real_sim** vz, real_sim** sxx, real_sim** szx, real_sim** szz, // Medium arguments real_sim** lam, real_sim** mu, real_sim** mu_zx, real_sim** rho_zp, real_sim** rho_xp, //PML arguments int npml, real_sim* a, real_sim* b, real_sim* K, real_sim* a_half, real_sim* b_half, real_sim* K_half, // PML memory arrays real_sim** mem_vx_x, real_sim** mem_vx_z, real_sim** mem_vz_x, real_sim** mem_vz_z, real_sim** mem_sxx_x, real_sim** mem_szx_x, real_sim** mem_szz_z, real_sim** mem_szx_z, // Source arguments int nsrc, ivec src_x, ivec src_z, ivec src_comp, real_sim** src_signal, ivec source_to_fire_shot, // FWI arguments bool fwinv, int fwi_dt, int fwi_dx, int fwi_dz, int fwi_x1, int fwi_x2, int fwi_z1, int fwi_z2, real_sim*** fwi_vx, real_sim*** fwi_vz, real_sim*** fwi_sxx, real_sim*** fwi_szx, real_sim*** fwi_szz, // Gradient of the materials real_sim** grad_lam, real_sim** grad_mu, real_sim** grad_rho) { //const bool fwi = 1; // int nt = number of timesteps // int nz1, nz2, nx1, nx2 = start and end grids along z and x directions // int dt, dx, dz = grid spacing in time and space // int* hc = holberg coefficients // real_sim **&vx, **&vz, **&sxx, **&szx, **&szz, // wave parameters (particle velocity and stresses) // real_sim **&lam, **&mu, **&mu_zx, **&rho_zp, **&rho_xp // medium parameters (lam's parameters') // real_sim *a, *b, *K;// CPML parameters // real_sim *a_half, *b_half, *K_half // CPML interpolated parameters // real_sim ** mem_vx_x, ** mem_vx_z, ** mem_vz_x, ** mem_vz_z; // PML velocity derivative memory // real_sim **&mem_sxx_x, **&mem_szx_x, **&mem_szz_z, real_sim **&mem_szx_z // PML stress derivative memory // bool fsurf :: free surface on the top // Source arguments // int nsrc = number of sources // int **src_loc = grid location of source + source parameter type for eg exploxive, vz only etc // real_sim ** src_signal = signal values for the sources real_sim sxx_x, szx_x, szx_z, szz_z; // spatial stress derivatives real_sim vx_x, vx_z, vz_x, vz_z; // spatial velocity derivatives int nz1, nz2, nx1, nx2; // The computational grid boundaries int px, pz; // index for PML arrys int isnap; // to take snapshots for data storage int tf, zf, xf; // Index parameters for fwi data storage real_sim s1, s2, s3, s4; // Intermediate variables for gradient calculation std::ofstream outFile; // file to print vz arrays // Initial 
calculation of indices //--------------------------------------------- nz1 = fpad; nz2 = nzt - fpad; nx1 = fpad; nx2 = nxt - fpad; // index variables // index to map PML at the right or positive end int pnx, pnz; pnx = nxt - 2 * ppad + fpad - 1; // nx + ppad + npml + 1 and nx = nxt - 2*ppad if (fsurf) { pnz = nzt - 2 * ppad - 1; // nz + ppad + npml + 1 and nz = nzt - ppad - fpad } else { pnz = nzt - 2 * ppad + fpad - 1; // nz + ppad + npml + 1 and nz = nzt - 2*ppad } real_sim dxi = 1.0 / dx; real_sim dzi = 1.0 / dz; // inverse of dx and dz // ----------------------------------------------------------- // Reset kernels // ----------------------------------------------------- // Stress and velocity kernels for (int iz = 0; iz < nzt; iz++) { for (int ix = 0; ix < nxt; ix++) { // Wave velocity and stress tensor arrays vx[iz][ix] = 0.0; vz[iz][ix] = 0.0; sxx[iz][ix] = 0.0; szx[iz][ix] = 0.0; szz[iz][ix] = 0.0; } } // Gradient kernels //----------------------------- const int nfz = 1 + (fwi_z2 - fwi_z1) / fwi_dz; const int nfx = 1 + (fwi_x2 - fwi_x1) / fwi_dx; for (int iz = 0; iz < nfz; iz++) { for (int ix = 0; ix < nfx; ix++) { // Gradients of the material grad_lam[iz][ix] = 0.0; grad_mu[iz][ix] = 0.0; grad_rho[iz][ix] = 0.0; } } // Start of time loop isnap = 0; for (int it = nt - 1; it >= 0; it--) { // --------------------------------------------------------- // Computation of gradient kernels if (fwinv && !(it % fwi_dt)) { tf = it / fwi_dt; // t index for fwi gradient storage //std::cout<<"fwi time: " << it << ", adjoint simulation" << std::endl; for (int iz = fwi_z1; iz < fwi_z2; iz += fwi_dz) { // storing only a portion and with grid inteval zf = (iz - fwi_z1) / fwi_dz; // z index for fwi gradient storage for (int ix = fwi_x1; ix < fwi_x2; ix += fwi_dx) { xf = (ix - fwi_x1) / fwi_dx; // x index for fwi gradient storage s1 = (fwi_sxx[tf][zf][xf] + fwi_szz[tf][zf][xf]) * (sxx[iz][ix] + szz[iz][ix]) * 0.25 / ((lam[iz][ix] + mu[iz][ix]) * (lam[iz][ix] + mu[iz][ix])); s2 = (fwi_sxx[tf][zf][xf] - fwi_szz[tf][zf][xf]) * (sxx[iz][ix] - szz[iz][ix]) / (mu[iz][ix] * mu[iz][ix]); s3 = (fwi_szx[tf][zf][xf] * szx[iz][ix]) / (mu[iz][ix] * mu[iz][ix]); // The time derivatives of the velocity may have to be computed differently s4 = vx[iz][ix] * fwi_vx[tf][zf][xf] + vz[iz][ix] * fwi_vz[tf][zf][xf]; grad_lam[zf][xf] += fwi_dt * dt * s1; grad_mu[zf][xf] += fwi_dt * dt * (s3 + s1 + s2); grad_rho[zf][xf] += fwi_dt * dt * s4; } } } // -------------------------------------------------------- // Time integration of dynamic stress fields switch (fdorder) { case(2): // updating stress tensors for (int iz = nz1; iz < nz2; iz++) { //std::cout << std::endl << "PML indices: " << std::endl; for (int ix = nx1; ix < nx2; ix++) { //std::cout << ix << ", " << iz << std::endl; // Calculate spatial velocity derivatives vx_x = dxi * hc[1] * (vx[iz][ix] - vx[iz][ix - 1]); vz_x = dxi * hc[1] * (vz[iz][ix + 1] - vz[iz][ix]); vx_z = dzi * hc[1] * (vx[iz + 1][ix] - vx[iz][ix]); vz_z = dzi * hc[1] * (vz[iz][ix] - vz[iz - 1][ix]); // --------------------------------------------------- // CPML layers for stress tensor kernel // --------------------------------------------------- if (npml > 0) { if (ix >= fpad && ix <= ppad) { // left CPML // Mapping the static CPML and memory variables to px = ix - fpad; // the memory array index //std::cout << std::endl << "Fault1 " << ix << std::endl; mem_vx_x[iz][px] = b[px] * mem_vx_x[iz][px] + a[px] * vx_x; mem_vz_x[iz][px] = b_half[px] * mem_vz_x[iz][px] + a_half[px] * vz_x; vx_x = vx_x / K[px] 
+ mem_vx_x[iz][px]; vz_x = vz_x / K_half[px] + mem_vz_x[iz][px]; } // cpml left if (ix >= (nxt - ppad - 1) && ix < nxt - fpad) { // right CPML // Mapping the static CPML and memory variables to px = ix - pnx; // The PML factors index mem_vx_x[iz][px] = b[px] * mem_vx_x[iz][px] + a[px] * vx_x; mem_vz_x[iz][px] = b_half[px] * mem_vz_x[iz][px] + a_half[px] * vz_x; vx_x = vx_x / K[px] + mem_vx_x[iz][px]; vz_x = vz_x / K_half[px] + mem_vz_x[iz][px]; } // cpml right if (iz >= fpad && iz <= ppad && !fsurf) { // top CPML // Mapping the static CPML and memory variables to pz = iz - fpad; // the memory array index mem_vz_z[pz][ix] = b[pz] * mem_vz_z[pz][ix] + a[pz] * vz_z; mem_vx_z[pz][ix] = b_half[pz] * mem_vx_z[pz][ix] + a_half[pz] * vx_z; vz_z = vz_z / K[pz] + mem_vz_z[pz][ix]; vx_z = vx_z / K_half[pz] + mem_vx_z[pz][ix]; //std::cout << pz<< ", "; } // cpml top if (iz >= (nzt - ppad - 1) && iz < nzt - fpad) { // bottom CPML // Mapping the static CPML and memory variables to pz = iz - pnz; // The PML factors index mem_vz_z[pz][ix] = b[pz] * mem_vz_z[pz][ix] + a[pz] * vz_z; mem_vx_z[pz][ix] = b_half[pz] * mem_vx_z[pz][ix] + a_half[pz] * vx_z; vz_z = vz_z / K[pz] + mem_vz_z[pz][ix]; vx_z = vx_z / K_half[pz] + mem_vx_z[pz][ix]; //std::cout << pz<< ", "; } // cpml bottom } // npml>0 // -------------------------------------------------------------------------- // -------------------------------------------------------------------------- // updating stresses szx[iz][ix] += dt * mu_zx[iz][ix] * (vz_x + vx_z); sxx[iz][ix] += dt * (lam[iz][ix] * (vx_x + vz_z) + (2.0 * mu[iz][ix] * vx_x)); szz[iz][ix] += dt * (lam[iz][ix] * (vx_x + vz_z) + (2.0 * mu[iz][ix] * vz_z)); // ----------------------------------------- // ----------------------------------------- // Override stress for free surface implementation if (fsurf && iz == fpad) { // Free surface at z = 0 or nz = fpad // Denise manual page 13 szz[fpad][ix] = 0.0; szx[fpad][ix] = 0.0; sxx[fpad][ix] = 4.0 * dt * vx_x * (lam[fpad][ix] * mu[fpad][ix] + mu[fpad][ix] * mu[fpad][ix]) / (lam[fpad][ix] + 2.0 * mu[fpad][ix]); //} } } } // STRESS MIRRORING TECHNIQUE FOR FREE SURFACE CONDITION if (fsurf) { // Mirroring stresses for free surface condition for (int ix = nx1; ix < nx2; ix++) { for (int sz = 1; sz <= fpad; sz++) { // mirroring szx[fpad - sz][ix] = -szx[fpad + sz][ix]; szz[fpad - sz][ix] = -szz[fpad + sz][ix]; } } } // updating velocity tensors for (int iz = nz1; iz < nz2; iz++) { for (int ix = nx1; ix < nx2; ix++) { // compute spatial stress derivatives sxx_x = dxi * hc[1] * (sxx[iz][ix + 1] - sxx[iz][ix]); szx_x = dxi * hc[1] * (szx[iz][ix] - szx[iz][ix - 1]); szx_z = dzi * hc[1] * (szx[iz][ix] - szx[iz - 1][ix]); szz_z = dzi * hc[1] * (szz[iz + 1][ix] - szz[iz][ix]); // --------------------------------------------------- // CPML layers for particle velocity kernel // --------------------------------------------------- if (npml > 0) { if (ix >= fpad && ix < ppad) { // left CPML // Mapping the static CPML and memory variables to px = ix - fpad; // the memory array index mem_sxx_x[iz][px] = b[px] * mem_sxx_x[iz][px] + a[px] * sxx_x; mem_szx_x[iz][px] = b_half[px] * mem_szx_x[iz][px] + a_half[px] * szx_x; sxx_x = sxx_x / K[px] + mem_sxx_x[iz][px]; szx_x = szx_x / K_half[px] + mem_szx_x[iz][px]; } // cpml left if (ix >= (nxt - ppad - 1) && ix < nxt - fpad) { // right CPML // Mapping the static CPML and memory variables to px = ix - pnx; // The PML factors index mem_sxx_x[iz][px] = b[px] * mem_sxx_x[iz][px] + a[px] * sxx_x; mem_szx_x[iz][px] = b_half[px] * 
mem_szx_x[iz][px] + a_half[px] * szx_x; sxx_x = sxx_x / K[px] + mem_sxx_x[iz][px]; szx_x = szx_x / K_half[px] + mem_szx_x[iz][px]; } // cpml right if (iz >= fpad && iz < ppad && !fsurf) { // top CPML // Mapping the static CPML and memory variables to pz = iz - fpad; // the memory array index mem_szz_z[pz][ix] = b[pz] * mem_szz_z[pz][ix] + a[pz] * szz_z; mem_szx_z[pz][ix] = b_half[pz] * mem_szx_z[pz][ix] + a_half[pz] * szx_z; szz_z = szz_z / K[pz] + mem_szz_z[pz][ix]; szx_z = szx_z / K_half[pz] + mem_szx_z[pz][ix]; } // cpml top if (iz >= (nzt - ppad - 1) && iz < nzt - fpad) { // bottom CPML // Mapping the static CPML and memory variables to pz = iz - pnz; // The PML factors index mem_szz_z[pz][ix] = b[pz] * mem_szz_z[pz][ix] + a[pz] * szz_z; mem_szx_z[pz][ix] = b_half[pz] * mem_szx_z[pz][ix] + a_half[pz] * szx_z; szz_z = szz_z / K[pz] + mem_szz_z[pz][ix]; szx_z = szx_z / K_half[pz] + mem_szx_z[pz][ix]; } // cpml bottom } // npml>0 // -------------------------------------------------------------------------- // -------------------------------------------------------------------------- // update particle velocities vx[iz][ix] += dt * rho_xp[iz][ix] * (sxx_x + szx_z); vz[iz][ix] += dt * rho_zp[iz][ix] * (szx_x + szz_z); } } break; default: std::cout << "FDORDER = " << fdorder << npml << std::endl; std::cout << "FD order mismatch. <SIMULATION ABORTED>" << std::endl; exit(0); } // end of switch // Adding Velocity update related sources //---------------------------------------- for (int is = 0; is <= 0; is++) { if (source_to_fire_shot[is] == ishot) { switch (src_comp[is]) {// defines the signal type case(2): // vz component only vz[src_z[is]][src_x[is]] += src_signal[is][it]; } } } // --------------------------------------- // Printing out AASCII data for snap intervals if (!(it % snap_interval || it == 0)) { std::cout << "Time step " << it << " of " << nt << " in adjoint kernel." << std::endl; isnap++; } } // end of time loop }
5d5345fed58acad0993e17465df23097fa56d5b7.cu
//forward_kernel_PSV.cpp /* * Created by: Min Basnet * 2020.April.16 * Kathmandu, Nepal */ #include <iostream> #include <fstream> #include <cmath> #include <vector> #include "globvar.cuh" #include "util.cu" #include "fd_cpml.cuh" #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void kernel_I(int tf, int fwi_dt, int dt, int nzt, int fwi_z1, int fwi_z2, int fwi_x1, int fwi_x2, int fwi_dz, int fwi_dx, int nft, int nfz, int nfx, real_sim* fwi_sxx, real_sim* fwi_szx, real_sim* fwi_szz, real_sim* fwi_vx, real_sim* fwi_vz, real_sim* sxx, real_sim* szx, real_sim* szz, real_sim* vx, real_sim* vz, real_sim* mu, real_sim* lam, real_sim* grad_lam, real_sim* grad_mu, real_sim* grad_rho) { int iz = blockIdx.x * blockDim.x + threadIdx.x; int ix = blockIdx.y * blockDim.y + threadIdx.y; real_sim s1, s2, s3, s4; if (iz >= fwi_z1 && iz < fwi_z2 && iz % fwi_dz == fwi_z1 % fwi_dz) { // storing only a portion and with grid inteval int zf = (iz - fwi_z1) / fwi_dz; // z index for fwi gradient storage if (ix >= fwi_x1 && ix < fwi_x2 && ix % fwi_dx == fwi_x1 % fwi_dx) { int xf = (ix - fwi_x1) / fwi_dx; // x index for fwi gradient storage int offset = tf * nft * nfz + zf * nfz + xf; s1 = (fwi_sxx[offset] + fwi_szz[offset]) * (sxx[iz * nzt + ix] + szz[iz * nzt + ix]) * 0.25 / ((lam[iz * nzt + ix] + mu[iz * nzt + ix]) * (lam[iz * nzt + ix] + mu[iz * nzt + ix])); s2 = (fwi_sxx[offset] - fwi_szz[offset]) * (sxx[iz * nzt + ix] - szz[iz * nzt + ix]) / (mu[iz * nzt + ix] * mu[iz * nzt + ix]); s3 = (fwi_szx[offset] * szx[iz * nzt + ix]) / (mu[iz * nzt + ix] * mu[iz * nzt + ix]); // The time derivatives of the velocity may have to be computed differently s4 = vx[iz * nzt + ix] * fwi_vx[offset] + vz[iz * nzt + ix] * fwi_vz[offset]; grad_lam[zf * nfz + xf] += fwi_dt * dt * s1; grad_mu[zf * nfz + xf] += fwi_dt * dt * (s3 + s1 + s2); grad_rho[zf * nfz + xf] += fwi_dt * dt * s4; } } else { return; } } __global__ void kernel_II(int ishot, int nt, int nzt, int nxt, int fpad, int ppad, real_sim dt, real_sim dx, real_sim dz, int fdorder, real_sim* vx, real_sim* vz, real_sim* sxx, real_sim* szx, real_sim* szz, real_sim* lam, real_sim* mu, real_sim* mu_zx, real_sim* rho_zp, real_sim* rho_xp, int npml, real_sim* a, real_sim* b, real_sim* K, real_sim* a_half, real_sim* b_half, real_sim* K_half, real_sim* mem_vx_x, real_sim* mem_vx_z, real_sim* mem_vz_x, real_sim* mem_vz_z, real_sim* mem_sxx_x, real_sim* mem_szx_x, real_sim* mem_szz_z, real_sim* mem_szx_z, bool fsurf) { //********************************************************************************** real_sim sxx_x, szx_x, szx_z, szz_z; // spatial stress derivatives real_sim vx_x, vx_z, vz_x, vz_z; // spatial velocity derivatives int nz1, nz2, nx1, nx2; // The computational grid boundaries int px, pz; // index for PML arrys int isnap; // to take snapshots for data storage int tf, zf, xf; // Index parameters for fwi data storage real_sim hc[2] = { 1.0, 1.0 }; // Initial calculation of indices //--------------------------------------------- nz1 = fpad; nz2 = nzt - fpad; nx1 = fpad; nx2 = nxt - fpad; // index variables // index to map PML at the right or positive end int pnx, pnz; pnx = nxt - 2 * ppad + fpad - 1; // nx + ppad + npml + 1 and nx = nxt - 2*ppad if (fsurf) { pnz = nzt - 2 * ppad - 1; // nz + ppad + npml + 1 and nz = nzt - ppad - 
fpad } else { pnz = nzt - 2 * ppad + fpad - 1; // nz + ppad + npml + 1 and nz = nzt - 2*ppad } real_sim dxi = 1.0 / dx; real_sim dzi = 1.0 / dz; // inverse of dx and dz //********************************************************************************************************** //int k = blockIdx.x * blockDim.x + threadIdx.x; //int ix = k % nx2; //int iz = k % nz2; int iz = blockIdx.x * blockDim.x + threadIdx.x; int ix = blockIdx.y * blockDim.y + threadIdx.y; //if (threadIdx.x == 0 && blockIdx.x == 0) // printf("Debug Params : . .."); ////*********************************************** //switch (fdorder) { //case(2): { // // Calculate spatial velocity derivatives switch (fdorder) { case(2): if (ix < nx2 && ix >= nx1 && iz >= nz1 && iz < nz2) { // Calculate spatial velocity derivatives vx_x = dxi * hc[1] * (vx[iz * nzt + ix] - vx[iz * nzt + (ix - 1)]); vz_x = dxi * hc[1] * (vz[iz * nzt + (ix + 1)] - vz[iz * nzt + ix]); vx_z = dzi * hc[1] * (vx[(iz + 1) * nzt + ix] - vx[iz * nzt + ix]); vz_z = dzi * hc[1] * (vz[iz * nzt + ix] - vz[(iz - 1) * nzt + ix]); // --------------------------------------------------- // CPML layers for stress tensor kernel // --------------------------------------------------- if (npml > 0) { if (ix >= fpad && ix <= ppad) { // left CPML // Mapping the static CPML and memory variables to px = ix - fpad; // the memory array index //std::cout << std::endl << "Fault1 " << ix << std::endl; mem_vx_x[iz * nzt + px] = b[px] * mem_vx_x[iz * nzt + px] + a[px] * vx_x; mem_vz_x[iz * nzt + px] = b_half[px] * mem_vz_x[iz * nzt + px] + a_half[px] * vz_x; vx_x = vx_x / K[px] + mem_vx_x[iz * nzt + px]; vz_x = vz_x / K_half[px] + mem_vz_x[iz * nzt + px]; } // cpml left if (ix >= (nxt - ppad - 1) && ix < nxt - fpad) { // right CPML // Mapping the static CPML and memory variables to px = ix - pnx; // The PML factors index mem_vx_x[iz * nzt + px] = b[px] * mem_vx_x[iz * nzt + px] + a[px] * vx_x; mem_vz_x[iz * nzt + px] = b_half[px] * mem_vz_x[iz * nzt + px] + a_half[px] * vz_x; vx_x = vx_x / K[px] + mem_vx_x[iz * nzt + px]; vz_x = vz_x / K_half[px] + mem_vz_x[iz * nzt + px]; } // cpml right if (iz >= fpad && iz <= ppad && !fsurf) { // top CPML // Mapping the static CPML and memory variables to pz = iz - fpad; // the memory array index mem_vz_z[pz * 2 * (npml + 1) + ix] = b[pz] * mem_vz_z[pz * 2 * (npml + 1) + ix] + a[pz] * vz_z; mem_vx_z[pz * 2 * (npml + 1) + ix] = b_half[pz] * mem_vx_z[pz * 2 * (npml + 1) + ix] + a_half[pz] * vx_z; vz_z = vz_z / K[pz] + mem_vz_z[pz * 2 * (npml + 1) + ix]; vx_z = vx_z / K_half[pz] + mem_vx_z[pz * 2 * (npml + 1) + ix]; //std::cout << pz<< ", "; } // cpml top if (iz >= (nzt - ppad - 1) && iz < nzt - fpad) { // bottom CPML // Mapping the static CPML and memory variables to pz = iz - pnz; // The PML factors index mem_vz_z[pz * 2 * (npml + 1) + ix] = b[pz] * mem_vz_z[pz * 2 * (npml + 1) + ix] + a[pz] * vz_z; mem_vx_z[pz * 2 * (npml + 1) + ix] = b_half[pz] * mem_vx_z[pz * 2 * (npml + 1) + ix] + a_half[pz] * vx_z; vz_z = vz_z / K[pz] + mem_vz_z[pz * 2 * (npml + 1) + ix]; vx_z = vx_z / K_half[pz] + mem_vx_z[pz * 2 * (npml + 1) + ix]; //std::cout << pz<< ", "; } // cpml bottom } // npml>0 __syncthreads(); //// -------------------------------------------------------------------------- //// -------------------------------------------------------------------------- ////// updating stresses szx[iz * nzt + ix] += dt * mu_zx[iz * (nzt - 1) + ix] * (vz_x + vx_z); sxx[iz * nzt + ix] += dt * (lam[iz * nzt + ix] * (vx_x + vz_z) + (2.0f * mu[iz * nzt + ix] * vx_x)); szz[iz * 
nzt + ix] += dt * (lam[iz * nzt + ix] * (vx_x + vz_z) + (2.0f * mu[iz * nzt + ix] * vz_z)); // ----------------------------------------- // ----------------------------------------- // Override stress for free surface implementation if (fsurf && iz == fpad) { // Free surface at z = 0 or nz = fpad // Denise manual page 13 szz[fpad * nzt + ix] = 0.0; szx[fpad * nzt + ix] = 0.0; sxx[fpad * nzt + ix] = 4.0 * dt * vx_x * (lam[fpad * nzt + ix] * mu[fpad * nzt + ix] + mu[fpad * nzt + ix] * mu[fpad * nzt + ix]) / (lam[fpad * nzt + ix] + 2.0 * mu[fpad * nzt + ix]); } } else { return; } default: // std::cout << "FDORDER = " << fdorder << npml << std::endl; // std::cout << "FD order mismatch. <SIMULATION ABORTED>" << std::endl; //exit(0); // printf("FD order mismatch. <SIMULATION ABORTED>\n"); } // end of switch } __global__ void kernel_III(int ishot, int nt, int nzt, int nxt, int fpad, int ppad, real_sim dt, real_sim dx, real_sim dz, int fdorder, real_sim* vx, real_sim* vz, real_sim* sxx, real_sim* szx, real_sim* szz, real_sim* lam, real_sim* mu, real_sim* mu_zx, real_sim* rho_zp, real_sim* rho_xp, int npml, real_sim* a, real_sim* b, real_sim* K, real_sim* a_half, real_sim* b_half, real_sim* K_half, real_sim* mem_vx_x, real_sim* mem_vx_z, real_sim* mem_vz_x, real_sim* mem_vz_z, real_sim* mem_sxx_x, real_sim* mem_szx_x, real_sim* mem_szz_z, real_sim* mem_szx_z, bool fsurf) { real_sim sxx_x, szx_x, szx_z, szz_z; // spatial stress derivatives real_sim vx_x, vx_z, vz_x, vz_z; // spatial velocity derivatives int nz1, nz2, nx1, nx2; // The computational grid boundaries int px, pz; // index for PML arrys int isnap; // to take snapshots for data storage int tf, zf, xf; // Index parameters for fwi data storage real_sim hc[2] = { 1.0, 1.0 }; // Initial calculation of indices //--------------------------------------------- nz1 = fpad; nz2 = nzt - fpad; nx1 = fpad; nx2 = nxt - fpad; // index variables // index to map PML at the right or positive end int pnx, pnz; pnx = nxt - 2 * ppad + fpad - 1; // nx + ppad + npml + 1 and nx = nxt - 2*ppad if (fsurf) { pnz = nzt - 2 * ppad - 1; // nz + ppad + npml + 1 and nz = nzt - ppad - fpad } else { pnz = nzt - 2 * ppad + fpad - 1; // nz + ppad + npml + 1 and nz = nzt - 2*ppad } real_sim dxi = 1.0 / dx; real_sim dzi = 1.0 / dz; // inverse of dx and dz //************************************************* int iz = blockIdx.x * blockDim.x + threadIdx.x; int ix = blockIdx.y * blockDim.y + threadIdx.y; switch (fdorder) { case(2): if (ix < nx2 && ix >= nx1 && iz >= nz1 && iz < nz2) { // printf("abc"); // compute spatial stress derivatives sxx_x = dxi * hc[1] * (sxx[iz * nzt + ix + 1] - sxx[iz * nzt + ix]); szx_z = dxi * hc[1] * (szx[iz * nzt + ix] - szx[(iz - 1) * nzt + ix]); szx_x = dzi * hc[1] * (szx[iz * nzt + ix] - szx[iz * nzt + ix - 1]); szz_z = dzi * hc[1] * (szz[(iz + 1) * nzt + ix] - szz[iz * nzt + ix]); // --------------------------------------------------- // CPML layers for particle velocity kernel // --------------------------------------------------- if (npml > 0) { if (ix >= fpad && ix < ppad) { // left CPML // Mapping the static CPML and memory variables to px = ix - fpad; // the memory array index mem_sxx_x[iz * nzt + px] = b[px] * mem_sxx_x[iz * nzt + px] + a[px] * sxx_x; mem_szx_x[iz * nzt + px] = b_half[px] * mem_szx_x[iz * nzt + px] + a_half[px] * szx_x; sxx_x = sxx_x / K[px] + mem_sxx_x[iz * nzt + px]; szx_x = szx_x / K_half[px] + mem_szx_x[iz * nzt + px]; } // cpml left if (ix >= (nxt - ppad - 1) && ix < (nxt - fpad)) { // right CPML // Mapping the static 
CPML and memory variables to px = ix - pnx; // The PML factors index mem_sxx_x[iz * nzt + px] = b[px] * mem_sxx_x[iz * nzt + px] + a[px] * sxx_x; mem_szx_x[iz * nzt + px] = b_half[px] * mem_szx_x[iz * nzt + px] + a_half[px] * szx_x; sxx_x = sxx_x / K[px] + mem_sxx_x[iz * nzt + px]; szx_x = szx_x / K_half[px] + mem_szx_x[iz * nzt + px]; } // cpml right if (iz >= fpad && iz < ppad && !fsurf) { // top CPML // Mapping the static CPML and memory variables to pz = iz - fpad; // the memory array index mem_szz_z[pz * 2 * (npml + 1) + ix] = b[pz] * mem_szz_z[pz * 2 * (npml + 1) + ix] + a[pz] * szz_z; mem_szx_z[pz * 2 * (npml + 1) + ix] = b_half[pz] * mem_szx_z[pz * 2 * (npml + 1) + ix] + a_half[pz] * szx_z; szz_z = szz_z / K[pz] + mem_szz_z[pz * 2 * (npml + 1) + ix]; szx_z = szx_z / K_half[pz] + mem_szx_z[pz * 2 * (npml + 1) + ix]; } // cpml top if (iz >= (nzt - ppad - 1) && iz < nzt - fpad) { // bottom CPML // Mapping the static CPML and memory variables to pz = iz - pnz; // The PML factors index mem_szz_z[pz * 2 * (npml + 1) + ix] = b[pz] * mem_szz_z[pz * 2 * (npml + 1) + ix] + a[pz] * szz_z; mem_szx_z[pz * 2 * (npml + 1) + ix] = b_half[pz] * mem_szx_z[pz * 2 * (npml + 1) + ix] + a_half[pz] * szx_z; szz_z = szz_z / K[pz] + mem_szz_z[pz * 2 * (npml + 1) + ix]; szx_z = szx_z / K_half[pz] + mem_szx_z[pz * 2 * (npml + 1) + ix]; } // cpml bottom __syncthreads(); } // npml>0 // update particle velocities vx[iz * nzt + ix] += dt * rho_xp[iz * (nzt - 1) + ix] * (sxx_x + szx_z); vz[iz * nzt + ix] += dt * rho_zp[iz * (nzt - 1) + ix] * (szx_x + szz_z); } else { return; } // break; default: // std::cout << "FDORDER = " << fdorder << npml << std::endl; // std::cout << "FD order mismatch. <SIMULATION ABORTED>" << std::endl; //exit(0); // printf("FD order mismatch. <SIMULATION ABORTED>\n"); } // end of switch } __global__ void kernel_IV(int nx1, int nx2, int fpad, int nzt, real_sim* szx, real_sim* szz) { int iz = blockIdx.x * blockDim.x + threadIdx.x; int ix = blockIdx.y * blockDim.y + threadIdx.y; int sz = 1; if (ix >= nx1 && ix < nx2 && sz >= 1 && sz <= fpad) { // mirroring szx[(fpad - sz) * nzt + ix] = -szx[(fpad + sz) * nzt + ix]; szz[(fpad - sz) * nzt + ix] = -szz[(fpad + sz) * nzt + ix]; } } void adjoint_kernel_PSV_GPU(int ishot, // shot index // Time and space grid arguments int nt, int nzt, int nxt, int fpad, int ppad, real_sim dt, real_sim dx, real_sim dz, int snap_interval, bool fsurf, // computationsl arguments real_sim* hc, int fdorder, // Wave arguments real_sim** vx, real_sim** vz, real_sim** sxx, real_sim** szx, real_sim** szz, // Medium arguments real_sim** lam, real_sim** mu, real_sim** mu_zx, real_sim** rho_zp, real_sim** rho_xp, //PML arguments int npml, real_sim* a, real_sim* b, real_sim* K, real_sim* a_half, real_sim* b_half, real_sim* K_half, // PML memory arrays real_sim** mem_vx_x, real_sim** mem_vx_z, real_sim** mem_vz_x, real_sim** mem_vz_z, real_sim** mem_sxx_x, real_sim** mem_szx_x, real_sim** mem_szz_z, real_sim** mem_szx_z, // Source arguments int nsrc, ivec src_x, ivec src_z, ivec src_comp, real_sim** src_signal, ivec source_to_fire_shot, // FWI arguments bool fwinv, int fwi_dt, int fwi_dx, int fwi_dz, int fwi_x1, int fwi_x2, int fwi_z1, int fwi_z2, real_sim*** fwi_vx, real_sim*** fwi_vz, real_sim*** fwi_sxx, real_sim*** fwi_szx, real_sim*** fwi_szz, // Gradient of the materials real_sim** grad_lam, real_sim** grad_mu, real_sim** grad_rho, //*****************GPU PARAMS*************** real_sim* d_a, real_sim* d_b, real_sim* d_K, real_sim* d_a_half, real_sim* d_b_half, real_sim* 
d_K_half, // real_sim* d_vx, real_sim* d_vz, real_sim* d_sxx, real_sim* d_szx, real_sim* d_szz, // real_sim* d_fwi_vx, real_sim* d_fwi_vz, real_sim* d_fwi_sxx, real_sim* d_fwi_szx, real_sim* d_fwi_szz, // real_sim* d_mem_vx_x, real_sim* d_mem_vx_z, real_sim* d_mem_vz_x, real_sim* d_mem_vz_z, real_sim* d_mem_sxx_x, real_sim* d_mem_szx_x, real_sim* d_mem_szz_z, real_sim* d_mem_szx_z, // real_sim* d_grad_lam, real_sim* d_grad_mu, real_sim* d_grad_rho, // real_sim* d_lam, real_sim* d_mu, real_sim* d_mu_zx, real_sim* d_rho_zp, real_sim* d_rho_xp ) { //const bool fwi = 1; // int nt = number of timesteps // int nz1, nz2, nx1, nx2 = start and end grids along z and x directions // int dt, dx, dz = grid spacing in time and space // int* hc = holberg coefficients // real_sim **&vx, **&vz, **&sxx, **&szx, **&szz, // wave parameters (particle velocity and stresses) // real_sim **&lam, **&mu, **&mu_zx, **&rho_zp, **&rho_xp // medium parameters (lamé's parameters') // real_sim *a, *b, *K;// CPML parameters // real_sim *a_half, *b_half, *K_half // CPML interpolated parameters // real_sim ** mem_vx_x, ** mem_vx_z, ** mem_vz_x, ** mem_vz_z; // PML velocity derivative memory // real_sim **&mem_sxx_x, **&mem_szx_x, **&mem_szz_z, real_sim **&mem_szx_z // PML stress derivative memory // bool fsurf :: free surface on the top // Source arguments // int nsrc = number of sources // int **src_loc = grid location of source + source parameter type for eg exploxive, vz only etc // real_sim ** src_signal = signal values for the sources //real_sim sxx_x, szx_x, szx_z, szz_z; // spatial stress derivatives //real_sim vx_x, vx_z, vz_x, vz_z; // spatial velocity derivatives int nz1, nz2, nx1, nx2; // The computational grid boundaries int px, pz; // index for PML arrys int isnap; // to take snapshots for data storage int tf, zf, xf; // Index parameters for fwi data storage real_sim s1, s2, s3, s4; // Intermediate variables for gradient calculation std::ofstream outFile; // file to print vz arrays // Initial calculation of indices //--------------------------------------------- nz1 = fpad; nz2 = nzt - fpad; nx1 = fpad; nx2 = nxt - fpad; // index variables // index to map PML at the right or positive end int pnx, pnz; pnx = nxt - 2 * ppad + fpad - 1; // nx + ppad + npml + 1 and nx = nxt - 2*ppad if (fsurf) { pnz = nzt - 2 * ppad - 1; // nz + ppad + npml + 1 and nz = nzt - ppad - fpad } else { pnz = nzt - 2 * ppad + fpad - 1; // nz + ppad + npml + 1 and nz = nzt - 2*ppad } real_sim dxi = 1.0 / dx; real_sim dzi = 1.0 / dz; // inverse of dx and dz // ----------------------------------------------------------- real_sim size = nzt * nxt; gpuErrchk(cudaMemset(d_vz, 0, size * sizeof(real_sim))); gpuErrchk(cudaMemset(d_vx, 0, size * sizeof(real_sim))); gpuErrchk(cudaMemset(d_sxx, 0, size * sizeof(real_sim))); gpuErrchk(cudaMemset(d_szx, 0, size * sizeof(real_sim))); gpuErrchk(cudaMemset(d_szz, 0, size * sizeof(real_sim))); // Gradient kernels //----------------------------- const int nft = 1 + (nt - 1) / fwi_dt; const int nfz = 1 + (fwi_z2 - fwi_z1) / fwi_dz; const int nfx = 1 + (fwi_x2 - fwi_x1) / fwi_dx; int size_grad = nfz * nfx; gpuErrchk(cudaMemset(d_grad_lam, 0, size_grad * sizeof(real_sim))); gpuErrchk(cudaMemset(d_grad_mu, 0, size_grad * sizeof(real_sim))); gpuErrchk(cudaMemset(d_grad_rho, 0, size_grad * sizeof(real_sim))); size = nzt * nxt; gpuErrchk(cudaPeekAtLastError()); int box1 = 16, box2 = 16; dim3 threadsPerBlock(box1, box2); dim3 blocksPerGrid((nz2 + box1 - 1) / box1, (nx2 + box2 - 1) / box2); 
gpuErrchk(cudaPeekAtLastError()); size = nzt * nxt; //************************************************************ // Start of time loop isnap = 0; for (int it = nt - 1; it >= 0; it--) { // --------------------------------------------------------- // Computation of gradient kernels gpuErrchk(cudaMemcpy(d_vz, vz[0], size * sizeof(real_sim), cudaMemcpyHostToDevice)); if (fwinv && !(it % fwi_dt)) { tf = it / fwi_dt; // t index for fwi gradient storage //std::cout<<"fwi time: " << it << ", adjoint simulation" << std::endl; kernel_I << < blocksPerGrid, threadsPerBlock >> > (tf, fwi_dt, dt, nzt, fwi_z1, fwi_z2, fwi_x1, fwi_x2, fwi_dz, fwi_dx, nft, nfz, nfx, d_fwi_sxx, d_fwi_szx, d_fwi_szz, d_fwi_vx, d_fwi_vz, d_sxx, d_szx, d_szz, d_vx, d_vz, d_mu, d_lam, d_grad_lam, d_grad_mu, d_grad_rho); } //****************************************************Kernrl calls GPU************************************* // Calculate spatial velocity derivatives kernel_II << < blocksPerGrid, threadsPerBlock >> > (ishot, nt, nzt, nxt, fpad, ppad, dt, dx, dz, fdorder, d_vx, d_vz, d_sxx, d_szx, d_szz, d_lam, d_mu, d_mu_zx, d_rho_zp, d_rho_xp, npml, d_a, d_b, d_K, d_a_half, d_b_half, d_K_half, d_mem_vx_x, d_mem_vx_z, d_mem_vz_x, d_mem_vz_z, d_mem_sxx_x, d_mem_szx_x, d_mem_szz_z, d_mem_szx_z, fsurf); gpuErrchk(cudaPeekAtLastError()); // compute spatial stress derivatives kernel_III << < blocksPerGrid, threadsPerBlock >> > (ishot, nt, nzt, nxt, fpad, ppad, dt, dx, dz, fdorder, d_vx, d_vz, d_sxx, d_szx, d_szz, d_lam, d_mu, d_mu_zx, d_rho_zp, d_rho_xp, npml, d_a, d_b, d_K, d_a_half, d_b_half, d_K_half, d_mem_vx_x, d_mem_vx_z, d_mem_vz_x, d_mem_vz_z, d_mem_sxx_x, d_mem_szx_x, d_mem_szz_z, d_mem_szx_z, fsurf); gpuErrchk(cudaPeekAtLastError()); if (fsurf) { // Mirroring stresses for free surface condition kernel_IV << < blocksPerGrid, threadsPerBlock >> > (nx1, nx2, fpad, nzt, d_szx, d_szz); gpuErrchk(cudaPeekAtLastError()); } gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpy(vz[0], d_vz, size * sizeof(real_sim), cudaMemcpyDeviceToHost)); //**************************************************************************** // Adding Velocity update related sources //---------------------------------------- for (int is = 0; is <= 0 /*nsrc*/; is++) { if (source_to_fire_shot[is] == ishot) { switch (src_comp[is]) {// defines the signal type case(2): // vz component only vz[src_z[is]][src_x[is]] += src_signal[is][it]; } } } // --------------------------------------- // Printing out AASCII data for snap intervals if (!(it % snap_interval || it == 0)) { std::cout << "Time step " << it << " of " << nt << " in adjoint kernel." 
<< std::endl; isnap++; } } // end of time loop } // ***************** CPU********************** void adjoint_kernel_PSV(int ishot, // shot index // Time and space grid arguments int nt, int nzt, int nxt, int fpad, int ppad, real_sim dt, real_sim dx, real_sim dz, int snap_interval, bool fsurf, // computationsl arguments real_sim* hc, int fdorder, // Wave arguments real_sim** vx, real_sim** vz, real_sim** sxx, real_sim** szx, real_sim** szz, // Medium arguments real_sim** lam, real_sim** mu, real_sim** mu_zx, real_sim** rho_zp, real_sim** rho_xp, //PML arguments int npml, real_sim* a, real_sim* b, real_sim* K, real_sim* a_half, real_sim* b_half, real_sim* K_half, // PML memory arrays real_sim** mem_vx_x, real_sim** mem_vx_z, real_sim** mem_vz_x, real_sim** mem_vz_z, real_sim** mem_sxx_x, real_sim** mem_szx_x, real_sim** mem_szz_z, real_sim** mem_szx_z, // Source arguments int nsrc, ivec src_x, ivec src_z, ivec src_comp, real_sim** src_signal, ivec source_to_fire_shot, // FWI arguments bool fwinv, int fwi_dt, int fwi_dx, int fwi_dz, int fwi_x1, int fwi_x2, int fwi_z1, int fwi_z2, real_sim*** fwi_vx, real_sim*** fwi_vz, real_sim*** fwi_sxx, real_sim*** fwi_szx, real_sim*** fwi_szz, // Gradient of the materials real_sim** grad_lam, real_sim** grad_mu, real_sim** grad_rho) { //const bool fwi = 1; // int nt = number of timesteps // int nz1, nz2, nx1, nx2 = start and end grids along z and x directions // int dt, dx, dz = grid spacing in time and space // int* hc = holberg coefficients // real_sim **&vx, **&vz, **&sxx, **&szx, **&szz, // wave parameters (particle velocity and stresses) // real_sim **&lam, **&mu, **&mu_zx, **&rho_zp, **&rho_xp // medium parameters (lamé's parameters') // real_sim *a, *b, *K;// CPML parameters // real_sim *a_half, *b_half, *K_half // CPML interpolated parameters // real_sim ** mem_vx_x, ** mem_vx_z, ** mem_vz_x, ** mem_vz_z; // PML velocity derivative memory // real_sim **&mem_sxx_x, **&mem_szx_x, **&mem_szz_z, real_sim **&mem_szx_z // PML stress derivative memory // bool fsurf :: free surface on the top // Source arguments // int nsrc = number of sources // int **src_loc = grid location of source + source parameter type for eg exploxive, vz only etc // real_sim ** src_signal = signal values for the sources real_sim sxx_x, szx_x, szx_z, szz_z; // spatial stress derivatives real_sim vx_x, vx_z, vz_x, vz_z; // spatial velocity derivatives int nz1, nz2, nx1, nx2; // The computational grid boundaries int px, pz; // index for PML arrys int isnap; // to take snapshots for data storage int tf, zf, xf; // Index parameters for fwi data storage real_sim s1, s2, s3, s4; // Intermediate variables for gradient calculation std::ofstream outFile; // file to print vz arrays // Initial calculation of indices //--------------------------------------------- nz1 = fpad; nz2 = nzt - fpad; nx1 = fpad; nx2 = nxt - fpad; // index variables // index to map PML at the right or positive end int pnx, pnz; pnx = nxt - 2 * ppad + fpad - 1; // nx + ppad + npml + 1 and nx = nxt - 2*ppad if (fsurf) { pnz = nzt - 2 * ppad - 1; // nz + ppad + npml + 1 and nz = nzt - ppad - fpad } else { pnz = nzt - 2 * ppad + fpad - 1; // nz + ppad + npml + 1 and nz = nzt - 2*ppad } real_sim dxi = 1.0 / dx; real_sim dzi = 1.0 / dz; // inverse of dx and dz // ----------------------------------------------------------- // Reset kernels // ----------------------------------------------------- // Stress and velocity kernels for (int iz = 0; iz < nzt; iz++) { for (int ix = 0; ix < nxt; ix++) { // Wave velocity and stress 
tensor arrays vx[iz][ix] = 0.0; vz[iz][ix] = 0.0; sxx[iz][ix] = 0.0; szx[iz][ix] = 0.0; szz[iz][ix] = 0.0; } } // Gradient kernels //----------------------------- const int nfz = 1 + (fwi_z2 - fwi_z1) / fwi_dz; const int nfx = 1 + (fwi_x2 - fwi_x1) / fwi_dx; for (int iz = 0; iz < nfz; iz++) { for (int ix = 0; ix < nfx; ix++) { // Gradients of the material grad_lam[iz][ix] = 0.0; grad_mu[iz][ix] = 0.0; grad_rho[iz][ix] = 0.0; } } // Start of time loop isnap = 0; for (int it = nt - 1; it >= 0; it--) { // --------------------------------------------------------- // Computation of gradient kernels if (fwinv && !(it % fwi_dt)) { tf = it / fwi_dt; // t index for fwi gradient storage //std::cout<<"fwi time: " << it << ", adjoint simulation" << std::endl; for (int iz = fwi_z1; iz < fwi_z2; iz += fwi_dz) { // storing only a portion and with grid inteval zf = (iz - fwi_z1) / fwi_dz; // z index for fwi gradient storage for (int ix = fwi_x1; ix < fwi_x2; ix += fwi_dx) { xf = (ix - fwi_x1) / fwi_dx; // x index for fwi gradient storage s1 = (fwi_sxx[tf][zf][xf] + fwi_szz[tf][zf][xf]) * (sxx[iz][ix] + szz[iz][ix]) * 0.25 / ((lam[iz][ix] + mu[iz][ix]) * (lam[iz][ix] + mu[iz][ix])); s2 = (fwi_sxx[tf][zf][xf] - fwi_szz[tf][zf][xf]) * (sxx[iz][ix] - szz[iz][ix]) / (mu[iz][ix] * mu[iz][ix]); s3 = (fwi_szx[tf][zf][xf] * szx[iz][ix]) / (mu[iz][ix] * mu[iz][ix]); // The time derivatives of the velocity may have to be computed differently s4 = vx[iz][ix] * fwi_vx[tf][zf][xf] + vz[iz][ix] * fwi_vz[tf][zf][xf]; grad_lam[zf][xf] += fwi_dt * dt * s1; grad_mu[zf][xf] += fwi_dt * dt * (s3 + s1 + s2); grad_rho[zf][xf] += fwi_dt * dt * s4; } } } // -------------------------------------------------------- // Time integration of dynamic stress fields switch (fdorder) { case(2): // updating stress tensors for (int iz = nz1; iz < nz2; iz++) { //std::cout << std::endl << "PML indices: " << std::endl; for (int ix = nx1; ix < nx2; ix++) { //std::cout << ix << ", " << iz << std::endl; // Calculate spatial velocity derivatives vx_x = dxi * hc[1] * (vx[iz][ix] - vx[iz][ix - 1]); vz_x = dxi * hc[1] * (vz[iz][ix + 1] - vz[iz][ix]); vx_z = dzi * hc[1] * (vx[iz + 1][ix] - vx[iz][ix]); vz_z = dzi * hc[1] * (vz[iz][ix] - vz[iz - 1][ix]); // --------------------------------------------------- // CPML layers for stress tensor kernel // --------------------------------------------------- if (npml > 0) { if (ix >= fpad && ix <= ppad) { // left CPML // Mapping the static CPML and memory variables to px = ix - fpad; // the memory array index //std::cout << std::endl << "Fault1 " << ix << std::endl; mem_vx_x[iz][px] = b[px] * mem_vx_x[iz][px] + a[px] * vx_x; mem_vz_x[iz][px] = b_half[px] * mem_vz_x[iz][px] + a_half[px] * vz_x; vx_x = vx_x / K[px] + mem_vx_x[iz][px]; vz_x = vz_x / K_half[px] + mem_vz_x[iz][px]; } // cpml left if (ix >= (nxt - ppad - 1) && ix < nxt - fpad) { // right CPML // Mapping the static CPML and memory variables to px = ix - pnx; // The PML factors index mem_vx_x[iz][px] = b[px] * mem_vx_x[iz][px] + a[px] * vx_x; mem_vz_x[iz][px] = b_half[px] * mem_vz_x[iz][px] + a_half[px] * vz_x; vx_x = vx_x / K[px] + mem_vx_x[iz][px]; vz_x = vz_x / K_half[px] + mem_vz_x[iz][px]; } // cpml right if (iz >= fpad && iz <= ppad && !fsurf) { // top CPML // Mapping the static CPML and memory variables to pz = iz - fpad; // the memory array index mem_vz_z[pz][ix] = b[pz] * mem_vz_z[pz][ix] + a[pz] * vz_z; mem_vx_z[pz][ix] = b_half[pz] * mem_vx_z[pz][ix] + a_half[pz] * vx_z; vz_z = vz_z / K[pz] + mem_vz_z[pz][ix]; vx_z = vx_z / K_half[pz] + 
mem_vx_z[pz][ix]; //std::cout << pz<< ", "; } // cpml top if (iz >= (nzt - ppad - 1) && iz < nzt - fpad) { // bottom CPML // Mapping the static CPML and memory variables to pz = iz - pnz; // The PML factors index mem_vz_z[pz][ix] = b[pz] * mem_vz_z[pz][ix] + a[pz] * vz_z; mem_vx_z[pz][ix] = b_half[pz] * mem_vx_z[pz][ix] + a_half[pz] * vx_z; vz_z = vz_z / K[pz] + mem_vz_z[pz][ix]; vx_z = vx_z / K_half[pz] + mem_vx_z[pz][ix]; //std::cout << pz<< ", "; } // cpml bottom } // npml>0 // -------------------------------------------------------------------------- // -------------------------------------------------------------------------- // updating stresses szx[iz][ix] += dt * mu_zx[iz][ix] * (vz_x + vx_z); sxx[iz][ix] += dt * (lam[iz][ix] * (vx_x + vz_z) + (2.0 * mu[iz][ix] * vx_x)); szz[iz][ix] += dt * (lam[iz][ix] * (vx_x + vz_z) + (2.0 * mu[iz][ix] * vz_z)); // ----------------------------------------- // ----------------------------------------- // Override stress for free surface implementation if (fsurf && iz == fpad) { // Free surface at z = 0 or nz = fpad // Denise manual page 13 szz[fpad][ix] = 0.0; szx[fpad][ix] = 0.0; sxx[fpad][ix] = 4.0 * dt * vx_x * (lam[fpad][ix] * mu[fpad][ix] + mu[fpad][ix] * mu[fpad][ix]) / (lam[fpad][ix] + 2.0 * mu[fpad][ix]); //} } } } // STRESS MIRRORING TECHNIQUE FOR FREE SURFACE CONDITION if (fsurf) { // Mirroring stresses for free surface condition for (int ix = nx1; ix < nx2; ix++) { for (int sz = 1; sz <= fpad; sz++) { // mirroring szx[fpad - sz][ix] = -szx[fpad + sz][ix]; szz[fpad - sz][ix] = -szz[fpad + sz][ix]; } } } // updating velocity tensors for (int iz = nz1; iz < nz2; iz++) { for (int ix = nx1; ix < nx2; ix++) { // compute spatial stress derivatives sxx_x = dxi * hc[1] * (sxx[iz][ix + 1] - sxx[iz][ix]); szx_x = dxi * hc[1] * (szx[iz][ix] - szx[iz][ix - 1]); szx_z = dzi * hc[1] * (szx[iz][ix] - szx[iz - 1][ix]); szz_z = dzi * hc[1] * (szz[iz + 1][ix] - szz[iz][ix]); // --------------------------------------------------- // CPML layers for particle velocity kernel // --------------------------------------------------- if (npml > 0) { if (ix >= fpad && ix < ppad) { // left CPML // Mapping the static CPML and memory variables to px = ix - fpad; // the memory array index mem_sxx_x[iz][px] = b[px] * mem_sxx_x[iz][px] + a[px] * sxx_x; mem_szx_x[iz][px] = b_half[px] * mem_szx_x[iz][px] + a_half[px] * szx_x; sxx_x = sxx_x / K[px] + mem_sxx_x[iz][px]; szx_x = szx_x / K_half[px] + mem_szx_x[iz][px]; } // cpml left if (ix >= (nxt - ppad - 1) && ix < nxt - fpad) { // right CPML // Mapping the static CPML and memory variables to px = ix - pnx; // The PML factors index mem_sxx_x[iz][px] = b[px] * mem_sxx_x[iz][px] + a[px] * sxx_x; mem_szx_x[iz][px] = b_half[px] * mem_szx_x[iz][px] + a_half[px] * szx_x; sxx_x = sxx_x / K[px] + mem_sxx_x[iz][px]; szx_x = szx_x / K_half[px] + mem_szx_x[iz][px]; } // cpml right if (iz >= fpad && iz < ppad && !fsurf) { // top CPML // Mapping the static CPML and memory variables to pz = iz - fpad; // the memory array index mem_szz_z[pz][ix] = b[pz] * mem_szz_z[pz][ix] + a[pz] * szz_z; mem_szx_z[pz][ix] = b_half[pz] * mem_szx_z[pz][ix] + a_half[pz] * szx_z; szz_z = szz_z / K[pz] + mem_szz_z[pz][ix]; szx_z = szx_z / K_half[pz] + mem_szx_z[pz][ix]; } // cpml top if (iz >= (nzt - ppad - 1) && iz < nzt - fpad) { // bottom CPML // Mapping the static CPML and memory variables to pz = iz - pnz; // The PML factors index mem_szz_z[pz][ix] = b[pz] * mem_szz_z[pz][ix] + a[pz] * szz_z; mem_szx_z[pz][ix] = b_half[pz] * mem_szx_z[pz][ix] + 
a_half[pz] * szx_z; szz_z = szz_z / K[pz] + mem_szz_z[pz][ix]; szx_z = szx_z / K_half[pz] + mem_szx_z[pz][ix]; } // cpml bottom } // npml>0 // -------------------------------------------------------------------------- // -------------------------------------------------------------------------- // update particle velocities vx[iz][ix] += dt * rho_xp[iz][ix] * (sxx_x + szx_z); vz[iz][ix] += dt * rho_zp[iz][ix] * (szx_x + szz_z); } } break; default: std::cout << "FDORDER = " << fdorder << npml << std::endl; std::cout << "FD order mismatch. <SIMULATION ABORTED>" << std::endl; exit(0); } // end of switch // Adding Velocity update related sources //---------------------------------------- for (int is = 0; is <= 0; is++) { if (source_to_fire_shot[is] == ishot) { switch (src_comp[is]) {// defines the signal type case(2): // vz component only vz[src_z[is]][src_x[is]] += src_signal[is][it]; } } } // --------------------------------------- // Printing out AASCII data for snap intervals if (!(it % snap_interval || it == 0)) { std::cout << "Time step " << it << " of " << nt << " in adjoint kernel." << std::endl; isnap++; } } // end of time loop }
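The gradient block at the top of the adjoint time loop above encodes the FWI sensitivity updates as straight-line arithmetic. Read directly from that code (adjoint fields written plainly, stored forward fields from the fwi_* arrays marked with a tilde), the per-cell increments are, as a sketch in the notation of the source:

\begin{align*}
s_1 &= \frac{(\tilde\sigma_{xx}+\tilde\sigma_{zz})\,(\sigma_{xx}+\sigma_{zz})}{4\,(\lambda+\mu)^2}, &
s_2 &= \frac{(\tilde\sigma_{xx}-\tilde\sigma_{zz})\,(\sigma_{xx}-\sigma_{zz})}{\mu^2}, \\
s_3 &= \frac{\tilde\sigma_{zx}\,\sigma_{zx}}{\mu^2}, &
s_4 &= v_x\,\tilde v_x + v_z\,\tilde v_z, \\
\nabla\lambda &\mathrel{+}= \texttt{fwi\_dt}\,\Delta t\; s_1, &
\nabla\mu &\mathrel{+}= \texttt{fwi\_dt}\,\Delta t\,(s_1+s_2+s_3), \\
\nabla\rho &\mathrel{+}= \texttt{fwi\_dt}\,\Delta t\; s_4. &&
\end{align*}

All material parameters are sampled at the full-grid cell (iz, ix), while the gradients accumulate on the coarser fwi_dx/fwi_dz/fwi_dt storage grid.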
d5a0ed774783521b50de4edbf1bea05d131dd19b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common/Constants.h" #include "common/ConfigParser.h" #include "common/Stream.h" #include "common/Serializer.h" #include "common/BinaryFileStream.h" #include "common/sanity_check.h" #include "imgproc/correspondence/PatchColliderForest.h" #include "imgproc/correspondence/PatchColliderRGBCorrespondence.h" #include "imgproc/correspondence/gpc_feature.h" #include <device_launch_parameters.h> #include <boost/mpl/min_max.hpp> namespace surfelwarp { namespace device { template<int FeatureDim, int NumTrees> __device__ __forceinline__ unsigned searchGPCForest( const GPCPatchFeature<FeatureDim>& feature, const typename PatchColliderForest<FeatureDim, NumTrees>::GPCForestDevice& forest ) { unsigned hash = 0; #pragma unroll for(auto i = 0; i < NumTrees; i++) { const GPCTree<FeatureDim>& tree = forest.trees[i]; const unsigned leaf = tree.leafForPatch(feature); hash = hash * 67421 + leaf; } return hash; } __host__ __device__ __forceinline__ unsigned encode_pixel_impair(int rgb_x, int rgb_y, bool img_0) { unsigned encoded = rgb_x + 1024 * rgb_y; if(img_0) { encoded = encoded & (~(1 << 31)); } else { encoded = encoded | (1 << 31); } return encoded; } __host__ __device__ __forceinline__ void decode_pixel_impair( unsigned encoded, int& rgb_x, int& rgb_y, bool& img_0 ) { //Check where this pixel is from if ((encoded & (1 << 31)) != 0) { img_0 = false; } else { img_0 = true; } //Zero out highest bit encoded = encoded & (~(1 << 31)); rgb_x = encoded % 1024; rgb_y = encoded / 1024; } template<int PatchHalfSize, int NumTrees> __global__ void buildColliderKeyValueKernel( hipTextureObject_t new_rgb, bool swap, const typename PatchColliderForest<PatchColliderRGBCorrespondence::Parameters::feature_dim, NumTrees>::GPCForestDevice forest, const int stride, const int kv_rows, const int kv_cols, const int keys_size, const int values_size, unsigned* keys, unsigned* values ) { const auto kv_x = threadIdx.x + blockDim.x * blockIdx.x; const auto kv_y = threadIdx.y + blockDim.y * blockIdx.y; if(kv_x >= kv_cols || kv_y >= kv_rows) return; //Transfer to the center of rgb image const auto rgb_center_x = PatchHalfSize + kv_x * stride; const auto rgb_center_y = PatchHalfSize + kv_y * stride; //Build the feature GPCPatchFeature<PatchColliderRGBCorrespondence::Parameters::feature_dim> patch_feature; buildDCTPatchFeature<PatchHalfSize>(new_rgb, rgb_center_x, rgb_center_y, patch_feature); //Search it for the key const unsigned key = searchGPCForest<PatchColliderRGBCorrespondence::Parameters::feature_dim, NumTrees>(patch_feature, forest); //Build the value const unsigned value = encode_pixel_impair(rgb_center_x, rgb_center_y, swap); //Store it const auto offset = swap ? 
2 * (kv_x + kv_cols * kv_y) : 2 * (kv_x + kv_cols * kv_y) + 1; if (offset >= keys_size || offset >= values_size) return; keys[offset] = key; values[offset] = value; } __global__ void markCorrespondenceCandidateKernel( const PtrSz<const unsigned> sorted_treeleaf_key, const unsigned* sorted_pixel_value, hipTextureObject_t foreground_1, unsigned* candidate_indicator ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx >= sorted_treeleaf_key.size) return; //The indicator must be written unsigned is_candidate = 0; //Check idx is the first index of some key if(idx == 0 || sorted_treeleaf_key[idx] != sorted_treeleaf_key[idx - 1]) { //Read the value const auto hashed_key = sorted_treeleaf_key[idx]; //Count the number of matching int num_pixels_keys = 1; //The end of probing auto end = idx + 2; if(end >= sorted_treeleaf_key.size) end = sorted_treeleaf_key.size - 1; //Probe the next for(int j = idx + 1; j <= end; j++) { if(sorted_treeleaf_key[j] == hashed_key) num_pixels_keys++; } //Determine whether the pixel are from different img if(num_pixels_keys == 2) { int x, y; bool pixel0_img0, pixel1_img0; const auto encoded_pixel_0 = sorted_pixel_value[idx + 0]; //Now we are safe to read the idx + 1 without checking const auto encoded_pixel_1 = sorted_pixel_value[idx + 1]; decode_pixel_impair(encoded_pixel_0, x, y, pixel0_img0); decode_pixel_impair(encoded_pixel_1, x, y, pixel1_img0); //If the from different image if((pixel0_img0 && (!pixel1_img0)) || ((!pixel0_img0) && pixel1_img0) ) { //Determine which one is for image 1 if(!pixel0_img0) { decode_pixel_impair(encoded_pixel_0, x, y, pixel0_img0); } else { decode_pixel_impair(encoded_pixel_1, x, y, pixel1_img0); } //Check if this is foreground const unsigned char is_foreground = tex2D<unsigned char>(foreground_1, x, y); if(is_foreground != 0) is_candidate = 1; } } } // Ensure this idx is the first one of some new key //Write it candidate_indicator[idx] = is_candidate; } __global__ void collectCandidatePixelPairKernel( const PtrSz<const unsigned> candidate_indicator, const unsigned* sorted_pixel_value, const unsigned* prefixsum_indicator, //The output ushort4* pixel_pair_array ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx >= candidate_indicator.size) return; //For any valid indicator, it is safe to read its successor if(candidate_indicator[idx] > 0) { ushort4 pixel_pair; int x, y; bool img_0; //These read must be safe const auto encoded_pixel_0 = sorted_pixel_value[idx + 0]; const auto encoded_pixel_1 = sorted_pixel_value[idx + 1]; //Decode and write decode_pixel_impair(encoded_pixel_0, x, y, img_0); if(img_0) { pixel_pair.x = x; pixel_pair.y = y; } else { pixel_pair.z = x; pixel_pair.w = y; } decode_pixel_impair(encoded_pixel_1, x, y, img_0); if(img_0) { pixel_pair.x = x; pixel_pair.y = y; } else { pixel_pair.z = x; pixel_pair.w = y; } //Write it const auto offset = prefixsum_indicator[idx] - 1; pixel_pair_array[offset] = pixel_pair; } } }; // namespace device }; // namespace surfelwarp surfelwarp::PatchColliderRGBCorrespondence::PatchColliderRGBCorrespondence() { m_rgb_cols = m_rgb_rows = 0; first_frame = true; swap = false; } void surfelwarp::PatchColliderRGBCorrespondence::AllocateBuffer(unsigned img_rows, unsigned img_cols) { //Read the gpc model and upload to device //BinaryFileStream in_fstream(Constants::kGPCModelPath.c_str(), BinaryFileStream::Mode::ReadOnly); const auto& config = ConfigParser::Instance(); BinaryFileStream in_fstream(config.gpc_model_path().string().c_str(), BinaryFileStream::Mode::ReadOnly); 
m_forest.Load(&in_fstream); m_forest.UploadToDevice(); //Restrict the maximum level? m_forest.UpdateSearchLevel(max_search_level); //Determine the size of key-value map m_rgb_rows = img_rows; m_rgb_cols = img_cols; m_kvmap_rows = (m_rgb_rows - patch_clip * 2) / patch_stride; // + 1; m_kvmap_cols = (m_rgb_cols - patch_clip * 2) / patch_stride; // + 1; //Allocate the key-value buffer const auto kv_size = m_kvmap_rows * m_kvmap_rows; //Both rgb_0 and rgb_1 will have key-value pairs m_treeleaf_key.create(2 * kv_size); m_pixel_value.create(2 * kv_size); m_collide_sort.AllocateBuffer(2 * kv_size); //The buffer for marking the valid indicator m_candidate_pixelpair_indicator.create(2 * kv_size); //The buffer for prefixsum and compaction m_prefixsum.AllocateBuffer(m_candidate_pixelpair_indicator.size()); cudaSafeCall(hipHostMalloc((void**)(&m_candidate_size_pagelock), sizeof(unsigned))); //The buffer for output m_correspondence_pixels.AllocateBuffer(max_num_correspondence); } void surfelwarp::PatchColliderRGBCorrespondence::ReleaseBuffer() { //Clear the key-value pair m_treeleaf_key.release(); m_pixel_value.release(); //Clear the indicator m_candidate_pixelpair_indicator.release(); //Clear the pagelock memory cudaSafeCall(hipHostFree(m_candidate_size_pagelock)); } void surfelwarp::PatchColliderRGBCorrespondence::SetInputImages( hipTextureObject_t rgb_0, hipTextureObject_t rgb_1, hipTextureObject_t foreground_1 ) { rgb_0_ = rgb_0; rgb_1_ = rgb_1; m_foreground_1 = foreground_1; } void surfelwarp::PatchColliderRGBCorrespondence::SetInputImages( hipTextureObject_t rgb_0, hipTextureObject_t rgb_1, hipTextureObject_t depth_0, hipTextureObject_t depth_1 ) { throw new std::runtime_error("This version of patch collider only accept rgb images"); } void surfelwarp::PatchColliderRGBCorrespondence::FindCorrespondence(hipStream_t stream) { dim3 kv_blk(8, 8); dim3 kv_grid(divUp(m_kvmap_cols, kv_blk.x), divUp(m_kvmap_rows, kv_blk.y)); const auto forest_dev = m_forest.OnDevice(); if(first_frame) hipLaunchKernelGGL(( device::buildColliderKeyValueKernel<patch_radius, num_trees>), dim3(kv_grid), dim3(kv_blk), 0, stream, rgb_0_, true, forest_dev, patch_stride, m_kvmap_rows, m_kvmap_cols, m_treeleaf_key.size(), m_pixel_value.size(), m_treeleaf_key.ptr(), m_pixel_value.ptr() ); hipLaunchKernelGGL(( device::buildColliderKeyValueKernel<patch_radius, num_trees>), dim3(kv_grid), dim3(kv_blk), 0, stream, rgb_1_, swap, forest_dev, patch_stride, m_kvmap_rows, m_kvmap_cols, m_treeleaf_key.size(), m_pixel_value.size(), m_treeleaf_key.ptr(), m_pixel_value.ptr() ); //Sort it m_collide_sort.Sort(m_treeleaf_key, m_pixel_value, stream); //Debug code //std::cout << "The number of unique elments " << numUniqueElement(m_treeleaf_key, 0xffffffffu) << std::endl; //Mark of candidate dim3 indicator_blk(64); dim3 indicator_grid(divUp(m_collide_sort.valid_sorted_key.size(), indicator_blk.x)); hipLaunchKernelGGL(( device::markCorrespondenceCandidateKernel), dim3(indicator_grid), dim3(indicator_blk), 0, stream, m_collide_sort.valid_sorted_key, m_collide_sort.valid_sorted_value, m_foreground_1, m_candidate_pixelpair_indicator.ptr() ); //Do a prefix-sum m_prefixsum.InclusiveSum(m_candidate_pixelpair_indicator, stream); //Get the size of sum cudaSafeCall(hipMemcpyAsync( m_candidate_size_pagelock, m_prefixsum.valid_prefixsum_array + m_prefixsum.valid_prefixsum_array.size() - 1, sizeof(unsigned), hipMemcpyDeviceToHost, stream )); //Invoke the collection kernel hipLaunchKernelGGL(( device::collectCandidatePixelPairKernel), dim3(indicator_grid), 
dim3(indicator_blk), 0, stream, m_candidate_pixelpair_indicator, m_collide_sort.valid_sorted_value.ptr(), m_prefixsum.valid_prefixsum_array.ptr(), m_correspondence_pixels.Ptr() ); //Construct the output // cudaSafeCall(hipStreamSynchronize(stream)); m_correspondence_pixels.ResizeArrayOrException(*m_candidate_size_pagelock); first_frame = false; swap = !swap; //Debug //std::cout << "The number of candidate array is " << m_valid_correspondence_array.size() << std::endl; //Check here #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif }
d5a0ed774783521b50de4edbf1bea05d131dd19b.cu
#include "common/Constants.h" #include "common/ConfigParser.h" #include "common/Stream.h" #include "common/Serializer.h" #include "common/BinaryFileStream.h" #include "common/sanity_check.h" #include "imgproc/correspondence/PatchColliderForest.h" #include "imgproc/correspondence/PatchColliderRGBCorrespondence.h" #include "imgproc/correspondence/gpc_feature.h" #include <device_launch_parameters.h> #include <boost/mpl/min_max.hpp> namespace surfelwarp { namespace device { template<int FeatureDim, int NumTrees> __device__ __forceinline__ unsigned searchGPCForest( const GPCPatchFeature<FeatureDim>& feature, const typename PatchColliderForest<FeatureDim, NumTrees>::GPCForestDevice& forest ) { unsigned hash = 0; #pragma unroll for(auto i = 0; i < NumTrees; i++) { const GPCTree<FeatureDim>& tree = forest.trees[i]; const unsigned leaf = tree.leafForPatch(feature); hash = hash * 67421 + leaf; } return hash; } __host__ __device__ __forceinline__ unsigned encode_pixel_impair(int rgb_x, int rgb_y, bool img_0) { unsigned encoded = rgb_x + 1024 * rgb_y; if(img_0) { encoded = encoded & (~(1 << 31)); } else { encoded = encoded | (1 << 31); } return encoded; } __host__ __device__ __forceinline__ void decode_pixel_impair( unsigned encoded, int& rgb_x, int& rgb_y, bool& img_0 ) { //Check where this pixel is from if ((encoded & (1 << 31)) != 0) { img_0 = false; } else { img_0 = true; } //Zero out highest bit encoded = encoded & (~(1 << 31)); rgb_x = encoded % 1024; rgb_y = encoded / 1024; } template<int PatchHalfSize, int NumTrees> __global__ void buildColliderKeyValueKernel( cudaTextureObject_t new_rgb, bool swap, const typename PatchColliderForest<PatchColliderRGBCorrespondence::Parameters::feature_dim, NumTrees>::GPCForestDevice forest, const int stride, const int kv_rows, const int kv_cols, const int keys_size, const int values_size, unsigned* keys, unsigned* values ) { const auto kv_x = threadIdx.x + blockDim.x * blockIdx.x; const auto kv_y = threadIdx.y + blockDim.y * blockIdx.y; if(kv_x >= kv_cols || kv_y >= kv_rows) return; //Transfer to the center of rgb image const auto rgb_center_x = PatchHalfSize + kv_x * stride; const auto rgb_center_y = PatchHalfSize + kv_y * stride; //Build the feature GPCPatchFeature<PatchColliderRGBCorrespondence::Parameters::feature_dim> patch_feature; buildDCTPatchFeature<PatchHalfSize>(new_rgb, rgb_center_x, rgb_center_y, patch_feature); //Search it for the key const unsigned key = searchGPCForest<PatchColliderRGBCorrespondence::Parameters::feature_dim, NumTrees>(patch_feature, forest); //Build the value const unsigned value = encode_pixel_impair(rgb_center_x, rgb_center_y, swap); //Store it const auto offset = swap ? 
2 * (kv_x + kv_cols * kv_y) : 2 * (kv_x + kv_cols * kv_y) + 1; if (offset >= keys_size || offset >= values_size) return; keys[offset] = key; values[offset] = value; } __global__ void markCorrespondenceCandidateKernel( const PtrSz<const unsigned> sorted_treeleaf_key, const unsigned* sorted_pixel_value, cudaTextureObject_t foreground_1, unsigned* candidate_indicator ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx >= sorted_treeleaf_key.size) return; //The indicator must be written unsigned is_candidate = 0; //Check idx is the first index of some key if(idx == 0 || sorted_treeleaf_key[idx] != sorted_treeleaf_key[idx - 1]) { //Read the value const auto hashed_key = sorted_treeleaf_key[idx]; //Count the number of matching int num_pixels_keys = 1; //The end of probing auto end = idx + 2; if(end >= sorted_treeleaf_key.size) end = sorted_treeleaf_key.size - 1; //Probe the next for(int j = idx + 1; j <= end; j++) { if(sorted_treeleaf_key[j] == hashed_key) num_pixels_keys++; } //Determine whether the pixel are from different img if(num_pixels_keys == 2) { int x, y; bool pixel0_img0, pixel1_img0; const auto encoded_pixel_0 = sorted_pixel_value[idx + 0]; //Now we are safe to read the idx + 1 without checking const auto encoded_pixel_1 = sorted_pixel_value[idx + 1]; decode_pixel_impair(encoded_pixel_0, x, y, pixel0_img0); decode_pixel_impair(encoded_pixel_1, x, y, pixel1_img0); //If the from different image if((pixel0_img0 && (!pixel1_img0)) || ((!pixel0_img0) && pixel1_img0) ) { //Determine which one is for image 1 if(!pixel0_img0) { decode_pixel_impair(encoded_pixel_0, x, y, pixel0_img0); } else { decode_pixel_impair(encoded_pixel_1, x, y, pixel1_img0); } //Check if this is foreground const unsigned char is_foreground = tex2D<unsigned char>(foreground_1, x, y); if(is_foreground != 0) is_candidate = 1; } } } // Ensure this idx is the first one of some new key //Write it candidate_indicator[idx] = is_candidate; } __global__ void collectCandidatePixelPairKernel( const PtrSz<const unsigned> candidate_indicator, const unsigned* sorted_pixel_value, const unsigned* prefixsum_indicator, //The output ushort4* pixel_pair_array ) { const auto idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx >= candidate_indicator.size) return; //For any valid indicator, it is safe to read its successor if(candidate_indicator[idx] > 0) { ushort4 pixel_pair; int x, y; bool img_0; //These read must be safe const auto encoded_pixel_0 = sorted_pixel_value[idx + 0]; const auto encoded_pixel_1 = sorted_pixel_value[idx + 1]; //Decode and write decode_pixel_impair(encoded_pixel_0, x, y, img_0); if(img_0) { pixel_pair.x = x; pixel_pair.y = y; } else { pixel_pair.z = x; pixel_pair.w = y; } decode_pixel_impair(encoded_pixel_1, x, y, img_0); if(img_0) { pixel_pair.x = x; pixel_pair.y = y; } else { pixel_pair.z = x; pixel_pair.w = y; } //Write it const auto offset = prefixsum_indicator[idx] - 1; pixel_pair_array[offset] = pixel_pair; } } }; // namespace device }; // namespace surfelwarp surfelwarp::PatchColliderRGBCorrespondence::PatchColliderRGBCorrespondence() { m_rgb_cols = m_rgb_rows = 0; first_frame = true; swap = false; } void surfelwarp::PatchColliderRGBCorrespondence::AllocateBuffer(unsigned img_rows, unsigned img_cols) { //Read the gpc model and upload to device //BinaryFileStream in_fstream(Constants::kGPCModelPath.c_str(), BinaryFileStream::Mode::ReadOnly); const auto& config = ConfigParser::Instance(); BinaryFileStream in_fstream(config.gpc_model_path().string().c_str(), BinaryFileStream::Mode::ReadOnly); 
m_forest.Load(&in_fstream); m_forest.UploadToDevice(); //Restrict the maximum level? m_forest.UpdateSearchLevel(max_search_level); //Determine the size of key-value map m_rgb_rows = img_rows; m_rgb_cols = img_cols; m_kvmap_rows = (m_rgb_rows - patch_clip * 2) / patch_stride; // + 1; m_kvmap_cols = (m_rgb_cols - patch_clip * 2) / patch_stride; // + 1; //Allocate the key-value buffer const auto kv_size = m_kvmap_rows * m_kvmap_rows; //Both rgb_0 and rgb_1 will have key-value pairs m_treeleaf_key.create(2 * kv_size); m_pixel_value.create(2 * kv_size); m_collide_sort.AllocateBuffer(2 * kv_size); //The buffer for marking the valid indicator m_candidate_pixelpair_indicator.create(2 * kv_size); //The buffer for prefixsum and compaction m_prefixsum.AllocateBuffer(m_candidate_pixelpair_indicator.size()); cudaSafeCall(cudaMallocHost((void**)(&m_candidate_size_pagelock), sizeof(unsigned))); //The buffer for output m_correspondence_pixels.AllocateBuffer(max_num_correspondence); } void surfelwarp::PatchColliderRGBCorrespondence::ReleaseBuffer() { //Clear the key-value pair m_treeleaf_key.release(); m_pixel_value.release(); //Clear the indicator m_candidate_pixelpair_indicator.release(); //Clear the pagelock memory cudaSafeCall(cudaFreeHost(m_candidate_size_pagelock)); } void surfelwarp::PatchColliderRGBCorrespondence::SetInputImages( cudaTextureObject_t rgb_0, cudaTextureObject_t rgb_1, cudaTextureObject_t foreground_1 ) { rgb_0_ = rgb_0; rgb_1_ = rgb_1; m_foreground_1 = foreground_1; } void surfelwarp::PatchColliderRGBCorrespondence::SetInputImages( cudaTextureObject_t rgb_0, cudaTextureObject_t rgb_1, cudaTextureObject_t depth_0, cudaTextureObject_t depth_1 ) { throw new std::runtime_error("This version of patch collider only accept rgb images"); } void surfelwarp::PatchColliderRGBCorrespondence::FindCorrespondence(cudaStream_t stream) { dim3 kv_blk(8, 8); dim3 kv_grid(divUp(m_kvmap_cols, kv_blk.x), divUp(m_kvmap_rows, kv_blk.y)); const auto forest_dev = m_forest.OnDevice(); if(first_frame) device::buildColliderKeyValueKernel<patch_radius, num_trees><<<kv_grid, kv_blk, 0, stream>>>( rgb_0_, true, forest_dev, patch_stride, m_kvmap_rows, m_kvmap_cols, m_treeleaf_key.size(), m_pixel_value.size(), m_treeleaf_key.ptr(), m_pixel_value.ptr() ); device::buildColliderKeyValueKernel<patch_radius, num_trees><<<kv_grid, kv_blk, 0, stream>>>( rgb_1_, swap, forest_dev, patch_stride, m_kvmap_rows, m_kvmap_cols, m_treeleaf_key.size(), m_pixel_value.size(), m_treeleaf_key.ptr(), m_pixel_value.ptr() ); //Sort it m_collide_sort.Sort(m_treeleaf_key, m_pixel_value, stream); //Debug code //std::cout << "The number of unique elments " << numUniqueElement(m_treeleaf_key, 0xffffffffu) << std::endl; //Mark of candidate dim3 indicator_blk(64); dim3 indicator_grid(divUp(m_collide_sort.valid_sorted_key.size(), indicator_blk.x)); device::markCorrespondenceCandidateKernel<<<indicator_grid, indicator_blk, 0, stream>>>( m_collide_sort.valid_sorted_key, m_collide_sort.valid_sorted_value, m_foreground_1, m_candidate_pixelpair_indicator.ptr() ); //Do a prefix-sum m_prefixsum.InclusiveSum(m_candidate_pixelpair_indicator, stream); //Get the size of sum cudaSafeCall(cudaMemcpyAsync( m_candidate_size_pagelock, m_prefixsum.valid_prefixsum_array + m_prefixsum.valid_prefixsum_array.size() - 1, sizeof(unsigned), cudaMemcpyDeviceToHost, stream )); //Invoke the collection kernel device::collectCandidatePixelPairKernel<<<indicator_grid, indicator_blk, 0, stream>>>( m_candidate_pixelpair_indicator, m_collide_sort.valid_sorted_value.ptr(), 
m_prefixsum.valid_prefixsum_array.ptr(), m_correspondence_pixels.Ptr() ); //Construct the output // cudaSafeCall(cudaStreamSynchronize(stream)); m_correspondence_pixels.ResizeArrayOrException(*m_candidate_size_pagelock); first_frame = false; swap = !swap; //Debug //std::cout << "The number of candidate array is " << m_valid_correspondence_array.size() << std::endl; //Check here #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif }
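The patch collider above relies on encode_pixel_impair/decode_pixel_impair packing a pixel into one 32-bit word: the low 31 bits hold rgb_x + 1024*rgb_y and the top bit records which image the pixel came from (bit clear for image 0, set for image 1). The standalone restatement below only illustrates that bit layout, not the project code, and it assumes image widths below 1024, which the modulo/division by 1024 already implies.

#include <cassert>

// Same packing as encode_pixel_impair/decode_pixel_impair above, restated
// so the round trip can be checked in isolation.
static unsigned encode(int x, int y, bool img_0) {
    unsigned e = static_cast<unsigned>(x + 1024 * y);
    return img_0 ? (e & ~(1u << 31)) : (e | (1u << 31));
}

static void decode(unsigned e, int &x, int &y, bool &img_0) {
    img_0 = (e & (1u << 31)) == 0;   // top bit set means the pixel is from image 1
    e &= ~(1u << 31);
    x = static_cast<int>(e % 1024);
    y = static_cast<int>(e / 1024);
}

int main() {
    int x = 0, y = 0;
    bool img0 = false;
    decode(encode(123, 456, /*img_0=*/true), x, y, img0);
    assert(x == 123 && y == 456 && img0);   // the round trip preserves the pixel and its image flag
    return 0;
}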
500f8b10b74586b540494344ea1e5be28595d230.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlange.cu normal z -> d, Wed Sep 17 15:08:23 2014 @author Mark Gates */ #include "common_magma.h" /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf, * where m and n are any size. * Has ceil( m/64 ) blocks of 64 threads. Each thread does one row. */ extern "C" __global__ void dlange_inf_kernel( int m, int n, const double *A, int lda, double *dwork ) { int i = blockIdx.x*64 + threadIdx.x; double Cb[4] = {0, 0, 0, 0}; int n_mod_4 = n % 4; n -= n_mod_4; // if beyond last row, skip row if ( i < m ) { A += i; if ( n >= 4 ) { const double *Aend = A + lda*n; double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] }; A += 4*lda; while( A < Aend ) { Cb[0] += fabs( rA[0] ); rA[0] = A[0]; Cb[1] += fabs( rA[1] ); rA[1] = A[lda]; Cb[2] += fabs( rA[2] ); rA[2] = A[2*lda]; Cb[3] += fabs( rA[3] ); rA[3] = A[3*lda]; A += 4*lda; } Cb[0] += fabs( rA[0] ); Cb[1] += fabs( rA[1] ); Cb[2] += fabs( rA[2] ); Cb[3] += fabs( rA[3] ); } /* clean up code */ switch( n_mod_4 ) { case 0: break; case 1: Cb[0] += fabs( A[0] ); break; case 2: Cb[0] += fabs( A[0] ); Cb[1] += fabs( A[lda] ); break; case 3: Cb[0] += fabs( A[0] ); Cb[1] += fabs( A[lda] ); Cb[2] += fabs( A[2*lda] ); break; } /* compute final result */ dwork[i] = Cb[0] + Cb[1] + Cb[2] + Cb[3]; } } /** Purpose ------- DLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real matrix A. Description ----------- DLANGE returns the value DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ** not yet supported ( ( norm1(A), NORM = '1', 'O' or 'o' ** not yet supported ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments --------- @param[in] norm CHARACTER*1 Specifies the value to be returned in DLANGE as described above. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. When M = 0, DLANGE is set to zero. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. When N = 0, DLANGE is set to zero. @param[in] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) The m by n matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(M,1). @param dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= M when NORM = 'I'; otherwise, WORK is not referenced. 
@ingroup magma_daux2 ********************************************************************/ extern "C" double magmablas_dlange( magma_norm_t norm, magma_int_t m, magma_int_t n, const double *A, magma_int_t lda, double *dwork ) { magma_int_t info = 0; if ( norm != MagmaInfNorm ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < m ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( m == 0 || n == 0 ) return 0; dim3 threads( 64 ); dim3 grid( (m-1)/64 + 1 ); hipLaunchKernelGGL(( dlange_inf_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda, dwork ); int i = magma_idamax( m, dwork, 1 ) - 1; double res; hipMemcpy( &res, &dwork[i], sizeof(double), hipMemcpyDeviceToHost ); return res; }
500f8b10b74586b540494344ea1e5be28595d230.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlange.cu normal z -> d, Wed Sep 17 15:08:23 2014 @author Mark Gates */ #include "common_magma.h" /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:m-1, for || A ||_inf, * where m and n are any size. * Has ceil( m/64 ) blocks of 64 threads. Each thread does one row. */ extern "C" __global__ void dlange_inf_kernel( int m, int n, const double *A, int lda, double *dwork ) { int i = blockIdx.x*64 + threadIdx.x; double Cb[4] = {0, 0, 0, 0}; int n_mod_4 = n % 4; n -= n_mod_4; // if beyond last row, skip row if ( i < m ) { A += i; if ( n >= 4 ) { const double *Aend = A + lda*n; double rA[4] = { A[0], A[lda], A[2*lda], A[3*lda] }; A += 4*lda; while( A < Aend ) { Cb[0] += fabs( rA[0] ); rA[0] = A[0]; Cb[1] += fabs( rA[1] ); rA[1] = A[lda]; Cb[2] += fabs( rA[2] ); rA[2] = A[2*lda]; Cb[3] += fabs( rA[3] ); rA[3] = A[3*lda]; A += 4*lda; } Cb[0] += fabs( rA[0] ); Cb[1] += fabs( rA[1] ); Cb[2] += fabs( rA[2] ); Cb[3] += fabs( rA[3] ); } /* clean up code */ switch( n_mod_4 ) { case 0: break; case 1: Cb[0] += fabs( A[0] ); break; case 2: Cb[0] += fabs( A[0] ); Cb[1] += fabs( A[lda] ); break; case 3: Cb[0] += fabs( A[0] ); Cb[1] += fabs( A[lda] ); Cb[2] += fabs( A[2*lda] ); break; } /* compute final result */ dwork[i] = Cb[0] + Cb[1] + Cb[2] + Cb[3]; } } /** Purpose ------- DLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real matrix A. Description ----------- DLANGE returns the value DLANGE = ( max(abs(A(i,j))), NORM = 'M' or 'm' ** not yet supported ( ( norm1(A), NORM = '1', 'O' or 'o' ** not yet supported ( ( normI(A), NORM = 'I' or 'i' ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Arguments --------- @param[in] norm CHARACTER*1 Specifies the value to be returned in DLANGE as described above. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. When M = 0, DLANGE is set to zero. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. When N = 0, DLANGE is set to zero. @param[in] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) The m by n matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(M,1). @param dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= M when NORM = 'I'; otherwise, WORK is not referenced. @ingroup magma_daux2 ********************************************************************/ extern "C" double magmablas_dlange( magma_norm_t norm, magma_int_t m, magma_int_t n, const double *A, magma_int_t lda, double *dwork ) { magma_int_t info = 0; if ( norm != MagmaInfNorm ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < m ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( m == 0 || n == 0 ) return 0; dim3 threads( 64 ); dim3 grid( (m-1)/64 + 1 ); dlange_inf_kernel<<< grid, threads, 0, magma_stream >>>( m, n, A, lda, dwork ); int i = magma_idamax( m, dwork, 1 ) - 1; double res; cudaMemcpy( &res, &dwork[i], sizeof(double), cudaMemcpyDeviceToHost ); return res; }
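The comment block in the file above states that only the infinity norm is implemented and that the workspace must hold at least m elements in that case. A minimal, hypothetical call sequence is sketched below: d_A and d_work are illustrative names, error checking is omitted, and the MAGMA 1.x header and allocator names are assumptions rather than something taken from this file.

#include "magma.h"

int main() {
    magma_init();
    magma_int_t m = 1000, n = 500, lda = m;
    double *d_A = NULL, *d_work = NULL;
    magma_dmalloc(&d_A, lda * n);   // column-major device matrix, lda x n
    magma_dmalloc(&d_work, m);      // one row-sum slot per row, as required for the 'I' norm
    /* ... fill d_A on the device ... */
    double norm_inf = magmablas_dlange(MagmaInfNorm, m, n, d_A, lda, d_work);
    (void)norm_inf;
    magma_free(d_A);
    magma_free(d_work);
    magma_finalize();
    return 0;
}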
e82280372843272df74a6adc725fbde9245e7048.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<iostream> using namespace std; /* a sum reduction on the array of floats 'in'. * The reduction result is written to the * address 'result'. The number of elements to * be reduced is given by 'size' * * The example contains data races because barrier * synchronisation statements, of the form: * __syncthreads(); * are missing. * * Can you add them to eliminate all data races? */ #define N 2 /* Same as blockDim */ #define tid threadIdx.x __global__ void reduce(int *in, int *result, int size) { __shared__ int partial_sums[N]; /* Each thread sums elements in[tid], in[tid + N], in[tid + 2*N], ... */ partial_sums[tid] = in[tid]; for(int i = tid + N; i < size; i += N) { partial_sums[i] += in[i]; } /* Tree reduction computes final sum into partial_sums[0] */ for(int d = N/2; d > 0; d >>= 1) { if(tid < d) { partial_sums[tid] += partial_sums[tid + d]; } } /* Master thread writes out result */ if(tid == 0) { *result = partial_sums[0]; } }
e82280372843272df74a6adc725fbde9245e7048.cu
#include<stdio.h> #include<iostream> using namespace std; /* a sum reduction on the array of floats 'in'. * The reduction result is written to the * address 'result'. The number of elements to * be reduced is given by 'size' * * The example contains data races because barrier * synchronisation statements, of the form: * __syncthreads(); * are missing. * * Can you add them to eliminate all data races? */ #define N 2 /* Same as blockDim */ #define tid threadIdx.x __global__ void reduce(int *in, int *result, int size) { __shared__ int partial_sums[N]; /* Each thread sums elements in[tid], in[tid + N], in[tid + 2*N], ... */ partial_sums[tid] = in[tid]; for(int i = tid + N; i < size; i += N) { partial_sums[i] += in[i]; } /* Tree reduction computes final sum into partial_sums[0] */ for(int d = N/2; d > 0; d >>= 1) { if(tid < d) { partial_sums[tid] += partial_sums[tid + d]; } } /* Master thread writes out result */ if(tid == 0) { *result = partial_sums[0]; } }
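The comments in the reduction above deliberately leave the barriers out and ask where they belong. One possible corrected version is sketched below, following the intent stated in those comments: each thread accumulates its strided elements into partial_sums[tid] (the original writes partial_sums[i], which indexes past the shared array once i reaches N), and __syncthreads() is added before the tree reduction and after each tree level. It keeps the original assumptions that blockDim.x == N and size >= N, and reuses the N and tid macros defined in the file.

__global__ void reduce_fixed(int *in, int *result, int size) {
    __shared__ int partial_sums[N];

    /* Each thread sums elements in[tid], in[tid + N], in[tid + 2*N], ... */
    partial_sums[tid] = in[tid];
    for (int i = tid + N; i < size; i += N) {
        partial_sums[tid] += in[i];
    }
    __syncthreads();   // every partial sum is written before the tree starts

    /* Tree reduction computes the final sum into partial_sums[0] */
    for (int d = N / 2; d > 0; d >>= 1) {
        if (tid < d) {
            partial_sums[tid] += partial_sums[tid + d];
        }
        __syncthreads();   // each level finishes before the next level reads its results
    }

    /* Master thread writes out the result */
    if (tid == 0) {
        *result = partial_sums[0];
    }
}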
b6da464b726ccaa5c0a71e92f48324a71c14c9bb.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "functional.h" __host__ void prepareMakeFluxCUDA (FVMesh2D &m, FVVect<double> &phi, FVVect< FVPoint2D<double> > &u, FVVect<double> &Vd,FVVect<double> &Vn, FVVect<double> &F,Parameter &para) { FVEdge2D edges[m.getNbEdge()]; } __device__ void makeFluxCUDA() { }
b6da464b726ccaa5c0a71e92f48324a71c14c9bb.cu
#include <cuda.h> #include "functional.h" __host__ void prepareMakeFluxCUDA (FVMesh2D &m, FVVect<double> &phi, FVVect< FVPoint2D<double> > &u, FVVect<double> &Vd,FVVect<double> &Vn, FVVect<double> &F,Parameter &para) { FVEdge2D edges[m.getNbEdge()]; } __device__ void makeFluxCUDA() { }
3cf0e6a321a8dac879641a095cd89aaacc910e7a.hip
// !!! This is a file automatically generated by hipify!!! #include <conio.h> #include <iostream> #include <cstdlib> #include <ctime> #include <fstream> #include <iomanip> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> using namespace std; const double step_t = 0.1; const double step_xyz = 0.1; const int Nt = 10; const int rasmer = 3; const int Nxyz = 10; const double sigma = 2; __global__ void calculate_x(double * in, double * out, int n, double step_t, double step_xyz, double sigma, int Nxyz) { double * a = (double *)malloc(Nxyz * sizeof(double)); double * b = (double *)malloc(Nxyz * sizeof(double)); a[0] = 0; b[0] = sin((n * step_t) / 8); for (int j = 1; j < Nxyz - 1; j++) { a[j] = -(-sigma * step_t / step_xyz) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[j - 1]); b[j] = (in[threadIdx.y + threadIdx.y * threadIdx.x + threadIdx.y * threadIdx.x * (j - 1)] - (-sigma * step_t / step_xyz) * b[j - 1]) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[j - 1]); } out[threadIdx.y + threadIdx.y * threadIdx.x + threadIdx.y * threadIdx.x * (Nxyz - 1)] = sin((n * step_t) / 4); for (int j = Nxyz - 2; j > 0; j--) { out[threadIdx.y + threadIdx.y * threadIdx.x + threadIdx.y * threadIdx.x * j] = a[j] * out[threadIdx.y + threadIdx.y * threadIdx.x + threadIdx.y * threadIdx.x * (j + 1)] + b[j]; } free(a); free(b); __syncthreads(); } __global__ void calculate_y(double * in, double * out, int n, double step_t, double step_xyz, double sigma, int Nxyz) { double * a = (double *)malloc(Nxyz * sizeof(double)); double * b = (double *)malloc(Nxyz * sizeof(double)); a[0] = 0; b[0] = sin((n * step_t) / 8); for (int k = 1; k < Nxyz - 1; k++) { a[k] = -(-sigma * step_t / step_xyz) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[k - 1]); b[k] = (in[threadIdx.y + threadIdx.y * (k - 1) + threadIdx.y * (k - 1) * threadIdx.x] - (-sigma * step_t / step_xyz) * b[k - 1]) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[k - 1]); } out[threadIdx.y + threadIdx.y * (Nxyz - 1) + threadIdx.y * (Nxyz - 1) * threadIdx.x] = sin((n * step_t) / 4); for (int k = Nxyz - 2; k > 0; k--) { out[threadIdx.y + threadIdx.y * k + threadIdx.y * k * threadIdx.x ] = a[k] * out[threadIdx.y + threadIdx.y * (k + 1) + threadIdx.y * (k + 1) * threadIdx.x] + b[k]; } free(a); free(b); __syncthreads(); } __global__ void calculate_z(double * in, double * out, int n, double step_t, double step_xyz, double sigma, int Nxyz) { double * a = (double *)malloc(Nxyz * sizeof(double)); double * b = (double *)malloc(Nxyz * sizeof(double)); a[0] = 0; b[0] = sin((n * step_t) / 8); for (int m = 1; m < Nxyz - 1; m++) { a[m] = -(-sigma * step_t / step_xyz) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[m - 1]); b[m] = (in[(m - 1) + (m - 1) * threadIdx.y + (m - 1) * threadIdx.y * threadIdx.x ] - (-sigma * step_t / step_xyz) * b[m - 1]) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[m - 1]); } out[(Nxyz - 1) + (Nxyz - 1) * threadIdx.y + (Nxyz - 1) * threadIdx.y * threadIdx.x] = sin((n * step_t) / 4); for (int m = Nxyz - 2; m > 0; m--) { out[m + m * threadIdx.y + m * threadIdx.y * threadIdx.x] = a[m] * out[(m + 1) + (m + 1) * threadIdx.y + (m + 1) * threadIdx.y * threadIdx.x] + b[m]; } free(a); free(b); __syncthreads(); } int main() { setlocale(LC_ALL, "Russian"); cout << "hello world!" 
<< endl; double U[rasmer * Nt + 1][Nxyz][Nxyz][Nxyz]; for (int j = 0; j < Nxyz; j++) { for (int k = 0; k < Nxyz; k++) { for (int m = 0; m < Nxyz; m++) { U[0][j][k][m] = 0; } } } double *row_in = new double [(Nxyz-2) * (Nxyz-2) * (Nxyz-2)]; double *row_out = new double [(Nxyz-2) * (Nxyz-2) * (Nxyz-2)]; double *row_in_gpu, *row_out_gpu; hipMalloc((void **)&row_in_gpu, sizeof(double) * (Nxyz-2) * (Nxyz-2) * (Nxyz-2)); hipMalloc((void **)&row_out_gpu, sizeof(double) * (Nxyz-2) * (Nxyz-2) * (Nxyz-2)); for (int n = 1; n <= rasmer; n ++) { for (int j = 1; j < Nxyz - 1; j++) { for (int k = 1; k < Nxyz - 1; k++) { for (int m = 1; m < Nxyz - 1; m++ ) { row_in[(m - 1) + (m - 1)*(k - 1) + (m - 1)*(k - 1)*(j - 1)] = U[n - 1][j][k][m]; } } } hipMemcpy(row_in_gpu, row_in, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), hipMemcpyHostToDevice); dim3 gridDim(1); dim3 blockDim(Nxyz - 2, Nxyz - 2); calculate_x < << gridDim, blockDim >> >(row_in_gpu, row_out_gpu, n, step_t, step_xyz, sigma, Nxyz); hipMemcpy(row_out, row_out_gpu, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), hipMemcpyDeviceToHost); // x hipMemcpy(row_in_gpu, row_out, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), hipMemcpyHostToDevice); calculate_y < << gridDim, blockDim >> >(row_in_gpu, row_out_gpu, n, step_t, step_xyz, sigma, Nxyz); hipMemcpy(row_out, row_out_gpu, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), hipMemcpyDeviceToHost); // y hipMemcpy(row_in_gpu, row_out, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), hipMemcpyHostToDevice); calculate_z < << gridDim, blockDim >> >(row_in_gpu, row_out_gpu, n, step_t, step_xyz, sigma, Nxyz); hipMemcpy(row_out, row_out_gpu, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), hipMemcpyDeviceToHost); // z for (int j = 1; j < Nxyz - 1; j++) { for (int k = 1; k < Nxyz - 1; k++) { for (int m = 1; m < Nxyz - 1; m++) { U[n][j][k][m] = row_out[(m - 1) + (m - 1)*(k - 1) + (m - 1)*(k - 1)*(j - 1)]; } } } cout << U[n][Nxyz - 2][Nxyz - 2][1]; } _getch(); return 0; }
3cf0e6a321a8dac879641a095cd89aaacc910e7a.cu
#include <conio.h> #include <iostream> #include <cstdlib> #include <ctime> #include <fstream> #include <iomanip> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> using namespace std; const double step_t = 0.1; const double step_xyz = 0.1; const int Nt = 10; const int rasmer = 3; const int Nxyz = 10; const double sigma = 2; __global__ void calculate_x(double * in, double * out, int n, double step_t, double step_xyz, double sigma, int Nxyz) { double * a = (double *)malloc(Nxyz * sizeof(double)); double * b = (double *)malloc(Nxyz * sizeof(double)); a[0] = 0; b[0] = sin((n * step_t) / 8); for (int j = 1; j < Nxyz - 1; j++) { a[j] = -(-sigma * step_t / step_xyz) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[j - 1]); b[j] = (in[threadIdx.y + threadIdx.y * threadIdx.x + threadIdx.y * threadIdx.x * (j - 1)] - (-sigma * step_t / step_xyz) * b[j - 1]) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[j - 1]); } out[threadIdx.y + threadIdx.y * threadIdx.x + threadIdx.y * threadIdx.x * (Nxyz - 1)] = sin((n * step_t) / 4); for (int j = Nxyz - 2; j > 0; j--) { out[threadIdx.y + threadIdx.y * threadIdx.x + threadIdx.y * threadIdx.x * j] = a[j] * out[threadIdx.y + threadIdx.y * threadIdx.x + threadIdx.y * threadIdx.x * (j + 1)] + b[j]; } free(a); free(b); __syncthreads(); } __global__ void calculate_y(double * in, double * out, int n, double step_t, double step_xyz, double sigma, int Nxyz) { double * a = (double *)malloc(Nxyz * sizeof(double)); double * b = (double *)malloc(Nxyz * sizeof(double)); a[0] = 0; b[0] = sin((n * step_t) / 8); for (int k = 1; k < Nxyz - 1; k++) { a[k] = -(-sigma * step_t / step_xyz) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[k - 1]); b[k] = (in[threadIdx.y + threadIdx.y * (k - 1) + threadIdx.y * (k - 1) * threadIdx.x] - (-sigma * step_t / step_xyz) * b[k - 1]) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[k - 1]); } out[threadIdx.y + threadIdx.y * (Nxyz - 1) + threadIdx.y * (Nxyz - 1) * threadIdx.x] = sin((n * step_t) / 4); for (int k = Nxyz - 2; k > 0; k--) { out[threadIdx.y + threadIdx.y * k + threadIdx.y * k * threadIdx.x ] = a[k] * out[threadIdx.y + threadIdx.y * (k + 1) + threadIdx.y * (k + 1) * threadIdx.x] + b[k]; } free(a); free(b); __syncthreads(); } __global__ void calculate_z(double * in, double * out, int n, double step_t, double step_xyz, double sigma, int Nxyz) { double * a = (double *)malloc(Nxyz * sizeof(double)); double * b = (double *)malloc(Nxyz * sizeof(double)); a[0] = 0; b[0] = sin((n * step_t) / 8); for (int m = 1; m < Nxyz - 1; m++) { a[m] = -(-sigma * step_t / step_xyz) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[m - 1]); b[m] = (in[(m - 1) + (m - 1) * threadIdx.y + (m - 1) * threadIdx.y * threadIdx.x ] - (-sigma * step_t / step_xyz) * b[m - 1]) / ((1 + 2 * sigma * step_t / step_xyz) + (-sigma * step_t / step_xyz) * a[m - 1]); } out[(Nxyz - 1) + (Nxyz - 1) * threadIdx.y + (Nxyz - 1) * threadIdx.y * threadIdx.x] = sin((n * step_t) / 4); for (int m = Nxyz - 2; m > 0; m--) { out[m + m * threadIdx.y + m * threadIdx.y * threadIdx.x] = a[m] * out[(m + 1) + (m + 1) * threadIdx.y + (m + 1) * threadIdx.y * threadIdx.x] + b[m]; } free(a); free(b); __syncthreads(); } int main() { setlocale(LC_ALL, "Russian"); cout << "hello world!" 
<< endl; double U[rasmer * Nt + 1][Nxyz][Nxyz][Nxyz]; for (int j = 0; j < Nxyz; j++) { for (int k = 0; k < Nxyz; k++) { for (int m = 0; m < Nxyz; m++) { U[0][j][k][m] = 0; } } } double *row_in = new double [(Nxyz-2) * (Nxyz-2) * (Nxyz-2)]; double *row_out = new double [(Nxyz-2) * (Nxyz-2) * (Nxyz-2)]; double *row_in_gpu, *row_out_gpu; cudaMalloc((void **)&row_in_gpu, sizeof(double) * (Nxyz-2) * (Nxyz-2) * (Nxyz-2)); cudaMalloc((void **)&row_out_gpu, sizeof(double) * (Nxyz-2) * (Nxyz-2) * (Nxyz-2)); for (int n = 1; n <= rasmer; n ++) { for (int j = 1; j < Nxyz - 1; j++) { for (int k = 1; k < Nxyz - 1; k++) { for (int m = 1; m < Nxyz - 1; m++ ) { row_in[(m - 1) + (m - 1)*(k - 1) + (m - 1)*(k - 1)*(j - 1)] = U[n - 1][j][k][m]; } } } cudaMemcpy(row_in_gpu, row_in, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), cudaMemcpyHostToDevice); dim3 gridDim(1); dim3 blockDim(Nxyz - 2, Nxyz - 2); calculate_x < << gridDim, blockDim >> >(row_in_gpu, row_out_gpu, n, step_t, step_xyz, sigma, Nxyz); cudaMemcpy(row_out, row_out_gpu, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), cudaMemcpyDeviceToHost); //посчитали x cudaMemcpy(row_in_gpu, row_out, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), cudaMemcpyHostToDevice); calculate_y < << gridDim, blockDim >> >(row_in_gpu, row_out_gpu, n, step_t, step_xyz, sigma, Nxyz); cudaMemcpy(row_out, row_out_gpu, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), cudaMemcpyDeviceToHost); //посчитали y cudaMemcpy(row_in_gpu, row_out, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), cudaMemcpyHostToDevice); calculate_z < << gridDim, blockDim >> >(row_in_gpu, row_out_gpu, n, step_t, step_xyz, sigma, Nxyz); cudaMemcpy(row_out, row_out_gpu, sizeof(double) * (Nxyz - 2) * (Nxyz - 2) * (Nxyz - 2), cudaMemcpyDeviceToHost); //посчитали z for (int j = 1; j < Nxyz - 1; j++) { for (int k = 1; k < Nxyz - 1; k++) { for (int m = 1; m < Nxyz - 1; m++) { U[n][j][k][m] = row_out[(m - 1) + (m - 1)*(k - 1) + (m - 1)*(k - 1)*(j - 1)]; } } } cout << U[n][Nxyz - 2][Nxyz - 2][1]; } _getch(); return 0; }
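Both versions of the file above write the kernel launches as "calculate_x < << gridDim, blockDim >> >(...)", which is not valid launch syntax (and is presumably why hipify left these calls unconverted); the Russian comments "посчитали x/y/z" simply mean "computed x/y/z". A corrected launch for the x sweep, using the same variables declared in main, would look like the sketch below; the y and z sweeps follow the same pattern.

// Corrected triple-chevron launch for the x sweep (sketch; same arguments as in the file).
dim3 gridDim(1);
dim3 blockDim(Nxyz - 2, Nxyz - 2);
calculate_x<<<gridDim, blockDim>>>(row_in_gpu, row_out_gpu, n, step_t, step_xyz, sigma, Nxyz);
cudaDeviceSynchronize();   // optional here: the following cudaMemcpy already synchronizes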
b446669302985e00ca37afe5b9dfc6d5bcea620c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "fill.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *A = NULL; hipMalloc(&A, XSIZE*YSIZE); double scalar = 1; int lenA = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( fill), dim3(gridBlock),dim3(threadBlock), 0, 0, A,scalar,lenA); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( fill), dim3(gridBlock),dim3(threadBlock), 0, 0, A,scalar,lenA); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( fill), dim3(gridBlock),dim3(threadBlock), 0, 0, A,scalar,lenA); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b446669302985e00ca37afe5b9dfc6d5bcea620c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "fill.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); double scalar = 1; int lenA = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); fill<<<gridBlock,threadBlock>>>(A,scalar,lenA); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { fill<<<gridBlock,threadBlock>>>(A,scalar,lenA); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { fill<<<gridBlock,threadBlock>>>(A,scalar,lenA); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
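The benchmark above times a kernel named fill from fill.cu, which is not part of this record, so its body is unknown. As a purely hypothetical illustration consistent with the call site fill<<<gridBlock, threadBlock>>>(A, scalar, lenA) — a scalar fill over lenA doubles launched with a 2-D grid and block — it might look like the sketch below; this is an assumption, not the kernel that was actually benchmarked.

// Hypothetical fill kernel matching the call site above (not the real fill.cu).
__global__ void fill(double *A, double scalar, int lenA) {
    // flatten the 2-D grid/block used by the benchmark into one linear index
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = y * gridDim.x * blockDim.x + x;
    if (idx < lenA) {
        A[idx] = scalar;   // write the scalar into every element in range
    }
}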
0c779b4a77e3e0aac86307d6e08b05b1c8cbba8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //program by Vijay manoharan SUNYB //header files #include <stdio.h> #include <stdlib.h> #include<math.h> #include <stdbool.h> #include<sys/time.h> //end of header file //cuda global function __global__ void cudeprime(int limit,int *threadcounter, int *maxi,int pid,int Num_processor) { int number, prime_div; number = 29+(blockIdx.x*2*blockDim.x)+(2*threadIdx.x*Num_processor)+(2*pid); //printf("number %d and process %d\n",number,pid); //printf("block -%d\t size -%d\tthread- %d\t the number is %d\n",blockIdx.x,blockDim.x,threadIdx.x,number); if(number>limit) return; if ((number % 3 == 0) || (number % 5 == 0) || (number % 7 == 0) || (number % 13 == 0) || (number % 11 == 0) || (number % 17 == 0) || (number % 19 == 0) || (number % 23 == 0)) { //do nothing } else { prime_div = 29; while (prime_div * prime_div <= number) { if ((number % prime_div == 0)) break; prime_div = prime_div + 2; } //if the number is not prime_divisible until the square root of the number then the number is prime. if (prime_div * prime_div > number) { //write the value to index. threadcounter[(blockIdx.x*blockDim.x)+threadIdx.x]=number; //printf("thread n number %d %d\n",threadIdx.x,number); } } } //main function extern "C" int *cuda_main(int p_id, int Num_Processor,int number_of_prime) { printf("in cuda n %d pid %d number of process %d \n",number_of_prime,p_id,Num_Processor); static int returnval[2]; //variables for MPI reduction int maxi = 0, count = 0; //variable for time stamp struct timeval tv1, tv2; //variables for checking the a number is prime or not //end of declaration //starting timer here gettimeofday(&tv1, NULL); //MPI declaration ends here //Cuda declaration starts here int threadSize=10; if(number_of_prime<(2000*Num_Processor)) threadSize=number_of_prime/(2*Num_Processor); else threadSize=1000; int blocksize = (number_of_prime / (2*threadSize*Num_Processor)); int totalthreads=blocksize*threadSize*Num_Processor; int arraycount[totalthreads]; int *dev_count, *dev_max; for(int i=0;i<totalthreads;i++) arraycount[i]=0; hipMalloc((void **) &dev_count, totalthreads* sizeof(int)); hipMalloc((void **) &dev_max, sizeof (int)); //end of cuda declaration if (number_of_prime > 10) { hipMemcpy(dev_max, &maxi, sizeof (int), hipMemcpyHostToDevice); hipMemcpy(dev_count, arraycount, totalthreads*sizeof (int), hipMemcpyHostToDevice); printf("number of blocks and threads %d / %d\n", blocksize, threadSize); hipLaunchKernelGGL(( cudeprime) , dim3(blocksize), dim3(threadSize) , 0, 0, number_of_prime,dev_count, dev_max,p_id,Num_Processor); hipMemcpy(&maxi, dev_max, sizeof (int), hipMemcpyDeviceToHost); hipMemcpy( arraycount,dev_count, totalthreads*sizeof (int), hipMemcpyDeviceToHost); gettimeofday(&tv2, NULL); for(int i=0;i<totalthreads;i++) { if(arraycount[i]>0) { count++; if(maxi<arraycount[i])maxi=arraycount[i]; //printf("%d is n and index %d\n",arraycount[i],i); } } } else { count = 4; maxi = 7; } //to broadcast he problem to other nodes from the root node printf("Local count = %d \t Local max value = %d\ncuda exit\n", count, maxi); hipFree(dev_max); hipFree(dev_count); returnval[0]=count; returnval[1]=maxi; return returnval; }
0c779b4a77e3e0aac86307d6e08b05b1c8cbba8c.cu
//program by Vijay manoharan SUNYB //header files #include <stdio.h> #include <stdlib.h> #include<math.h> #include <stdbool.h> #include<sys/time.h> //end of header file //cuda global function __global__ void cudeprime(int limit,int *threadcounter, int *maxi,int pid,int Num_processor) { int number, prime_div; number = 29+(blockIdx.x*2*blockDim.x)+(2*threadIdx.x*Num_processor)+(2*pid); //printf("number %d and process %d\n",number,pid); //printf("block -%d\t size -%d\tthread- %d\t the number is %d\n",blockIdx.x,blockDim.x,threadIdx.x,number); if(number>limit) return; if ((number % 3 == 0) || (number % 5 == 0) || (number % 7 == 0) || (number % 13 == 0) || (number % 11 == 0) || (number % 17 == 0) || (number % 19 == 0) || (number % 23 == 0)) { //do nothing } else { prime_div = 29; while (prime_div * prime_div <= number) { if ((number % prime_div == 0)) break; prime_div = prime_div + 2; } //if the number is not prime_divisible until the square root of the number then the number is prime. if (prime_div * prime_div > number) { //write the value to index. threadcounter[(blockIdx.x*blockDim.x)+threadIdx.x]=number; //printf("thread n number %d %d\n",threadIdx.x,number); } } } //main function extern "C" int *cuda_main(int p_id, int Num_Processor,int number_of_prime) { printf("in cuda n %d pid %d number of process %d \n",number_of_prime,p_id,Num_Processor); static int returnval[2]; //variables for MPI reduction int maxi = 0, count = 0; //variable for time stamp struct timeval tv1, tv2; //variables for checking the a number is prime or not //end of declaration //starting timer here gettimeofday(&tv1, NULL); //MPI declaration ends here //Cuda declaration starts here int threadSize=10; if(number_of_prime<(2000*Num_Processor)) threadSize=number_of_prime/(2*Num_Processor); else threadSize=1000; int blocksize = (number_of_prime / (2*threadSize*Num_Processor)); int totalthreads=blocksize*threadSize*Num_Processor; int arraycount[totalthreads]; int *dev_count, *dev_max; for(int i=0;i<totalthreads;i++) arraycount[i]=0; cudaMalloc((void **) &dev_count, totalthreads* sizeof(int)); cudaMalloc((void **) &dev_max, sizeof (int)); //end of cuda declaration if (number_of_prime > 10) { cudaMemcpy(dev_max, &maxi, sizeof (int), cudaMemcpyHostToDevice); cudaMemcpy(dev_count, arraycount, totalthreads*sizeof (int), cudaMemcpyHostToDevice); printf("number of blocks and threads %d / %d\n", blocksize, threadSize); cudeprime <<<blocksize, threadSize >>>(number_of_prime,dev_count, dev_max,p_id,Num_Processor); cudaMemcpy(&maxi, dev_max, sizeof (int), cudaMemcpyDeviceToHost); cudaMemcpy( arraycount,dev_count, totalthreads*sizeof (int), cudaMemcpyDeviceToHost); gettimeofday(&tv2, NULL); for(int i=0;i<totalthreads;i++) { if(arraycount[i]>0) { count++; if(maxi<arraycount[i])maxi=arraycount[i]; //printf("%d is n and index %d\n",arraycount[i],i); } } } else { count = 4; maxi = 7; } //to broadcast he problem to other nodes from the root node printf("Local count = %d \t Local max value = %d\ncuda exit\n", count, maxi); cudaFree(dev_max); cudaFree(dev_count); returnval[0]=count; returnval[1]=maxi; return returnval; }
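The candidate enumeration in cudeprime above interleaves odd numbers across MPI ranks, blocks, and threads. The easiest way to read the index formula is the single-process case (Num_Processor = 1, pid = 0), where it reduces to consecutive odd candidates starting at 29:

\[
\texttt{number} = 29 + 2\,\texttt{blockDim.x}\cdot\texttt{blockIdx.x} + 2\,\texttt{threadIdx.x}
                = 29 + 2\,(\texttt{blockDim.x}\cdot\texttt{blockIdx.x} + \texttt{threadIdx.x}),
\]

so thread 0 of block 0 tests 29, thread 1 tests 31, thread 2 tests 33, and block 1 continues at 29 + 2*blockDim.x. With more ranks, each rank's candidates are shifted by 2*pid and threads within a block stride by 2*Num_Processor instead of 2.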
6d61dc0d1d8728faf4423abca52413e863910493.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THHUNN/THHUNN.h> #include <THHUNN/common.h> #include <TH/THHalf.h> #include <THHUNN/THHHalfAutoNumerics.cuh> #include <THH/THHAtomics.cuh> #include <THH/THHTensor.hpp> #include <THH/THHStorage.hpp> #define TEMPORAL_MAX_POOLING_THREADS 1024 template <typename Dtype> __global__ void cunn_TemporalMaxPooling_updateOutputKernel(Dtype *input, Dtype *output, THCIndex_t *indices, int input_w, int input_n, int output_w, int kW, int dW) { // Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index Dtype *input_data = input + blockIdx.x * input_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW; Dtype *output_data = output + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; THCIndex_t *indices_data = indices + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; int feat = 0; int time = 0; int max_time = input_n * kW; Dtype max_value; THCIndex_t max_index = 0; if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) { // For all features for (feat = 0; feat < input_n; ++feat) { max_value = THCNumerics<Dtype>::min(); // For all values in the kernel space for (time = 0; time < max_time; time += input_n) { if (max_value < input_data[time + feat]) { max_value = input_data[time + feat]; max_index = time / input_n; } } output_data[feat] = max_value; indices_data[feat] = max_index; } } } template <typename Dtype> __global__ void cunn_TemporalMaxPooling_updateGradInputKernel(Dtype *gradInput, Dtype *gradOutput, THCIndex_t *indices, int input_w, int input_n, int output_w, int kW, int dW) { // Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index Dtype *gradInput_data = gradInput + blockIdx.x * input_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW; Dtype *gradOutput_data = gradOutput + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; THCIndex_t *indices_data = indices + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; int feat = 0; if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) { // For all features for (feat = 0; feat < input_n; ++feat) { gradInput_data[indices_data[feat] * input_n + feat] += gradOutput_data[feat]; } } } template <typename Dtype> __global__ void cunn_TemporalMaxPooling_updateGradInputKernelAtomic(Dtype *gradInput, Dtype *gradOutput, THCIndex_t *indices, int input_w, int input_n, int output_w, int kW, int dW) { // Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index Dtype *gradInput_data = gradInput + blockIdx.x * input_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW; Dtype *gradOutput_data = gradOutput + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; THCIndex_t *indices_data = indices + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; int feat = 0; if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) { // For all features for (feat = 0; feat < input_n; ++feat) { atomicAdd(&gradInput_data[indices_data[feat] * input_n + feat], gradOutput_data[feat]); } } } #include <THHUNN/generic/TemporalMaxPooling.hip> 
#include <THH/THHGenerateFloatTypes.h>
6d61dc0d1d8728faf4423abca52413e863910493.cu
#include <THCUNN/THCUNN.h> #include <THCUNN/common.h> #include <TH/THHalf.h> #include <THCUNN/THCHalfAutoNumerics.cuh> #include <THC/THCAtomics.cuh> #include <THC/THCTensor.hpp> #include <THC/THCStorage.hpp> #define TEMPORAL_MAX_POOLING_THREADS 1024 template <typename Dtype> __global__ void cunn_TemporalMaxPooling_updateOutputKernel(Dtype *input, Dtype *output, THCIndex_t *indices, int input_w, int input_n, int output_w, int kW, int dW) { // Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index Dtype *input_data = input + blockIdx.x * input_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW; Dtype *output_data = output + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; THCIndex_t *indices_data = indices + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; int feat = 0; int time = 0; int max_time = input_n * kW; Dtype max_value; THCIndex_t max_index = 0; if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) { // For all features for (feat = 0; feat < input_n; ++feat) { max_value = THCNumerics<Dtype>::min(); // For all values in the kernel space for (time = 0; time < max_time; time += input_n) { if (max_value < input_data[time + feat]) { max_value = input_data[time + feat]; max_index = time / input_n; } } output_data[feat] = max_value; indices_data[feat] = max_index; } } } template <typename Dtype> __global__ void cunn_TemporalMaxPooling_updateGradInputKernel(Dtype *gradInput, Dtype *gradOutput, THCIndex_t *indices, int input_w, int input_n, int output_w, int kW, int dW) { // Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index Dtype *gradInput_data = gradInput + blockIdx.x * input_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW; Dtype *gradOutput_data = gradOutput + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; THCIndex_t *indices_data = indices + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; int feat = 0; if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) { // For all features for (feat = 0; feat < input_n; ++feat) { gradInput_data[indices_data[feat] * input_n + feat] += gradOutput_data[feat]; } } } template <typename Dtype> __global__ void cunn_TemporalMaxPooling_updateGradInputKernelAtomic(Dtype *gradInput, Dtype *gradOutput, THCIndex_t *indices, int input_w, int input_n, int output_w, int kW, int dW) { // Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index Dtype *gradInput_data = gradInput + blockIdx.x * input_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW; Dtype *gradOutput_data = gradOutput + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; THCIndex_t *indices_data = indices + blockIdx.x * output_w * input_n + ( threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n; int feat = 0; if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) { // For all features for (feat = 0; feat < input_n; ++feat) { atomicAdd(&gradInput_data[indices_data[feat] * input_n + feat], gradOutput_data[feat]); } } } #include <THCUNN/generic/TemporalMaxPooling.cu> #include <THC/THCGenerateFloatTypes.h>
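The pair above implements 1-D temporal max pooling (with argmax indices) inside the THC/THCUNN framework. As a stand-alone illustration of the same idea without the framework types, a minimal sketch could look like the following; the kernel name, the one-thread-per-output-step mapping, and the tiny test sizes are assumptions of this example, not the THCUNN API.

#include <cstdio>
#include <cuda_runtime.h>

// One thread per output time step: scan a window of kW samples starting at t*dW,
// write the max and the offset of the max within the window.
__global__ void temporalMaxPool1d(const float* in, float* out, int* idx,
                                  int input_w, int output_w, int kW, int dW) {
    int t = blockIdx.x * blockDim.x + threadIdx.x;   // output time step
    if (t >= output_w) return;
    int start = t * dW;
    float best = in[start];
    int best_k = 0;
    for (int k = 1; k < kW; ++k) {                   // scan the pooling window
        float v = in[start + k];
        if (v > best) { best = v; best_k = k; }
    }
    out[t] = best;
    idx[t] = best_k;                                 // argmax offset within the window
}

int main() {
    const int input_w = 8, kW = 2, dW = 2, output_w = (input_w - kW) / dW + 1;
    float h_in[input_w] = {1, 3, 2, 2, 5, 4, 0, 7};
    float h_out[output_w]; int h_idx[output_w];

    float *d_in, *d_out; int *d_idx;
    cudaMalloc((void**)&d_in, sizeof(h_in));
    cudaMalloc((void**)&d_out, sizeof(h_out));
    cudaMalloc((void**)&d_idx, sizeof(h_idx));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    temporalMaxPool1d<<<1, 32>>>(d_in, d_out, d_idx, input_w, output_w, kW, dW);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_idx, d_idx, sizeof(h_idx), cudaMemcpyDeviceToHost);

    for (int t = 0; t < output_w; ++t)
        printf("out[%d] = %g (argmax offset %d)\n", t, h_out[t], h_idx[t]);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_idx);
    return 0;
}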
2ca1680d3006e723aee46be8c80dd85f10409ccc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/neuron_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_ROCM template<typename Dtype> __global__ void ReLUForward(const int_tp n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } #endif // USE_ROCM template<typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int_tp count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // NOLINT_NEXT_LINE(whitespace/operators) ReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( CL_KERNEL_SELECT("relu_forward")); viennacl::ocl::enqueue( oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) top_data, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA } // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } #ifdef USE_ROCM template<typename Dtype> __global__ void ReLUBackward(const int_tp n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } #endif // USE_ROCM template<typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int_tp count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel( CL_KERNEL_SELECT("relu_backward")); viennacl::ocl::enqueue( oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx), WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) bottom_diff, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA } } } 
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
2ca1680d3006e723aee46be8c80dd85f10409ccc.cu
#include <algorithm> #include <vector> #include "caffe/neuron_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_CUDA template<typename Dtype> __global__ void ReLUForward(const int_tp n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } #endif // USE_CUDA template<typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int_tp count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) ReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( CL_KERNEL_SELECT("relu_forward")); viennacl::ocl::enqueue( oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) top_data, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA } // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } #ifdef USE_CUDA template<typename Dtype> __global__ void ReLUBackward(const int_tp n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } #endif // USE_CUDA template<typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int_tp count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel( CL_KERNEL_SELECT("relu_backward")); viennacl::ocl::enqueue( oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx), WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) bottom_diff, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
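The layer above dispatches between the CUDA and GreenTea (OpenCL) backends, but the arithmetic is the usual leaky-ReLU forward/backward rule. A stripped-down, self-contained sketch of just that rule follows; all names are illustrative and no Caffe types are used.

#include <cstdio>
#include <cuda_runtime.h>

// Forward: y = x for x > 0, y = slope * x otherwise.
__global__ void leakyReluForward(int n, const float* in, float* out, float slope) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i] > 0.f ? in[i] : in[i] * slope;
}

// Backward: dL/dx = dL/dy * (1 if x > 0 else slope), same form as the layer above.
__global__ void leakyReluBackward(int n, const float* topDiff, const float* in,
                                  float* bottomDiff, float slope) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        bottomDiff[i] = topDiff[i] * ((in[i] > 0.f) + (in[i] <= 0.f) * slope);
}

int main() {
    const int n = 4;
    float h_in[n]  = {-2.f, -0.5f, 0.5f, 2.f};
    float h_out[n], h_grad[n], h_ones[n] = {1.f, 1.f, 1.f, 1.f};
    float *d_in, *d_out, *d_ones, *d_grad;
    cudaMalloc((void**)&d_in, sizeof(h_in));    cudaMalloc((void**)&d_out, sizeof(h_out));
    cudaMalloc((void**)&d_ones, sizeof(h_ones)); cudaMalloc((void**)&d_grad, sizeof(h_grad));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ones, h_ones, sizeof(h_ones), cudaMemcpyHostToDevice);

    leakyReluForward<<<1, 32>>>(n, d_in, d_out, 0.1f);
    leakyReluBackward<<<1, 32>>>(n, d_ones, d_in, d_grad, 0.1f);

    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_grad, d_grad, sizeof(h_grad), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i)
        printf("x=% .1f  y=% .2f  dy/dx=%.1f\n", h_in[i], h_out[i], h_grad[i]);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_ones); cudaFree(d_grad);
    return 0;
}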
68a84ccb36c21199c96667526aeb474ea54941aa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

#define N 60000
#define THREADS 1024

// Realiza la convolución secuencial de los valores de los vectores
float convolucionSecuencial(float *vectorA, float *vectorB) {
    int iPos;
    float fResultado = 0.0;

    // Se multiplican los dos vectores posición a posición
    for (iPos = 0; iPos < N; iPos++)
        vectorA[iPos] *= vectorB[iPos];

    // Se realiza la convolución
    for (iPos = 0; iPos < N; iPos++)
        fResultado += vectorA[iPos];

    return fResultado;
}

__global__ void multParalelaElementoAElemento(float *vectorA, float *vectorB) {
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

    if (i < N)
        vectorA[i] *= vectorB[i];
}

// Kernell CUDA para la suma de los valores del vector
__global__ void sumaParalela(float *vector, int n) {
    __shared__ float vectorCompartido[THREADS];

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

    // Si el dato está fuera del vector
    // o si la hebra no tiene que procesar ningún dato
    if (i >= N || tid >= n)
        vectorCompartido[tid] = 0.0; // Se rellena con ceros
    else
        vectorCompartido[tid] = vector[i]; // Se copia el dato a la memoria compartida

    __syncthreads();

    for (unsigned int iPos = (blockDim.x >> 1); iPos >= 1; iPos = iPos >> 1) {
        if (tid < iPos)
            vectorCompartido[tid] += vectorCompartido[tid + iPos];

        __syncthreads();
    }

    if (tid == 0)
        vector[blockIdx.x] = vectorCompartido[0];
}

int main(void) {
    float host_vA[N], host_vB[N];
    float fResultadoParalelo, fResultadoSecuencial;
    float *dev_vA, *dev_vB;
    unsigned int blocks;
    unsigned int nDatos;

    // Se llena de forma aleatoria el vector sobre el que se realiza la suma
    srand((unsigned) time(NULL));
    for (int i = 0; i < N; i++) {
        host_vA[i] = floorf(10*(rand()/(float)RAND_MAX));
        host_vB[i] = floorf(10*(rand()/(float)RAND_MAX));
    }

    // Pedir memoria en el Device para los vectores a sumar (dev_vA y dev_vB)
    /* COMPLETAR */
    hipMalloc((void **) &dev_vA, N*sizeof(float));
    hipMalloc((void **) &dev_vB, N*sizeof(float));

    // Transferir los vectores del Host al Device
    /* COMPLETAR */
    hipMemcpy(dev_vA, host_vA, N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_vB, host_vB, N*sizeof(float), hipMemcpyHostToDevice);

    blocks = ceil((float) N / (float) THREADS);

    // Llamada al kernel para hacer la multiplicación elemento a elemento
    /* COMPLETAR */
    hipLaunchKernelGGL(( multParalelaElementoAElemento) , dim3(N/32), dim3(32) , 0, 0, dev_vA, dev_vB);

    blocks = N;

    // Llamar al kernell CUDA
    do {
        // Se calcula el número de datos que se procesarán por cada bloque
        if (blocks >= THREADS)
            nDatos = THREADS;
        else
            nDatos = blocks % THREADS;

        // Se calcula el número de bloques necesarios para el número de hebras
        blocks = ceil((float) blocks / (float) THREADS);

        // Llamar al kernel para hacer la resucción
        /* COMPLETAR */
        hipLaunchKernelGGL(( sumaParalela) , dim3(blocks), dim3(THREADS) , 0, 0, dev_vA, nDatos);
    } while (blocks > 1);

    // Copiar el resultado de la operación del Device al Host
    /* COMPLETAR */
    hipMemcpy(&fResultadoParalelo, dev_vA, sizeof(float), hipMemcpyDeviceToHost);

    // Se comprueba que el resultado es correcto y se muestra un mensaje
    fResultadoSecuencial = convolucionSecuencial(host_vA, host_vB);
    if (fResultadoParalelo == fResultadoSecuencial)
        printf("Operacion correcta\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
    else
        printf("Operacion INCORRECTA\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);

    // Librerar la memoria solicitada en el Device
    /* COMPLETAR */
    hipFree(dev_vA);
    hipFree(dev_vB);

    return 0;
}
68a84ccb36c21199c96667526aeb474ea54941aa.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #define N 60000 #define THREADS 1024 // Realiza la convolución secuencial de los valores de los vectores float convolucionSecuencial(float *vectorA, float *vectorB) { int iPos; float fResultado = 0.0; // Se multiplican los dos vectores posición a posición for (iPos = 0; iPos < N; iPos++) vectorA[iPos] *= vectorB[iPos]; // Se realiza la convolución for (iPos = 0; iPos < N; iPos++) fResultado += vectorA[iPos]; return fResultado; } __global__ void multParalelaElementoAElemento(float *vectorA, float *vectorB) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) vectorA[i] *= vectorB[i]; } // Kernell CUDA para la suma de los valores del vector __global__ void sumaParalela(float *vector, int n) { __shared__ float vectorCompartido[THREADS]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; // Si el dato está fuera del vector // o si la hebra no tiene que procesar ningún dato if (i >= N || tid >= n) vectorCompartido[tid] = 0.0; // Se rellena con ceros else vectorCompartido[tid] = vector[i]; // Se copia el dato a la memoria compartida __syncthreads(); for (unsigned int iPos = (blockDim.x >> 1); iPos >= 1; iPos = iPos >> 1) { if (tid < iPos) vectorCompartido[tid] += vectorCompartido[tid + iPos]; __syncthreads(); } if (tid == 0) vector[blockIdx.x] = vectorCompartido[0]; } int main(void) { float host_vA[N], host_vB[N]; float fResultadoParalelo, fResultadoSecuencial; float *dev_vA, *dev_vB; unsigned int blocks; unsigned int nDatos; // Se llena de forma aleatoria el vector sobre el que se realiza la suma srand((unsigned) time(NULL)); for (int i = 0; i < N; i++) { host_vA[i] = floorf(10*(rand()/(float)RAND_MAX)); host_vB[i] = floorf(10*(rand()/(float)RAND_MAX)); } // Pedir memoria en el Device para los vectores a sumar (dev_vA y dev_vB) /* COMPLETAR */ cudaMalloc((void **) &dev_vA, N*sizeof(float)); cudaMalloc((void **) &dev_vB, N*sizeof(float)); // Transferir los vectores del Host al Device /* COMPLETAR */ cudaMemcpy(dev_vA, host_vA, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_vB, host_vB, N*sizeof(float), cudaMemcpyHostToDevice); blocks = ceil((float) N / (float) THREADS); // Llamada al kernel para hacer la multiplicación elemento a elemento /* COMPLETAR */ multParalelaElementoAElemento <<< N/32, 32 >>>(dev_vA, dev_vB); blocks = N; // Llamar al kernell CUDA do { // Se calcula el número de datos que se procesarán por cada bloque if (blocks >= THREADS) nDatos = THREADS; else nDatos = blocks % THREADS; // Se calcula el número de bloques necesarios para el número de hebras blocks = ceil((float) blocks / (float) THREADS); // Llamar al kernel para hacer la resucción /* COMPLETAR */ sumaParalela <<< blocks, THREADS >>>(dev_vA, nDatos); } while (blocks > 1); // Copiar el resultado de la operación del Device al Host /* COMPLETAR */ cudaMemcpy(&fResultadoParalelo, dev_vA, sizeof(float), cudaMemcpyDeviceToHost); // Se comprueba que el resultado es correcto y se muestra un mensaje fResultadoSecuencial = convolucionSecuencial(host_vA, host_vB); if (fResultadoParalelo == fResultadoSecuencial) printf("Operacion correcta\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial); else printf("Operacion INCORRECTA\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial); // Librerar la memoria solicitada en el Device /* COMPLETAR */ cudaFree(dev_vA); cudaFree(dev_vB); return 0; }
f9866baf17d5043491851aaf0e2dc325e43b1b21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "IUnityInterface.h" #include "DebugDLL.h" #include <hipfft.h> #include <math_functions.h> #include <stdio.h> inline static bool debugError(hipError_t error, const char* msg) { if (error != hipSuccess) { DebugDLL::ss << msg << " : " << error; DebugDLL::log(&DebugDLL::ss.str(), Color::Red); return true; } return false; } inline static bool debugResult(hipfftResult result, const char* msg) { if (result != hipSuccess) { DebugDLL::ss << msg << " : " << result; DebugDLL::log(&DebugDLL::ss.str(), Color::Red); return true; } return false; } extern "C" { UNITY_INTERFACE_EXPORT int cudaTest(float* data, int size) { DebugDLL::clear(); if (data == nullptr) { return -1; } hipfftHandle plan; hipfftComplex* complexHostData; hipfftComplex* complexDeviceData; hipfftResult result; hipError_t error; const int byteSize = sizeof(hipfftComplex) * size; // init host data complexHostData = (hipfftComplex*)malloc(byteSize); for (int i = 0; i < size; i++) complexHostData[i] = make_cuFloatComplex(data[i], 0); // create device data error = hipMalloc((void**)&complexDeviceData, byteSize); if (debugError(error, "Unable to hipMalloc complexData")) { goto CUDA_MALLOC_ERROR; } result = hipfftPlan1d(&plan, size, HIPFFT_C2C, 1); if (debugResult(result, "hipfftPlan1d Failed")) { goto CUFFT_PLAN_ERROR; } error = hipMemcpy((void*)complexDeviceData, complexHostData, byteSize, hipMemcpyHostToDevice); if (debugError(error, "hipMemcpy Host => Device failed")) { goto CPY_TO_DEVICE_ERR; } result = hipfftExecC2C(plan, complexDeviceData, complexDeviceData, HIPFFT_FORWARD); if (debugResult(result, "hipfftExecC2R failed")) { goto CUFFT_EXEC_ERR; } error = hipDeviceSynchronize(); if (debugError(error, "hipDeviceSynchronize failed")) { goto DEVICE_SYNCH_ERR; } error = hipMemcpy((void*)complexHostData, complexDeviceData, byteSize, hipMemcpyDeviceToHost); if (debugError(error, "hipMemcpy Device => Host failed")) { goto CPY_TO_HOST_ERR; } // free plan data result = hipfftDestroy(plan); if (debugResult(result, "hipfftDestroy failed")) { return -7; } error = hipFree(complexDeviceData); if (debugError(error, "hipFree failed")) { return -8; } for (int i = 0; i < size; i++) { auto complexNumber = complexHostData[i]; data[i] = sqrt(complexNumber.x * complexNumber.x + complexNumber.y * complexNumber.y); } free(complexHostData); return 0; CUDA_MALLOC_ERROR: free(complexHostData); return -1; CUFFT_PLAN_ERROR: error = hipFree(complexDeviceData); if (debugError(error, "hipFree failed")) { return -8; } free(complexHostData); return -2; CPY_TO_DEVICE_ERR: result = hipfftDestroy(plan); if (debugResult(result, "hipfftDestroy failed")) { return -7; } free(complexHostData); return -3; CUFFT_EXEC_ERR: result = hipfftDestroy(plan); if (debugResult(result, "hipfftDestroy failed")) { return -7; } free(complexHostData); return -4; DEVICE_SYNCH_ERR: result = hipfftDestroy(plan); if (debugResult(result, "hipfftDestroy failed")) { return -7; } free(complexHostData); return -5; CPY_TO_HOST_ERR: result = hipfftDestroy(plan); if (debugResult(result, "hipfftDestroy failed")) { return -7; } free(complexHostData); return -6; } } /* __global__ void addKernel(int* c, const int* a, const int* b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // Helper function for using CUDA to add vectors in parallel hipError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size) { int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; hipError_t 
cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. dim3 gridDimensions(2); dim3 blockDimensions(size / gridDimensions.x); addKernel<<<gridDimensions, blockDimensions>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; } std::unique_ptr<int[]> getAArray(const unsigned int size) { auto a = std::unique_ptr<int[]>(new int[size]); for (int i = 0, int n = 1; i < size; i++, n++) { a[i] = n * n; } return a; } std::unique_ptr<int[]> getBArray(const unsigned int size) { auto a = std::unique_ptr<int[]>(new int[size]); for (int i = 0, int n = 1; i < size; i++, n++) { a[i] = n; } return a; } */
f9866baf17d5043491851aaf0e2dc325e43b1b21.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "IUnityInterface.h" #include "DebugDLL.h" #include <cufft.h> #include <math_functions.h> #include <stdio.h> inline static bool debugError(cudaError_t error, const char* msg) { if (error != cudaSuccess) { DebugDLL::ss << msg << " : " << error; DebugDLL::log(&DebugDLL::ss.str(), Color::Red); return true; } return false; } inline static bool debugResult(cufftResult result, const char* msg) { if (result != cudaSuccess) { DebugDLL::ss << msg << " : " << result; DebugDLL::log(&DebugDLL::ss.str(), Color::Red); return true; } return false; } extern "C" { UNITY_INTERFACE_EXPORT int cudaTest(float* data, int size) { DebugDLL::clear(); if (data == nullptr) { return -1; } cufftHandle plan; cufftComplex* complexHostData; cufftComplex* complexDeviceData; cufftResult result; cudaError_t error; const int byteSize = sizeof(cufftComplex) * size; // init host data complexHostData = (cufftComplex*)malloc(byteSize); for (int i = 0; i < size; i++) complexHostData[i] = make_cuFloatComplex(data[i], 0); // create device data error = cudaMalloc((void**)&complexDeviceData, byteSize); if (debugError(error, "Unable to cudaMalloc complexData")) { goto CUDA_MALLOC_ERROR; } result = cufftPlan1d(&plan, size, CUFFT_C2C, 1); if (debugResult(result, "cufftPlan1d Failed")) { goto CUFFT_PLAN_ERROR; } error = cudaMemcpy((void*)complexDeviceData, complexHostData, byteSize, cudaMemcpyHostToDevice); if (debugError(error, "cudaMemcpy Host => Device failed")) { goto CPY_TO_DEVICE_ERR; } result = cufftExecC2C(plan, complexDeviceData, complexDeviceData, CUFFT_FORWARD); if (debugResult(result, "cufftExecC2R failed")) { goto CUFFT_EXEC_ERR; } error = cudaDeviceSynchronize(); if (debugError(error, "cudaDeviceSynchronize failed")) { goto DEVICE_SYNCH_ERR; } error = cudaMemcpy((void*)complexHostData, complexDeviceData, byteSize, cudaMemcpyDeviceToHost); if (debugError(error, "cudaMemcpy Device => Host failed")) { goto CPY_TO_HOST_ERR; } // free plan data result = cufftDestroy(plan); if (debugResult(result, "cufftDestroy failed")) { return -7; } error = cudaFree(complexDeviceData); if (debugError(error, "cudaFree failed")) { return -8; } for (int i = 0; i < size; i++) { auto complexNumber = complexHostData[i]; data[i] = sqrt(complexNumber.x * complexNumber.x + complexNumber.y * complexNumber.y); } free(complexHostData); return 0; CUDA_MALLOC_ERROR: free(complexHostData); return -1; CUFFT_PLAN_ERROR: error = cudaFree(complexDeviceData); if (debugError(error, "cudaFree failed")) { return -8; } free(complexHostData); return -2; CPY_TO_DEVICE_ERR: result = cufftDestroy(plan); if (debugResult(result, "cufftDestroy failed")) { return -7; } free(complexHostData); return -3; CUFFT_EXEC_ERR: result = cufftDestroy(plan); if (debugResult(result, "cufftDestroy failed")) { return -7; } free(complexHostData); return -4; DEVICE_SYNCH_ERR: result = cufftDestroy(plan); if (debugResult(result, "cufftDestroy failed")) { return -7; } free(complexHostData); return -5; CPY_TO_HOST_ERR: result = cufftDestroy(plan); if (debugResult(result, "cufftDestroy failed")) { return -7; } free(complexHostData); return -6; } } /* __global__ void addKernel(int* c, const int* a, const int* b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // Helper function for using CUDA to add vectors in parallel cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size) { int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU 
system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. dim3 gridDimensions(2); dim3 blockDimensions(size / gridDimensions.x); addKernel<<<gridDimensions, blockDimensions>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; } std::unique_ptr<int[]> getAArray(const unsigned int size) { auto a = std::unique_ptr<int[]>(new int[size]); for (int i = 0, int n = 1; i < size; i++, n++) { a[i] = n * n; } return a; } std::unique_ptr<int[]> getBArray(const unsigned int size) { auto a = std::unique_ptr<int[]>(new int[size]); for (int i = 0, int n = 1; i < size; i++, n++) { a[i] = n; } return a; } */
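debugResult in the pair above compares a cufftResult (hipfftResult) against cudaSuccess (hipSuccess); the two enums are unrelated types, and the comparison only behaves as intended because both success codes happen to be 0. The minimal cuFFT sketch below checks against CUFFT_SUCCESS explicitly; sizes and variable names are illustrative, and the program must be linked with -lcufft.

#include <cstdio>
#include <cuda_runtime.h>
#include <cufft.h>

int main() {
    const int n = 8;
    cufftComplex h_data[n];
    for (int i = 0; i < n; ++i) { h_data[i].x = (float)i; h_data[i].y = 0.0f; }

    cufftComplex* d_data = nullptr;
    if (cudaMalloc((void**)&d_data, n * sizeof(cufftComplex)) != cudaSuccess) return 1;
    cudaMemcpy(d_data, h_data, n * sizeof(cufftComplex), cudaMemcpyHostToDevice);

    cufftHandle plan;
    // cuFFT status codes are cufftResult values, so compare against CUFFT_SUCCESS.
    if (cufftPlan1d(&plan, n, CUFFT_C2C, 1) != CUFFT_SUCCESS) {
        fprintf(stderr, "cufftPlan1d failed\n");
        cudaFree(d_data);
        return 1;
    }
    if (cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD) != CUFFT_SUCCESS) {
        fprintf(stderr, "cufftExecC2C failed\n");
        cufftDestroy(plan);
        cudaFree(d_data);
        return 1;
    }
    cudaDeviceSynchronize();

    cudaMemcpy(h_data, d_data, n * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i)
        printf("bin %d: %8.3f %+8.3fi\n", i, h_data[i].x, h_data[i].y);

    cufftDestroy(plan);
    cudaFree(d_data);
    return 0;
}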
87fb35e19e79664c1834e8115e083a477080fdfa.hip
// !!! This is a file automatically generated by hipify!!! #include "Doc.cuh" Document::Document(string argFilePrefix, int argNumChunks, int argMaxTLLength, int argmaxDocLength, int argWordLength) { filePrefix = argFilePrefix; numChunks = argNumChunks; maxTLLength = argMaxTLLength; maxDocLength = argmaxDocLength; wordLength = argWordLength; chunksPerStream = numChunks / numStreams; //perplexityMid = new float[GridDim]; hipHostMalloc((void**)&perplexityMid, GridDim * sizeof(float)); /*perplexity = new float[maxTLLength];*/ hipHostMalloc((void**)&perplexity, maxTLLength * sizeof(float)); //perplexityAve = new float[1]; hipHostMalloc((void**)&perplexityAve, 1 * sizeof(float)); //effectiveTokenIndex = new int[maxTLLength]; hipHostMalloc((void**)&effectiveTokenIndex, maxTLLength * sizeof(int)); //newTokenCount = new int[wordLength]; hipHostMalloc((void**)&newTokenCount, wordLength * sizeof(int)); //maxTokenCount = new unsigned short int[maxTLLength]; hipHostMalloc((void**)&maxTokenCount, maxTLLength * sizeof(unsigned short int)); //Mflag = new unsigned short int[maxTLLength]; hipHostMalloc((void**)&Mflag, maxTLLength * sizeof(unsigned short int)); } void Document::loadDocument() { /*TLLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; numOfTokenVecD = new int[numChunks]; numOfTokenVecS = new int[numChunks]; timeRecord = new float[GridDim*BlockDim/32];*/ hipHostMalloc((void**)&TLLengthVec, numChunks * sizeof(int)); hipHostMalloc((void**)&docLengthVec, numChunks * sizeof(int)); hipHostMalloc((void**)&numOfTokenVecD, numChunks * sizeof(int)); hipHostMalloc((void**)&numOfTokenVecS, numChunks * sizeof(int)); hipHostMalloc((void**)&timeRecord, GridDim*BlockDim / 32 * sizeof(float)); ifstream docLength((filePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream TLLength((filePrefix + string("/TLLength.txt")).c_str(), ios::binary); ifstream TLSplit((filePrefix + string("/TLSplit.txt")).c_str(), ios::binary); for (int chunkId = 0; chunkId < numChunks; chunkId++) { TLLength >> TLLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; TLSplit >> numOfTokenVecD[chunkId] >> numOfTokenVecS[chunkId]; totalNumOfTokens += TLLengthVec[chunkId]; DocChunk tmpDocChunk(TLLengthVec[chunkId], docLengthVec[chunkId], wordLength); tmpDocChunk.CPUMemSet(); tmpDocChunk.loadChunk(filePrefix, chunkId); docChunkVec.push_back(tmpDocChunk); //float* tmpProbMaxChunk = new float[TLLengthVec[chunkId]]; //memset(tmpProbMaxChunk, 0, TLLengthVec[chunkId] * sizeof(float)); //unsigned short int* tmpProbMaxTopicChunk = new unsigned short int[TLLengthVec[chunkId]]; //memset(tmpProbMaxTopicChunk, 0, TLLengthVec[chunkId] * sizeof(unsigned short int)); //unsigned short int* tmpProbMaxFlagChunk = new unsigned short int[TLLengthVec[chunkId]]; //memset(tmpProbMaxFlagChunk, 0, TLLengthVec[chunkId] * sizeof(unsigned short int)); //unsigned short int* tmpProbMaxTopicFlagChunk = new unsigned short int[TLLengthVec[chunkId]]; //memset(tmpProbMaxTopicFlagChunk, 0, TLLengthVec[chunkId] * sizeof(unsigned short int)); unsigned short int* tmpMaxTokenCount = new unsigned short int[TLLengthVec[chunkId]]; memset(tmpMaxTokenCount, 0, TLLengthVec[chunkId] * sizeof(unsigned short int)); //probMaxChunkVec.push_back(tmpProbMaxChunk); //probMaxTopicChunkVec.push_back(tmpProbMaxTopicChunk); //probMaxFlagChunkVec.push_back(tmpProbMaxFlagChunk); //probMaxTopicFlagChunkVec.push_back(tmpProbMaxTopicFlagChunk); maxTokenCountVec.push_back(tmpMaxTokenCount); } memset(effectiveTokenIndex, 0, maxTLLength * 
sizeof(int)); memset(newTokenCount, 0, wordLength * sizeof(int)); memset(maxTokenCount, 0, maxTLLength * sizeof(unsigned short int)); memset(Mflag, 0, maxTLLength * sizeof(unsigned short int)); printf("total num of tokens:%f\n", totalNumOfTokens); printf("All chunks loaded!"); docLength.close(); TLLength.close(); } void Document::CPU2GPUPerplexity(hipStream_t& stream) { //memset(perplexityMid, 0, GridDim * sizeof(float)); for (int i = 0; i < numStreams; i++) { hipMemsetAsync(devicePerplexityMid[i], 0, GridDim * sizeof(float), stream); //hipMemcpyAsync(devicePerplexityMid[i], perplexityMid, GridDim * sizeof(float), hipMemcpyHostToDevice, stream); } // /*hipMemset(devicePerplexity,0,maxTLLength*sizeof(float));*/ /*hipMemsetAsync(devicePerplexityMid, 0, GridDim * sizeof(float), stream);*/ } void Document::GPU2CPUPerplexity(hipStream_t& stream) { hipMemcpyAsync(perplexityMid, devicePerplexityMid, (GridDim) * sizeof(float), hipMemcpyDeviceToHost, stream); //hipMemcpy(perplexity, devicePerplexity, maxTLLength*sizeof(float), hipMemcpyDeviceToHost); sumPerplexity = 0.0; for (int i = 0; i < GridDim; i++) { //printf("Perplexity:%f \n", perplexityMid[i]); sumPerplexity += perplexityMid[i]/ 467723.0; } //printf("Parallel Perplexity:%f \n", sumPerplexity); } void Document::CPU2DiskPerplexity(string argFilePrefix) { ofstream OutPutPerplexity((argFilePrefix + string("/Perplexity.txt")).c_str(), ios::binary); for (int i = 0; i < maxTLLength; i++) { OutPutPerplexity << perplexity[i] << "\n"; } OutPutPerplexity.close(); } void Document::GPU2CPUEffectiveTokenIndex() { hipMemcpy(effectiveTokenIndex, deviceEffectiveTokenIndex, maxTLLength * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(newTokenCount, deviceNewTokenCount, wordLength * sizeof(int), hipMemcpyDeviceToHost); } void Document::CPU2DiskEffectiveTokenIndex(string argFilePrefix) { ofstream OutPutEffectiveTokenIndex((argFilePrefix + string("/EffectiveTokenIndex.txt")).c_str(), ios::binary); for (int i = 0; i < maxTLLength; i++) { OutPutEffectiveTokenIndex << effectiveTokenIndex[i] << "\n"; } OutPutEffectiveTokenIndex.close(); ofstream OutPutNewTokenCount((argFilePrefix + string("/NewTokenCount.txt")).c_str(), ios::binary); for (int i = 0; i < wordLength; i++) { OutPutNewTokenCount << newTokenCount[i] << "\n"; } OutPutNewTokenCount.close(); } void Document::GPUMemAllocate() { for (int i = 0; i < numStreams; i++) { hipMalloc((void**)&deviceTLTopic[i], (maxTLLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceTLDocCount[i], (maxDocLength) * sizeof(int)); hipMalloc((void**)&deviceTLDocOffset[i], (maxDocLength) * sizeof(int)); hipMalloc((void**)&deviceTLWordCount[i], (wordLength) * sizeof(int)); hipMalloc((void**)&deviceTLWordOffset[i], (wordLength) * sizeof(int)); hipMalloc((void**)&deviceMapWord2Doc[i], (maxTLLength) * sizeof(int)); hipMalloc((void**)&deviceMapDoc2Word[i], (maxTLLength) * sizeof(int)); hipMalloc((void**)&deviceRandomfloat[i], (maxTLLength) * sizeof(float)); /*hipMalloc((void**)&deviceMflag[i], (maxTLLength) * sizeof(unsigned short int));*/ hipMalloc((void**)&deviceEffectiveTokenIndex[i], (maxTLLength) * sizeof(int)); hipMalloc((void**)&deviceNewTokenCount[i], (wordLength) * sizeof(int)); /*hipMalloc((void**)&devicePerplexity[i], (maxTLLength) * sizeof(float));*/ hipMalloc((void **)&d_blockCounter[i], sizeof(int)*(1)); hipMalloc((void **)&d_warpCounter[i], sizeof(int)*(1)); hipMalloc((void **)&d_dense[i], sizeof(int)*(GridDim*BlockDim*K/32)); hipMalloc((void **)&deviceWTHeadDense[i], sizeof(float)*(GridDim*K)); /* 
hipMalloc((void**)&deviceProbMax, (maxTLLength) * sizeof(float)); hipMalloc((void**)&deviceProbMaxTopic, (maxTLLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceProbMaxFlag, (maxTLLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceProbMaxTopicFlag, (maxTLLength) * sizeof(unsigned short int));*/ /*hipMalloc((void**)&deviceMaxTokenCount[i], (maxTLLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceMaxTopic[i], (maxTLLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceSecondMaxTokenCount[i], (maxTLLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceSecondMaxTopic[i], (maxTLLength) * sizeof(unsigned short int));*/ hipMalloc((void**)&deviceMaxSecTopic[i], (maxTLLength) * sizeof(long long int)); hipMalloc((void**)&deviceWordMaxTopic[i], (wordLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceWordSecondMaxTopic[i], (wordLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceWordThirdMaxTopic[i], (wordLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceWordMaxProb[i], (wordLength) * sizeof(float)); hipMalloc((void**)&deviceWordSecondMaxProb[i], (wordLength) * sizeof(float)); hipMalloc((void**)&deviceWordThirdMaxProb[i], (wordLength) * sizeof(float)); hipMalloc((void**)&deviceQArray[i], (wordLength) * sizeof(float)); hipMalloc((void**)&deviceMaxProb[i], (maxTLLength) * sizeof(float)); hipMalloc((void**)&deviceThresProb[i], (maxTLLength) * sizeof(float)); hipMalloc((void**)&deviceTimeRecord[i], (GridDim*BlockDim/32) * sizeof(float)); hipMalloc((void**)&devicePerplexityAve[i], 1 * sizeof(float)); hipMalloc((void**)&devicePerplexityMid[i], sizeof(float)*GridDim); //hipMalloc((void**)&deviceTotalTokenCount[i], (maxTLLength) * sizeof(unsigned short int)); } TLMemory = ((3 * maxTLLength + 2 * maxDocLength + 2 * wordLength + GridDim*K) * sizeof(int) + (maxTLLength + GridDim*BlockDim / 32 + GridDim*K) * sizeof(float))/ 1000000000.0; printf("Token list memory usage:%f GB\n", TLMemory); } void Document::GPU2CPUTime() { hipMemcpy(timeRecord, deviceTimeRecord, (GridDim*BlockDim / 32) * sizeof(float), hipMemcpyDeviceToHost); hipMemset(deviceTimeRecord, 0, (GridDim*BlockDim / 32) * sizeof(float)); } //void Document::CPU2DiskTime(ofstream argOutPutTime) { // // for (int i = 0; i < GridDim*BlockDim / 32; i++) { // argOutPutTime << timeRecord[i] << " "; // } // argOutPutTime << "\n"; //} void Document::CPU2GPU(int argChunkId, int argStreamId, hipStream_t& stream) { hipMemcpyAsync(deviceTLTopic[argStreamId], docChunkVec[argChunkId].TLTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceTLDocCount[argStreamId], docChunkVec[argChunkId].TLDocCount, (docLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceTLDocOffset[argStreamId], docChunkVec[argChunkId].TLDocOffset, (docLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceTLWordCount[argStreamId], docChunkVec[argChunkId].TLWordCount, (wordLength) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceTLWordOffset[argStreamId], docChunkVec[argChunkId].TLWordOffset, (wordLength) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceMapWord2Doc[argStreamId], docChunkVec[argChunkId].mapWord2Doc, (TLLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice, stream); //hipMemcpyAsync(deviceMapDoc2Word[argStreamId], docChunkVec[argChunkId].mapDoc2Word, (TLLengthVec[argChunkId]) * sizeof(int), hipMemcpyHostToDevice, 
stream); //hipMemcpyAsync(deviceTotalTokenCount[argStreamId], docChunkVec[argChunkId].totalTokenCount, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream); //hipMemcpy(deviceProbMax, probMaxChunkVec[argChunkId], (TLLengthVec[argChunkId]) * sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(deviceProbMaxTopic, probMaxTopicChunkVec[argChunkId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice); //hipMemcpy(deviceProbMaxTopicFlag, probMaxTopicFlagChunkVec[argChunkId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice); //hipMemcpy(deviceProbMaxFlag, probMaxFlagChunkVec[argChunkId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice); /*hipMemcpy(deviceMaxTopic, docChunkVec[argChunkId].TLMaxTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice);*/ //hipMemset(deviceProbMaxTopicFlag, 0, (maxTLLength) * sizeof(unsigned short int)); //hipMemset(deviceProbMaxFlag, 0, (maxTLLength) * sizeof(unsigned short int)); /*hipMemsetAsync(deviceMaxTokenCount[argStreamId], 0, (maxTLLength) * sizeof(unsigned short int), stream);*/ hipMemsetAsync(deviceMaxSecTopic[argStreamId], 0, (maxTLLength) * sizeof(long long int), stream); /*hipMemsetAsync(deviceMflag[argStreamId], 0, (maxTLLength) * sizeof(unsigned short int), stream);*/ hipMemsetAsync(deviceEffectiveTokenIndex[argStreamId], 0, (maxTLLength) * sizeof(int), stream); hipMemsetAsync(deviceNewTokenCount[argStreamId], 0, (wordLength) * sizeof(int), stream); //hipMemsetAsync(deviceTotalTokenCount[argStreamId], 0, (maxTLLength) * sizeof(unsigned short int), stream); /*hipMemcpyAsync(deviceMflag[argStreamId], Mflag, (maxTLLength) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream);*/ /*hipMemcpyAsync(deviceMaxTokenCount[argStreamId], maxTokenCount, (maxTLLength) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceEffectiveTokenIndex[argStreamId], effectiveTokenIndex, (maxTLLength) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceNewTokenCount[argStreamId], newTokenCount, (wordLength) * sizeof(int), hipMemcpyHostToDevice, stream);*/ } void Document::GPU2CPU(int argChunkId, int argStreamId, hipStream_t& stream) { hipMemcpyAsync(docChunkVec[argChunkId].TLTopic, deviceTLTopic[argStreamId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost, stream); /*hipMemcpy(probMaxTopicFlagChunkVec[argChunkId],deviceProbMaxTopicFlag, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost); hipMemcpy( probMaxFlagChunkVec[argChunkId],deviceProbMaxFlag, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost); hipMemcpy(probMaxChunkVec[argChunkId],deviceProbMax, (TLLengthVec[argChunkId]) * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(probMaxTopicChunkVec[argChunkId], deviceProbMaxTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost);*/ //hipMemcpyAsync(docChunkVec[argChunkId].TLMaxTopic, deviceMaxTopic[argStreamId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost, stream); } // //void Document::PercentageCalculate() //{ // increasePercent = 0.0; // topicUnchangedPercent = 0.0; // for (int chunkId = 0; chunkId < numChunks; chunkId++) { // for (int i = 0; i < TLLengthVec[chunkId]; i++) { // increasePercent += float(probMaxFlagChunkVec[chunkId][i]); // topicUnchangedPercent += float(probMaxTopicFlagChunkVec[chunkId][i]); // } // } // 
printf("increasePercent:%f\n", increasePercent); // printf("topicUnchangedPercent:%f\n", topicUnchangedPercent); // printf("total num of tokens:%f\n", totalNumOfTokens); // increasePercent /= totalNumOfTokens; // topicUnchangedPercent /= totalNumOfTokens; // // //} // // void Document::deviceCounterMemAllocate() { for (int i = 0; i < numStreams; i++) { hipMalloc((void**)&deviceCounterWTUpdateKernel[i], sizeof(unsigned int)); hipMalloc((void**)&deviceCounterWTDenUpdateKernel[i], sizeof(unsigned int)); hipMalloc((void**)&deviceCounterWTAdditionKernel[i], sizeof(unsigned int)); hipMalloc((void**)&deviceCounterMaxTopicKernel[i], sizeof(unsigned int)); hipMalloc((void**)&deviceCounterDTUpdateKernel[i], sizeof(unsigned int)); hipMalloc((void**)&deviceCounterUpdateProbKernel[i], sizeof(unsigned int)); hipMalloc((void**)&deviceCounterSampleKernelD[i], sizeof(unsigned int)); hipMalloc((void**)&deviceCounterSampleKernelS[i], sizeof(unsigned int)); } }
87fb35e19e79664c1834e8115e083a477080fdfa.cu
#include "Doc.cuh" Document::Document(string argFilePrefix, int argNumChunks, int argMaxTLLength, int argmaxDocLength, int argWordLength) { filePrefix = argFilePrefix; numChunks = argNumChunks; maxTLLength = argMaxTLLength; maxDocLength = argmaxDocLength; wordLength = argWordLength; chunksPerStream = numChunks / numStreams; //perplexityMid = new float[GridDim]; cudaMallocHost((void**)&perplexityMid, GridDim * sizeof(float)); /*perplexity = new float[maxTLLength];*/ cudaMallocHost((void**)&perplexity, maxTLLength * sizeof(float)); //perplexityAve = new float[1]; cudaMallocHost((void**)&perplexityAve, 1 * sizeof(float)); //effectiveTokenIndex = new int[maxTLLength]; cudaMallocHost((void**)&effectiveTokenIndex, maxTLLength * sizeof(int)); //newTokenCount = new int[wordLength]; cudaMallocHost((void**)&newTokenCount, wordLength * sizeof(int)); //maxTokenCount = new unsigned short int[maxTLLength]; cudaMallocHost((void**)&maxTokenCount, maxTLLength * sizeof(unsigned short int)); //Mflag = new unsigned short int[maxTLLength]; cudaMallocHost((void**)&Mflag, maxTLLength * sizeof(unsigned short int)); } void Document::loadDocument() { /*TLLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; numOfTokenVecD = new int[numChunks]; numOfTokenVecS = new int[numChunks]; timeRecord = new float[GridDim*BlockDim/32];*/ cudaMallocHost((void**)&TLLengthVec, numChunks * sizeof(int)); cudaMallocHost((void**)&docLengthVec, numChunks * sizeof(int)); cudaMallocHost((void**)&numOfTokenVecD, numChunks * sizeof(int)); cudaMallocHost((void**)&numOfTokenVecS, numChunks * sizeof(int)); cudaMallocHost((void**)&timeRecord, GridDim*BlockDim / 32 * sizeof(float)); ifstream docLength((filePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream TLLength((filePrefix + string("/TLLength.txt")).c_str(), ios::binary); ifstream TLSplit((filePrefix + string("/TLSplit.txt")).c_str(), ios::binary); for (int chunkId = 0; chunkId < numChunks; chunkId++) { TLLength >> TLLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; TLSplit >> numOfTokenVecD[chunkId] >> numOfTokenVecS[chunkId]; totalNumOfTokens += TLLengthVec[chunkId]; DocChunk tmpDocChunk(TLLengthVec[chunkId], docLengthVec[chunkId], wordLength); tmpDocChunk.CPUMemSet(); tmpDocChunk.loadChunk(filePrefix, chunkId); docChunkVec.push_back(tmpDocChunk); //float* tmpProbMaxChunk = new float[TLLengthVec[chunkId]]; //memset(tmpProbMaxChunk, 0, TLLengthVec[chunkId] * sizeof(float)); //unsigned short int* tmpProbMaxTopicChunk = new unsigned short int[TLLengthVec[chunkId]]; //memset(tmpProbMaxTopicChunk, 0, TLLengthVec[chunkId] * sizeof(unsigned short int)); //unsigned short int* tmpProbMaxFlagChunk = new unsigned short int[TLLengthVec[chunkId]]; //memset(tmpProbMaxFlagChunk, 0, TLLengthVec[chunkId] * sizeof(unsigned short int)); //unsigned short int* tmpProbMaxTopicFlagChunk = new unsigned short int[TLLengthVec[chunkId]]; //memset(tmpProbMaxTopicFlagChunk, 0, TLLengthVec[chunkId] * sizeof(unsigned short int)); unsigned short int* tmpMaxTokenCount = new unsigned short int[TLLengthVec[chunkId]]; memset(tmpMaxTokenCount, 0, TLLengthVec[chunkId] * sizeof(unsigned short int)); //probMaxChunkVec.push_back(tmpProbMaxChunk); //probMaxTopicChunkVec.push_back(tmpProbMaxTopicChunk); //probMaxFlagChunkVec.push_back(tmpProbMaxFlagChunk); //probMaxTopicFlagChunkVec.push_back(tmpProbMaxTopicFlagChunk); maxTokenCountVec.push_back(tmpMaxTokenCount); } memset(effectiveTokenIndex, 0, maxTLLength * sizeof(int)); memset(newTokenCount, 0, wordLength * 
sizeof(int)); memset(maxTokenCount, 0, maxTLLength * sizeof(unsigned short int)); memset(Mflag, 0, maxTLLength * sizeof(unsigned short int)); printf("total num of tokens:%f\n", totalNumOfTokens); printf("All chunks loaded!"); docLength.close(); TLLength.close(); } void Document::CPU2GPUPerplexity(cudaStream_t& stream) { //memset(perplexityMid, 0, GridDim * sizeof(float)); for (int i = 0; i < numStreams; i++) { cudaMemsetAsync(devicePerplexityMid[i], 0, GridDim * sizeof(float), stream); //cudaMemcpyAsync(devicePerplexityMid[i], perplexityMid, GridDim * sizeof(float), cudaMemcpyHostToDevice, stream); } // /*cudaMemset(devicePerplexity,0,maxTLLength*sizeof(float));*/ /*cudaMemsetAsync(devicePerplexityMid, 0, GridDim * sizeof(float), stream);*/ } void Document::GPU2CPUPerplexity(cudaStream_t& stream) { cudaMemcpyAsync(perplexityMid, devicePerplexityMid, (GridDim) * sizeof(float), cudaMemcpyDeviceToHost, stream); //cudaMemcpy(perplexity, devicePerplexity, maxTLLength*sizeof(float), cudaMemcpyDeviceToHost); sumPerplexity = 0.0; for (int i = 0; i < GridDim; i++) { //printf("Perplexity:%f \n", perplexityMid[i]); sumPerplexity += perplexityMid[i]/ 467723.0; } //printf("Parallel Perplexity:%f \n", sumPerplexity); } void Document::CPU2DiskPerplexity(string argFilePrefix) { ofstream OutPutPerplexity((argFilePrefix + string("/Perplexity.txt")).c_str(), ios::binary); for (int i = 0; i < maxTLLength; i++) { OutPutPerplexity << perplexity[i] << "\n"; } OutPutPerplexity.close(); } void Document::GPU2CPUEffectiveTokenIndex() { cudaMemcpy(effectiveTokenIndex, deviceEffectiveTokenIndex, maxTLLength * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(newTokenCount, deviceNewTokenCount, wordLength * sizeof(int), cudaMemcpyDeviceToHost); } void Document::CPU2DiskEffectiveTokenIndex(string argFilePrefix) { ofstream OutPutEffectiveTokenIndex((argFilePrefix + string("/EffectiveTokenIndex.txt")).c_str(), ios::binary); for (int i = 0; i < maxTLLength; i++) { OutPutEffectiveTokenIndex << effectiveTokenIndex[i] << "\n"; } OutPutEffectiveTokenIndex.close(); ofstream OutPutNewTokenCount((argFilePrefix + string("/NewTokenCount.txt")).c_str(), ios::binary); for (int i = 0; i < wordLength; i++) { OutPutNewTokenCount << newTokenCount[i] << "\n"; } OutPutNewTokenCount.close(); } void Document::GPUMemAllocate() { for (int i = 0; i < numStreams; i++) { cudaMalloc((void**)&deviceTLTopic[i], (maxTLLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceTLDocCount[i], (maxDocLength) * sizeof(int)); cudaMalloc((void**)&deviceTLDocOffset[i], (maxDocLength) * sizeof(int)); cudaMalloc((void**)&deviceTLWordCount[i], (wordLength) * sizeof(int)); cudaMalloc((void**)&deviceTLWordOffset[i], (wordLength) * sizeof(int)); cudaMalloc((void**)&deviceMapWord2Doc[i], (maxTLLength) * sizeof(int)); cudaMalloc((void**)&deviceMapDoc2Word[i], (maxTLLength) * sizeof(int)); cudaMalloc((void**)&deviceRandomfloat[i], (maxTLLength) * sizeof(float)); /*cudaMalloc((void**)&deviceMflag[i], (maxTLLength) * sizeof(unsigned short int));*/ cudaMalloc((void**)&deviceEffectiveTokenIndex[i], (maxTLLength) * sizeof(int)); cudaMalloc((void**)&deviceNewTokenCount[i], (wordLength) * sizeof(int)); /*cudaMalloc((void**)&devicePerplexity[i], (maxTLLength) * sizeof(float));*/ cudaMalloc((void **)&d_blockCounter[i], sizeof(int)*(1)); cudaMalloc((void **)&d_warpCounter[i], sizeof(int)*(1)); cudaMalloc((void **)&d_dense[i], sizeof(int)*(GridDim*BlockDim*K/32)); cudaMalloc((void **)&deviceWTHeadDense[i], sizeof(float)*(GridDim*K)); /* cudaMalloc((void**)&deviceProbMax, 
(maxTLLength) * sizeof(float)); cudaMalloc((void**)&deviceProbMaxTopic, (maxTLLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceProbMaxFlag, (maxTLLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceProbMaxTopicFlag, (maxTLLength) * sizeof(unsigned short int));*/ /*cudaMalloc((void**)&deviceMaxTokenCount[i], (maxTLLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceMaxTopic[i], (maxTLLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceSecondMaxTokenCount[i], (maxTLLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceSecondMaxTopic[i], (maxTLLength) * sizeof(unsigned short int));*/ cudaMalloc((void**)&deviceMaxSecTopic[i], (maxTLLength) * sizeof(long long int)); cudaMalloc((void**)&deviceWordMaxTopic[i], (wordLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceWordSecondMaxTopic[i], (wordLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceWordThirdMaxTopic[i], (wordLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceWordMaxProb[i], (wordLength) * sizeof(float)); cudaMalloc((void**)&deviceWordSecondMaxProb[i], (wordLength) * sizeof(float)); cudaMalloc((void**)&deviceWordThirdMaxProb[i], (wordLength) * sizeof(float)); cudaMalloc((void**)&deviceQArray[i], (wordLength) * sizeof(float)); cudaMalloc((void**)&deviceMaxProb[i], (maxTLLength) * sizeof(float)); cudaMalloc((void**)&deviceThresProb[i], (maxTLLength) * sizeof(float)); cudaMalloc((void**)&deviceTimeRecord[i], (GridDim*BlockDim/32) * sizeof(float)); cudaMalloc((void**)&devicePerplexityAve[i], 1 * sizeof(float)); cudaMalloc((void**)&devicePerplexityMid[i], sizeof(float)*GridDim); //cudaMalloc((void**)&deviceTotalTokenCount[i], (maxTLLength) * sizeof(unsigned short int)); } TLMemory = ((3 * maxTLLength + 2 * maxDocLength + 2 * wordLength + GridDim*K) * sizeof(int) + (maxTLLength + GridDim*BlockDim / 32 + GridDim*K) * sizeof(float))/ 1000000000.0; printf("Token list memory usage:%f GB\n", TLMemory); } void Document::GPU2CPUTime() { cudaMemcpy(timeRecord, deviceTimeRecord, (GridDim*BlockDim / 32) * sizeof(float), cudaMemcpyDeviceToHost); cudaMemset(deviceTimeRecord, 0, (GridDim*BlockDim / 32) * sizeof(float)); } //void Document::CPU2DiskTime(ofstream argOutPutTime) { // // for (int i = 0; i < GridDim*BlockDim / 32; i++) { // argOutPutTime << timeRecord[i] << " "; // } // argOutPutTime << "\n"; //} void Document::CPU2GPU(int argChunkId, int argStreamId, cudaStream_t& stream) { cudaMemcpyAsync(deviceTLTopic[argStreamId], docChunkVec[argChunkId].TLTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceTLDocCount[argStreamId], docChunkVec[argChunkId].TLDocCount, (docLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceTLDocOffset[argStreamId], docChunkVec[argChunkId].TLDocOffset, (docLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceTLWordCount[argStreamId], docChunkVec[argChunkId].TLWordCount, (wordLength) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceTLWordOffset[argStreamId], docChunkVec[argChunkId].TLWordOffset, (wordLength) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceMapWord2Doc[argStreamId], docChunkVec[argChunkId].mapWord2Doc, (TLLengthVec[argChunkId]) * sizeof(int), cudaMemcpyHostToDevice, stream); //cudaMemcpyAsync(deviceMapDoc2Word[argStreamId], docChunkVec[argChunkId].mapDoc2Word, (TLLengthVec[argChunkId]) * sizeof(int), 
cudaMemcpyHostToDevice, stream); //cudaMemcpyAsync(deviceTotalTokenCount[argStreamId], docChunkVec[argChunkId].totalTokenCount, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream); //cudaMemcpy(deviceProbMax, probMaxChunkVec[argChunkId], (TLLengthVec[argChunkId]) * sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(deviceProbMaxTopic, probMaxTopicChunkVec[argChunkId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice); //cudaMemcpy(deviceProbMaxTopicFlag, probMaxTopicFlagChunkVec[argChunkId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice); //cudaMemcpy(deviceProbMaxFlag, probMaxFlagChunkVec[argChunkId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice); /*cudaMemcpy(deviceMaxTopic, docChunkVec[argChunkId].TLMaxTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice);*/ //cudaMemset(deviceProbMaxTopicFlag, 0, (maxTLLength) * sizeof(unsigned short int)); //cudaMemset(deviceProbMaxFlag, 0, (maxTLLength) * sizeof(unsigned short int)); /*cudaMemsetAsync(deviceMaxTokenCount[argStreamId], 0, (maxTLLength) * sizeof(unsigned short int), stream);*/ cudaMemsetAsync(deviceMaxSecTopic[argStreamId], 0, (maxTLLength) * sizeof(long long int), stream); /*cudaMemsetAsync(deviceMflag[argStreamId], 0, (maxTLLength) * sizeof(unsigned short int), stream);*/ cudaMemsetAsync(deviceEffectiveTokenIndex[argStreamId], 0, (maxTLLength) * sizeof(int), stream); cudaMemsetAsync(deviceNewTokenCount[argStreamId], 0, (wordLength) * sizeof(int), stream); //cudaMemsetAsync(deviceTotalTokenCount[argStreamId], 0, (maxTLLength) * sizeof(unsigned short int), stream); /*cudaMemcpyAsync(deviceMflag[argStreamId], Mflag, (maxTLLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream);*/ /*cudaMemcpyAsync(deviceMaxTokenCount[argStreamId], maxTokenCount, (maxTLLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceEffectiveTokenIndex[argStreamId], effectiveTokenIndex, (maxTLLength) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceNewTokenCount[argStreamId], newTokenCount, (wordLength) * sizeof(int), cudaMemcpyHostToDevice, stream);*/ } void Document::GPU2CPU(int argChunkId, int argStreamId, cudaStream_t& stream) { cudaMemcpyAsync(docChunkVec[argChunkId].TLTopic, deviceTLTopic[argStreamId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost, stream); /*cudaMemcpy(probMaxTopicFlagChunkVec[argChunkId],deviceProbMaxTopicFlag, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost); cudaMemcpy( probMaxFlagChunkVec[argChunkId],deviceProbMaxFlag, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost); cudaMemcpy(probMaxChunkVec[argChunkId],deviceProbMax, (TLLengthVec[argChunkId]) * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(probMaxTopicChunkVec[argChunkId], deviceProbMaxTopic, (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost);*/ //cudaMemcpyAsync(docChunkVec[argChunkId].TLMaxTopic, deviceMaxTopic[argStreamId], (TLLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost, stream); } // //void Document::PercentageCalculate() //{ // increasePercent = 0.0; // topicUnchangedPercent = 0.0; // for (int chunkId = 0; chunkId < numChunks; chunkId++) { // for (int i = 0; i < TLLengthVec[chunkId]; i++) { // increasePercent += float(probMaxFlagChunkVec[chunkId][i]); // topicUnchangedPercent += 
float(probMaxTopicFlagChunkVec[chunkId][i]); // } // } // printf("increasePercent:%f\n", increasePercent); // printf("topicUnchangedPercent:%f\n", topicUnchangedPercent); // printf("total num of tokens:%f\n", totalNumOfTokens); // increasePercent /= totalNumOfTokens; // topicUnchangedPercent /= totalNumOfTokens; // // //} // // void Document::deviceCounterMemAllocate() { for (int i = 0; i < numStreams; i++) { cudaMalloc((void**)&deviceCounterWTUpdateKernel[i], sizeof(unsigned int)); cudaMalloc((void**)&deviceCounterWTDenUpdateKernel[i], sizeof(unsigned int)); cudaMalloc((void**)&deviceCounterWTAdditionKernel[i], sizeof(unsigned int)); cudaMalloc((void**)&deviceCounterMaxTopicKernel[i], sizeof(unsigned int)); cudaMalloc((void**)&deviceCounterDTUpdateKernel[i], sizeof(unsigned int)); cudaMalloc((void**)&deviceCounterUpdateProbKernel[i], sizeof(unsigned int)); cudaMalloc((void**)&deviceCounterSampleKernelD[i], sizeof(unsigned int)); cudaMalloc((void**)&deviceCounterSampleKernelS[i], sizeof(unsigned int)); } }
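The Document methods above stage token-list chunks through per-stream device buffers with cudaMemcpyAsync/cudaMemsetAsync so that transfers for one chunk can overlap with work on another. Below is a minimal standalone sketch of that staging pattern, not code from this project: the names processChunk, chunkLen and numChunks are placeholders, and the sketch pins the host buffers with cudaMallocHost because cudaMemcpyAsync only overlaps with compute when the host side is page-locked (whether the surrounding code does that is not visible in this excerpt).

#include <cuda_runtime.h>

// Placeholder kernel standing in for the per-chunk sampling work.
__global__ void processChunk(int *data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] += 1;
}

int main() {
    const int numStreams = 2;          // mirrors the per-stream buffer arrays above
    const int numChunks  = 8;
    const int chunkLen   = 1 << 20;

    cudaStream_t streams[numStreams];
    int *hostChunk[numStreams];
    int *devChunk[numStreams];
    for (int s = 0; s < numStreams; ++s) {
        cudaStreamCreate(&streams[s]);
        cudaMallocHost((void **)&hostChunk[s], chunkLen * sizeof(int)); // pinned, required for real overlap
        cudaMalloc((void **)&devChunk[s], chunkLen * sizeof(int));
    }

    for (int c = 0; c < numChunks; ++c) {
        int s = c % numStreams;                       // round-robin chunks over streams
        // (fill hostChunk[s] with the next chunk's data here)
        cudaMemcpyAsync(devChunk[s], hostChunk[s], chunkLen * sizeof(int),
                        cudaMemcpyHostToDevice, streams[s]);
        processChunk<<<(chunkLen + 255) / 256, 256, 0, streams[s]>>>(devChunk[s], chunkLen);
        cudaMemcpyAsync(hostChunk[s], devChunk[s], chunkLen * sizeof(int),
                        cudaMemcpyDeviceToHost, streams[s]);
    }
    cudaDeviceSynchronize();

    for (int s = 0; s < numStreams; ++s) {
        cudaStreamDestroy(streams[s]);
        cudaFreeHost(hostChunk[s]);
        cudaFree(devChunk[s]);
    }
    return 0;
}
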
3b3a58324183535882d84de129ce2d9e30fb166d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *input, float *output, int *indexes) { int h = (in_h + pad - size) / stride_y + 1; int w = (in_w + pad - size) / stride_x + 1; int c = in_c; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -pad / 2; int h_offset = -pad / 2; int out_index = j + w*(i + h*(k + c*b)); float max = -INFINITY; int max_i = -1; int l, m; for(l = 0; l < size; ++l){ for(m = 0; m < size; ++m){ int cur_h = h_offset + i*stride_y + l; int cur_w = w_offset + j*stride_x + m; int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w); float val = (valid != 0) ? input[index] : -INFINITY; max_i = (val > max) ? index : max_i; max = (val > max) ? val : max; } } output[out_index] = max; if (indexes) indexes[out_index] = max_i; }
3b3a58324183535882d84de129ce2d9e30fb166d.cu
#include "includes.h" __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *input, float *output, int *indexes) { int h = (in_h + pad - size) / stride_y + 1; int w = (in_w + pad - size) / stride_x + 1; int c = in_c; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int j = id % w; id /= w; int i = id % h; id /= h; int k = id % c; id /= c; int b = id; int w_offset = -pad / 2; int h_offset = -pad / 2; int out_index = j + w*(i + h*(k + c*b)); float max = -INFINITY; int max_i = -1; int l, m; for(l = 0; l < size; ++l){ for(m = 0; m < size; ++m){ int cur_h = h_offset + i*stride_y + l; int cur_w = w_offset + j*stride_x + m; int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c)); int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w); float val = (valid != 0) ? input[index] : -INFINITY; max_i = (val > max) ? index : max_i; max = (val > max) ? val : max; } } output[out_index] = max; if (indexes) indexes[out_index] = max_i; }
3e68976bcbc6a065ab8f332f8ff9a5eb03a0396c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ static int ros_Integrator_ros2(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T, // Integration parameters const int autonomous, const int vectorTol, const int Max_no_steps, const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, // Status parameters int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng, // cuda global mem buffers const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0, double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr, // for update_rconst const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // VL_GLO const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; double H, Hnew, HC, HG, Fac; // Tau - not used double Err; //*varErr; int direction; int rejectLastH, rejectMoreH; const double DELTAMIN = 1.0E-5; const int ros_S = 2; // ~~~> Initial preparations T = Tstart; Hexit = 0.0; H = fmin(Hstart,Hmax); if (fabs(H) <= 10.0*roundoff) H = DELTAMIN; if (Tend >= Tstart) { direction = + 1; } else { direction = - 1; } rejectLastH=0; rejectMoreH=0; // TimeLoop: while((direction > 0) && ((T- Tend)+ roundoff <= ZERO) || (direction < 0) && ((Tend-T)+ roundoff <= ZERO)) { if (Nstp > Max_no_steps) // Too many steps return -6; // Step size too small if (H <= roundoff){ // Step size too small //if (((T+ 0.1*H) == T) || (H <= roundoff)) { return -7; } // ~~~> Limit H if necessary to avoid going beyond Tend Hexit = H; H = fmin(H,fabs(Tend-T)); // ~~~> Compute the function at current time Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write // ~~~> Compute the function derivative with respect to T if (!autonomous) ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read // ~~~> Compute the Jacobian at current time Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ // ~~~> Repeat step calculation until current step accepted // UntilAccepted: while(1) { ros_PrepareMatrix(H, direction, 1.70710678118654752440084436210485, jac0, Ghimj, Nsng, Ndec, VL_GLO); // Stage 1 { for (int i=0; i<NVAR; i++) K(index,0,i) = Fcn0(index,i); if ((!autonomous)) { HG = direction*H*1.70710678118654752440084436210485; for (int i=0; i<NVAR; i++){ K(index,0,i) += dFdT(index,i)*HG; } } ros_Solve(Ghimj, K, Nsol, 0, 2); } // Stage 2 { for (int i=0; i<NVAR; i++){ varNew(index,i) = K(index,0,i)*.58578643762690495119831127579030 + var(index,i); } Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap HC = -1.17157287525380990239662255158060/(direction*H); for (int i=0; i<NVAR; i++){ K(index,1,i) = K(index,0,i)*HC + varNew(index,i) ; } if ((!autonomous)) { HG = direction*H*(-1.70710678118654752440084436210485); for (int i=0; i<NVAR; i++){ K(index,1,i) += dFdT(index,i)*HG; } } ros_Solve(Ghimj, K, Nsol, 1, 2); } // Stage for (int i=0; i<NVAR; i++){ varNew(index,i) = K(index,0,i)*(.87867965644035742679746691368545) + K(index,1,i)*(.29289321881345247559915563789515) + var(index,i) ; varErr(index,i) = K(index,0,i)*(.29289321881345247559915563789515) 
+ K(index,1,i)*(.29289321881345247559915563789515) ; } Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ // ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/2.0))); Hnew = H*Fac; // ~~~> Check the error magnitude and adjust step size Nstp = Nstp+ 1; if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step { Nacc = Nacc + 1; for (int j=0; j<NVAR ; j++) var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read T = T + direction*H; Hnew = fmax(Hmin,fmin(Hnew,Hmax)); if (rejectLastH) // No step size increase after a rejected step Hnew = fmin(Hnew,H); rejectLastH = 0; rejectMoreH = 0; H = Hnew; break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED } else // ~~~> Reject step { if (rejectMoreH) Hnew = H*FacRej; rejectMoreH = rejectLastH; rejectLastH = 1; H = Hnew; if (Nacc >= 1) Nrej += 1; } // Err <= 1 } // UntilAccepted } // TimeLoop // ~~~> Succesful exit return 0; // ~~~> The integration was successful } __global__ void Rosenbrock_ros2(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus, const int autonomous, const int vectorTol, const int UplimTol, const int Max_no_steps, double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst, const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff, // cuda global mem buffers const double * __restrict__ absTol, const double * __restrict__ relTol, const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // extra const double * __restrict__ temp_gpu, const double * __restrict__ press_gpu, const double * __restrict__ cair_gpu, const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; /* * In theory someone can aggregate accesses together, * however due to algorithm, threads access * different parts of memory, making it harder to * optimize accesses. * */ double *Ghimj = &d_Ghimj[index*LU_NONZERO]; double *K = &d_K[index*NVAR*3]; double *varNew = &d_varNew[index*NVAR]; double *Fcn0 = &d_Fcn0[index*NVAR]; double *dFdT = &d_dFdT[index*NVAR]; double *jac0 = &d_jac0[index*LU_NONZERO]; double *varErr = &d_varErr[index*NVAR]; double *var = &d_var[index*NSPEC]; double *fix = &d_fix[index*NFIX]; double *rconst = &d_rconst[index*NREACT]; if (index < VL_GLO) { int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng; double Texit, Hexit; Nfun = 0; Njac = 0; Nstp = 0; Nacc = 0; Nrej = 0; Ndec = 0; Nsol = 0; Nsng = 0; /* Copy data from global memory to temporary array */ /* * Optimization note: if we ever have enough constant * memory, we could use it for storing the data. * In current architectures if we use constant memory * only a few threads will be able to run on the fly. 
* */ for (int i=0; i<NSPEC; i++) var(index,i) = conc(index,i); for (int i=0; i<NFIX; i++) fix(index,i) = conc(index,NVAR+i); update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO); ros_Integrator_ros2(var, fix, Tstart, Tend, Texit, // Integration parameters autonomous, vectorTol, Max_no_steps, roundoff, Hmin, Hmax, Hstart, Hexit, FacMin, FacMax, FacRej, FacSafe, // Status parameters Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng, // cuda global mem buffers rconst, absTol, relTol, varNew, Fcn0, K, dFdT, jac0, Ghimj, varErr, // For update rconst khet_st, khet_tr, jx, VL_GLO ); for (int i=0; i<NVAR; i++) conc(index,i) = var(index,i); /* Statistics */ istatus(index,ifun) = Nfun; istatus(index,ijac) = Njac; istatus(index,istp) = Nstp; istatus(index,iacc) = Nacc; istatus(index,irej) = Nrej; istatus(index,idec) = Ndec; istatus(index,isol) = Nsol; istatus(index,isng) = Nsng; // Last T and H rstatus(index,itexit) = Texit; rstatus(index,ihexit) = Hexit; } }
3e68976bcbc6a065ab8f332f8ff9a5eb03a0396c.cu
__device__ static int ros_Integrator_ros2(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T, // Integration parameters const int autonomous, const int vectorTol, const int Max_no_steps, const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, // Status parameters int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng, // cuda global mem buffers const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0, double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr, // for update_rconst const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // VL_GLO const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; double H, Hnew, HC, HG, Fac; // Tau - not used double Err; //*varErr; int direction; int rejectLastH, rejectMoreH; const double DELTAMIN = 1.0E-5; const int ros_S = 2; // ~~~> Initial preparations T = Tstart; Hexit = 0.0; H = fmin(Hstart,Hmax); if (fabs(H) <= 10.0*roundoff) H = DELTAMIN; if (Tend >= Tstart) { direction = + 1; } else { direction = - 1; } rejectLastH=0; rejectMoreH=0; // TimeLoop: while((direction > 0) && ((T- Tend)+ roundoff <= ZERO) || (direction < 0) && ((Tend-T)+ roundoff <= ZERO)) { if (Nstp > Max_no_steps) // Too many steps return -6; // Step size too small if (H <= roundoff){ // Step size too small //if (((T+ 0.1*H) == T) || (H <= roundoff)) { return -7; } // ~~~> Limit H if necessary to avoid going beyond Tend Hexit = H; H = fmin(H,fabs(Tend-T)); // ~~~> Compute the function at current time Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write // ~~~> Compute the function derivative with respect to T if (!autonomous) ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read // ~~~> Compute the Jacobian at current time Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ // ~~~> Repeat step calculation until current step accepted // UntilAccepted: while(1) { ros_PrepareMatrix(H, direction, 1.70710678118654752440084436210485, jac0, Ghimj, Nsng, Ndec, VL_GLO); // Stage 1 { for (int i=0; i<NVAR; i++) K(index,0,i) = Fcn0(index,i); if ((!autonomous)) { HG = direction*H*1.70710678118654752440084436210485; for (int i=0; i<NVAR; i++){ K(index,0,i) += dFdT(index,i)*HG; } } ros_Solve(Ghimj, K, Nsol, 0, 2); } // Stage 2 { for (int i=0; i<NVAR; i++){ varNew(index,i) = K(index,0,i)*.58578643762690495119831127579030 + var(index,i); } Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap HC = -1.17157287525380990239662255158060/(direction*H); for (int i=0; i<NVAR; i++){ K(index,1,i) = K(index,0,i)*HC + varNew(index,i) ; } if ((!autonomous)) { HG = direction*H*(-1.70710678118654752440084436210485); for (int i=0; i<NVAR; i++){ K(index,1,i) += dFdT(index,i)*HG; } } ros_Solve(Ghimj, K, Nsol, 1, 2); } // Stage for (int i=0; i<NVAR; i++){ varNew(index,i) = K(index,0,i)*(.87867965644035742679746691368545) + K(index,1,i)*(.29289321881345247559915563789515) + var(index,i) ; varErr(index,i) = K(index,0,i)*(.29289321881345247559915563789515) + K(index,1,i)*(.29289321881345247559915563789515) ; } Err = ros_ErrorNorm(var, varNew, 
varErr, absTol, relTol, vectorTol); /// VAR-varNew READ // ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/2.0))); Hnew = H*Fac; // ~~~> Check the error magnitude and adjust step size Nstp = Nstp+ 1; if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step { Nacc = Nacc + 1; for (int j=0; j<NVAR ; j++) var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read T = T + direction*H; Hnew = fmax(Hmin,fmin(Hnew,Hmax)); if (rejectLastH) // No step size increase after a rejected step Hnew = fmin(Hnew,H); rejectLastH = 0; rejectMoreH = 0; H = Hnew; break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED } else // ~~~> Reject step { if (rejectMoreH) Hnew = H*FacRej; rejectMoreH = rejectLastH; rejectLastH = 1; H = Hnew; if (Nacc >= 1) Nrej += 1; } // Err <= 1 } // UntilAccepted } // TimeLoop // ~~~> Succesful exit return 0; // ~~~> The integration was successful } __global__ void Rosenbrock_ros2(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus, const int autonomous, const int vectorTol, const int UplimTol, const int Max_no_steps, double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst, const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff, // cuda global mem buffers const double * __restrict__ absTol, const double * __restrict__ relTol, const double * __restrict__ khet_st, const double * __restrict__ khet_tr, const double * __restrict__ jx, // extra const double * __restrict__ temp_gpu, const double * __restrict__ press_gpu, const double * __restrict__ cair_gpu, const int VL_GLO) { int index = blockIdx.x*blockDim.x+threadIdx.x; /* * In theory someone can aggregate accesses together, * however due to algorithm, threads access * different parts of memory, making it harder to * optimize accesses. * */ double *Ghimj = &d_Ghimj[index*LU_NONZERO]; double *K = &d_K[index*NVAR*3]; double *varNew = &d_varNew[index*NVAR]; double *Fcn0 = &d_Fcn0[index*NVAR]; double *dFdT = &d_dFdT[index*NVAR]; double *jac0 = &d_jac0[index*LU_NONZERO]; double *varErr = &d_varErr[index*NVAR]; double *var = &d_var[index*NSPEC]; double *fix = &d_fix[index*NFIX]; double *rconst = &d_rconst[index*NREACT]; if (index < VL_GLO) { int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng; double Texit, Hexit; Nfun = 0; Njac = 0; Nstp = 0; Nacc = 0; Nrej = 0; Ndec = 0; Nsol = 0; Nsng = 0; /* Copy data from global memory to temporary array */ /* * Optimization note: if we ever have enough constant * memory, we could use it for storing the data. * In current architectures if we use constant memory * only a few threads will be able to run on the fly. 
* */ for (int i=0; i<NSPEC; i++) var(index,i) = conc(index,i); for (int i=0; i<NFIX; i++) fix(index,i) = conc(index,NVAR+i); update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO); ros_Integrator_ros2(var, fix, Tstart, Tend, Texit, // Integration parameters autonomous, vectorTol, Max_no_steps, roundoff, Hmin, Hmax, Hstart, Hexit, FacMin, FacMax, FacRej, FacSafe, // Status parameters Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng, // cuda global mem buffers rconst, absTol, relTol, varNew, Fcn0, K, dFdT, jac0, Ghimj, varErr, // For update rconst khet_st, khet_tr, jx, VL_GLO ); for (int i=0; i<NVAR; i++) conc(index,i) = var(index,i); /* Statistics */ istatus(index,ifun) = Nfun; istatus(index,ijac) = Njac; istatus(index,istp) = Nstp; istatus(index,iacc) = Nacc; istatus(index,irej) = Nrej; istatus(index,idec) = Ndec; istatus(index,isol) = Nsol; istatus(index,isng) = Nsng; // Last T and H rstatus(index,itexit) = Texit; rstatus(index,ihexit) = Hexit; } }
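The ROS2 integrator above relies on symbols that come from generated KPP/accelerator headers which are not part of this file: NVAR, NSPEC, NFIX, NREACT, LU_NONZERO, ZERO, ONE, the status indices (ifun, ijac, itexit, ...), the chemistry routines (Fun, Jac_sp, ros_FunTimeDerivative, ros_PrepareMatrix, ros_Solve, ros_ErrorNorm, update_rconst), and the function-like macros var(...), fix(...), K(...), conc(...), istatus(...), rstatus(...). The definitions below are only one self-consistent possibility, written to match the per-thread base pointers computed at the top of Rosenbrock_ros2 (local indexing for the thread-private work arrays, strided indexing for the arrays shared across all VL_GLO cells); the actual generated headers may use a different memory layout.

// Illustrative only: an assumed set of indexing macros, not the generated KPP headers.
// Per-thread work arrays: the base pointer is already offset by index*<size>, so the
// first macro argument (the cell/thread index) is ignored and only the local element
// index is used. Function-like macros do not expand the bare pointer names themselves.
#define var(i, j)      var[(j)]
#define fix(i, j)      fix[(j)]
#define Fcn0(i, j)     Fcn0[(j)]
#define dFdT(i, j)     dFdT[(j)]
#define varNew(i, j)   varNew[(j)]
#define varErr(i, j)   varErr[(j)]
#define K(i, s, j)     K[(s)*NVAR + (j)]          // stage s, species j (3 stages allocated)

// Arrays shared across all cells: assumed species-major so that neighbouring threads
// touch neighbouring addresses (coalesced accesses).
#define conc(i, j)     conc[(j)*VL_GLO + (i)]
#define istatus(i, j)  istatus[(j)*VL_GLO + (i)]
#define rstatus(i, j)  rstatus[(j)*VL_GLO + (i)]
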
eaa76924de598290f7f60b6b5c4391ed4d32e845.hip
// !!! This is a file automatically generated by hipify!!! #include "acc.cuh" #include "gpu.hpp" #include <cmath> #include <climits> #include "util.hpp" #include <hip/hip_runtime.h> #include <chrono> using namespace std::chrono; /* This code was copied from * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions * since there is no atomicAdd for GPUs with Compute Capability less than 6.0. * The dummy parameter fixes any overload or double definition errors. */ __device__ double atomicAdd(double* address, double val, double dummy) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } __host__ double gpu_probing(uint64_t n_ops, int64_t M, int64_t k) { high_resolution_clock::time_point t1 = high_resolution_clock::now(); gpu_integration(n_ops, M, k); high_resolution_clock::time_point t2 = high_resolution_clock::now(); duration<double> dur = duration_cast<duration<double>>(t2 - t1); double flops = n_ops / dur.count(); return flops; } __device__ double gpu_f(int64_t M, int64_t k, double x) { return (sin((2 * M + 1) * M_PI * x) * cos(2 * M_PI * k * x)) / sin(M_PI * x); } /** Calculates f for n_ops points and puts the sum in the global memory */ __global__ void gpu_calc(hiprandState_t *states, double *sum, double *sum_sq, uint64_t n_ops_thread, int64_t M, int64_t k, uint64_t n_threads, uint64_t n_leap_ops) { uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; int lid = threadIdx.x; __shared__ double block_sum[1024]; __shared__ double block_sum_sq[1024]; block_sum[lid] = block_sum_sq[lid] = 0; if (tid >= n_threads) return; double x, res; hiprandState_t local_state = states[tid]; hiprand_init (tid, 0, 0, &local_state); n_ops_thread += tid < n_leap_ops; for (uint64_t i = 0; i < n_ops_thread; i++) { x = hiprand_uniform_double(&local_state) / 2; res = gpu_f(M, k, x); block_sum[lid] += res; block_sum_sq[lid] += res * res; } __syncthreads(); // Reduction phase for (int s = blockDim.x / 2; s > 0 && lid < s; s >>= 1) { block_sum[lid] += block_sum[lid + s]; block_sum_sq[lid] += block_sum_sq[lid + s]; __syncthreads(); } if (lid == 0) { atomicAdd(sum, block_sum[0], 0); atomicAdd(sum_sq, block_sum_sq[0], 0); } } __host__ std::vector<double> gpu_integration(uint64_t n_ops, int64_t M, int64_t k) { if (!n_ops) { std::vector<double> res = {0, 0}; return res; } int block_size = 1024; // Divide the work // The GPU has n_ops operations to do uint64_t n_threads = min((uint64_t) ULONG_MAX * block_size, n_ops); uint64_t n_blocks = ceil(n_threads / (float) block_size); uint64_t n_ops_thread = n_ops / n_threads; uint64_t n_leap_ops = n_ops % n_threads; /********************************* * CUDA environment and device setup *********************************/ // States for the random generator hiprandState_t *states; // Allocate space for the states hipMalloc((void **)&states, n_ops * sizeof(hiprandState_t)); // Pointer for the device global sum and sum_sq variables double *sum; // Allocate both variables as an array to save code lines :) hipMalloc((void **)&sum, 2 * sizeof(double)); // I want to pass two variables to gpu_calc and not an array double *sum_sq = sum + 1; // Zero the variables as they will be part of a summation hipMemset(sum, 0, 2 * 
sizeof(double)); // Actually call the calc function hipLaunchKernelGGL(( gpu_calc), dim3(n_blocks), dim3(block_size), 0, 0, states, sum, sum_sq, n_ops_thread, M, k, n_threads, n_leap_ops); std::vector<double> sums(2); hipMemcpy(sums.data(), sum, 2 * sizeof(double), hipMemcpyDeviceToHost); hipFree(states); return sums; }
eaa76924de598290f7f60b6b5c4391ed4d32e845.cu
#include "acc.cuh" #include "gpu.hpp" #include <cmath> #include <climits> #include "util.hpp" #include <cuda_runtime.h> #include <chrono> using namespace std::chrono; /* This code was copied from * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions * since there is no atomicAdd for GPUs with Compute Capability less than 6.0. * The dummy parameter fixes any overload or double definition errors. */ __device__ double atomicAdd(double* address, double val, double dummy) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } __host__ double gpu_probing(uint64_t n_ops, int64_t M, int64_t k) { high_resolution_clock::time_point t1 = high_resolution_clock::now(); gpu_integration(n_ops, M, k); high_resolution_clock::time_point t2 = high_resolution_clock::now(); duration<double> dur = duration_cast<duration<double>>(t2 - t1); double flops = n_ops / dur.count(); return flops; } __device__ double gpu_f(int64_t M, int64_t k, double x) { return (sin((2 * M + 1) * M_PI * x) * cos(2 * M_PI * k * x)) / sin(M_PI * x); } /** Calculates f for n_ops points and puts the sum in the global memory */ __global__ void gpu_calc(curandState_t *states, double *sum, double *sum_sq, uint64_t n_ops_thread, int64_t M, int64_t k, uint64_t n_threads, uint64_t n_leap_ops) { uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; int lid = threadIdx.x; __shared__ double block_sum[1024]; __shared__ double block_sum_sq[1024]; block_sum[lid] = block_sum_sq[lid] = 0; if (tid >= n_threads) return; double x, res; curandState_t local_state = states[tid]; curand_init (tid, 0, 0, &local_state); n_ops_thread += tid < n_leap_ops; for (uint64_t i = 0; i < n_ops_thread; i++) { x = curand_uniform_double(&local_state) / 2; res = gpu_f(M, k, x); block_sum[lid] += res; block_sum_sq[lid] += res * res; } __syncthreads(); // Reduction phase for (int s = blockDim.x / 2; s > 0 && lid < s; s >>= 1) { block_sum[lid] += block_sum[lid + s]; block_sum_sq[lid] += block_sum_sq[lid + s]; __syncthreads(); } if (lid == 0) { atomicAdd(sum, block_sum[0], 0); atomicAdd(sum_sq, block_sum_sq[0], 0); } } __host__ std::vector<double> gpu_integration(uint64_t n_ops, int64_t M, int64_t k) { if (!n_ops) { std::vector<double> res = {0, 0}; return res; } int block_size = 1024; // Divide the work // The GPU has n_ops operations to do uint64_t n_threads = min((uint64_t) ULONG_MAX * block_size, n_ops); uint64_t n_blocks = ceil(n_threads / (float) block_size); uint64_t n_ops_thread = n_ops / n_threads; uint64_t n_leap_ops = n_ops % n_threads; /********************************* * CUDA environment and device setup *********************************/ // States for the random generator curandState_t *states; // Allocate space for the states cudaMalloc((void **)&states, n_ops * sizeof(curandState)); // Pointer for the device global sum and sum_sq variables double *sum; // Allocate both variables as an array to save code lines :) cudaMalloc((void **)&sum, 2 * sizeof(double)); // I want to pass two variables to gpu_calc and not an array double *sum_sq = sum + 1; // Zero the variables as they will be part of a summation cudaMemset(sum, 0, 2 * sizeof(double)); // Actually call the calc function gpu_calc<<<n_blocks, 
block_size>>>(states, sum, sum_sq, n_ops_thread, M, k, n_threads, n_leap_ops); std::vector<double> sums(2); cudaMemcpy(sums.data(), sum, 2 * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(states); return sums; }
3b1ce63c3983acbbb7c91d195dd44786c4e93ad3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* A program that shows the effect of increasing the thread count while keeping the input size and block count constant. Performance is improved as the number of threads increase. */ /******************************* 1 - Install nvidia-cuda-toolkit 2 - Compile this program using: nvcc add.cu -o add_cuda.out *******************************/ #include <iostream> #include <math.h> #include <ctime> #include <cstdio> //CUDA kernel to add elements of the matrix // __global__ converts a function into a CUDA kernel __global__ void add(int n, float *x, float *y) { // index of the current thread within the block int index = blockIdx.x * blockDim.x + threadIdx.x; // number of threads in a block int stride = blockDim.x * gridDim.x; // run each addition on a separate thread for (int i = index; i < n; i+=stride) y[i] = x[i] + y[i]; } int main(void) { for(int t = 32; t <= 1024; t+=32) { int N = 1<<24; // 1M elements // Memory allocation in CUDA is done with hipMallocManaged( , ) float *x; float *y; hipMallocManaged( &x, N*sizeof(float) ); hipMallocManaged( &y, N*sizeof(float) ); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } std::clock_t start = clock(); // Launch the 'add' kernel, which invokes it in the GPU // Run kernel on 1M elements on the CPU hipLaunchKernelGGL(( add), dim3(1),dim3(t), 0, 0, N, x, y); // Wait for the GPU to synchronize before accessign through host(CPU) hipDeviceSynchronize(); std::clock_t stop = clock(); int duration = 1000 * (stop - start) / (double)CLOCKS_PER_SEC; //std::cout << "Running time using " << t << " threads = " << duration << "\n"; std::cout << duration << "\n"; // Check for errors (all values should be 3.0f) /*float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; */ // Deallocating memory using hipFree() hipFree(x); hipFree(y); } return 0; }
3b1ce63c3983acbbb7c91d195dd44786c4e93ad3.cu
/* A program that shows the effect of increasing the thread count while keeping the input size and block count constant. Performance is improved as the number of threads increase. */ /******************************* 1 - Install nvidia-cuda-toolkit 2 - Compile this program using: nvcc add.cu -o add_cuda.out *******************************/ #include <iostream> #include <math.h> #include <ctime> #include <cstdio> //CUDA kernel to add elements of the matrix // __global__ converts a function into a CUDA kernel __global__ void add(int n, float *x, float *y) { // index of the current thread within the block int index = blockIdx.x * blockDim.x + threadIdx.x; // number of threads in a block int stride = blockDim.x * gridDim.x; // run each addition on a separate thread for (int i = index; i < n; i+=stride) y[i] = x[i] + y[i]; } int main(void) { for(int t = 32; t <= 1024; t+=32) { int N = 1<<24; // 1M elements // Memory allocation in CUDA is done with cudaMallocManaged( , ) float *x; float *y; cudaMallocManaged( &x, N*sizeof(float) ); cudaMallocManaged( &y, N*sizeof(float) ); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } std::clock_t start = clock(); // Launch the 'add' kernel, which invokes it in the GPU // Run kernel on 1M elements on the CPU add<<<1,t>>>(N, x, y); // Wait for the GPU to synchronize before accessign through host(CPU) cudaDeviceSynchronize(); std::clock_t stop = clock(); int duration = 1000 * (stop - start) / (double)CLOCKS_PER_SEC; //std::cout << "Running time using " << t << " threads = " << duration << "\n"; std::cout << duration << "\n"; // Check for errors (all values should be 3.0f) /*float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; */ // Deallocating memory using cudaFree() cudaFree(x); cudaFree(y); } return 0; }
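The benchmark above times each launch with std::clock around a blocking cudaDeviceSynchronize. An alternative, sketched below rather than taken from the original program, is to time the kernel with CUDA events, which report device-side elapsed time in milliseconds with sub-millisecond resolution; the add kernel is repeated verbatim so the sketch compiles on its own.

#include <cuda_runtime.h>
#include <cstdio>

// Same grid-stride add kernel as in the program above.
__global__ void add(int n, float *x, float *y) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

int main() {
    const int N = 1 << 24;
    float *x, *y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));
    for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    for (int t = 32; t <= 1024; t += 32) {
        cudaEventRecord(start);
        add<<<1, t>>>(N, x, y);          // single block, as in the original sweep
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);      // wait for the kernel, then read the timer
        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);
        printf("%d threads: %.3f ms\n", t, ms);
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(x);
    cudaFree(y);
    return 0;
}
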
7b260443aae1f23d27d1a22d6e989fb63927bf91.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" using paddle::platform::PADDLE_CUDA_NUM_THREADS; using paddle::platform::float16; namespace paddle { namespace operators { // CUDA: index helpers #define idx4_4(index, d1, d2, d3, d4) (index % d4) #define idx4_3(index, d1, d2, d3, d4) ((index / d4) % d3) #define idx4_2(index, d1, d2, d3, d4) ((index / d4 / d3) % d2) #define idx4_1(index, d1, d2, d3, d4) ((index / d4 / d3 / d2) % d1) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) template <typename T> __device__ bool GT_E(T a, T b) { return (a > b) || fabs(a - b) < 1e-4; } template <typename T> __device__ bool LT_E(T a, T b) { return (a < b) || fabs(a - b) < 1e-4; } template <typename T> __device__ bool GT(T a, T b) { return (a - b) > 1e-4; } template <typename T> __device__ T max(T a, T b) { return a > b ? a : b; } template <typename T> __device__ T min(T a, T b) { return a < b ? a : b; } /* * check if (x, y) is in the boundary of roi */ template <typename T> __device__ bool in_quad(T x, T y, T roi_x[], T roi_y[]) { for (int i = 0; i < 4; i++) { T start_w = roi_x[i]; T start_h = roi_y[i]; T end_w = roi_x[(i + 1) % 4]; T end_h = roi_y[(i + 1) % 4]; if (fabs(start_h - end_h) < 1e-4) { if (fabs(y - start_h) < 1e-4 && fabs(y - end_h) < 1e-4 && GT_E<T>(x, min<T>(start_w, end_w)) && LT_E<T>(x, max<T>(start_w, end_w))) { return true; } } else { T intersec_x = (y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w; if (fabs(intersec_x - x) < 1e-4 && GT_E(y, min<T>(start_h, end_h)) && LT_E<T>(y, max<T>(start_h, end_h))) { return true; } } } int n_cross = 0; for (int i = 0; i < 4; i++) { T start_w = roi_x[i]; T start_h = roi_y[i]; T end_w = roi_x[(i + 1) % 4]; T end_h = roi_y[(i + 1) % 4]; if (fabs(start_h - end_h) < 1e-4) { continue; } if (LT_E<T>(y, min<T>(start_h, end_h)) || GT<T>(y, max<T>(start_h, end_h))) { continue; } T intersec_x = (y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w; if (fabs(intersec_x - x) < 1e-4) { return true; } if (GT<T>(intersec_x, x)) { n_cross++; } } return (n_cross % 2 == 1); } /** * Perform bilinear interpolation in the input feature map. 
*/ template <typename T> __device__ void bilinear_interpolate(const T* in_data, const int channels, const int width, const int height, int in_n, int in_c, T in_w, T in_h, T* val) { // Deal with cases that source coords are out of feature map boundary if (GT<T>(-0.5, in_w) || GT<T>(in_w, width - 0.5) || GT<T>(-0.5, in_h) || GT<T>(in_h, height - 0.5)) { val[0] = 0.0; return; } if (GT<T>(0, in_w)) { in_w = 0; } if (GT<T>(0, in_h)) { in_h = 0; } int in_w_floor = floor(in_w); int in_h_floor = floor(in_h); int in_w_ceil; int in_h_ceil; if (GT_E<T>(in_w_floor, width - 1)) { in_w_ceil = in_w_floor = width - 1; in_w = static_cast<T>(in_w_floor); } else { in_w_ceil = in_w_floor + 1; } if (GT_E<T>(in_h_floor, height - 1)) { in_h_ceil = in_h_floor = height - 1; in_h = static_cast<T>(in_h_floor); } else { in_h_ceil = in_h_floor + 1; } T w_floor = in_w - in_w_floor; T h_floor = in_h - in_h_floor; T w_ceil = 1 - w_floor; T h_ceil = 1 - h_floor; const T* data = in_data + (in_n * channels + in_c) * height * width; // Do bilinear interpolation T v1 = data[in_h_floor * width + in_w_floor]; T v2 = data[in_h_ceil * width + in_w_floor]; T v3 = data[in_h_ceil * width + in_w_ceil]; T v4 = data[in_h_floor * width + in_w_ceil]; T w1 = w_ceil * h_ceil; T w2 = w_ceil * h_floor; T w3 = w_floor * h_floor; T w4 = w_floor * h_ceil; val[0] = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4; } /** * Get the source coordinates in the input feature map. * * (u, v, w)^matrix = T * (out_w, out_h, 1)^matrix * * in_w = u / w * in_h = v / w * */ template <typename T> __device__ void get_source_coords(T matrix[], int out_w, int out_h, T* in_w, T* in_h) { T u = matrix[0] * out_w + matrix[1] * out_h + matrix[2]; T v = matrix[3] * out_w + matrix[4] * out_h + matrix[5]; T w = matrix[6] * out_w + matrix[7] * out_h + matrix[8]; in_w[0] = u / w; in_h[0] = v / w; } /** * Get the matrix of perspective transform. 
* * dx1 = x1 - x2 * dx2 = x3 - x2 * dx3 = x0 - x1 + x2 - x3 * dy1 = y1 - y2 * dy2 = y3 - y2 * dy3 = y0 - y1 + y2 - y3 * * a11 = (x1 - x0 + a31 * (w - 1) * x1) / (w - 1) * a12 = (x3 - x0 + a32 * (h - 1) * x3) / (h - 1) * a13 = x0 * a21 = (y1 - y0 + a31 * (w - 1) * y1) / (w - 1) * a22 = (y3 - y0 + a32 * (h - 1) * y3) / (h - 1) * a23 = y0 * a31 = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (w - 1) * a32 = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (h - 1) * a33 = 1 * */ template <typename T> __device__ void get_transform_matrix(const int transformed_width, const int transformed_height, T roi_x[], T roi_y[], T matrix[]) { T x0 = roi_x[0]; T x1 = roi_x[1]; T x2 = roi_x[2]; T x3 = roi_x[3]; T y0 = roi_y[0]; T y1 = roi_y[1]; T y2 = roi_y[2]; T y3 = roi_y[3]; // Estimate the height and width of RoI T len1 = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1)); T len2 = sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)); T len3 = sqrt((x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3)); T len4 = sqrt((x3 - x0) * (x3 - x0) + (y3 - y0) * (y3 - y0)); T estimated_height = (len2 + len4) / 2.0; T estimated_width = (len1 + len3) / 2.0; // Get the normalized height and normalized width int normalized_height = transformed_height; int normalized_width = round(estimated_width * (normalized_height - 1) / estimated_height) + 1; normalized_width = min(normalized_width, transformed_width); T dx1 = x1 - x2; T dx2 = x3 - x2; T dx3 = x0 - x1 + x2 - x3; T dy1 = y1 - y2; T dy2 = y3 - y2; T dy3 = y0 - y1 + y2 - y3; matrix[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (normalized_width - 1); matrix[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (normalized_height - 1); matrix[8] = 1; matrix[3] = (y1 - y0 + matrix[6] * (normalized_width - 1) * y1) / (normalized_width - 1); matrix[4] = (y3 - y0 + matrix[7] * (normalized_height - 1) * y3) / (normalized_height - 1); matrix[5] = y0; matrix[0] = (x1 - x0 + matrix[6] * (normalized_width - 1) * x1) / (normalized_width - 1); matrix[1] = (x3 - x0 + matrix[7] * (normalized_height - 1) * x3) / (normalized_height - 1); matrix[2] = x0; } template <typename T> __global__ void RoiTransformKernel(const float* input_data, const float* rois_data, const int* roi2image_data, int num_rois, int in_height, int in_width, int channels, int transformed_height, int transformed_width, float spatial_scale, T* output_data) { int output_size = num_rois * transformed_height * transformed_width * channels; CUDA_1D_KERNEL_LOOP(index, output_size) { // (n, c, out_h, out_w) is an element in the transformed output int out_w = idx4_4(index, num_rois, channels, transformed_height, transformed_width); int out_h = idx4_3(index, num_rois, channels, transformed_height, transformed_width); int c = idx4_2(index, num_rois, channels, transformed_height, transformed_width); int n = idx4_1(index, num_rois, channels, transformed_height, transformed_width); auto bottom_rois = rois_data + n * 8; int roi_batch_ind = bottom_rois[0]; T roi_x[4]; T roi_y[4]; for (int k = 0; k < 4; ++k) { roi_x[k] = bottom_rois[2 * k] * spatial_scale; roi_y[k] = bottom_rois[2 * k + 1] * spatial_scale; } // Get transform matrix T matrix[9]; get_transform_matrix<T>(transformed_width, transformed_height, roi_x, roi_y, matrix); // Get source coords T in_w; T in_h; get_source_coords<T>(matrix, out_w, out_h, &in_w, &in_h); if (in_quad<T>(in_w, in_h, roi_x, roi_y)) { if (GT<T>(-0.5, in_w) || GT<T>(in_w, static_cast<T>(in_width - 0.5)) || GT<T>(-0.5, in_h) || GT<T>(in_h, static_cast<T>(in_height - 0.5))) { // Skip if 
source coords is not in input image output_data[index] = 0.0; } else { // Perform bilinear interpolation int in_n = roi2image_data[n]; bilinear_interpolate<T>(input_data, channels, in_width, in_height, in_n, c, in_w, in_h, output_data + index); } } else { // Skip if source coords is not in quad output_data[index] = 0.0; } } } template <typename T> class CUDAROIPerspectiveTransformOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<framework::Tensor>("X"); auto* rois = ctx.Input<framework::LoDTensor>("ROIs"); auto* out = ctx.Output<framework::Tensor>("Out"); auto transformed_height = ctx.Attr<int>("transformed_height"); auto transformed_width = ctx.Attr<int>("transformed_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int channels = in_dims[1]; int in_height = in_dims[2]; int in_width = in_dims[3]; int rois_num = rois->dims()[0]; const T* input_data = in->data<T>(); T* output_data = out->mutable_data<T>(ctx.GetPlace()); const T* rois_data = rois->data<T>(); framework::Tensor roi2image; framework::Tensor roi2image_dev; roi2image.Resize({rois_num}); int* roi2image_data = roi2image.mutable_data<int>(platform::CPUPlace()); auto lod = rois->lod().back(); for (size_t i = 0; i < lod.size() - 1; ++i) { for (size_t j = lod[i]; j < lod[i + 1]; ++j) { roi2image_data[j] = i; } } TensorCopySync(roi2image, ctx.GetPlace(), &roi2image_dev); int out_size = rois_num * transformed_height * transformed_width * channels; auto stream = ctx.cuda_device_context().stream(); int block = 512; int grid = (out_size + block - 1) / block; hipLaunchKernelGGL(( RoiTransformKernel<T>), dim3(grid), dim3(block), 0, stream, input_data, rois_data, roi2image_dev.data<int>(), rois_num, in_height, in_width, channels, transformed_height, transformed_width, spatial_scale, output_data); } }; template <typename T> __device__ T get_feature_gradient(T xs, T ys, int w, int h, const int width, const int height) { if (GT<T>(-0.5, xs) || GT<T>(xs, width - 0.5) || GT<T>(-0.5, ys) || GT<T>(ys, height - 0.5)) { return 0; } if (GT<T>(0, xs)) { xs = 0; } if (GT<T>(0, ys)) { ys = 0; } int xs_floor = floor(xs); int ys_floor = floor(ys); int xs_ceil; int ys_ceil; if (GT_E<T>(xs_floor, width - 1)) { xs_ceil = xs_floor = width - 1; xs = static_cast<T>(xs_floor); } else { xs_ceil = xs_floor + 1; } if (GT_E(ys_floor, height - 1)) { ys_ceil = ys_floor = height - 1; ys = static_cast<T>(ys_floor); } else { ys_ceil = ys_floor + 1; } T weight = 0; if (w == xs_floor) { if (h == ys_floor) { weight = (w + 1 - xs) * (h + 1 - ys); } else if (h == ys_ceil) { weight = (w + 1 - xs) * (ys + 1 - h); } } else if (w == xs_ceil) { if (h == ys_floor) { weight = (xs + 1 - w) * (h + 1 - ys); } else if (h == ys_ceil) { weight = (xs + 1 - w) * (ys + 1 - h); } } return weight; } template <typename T> __global__ void RoiTransformGradKernel( const size_t* lod, const T* rois_data, int batch_size, int num_rois, int in_height, int in_width, int channels, int transformed_height, int transformed_width, float spatial_scale, const T* out_grad_data, T* in_grad_data) { int input_size = batch_size * in_height * in_width * channels; CUDA_1D_KERNEL_LOOP(index, input_size) { // (n, c, h, w) coords in input int in_w = idx4_4(index, batch_size, channels, in_height, in_width); int in_h = idx4_3(index, batch_size, channels, in_height, in_width); int c = idx4_2(index, batch_size, channels, in_height, in_width); int n = idx4_1(index, 
batch_size, channels, in_height, in_width); T gradient = 0.0; // Accumulate gradient over all RoIs that interpolated this element for (size_t roi_idx = lod[n]; roi_idx < lod[n + 1]; ++roi_idx) { const T* rois = rois_data + roi_idx * 8; T roi_x[4]; T roi_y[4]; for (int k = 0; k < 4; ++k) { roi_x[k] = rois[2 * k] * spatial_scale; roi_y[k] = rois[2 * k + 1] * spatial_scale; } // Get transform matrix T matrix[9]; get_transform_matrix<T>(transformed_width, transformed_height, roi_x, roi_y, matrix); const T* out_grad_ptr = out_grad_data + (roi_idx * channels + c) * transformed_height * transformed_width; for (int out_h = 0; out_h < transformed_height; ++out_h) { for (int out_w = 0; out_w < transformed_width; ++out_w) { T src_w; T src_h; get_source_coords<T>(matrix, out_w, out_h, &src_w, &src_h); if (in_quad<T>(src_w, src_h, roi_x, roi_y)) { if (GT<T>(-0.5, src_w) || GT<T>(src_w, static_cast<T>(in_width - 0.5)) || GT<T>(-0.5, src_h) || GT<T>(src_h, static_cast<T>(in_height - 0.5))) { continue; } T weight = get_feature_gradient<T>(src_w, src_h, in_w, in_h, in_width, in_height); gradient += out_grad_ptr[out_h * transformed_width + out_w] * weight; } } } } in_grad_data[index] = gradient; } } template <typename T> class CUDAROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<framework::Tensor>("X"); auto* rois = ctx.Input<framework::LoDTensor>("ROIs"); auto* out_grad = ctx.Input<framework::Tensor>(framework::GradVarName("Out")); auto* in_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X")); auto transformed_height = ctx.Attr<int>("transformed_height"); auto transformed_width = ctx.Attr<int>("transformed_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int channels = in_dims[1]; int in_height = in_dims[2]; int in_width = in_dims[3]; int rois_num = rois->dims()[0]; T* in_grad_data = in_grad->mutable_data<T>(ctx.GetPlace()); const T* out_grad_data = out_grad->data<T>(); const T* rois_data = rois->data<T>(); auto lod = rois->lod().back(); auto lod_data = lod.CUDAData(ctx.GetPlace()); int in_size = in->numel(); auto stream = ctx.cuda_device_context().stream(); int block = 512; int grid = (in_size + block - 1) / block; hipLaunchKernelGGL(( RoiTransformGradKernel<T>), dim3(grid), dim3(block), 0, stream, lod_data, rois_data, batch_size, rois_num, in_height, in_width, channels, transformed_height, transformed_width, spatial_scale, out_grad_data, in_grad_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(roi_perspective_transform, ops::CUDAROIPerspectiveTransformOpKernel<float>); REGISTER_OP_CUDA_KERNEL(roi_perspective_transform_grad, ops::CUDAROIPerspectiveTransformGradOpKernel<float>);
7b260443aae1f23d27d1a22d6e989fb63927bf91.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" using paddle::platform::PADDLE_CUDA_NUM_THREADS; using paddle::platform::float16; namespace paddle { namespace operators { // CUDA: index helpers #define idx4_4(index, d1, d2, d3, d4) (index % d4) #define idx4_3(index, d1, d2, d3, d4) ((index / d4) % d3) #define idx4_2(index, d1, d2, d3, d4) ((index / d4 / d3) % d2) #define idx4_1(index, d1, d2, d3, d4) ((index / d4 / d3 / d2) % d1) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) template <typename T> __device__ bool GT_E(T a, T b) { return (a > b) || fabs(a - b) < 1e-4; } template <typename T> __device__ bool LT_E(T a, T b) { return (a < b) || fabs(a - b) < 1e-4; } template <typename T> __device__ bool GT(T a, T b) { return (a - b) > 1e-4; } template <typename T> __device__ T max(T a, T b) { return a > b ? a : b; } template <typename T> __device__ T min(T a, T b) { return a < b ? a : b; } /* * check if (x, y) is in the boundary of roi */ template <typename T> __device__ bool in_quad(T x, T y, T roi_x[], T roi_y[]) { for (int i = 0; i < 4; i++) { T start_w = roi_x[i]; T start_h = roi_y[i]; T end_w = roi_x[(i + 1) % 4]; T end_h = roi_y[(i + 1) % 4]; if (fabs(start_h - end_h) < 1e-4) { if (fabs(y - start_h) < 1e-4 && fabs(y - end_h) < 1e-4 && GT_E<T>(x, min<T>(start_w, end_w)) && LT_E<T>(x, max<T>(start_w, end_w))) { return true; } } else { T intersec_x = (y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w; if (fabs(intersec_x - x) < 1e-4 && GT_E(y, min<T>(start_h, end_h)) && LT_E<T>(y, max<T>(start_h, end_h))) { return true; } } } int n_cross = 0; for (int i = 0; i < 4; i++) { T start_w = roi_x[i]; T start_h = roi_y[i]; T end_w = roi_x[(i + 1) % 4]; T end_h = roi_y[(i + 1) % 4]; if (fabs(start_h - end_h) < 1e-4) { continue; } if (LT_E<T>(y, min<T>(start_h, end_h)) || GT<T>(y, max<T>(start_h, end_h))) { continue; } T intersec_x = (y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w; if (fabs(intersec_x - x) < 1e-4) { return true; } if (GT<T>(intersec_x, x)) { n_cross++; } } return (n_cross % 2 == 1); } /** * Perform bilinear interpolation in the input feature map. 
*/ template <typename T> __device__ void bilinear_interpolate(const T* in_data, const int channels, const int width, const int height, int in_n, int in_c, T in_w, T in_h, T* val) { // Deal with cases that source coords are out of feature map boundary if (GT<T>(-0.5, in_w) || GT<T>(in_w, width - 0.5) || GT<T>(-0.5, in_h) || GT<T>(in_h, height - 0.5)) { val[0] = 0.0; return; } if (GT<T>(0, in_w)) { in_w = 0; } if (GT<T>(0, in_h)) { in_h = 0; } int in_w_floor = floor(in_w); int in_h_floor = floor(in_h); int in_w_ceil; int in_h_ceil; if (GT_E<T>(in_w_floor, width - 1)) { in_w_ceil = in_w_floor = width - 1; in_w = static_cast<T>(in_w_floor); } else { in_w_ceil = in_w_floor + 1; } if (GT_E<T>(in_h_floor, height - 1)) { in_h_ceil = in_h_floor = height - 1; in_h = static_cast<T>(in_h_floor); } else { in_h_ceil = in_h_floor + 1; } T w_floor = in_w - in_w_floor; T h_floor = in_h - in_h_floor; T w_ceil = 1 - w_floor; T h_ceil = 1 - h_floor; const T* data = in_data + (in_n * channels + in_c) * height * width; // Do bilinear interpolation T v1 = data[in_h_floor * width + in_w_floor]; T v2 = data[in_h_ceil * width + in_w_floor]; T v3 = data[in_h_ceil * width + in_w_ceil]; T v4 = data[in_h_floor * width + in_w_ceil]; T w1 = w_ceil * h_ceil; T w2 = w_ceil * h_floor; T w3 = w_floor * h_floor; T w4 = w_floor * h_ceil; val[0] = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4; } /** * Get the source coordinates in the input feature map. * * (u, v, w)^matrix = T * (out_w, out_h, 1)^matrix * * in_w = u / w * in_h = v / w * */ template <typename T> __device__ void get_source_coords(T matrix[], int out_w, int out_h, T* in_w, T* in_h) { T u = matrix[0] * out_w + matrix[1] * out_h + matrix[2]; T v = matrix[3] * out_w + matrix[4] * out_h + matrix[5]; T w = matrix[6] * out_w + matrix[7] * out_h + matrix[8]; in_w[0] = u / w; in_h[0] = v / w; } /** * Get the matrix of perspective transform. 
* * dx1 = x1 - x2 * dx2 = x3 - x2 * dx3 = x0 - x1 + x2 - x3 * dy1 = y1 - y2 * dy2 = y3 - y2 * dy3 = y0 - y1 + y2 - y3 * * a11 = (x1 - x0 + a31 * (w - 1) * x1) / (w - 1) * a12 = (x3 - x0 + a32 * (h - 1) * x3) / (h - 1) * a13 = x0 * a21 = (y1 - y0 + a31 * (w - 1) * y1) / (w - 1) * a22 = (y3 - y0 + a32 * (h - 1) * y3) / (h - 1) * a23 = y0 * a31 = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (w - 1) * a32 = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (h - 1) * a33 = 1 * */ template <typename T> __device__ void get_transform_matrix(const int transformed_width, const int transformed_height, T roi_x[], T roi_y[], T matrix[]) { T x0 = roi_x[0]; T x1 = roi_x[1]; T x2 = roi_x[2]; T x3 = roi_x[3]; T y0 = roi_y[0]; T y1 = roi_y[1]; T y2 = roi_y[2]; T y3 = roi_y[3]; // Estimate the height and width of RoI T len1 = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1)); T len2 = sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)); T len3 = sqrt((x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3)); T len4 = sqrt((x3 - x0) * (x3 - x0) + (y3 - y0) * (y3 - y0)); T estimated_height = (len2 + len4) / 2.0; T estimated_width = (len1 + len3) / 2.0; // Get the normalized height and normalized width int normalized_height = transformed_height; int normalized_width = round(estimated_width * (normalized_height - 1) / estimated_height) + 1; normalized_width = min(normalized_width, transformed_width); T dx1 = x1 - x2; T dx2 = x3 - x2; T dx3 = x0 - x1 + x2 - x3; T dy1 = y1 - y2; T dy2 = y3 - y2; T dy3 = y0 - y1 + y2 - y3; matrix[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (normalized_width - 1); matrix[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (normalized_height - 1); matrix[8] = 1; matrix[3] = (y1 - y0 + matrix[6] * (normalized_width - 1) * y1) / (normalized_width - 1); matrix[4] = (y3 - y0 + matrix[7] * (normalized_height - 1) * y3) / (normalized_height - 1); matrix[5] = y0; matrix[0] = (x1 - x0 + matrix[6] * (normalized_width - 1) * x1) / (normalized_width - 1); matrix[1] = (x3 - x0 + matrix[7] * (normalized_height - 1) * x3) / (normalized_height - 1); matrix[2] = x0; } template <typename T> __global__ void RoiTransformKernel(const float* input_data, const float* rois_data, const int* roi2image_data, int num_rois, int in_height, int in_width, int channels, int transformed_height, int transformed_width, float spatial_scale, T* output_data) { int output_size = num_rois * transformed_height * transformed_width * channels; CUDA_1D_KERNEL_LOOP(index, output_size) { // (n, c, out_h, out_w) is an element in the transformed output int out_w = idx4_4(index, num_rois, channels, transformed_height, transformed_width); int out_h = idx4_3(index, num_rois, channels, transformed_height, transformed_width); int c = idx4_2(index, num_rois, channels, transformed_height, transformed_width); int n = idx4_1(index, num_rois, channels, transformed_height, transformed_width); auto bottom_rois = rois_data + n * 8; int roi_batch_ind = bottom_rois[0]; T roi_x[4]; T roi_y[4]; for (int k = 0; k < 4; ++k) { roi_x[k] = bottom_rois[2 * k] * spatial_scale; roi_y[k] = bottom_rois[2 * k + 1] * spatial_scale; } // Get transform matrix T matrix[9]; get_transform_matrix<T>(transformed_width, transformed_height, roi_x, roi_y, matrix); // Get source coords T in_w; T in_h; get_source_coords<T>(matrix, out_w, out_h, &in_w, &in_h); if (in_quad<T>(in_w, in_h, roi_x, roi_y)) { if (GT<T>(-0.5, in_w) || GT<T>(in_w, static_cast<T>(in_width - 0.5)) || GT<T>(-0.5, in_h) || GT<T>(in_h, static_cast<T>(in_height - 0.5))) { // Skip if 
source coords is not in input image output_data[index] = 0.0; } else { // Perform bilinear interpolation int in_n = roi2image_data[n]; bilinear_interpolate<T>(input_data, channels, in_width, in_height, in_n, c, in_w, in_h, output_data + index); } } else { // Skip if source coords is not in quad output_data[index] = 0.0; } } } template <typename T> class CUDAROIPerspectiveTransformOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<framework::Tensor>("X"); auto* rois = ctx.Input<framework::LoDTensor>("ROIs"); auto* out = ctx.Output<framework::Tensor>("Out"); auto transformed_height = ctx.Attr<int>("transformed_height"); auto transformed_width = ctx.Attr<int>("transformed_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int channels = in_dims[1]; int in_height = in_dims[2]; int in_width = in_dims[3]; int rois_num = rois->dims()[0]; const T* input_data = in->data<T>(); T* output_data = out->mutable_data<T>(ctx.GetPlace()); const T* rois_data = rois->data<T>(); framework::Tensor roi2image; framework::Tensor roi2image_dev; roi2image.Resize({rois_num}); int* roi2image_data = roi2image.mutable_data<int>(platform::CPUPlace()); auto lod = rois->lod().back(); for (size_t i = 0; i < lod.size() - 1; ++i) { for (size_t j = lod[i]; j < lod[i + 1]; ++j) { roi2image_data[j] = i; } } TensorCopySync(roi2image, ctx.GetPlace(), &roi2image_dev); int out_size = rois_num * transformed_height * transformed_width * channels; auto stream = ctx.cuda_device_context().stream(); int block = 512; int grid = (out_size + block - 1) / block; RoiTransformKernel<T><<<grid, block, 0, stream>>>( input_data, rois_data, roi2image_dev.data<int>(), rois_num, in_height, in_width, channels, transformed_height, transformed_width, spatial_scale, output_data); } }; template <typename T> __device__ T get_feature_gradient(T xs, T ys, int w, int h, const int width, const int height) { if (GT<T>(-0.5, xs) || GT<T>(xs, width - 0.5) || GT<T>(-0.5, ys) || GT<T>(ys, height - 0.5)) { return 0; } if (GT<T>(0, xs)) { xs = 0; } if (GT<T>(0, ys)) { ys = 0; } int xs_floor = floor(xs); int ys_floor = floor(ys); int xs_ceil; int ys_ceil; if (GT_E<T>(xs_floor, width - 1)) { xs_ceil = xs_floor = width - 1; xs = static_cast<T>(xs_floor); } else { xs_ceil = xs_floor + 1; } if (GT_E(ys_floor, height - 1)) { ys_ceil = ys_floor = height - 1; ys = static_cast<T>(ys_floor); } else { ys_ceil = ys_floor + 1; } T weight = 0; if (w == xs_floor) { if (h == ys_floor) { weight = (w + 1 - xs) * (h + 1 - ys); } else if (h == ys_ceil) { weight = (w + 1 - xs) * (ys + 1 - h); } } else if (w == xs_ceil) { if (h == ys_floor) { weight = (xs + 1 - w) * (h + 1 - ys); } else if (h == ys_ceil) { weight = (xs + 1 - w) * (ys + 1 - h); } } return weight; } template <typename T> __global__ void RoiTransformGradKernel( const size_t* lod, const T* rois_data, int batch_size, int num_rois, int in_height, int in_width, int channels, int transformed_height, int transformed_width, float spatial_scale, const T* out_grad_data, T* in_grad_data) { int input_size = batch_size * in_height * in_width * channels; CUDA_1D_KERNEL_LOOP(index, input_size) { // (n, c, h, w) coords in input int in_w = idx4_4(index, batch_size, channels, in_height, in_width); int in_h = idx4_3(index, batch_size, channels, in_height, in_width); int c = idx4_2(index, batch_size, channels, in_height, in_width); int n = idx4_1(index, batch_size, channels, in_height, 
in_width); T gradient = 0.0; // Accumulate gradient over all RoIs that interpolated this element for (size_t roi_idx = lod[n]; roi_idx < lod[n + 1]; ++roi_idx) { const T* rois = rois_data + roi_idx * 8; T roi_x[4]; T roi_y[4]; for (int k = 0; k < 4; ++k) { roi_x[k] = rois[2 * k] * spatial_scale; roi_y[k] = rois[2 * k + 1] * spatial_scale; } // Get transform matrix T matrix[9]; get_transform_matrix<T>(transformed_width, transformed_height, roi_x, roi_y, matrix); const T* out_grad_ptr = out_grad_data + (roi_idx * channels + c) * transformed_height * transformed_width; for (int out_h = 0; out_h < transformed_height; ++out_h) { for (int out_w = 0; out_w < transformed_width; ++out_w) { T src_w; T src_h; get_source_coords<T>(matrix, out_w, out_h, &src_w, &src_h); if (in_quad<T>(src_w, src_h, roi_x, roi_y)) { if (GT<T>(-0.5, src_w) || GT<T>(src_w, static_cast<T>(in_width - 0.5)) || GT<T>(-0.5, src_h) || GT<T>(src_h, static_cast<T>(in_height - 0.5))) { continue; } T weight = get_feature_gradient<T>(src_w, src_h, in_w, in_h, in_width, in_height); gradient += out_grad_ptr[out_h * transformed_width + out_w] * weight; } } } } in_grad_data[index] = gradient; } } template <typename T> class CUDAROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<framework::Tensor>("X"); auto* rois = ctx.Input<framework::LoDTensor>("ROIs"); auto* out_grad = ctx.Input<framework::Tensor>(framework::GradVarName("Out")); auto* in_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X")); auto transformed_height = ctx.Attr<int>("transformed_height"); auto transformed_width = ctx.Attr<int>("transformed_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int channels = in_dims[1]; int in_height = in_dims[2]; int in_width = in_dims[3]; int rois_num = rois->dims()[0]; T* in_grad_data = in_grad->mutable_data<T>(ctx.GetPlace()); const T* out_grad_data = out_grad->data<T>(); const T* rois_data = rois->data<T>(); auto lod = rois->lod().back(); auto lod_data = lod.CUDAData(ctx.GetPlace()); int in_size = in->numel(); auto stream = ctx.cuda_device_context().stream(); int block = 512; int grid = (in_size + block - 1) / block; RoiTransformGradKernel<T><<<grid, block, 0, stream>>>( lod_data, rois_data, batch_size, rois_num, in_height, in_width, channels, transformed_height, transformed_width, spatial_scale, out_grad_data, in_grad_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(roi_perspective_transform, ops::CUDAROIPerspectiveTransformOpKernel<float>); REGISTER_OP_CUDA_KERNEL(roi_perspective_transform_grad, ops::CUDAROIPerspectiveTransformGradOpKernel<float>);
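// ---------------------------------------------------------------------------
// Illustrative appendix (not part of the file above): a minimal host-only
// sketch of the perspective-transform math used by get_transform_matrix and
// get_source_coords. It builds the 3x3 matrix with the same formulas and maps
// one output pixel back to source coordinates. The ROI corner values and the
// choice to take the normalized sizes directly from the requested output size
// are made-up simplifications for the example, not taken from the operator.
// ---------------------------------------------------------------------------
#include <cstdio>

int main() {
  const int transformed_w = 8, transformed_h = 8;
  float x[4] = {10.f, 40.f, 42.f, 12.f};   // hypothetical ROI corners x0..x3
  float y[4] = {10.f, 12.f, 40.f, 38.f};   // hypothetical ROI corners y0..y3

  float w = transformed_w, h = transformed_h;
  float dx1 = x[1] - x[2], dx2 = x[3] - x[2], dx3 = x[0] - x[1] + x[2] - x[3];
  float dy1 = y[1] - y[2], dy2 = y[3] - y[2], dy3 = y[0] - y[1] + y[2] - y[3];

  // Same construction as get_transform_matrix.
  float m[9];
  m[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (w - 1);
  m[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (h - 1);
  m[8] = 1.f;
  m[0] = (x[1] - x[0] + m[6] * (w - 1) * x[1]) / (w - 1);
  m[1] = (x[3] - x[0] + m[7] * (h - 1) * x[3]) / (h - 1);
  m[2] = x[0];
  m[3] = (y[1] - y[0] + m[6] * (w - 1) * y[1]) / (w - 1);
  m[4] = (y[3] - y[0] + m[7] * (h - 1) * y[3]) / (h - 1);
  m[5] = y[0];

  // Same mapping as get_source_coords: (u, v, q) = M * (out_w, out_h, 1).
  int out_w = 3, out_h = 5;
  float u = m[0] * out_w + m[1] * out_h + m[2];
  float v = m[3] * out_w + m[4] * out_h + m[5];
  float q = m[6] * out_w + m[7] * out_h + m[8];
  printf("output (%d,%d) -> source (%.3f, %.3f)\n", out_w, out_h, u / q, v / q);
  return 0;
}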
5c97550d8f5fda1713fb0054f8b7576b99d2f4f2.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <vector> #include <iostream> #include <iomanip> #include <sstream> #include <string> typedef float real_t; // TEXTURE: SINGLE PRECISION ONLY! texture< real_t > texRef; __global__ void copyFromTexture( real_t *odata, size_t offset ) { const size_t i = blockIdx.x * blockDim.x + threadIdx.x ; odata[ i ] = tex1Dfetch( texRef, i + offset); } template < typename T > T strTo( const char* str ) { if( !str ) throw std::runtime_error( "strToReal - NULL srting"); std::istringstream is( str ); T v = T(); is >> v; return v; } const double GB = 1024 * 1024 * 1024; double GBs( size_t numElements, double tms ) { return ( numElements * sizeof( real_t ) / GB ) / ( tms / 1000 ); } //a.exe 4194304 0 16 1 16 128 csv int main(int argc, char** argv ) { size_t NUM_ELEMENTS = 1024; size_t OFFSET_MIN = 0; size_t OFFSET_MAX = 0; size_t BLOCK_SIZE = 128; bool CSV = false; if( argc < 2 || argc > 6 ) { std::cout << "usage: " << argv[ 0 ] << " <num elements> <offset> <stride> <block size> [csv]\n"; std::cout << " using default: num elements= " << NUM_ELEMENTS << " offset= " << OFFSET_MIN << ',' << OFFSET_MAX << " block size= " << BLOCK_SIZE << std::endl; } else { NUM_ELEMENTS = strTo< size_t >( argv[ 1 ] ); OFFSET_MIN = strTo< size_t >( argv[ 2 ] ); OFFSET_MAX = strTo< size_t >( argv[ 3 ] ); BLOCK_SIZE = strTo< int >( argv[ 4 ] ); if( argc == 6 ) { CSV = std::string( argv[ 5 ] ) == "csv"; } } const dim3 BLOCKS( ( NUM_ELEMENTS + BLOCK_SIZE - 1 ) / BLOCK_SIZE ); const dim3 THREADS_PER_BLOCK( BLOCK_SIZE ); const size_t TOTAL_ELEMENTS = NUM_ELEMENTS + OFFSET_MAX; const size_t SIZE = TOTAL_ELEMENTS * sizeof( real_t ); std::vector< real_t > h_data( TOTAL_ELEMENTS, 1.f ); // allocate array and copy image data hipChannelFormatDesc channelDesc = hipCreateChannelDesc( 32, 0, 0, 0, hipChannelFormatKindFloat ); hipArray* cu_array = 0; hipMallocArray( &cu_array, &channelDesc, TOTAL_ELEMENTS, 1 ); hipMemcpyToArray( cu_array, 0, 0, &h_data[ 0 ], SIZE, hipMemcpyHostToDevice); // set texture parameters texRef.addressMode[0] = hipAddressModeClamp; texRef.filterMode = hipFilterModeLinear; texRef.normalized = false; // access with normalized texture coordinates // Bind the array to the texture hipBindTextureToArray( texRef, cu_array, channelDesc ); real_t* dev_out = 0; hipMalloc( &dev_out, SIZE ); hipEvent_t start = hipEvent_t(); hipEvent_t stop = hipEvent_t(); hipEventCreate( &start ); hipEventCreate( &stop ); for( size_t i = OFFSET_MIN; i <= OFFSET_MAX; ++i ) { float elapsed = 0.f; hipEventRecord( start, 0 ); hipLaunchKernelGGL(( copyFromTexture), dim3(BLOCKS), dim3(THREADS_PER_BLOCK) , 0, 0, dev_out, i ); hipEventRecord( stop, 0); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsed, start, stop ); if( CSV ) { std::cout << NUM_ELEMENTS << ',' << i << ',' << BLOCK_SIZE << ',' << elapsed << ',' << GBs( NUM_ELEMENTS, elapsed ) << std::endl; } else { std::cout << "elapsed time (ms): " << elapsed << std::endl; } } hipFree( dev_out ); hipEventDestroy( start ); hipEventDestroy( stop ); return 0; } /* RESULTS: ------------------------------------------------ Penryn - Window 7 64bit - GTX 285 - CUDA 4.0 RC1 ------------------------------------------------ C:\tmp>a.exe 65520 0 16 512 csv 65520,0,512,0.024448,9.98368 65520,1,512,0.01328,18.3796 65520,2,512,0.01312,18.6037 65520,3,512,0.013152,18.5585 65520,4,512,0.013408,18.2041 65520,5,512,0.012704,19.2129 65520,6,512,0.01296,18.8334 65520,7,512,0.013184,18.5134 65520,8,512,0.012864,18.974 
65520,9,512,0.013408,18.2041
65520,10,512,0.015008,16.2634
65520,11,512,0.01328,18.3796
65520,12,512,0.013184,18.5134
65520,13,512,0.012896,18.9269
65520,14,512,0.013056,18.6949
65520,15,512,0.01472,16.5816
65520,16,512,0.012512,19.5078
*/
5c97550d8f5fda1713fb0054f8b7576b99d2f4f2.cu
#include <cuda_runtime.h> #include <vector> #include <iostream> #include <iomanip> #include <sstream> #include <string> typedef float real_t; // TEXTURE: SINGLE PRECISION ONLY! texture< real_t > texRef; __global__ void copyFromTexture( real_t *odata, size_t offset ) { const size_t i = blockIdx.x * blockDim.x + threadIdx.x ; odata[ i ] = tex1Dfetch( texRef, i + offset); } template < typename T > T strTo( const char* str ) { if( !str ) throw std::runtime_error( "strToReal - NULL srting"); std::istringstream is( str ); T v = T(); is >> v; return v; } const double GB = 1024 * 1024 * 1024; double GBs( size_t numElements, double tms ) { return ( numElements * sizeof( real_t ) / GB ) / ( tms / 1000 ); } //a.exe 4194304 0 16 1 16 128 csv int main(int argc, char** argv ) { size_t NUM_ELEMENTS = 1024; size_t OFFSET_MIN = 0; size_t OFFSET_MAX = 0; size_t BLOCK_SIZE = 128; bool CSV = false; if( argc < 2 || argc > 6 ) { std::cout << "usage: " << argv[ 0 ] << " <num elements> <offset> <stride> <block size> [csv]\n"; std::cout << " using default: num elements= " << NUM_ELEMENTS << " offset= " << OFFSET_MIN << ',' << OFFSET_MAX << " block size= " << BLOCK_SIZE << std::endl; } else { NUM_ELEMENTS = strTo< size_t >( argv[ 1 ] ); OFFSET_MIN = strTo< size_t >( argv[ 2 ] ); OFFSET_MAX = strTo< size_t >( argv[ 3 ] ); BLOCK_SIZE = strTo< int >( argv[ 4 ] ); if( argc == 6 ) { CSV = std::string( argv[ 5 ] ) == "csv"; } } const dim3 BLOCKS( ( NUM_ELEMENTS + BLOCK_SIZE - 1 ) / BLOCK_SIZE ); const dim3 THREADS_PER_BLOCK( BLOCK_SIZE ); const size_t TOTAL_ELEMENTS = NUM_ELEMENTS + OFFSET_MAX; const size_t SIZE = TOTAL_ELEMENTS * sizeof( real_t ); std::vector< real_t > h_data( TOTAL_ELEMENTS, 1.f ); // allocate array and copy image data cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc( 32, 0, 0, 0, cudaChannelFormatKindFloat ); cudaArray* cu_array = 0; cudaMallocArray( &cu_array, &channelDesc, TOTAL_ELEMENTS, 1 ); cudaMemcpyToArray( cu_array, 0, 0, &h_data[ 0 ], SIZE, cudaMemcpyHostToDevice); // set texture parameters texRef.addressMode[0] = cudaAddressModeClamp; texRef.filterMode = cudaFilterModeLinear; texRef.normalized = false; // access with normalized texture coordinates // Bind the array to the texture cudaBindTextureToArray( texRef, cu_array, channelDesc ); real_t* dev_out = 0; cudaMalloc( &dev_out, SIZE ); cudaEvent_t start = cudaEvent_t(); cudaEvent_t stop = cudaEvent_t(); cudaEventCreate( &start ); cudaEventCreate( &stop ); for( size_t i = OFFSET_MIN; i <= OFFSET_MAX; ++i ) { float elapsed = 0.f; cudaEventRecord( start, 0 ); copyFromTexture<<< BLOCKS, THREADS_PER_BLOCK >>>( dev_out, i ); cudaEventRecord( stop, 0); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsed, start, stop ); if( CSV ) { std::cout << NUM_ELEMENTS << ',' << i << ',' << BLOCK_SIZE << ',' << elapsed << ',' << GBs( NUM_ELEMENTS, elapsed ) << std::endl; } else { std::cout << "elapsed time (ms): " << elapsed << std::endl; } } cudaFree( dev_out ); cudaEventDestroy( start ); cudaEventDestroy( stop ); return 0; } /* RESULTS: ------------------------------------------------ Penryn - Window 7 64bit - GTX 285 - CUDA 4.0 RC1 ------------------------------------------------ C:\tmp>a.exe 65520 0 16 512 csv 65520,0,512,0.024448,9.98368 65520,1,512,0.01328,18.3796 65520,2,512,0.01312,18.6037 65520,3,512,0.013152,18.5585 65520,4,512,0.013408,18.2041 65520,5,512,0.012704,19.2129 65520,6,512,0.01296,18.8334 65520,7,512,0.013184,18.5134 65520,8,512,0.012864,18.974 65520,9,512,0.013408,18.2041 65520,10,512,0.015008,16.2634 
65520,11,512,0.01328,18.3796
65520,12,512,0.013184,18.5134
65520,13,512,0.012896,18.9269
65520,14,512,0.013056,18.6949
65520,15,512,0.01472,16.5816
65520,16,512,0.012512,19.5078
*/
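// ---------------------------------------------------------------------------
// Illustrative appendix (not part of the benchmark above): a quick host-side
// check of the bandwidth formula used by GBs() — bytes moved divided by
// seconds, with GB defined as 1024^3. The sample numbers are taken from the
// RESULTS table (65520 floats at ~0.013 ms). Note that GBs() only counts the
// numElements floats read through the texture; the kernel also writes that
// many to odata, so total device traffic is roughly twice the reported figure.
// ---------------------------------------------------------------------------
#include <cstdio>

int main() {
  const double GB = 1024.0 * 1024.0 * 1024.0;
  size_t num_elements = 65520;     // one row of the RESULTS table
  double elapsed_ms = 0.013;       // typical elapsed time from the table
  double gbs = (num_elements * sizeof(float) / GB) / (elapsed_ms / 1000.0);
  printf("%.2f GB/s\n", gbs);      // ~18.8, matching the last column above
  return 0;
}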
063a1e7927b3b37d3cd112916f449a328487fbdb.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <hip/hip_runtime.h> #include <fstream> #include <iostream> #include <stdint.h> #include <stdio.h> #define TILE_SIZE 32 #define STAGE_SIZE 16 #define HANDLE_ERROR(status) \ { \ if (status != hipSuccess) \ { \ printf("%s failed at line %d \nError message: %s \n", \ __FILE__, __LINE__ ,hipGetErrorString(status)); \ exit(EXIT_FAILURE); \ } \ } __global__ void WakeGpuKernel(int reps) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= reps) return; } __global__ void CalculateKIterLeadBlock(uint32_t *graph, uint32_t n, uint32_t blockedIter) { const int locI = threadIdx.y; const int locJ = threadIdx.x; const int glI = TILE_SIZE * blockedIter + locI; const int glJ = TILE_SIZE * blockedIter + locJ; if (glI >= n || glJ >= n || glI >= TILE_SIZE * (blockedIter + 1) || glI < TILE_SIZE * blockedIter || glJ >= TILE_SIZE * (blockedIter + 1) || glJ < TILE_SIZE * blockedIter) { return; } __shared__ uint32_t leadBlock[TILE_SIZE * TILE_SIZE]; leadBlock[locI * TILE_SIZE + locJ] = graph[glI * n + glJ]; __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < TILE_SIZE; ++locIter) { uint32_t newPathLen = leadBlock[locI * TILE_SIZE + locIter] + leadBlock[locIter * TILE_SIZE + locJ]; if (newPathLen < leadBlock[locI * TILE_SIZE + locJ]) { leadBlock[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } graph[glI * n + glJ] = leadBlock[locI * TILE_SIZE + locJ]; } __global__ void CalculateKIterLeadRowAndColumn(uint32_t *graph, uint32_t n, uint32_t blockedIter) { if (threadIdx.y * TILE_SIZE + threadIdx.x > TILE_SIZE * TILE_SIZE || blockIdx.x == blockedIter) { return; } int blockPosI, blockPosJ; if (blockIdx.y == 0) { // This is lead row blockPosI = blockedIter * TILE_SIZE; blockPosJ = blockIdx.x * TILE_SIZE; } else { // This is lead column blockPosI = blockIdx.x * TILE_SIZE; blockPosJ = blockedIter * TILE_SIZE; } int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; __shared__ uint32_t leadBlock[TILE_SIZE * STAGE_SIZE]; __shared__ uint32_t curBlock[TILE_SIZE * TILE_SIZE]; curBlock[locI * TILE_SIZE + locJ] = graph[glI * n + glJ]; __syncthreads(); size_t leadBlockOffset = blockedIter * TILE_SIZE; if (blockIdx.y == 0) { // This is lead row #pragma unroll for (size_t stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadBlock[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(leadBlockOffset + locJ) * n + leadBlockOffset + locI]; } __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = curBlock[(stage * STAGE_SIZE + locIter) * TILE_SIZE + locJ] + leadBlock[locI * STAGE_SIZE + locIter]; if (newPathLen < curBlock[locI * TILE_SIZE + locJ]) { curBlock[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } } } else { // This is lead column #pragma unroll for (size_t stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadBlock[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlockOffset + locI) * n + leadBlockOffset + locJ]; } __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = curBlock[locI * TILE_SIZE + stage * STAGE_SIZE + locIter] + leadBlock[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlock[locI * TILE_SIZE + locJ]) { curBlock[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } } } graph[glI * n + glJ] = curBlock[locI * TILE_SIZE + locJ]; 
} __global__ void CalculateKIterRestLeadBlocks(uint32_t *graph, uint32_t n, uint32_t blockedIter) { if (blockIdx.x == blockedIter || (blockIdx.y == 1 && blockIdx.x == blockedIter - 1)) { return; } __shared__ uint32_t leadRow[TILE_SIZE * STAGE_SIZE]; __shared__ uint32_t leadCol[TILE_SIZE * STAGE_SIZE]; uint32_t curBlockElem; int blockPosI, blockPosJ; if (blockIdx.y == 0) { // this is k-row blockPosI = (blockedIter - 1) * TILE_SIZE; blockPosJ = blockIdx.x * TILE_SIZE; } else { blockPosI = blockIdx.x * TILE_SIZE; blockPosJ = (blockedIter - 1) * TILE_SIZE; // this is k-column } int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; curBlockElem = graph[glI * n + glJ]; __syncthreads(); #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { size_t leadBlocksOffset = blockedIter * TILE_SIZE; if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlocksOffset + locI) * n + (blockPosJ + locJ)]; leadCol[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlocksOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } graph[glI * n + glJ] = curBlockElem; } __global__ void CalculateK1IterLeadBlock(uint32_t *graph, uint32_t n, uint32_t blockedIter) { __shared__ uint32_t sharedMatrix[TILE_SIZE * TILE_SIZE]; uint32_t curBlockElem; int blockPosI = (blockedIter + 1) * TILE_SIZE; int blockPosJ = (blockedIter + 1) * TILE_SIZE; int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; // if (glI >= n || glJ >= n || // glI >= TILE_SIZE * (blockedIter + 1) || glI < TILE_SIZE * blockedIter || // glJ >= TILE_SIZE * (blockedIter + 1) || glJ < TILE_SIZE * blockedIter) // { // return; // } curBlockElem = graph[glI * n + glJ]; __syncthreads(); #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { size_t leadBlocksOffset = blockedIter * TILE_SIZE; if (locI / STAGE_SIZE == stage) { sharedMatrix [(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlocksOffset + locI) * n + (blockPosJ + locJ)]; sharedMatrix[TILE_SIZE * STAGE_SIZE + locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlocksOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = sharedMatrix[TILE_SIZE * STAGE_SIZE + locI * STAGE_SIZE + locIter] + sharedMatrix [locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } // Now leadCol will be used as leadBlock sharedMatrix[locI * TILE_SIZE + locJ] = curBlockElem; __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < TILE_SIZE; ++locIter) { uint32_t newPathLen = sharedMatrix[locI * TILE_SIZE + locIter] + sharedMatrix[locIter * TILE_SIZE + locJ]; if (newPathLen < sharedMatrix[locI * TILE_SIZE + locJ]) { sharedMatrix[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } graph[glI * n + glJ] = sharedMatrix[locI * TILE_SIZE + locJ]; } __global__ void CalculateK1IterRowAndColumn(uint32_t *graph, uint32_t n, uint32_t blockedIter) { __shared__ uint32_t leadRow[TILE_SIZE * STAGE_SIZE]; __shared__ uint32_t leadCol[TILE_SIZE * TILE_SIZE]; uint32_t curBlockElem; // if (threadIdx.y * 
TILE_SIZE + threadIdx.x > TILE_SIZE * TILE_SIZE // || blockIdx.x == blockedIter + 1) // { // return; // } int blockPosI, blockPosJ; if (blockIdx.y == 0) { // This is k+1 row blockPosI = (blockedIter + 1) * TILE_SIZE; blockPosJ = blockIdx.x * TILE_SIZE; } else { // This is k+1 column blockPosI = blockIdx.x * TILE_SIZE; blockPosJ = (blockedIter + 1)* TILE_SIZE; } int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; curBlockElem = graph[glI * n + glJ]; __syncthreads(); size_t leadBlockOffset = blockedIter * TILE_SIZE; #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlockOffset + locI) * n + (blockPosJ + locJ)]; leadCol[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlockOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } // now leadRow will be used as leadBlock, and leadColumn as curBlock leadCol[locI * TILE_SIZE + locJ] = curBlockElem; __syncthreads(); leadBlockOffset = (blockedIter + 1) * TILE_SIZE; if (blockIdx.y == 0) { // This is lead row #pragma unroll for (size_t stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadRow[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(leadBlockOffset + locJ) * n + leadBlockOffset + locI]; } __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[(stage * STAGE_SIZE + locIter) * TILE_SIZE + locJ] + leadRow[locI * STAGE_SIZE + locIter]; if (newPathLen < leadCol[locI * TILE_SIZE + locJ]) { leadCol[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } } } else { // This is lead column #pragma unroll for (size_t stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlockOffset + locI) * n + leadBlockOffset + locJ]; } __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * TILE_SIZE + stage * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < leadCol[locI * TILE_SIZE + locJ]) { leadCol[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } } } graph[glI * n + glJ] = leadCol[locI * TILE_SIZE + locJ]; } __global__ void CalculateRestBlocks(uint32_t *graph, uint32_t n, uint32_t blockedIter) { __shared__ uint32_t leadRow[TILE_SIZE * STAGE_SIZE]; __shared__ uint32_t leadCol[TILE_SIZE * STAGE_SIZE]; uint32_t curBlockElem; if (blockIdx.x == blockedIter || blockIdx.y == blockedIter || blockIdx.x == blockedIter + 1 || blockIdx.y == blockedIter + 1) { return; } int blockPosI = blockIdx.y * TILE_SIZE; int blockPosJ = blockIdx.x * TILE_SIZE; int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; curBlockElem = graph[glI * n + glJ]; __syncthreads(); #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { size_t leadBlocksOffset = blockedIter * TILE_SIZE; if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlocksOffset + locI) * n + (blockPosJ + locJ)]; leadCol[locJ * STAGE_SIZE + (locI % 
STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlocksOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { size_t leadBlocksOffset = (blockedIter + 1) * TILE_SIZE; if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlocksOffset + locI) * n + (blockPosJ + locJ)]; leadCol[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlocksOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } graph[glI * n + glJ] = curBlockElem; } __host__ void FloydBlocked(uint32_t *h_graph, uint32_t *h_floydResult, uint32_t n) { // Copy graph to device global memory auto start = std::chrono::steady_clock::now(); uint32_t *d_graph; hipMalloc(&d_graph, sizeof(uint32_t) * n * n); hipMemcpy(d_graph, h_graph, sizeof(uint32_t) * n * n, hipMemcpyHostToDevice); dim3 firstStepGridSize(1, 1, 1); dim3 firstStepBlockSize(TILE_SIZE, TILE_SIZE, 1); dim3 secondStepGridSize((n - 1) / TILE_SIZE + 1, 2, 1); dim3 secondStepBlockSize(TILE_SIZE, TILE_SIZE, 1); dim3 thirdStepGridSize((n - 1)/ TILE_SIZE + 1, (n - 1)/ TILE_SIZE + 1, 1); dim3 thirdStepBlockSize(TILE_SIZE, TILE_SIZE, 1); hipError_t cudaStatus; hipEvent_t stepFinishedEvent; hipEventCreate(&stepFinishedEvent); for (int blockedIteration = 0; blockedIteration < n / TILE_SIZE; blockedIteration += 2) { // K Block + Row + Column - only k iterations hipLaunchKernelGGL(( CalculateKIterLeadBlock), dim3(firstStepGridSize), dim3(firstStepBlockSize), 0, 0, d_graph, n, blockedIteration); cudaStatus = hipGetLastError(); HANDLE_ERROR(cudaStatus); hipEventRecord(stepFinishedEvent); hipEventSynchronize(stepFinishedEvent); hipLaunchKernelGGL(( CalculateKIterLeadRowAndColumn), dim3(secondStepGridSize), dim3(secondStepBlockSize), 0, 0, d_graph, n, blockedIteration); cudaStatus = hipGetLastError(); HANDLE_ERROR(cudaStatus); hipEventRecord(stepFinishedEvent); hipEventSynchronize(stepFinishedEvent); // K + 1 Block + Row + Column - k+1 iterations hipLaunchKernelGGL(( CalculateK1IterLeadBlock), dim3(firstStepGridSize), dim3(firstStepBlockSize), 0, 0, d_graph, n, blockedIteration); cudaStatus = hipGetLastError(); HANDLE_ERROR(cudaStatus); hipEventRecord(stepFinishedEvent); hipEventSynchronize(stepFinishedEvent); hipLaunchKernelGGL(( CalculateK1IterRowAndColumn), dim3(secondStepGridSize), dim3(secondStepBlockSize), 0, 0, d_graph, n, blockedIteration); cudaStatus = hipGetLastError(); HANDLE_ERROR(cudaStatus); hipEventRecord(stepFinishedEvent); hipEventSynchronize(stepFinishedEvent); // K Block + Row + Column - k iterations (as 2-dependent blocks) hipLaunchKernelGGL(( CalculateKIterRestLeadBlocks), dim3(secondStepGridSize), dim3(secondStepBlockSize), 0, 0, d_graph, n, blockedIteration + 1); cudaStatus = hipGetLastError(); HANDLE_ERROR(cudaStatus); hipEventRecord(stepFinishedEvent); hipEventSynchronize(stepFinishedEvent); // Calculate all other blocks hipLaunchKernelGGL(( CalculateRestBlocks), dim3(thirdStepGridSize), dim3(thirdStepBlockSize), 0, 0, d_graph, n, blockedIteration); cudaStatus = 
hipGetLastError(); HANDLE_ERROR(cudaStatus); hipEventRecord(stepFinishedEvent); hipEventSynchronize(stepFinishedEvent); } cudaStatus = hipGetLastError(); HANDLE_ERROR(cudaStatus); // Copy results to host hipMemcpy(h_floydResult, d_graph, sizeof(int) * n * n, hipMemcpyDeviceToHost); // Calculate all time used by cuda, and print it to console auto duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now() - start); std::cout << n << " " << duration.count() << std::endl; hipFree(d_graph); } __host__ int main(int argc, char **argv) { if (argc < 3) { std::cout << "usage: " << argv[0] << " graph_path results_path" << std::endl; return 1; } // Read vertex count and all graph uint32_t n; std::fstream graph_reader(argv[1], std::fstream::in | std::fstream::binary); graph_reader.read((char*)&n, 4); if (n % (TILE_SIZE * 2) != 0) { std::cout << "Number of vertex shoud be divided by tile size (just for easier implementation). " << "Tile size: " << TILE_SIZE << ". Vertex's count: " << n << "." << std::endl; graph_reader.close(); return 1; } uint32_t *h_graph = new uint32_t[n * n]; uint32_t *h_floydResult = new uint32_t[n * n]; for (size_t i = 0; i < n * n; ++i) { uint8_t current_elem; graph_reader.read((char *)&current_elem, 1); h_graph[i] = current_elem; } graph_reader.close(); // Run empty task on cuda - it will decrease time of first run int threadNum = ::min(n, uint32_t(32)); dim3 gridSize(n / threadNum + 1, n / threadNum + 1, 1); dim3 cudaBlockSize(threadNum, threadNum, 1); hipLaunchKernelGGL(( WakeGpuKernel), dim3(1), dim3(cudaBlockSize), 0, 0, 32); // Blocked Floyd-Warshall algorithm on cuda FloydBlocked(h_graph, h_floydResult, n); // Write Floyd results to file std::fstream result_writer(argv[2], std::fstream::out | std::fstream::binary); for (size_t i = 0; i < n * n; ++i) { result_writer.write((char*)&h_floydResult[i], 4); } result_writer.close(); delete[] h_graph; delete[] h_floydResult; return 0; }
063a1e7927b3b37d3cd112916f449a328487fbdb.cu
#include <chrono> #include <cuda.h> #include <fstream> #include <iostream> #include <stdint.h> #include <stdio.h> #define TILE_SIZE 32 #define STAGE_SIZE 16 #define HANDLE_ERROR(status) \ { \ if (status != cudaSuccess) \ { \ printf("%s failed at line %d \nError message: %s \n", \ __FILE__, __LINE__ ,cudaGetErrorString(status)); \ exit(EXIT_FAILURE); \ } \ } __global__ void WakeGpuKernel(int reps) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= reps) return; } __global__ void CalculateKIterLeadBlock(uint32_t *graph, uint32_t n, uint32_t blockedIter) { const int locI = threadIdx.y; const int locJ = threadIdx.x; const int glI = TILE_SIZE * blockedIter + locI; const int glJ = TILE_SIZE * blockedIter + locJ; if (glI >= n || glJ >= n || glI >= TILE_SIZE * (blockedIter + 1) || glI < TILE_SIZE * blockedIter || glJ >= TILE_SIZE * (blockedIter + 1) || glJ < TILE_SIZE * blockedIter) { return; } __shared__ uint32_t leadBlock[TILE_SIZE * TILE_SIZE]; leadBlock[locI * TILE_SIZE + locJ] = graph[glI * n + glJ]; __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < TILE_SIZE; ++locIter) { uint32_t newPathLen = leadBlock[locI * TILE_SIZE + locIter] + leadBlock[locIter * TILE_SIZE + locJ]; if (newPathLen < leadBlock[locI * TILE_SIZE + locJ]) { leadBlock[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } graph[glI * n + glJ] = leadBlock[locI * TILE_SIZE + locJ]; } __global__ void CalculateKIterLeadRowAndColumn(uint32_t *graph, uint32_t n, uint32_t blockedIter) { if (threadIdx.y * TILE_SIZE + threadIdx.x > TILE_SIZE * TILE_SIZE || blockIdx.x == blockedIter) { return; } int blockPosI, blockPosJ; if (blockIdx.y == 0) { // This is lead row blockPosI = blockedIter * TILE_SIZE; blockPosJ = blockIdx.x * TILE_SIZE; } else { // This is lead column blockPosI = blockIdx.x * TILE_SIZE; blockPosJ = blockedIter * TILE_SIZE; } int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; __shared__ uint32_t leadBlock[TILE_SIZE * STAGE_SIZE]; __shared__ uint32_t curBlock[TILE_SIZE * TILE_SIZE]; curBlock[locI * TILE_SIZE + locJ] = graph[glI * n + glJ]; __syncthreads(); size_t leadBlockOffset = blockedIter * TILE_SIZE; if (blockIdx.y == 0) { // This is lead row #pragma unroll for (size_t stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadBlock[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(leadBlockOffset + locJ) * n + leadBlockOffset + locI]; } __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = curBlock[(stage * STAGE_SIZE + locIter) * TILE_SIZE + locJ] + leadBlock[locI * STAGE_SIZE + locIter]; if (newPathLen < curBlock[locI * TILE_SIZE + locJ]) { curBlock[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } } } else { // This is lead column #pragma unroll for (size_t stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadBlock[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlockOffset + locI) * n + leadBlockOffset + locJ]; } __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = curBlock[locI * TILE_SIZE + stage * STAGE_SIZE + locIter] + leadBlock[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlock[locI * TILE_SIZE + locJ]) { curBlock[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } } } graph[glI * n + glJ] = curBlock[locI * TILE_SIZE + locJ]; } __global__ void CalculateKIterRestLeadBlocks(uint32_t *graph, 
uint32_t n, uint32_t blockedIter) { if (blockIdx.x == blockedIter || (blockIdx.y == 1 && blockIdx.x == blockedIter - 1)) { return; } __shared__ uint32_t leadRow[TILE_SIZE * STAGE_SIZE]; __shared__ uint32_t leadCol[TILE_SIZE * STAGE_SIZE]; uint32_t curBlockElem; int blockPosI, blockPosJ; if (blockIdx.y == 0) { // this is k-row blockPosI = (blockedIter - 1) * TILE_SIZE; blockPosJ = blockIdx.x * TILE_SIZE; } else { blockPosI = blockIdx.x * TILE_SIZE; blockPosJ = (blockedIter - 1) * TILE_SIZE; // this is k-column } int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; curBlockElem = graph[glI * n + glJ]; __syncthreads(); #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { size_t leadBlocksOffset = blockedIter * TILE_SIZE; if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlocksOffset + locI) * n + (blockPosJ + locJ)]; leadCol[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlocksOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } graph[glI * n + glJ] = curBlockElem; } __global__ void CalculateK1IterLeadBlock(uint32_t *graph, uint32_t n, uint32_t blockedIter) { __shared__ uint32_t sharedMatrix[TILE_SIZE * TILE_SIZE]; uint32_t curBlockElem; int blockPosI = (blockedIter + 1) * TILE_SIZE; int blockPosJ = (blockedIter + 1) * TILE_SIZE; int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; // if (glI >= n || glJ >= n || // glI >= TILE_SIZE * (blockedIter + 1) || glI < TILE_SIZE * blockedIter || // glJ >= TILE_SIZE * (blockedIter + 1) || glJ < TILE_SIZE * blockedIter) // { // return; // } curBlockElem = graph[glI * n + glJ]; __syncthreads(); #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { size_t leadBlocksOffset = blockedIter * TILE_SIZE; if (locI / STAGE_SIZE == stage) { sharedMatrix [(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlocksOffset + locI) * n + (blockPosJ + locJ)]; sharedMatrix[TILE_SIZE * STAGE_SIZE + locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlocksOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = sharedMatrix[TILE_SIZE * STAGE_SIZE + locI * STAGE_SIZE + locIter] + sharedMatrix [locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } // Now leadCol will be used as leadBlock sharedMatrix[locI * TILE_SIZE + locJ] = curBlockElem; __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < TILE_SIZE; ++locIter) { uint32_t newPathLen = sharedMatrix[locI * TILE_SIZE + locIter] + sharedMatrix[locIter * TILE_SIZE + locJ]; if (newPathLen < sharedMatrix[locI * TILE_SIZE + locJ]) { sharedMatrix[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } graph[glI * n + glJ] = sharedMatrix[locI * TILE_SIZE + locJ]; } __global__ void CalculateK1IterRowAndColumn(uint32_t *graph, uint32_t n, uint32_t blockedIter) { __shared__ uint32_t leadRow[TILE_SIZE * STAGE_SIZE]; __shared__ uint32_t leadCol[TILE_SIZE * TILE_SIZE]; uint32_t curBlockElem; // if (threadIdx.y * TILE_SIZE + threadIdx.x > TILE_SIZE * TILE_SIZE // || blockIdx.x == 
blockedIter + 1) // { // return; // } int blockPosI, blockPosJ; if (blockIdx.y == 0) { // This is k+1 row blockPosI = (blockedIter + 1) * TILE_SIZE; blockPosJ = blockIdx.x * TILE_SIZE; } else { // This is k+1 column blockPosI = blockIdx.x * TILE_SIZE; blockPosJ = (blockedIter + 1)* TILE_SIZE; } int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; curBlockElem = graph[glI * n + glJ]; __syncthreads(); size_t leadBlockOffset = blockedIter * TILE_SIZE; #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlockOffset + locI) * n + (blockPosJ + locJ)]; leadCol[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlockOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } // now leadRow will be used as leadBlock, and leadColumn as curBlock leadCol[locI * TILE_SIZE + locJ] = curBlockElem; __syncthreads(); leadBlockOffset = (blockedIter + 1) * TILE_SIZE; if (blockIdx.y == 0) { // This is lead row #pragma unroll for (size_t stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadRow[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(leadBlockOffset + locJ) * n + leadBlockOffset + locI]; } __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[(stage * STAGE_SIZE + locIter) * TILE_SIZE + locJ] + leadRow[locI * STAGE_SIZE + locIter]; if (newPathLen < leadCol[locI * TILE_SIZE + locJ]) { leadCol[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } } } else { // This is lead column #pragma unroll for (size_t stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlockOffset + locI) * n + leadBlockOffset + locJ]; } __syncthreads(); #pragma unroll for (size_t locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * TILE_SIZE + stage * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < leadCol[locI * TILE_SIZE + locJ]) { leadCol[locI * TILE_SIZE + locJ] = newPathLen; } __syncthreads(); } } } graph[glI * n + glJ] = leadCol[locI * TILE_SIZE + locJ]; } __global__ void CalculateRestBlocks(uint32_t *graph, uint32_t n, uint32_t blockedIter) { __shared__ uint32_t leadRow[TILE_SIZE * STAGE_SIZE]; __shared__ uint32_t leadCol[TILE_SIZE * STAGE_SIZE]; uint32_t curBlockElem; if (blockIdx.x == blockedIter || blockIdx.y == blockedIter || blockIdx.x == blockedIter + 1 || blockIdx.y == blockedIter + 1) { return; } int blockPosI = blockIdx.y * TILE_SIZE; int blockPosJ = blockIdx.x * TILE_SIZE; int locI = threadIdx.y; int locJ = threadIdx.x; int glI = blockPosI + threadIdx.y; int glJ = blockPosJ + threadIdx.x; curBlockElem = graph[glI * n + glJ]; __syncthreads(); #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { size_t leadBlocksOffset = blockedIter * TILE_SIZE; if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlocksOffset + locI) * n + (blockPosJ + locJ)]; leadCol[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlocksOffset + 
locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } #pragma unroll for (int stage = 0; stage < TILE_SIZE / STAGE_SIZE; ++stage) { size_t leadBlocksOffset = (blockedIter + 1) * TILE_SIZE; if (locI / STAGE_SIZE == stage) { leadRow[(locI % STAGE_SIZE) * TILE_SIZE + locJ] = graph[(leadBlocksOffset + locI) * n + (blockPosJ + locJ)]; leadCol[locJ * STAGE_SIZE + (locI % STAGE_SIZE)] = graph[(blockPosI + locJ) * n + (leadBlocksOffset + locI)]; } __syncthreads(); #pragma unroll for (int locIter = 0; locIter < STAGE_SIZE; ++locIter) { uint32_t newPathLen = leadCol[locI * STAGE_SIZE + locIter] + leadRow[locIter * TILE_SIZE + locJ]; if (newPathLen < curBlockElem) { curBlockElem = newPathLen; } } __syncthreads(); } graph[glI * n + glJ] = curBlockElem; } __host__ void FloydBlocked(uint32_t *h_graph, uint32_t *h_floydResult, uint32_t n) { // Copy graph to device global memory auto start = std::chrono::steady_clock::now(); uint32_t *d_graph; cudaMalloc(&d_graph, sizeof(uint32_t) * n * n); cudaMemcpy(d_graph, h_graph, sizeof(uint32_t) * n * n, cudaMemcpyHostToDevice); dim3 firstStepGridSize(1, 1, 1); dim3 firstStepBlockSize(TILE_SIZE, TILE_SIZE, 1); dim3 secondStepGridSize((n - 1) / TILE_SIZE + 1, 2, 1); dim3 secondStepBlockSize(TILE_SIZE, TILE_SIZE, 1); dim3 thirdStepGridSize((n - 1)/ TILE_SIZE + 1, (n - 1)/ TILE_SIZE + 1, 1); dim3 thirdStepBlockSize(TILE_SIZE, TILE_SIZE, 1); cudaError_t cudaStatus; cudaEvent_t stepFinishedEvent; cudaEventCreate(&stepFinishedEvent); for (int blockedIteration = 0; blockedIteration < n / TILE_SIZE; blockedIteration += 2) { // K Block + Row + Column - only k iterations CalculateKIterLeadBlock<<<firstStepGridSize, firstStepBlockSize>>> (d_graph, n, blockedIteration); cudaStatus = cudaGetLastError(); HANDLE_ERROR(cudaStatus); cudaEventRecord(stepFinishedEvent); cudaEventSynchronize(stepFinishedEvent); CalculateKIterLeadRowAndColumn<<<secondStepGridSize, secondStepBlockSize>>> (d_graph, n, blockedIteration); cudaStatus = cudaGetLastError(); HANDLE_ERROR(cudaStatus); cudaEventRecord(stepFinishedEvent); cudaEventSynchronize(stepFinishedEvent); // K + 1 Block + Row + Column - k+1 iterations CalculateK1IterLeadBlock<<<firstStepGridSize, firstStepBlockSize>>> (d_graph, n, blockedIteration); cudaStatus = cudaGetLastError(); HANDLE_ERROR(cudaStatus); cudaEventRecord(stepFinishedEvent); cudaEventSynchronize(stepFinishedEvent); CalculateK1IterRowAndColumn<<<secondStepGridSize, secondStepBlockSize>>> (d_graph, n, blockedIteration); cudaStatus = cudaGetLastError(); HANDLE_ERROR(cudaStatus); cudaEventRecord(stepFinishedEvent); cudaEventSynchronize(stepFinishedEvent); // K Block + Row + Column - k iterations (as 2-dependent blocks) CalculateKIterRestLeadBlocks<<<secondStepGridSize, secondStepBlockSize>>> (d_graph, n, blockedIteration + 1); cudaStatus = cudaGetLastError(); HANDLE_ERROR(cudaStatus); cudaEventRecord(stepFinishedEvent); cudaEventSynchronize(stepFinishedEvent); // Calculate all other blocks CalculateRestBlocks<<<thirdStepGridSize, thirdStepBlockSize>>> (d_graph, n, blockedIteration); cudaStatus = cudaGetLastError(); HANDLE_ERROR(cudaStatus); cudaEventRecord(stepFinishedEvent); cudaEventSynchronize(stepFinishedEvent); } cudaStatus = cudaGetLastError(); HANDLE_ERROR(cudaStatus); // Copy results to host cudaMemcpy(h_floydResult, d_graph, sizeof(int) * n * n, 
cudaMemcpyDeviceToHost); // Calculate all time used by cuda, and print it to console auto duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now() - start); std::cout << n << " " << duration.count() << std::endl; cudaFree(d_graph); } __host__ int main(int argc, char **argv) { if (argc < 3) { std::cout << "usage: " << argv[0] << " graph_path results_path" << std::endl; return 1; } // Read vertex count and all graph uint32_t n; std::fstream graph_reader(argv[1], std::fstream::in | std::fstream::binary); graph_reader.read((char*)&n, 4); if (n % (TILE_SIZE * 2) != 0) { std::cout << "Number of vertex shoud be divided by tile size (just for easier implementation). " << "Tile size: " << TILE_SIZE << ". Vertex's count: " << n << "." << std::endl; graph_reader.close(); return 1; } uint32_t *h_graph = new uint32_t[n * n]; uint32_t *h_floydResult = new uint32_t[n * n]; for (size_t i = 0; i < n * n; ++i) { uint8_t current_elem; graph_reader.read((char *)&current_elem, 1); h_graph[i] = current_elem; } graph_reader.close(); // Run empty task on cuda - it will decrease time of first run int threadNum = std::min(n, uint32_t(32)); dim3 gridSize(n / threadNum + 1, n / threadNum + 1, 1); dim3 cudaBlockSize(threadNum, threadNum, 1); WakeGpuKernel<<<1, cudaBlockSize>>>(32); // Blocked Floyd-Warshall algorithm on cuda FloydBlocked(h_graph, h_floydResult, n); // Write Floyd results to file std::fstream result_writer(argv[2], std::fstream::out | std::fstream::binary); for (size_t i = 0; i < n * n; ++i) { result_writer.write((char*)&h_floydResult[i], 4); } result_writer.close(); delete[] h_graph; delete[] h_floydResult; return 0; }
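// ---------------------------------------------------------------------------
// Illustrative appendix (not part of the file above): a plain CPU reference of
// the Floyd-Warshall recurrence that the tiled kernels implement,
//   dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])  for k = 0..n-1,
// handy for validating h_floydResult on small graphs. floyd_reference is a
// hypothetical helper name, not something defined in the file above.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdint>
#include <vector>

void floyd_reference(std::vector<uint32_t>& dist, uint32_t n) {
  for (uint32_t k = 0; k < n; ++k)
    for (uint32_t i = 0; i < n; ++i)
      for (uint32_t j = 0; j < n; ++j)
        dist[i * n + j] =
            std::min(dist[i * n + j], dist[i * n + k] + dist[k * n + j]);
}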
d51bb34e65524012e14cb68a8a96c5fd3aa2165f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define tw 2

// Element-wise addition of two n x n matrices stored in row-major order.
__global__ void matadd(int *a, int *b, int *c, int n){
    int ix = tw*blockIdx.x + threadIdx.x;
    int iy = tw*blockIdx.y + threadIdx.y;
    int idx = iy*n + ix;
    if(ix < n && iy < n)
        c[idx] = a[idx] + b[idx];
}

int main(void)
{
    int n;
    scanf("%d",&n);
    int a[n][n];
    int b[n][n];
    int c[n][n];
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            scanf("%d",&a[i][j]);
        }
    }
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            scanf("%d",&b[i][j]);
        }
    }
    int *a_d, *b_d, *c_d;
    hipMalloc((void **)&a_d, n*n*sizeof(int));
    hipMalloc((void **)&b_d, n*n*sizeof(int));
    hipMalloc((void **)&c_d, n*n*sizeof(int));
    hipMemcpy(a_d, a, n*n*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(b_d, b, n*n*sizeof(int), hipMemcpyHostToDevice);
    // Round the grid up so a partial tile still covers the last rows/columns.
    dim3 dimGrid((n+tw-1)/tw,(n+tw-1)/tw,1);
    dim3 dimBlock(tw,tw,1);
    hipLaunchKernelGGL(( matadd), dim3(dimGrid),dim3(dimBlock), 0, 0, a_d,b_d,c_d,n);
    hipMemcpy(c,c_d,n*n*sizeof(int),hipMemcpyDeviceToHost);
    // Print the result values (the original printed &c[i], i.e. row addresses).
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            printf("%d ",c[i][j]);
        }
        printf("\n");
    }
    hipFree(a_d);
    hipFree(b_d);
    hipFree(c_d);
    return 0;
}
d51bb34e65524012e14cb68a8a96c5fd3aa2165f.cu
#include <stdio.h>
#define tw 2

// Element-wise addition of two n x n matrices stored in row-major order.
__global__ void matadd(int *a, int *b, int *c, int n){
    int ix = tw*blockIdx.x + threadIdx.x;
    int iy = tw*blockIdx.y + threadIdx.y;
    int idx = iy*n + ix;
    if(ix < n && iy < n)
        c[idx] = a[idx] + b[idx];
}

int main(void)
{
    int n;
    scanf("%d",&n);
    int a[n][n];
    int b[n][n];
    int c[n][n];
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            scanf("%d",&a[i][j]);
        }
    }
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            scanf("%d",&b[i][j]);
        }
    }
    int *a_d, *b_d, *c_d;
    cudaMalloc((void **)&a_d, n*n*sizeof(int));
    cudaMalloc((void **)&b_d, n*n*sizeof(int));
    cudaMalloc((void **)&c_d, n*n*sizeof(int));
    cudaMemcpy(a_d, a, n*n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, n*n*sizeof(int), cudaMemcpyHostToDevice);
    // Round the grid up so a partial tile still covers the last rows/columns.
    dim3 dimGrid((n+tw-1)/tw,(n+tw-1)/tw,1);
    dim3 dimBlock(tw,tw,1);
    matadd<<<dimGrid,dimBlock>>>(a_d,b_d,c_d,n);
    cudaMemcpy(c,c_d,n*n*sizeof(int),cudaMemcpyDeviceToHost);
    // Print the result values (the original printed &c[i], i.e. row addresses).
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            printf("%d ",c[i][j]);
        }
        printf("\n");
    }
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    return 0;
}
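// ---------------------------------------------------------------------------
// Illustrative appendix (not part of the file above): the ceiling-division
// idiom used for the launch configuration. For an n x n matrix and tile width
// tw, each grid dimension needs ceil(n / tw) = (n + tw - 1) / tw blocks so the
// last partial tile is still covered; the ix < n && iy < n guard in matadd
// then discards the out-of-range threads. div_up is a hypothetical helper.
// ---------------------------------------------------------------------------
#include <cstdio>

static unsigned int div_up(unsigned int n, unsigned int tile) {
  return (n + tile - 1) / tile;
}

int main() {
  const unsigned int sizes[] = {4u, 5u, 7u, 8u};
  for (unsigned int n : sizes)
    printf("n=%u tw=2 -> %u blocks per grid dimension\n", n, div_up(n, 2));
  return 0;
}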
5e2cc9eb113baa4a5909a55b367d105507d259a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define real float // Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW) // we assume BHWD format in inputImages // we assume BHW(YX) format on grids __device__ bool between(int value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } __device__ void getTopLeft(float x, int width, int& point, float& weight) { /* for interpolation : stores in point and weight : - the x-coordinate of the pixel on the left (or y-coordinate of the upper pixel) - the weight for interpolating */ float xcoord = (x + 1) * (width - 1) / 2; point = floor(xcoord); weight = 1 - (xcoord - point); } __global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth, float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth, float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth, int inputImages_channels, int inputImages_height, int inputImages_width, int output_width) { // each (32,16) block 16 output pixels (for coalescing the grid read) // x,y = coordinates (xOut = blockIdx.x*16+blockDim.y+threadIdx.y) // z = batch index // threadIdx.x : used for features (coalescing is trivial) const int xOut = blockIdx.x*blockDim.y+threadIdx.y; const bool withinImageBounds = xOut < output_width; const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < output_width; const int yOut = blockIdx.y; const int width = inputImages_width; const int height = inputImages_height; const int b = blockIdx.z; float yf,xf; __shared__ float gridData[32]; if (threadIdx.y==0 && withinGridBounds) { gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x]; } __syncthreads(); if(!withinImageBounds) return; yf = gridData[threadIdx.y*2]; xf = gridData[threadIdx.y*2+1]; int yInTopLeft, xInTopLeft; float yWeightTopLeft, xWeightTopLeft; getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft); getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft); const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut; const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft; const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth; const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight; const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth; float v=0; float inTopLeft=0; float inTopRight=0; float inBottomLeft=0; float inBottomRight=0; bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1); bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1); bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1); bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1); // interpolation happens here for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x) { if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress + t]; if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress + t]; if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress + t]; 
if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress + t]; v = xWeightTopLeft * yWeightTopLeft * inTopLeft + (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight + xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft + (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight; output_data[outAddress + t] = v; } }
5e2cc9eb113baa4a5909a55b367d105507d259a4.cu
#include "includes.h" #define real float // Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW) // we assume BHWD format in inputImages // we assume BHW(YX) format on grids __device__ bool between(int value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } __device__ void getTopLeft(float x, int width, int& point, float& weight) { /* for interpolation : stores in point and weight : - the x-coordinate of the pixel on the left (or y-coordinate of the upper pixel) - the weight for interpolating */ float xcoord = (x + 1) * (width - 1) / 2; point = floor(xcoord); weight = 1 - (xcoord - point); } __global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth, float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth, float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth, int inputImages_channels, int inputImages_height, int inputImages_width, int output_width) { // each (32,16) block 16 output pixels (for coalescing the grid read) // x,y = coordinates (xOut = blockIdx.x*16+blockDim.y+threadIdx.y) // z = batch index // threadIdx.x : used for features (coalescing is trivial) const int xOut = blockIdx.x*blockDim.y+threadIdx.y; const bool withinImageBounds = xOut < output_width; const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < output_width; const int yOut = blockIdx.y; const int width = inputImages_width; const int height = inputImages_height; const int b = blockIdx.z; float yf,xf; __shared__ float gridData[32]; if (threadIdx.y==0 && withinGridBounds) { gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x]; } __syncthreads(); if(!withinImageBounds) return; yf = gridData[threadIdx.y*2]; xf = gridData[threadIdx.y*2+1]; int yInTopLeft, xInTopLeft; float yWeightTopLeft, xWeightTopLeft; getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft); getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft); const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut; const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft; const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth; const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight; const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth; float v=0; float inTopLeft=0; float inTopRight=0; float inBottomLeft=0; float inBottomRight=0; bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1); bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1); bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1); bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1); // interpolation happens here for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x) { if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress + t]; if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress + t]; if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress + t]; if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress + t]; v = 
xWeightTopLeft * yWeightTopLeft * inTopLeft + (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight + xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft + (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight; output_data[outAddress + t] = v; } }
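// ---------------------------------------------------------------------------
// Illustrative appendix (not part of the file above): a host-side sketch of
// the bilinear weights used in bilinearSamplingFromGrid. A sample at (xf, yf)
// in [-1, 1] grid coordinates is a convex combination of its four neighbours
// with weights xW*yW, (1-xW)*yW, xW*(1-yW) and (1-xW)*(1-yW), mirroring
// getTopLeft(). The image size and coordinates below are made-up examples.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

int main() {
  int width = 8, height = 8;
  float xf = 0.30f, yf = -0.10f;              // normalized coords in [-1, 1]
  float xc = (xf + 1) * (width - 1) / 2;      // same mapping as getTopLeft
  float yc = (yf + 1) * (height - 1) / 2;
  int x0 = (int)std::floor(xc), y0 = (int)std::floor(yc);
  float xW = 1 - (xc - x0), yW = 1 - (yc - y0);
  printf("top-left pixel (%d,%d), weights tl=%.3f tr=%.3f bl=%.3f br=%.3f\n",
         x0, y0, xW * yW, (1 - xW) * yW, xW * (1 - yW), (1 - xW) * (1 - yW));
  return 0;
}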
b3cc481f38a3d13f4a124df0f21a05404b6aea8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by jing on 2018/7/1. // // add vscode sup #include "cuckoo.h" #include <assert.h> #include <device_launch_parameters.h> #include "api.h" // Supported operations #define ADD (0) #define DELETE (1) #define SEARCH (2) #define debug_num 30 #define single_BUCKET 15629 /// hash table __constant__ cuckoo table; #define get_table_length(i) get_table_bucket_length(i) #define get_table_bucket_length(i) (table.Lsize[i]/BUCKET_SIZE) /// Lsize0 is the biggest #define Lock_pos(num,hash) ((num) * (get_table_length(0)) + hash) #define parameter_of_hash_function_a(num) (table.hash_fun[num].x) #define parameter_of_hash_function_b(num) (table.hash_fun[num].y) /// hash functiong __device__ __forceinline__ TYPE get_next_loc(TYPE k, TYPE num_table) { return ( k^ parameter_of_hash_function_a(num_table) + parameter_of_hash_function_b(num_table) ) % PRIME_uint % get_table_length(num_table); } /// for debug __device__ void pbucket(bucket *b,int num,int hash,int t_size) { printf("table.%d,%d/%d \n",num,hash,t_size); for(int i=0;i<BUCKET_SIZE;i++){ if(i%8==0) printf("\n\t"); printf("%d,%d ",b->key[i],b->value[i]); } printf("\n"); } __global__ void cuckoo_insert(TYPE* key, /// key to insert TYPE* value, /// value to insert TYPE size, /// insert size int* resize) /// insert error? { *resize = 0; int tid = blockIdx.x * blockDim.x + threadIdx.x; /// for every k /// warp cooperation int lan_id = threadIdx.x & 0x1f; int warp_num_in_all = tid >> 5; TYPE myk, myv; TYPE evict_time_of_one_thread = 0; int hash; TYPE operator_hash_table_num = 0; /// using for ballot & CAS int tmp=2; /// first read data myk = key[warp_num_in_all]; myv = value[warp_num_in_all]; while (warp_num_in_all < size) { ///for re lock , try other table operator_hash_table_num++; operator_hash_table_num %= TABLE_NUM; hash = get_next_loc(myk, operator_hash_table_num); /// step3.1 lock & un compress TODO: compress /// lock ,otherwise revote if (lan_id == 0) { tmp = atomicCAS(&(table.Lock[Lock_pos(operator_hash_table_num, hash)]), 0, 1); }//end if tmp=__shfl(tmp, 0); if(tmp==1) { continue; } /// bucket bucket *b = &(table.table[operator_hash_table_num][hash]); tmp = __ballot(b->key[lan_id] == myk); if (tmp != 0) { /// update if(lan_id==__ffs(tmp) - 1) { b->value[lan_id] = myv; } table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0; tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; evict_time_of_one_thread = 0; myk = key[warp_num_in_all]; myv = value[warp_num_in_all]; continue; }//end check update /// step3.3 check null & insert tmp = __ballot(b->key[lan_id] == 0); if (tmp != 0) { if (lan_id == __ffs(tmp) - 1) { b->key[lan_id] = myk; b->value[lan_id] = myv; }// insert table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0; tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; evict_time_of_one_thread = 0; myk = key[warp_num_in_all]; myv = value[warp_num_in_all]; continue; }/// null insert over /// step3.4 other,we need cuckoo evict TYPE tmpk=myk,tmpv=myv; /// choose pos:lan_id evict ,TODO: choose rand? 
int evict_pos=myk & 0x1f; myk = b->key[evict_pos]; myv = b->value[evict_pos]; b->key[evict_pos] = tmpk; b->value[evict_pos] = tmpv; evict_time_of_one_thread++; table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0; /// when one always get leader , mark rehash /// check long chain if (evict_time_of_one_thread >= MAX_ITERATOR) { *resize = 1; tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; evict_time_of_one_thread = 0; myk = key[warp_num_in_all]; myv = value[warp_num_in_all]; continue; } }//while size }//cucukoo insert __global__ void cuckoo_search(TYPE* key, /// key to s TYPE* value, /// value to key TYPE size) /// s size { int tid = blockIdx.x * blockDim.x + threadIdx.x; /// for every k #if head_info_debug if(tid==0) { printf(">>>search kernel:\n>>>size:%d \n", size); printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n", table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]); } #endif int lan_id = threadIdx.x & 0x0000001f; int warp_num_in_block = threadIdx.x >> 5; volatile __shared__ int warp[( THREAD_NUM)>>5 ]; TYPE myk; int is_active; TYPE work_k = 0; /// for search int hash; int operator_hash_table_num; int ballot; bucket *b; /// ((size+31)>>5)<<5 :keep a warp to active while ( tid < (((size + 31) >> 5) << 5) ) { if(tid<size) { myk = key[tid]; is_active = 1;/// mark for work } /// while have work to do while (__any(is_active != 0)) { operator_hash_table_num=0; /// step1 start voting ================================== if (is_active != 0) warp[warp_num_in_block] = lan_id; #if search_debug if(lan_id==0) printf("voting: %d\t",warp[warp_num_in_block] ); #endif work_k = myk; /// step2 broadcast ==================================== work_k=__shfl(work_k, warp[warp_num_in_block]); /// step3 find in 5 table =========================== /// find null or too long for (int i = 0; i < TABLE_NUM; i++) { operator_hash_table_num = i; hash = get_next_loc(work_k, operator_hash_table_num); b=&table.table[operator_hash_table_num][hash]; ballot=__ballot(b->key[lan_id]==work_k); /// find it if(ballot!=0){ if(lan_id==warp[warp_num_in_block]){ value[tid]=b->value[__ffs(ballot)-1]; #if search_debug printf("find %d: %d\n",key[tid],value[tid]); #endif is_active=0; } break; } }/// end for /// can not find if(lan_id==warp[warp_num_in_block]){ if(is_active==1) value[tid]=2; //printf("cannot find k: %d ,tid:%d ",myk,tid); //pbucket(b,operator_hash_table_num,hash,get_table_length(operator_hash_table_num)); is_active=0; } } tid += BLOCK_NUM * THREAD_NUM; } }//cuckoo_search /// del and return value __global__ void cuckoo_delete(TYPE* key, /// key to del TYPE* value, /// value to return TYPE size) /// size { int tid = blockIdx.x * blockDim.x + threadIdx.x; /// for every k #if head_info_debug if(tid==0) { printf(">>>delete kernel:\n>>>size:%d \n", size); printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n", table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]); } #endif int lan_id = threadIdx.x & 0x0000001f; int warp_num_in_block = threadIdx.x >> 5; volatile __shared__ int warp[( THREAD_NUM)>>5 ]; TYPE myk; int is_active; TYPE work_k = 0; /// for search int hash; int operator_hash_table_num; int ballot; bucket *b; /// ((size+31)>>5)<<5 :keep a warp to active while ( tid < (((size + 31) >> 5) << 5) ) { if(tid<size) { myk = key[tid]; is_active = 1;/// mark for work } /// while have work to do while (__any(is_active != 0)) { operator_hash_table_num=0; /// step1 start voting ================================== if (is_active != 0) warp[warp_num_in_block] = lan_id; #if search_debug if(lan_id==0) 
printf("voting: %d\t",warp[warp_num_in_block] ); #endif work_k = myk; /// step2 broadcast ==================================== work_k=__shfl(work_k, warp[warp_num_in_block]); /// step3 find in 5 table =========================== /// find null or too long for (int i = 0; i < TABLE_NUM; i++) { operator_hash_table_num = i; hash = get_next_loc(work_k, operator_hash_table_num); b=&table.table[operator_hash_table_num][hash]; ballot=__ballot(b->key[lan_id]==work_k); /// find it if(ballot!=0){ if(lan_id==warp[warp_num_in_block]){ value[tid]=b->value[__ffs(ballot)-1]; #if search_debug printf("find %d: %d\n",key[tid],value[tid]); #endif ///step3.1 if find, set to zero =========================== b->key[__ffs(ballot)-1]=0; b->value[__ffs(ballot)-1]=0; is_active=0; } break; } }/// end for /// can not find if(lan_id==warp[warp_num_in_block]){ is_active=0; } } tid += BLOCK_NUM * THREAD_NUM; } }//cuckoo_delete void __global__ cuckoo_resize_up(bucket* old_table, /// new table has been set to table int old_size, TYPE num_table_to_resize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int warp_num_in_all = tid >> 5; int lan_id = tid & 0x1f; /// take kv to insert TYPE key, value; int hash; /// hold old one bucket to op bucket *b; ///step1 ====================== bucket *new_table = table.table[num_table_to_resize]; ///step2 warpbucket ====================== old_size /= BUCKET_SIZE; while (warp_num_in_all < old_size) { ///step2.1 bucket ====================== b = &old_table[warp_num_in_all]; ///step2.2 bucket====================== key = b->key[lan_id]; value = b->value[lan_id]; if (key != 0) { /// how to use tid & hash fun hash = get_next_loc(key, num_table_to_resize); new_table[hash].key[lan_id] = key; new_table[hash].value[lan_id] = value; } tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; } }//cuckoo_resize_up void __global__ cuckoo_resize_down(bucket* old_table, /// small int old_size, int num_table_to_resize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; #if head_info_debug if(tid==0) { printf(">>>down_size kernel: %d->%d\n",old_size,table.Lsize[num_table_to_resize]); printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n", table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]); } #endif bucket *b=NULL; bucket *des_b=NULL; /// take kv to insert TYPE key, value; /// insert position int hash; int new_bucket_size = table.Lsize[num_table_to_resize] / BUCKET_SIZE; /// warp coopration int warp_num_in_all = tid >> 5; int warp_num_in_block = threadIdx.x >> 5; int lan_id = tid & 0x1f; int is_active; int ballot; /// in block , for voting volatile __shared__ int warp[(THREAD_NUM) >> 5]; ///step1 ====================== /// bucket *new_table = table.table[num_table_to_resize]; /// end ,next : old->new #if down_size_debug if (tid==0) printf("step start \n"); #endif ///step2 warp2bucket->bucket ====================== /// tid tid+new_bucket_size bucket tid bucket /// PROBLEM new_bucket_size * 2 = old_size (api.cpp line 47) /// old_size downsize /// PROBLEM: bucketbucket /// /// /// 1. add /// 2. kv scanshared /// 3. 
warp /// one thread one block while (warp_num_in_all < new_bucket_size) { /// new size is smaller ///step2.1 bucket ====================== /// warp_num_in_all is hash_value des_b = &new_table[warp_num_in_all]; #if down_size_debug if (tid==0) printf("step2.1 start \n"); #endif ///step2.2 bucket ====================== /// bucketkv b = &old_table[warp_num_in_all]; key = b->key[lan_id]; value = b->value[lan_id]; #if down_size_debug if(tid==0){ printf("old table1\n"); pbucket(b,0,0,0); } if(warp_num_in_all==0) printf("b1-%d: %d,%d\n",lan_id,key,value); #endif int crose_lan_id=31-lan_id; /// kvbucket b = &old_table[warp_num_in_all + new_bucket_size]; if (key == 0) { key = b->key[crose_lan_id]; value = b->value[crose_lan_id]; } ///bucket #if down_size_debug if(tid==0){ printf("old table2\n"); pbucket(b,0,0,0); } if(warp_num_in_all==0) printf("b1-%d: %d,%d\n",lan_id,key,value); #endif ///step2.3 kv===================== des_b->key[lan_id] = key; des_b->value[lan_id] = value; #if down_size_debug || down_size_cas_insert_debug if(tid==0) printf("write\n"); if(tid==0) pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize)); #endif is_active=0; ///step2.4 bucketkv ====================== if (key != b->key[crose_lan_id] /// && b->key[crose_lan_id] !=0) /// { key = b->key[crose_lan_id]; value = b->value[crose_lan_id]; is_active = 1; } #if down_size_debug || down_size_cas_insert_debug if(warp_num_in_block==0) printf("b1-%d: %d,%d - %d\n",lan_id,key,value,is_active); #endif ///step2.5 bucketkv====================== /// PROBLEM: how about skip step2.5 use step3 directly /// scan /// ballot = __ballot(des_b->key[lan_id] == 0); #if down_size_debug if( tid==0 && ballot == 0 ) printf("step 2.5 , full\n"); #endif while (__any(des_b->key[lan_id] == 0)) { #if down_size_debug if(tid==0) printf("step 2.5 \n"); #endif if(!__any(is_active==1)) break; #if down_size_debug if(tid==0) pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize)); #endif /// ballot = __ballot(des_b->key[lan_id] == 0); /// use hash as tmp to decrease register /// hash = __ffs(ballot) - 1; /// if (is_active == 1) warp[warp_num_in_block] = lan_id; /// insert if (warp[warp_num_in_block] == lan_id) { des_b->key[hash] = key; des_b->value[hash] = value; is_active=0; } } ///step 3 ====================== #if down_size_debug || down_size_cas_insert_debug if(tid==0) printf("after2.5 start3\n"); if(tid==0) pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize)); #endif /// key value has kv to insert TYPE work_k,work_v; int operator_hash_table_num=0; int lead_thread_num; #if down_size_cas_insert_debug if(warp_num_in_all==0) { printf("b1-%d: %d,%d - %d\n",lan_id,key,value,is_active); } #endif int times_of_evict=0; while (__any(is_active != 0)) { /// using logic of cuckoo_insert (__global__) /// how to reuse the code ? /// TODO , check too long evict work_k = key; work_v = value; /// step3.1 start voting ================================== if (is_active != 0)//&& warp[warp_num_in_block]!=lan_id ) warp[warp_num_in_block] = lan_id; /// leader is lead_thread_num lead_thread_num = warp[warp_num_in_block]; if(lead_thread_num==lan_id) times_of_evict++; /// step3.2 broadcast ==================================== work_k = __shfl(work_k, lead_thread_num); work_v = __shfl(work_v, lead_thread_num); /// step3.3 insert to the table. 
=========================== operator_hash_table_num ++; /// donot insert to table:num_table_to_resize full if (operator_hash_table_num==num_table_to_resize ) { operator_hash_table_num++; } operator_hash_table_num %= TABLE_NUM; hash = get_next_loc(work_k, operator_hash_table_num); /// step3.4 lock TODO: compress =========================== /// using ballot as tmp to decrease register /// lock ,otherwise revote if (lan_id == lead_thread_num) { /// TODO: different length need to sum ,tmp using double length ballot = atomicCAS(&(table.Lock[Lock_pos(operator_hash_table_num,hash)]), 0, 1); }//end if ballot = __shfl(ballot, lead_thread_num); if (ballot == 1) continue; b = &(table.table[operator_hash_table_num][hash]); #if down_size_cas_insert_debug ballot=__ballot(is_active==1); if(warp_num_in_block==0 && lan_id==0){ printf("\n\nactive ballot:%x kv %d,%d lead_thread_num:%d\n", ballot,work_k,work_v,lead_thread_num ); pbucket(b,operator_hash_table_num,hash,get_table_length(operator_hash_table_num)); } #endif /// step3.5 check exist & insert ballot = __ballot(b->key[lan_id] == work_k); if (ballot != 0) { /// update if (lan_id == lead_thread_num) { b->value[__ffs(ballot) - 1] = value; is_active = 0; #if down_size_cas_insert_debug if(warp_num_in_block==0) { printf("exit after insert \n"); pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num)); } #endif table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0; times_of_evict=0; }// end if ,upadte continue; }//end check update /// step3.6 check null & insert ballot = __ballot(b->key[lan_id] == 0); #if down_size_cas_insert_debug if(warp_num_in_block==0) printf("%d,",lan_id); if(tid==0){ printf("\n\nnull ballot:%x kv %d,%d lead_thread_num:%d \n", ballot,work_k,work_v,lead_thread_num); } #endif if (ballot != 0) { /// set kv if (lan_id == __ffs(ballot) - 1) { b->key[lan_id] = work_k; b->value[lan_id] = work_v; /// free table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0; #if down_size_cas_insert_debug if(warp_num_in_block==0) { printf("null after insert \n"); pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num)); } #endif }// insert /// mark active false if (lan_id == lead_thread_num){ times_of_evict=0; is_active = 0; } continue; }/// null insert over /// step3.7 other,we need cuckoo evict if (lan_id == lead_thread_num){ key = b->key[lan_id]; value = b->value[lan_id]; b->key[lan_id] = work_k; b->value[lan_id] = work_v; #if down_size_cas_insert_debug if(warp_num_in_block==0) { printf("evict after insert \n"); pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num)); } #endif table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0; if(times_of_evict>MAX_ITERATOR){ is_active=0; printf("downsizeing can insert %d %d,tid %d ",key,value,tid); times_of_evict=0; } } // evict } /// TODO:auto configure ,what should be add to tid tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; } }//cuckoo_resize_down int choose_block_num(TYPE size); void GPU_cuckoo_resize_up(int num_table_to_resize, TYPE old_size, bucket* new_table, cuckoo *h_table) { checkCudaErrors(hipGetLastError()); TYPE new_size=old_size*2; /// set table & size it needed bucket* old_table=h_table->table[num_table_to_resize]; h_table->Lsize[num_table_to_resize]=new_size; h_table->table[num_table_to_resize]=new_table; hipMemcpyToSymbol(table,h_table,sizeof(cuckoo)); /// TODO: auto configure /// kernel Configuration dim3 block=choose_block_num(old_size); /// kernel launch GpuTimer timer; timer.Start(); 
hipLaunchKernelGGL(( cuckoo_resize_up), dim3(block),dim3(THREAD_NUM), 0, 0, old_table,old_size,num_table_to_resize); timer.Stop(); double diff = timer.Elapsed()*1000000; printf("kernel <<<upsize>>>the time is %.2lf us, ( %.2f Mops)s\n", (double)diff, (double)(new_size) / diff); }//GPU_cuckoo_resize_up void GPU_cuckoo_resize_down(int num_table_to_resize, TYPE old_size, bucket* new_table, cuckoo *h_table) { /// bucket to size : << 5 int new_size=((get_table_bucket_size(num_table_to_resize)+1)/2) << 5; //printf("down_size : %d : szie%d->%d.",num_table_to_resize,old_size,new_size); /// set table & size it needed bucket* old_table=h_table->table[num_table_to_resize]; h_table->Lsize[num_table_to_resize]=new_size; h_table->table[num_table_to_resize]=new_table; hipMemcpyToSymbol(table,h_table,sizeof(cuckoo)); dim3 block=choose_block_num(old_size); /// kernel launch hipLaunchKernelGGL(( cuckoo_resize_down), dim3(block),dim3(THREAD_NUM), 0, 0, old_table,old_size,num_table_to_resize); }//GPU_cuckoo_resize_down /// show table by key,value __global__ void show_table() { if (blockIdx.x * blockDim.x + threadIdx.x > 0) return; /// i is the table num for (int i = 0; i < TABLE_NUM; i++) { printf("\n\n\ntable:%d\n", i); /// j is the bucket num for (int j = 0; j < get_table_length(i); j++) { printf("bucket:%d\n", j); /// t is every slot(one bucket has 32 slot) for (int t = 0; t < BUCKET_SIZE; t++) { /// 8 slot a line if (t % 8 == 0) printf("\n\t\t"); printf(" %d,%d ", table.table[i][j].key[t], table.table[i][j].value[t]); } printf("\n"); } } } void GPU_show_table(){ hipLaunchKernelGGL(( show_table), dim3(1),dim3(1), 0, 0, ); } void gpu_lp_insert(TYPE* key, TYPE* value, TYPE size, int* resize) { dim3 block=choose_block_num(size*32); GpuTimer time; time.Start(); hipLaunchKernelGGL(( cuckoo_insert) , dim3(block), dim3(THREAD_NUM) , 0, 0, key, value, size, resize); time.Stop(); double diff = time.Elapsed() * 1000000; printf("kernel <<<insert>>>the time is %.2lf us ( %.2f Mops)\n", (double) diff, (double) (size) / diff); }//gpu_lp_insert void gpu_lp_search(TYPE* key, TYPE* ans, TYPE size){ dim3 block=choose_block_num(size); GpuTimer time; time.Start(); hipLaunchKernelGGL(( cuckoo_search), dim3(block),dim3(THREAD_NUM), 0, 0, key,ans,size); time.Stop(); double diff = time.Elapsed() * 1000000; printf("kernel <<<search>>>the time is %.2lf us, ( %.2f Mops)s\n", (double)diff, (double)(size) / diff); // checkCudaErrors(hipGetLastError()); } void gpu_lp_delete(TYPE* key, TYPE* ans, TYPE size){ dim3 block=choose_block_num(size); GpuTimer time; time.Start(); hipLaunchKernelGGL(( cuckoo_delete), dim3(block),dim3(THREAD_NUM), 0, 0, key,ans,size); time.Stop(); double diff = time.Elapsed() * 1000000; printf("delete <<<delete>>>the time is %.2lf us, ( %.2f Mops)s\n", (double)diff, (double)(size) / diff); // checkCudaErrors(hipGetLastError()); } void gpu_lp_set_table(cuckoo *h_table) { //printf("seting table\n"); hipMemcpyToSymbol(table,h_table,sizeof(cuckoo)); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } int choose_block_num(TYPE size){ unsigned int real_block=(size+THREAD_NUM-1)/THREAD_NUM; /// BLOCK_NUM int block=real_block>BLOCK_NUM ? BLOCK_NUM : real_block; /// block=block<1?1:block; return block; }
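/* Illustrative sketch (not part of the original sources): a minimal single-threaded model of the
 * bucketized cuckoo insert that cuckoo_insert above performs with one warp per key. The constants,
 * the struct layout and the names (cpu_bucket, cpu_cuckoo_insert, the 2^32-5 prime) are assumptions
 * made for illustration; only the probe/evict order mirrors the kernel - update or fill a slot if
 * possible, otherwise displace one entry and retry in the next table, giving up after a bounded
 * number of displacements so the caller can trigger a resize. Locking and warp voting are omitted. */
#define CPU_TABLE_NUM 4
#define CPU_BUCKET_SIZE 32
#define CPU_MAX_ITER 20
struct cpu_bucket { unsigned key[CPU_BUCKET_SIZE]; unsigned value[CPU_BUCKET_SIZE]; };
static unsigned cpu_hash(unsigned k, unsigned a, unsigned b, unsigned nbuckets)
{
    /* same shape as the kernel's get_next_loc; note that k ^ a + b parses as k ^ (a + b) in C */
    return (k ^ (a + b)) % 4294967291u % nbuckets;
}
static int cpu_cuckoo_insert(struct cpu_bucket* tables[CPU_TABLE_NUM],
                             const unsigned nbuckets[CPU_TABLE_NUM],
                             const unsigned a[CPU_TABLE_NUM], const unsigned b[CPU_TABLE_NUM],
                             unsigned key, unsigned value)
{
    for (int it = 0, t = 0; it < CPU_MAX_ITER; ++it, t = (t + 1) % CPU_TABLE_NUM) {
        struct cpu_bucket* bk = &tables[t][cpu_hash(key, a[t], b[t], nbuckets[t])];
        for (int s = 0; s < CPU_BUCKET_SIZE; ++s) {            /* update existing key or take an empty slot */
            if (bk->key[s] == key || bk->key[s] == 0) { bk->key[s] = key; bk->value[s] = value; return 1; }
        }
        int s = key & (CPU_BUCKET_SIZE - 1);                   /* bucket full: evict one slot and carry it on */
        unsigned ek = bk->key[s], ev = bk->value[s];
        bk->key[s] = key; bk->value[s] = value;
        key = ek; value = ev;
    }
    return 0;                                                  /* give up, analogous to setting *resize = 1 */
}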
b3cc481f38a3d13f4a124df0f21a05404b6aea8f.cu
// // Created by jing on 2018/7/1. // // add vscode sup #include "cuckoo.h" #include <assert.h> #include <device_launch_parameters.h> #include "api.h" // Supported operations #define ADD (0) #define DELETE (1) #define SEARCH (2) #define debug_num 30 #define single_BUCKET 15629 /// hash table __constant__ cuckoo table; #define get_table_length(i) get_table_bucket_length(i) #define get_table_bucket_length(i) (table.Lsize[i]/BUCKET_SIZE) /// Lsize0 is the biggest #define Lock_pos(num,hash) ((num) * (get_table_length(0)) + hash) #define parameter_of_hash_function_a(num) (table.hash_fun[num].x) #define parameter_of_hash_function_b(num) (table.hash_fun[num].y) /// hash functiong __device__ __forceinline__ TYPE get_next_loc(TYPE k, TYPE num_table) { return ( k^ parameter_of_hash_function_a(num_table) + parameter_of_hash_function_b(num_table) ) % PRIME_uint % get_table_length(num_table); } /// for debug __device__ void pbucket(bucket *b,int num,int hash,int t_size) { printf("table.%d,%d/%d \n",num,hash,t_size); for(int i=0;i<BUCKET_SIZE;i++){ if(i%8==0) printf("\n\t"); printf("%d,%d ",b->key[i],b->value[i]); } printf("\n"); } __global__ void cuckoo_insert(TYPE* key, /// key to insert TYPE* value, /// value to insert TYPE size, /// insert size int* resize) /// insert error? { *resize = 0; int tid = blockIdx.x * blockDim.x + threadIdx.x; /// for every k /// warp cooperation int lan_id = threadIdx.x & 0x1f; int warp_num_in_all = tid >> 5; TYPE myk, myv; TYPE evict_time_of_one_thread = 0; int hash; TYPE operator_hash_table_num = 0; /// using for ballot & CAS int tmp=2; /// first read data myk = key[warp_num_in_all]; myv = value[warp_num_in_all]; while (warp_num_in_all < size) { ///for re lock , try other table operator_hash_table_num++; operator_hash_table_num %= TABLE_NUM; hash = get_next_loc(myk, operator_hash_table_num); /// step3.1 lock & un compress TODO: compress /// lock ,otherwise revote if (lan_id == 0) { tmp = atomicCAS(&(table.Lock[Lock_pos(operator_hash_table_num, hash)]), 0, 1); }//end if tmp=__shfl(tmp, 0); if(tmp==1) { continue; } /// bucket bucket *b = &(table.table[operator_hash_table_num][hash]); tmp = __ballot(b->key[lan_id] == myk); if (tmp != 0) { /// update if(lan_id==__ffs(tmp) - 1) { b->value[lan_id] = myv; } table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0; tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; evict_time_of_one_thread = 0; myk = key[warp_num_in_all]; myv = value[warp_num_in_all]; continue; }//end check update /// step3.3 check null & insert tmp = __ballot(b->key[lan_id] == 0); if (tmp != 0) { if (lan_id == __ffs(tmp) - 1) { b->key[lan_id] = myk; b->value[lan_id] = myv; }// insert table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0; tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; evict_time_of_one_thread = 0; myk = key[warp_num_in_all]; myv = value[warp_num_in_all]; continue; }/// null insert over /// step3.4 other,we need cuckoo evict TYPE tmpk=myk,tmpv=myv; /// choose pos:lan_id evict ,TODO: choose rand? 
int evict_pos=myk & 0x1f; myk = b->key[evict_pos]; myv = b->value[evict_pos]; b->key[evict_pos] = tmpk; b->value[evict_pos] = tmpv; evict_time_of_one_thread++; table.Lock[Lock_pos(operator_hash_table_num, hash)] = 0; /// when one always get leader , mark rehash /// check long chain if (evict_time_of_one_thread >= MAX_ITERATOR) { *resize = 1; tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; evict_time_of_one_thread = 0; myk = key[warp_num_in_all]; myv = value[warp_num_in_all]; continue; } }//while size }//cucukoo insert __global__ void cuckoo_search(TYPE* key, /// key to s TYPE* value, /// value to key TYPE size) /// s size { int tid = blockIdx.x * blockDim.x + threadIdx.x; /// for every k #if head_info_debug if(tid==0) { printf(">>>search kernel:\n>>>size:%d \n", size); printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n", table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]); } #endif int lan_id = threadIdx.x & 0x0000001f; int warp_num_in_block = threadIdx.x >> 5; volatile __shared__ int warp[( THREAD_NUM)>>5 ]; TYPE myk; int is_active; TYPE work_k = 0; /// for search int hash; int operator_hash_table_num; int ballot; bucket *b; /// ((size+31)>>5)<<5 :keep a warp to active while ( tid < (((size + 31) >> 5) << 5) ) { if(tid<size) { myk = key[tid]; is_active = 1;/// mark for work } /// while have work to do while (__any(is_active != 0)) { operator_hash_table_num=0; /// step1 start voting ================================== if (is_active != 0) warp[warp_num_in_block] = lan_id; #if search_debug if(lan_id==0) printf("voting: %d\t",warp[warp_num_in_block] ); #endif work_k = myk; /// step2 broadcast ==================================== work_k=__shfl(work_k, warp[warp_num_in_block]); /// step3 find in 5 table =========================== /// find null or too long for (int i = 0; i < TABLE_NUM; i++) { operator_hash_table_num = i; hash = get_next_loc(work_k, operator_hash_table_num); b=&table.table[operator_hash_table_num][hash]; ballot=__ballot(b->key[lan_id]==work_k); /// find it if(ballot!=0){ if(lan_id==warp[warp_num_in_block]){ value[tid]=b->value[__ffs(ballot)-1]; #if search_debug printf("find %d: %d\n",key[tid],value[tid]); #endif is_active=0; } break; } }/// end for /// can not find if(lan_id==warp[warp_num_in_block]){ if(is_active==1) value[tid]=2; //printf("cannot find k: %d ,tid:%d ",myk,tid); //pbucket(b,operator_hash_table_num,hash,get_table_length(operator_hash_table_num)); is_active=0; } } tid += BLOCK_NUM * THREAD_NUM; } }//cuckoo_search /// del and return value __global__ void cuckoo_delete(TYPE* key, /// key to del TYPE* value, /// value to return TYPE size) /// size { int tid = blockIdx.x * blockDim.x + threadIdx.x; /// for every k #if head_info_debug if(tid==0) { printf(">>>delete kernel:\n>>>size:%d \n", size); printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n", table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]); } #endif int lan_id = threadIdx.x & 0x0000001f; int warp_num_in_block = threadIdx.x >> 5; volatile __shared__ int warp[( THREAD_NUM)>>5 ]; TYPE myk; int is_active; TYPE work_k = 0; /// for search int hash; int operator_hash_table_num; int ballot; bucket *b; /// ((size+31)>>5)<<5 :keep a warp to active while ( tid < (((size + 31) >> 5) << 5) ) { if(tid<size) { myk = key[tid]; is_active = 1;/// mark for work } /// while have work to do while (__any(is_active != 0)) { operator_hash_table_num=0; /// step1 start voting ================================== if (is_active != 0) warp[warp_num_in_block] = lan_id; #if search_debug if(lan_id==0) 
printf("voting: %d\t",warp[warp_num_in_block] ); #endif work_k = myk; /// step2 broadcast ==================================== work_k=__shfl(work_k, warp[warp_num_in_block]); /// step3 find in 5 table =========================== /// find null or too long for (int i = 0; i < TABLE_NUM; i++) { operator_hash_table_num = i; hash = get_next_loc(work_k, operator_hash_table_num); b=&table.table[operator_hash_table_num][hash]; ballot=__ballot(b->key[lan_id]==work_k); /// find it if(ballot!=0){ if(lan_id==warp[warp_num_in_block]){ value[tid]=b->value[__ffs(ballot)-1]; #if search_debug printf("find %d: %d\n",key[tid],value[tid]); #endif ///step3.1 if find, set to zero =========================== b->key[__ffs(ballot)-1]=0; b->value[__ffs(ballot)-1]=0; is_active=0; } break; } }/// end for /// can not find if(lan_id==warp[warp_num_in_block]){ is_active=0; } } tid += BLOCK_NUM * THREAD_NUM; } }//cuckoo_delete void __global__ cuckoo_resize_up(bucket* old_table, /// new table has been set to table int old_size, TYPE num_table_to_resize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int warp_num_in_all = tid >> 5; int lan_id = tid & 0x1f; /// take kv to insert TYPE key, value; int hash; /// hold old one bucket to op bucket *b; ///step1 取新表 ====================== bucket *new_table = table.table[num_table_to_resize]; ///step2 每个warp处理一个bucket ====================== old_size /= BUCKET_SIZE; while (warp_num_in_all < old_size) { ///step2.1 获取自己的bucket ====================== b = &old_table[warp_num_in_all]; ///step2.2 对bucket中各插入对应的位置====================== key = b->key[lan_id]; value = b->value[lan_id]; if (key != 0) { /// how to use tid & hash fun hash = get_next_loc(key, num_table_to_resize); new_table[hash].key[lan_id] = key; new_table[hash].value[lan_id] = value; } tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; } }//cuckoo_resize_up void __global__ cuckoo_resize_down(bucket* old_table, /// small int old_size, int num_table_to_resize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; #if head_info_debug if(tid==0) { printf(">>>down_size kernel: %d->%d\n",old_size,table.Lsize[num_table_to_resize]); printf(">>>s_size:t1:%d, t2:%d, t3:%d, t4:%d\n", table.Lsize[0], table.Lsize[1], table.Lsize[2], table.Lsize[3]); } #endif bucket *b=NULL; bucket *des_b=NULL; /// take kv to insert TYPE key, value; /// insert position int hash; int new_bucket_size = table.Lsize[num_table_to_resize] / BUCKET_SIZE; /// warp coopration int warp_num_in_all = tid >> 5; int warp_num_in_block = threadIdx.x >> 5; int lan_id = tid & 0x1f; int is_active; int ballot; /// in block , for voting volatile __shared__ int warp[(THREAD_NUM) >> 5]; ///step1 置换新表 ====================== /// 与新表对应的表长已设置好 bucket *new_table = table.table[num_table_to_resize]; /// end ,next : old->new #if down_size_debug if (tid==0) printf("step start \n"); #endif ///step2 每个warp处理2个bucket->一个bucket ====================== /// 分别将 旧表 tid tid+new_bucket_size 两个bucket插入到新表的 tid bucket中 /// PROBLEM: 这里默认 new_bucket_size * 2 = old_size (api.cpp line 47) /// 方法 部分条件下可将old_size 设置为偶数,这样只有在多次downsize之后才会不符合上述条件 /// PROBLEM: 将两个bucket映射到一个bucket,在元素较多的情况下势必造成部分 /// 溢出,除了将溢出部分插入到其他表,我们还需要合理安排两个到一个的映射关系使之高 /// 效转换。 /// 方法1. 逐个查询,使用原子add /// 方法2. 对空位置和非空kv scan,直接得到相应位置,:需要shared或其他数组支持 /// 方法3. 
首先进行简单插入,然后使用warp通信找到空位置插入 /// one thread one block while (warp_num_in_all < new_bucket_size) { /// new size is smaller ///step2.1 获取新表的bucket ====================== /// warp_num_in_all is hash_value des_b = &new_table[warp_num_in_all]; #if down_size_debug if (tid==0) printf("step2.1 start \n"); #endif ///step2.2 获取第一个旧表的bucket ====================== /// 读入第一个bucket中kv到变量 b = &old_table[warp_num_in_all]; key = b->key[lan_id]; value = b->value[lan_id]; #if down_size_debug if(tid==0){ printf("old table1\n"); pbucket(b,0,0,0); } if(warp_num_in_all==0) printf("b1-%d: %d,%d\n",lan_id,key,value); #endif int crose_lan_id=31-lan_id; /// 空kv再此读入第二个bucket 交叉读取 b = &old_table[warp_num_in_all + new_bucket_size]; if (key == 0) { key = b->key[crose_lan_id]; value = b->value[crose_lan_id]; } ///到这里,第一个bucket全部会被读入后面接着写入,第二个部分还未读入 #if down_size_debug if(tid==0){ printf("old table2\n"); pbucket(b,0,0,0); } if(warp_num_in_all==0) printf("b1-%d: %d,%d\n",lan_id,key,value); #endif ///step2.3 将不为空的kv插入新表===================== des_b->key[lan_id] = key; des_b->value[lan_id] = value; #if down_size_debug || down_size_cas_insert_debug if(tid==0) printf("write\n"); if(tid==0) pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize)); #endif is_active=0; ///step2.4 读取第二个bucket中未存入的kv ====================== if (key != b->key[crose_lan_id] /// 从未写入过 && b->key[crose_lan_id] !=0) /// 存在值 { key = b->key[crose_lan_id]; value = b->value[crose_lan_id]; is_active = 1; } #if down_size_debug || down_size_cas_insert_debug if(warp_num_in_block==0) printf("b1-%d: %d,%d - %d\n",lan_id,key,value,is_active); #endif ///step2.5 对新bucket还有的空位进行插入kv====================== /// PROBLEM: how about skip step2.5 use step3 directly /// 如果空位置比较少会比较快,否则可能使用scan会更快 /// 如果还有空位 ballot = __ballot(des_b->key[lan_id] == 0); #if down_size_debug if( tid==0 && ballot == 0 ) printf("step 2.5 , full\n"); #endif while (__any(des_b->key[lan_id] == 0)) { #if down_size_debug if(tid==0) printf("step 2.5 \n"); #endif if(!__any(is_active==1)) break; #if down_size_debug if(tid==0) pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize)); #endif /// 找出空位 ballot = __ballot(des_b->key[lan_id] == 0); /// use hash as tmp to decrease register /// 选一个空位 hash = __ffs(ballot) - 1; /// 选一个线程 if (is_active == 1) warp[warp_num_in_block] = lan_id; /// insert if (warp[warp_num_in_block] == lan_id) { des_b->key[hash] = key; des_b->value[hash] = value; is_active=0; } } ///step 3 如果位将第二个表中元素全部插入完成,插入到其他表中====================== #if down_size_debug || down_size_cas_insert_debug if(tid==0) printf("after2.5 start3\n"); if(tid==0) pbucket(des_b,num_table_to_resize,tid,get_table_length(num_table_to_resize)); #endif /// key value has kv to insert TYPE work_k,work_v; int operator_hash_table_num=0; int lead_thread_num; #if down_size_cas_insert_debug if(warp_num_in_all==0) { printf("b1-%d: %d,%d - %d\n",lan_id,key,value,is_active); } #endif int times_of_evict=0; while (__any(is_active != 0)) { /// using logic of cuckoo_insert (__global__) /// how to reuse the code ? 
/// TODO , check too long evict work_k = key; work_v = value; /// step3.1 start voting ================================== if (is_active != 0)//&& warp[warp_num_in_block]!=lan_id ) warp[warp_num_in_block] = lan_id; /// leader is lead_thread_num lead_thread_num = warp[warp_num_in_block]; if(lead_thread_num==lan_id) times_of_evict++; /// step3.2 broadcast ==================================== work_k = __shfl(work_k, lead_thread_num); work_v = __shfl(work_v, lead_thread_num); /// step3.3 insert to the table. =========================== operator_hash_table_num ++; /// donot insert to table:num_table_to_resize full if (operator_hash_table_num==num_table_to_resize ) { operator_hash_table_num++; } operator_hash_table_num %= TABLE_NUM; hash = get_next_loc(work_k, operator_hash_table_num); /// step3.4 lock TODO: compress =========================== /// using ballot as tmp to decrease register /// lock ,otherwise revote if (lan_id == lead_thread_num) { /// TODO: different length need to sum ,tmp using double length ballot = atomicCAS(&(table.Lock[Lock_pos(operator_hash_table_num,hash)]), 0, 1); }//end if ballot = __shfl(ballot, lead_thread_num); if (ballot == 1) continue; b = &(table.table[operator_hash_table_num][hash]); #if down_size_cas_insert_debug ballot=__ballot(is_active==1); if(warp_num_in_block==0 && lan_id==0){ printf("\n\nactive ballot:%x kv %d,%d lead_thread_num:%d\n", ballot,work_k,work_v,lead_thread_num ); pbucket(b,operator_hash_table_num,hash,get_table_length(operator_hash_table_num)); } #endif /// step3.5 check exist & insert ballot = __ballot(b->key[lan_id] == work_k); if (ballot != 0) { /// update if (lan_id == lead_thread_num) { b->value[__ffs(ballot) - 1] = value; is_active = 0; #if down_size_cas_insert_debug if(warp_num_in_block==0) { printf("exit after insert \n"); pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num)); } #endif table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0; times_of_evict=0; }// end if ,upadte continue; }//end check update /// step3.6 check null & insert ballot = __ballot(b->key[lan_id] == 0); #if down_size_cas_insert_debug if(warp_num_in_block==0) printf("%d,",lan_id); if(tid==0){ printf("\n\nnull ballot:%x kv %d,%d lead_thread_num:%d \n", ballot,work_k,work_v,lead_thread_num); } #endif if (ballot != 0) { /// set kv if (lan_id == __ffs(ballot) - 1) { b->key[lan_id] = work_k; b->value[lan_id] = work_v; /// free table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0; #if down_size_cas_insert_debug if(warp_num_in_block==0) { printf("null after insert \n"); pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num)); } #endif }// insert /// mark active false if (lan_id == lead_thread_num){ times_of_evict=0; is_active = 0; } continue; }/// null insert over /// step3.7 other,we need cuckoo evict if (lan_id == lead_thread_num){ key = b->key[lan_id]; value = b->value[lan_id]; b->key[lan_id] = work_k; b->value[lan_id] = work_v; #if down_size_cas_insert_debug if(warp_num_in_block==0) { printf("evict after insert \n"); pbucket(b, operator_hash_table_num, hash, get_table_length(operator_hash_table_num)); } #endif table.Lock[Lock_pos(operator_hash_table_num,hash)] = 0; if(times_of_evict>MAX_ITERATOR){ is_active=0; printf("downsizeing can insert %d %d,tid %d ",key,value,tid); times_of_evict=0; } } // evict } /// TODO:auto configure ,what should be add to tid tid += BLOCK_NUM * THREAD_NUM; warp_num_in_all = tid >> 5; } }//cuckoo_resize_down int choose_block_num(TYPE size); void GPU_cuckoo_resize_up(int 
num_table_to_resize, TYPE old_size, bucket* new_table, cuckoo *h_table) { checkCudaErrors(cudaGetLastError()); TYPE new_size=old_size*2; /// set table & size it needed bucket* old_table=h_table->table[num_table_to_resize]; h_table->Lsize[num_table_to_resize]=new_size; h_table->table[num_table_to_resize]=new_table; cudaMemcpyToSymbol(table,h_table,sizeof(cuckoo)); /// TODO: auto configure /// kernel Configuration dim3 block=choose_block_num(old_size); /// kernel launch GpuTimer timer; timer.Start(); cuckoo_resize_up<<<block,THREAD_NUM>>>(old_table,old_size,num_table_to_resize); timer.Stop(); double diff = timer.Elapsed()*1000000; printf("kernel <<<upsize>>>:the time is %.2lf us, ( %.2f Mops)s\n", (double)diff, (double)(new_size) / diff); }//GPU_cuckoo_resize_up void GPU_cuckoo_resize_down(int num_table_to_resize, TYPE old_size, bucket* new_table, cuckoo *h_table) { /// bucket to size : << 5 int new_size=((get_table_bucket_size(num_table_to_resize)+1)/2) << 5; //printf("down_size : %d : szie%d->%d.",num_table_to_resize,old_size,new_size); /// set table & size it needed bucket* old_table=h_table->table[num_table_to_resize]; h_table->Lsize[num_table_to_resize]=new_size; h_table->table[num_table_to_resize]=new_table; cudaMemcpyToSymbol(table,h_table,sizeof(cuckoo)); dim3 block=choose_block_num(old_size); /// kernel launch cuckoo_resize_down<<<block,THREAD_NUM>>>(old_table,old_size,num_table_to_resize); }//GPU_cuckoo_resize_down /// show table by key,value __global__ void show_table() { if (blockIdx.x * blockDim.x + threadIdx.x > 0) return; /// i is the table num for (int i = 0; i < TABLE_NUM; i++) { printf("\n\n\ntable:%d\n", i); /// j is the bucket num for (int j = 0; j < get_table_length(i); j++) { printf("bucket:%d\n", j); /// t is every slot(one bucket has 32 slot) for (int t = 0; t < BUCKET_SIZE; t++) { /// 8 slot a line if (t % 8 == 0) printf("\n\t\t"); printf(" %d,%d ", table.table[i][j].key[t], table.table[i][j].value[t]); } printf("\n"); } } } void GPU_show_table(){ show_table<<<1,1>>>(); } void gpu_lp_insert(TYPE* key, TYPE* value, TYPE size, int* resize) { dim3 block=choose_block_num(size*32); GpuTimer time; time.Start(); cuckoo_insert <<< block, THREAD_NUM >>> (key, value, size, resize); time.Stop(); double diff = time.Elapsed() * 1000000; printf("kernel <<<insert>>>:the time is %.2lf us ( %.2f Mops)\n", (double) diff, (double) (size) / diff); }//gpu_lp_insert void gpu_lp_search(TYPE* key, TYPE* ans, TYPE size){ dim3 block=choose_block_num(size); GpuTimer time; time.Start(); cuckoo_search<<<block,THREAD_NUM>>>(key,ans,size); time.Stop(); double diff = time.Elapsed() * 1000000; printf("kernel <<<search>>>:the time is %.2lf us, ( %.2f Mops)s\n", (double)diff, (double)(size) / diff); // checkCudaErrors(cudaGetLastError()); } void gpu_lp_delete(TYPE* key, TYPE* ans, TYPE size){ dim3 block=choose_block_num(size); GpuTimer time; time.Start(); cuckoo_delete<<<block,THREAD_NUM>>>(key,ans,size); time.Stop(); double diff = time.Elapsed() * 1000000; printf("delete <<<delete>>>:the time is %.2lf us, ( %.2f Mops)s\n", (double)diff, (double)(size) / diff); // checkCudaErrors(cudaGetLastError()); } void gpu_lp_set_table(cuckoo *h_table) { //printf("seting table\n"); cudaMemcpyToSymbol(table,h_table,sizeof(cuckoo)); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } int choose_block_num(TYPE size){ unsigned int real_block=(size+THREAD_NUM-1)/THREAD_NUM; /// 不能超过 BLOCK_NUM int block=real_block>BLOCK_NUM ? BLOCK_NUM : real_block; /// block=block<1?1:block; return block; }
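/* Illustrative sketch (not part of the original sources): every kernel in the file above leans on the
 * same warp idiom - elect a leader lane, broadcast its key, then use a ballot to locate a matching or
 * empty slot in a 32-wide bucket. The toy kernel below isolates that idiom with the same legacy
 * intrinsics the file uses (__ballot/__ffs/__shfl); on CUDA 9+ the *_sync variants with an explicit
 * lane mask are the current forms. warp_find_demo is a made-up name, meant to be launched with a
 * single warp, e.g. warp_find_demo<<<1,32>>>(d_slots, 42u, d_out); */
__global__ void warp_find_demo(const unsigned* slots, unsigned needle, int* out_index)
{
    int lane = threadIdx.x & 0x1f;
    // every lane inspects one slot of a 32-wide "bucket"
    unsigned mine = slots[lane];
    // ballot: bit i is set iff lane i saw the needle
    unsigned hit = __ballot(mine == needle);
    // first matching slot, or -1 when the key is absent
    int idx = hit ? (__ffs(hit) - 1) : -1;
    // broadcast lane 0's result so every lane agrees (here all lanes already computed the same value)
    idx = __shfl(idx, 0);
    if (lane == 0) *out_index = idx;
}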
7a6dd7c58f950604f6fe3328be5a36efdaf06e55.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2013 William J. Brouwer, Pierre-Yves Taunay * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> #include "main.h" #include "utilities.h" __device__ __constant__ int cmem_size_mmpb,cmem_size_MM_mmpb; __global__ void create_ptr_mmpb(float2 **q_A,float2 **q_B,float2 **q_C,float2 *q_temp,float2 *q_tempB,float2 *q_complete) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int mat = idx*MATRIX_SIDE*MATRIX_SIDE; __syncthreads(); if(idx < cmem_size_mmpb ) q_A[idx] = &q_temp[mat]; __syncthreads(); if(idx < cmem_size_mmpb ) q_B[idx] = &q_tempB[mat]; __syncthreads(); if(idx < cmem_size_mmpb ) q_C[idx] = &q_complete[mat]; } __global__ void copy_to_temp_mmpb(float2 *q_temp, float2 *q_complete) { int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y*blockDim.x*gridDim.x; __shared__ float2 buffer[NTH]; __syncthreads(); if(idx < cmem_size_MM_mmpb) buffer[threadIdx.x].x = q_complete[idx].x; __syncthreads(); if(idx < cmem_size_MM_mmpb) buffer[threadIdx.x].y = q_complete[idx].y; __syncthreads(); if(idx < cmem_size_MM_mmpb) q_temp[idx].x = buffer[threadIdx.x].x; __syncthreads(); if(idx < cmem_size_MM_mmpb) q_temp[idx].y = buffer[threadIdx.x].y; } __global__ void printf_ptr_mmpb(float2 **q_cblas,float2 *q) { float2 a = q[0]; float2 b = q[1]; for(int i = 0;i<cmem_size_mmpb;i++) { printf("Q-address: %p\t CBLAS: %p\n",q_cblas[i],&q[i*MATRIX_SIDE*MATRIX_SIDE]); } a.x += b.x; q[0] = a; } __device__ float cabs_sq_mmpb(float2 input){ return input.x * input.x + input.y * input.y; } /* Kernel Overview * * One block processes multiple complex matrix, performing one iteration of the qr * decomposition by givens rotations * * PYT 07/25 */ __global__ void qr_gpu_mmpb_bat(int col, float2 * matrices, float2 *q_temp, float2 *q_complete){ // values we use to calculate Givens __shared__ float2 lower[NMPBL]; __shared__ float2 upper[NMPBL]; // buffered c,s elements for constructing G^T contribution __shared__ float2 g_sin[ NMPBL*MATRIX_SIDE ]; __shared__ float2 g_cos[ NMPBL*MATRIX_SIDE ]; // buffer a row for multiplication __shared__ float2 row_left[NMPBL*MATRIX_SIDE]; // index to matrix for processing int myMatrix = threadIdx.x / MATRIX_SIDE; // index to vector for processing int vectorIndex = threadIdx.x % MATRIX_SIDE; // matrix offset for this block int memoryStride = ( blockIdx.x * NMPBL + myMatrix ) * MATRIX_SIDE * MATRIX_SIDE ; // an index into cos data int cosIndex = (vectorIndex < MATRIX_SIDE-1) ? 
MATRIX_SIDE-vectorIndex-2 : 0; // elements float2 row_ele_upper, row_ele_lower; // initialize cos &sin buffers g_sin[vectorIndex + smemstride].x = 0.0f; g_sin[vectorIndex + smemstride].y = 0.0f; g_cos[vectorIndex + smemstride].x = 1.0f; g_cos[vectorIndex + smemstride].y = 0.0f; int i=0; // init row_ele_lower.x = matrices [ global_lower_row ].x; row_ele_lower.y = matrices [ global_lower_row ].y; int steps = MATRIX_SIDE-1-col; float2 c,s; float2 tmp2,tmp3; //evaluate the sequence of Givens rotations for this column eg., lowest two rows after update : // [ a b c d e ] // [ ] // [ f g h i j ] // [ ] // [ k l m n o ] // [ ] // [c1 p + s11 u c1 q + s11 v c1 r + s11 w c1 s + s11 x c1 t + s11 y] // [ ] // [s12 p + c1 u s12 q + c1 v s12 r + c1 w s12 s + c1 x s12 t + c1 y] //each thread updates one matrix element, iterate up columns // cx == cos; x index is number rotation applied // sxy == sin; x index is number rotation applied // y index indicates first/second row in Givens rot (second == -complex conjugate of first) // main matrix update loops for (int i=0; i<steps; i++){ // load two rows __syncthreads(); row_ele_upper.x = matrices [ global_upper_row ].x; row_ele_upper.y = matrices [ global_upper_row ].y; if (vectorIndex == col){ lower[myMatrix] = row_ele_lower; upper[myMatrix] = row_ele_upper; } __syncthreads(); // calculate Givens elements float tmpA = cabs_sq_mmpb(upper[myMatrix]); float tmpB = cabs_sq_mmpb(lower[myMatrix]); float den = rsqrt(tmpA+tmpB); float2 tmpC; float2 sgn = {1.0,0.0}; if ((upper[myMatrix].x !=0) && (upper[myMatrix].y != 0)){ sgn.x = upper[myMatrix].x * rsqrt(tmpA); sgn.y = -upper[myMatrix].y * rsqrt(tmpA); } c.x = sqrt(tmpA) * den; c.y = 0.0; s.x = lower[myMatrix].x * den; s.y = lower[myMatrix].y * den; tmp2.x = (s.x*sgn.x - s.y*sgn.y); s.y *= sgn.x; s.y += s.x*sgn.y; s.y *= -1; s.x = tmp2.x; // [c1 p + s11 u c1 q + s11 v c1 r + s11 w c1 s + s11 x c1 t + s11 y] // [ ] // [s12 p + c1 u s12 q + c1 v s12 r + c1 w s12 s + c1 x s12 t + c1 y] //apply to elements and write out lower to global, it's done //-ve complex conj of sin s.x *= -1; tmp2.x = (s.x*row_ele_upper.x - s.y*row_ele_upper.y); tmp2.x += (c.x*row_ele_lower.x - c.y*row_ele_lower.y); tmp2.y = (s.y*row_ele_upper.x + s.x*row_ele_upper.y); tmp2.y += (c.y*row_ele_lower.x + c.x*row_ele_lower.y); matrices[ global_lower_row ] = tmp2; __syncthreads(); s.x *= -1; //update new lower element stored locally (the new upper row, but don't bother //with global write back tmp2.x = (c.x*row_ele_upper.x - row_ele_upper.y*c.y); tmp2.x += (row_ele_lower.x*s.x - row_ele_lower.y*s.y); tmp2.y = (row_ele_upper.y*c.x + row_ele_upper.x*c.y); tmp2.y += (row_ele_lower.y*s.x + row_ele_lower.x*s.y); row_ele_lower.x = tmp2.x; row_ele_lower.y = tmp2.y; //cache the calculated rotation if (vectorIndex == i){ g_cos [ rotation_ele_index + smemstride] = c; g_sin [ rotation_ele_index + smemstride] = s; } } i=steps-1; //write out final upper row matrices[ global_upper_row ] = tmp2; __syncthreads(); //build up contributions to G^T //these are orthogonal matrices ie., G^-1 == G^T //based on simple sequences in upper-hessenberg form eg., for a 5x5 // [c4 s41 c3 s41 s31 c2 s41 s31 s21 c1 s41 s31 s21 s11] // [ ] // [s42 c4 c3 c4 s31 c2 c4 s31 s21 c1 c4 s31 s21 s11 ] // [ ] // [ 0 s32 c3 c2 c3 s21 c1 c3 s21 s11 ] // [ ] // [ 0 0 s22 c2 c1 c2 s11 ] // [ ] // [ 0 0 0 s12 c1 ] // cx == cos; x index is number rotation applied // sxy == sin; x index is number rotation applied // y index indicates first/second row in Givens rot (second == -complex conjugate of 
first) //i==index into global memory (row) //j==index into shared memory //create and write lowest row //float2 tmp2,tmp3; float2 tmp = {0.0f,0.0f}; i = MATRIX_SIDE-1; //-ve sin on (diagonal-1) tmp.x = -(float) sub_diag_mask*g_sin[0+smemstride].x; tmp.y = (float) sub_diag_mask*g_sin[0+smemstride].y; //cos term on diagonal tmp.x += (float) for_diag_mask*g_cos[0 + smemstride].x; tmp.y += (float) for_diag_mask*g_cos[0 + smemstride].y; //write q_temp[ global_row ] = tmp; //seed second last row tmp.x = 0.0f; tmp.y = 0.0f; i = MATRIX_SIDE-2; // c0 term on diagonal tmp.x = (float) diag_mask*g_cos[0 + smemstride].x; tmp.y = (float) diag_mask*g_cos[0 + smemstride].y; // last row term, s0 tmp.x += (float) last_col_mask*g_sin[0 + smemstride].x; tmp.y += (float) last_col_mask*g_sin[0 + smemstride].y; if (steps>2){ //complete and write second last row //c1 terms, diagonal and forward tmp2.x = (float) for_diag_mask * g_cos[1 + smemstride].x; tmp2.y = (float) for_diag_mask * g_cos[1 + smemstride].y; //tmp.x = tmp.x*tmp2.x - tmp.y*tmp2.y; tmp3.x = tmp.x*tmp2.x - tmp.y*tmp2.y; tmp.y = tmp.y*tmp2.x + tmp.x*tmp2.y; tmp.x = tmp3.x; //s1 term, diagonal -1 tmp2.x = -(float) sub_diag_mask * g_sin[1 + smemstride].x; tmp2.y = (float) sub_diag_mask * g_sin[1 + smemstride].y; tmp.x += tmp2.x; tmp.y += tmp2.y; //write q_temp[ global_row ] = tmp; //a holder for building up products of sin terms float2 mult; mult.x = (float) last_two_col_mask * g_sin[1+smemstride].x; mult.x += (float) !(last_two_col_mask); mult.y = (float) last_two_col_mask * g_sin[1+smemstride].y; //complete and write third last row tmp.x = 0.0f; tmp.y = 0.0f; i = MATRIX_SIDE-3; // last column element s0 tmp.x = (float) last_col_mask * g_sin[0+smemstride].x; tmp.y = (float) last_col_mask * g_sin[0+smemstride].y; // prior row elements cn,...,c0 tmp.x += (float) ( for_diag_mask && !last_col_mask ) * g_cos [ cosIndex +smemstride].x; tmp.y += (float) ( for_diag_mask && !last_col_mask ) * g_cos [ cosIndex +smemstride].y; tmp3.x = tmp.x*mult.x-tmp.y*mult.y; tmp.y = tmp.y*mult.x+tmp.x*mult.y; tmp.x = tmp3.x; //cos terms, diagonal and forward tmp2.x = (float) for_diag_mask * g_cos[2 + smemstride].x; tmp2.y = (float) for_diag_mask * g_cos[2 + smemstride].y; //tmp.x = tmp2.x*tmp.x - tmp2.y*tmp.y; tmp3.x = tmp2.x*tmp.x - tmp2.y*tmp.y; tmp.y = tmp2.y*tmp.x + tmp2.x*tmp.y; tmp.x = tmp3.x; //-ve sin on diagonal-1 tmp.x -= (float) sub_diag_mask * g_sin[2+smemstride].x; tmp.y += (float) sub_diag_mask * g_sin[2+smemstride].y; //write q_temp[ global_row ] = tmp; //work up columns of matrix for (int j=3; j<=steps; j++){ tmp.x = 0.0f; tmp.y = 0.0f; i = MATRIX_SIDE-1-j; // last row element tmp.x = (float) last_col_mask * g_sin[0+smemstride].x; tmp.y = (float) last_col_mask * g_sin[0+smemstride].y; // prior row elements tmp.x += (float) ( for_diag_mask && !last_col_mask ) * g_cos [ cosIndex +smemstride ].x; tmp.y += (float) ( for_diag_mask && !last_col_mask ) * g_cos [ cosIndex +smemstride ].y; // multiply in the sin terms tmp2.x = (float) above_diag_mask * g_sin[j-1+smemstride].x; tmp2.x += (float) !(above_diag_mask); tmp2.y = (float) above_diag_mask * g_sin[j-1+smemstride].y; tmp3.x = mult.x*tmp2.x - mult.y*tmp2.y; mult.y = mult.y*tmp2.x + mult.x*tmp2.y; mult.x = tmp3.x; // check if nan first mult.x = (isnan(mult.x)) ? 0 : mult.x; mult.y = (isnan(mult.y)) ? 
0 : mult.y; tmp3.x = tmp.x*mult.x - tmp.y*mult.y; tmp.y = tmp.y*mult.x + tmp.x*mult.y; tmp.x = tmp3.x; // final cos & sin terms tmp2.x = (float) for_diag_mask * g_cos[j + smemstride].x; tmp2.x += (float) !(for_diag_mask); tmp2.y = (float) for_diag_mask * g_cos[j + smemstride].y; tmp3.x = tmp.x*tmp2.x - tmp.y*tmp2.y; tmp.y = tmp.y*tmp2.x + tmp.x*tmp2.y; tmp.x = tmp3.x; tmp.x -= (float) sub_diag_mask * g_sin[j+smemstride].x; tmp.y += (float) sub_diag_mask * g_sin[j+smemstride].y; // write q_temp[ global_row ] = tmp; } if (steps < MATRIX_SIDE-1){ for (int i=0; i<MATRIX_SIDE-steps-1; i++){ tmp.x = (float) diag_mask; tmp.y = 0.0; q_temp[ global_row ] = tmp; } } } else{ q_temp[ global_row ] = tmp; for (int i=MATRIX_SIDE-3; i>=0; i--){ tmp.x = (float) diag_mask; tmp.y = 0.0; q_temp[ global_row ] = tmp; } } __syncthreads(); // now multiply in this contribution to q_complete // actual work we do in all iterations: // G_N^T ... G_0^T * A = R // // need to build up Q from G like this: // Q = G_0 * G_1 ... G_N // // so in these steps each thread loads a row value from Q // iterates along columns in the temporary q, in order to // create updated Q row values /* for (int i=0; i<MATRIX_SIDE; i++){ //matrix x conjugate-tranpose row_left[vectorIndex] = q_complete[global_row_i]; __syncthreads(); float2 partial_sum = {0.0,0.0}; float2 tmp,tmp1; for (int j=0; j<MATRIX_SIDE; j++){ //since we're loading conjugate transpose, want cols of q_temp tmp = q_temp[global_col_j]; tmp1.x = tmp.x*row_left[j].x + tmp.y*row_left[j].y; tmp.y = tmp.x*row_left[j].y - tmp.y*row_left[j].x; tmp.x = tmp1.x; partial_sum.x += tmp.x; partial_sum.y += tmp.y; } __syncthreads(); //write out q_complete[global_row_i] = partial_sum; } */ } extern "C"{ void givens_qr_mmpb_bat(float * mats, int size, float * q){ float2 *q_temp,*q_tempB,*q_complete,*matrices; // Array of pointers to matrix locations float2 **q_cblas_tempA,**q_cblas_tempB, **q_cblas_complete; //initialize q for (int k=0; k<size; k++) for (int i=0; i<MATRIX_SIDE; i++) for (int j=0; j<2*MATRIX_SIDE; j++) q[j+i*2*MATRIX_SIDE+k*MATRIX_SIDE*MATRIX_SIDE*2] = (2*i==j) ? 
1.0 :0.0; int qsize = size*MATRIX_SIDE*MATRIX_SIDE; hipMemcpyToSymbol(cmem_size_mmpb,&size,sizeof(size)); hipMemcpyToSymbol(cmem_size_MM_mmpb,&qsize,sizeof(qsize)); // Allocate memory and copy data hipMalloc((void**) &q_complete, sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); hipMemcpy(q_complete, q, sizeof(float2)*MATRIX_SIDE*MATRIX_SIDE*size, hipMemcpyHostToDevice); hipMalloc((void**) &q_temp, sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); hipMalloc((void**) &q_tempB, sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); hipMalloc((void**) &matrices, sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); hipMemcpy(matrices, mats, sizeof(float2)*MATRIX_SIDE*MATRIX_SIDE*size, hipMemcpyHostToDevice); // Allocate the arrays of pointers hipMalloc((void**)&q_cblas_tempA, sizeof(float2*)*size); hipMalloc((void**)&q_cblas_tempB,sizeof(float2*)*size); hipMalloc((void**)&q_cblas_complete,sizeof(float2*)*size); // Create the array of pointers hipLaunchKernelGGL(( create_ptr_mmpb) , dim3((int)ceil((float)size/(float)NTH)) , dim3(NTH) , 0, 0, q_cblas_tempA, q_cblas_tempB, q_cblas_complete,q_temp,q_tempB,q_complete); hipblasStatus_t status; hipblasHandle_t handle; status = hipblasCreate(&handle); if(status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr,"ERROR: CUBLAS Initialization error\n"); exit(0); } hipComplex c_one = {1.0,0.0}; hipComplex c_zro = {0.0,0.0}; int nrows = MATRIX_SIDE; int ncol = MATRIX_SIDE; dim3 threads,blocks; threads.x = NTH; blocks.x = (int)ceil((float)size/(float)NMPBL); printf("Launch configuration = %d BLOCKS, %d THREADS\n",blocks.x,threads.x); double begin = omp_get_wtime(); int dim1d_copy2tmp = (int)ceil(sqrt((float)size*(float)MATRIX_SIDE*(float)MATRIX_SIDE/(float)NTH)); dim3 grid_copy2tmp; grid_copy2tmp.x = dim1d_copy2tmp; grid_copy2tmp.y = dim1d_copy2tmp; for (int i=0; i<MATRIX_SIDE-1; i++){ //int i=MATRIX_SIDE-2; hipLaunchKernelGGL(( qr_gpu_mmpb_bat), dim3(blocks),dim3(threads), 0, 0, i, matrices, q_temp, q_complete); // Copy content of q_complete to q_tempB for batch GEMM hipLaunchKernelGGL(( copy_to_temp_mmpb) , dim3(grid_copy2tmp), dim3(NTH) , 0, 0, q_tempB,q_complete); status = hipblasCgemmBatched( handle, HIPBLAS_OP_N,HIPBLAS_OP_N, MATRIX_SIDE,MATRIX_SIDE,MATRIX_SIDE, &c_one, (const hipComplex**)q_cblas_tempB,nrows, (const hipComplex**)q_cblas_tempA,ncol, &c_zro, (hipComplex**)q_cblas_complete,nrows, size); if(status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr,"ERROR: CUBLAS CGEMM BATCHED error\n"); exit(0); } }//end main loops #ifdef _QR_VERBOSE_ float2 * q_dump = (float2 *) malloc(sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); hipMemcpy(q_dump, q_complete, sizeof(float2)*MATRIX_SIDE*MATRIX_SIDE*size, hipMemcpyDeviceToHost); float2 * r_dump = (float2 *) malloc(sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); hipMemcpy(r_dump, matrices, sizeof(float2)*MATRIX_SIDE*MATRIX_SIDE*size, hipMemcpyDeviceToHost); // Take the transpose of q_complete is CUBLAS for (int k=0; k<size; k++){ printf("q %i\n",k); for (int j=0; j<MATRIX_SIDE; j++) { for (int i=0; i<MATRIX_SIDE; i++){ printf("%f+%fi, ",q_dump[j+MATRIX_SIDE*i+k*MATRIX_SIDE*MATRIX_SIDE].x, q_dump[j+MATRIX_SIDE*i+k*MATRIX_SIDE*MATRIX_SIDE].y); } printf(";\n"); } } for (int k=0; k<size; k++){ printf("r_mat %i\n",k); for (int i=0; i<MATRIX_SIDE; i++){ for (int j=0; j<MATRIX_SIDE; j++) printf("%f+%fi, ",r_dump[j+MATRIX_SIDE*i+k*MATRIX_SIDE*MATRIX_SIDE].x, r_dump[j+MATRIX_SIDE*i+k*MATRIX_SIDE*MATRIX_SIDE].y); printf(";\n",i); } } free(r_dump); free(q_dump); #endif hipDeviceSynchronize(); double end = omp_get_wtime(); printf("QR MMPB\t 
%f\n",end-begin); status = hipblasDestroy(handle); hipFree(q_temp); hipFree(q_tempB); hipFree(q_complete); hipFree(matrices); hipFree(q_cblas_tempA); hipFree(q_cblas_tempB); hipFree(q_cblas_complete); } }
7a6dd7c58f950604f6fe3328be5a36efdaf06e55.cu
/* * Copyright 2013 William J. Brouwer, Pierre-Yves Taunay * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <cublas_v2.h> #include "main.h" #include "utilities.h" __device__ __constant__ int cmem_size_mmpb,cmem_size_MM_mmpb; __global__ void create_ptr_mmpb(float2 **q_A,float2 **q_B,float2 **q_C,float2 *q_temp,float2 *q_tempB,float2 *q_complete) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int mat = idx*MATRIX_SIDE*MATRIX_SIDE; __syncthreads(); if(idx < cmem_size_mmpb ) q_A[idx] = &q_temp[mat]; __syncthreads(); if(idx < cmem_size_mmpb ) q_B[idx] = &q_tempB[mat]; __syncthreads(); if(idx < cmem_size_mmpb ) q_C[idx] = &q_complete[mat]; } __global__ void copy_to_temp_mmpb(float2 *q_temp, float2 *q_complete) { int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y*blockDim.x*gridDim.x; __shared__ float2 buffer[NTH]; __syncthreads(); if(idx < cmem_size_MM_mmpb) buffer[threadIdx.x].x = q_complete[idx].x; __syncthreads(); if(idx < cmem_size_MM_mmpb) buffer[threadIdx.x].y = q_complete[idx].y; __syncthreads(); if(idx < cmem_size_MM_mmpb) q_temp[idx].x = buffer[threadIdx.x].x; __syncthreads(); if(idx < cmem_size_MM_mmpb) q_temp[idx].y = buffer[threadIdx.x].y; } __global__ void printf_ptr_mmpb(float2 **q_cblas,float2 *q) { float2 a = q[0]; float2 b = q[1]; for(int i = 0;i<cmem_size_mmpb;i++) { printf("Q-address: %p\t CBLAS: %p\n",q_cblas[i],&q[i*MATRIX_SIDE*MATRIX_SIDE]); } a.x += b.x; q[0] = a; } __device__ float cabs_sq_mmpb(float2 input){ return input.x * input.x + input.y * input.y; } /* Kernel Overview * * One block processes multiple complex matrix, performing one iteration of the qr * decomposition by givens rotations * * PYT 07/25 */ __global__ void qr_gpu_mmpb_bat(int col, float2 * matrices, float2 *q_temp, float2 *q_complete){ // values we use to calculate Givens __shared__ float2 lower[NMPBL]; __shared__ float2 upper[NMPBL]; // buffered c,s elements for constructing G^T contribution __shared__ float2 g_sin[ NMPBL*MATRIX_SIDE ]; __shared__ float2 g_cos[ NMPBL*MATRIX_SIDE ]; // buffer a row for multiplication __shared__ float2 row_left[NMPBL*MATRIX_SIDE]; // index to matrix for processing int myMatrix = threadIdx.x / MATRIX_SIDE; // index to vector for processing int vectorIndex = threadIdx.x % MATRIX_SIDE; // matrix offset for this block int memoryStride = ( blockIdx.x * NMPBL + myMatrix ) * MATRIX_SIDE * MATRIX_SIDE ; // an index into cos data int cosIndex = (vectorIndex < MATRIX_SIDE-1) ? 
MATRIX_SIDE-vectorIndex-2 : 0; // elements float2 row_ele_upper, row_ele_lower; // initialize cos &sin buffers g_sin[vectorIndex + smemstride].x = 0.0f; g_sin[vectorIndex + smemstride].y = 0.0f; g_cos[vectorIndex + smemstride].x = 1.0f; g_cos[vectorIndex + smemstride].y = 0.0f; int i=0; // init row_ele_lower.x = matrices [ global_lower_row ].x; row_ele_lower.y = matrices [ global_lower_row ].y; int steps = MATRIX_SIDE-1-col; float2 c,s; float2 tmp2,tmp3; //evaluate the sequence of Givens rotations for this column eg., lowest two rows after update : // [ a b c d e ] // [ ] // [ f g h i j ] // [ ] // [ k l m n o ] // [ ] // [c1 p + s11 u c1 q + s11 v c1 r + s11 w c1 s + s11 x c1 t + s11 y] // [ ] // [s12 p + c1 u s12 q + c1 v s12 r + c1 w s12 s + c1 x s12 t + c1 y] //each thread updates one matrix element, iterate up columns // cx == cos; x index is number rotation applied // sxy == sin; x index is number rotation applied // y index indicates first/second row in Givens rot (second == -complex conjugate of first) // main matrix update loops for (int i=0; i<steps; i++){ // load two rows __syncthreads(); row_ele_upper.x = matrices [ global_upper_row ].x; row_ele_upper.y = matrices [ global_upper_row ].y; if (vectorIndex == col){ lower[myMatrix] = row_ele_lower; upper[myMatrix] = row_ele_upper; } __syncthreads(); // calculate Givens elements float tmpA = cabs_sq_mmpb(upper[myMatrix]); float tmpB = cabs_sq_mmpb(lower[myMatrix]); float den = rsqrt(tmpA+tmpB); float2 tmpC; float2 sgn = {1.0,0.0}; if ((upper[myMatrix].x !=0) && (upper[myMatrix].y != 0)){ sgn.x = upper[myMatrix].x * rsqrt(tmpA); sgn.y = -upper[myMatrix].y * rsqrt(tmpA); } c.x = sqrt(tmpA) * den; c.y = 0.0; s.x = lower[myMatrix].x * den; s.y = lower[myMatrix].y * den; tmp2.x = (s.x*sgn.x - s.y*sgn.y); s.y *= sgn.x; s.y += s.x*sgn.y; s.y *= -1; s.x = tmp2.x; // [c1 p + s11 u c1 q + s11 v c1 r + s11 w c1 s + s11 x c1 t + s11 y] // [ ] // [s12 p + c1 u s12 q + c1 v s12 r + c1 w s12 s + c1 x s12 t + c1 y] //apply to elements and write out lower to global, it's done //-ve complex conj of sin s.x *= -1; tmp2.x = (s.x*row_ele_upper.x - s.y*row_ele_upper.y); tmp2.x += (c.x*row_ele_lower.x - c.y*row_ele_lower.y); tmp2.y = (s.y*row_ele_upper.x + s.x*row_ele_upper.y); tmp2.y += (c.y*row_ele_lower.x + c.x*row_ele_lower.y); matrices[ global_lower_row ] = tmp2; __syncthreads(); s.x *= -1; //update new lower element stored locally (the new upper row, but don't bother //with global write back tmp2.x = (c.x*row_ele_upper.x - row_ele_upper.y*c.y); tmp2.x += (row_ele_lower.x*s.x - row_ele_lower.y*s.y); tmp2.y = (row_ele_upper.y*c.x + row_ele_upper.x*c.y); tmp2.y += (row_ele_lower.y*s.x + row_ele_lower.x*s.y); row_ele_lower.x = tmp2.x; row_ele_lower.y = tmp2.y; //cache the calculated rotation if (vectorIndex == i){ g_cos [ rotation_ele_index + smemstride] = c; g_sin [ rotation_ele_index + smemstride] = s; } } i=steps-1; //write out final upper row matrices[ global_upper_row ] = tmp2; __syncthreads(); //build up contributions to G^T //these are orthogonal matrices ie., G^-1 == G^T //based on simple sequences in upper-hessenberg form eg., for a 5x5 // [c4 s41 c3 s41 s31 c2 s41 s31 s21 c1 s41 s31 s21 s11] // [ ] // [s42 c4 c3 c4 s31 c2 c4 s31 s21 c1 c4 s31 s21 s11 ] // [ ] // [ 0 s32 c3 c2 c3 s21 c1 c3 s21 s11 ] // [ ] // [ 0 0 s22 c2 c1 c2 s11 ] // [ ] // [ 0 0 0 s12 c1 ] // cx == cos; x index is number rotation applied // sxy == sin; x index is number rotation applied // y index indicates first/second row in Givens rot (second == -complex conjugate of 
first) //i==index into global memory (row) //j==index into shared memory //create and write lowest row //float2 tmp2,tmp3; float2 tmp = {0.0f,0.0f}; i = MATRIX_SIDE-1; //-ve sin on (diagonal-1) tmp.x = -(float) sub_diag_mask*g_sin[0+smemstride].x; tmp.y = (float) sub_diag_mask*g_sin[0+smemstride].y; //cos term on diagonal tmp.x += (float) for_diag_mask*g_cos[0 + smemstride].x; tmp.y += (float) for_diag_mask*g_cos[0 + smemstride].y; //write q_temp[ global_row ] = tmp; //seed second last row tmp.x = 0.0f; tmp.y = 0.0f; i = MATRIX_SIDE-2; // c0 term on diagonal tmp.x = (float) diag_mask*g_cos[0 + smemstride].x; tmp.y = (float) diag_mask*g_cos[0 + smemstride].y; // last row term, s0 tmp.x += (float) last_col_mask*g_sin[0 + smemstride].x; tmp.y += (float) last_col_mask*g_sin[0 + smemstride].y; if (steps>2){ //complete and write second last row //c1 terms, diagonal and forward tmp2.x = (float) for_diag_mask * g_cos[1 + smemstride].x; tmp2.y = (float) for_diag_mask * g_cos[1 + smemstride].y; //tmp.x = tmp.x*tmp2.x - tmp.y*tmp2.y; tmp3.x = tmp.x*tmp2.x - tmp.y*tmp2.y; tmp.y = tmp.y*tmp2.x + tmp.x*tmp2.y; tmp.x = tmp3.x; //s1 term, diagonal -1 tmp2.x = -(float) sub_diag_mask * g_sin[1 + smemstride].x; tmp2.y = (float) sub_diag_mask * g_sin[1 + smemstride].y; tmp.x += tmp2.x; tmp.y += tmp2.y; //write q_temp[ global_row ] = tmp; //a holder for building up products of sin terms float2 mult; mult.x = (float) last_two_col_mask * g_sin[1+smemstride].x; mult.x += (float) !(last_two_col_mask); mult.y = (float) last_two_col_mask * g_sin[1+smemstride].y; //complete and write third last row tmp.x = 0.0f; tmp.y = 0.0f; i = MATRIX_SIDE-3; // last column element s0 tmp.x = (float) last_col_mask * g_sin[0+smemstride].x; tmp.y = (float) last_col_mask * g_sin[0+smemstride].y; // prior row elements cn,...,c0 tmp.x += (float) ( for_diag_mask && !last_col_mask ) * g_cos [ cosIndex +smemstride].x; tmp.y += (float) ( for_diag_mask && !last_col_mask ) * g_cos [ cosIndex +smemstride].y; tmp3.x = tmp.x*mult.x-tmp.y*mult.y; tmp.y = tmp.y*mult.x+tmp.x*mult.y; tmp.x = tmp3.x; //cos terms, diagonal and forward tmp2.x = (float) for_diag_mask * g_cos[2 + smemstride].x; tmp2.y = (float) for_diag_mask * g_cos[2 + smemstride].y; //tmp.x = tmp2.x*tmp.x - tmp2.y*tmp.y; tmp3.x = tmp2.x*tmp.x - tmp2.y*tmp.y; tmp.y = tmp2.y*tmp.x + tmp2.x*tmp.y; tmp.x = tmp3.x; //-ve sin on diagonal-1 tmp.x -= (float) sub_diag_mask * g_sin[2+smemstride].x; tmp.y += (float) sub_diag_mask * g_sin[2+smemstride].y; //write q_temp[ global_row ] = tmp; //work up columns of matrix for (int j=3; j<=steps; j++){ tmp.x = 0.0f; tmp.y = 0.0f; i = MATRIX_SIDE-1-j; // last row element tmp.x = (float) last_col_mask * g_sin[0+smemstride].x; tmp.y = (float) last_col_mask * g_sin[0+smemstride].y; // prior row elements tmp.x += (float) ( for_diag_mask && !last_col_mask ) * g_cos [ cosIndex +smemstride ].x; tmp.y += (float) ( for_diag_mask && !last_col_mask ) * g_cos [ cosIndex +smemstride ].y; // multiply in the sin terms tmp2.x = (float) above_diag_mask * g_sin[j-1+smemstride].x; tmp2.x += (float) !(above_diag_mask); tmp2.y = (float) above_diag_mask * g_sin[j-1+smemstride].y; tmp3.x = mult.x*tmp2.x - mult.y*tmp2.y; mult.y = mult.y*tmp2.x + mult.x*tmp2.y; mult.x = tmp3.x; // check if nan first mult.x = (isnan(mult.x)) ? 0 : mult.x; mult.y = (isnan(mult.y)) ? 
0 : mult.y; tmp3.x = tmp.x*mult.x - tmp.y*mult.y; tmp.y = tmp.y*mult.x + tmp.x*mult.y; tmp.x = tmp3.x; // final cos & sin terms tmp2.x = (float) for_diag_mask * g_cos[j + smemstride].x; tmp2.x += (float) !(for_diag_mask); tmp2.y = (float) for_diag_mask * g_cos[j + smemstride].y; tmp3.x = tmp.x*tmp2.x - tmp.y*tmp2.y; tmp.y = tmp.y*tmp2.x + tmp.x*tmp2.y; tmp.x = tmp3.x; tmp.x -= (float) sub_diag_mask * g_sin[j+smemstride].x; tmp.y += (float) sub_diag_mask * g_sin[j+smemstride].y; // write q_temp[ global_row ] = tmp; } if (steps < MATRIX_SIDE-1){ for (int i=0; i<MATRIX_SIDE-steps-1; i++){ tmp.x = (float) diag_mask; tmp.y = 0.0; q_temp[ global_row ] = tmp; } } } else{ q_temp[ global_row ] = tmp; for (int i=MATRIX_SIDE-3; i>=0; i--){ tmp.x = (float) diag_mask; tmp.y = 0.0; q_temp[ global_row ] = tmp; } } __syncthreads(); // now multiply in this contribution to q_complete // actual work we do in all iterations: // G_N^T ... G_0^T * A = R // // need to build up Q from G like this: // Q = G_0 * G_1 ... G_N // // so in these steps each thread loads a row value from Q // iterates along columns in the temporary q, in order to // create updated Q row values /* for (int i=0; i<MATRIX_SIDE; i++){ //matrix x conjugate-tranpose row_left[vectorIndex] = q_complete[global_row_i]; __syncthreads(); float2 partial_sum = {0.0,0.0}; float2 tmp,tmp1; for (int j=0; j<MATRIX_SIDE; j++){ //since we're loading conjugate transpose, want cols of q_temp tmp = q_temp[global_col_j]; tmp1.x = tmp.x*row_left[j].x + tmp.y*row_left[j].y; tmp.y = tmp.x*row_left[j].y - tmp.y*row_left[j].x; tmp.x = tmp1.x; partial_sum.x += tmp.x; partial_sum.y += tmp.y; } __syncthreads(); //write out q_complete[global_row_i] = partial_sum; } */ } extern "C"{ void givens_qr_mmpb_bat(float * mats, int size, float * q){ float2 *q_temp,*q_tempB,*q_complete,*matrices; // Array of pointers to matrix locations float2 **q_cblas_tempA,**q_cblas_tempB, **q_cblas_complete; //initialize q for (int k=0; k<size; k++) for (int i=0; i<MATRIX_SIDE; i++) for (int j=0; j<2*MATRIX_SIDE; j++) q[j+i*2*MATRIX_SIDE+k*MATRIX_SIDE*MATRIX_SIDE*2] = (2*i==j) ? 
1.0 :0.0; int qsize = size*MATRIX_SIDE*MATRIX_SIDE; cudaMemcpyToSymbol(cmem_size_mmpb,&size,sizeof(size)); cudaMemcpyToSymbol(cmem_size_MM_mmpb,&qsize,sizeof(qsize)); // Allocate memory and copy data cudaMalloc((void**) &q_complete, sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); cudaMemcpy(q_complete, q, sizeof(float2)*MATRIX_SIDE*MATRIX_SIDE*size, cudaMemcpyHostToDevice); cudaMalloc((void**) &q_temp, sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); cudaMalloc((void**) &q_tempB, sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); cudaMalloc((void**) &matrices, sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); cudaMemcpy(matrices, mats, sizeof(float2)*MATRIX_SIDE*MATRIX_SIDE*size, cudaMemcpyHostToDevice); // Allocate the arrays of pointers cudaMalloc((void**)&q_cblas_tempA, sizeof(float2*)*size); cudaMalloc((void**)&q_cblas_tempB,sizeof(float2*)*size); cudaMalloc((void**)&q_cblas_complete,sizeof(float2*)*size); // Create the array of pointers create_ptr_mmpb <<< (int)ceil((float)size/(float)NTH) , NTH >>> ( q_cblas_tempA, q_cblas_tempB, q_cblas_complete,q_temp,q_tempB,q_complete); cublasStatus_t status; cublasHandle_t handle; status = cublasCreate(&handle); if(status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr,"ERROR: CUBLAS Initialization error\n"); exit(0); } cuComplex c_one = {1.0,0.0}; cuComplex c_zro = {0.0,0.0}; int nrows = MATRIX_SIDE; int ncol = MATRIX_SIDE; dim3 threads,blocks; threads.x = NTH; blocks.x = (int)ceil((float)size/(float)NMPBL); printf("Launch configuration = %d BLOCKS, %d THREADS\n",blocks.x,threads.x); double begin = omp_get_wtime(); int dim1d_copy2tmp = (int)ceil(sqrt((float)size*(float)MATRIX_SIDE*(float)MATRIX_SIDE/(float)NTH)); dim3 grid_copy2tmp; grid_copy2tmp.x = dim1d_copy2tmp; grid_copy2tmp.y = dim1d_copy2tmp; for (int i=0; i<MATRIX_SIDE-1; i++){ //int i=MATRIX_SIDE-2; qr_gpu_mmpb_bat<<<blocks,threads>>>(i, matrices, q_temp, q_complete); // Copy content of q_complete to q_tempB for batch GEMM copy_to_temp_mmpb <<< grid_copy2tmp, NTH >>> (q_tempB,q_complete); status = cublasCgemmBatched( handle, CUBLAS_OP_N,CUBLAS_OP_N, MATRIX_SIDE,MATRIX_SIDE,MATRIX_SIDE, &c_one, (const cuComplex**)q_cblas_tempB,nrows, (const cuComplex**)q_cblas_tempA,ncol, &c_zro, (cuComplex**)q_cblas_complete,nrows, size); if(status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr,"ERROR: CUBLAS CGEMM BATCHED error\n"); exit(0); } }//end main loops #ifdef _QR_VERBOSE_ float2 * q_dump = (float2 *) malloc(sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); cudaMemcpy(q_dump, q_complete, sizeof(float2)*MATRIX_SIDE*MATRIX_SIDE*size, cudaMemcpyDeviceToHost); float2 * r_dump = (float2 *) malloc(sizeof(float2)*size*MATRIX_SIDE*MATRIX_SIDE); cudaMemcpy(r_dump, matrices, sizeof(float2)*MATRIX_SIDE*MATRIX_SIDE*size, cudaMemcpyDeviceToHost); // Take the transpose of q_complete is CUBLAS for (int k=0; k<size; k++){ printf("q %i\n",k); for (int j=0; j<MATRIX_SIDE; j++) { for (int i=0; i<MATRIX_SIDE; i++){ printf("%f+%fi, ",q_dump[j+MATRIX_SIDE*i+k*MATRIX_SIDE*MATRIX_SIDE].x, q_dump[j+MATRIX_SIDE*i+k*MATRIX_SIDE*MATRIX_SIDE].y); } printf(";\n"); } } for (int k=0; k<size; k++){ printf("r_mat %i\n",k); for (int i=0; i<MATRIX_SIDE; i++){ for (int j=0; j<MATRIX_SIDE; j++) printf("%f+%fi, ",r_dump[j+MATRIX_SIDE*i+k*MATRIX_SIDE*MATRIX_SIDE].x, r_dump[j+MATRIX_SIDE*i+k*MATRIX_SIDE*MATRIX_SIDE].y); printf(";\n",i); } } free(r_dump); free(q_dump); #endif cudaDeviceSynchronize(); double end = omp_get_wtime(); printf("QR MMPB\t %f\n",end-begin); status = cublasDestroy(handle); cudaFree(q_temp); cudaFree(q_tempB); cudaFree(q_complete); 
cudaFree(matrices); cudaFree(q_cblas_tempA); cudaFree(q_cblas_tempB); cudaFree(q_cblas_complete); } }
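In qr_gpu_mmpb_bat above, each column step forms a complex Givens rotation from the two pivot elements cached in upper[myMatrix] and lower[myMatrix]: a real cosine c and a complex sine s built with rsqrt and the phase factor sgn. The sketch below spells that computation out on the host with cuComplex types. It reflects my reading of the rotation being formed (the kernel's sign flipping of s between applying and caching the rotation is not reproduced), and givens_rotation is a hypothetical helper, not part of the original source.

#include <cuComplex.h>
#include <math.h>
#include <stdio.h>

// One complex Givens rotation: given the pivot pair (a, b) from the same
// column of two rows, return a real cosine c and complex sine s such that
//   [  c        s ] [a]   [r]
//   [ -conj(s)  c ] [b] = [0],   |r| = sqrt(|a|^2 + |b|^2).
static void givens_rotation(cuComplex a, cuComplex b, float *c, cuComplex *s)
{
    float na = cuCabsf(a);
    float r  = hypotf(na, cuCabsf(b));
    if (r == 0.0f) { *c = 1.0f; *s = make_cuComplex(0.0f, 0.0f); return; }

    *c = na / r;
    // phase(a) = a / |a|, falling back to 1 when a == 0
    cuComplex phase = (na > 0.0f) ? make_cuComplex(a.x / na, a.y / na)
                                  : make_cuComplex(1.0f, 0.0f);
    // s = phase(a) * conj(b) / r annihilates the lower element
    cuComplex cb = cuConjf(b);
    *s = make_cuComplex((phase.x * cb.x - phase.y * cb.y) / r,
                        (phase.x * cb.y + phase.y * cb.x) / r);
}

int main(void)
{
    cuComplex a = make_cuComplex(3.0f, 1.0f);
    cuComplex b = make_cuComplex(-2.0f, 0.5f);
    float c; cuComplex s;
    givens_rotation(a, b, &c, &s);

    // residual = -conj(s)*a + c*b, expected to be ~0
    cuComplex cs = cuConjf(s);
    float zx = -(cs.x * a.x - cs.y * a.y) + c * b.x;
    float zy = -(cs.x * a.y + cs.y * a.x) + c * b.y;
    printf("c = %f, s = (%f, %f), residual = (%g, %g)\n", c, s.x, s.y, zx, zy);
    return 0;
}

The kernel caches each c/s pair in g_cos/g_sin and later assembles the Q contribution from them column by column, following the upper-Hessenberg pattern spelled out in its comments.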
e353091a58735ab92440b92e7d4eb537453142d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <hip/hip_runtime.h> #include <cstdlib> #include <time.h> #include <cctype> #include <cassert> #include <cstdio> #include <ctime> #define DATA_SIZE 1048576 #define BLOCK_NUM 32 #define THREAD_NUM 256 #ifndef nullptr #define nullptr 0 #endif using namespace std; ///////////////////////////////////////////////////// __global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize, int*pOut, clock_t* pTime ) { // extern __shared__ int sharedData[]; const size_t computeSize =*pDataSize / THREAD_NUM; const size_t tID = size_t(threadIdx.x );// const size_t bID = size_t(blockIdx.x );// // if ( tID == 0 ) pTime[bID] =clock( );// // for ( size_t i = bID * THREAD_NUM+ tID; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM ) { sharedData[tID] += pIn[i] * pIn[i]; } // __syncthreads( ); if ( tID == 0 )// { pOut[bID] = 0;// for ( size_t i = 1; i <THREAD_NUM; i++ ) { pOut[bID] += sharedData[i]; } } if ( tID == 0 ) pTime[bID +BLOCK_NUM] = clock( );// } bool CUDA_SquareSum( int* pOut,clock_t* pTime, int* pIn, size_t dataSize ) { assert( pIn != nullptr ); assert( pOut != nullptr ); int* pDevIn = nullptr; int* pDevOut = nullptr; size_t* pDevDataSize = nullptr; clock_t* pDevTime = nullptr; // 1 hipError_t cudaStatus = hipSetDevice( 0 );// if ( cudaStatus != hipSuccess ) { fprintf( stderr, "cudaSetDevice()" ); return false; } switch ( true) { default: // 2 cudaStatus = hipMalloc( (void**)&pDevIn,dataSize * sizeof( int) ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "cudaMalloc()" ); break; } cudaStatus = hipMalloc( (void**)&pDevOut,BLOCK_NUM * sizeof( int) ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "cudaMalloc()" ); break; } cudaStatus = hipMalloc( (void**)&pDevDataSize,sizeof( size_t ) ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "cudaMalloc()" ); break; } cudaStatus = hipMalloc( (void**)&pDevTime,BLOCK_NUM * 2 * sizeof( clock_t ) ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "cudaMalloc()" ); break; } // 3 cudaStatus = hipMemcpy( pDevIn, pIn, dataSize * sizeof( int ),hipMemcpyHostToDevice ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "cudaMemcpy()" ); break; } cudaStatus = hipMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), hipMemcpyHostToDevice ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "cudaMemcpy()" ); break; } // 4 hipLaunchKernelGGL(( Kernel_SquareSum), dim3(BLOCK_NUM),dim3(THREAD_NUM), THREAD_NUM * sizeof( int ), 0, pDevIn, pDevDataSize, pDevOut, pDevTime ); // 5 cudaStatus = hipGetLastError( ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "" ); break; } // 6 cudaStatus = hipDeviceSynchronize( ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "" ); break; } // 7 cudaStatus = hipMemcpy( pOut, pDevOut, BLOCK_NUM * sizeof( int ), hipMemcpyDeviceToHost); if ( cudaStatus != hipSuccess) { fprintf( stderr, "" ); break; } cudaStatus = hipMemcpy( pTime, pDevTime, BLOCK_NUM * 2 * sizeof( clock_t ), hipMemcpyDeviceToHost ); if ( cudaStatus != hipSuccess) { fprintf( stderr, "" ); break; } hipFree( pDevIn ); hipFree( pDevOut ); hipFree( pDevDataSize ); hipFree( pDevTime ); return true; } hipFree( pDevIn ); hipFree( pDevOut ); hipFree( pDevDataSize ); hipFree( pDevTime ); return false; } void GenerateData( int* pData,size_t dataSize )// { assert( pData != nullptr ); for ( size_t i = 0; i <dataSize; i++ ) { srand( i + 3 ); pData[i] = rand( ) % 100; } } int main( int argc, char** argv )// { int* pData = nullptr; 
int* pResult = nullptr; clock_t* pTime = nullptr; // CUDAhost hipError_t cudaStatus = hipHostMalloc( &pData, DATA_SIZE * sizeof( int ) ); if ( cudaStatus != hipSuccess ) { fprintf( stderr, "" ); return 1; } cudaStatus = hipHostMalloc( &pResult, BLOCK_NUM * sizeof( int ) ); if ( cudaStatus != hipSuccess ) { fprintf( stderr, "" ); return 1; } cudaStatus = hipHostMalloc( &pTime, BLOCK_NUM * 2 * sizeof( clock_t ) ); if ( cudaStatus != hipSuccess ) { fprintf( stderr, "" ); return 1; } GenerateData( pData, DATA_SIZE );// CUDA_SquareSum( pResult, pTime, pData, DATA_SIZE );// // CPU int totalResult=0; for ( int i = 0; i < BLOCK_NUM; ++i ) { totalResult += pResult[i]; } // clock_t startTime = pTime[0]; clock_t endTime = pTime[BLOCK_NUM]; for ( int i = 0; i < BLOCK_NUM; ++i ) { if ( startTime > pTime[i] )startTime = pTime[i]; if ( endTime < pTime[i +BLOCK_NUM] ) endTime = pTime[i + BLOCK_NUM]; } clock_t elapsed = endTime - startTime; // char* pOverFlow = nullptr; if ( totalResult < 0 )pOverFlow = ""; else pOverFlow = ""; // printf( "CUDA%d%s\n%d\n", totalResult, pOverFlow, elapsed ); hipDeviceProp_t prop; if ( hipGetDeviceProperties(&prop, 0 ) == hipSuccess ) { float actualTime = float( elapsed ) / float(prop.clockRate ); printf( "%.2fms\n", actualTime ); printf( "%.2fMB/s\n", float( DATA_SIZE * sizeof( int )>> 20 ) * 1000.0f / actualTime ); printf( "GPU%s\n", prop.name ); } hipHostFree( pData ); hipHostFree( pResult ); hipHostFree( pTime ); return 0; }
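Kernel_SquareSum above accumulates into the dynamically sized extern __shared__ buffer without zero-initializing it first, and the serial loop run by thread 0 starts at index 1, so sharedData[0] never reaches pOut[bID]. A hedged CUDA sketch of the same per-block sum of squares with both details handled and a shared-memory tree reduction in place of the serial loop; the name square_sum_reduce and the long long accumulator are my choices, not taken from the original:

#include <cuda_runtime.h>

// Per-block sum of squares: each thread builds a grid-stride partial sum in a
// register, parks it in shared memory, and the block reduces it as a binary tree.
__global__ void square_sum_reduce(const int *in, size_t n, long long *blockSums)
{
    extern __shared__ long long partial[];
    const unsigned int tid = threadIdx.x;

    long long acc = 0;                                   // explicit zero start
    for (size_t i = (size_t)blockIdx.x * blockDim.x + tid; i < n;
         i += (size_t)blockDim.x * gridDim.x)
        acc += (long long)in[i] * in[i];
    partial[tid] = acc;
    __syncthreads();

    // tree reduction over the block (blockDim.x assumed to be a power of two)
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) partial[tid] += partial[tid + s];
        __syncthreads();
    }
    if (tid == 0) blockSums[blockIdx.x] = partial[0];    // index 0 included
}

A launch matching the configuration above would be square_sum_reduce<<<BLOCK_NUM, THREAD_NUM, THREAD_NUM * sizeof(long long)>>>(pDevIn, DATA_SIZE, pDevBlockSums), with pDevBlockSums sized to BLOCK_NUM entries; keeping an int accumulator and the host-side overflow check, as the original does, is equally possible.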
e353091a58735ab92440b92e7d4eb537453142d8.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cuda_runtime.h> #include <cstdlib> #include <time.h> #include <cctype> #include <cassert> #include <cstdio> #include <ctime> #define DATA_SIZE 1048576 #define BLOCK_NUM 32 #define THREAD_NUM 256 #ifndef nullptr #define nullptr 0 #endif using namespace std; ////////////////////////在设备上运行的内核函数///////////////////////////// __global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize, int*pOut, clock_t* pTime ) { // 声明一个动态分配的共享存储器 extern __shared__ int sharedData[]; const size_t computeSize =*pDataSize / THREAD_NUM; const size_t tID = size_t(threadIdx.x );// 线程 const size_t bID = size_t(blockIdx.x );// 块 // 开始计时 if ( tID == 0 ) pTime[bID] =clock( );// 选择任意一个线程进行计时 // 执行计算 for ( size_t i = bID * THREAD_NUM+ tID; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM ) { sharedData[tID] += pIn[i] * pIn[i]; } // 同步一个块中的其它线程 __syncthreads( ); if ( tID == 0 )// 由号线程完成数据的累加 { pOut[bID] = 0;// 先初始化为 for ( size_t i = 1; i <THREAD_NUM; i++ ) { pOut[bID] += sharedData[i]; } } if ( tID == 0 ) pTime[bID +BLOCK_NUM] = clock( );// 将结束时间放至后半部分 } bool CUDA_SquareSum( int* pOut,clock_t* pTime, int* pIn, size_t dataSize ) { assert( pIn != nullptr ); assert( pOut != nullptr ); int* pDevIn = nullptr; int* pDevOut = nullptr; size_t* pDevDataSize = nullptr; clock_t* pDevTime = nullptr; // 1、设置设备 cudaError_t cudaStatus = cudaSetDevice( 0 );// 只要机器安装了英伟达显卡,那么会调用成功 if ( cudaStatus != cudaSuccess ) { fprintf( stderr, "调用cudaSetDevice()函数失败!" ); return false; } switch ( true) { default: // 2、分配显存空间 cudaStatus = cudaMalloc( (void**)&pDevIn,dataSize * sizeof( int) ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "调用cudaMalloc()函数初始化显卡中数组时失败!" ); break; } cudaStatus = cudaMalloc( (void**)&pDevOut,BLOCK_NUM * sizeof( int) ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "调用cudaMalloc()函数初始化显卡中返回值时失败!" ); break; } cudaStatus = cudaMalloc( (void**)&pDevDataSize,sizeof( size_t ) ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "调用cudaMalloc()函数初始化显卡中数据大小时失败!" ); break; } cudaStatus = cudaMalloc( (void**)&pDevTime,BLOCK_NUM * 2 * sizeof( clock_t ) ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "调用cudaMalloc()函数初始化显卡中耗费用时变量失败!" ); break; } // 3、将宿主程序数据复制到显存中 cudaStatus = cudaMemcpy( pDevIn, pIn, dataSize * sizeof( int ),cudaMemcpyHostToDevice ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "调用cudaMemcpy()函数初始化宿主程序数据数组到显卡时失败!" ); break; } cudaStatus = cudaMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), cudaMemcpyHostToDevice ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "调用cudaMemcpy()函数初始化宿主程序数据大小到显卡时失败!" ); break; } // 4、执行程序,宿主程序等待显卡执行完毕 Kernel_SquareSum<<<BLOCK_NUM,THREAD_NUM, THREAD_NUM * sizeof( int )>>> ( pDevIn, pDevDataSize, pDevOut, pDevTime ); // 5、查询内核初始化的时候是否出错 cudaStatus = cudaGetLastError( ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "显卡执行程序时失败!" ); break; } // 6、与内核同步等待执行完毕 cudaStatus = cudaDeviceSynchronize( ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "在与内核同步的过程中发生问题!" ); break; } // 7、获取数据 cudaStatus = cudaMemcpy( pOut, pDevOut, BLOCK_NUM * sizeof( int ), cudaMemcpyDeviceToHost); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "在将结果数据从显卡复制到宿主程序中失败!" ); break; } cudaStatus = cudaMemcpy( pTime, pDevTime, BLOCK_NUM * 2 * sizeof( clock_t ), cudaMemcpyDeviceToHost ); if ( cudaStatus != cudaSuccess) { fprintf( stderr, "在将耗费用时数据从显卡复制到宿主程序中失败!" 
); break; } cudaFree( pDevIn ); cudaFree( pDevOut ); cudaFree( pDevDataSize ); cudaFree( pDevTime ); return true; } cudaFree( pDevIn ); cudaFree( pDevOut ); cudaFree( pDevDataSize ); cudaFree( pDevTime ); return false; } void GenerateData( int* pData,size_t dataSize )// 产生数据 { assert( pData != nullptr ); for ( size_t i = 0; i <dataSize; i++ ) { srand( i + 3 ); pData[i] = rand( ) % 100; } } int main( int argc, char** argv )// 函数的主入口 { int* pData = nullptr; int* pResult = nullptr; clock_t* pTime = nullptr; // 使用CUDA内存分配器分配host端 cudaError_t cudaStatus = cudaMallocHost( &pData, DATA_SIZE * sizeof( int ) ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr, "在主机中分配资源失败!" ); return 1; } cudaStatus = cudaMallocHost( &pResult, BLOCK_NUM * sizeof( int ) ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr, "在主机中分配资源失败!" ); return 1; } cudaStatus = cudaMallocHost( &pTime, BLOCK_NUM * 2 * sizeof( clock_t ) ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr, "在主机中分配资源失败!" ); return 1; } GenerateData( pData, DATA_SIZE );// 通过随机数产生数据 CUDA_SquareSum( pResult, pTime, pData, DATA_SIZE );// 执行平方和 // 在CPU中将结果组合起来 int totalResult=0; for ( int i = 0; i < BLOCK_NUM; ++i ) { totalResult += pResult[i]; } // 计算执行的时间 clock_t startTime = pTime[0]; clock_t endTime = pTime[BLOCK_NUM]; for ( int i = 0; i < BLOCK_NUM; ++i ) { if ( startTime > pTime[i] )startTime = pTime[i]; if ( endTime < pTime[i +BLOCK_NUM] ) endTime = pTime[i + BLOCK_NUM]; } clock_t elapsed = endTime - startTime; // 判断是否溢出 char* pOverFlow = nullptr; if ( totalResult < 0 )pOverFlow = "(溢出)"; else pOverFlow = ""; // 显示基准测试 printf( "用CUDA计算平方和的结果是:%d%s\n耗费用时:%d\n", totalResult, pOverFlow, elapsed ); cudaDeviceProp prop; if ( cudaGetDeviceProperties(&prop, 0 ) == cudaSuccess ) { float actualTime = float( elapsed ) / float(prop.clockRate ); printf( "实际执行时间为:%.2fms\n", actualTime ); printf( "带宽为:%.2fMB/s\n", float( DATA_SIZE * sizeof( int )>> 20 ) * 1000.0f / actualTime ); printf( "GPU设备型号:%s\n", prop.name ); } cudaFreeHost( pData ); cudaFreeHost( pResult ); cudaFreeHost( pTime ); return 0; }
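At the end of main(), the elapsed device clock() ticks are turned into wall time by dividing by cudaDeviceProp::clockRate; because clockRate is reported in kHz, i.e. cycles per millisecond, the quotient is already in milliseconds, which is what the %.2f ms printout assumes. A small hedged helper that makes the unit conversion explicit (ticks_to_ms is a hypothetical name, not from the original):

#include <cuda_runtime.h>
#include <time.h>

// Device clock() values count GPU core cycles; cudaDeviceProp::clockRate is in
// kHz, i.e. cycles per millisecond, so ticks / clockRate yields milliseconds.
static float ticks_to_ms(clock_t startTick, clock_t endTick, int device)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return (float)(endTick - startTick) / (float)prop.clockRate;
}

Compared with cudaEvent-based timing this is a rough figure, since the per-multiprocessor clock() counters are not synchronized across blocks.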
4d4beba2dad1e68ff544bf6709f2d6faff096d99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===------------ omp_data.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the data objects used on the GPU device. // //===----------------------------------------------------------------------===// #include "omptarget-nvptx.h" //////////////////////////////////////////////////////////////////////////////// // global device envrionment //////////////////////////////////////////////////////////////////////////////// __device__ omptarget_device_environmentTy omptarget_device_environment; //////////////////////////////////////////////////////////////////////////////// // global data holding OpenMP state information //////////////////////////////////////////////////////////////////////////////// __device__ omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT> omptarget_nvptx_device_State[MAX_SM]; __device__ omptarget_nvptx_Queue<omptarget_nvptx_SimpleThreadPrivateContext, OMP_STATE_COUNT> omptarget_nvptx_device_simpleState[MAX_SM]; // Pointer to this team's OpenMP state object __device__ __shared__ omptarget_nvptx_ThreadPrivateContext *omptarget_nvptx_threadPrivateContext; __device__ __shared__ omptarget_nvptx_SimpleThreadPrivateContext *omptarget_nvptx_simpleThreadPrivateContext; __device__ __shared__ void *omptarget_nvptx_simpleGlobalData; //////////////////////////////////////////////////////////////////////////////// // The team master sets the outlined parallel function in this variable to // communicate with the workers. Since it is in shared memory, there is one // copy of these variables for each kernel, instance, and team. //////////////////////////////////////////////////////////////////////////////// volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn; //////////////////////////////////////////////////////////////////////////////// // OpenMP kernel execution parameters //////////////////////////////////////////////////////////////////////////////// __device__ __shared__ uint32_t execution_param; //////////////////////////////////////////////////////////////////////////////// // Data sharing state //////////////////////////////////////////////////////////////////////////////// __device__ __shared__ DataSharingStateTy DataSharingState; //////////////////////////////////////////////////////////////////////////////// // Scratchpad for teams reduction. //////////////////////////////////////////////////////////////////////////////// __device__ __shared__ void *ReductionScratchpadPtr; //////////////////////////////////////////////////////////////////////////////// // Data sharing related variables. //////////////////////////////////////////////////////////////////////////////// __device__ __shared__ omptarget_nvptx_SharedArgs omptarget_nvptx_globalArgs;
4d4beba2dad1e68ff544bf6709f2d6faff096d99.cu
//===------------ omp_data.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the data objects used on the GPU device. // //===----------------------------------------------------------------------===// #include "omptarget-nvptx.h" //////////////////////////////////////////////////////////////////////////////// // global device envrionment //////////////////////////////////////////////////////////////////////////////// __device__ omptarget_device_environmentTy omptarget_device_environment; //////////////////////////////////////////////////////////////////////////////// // global data holding OpenMP state information //////////////////////////////////////////////////////////////////////////////// __device__ omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT> omptarget_nvptx_device_State[MAX_SM]; __device__ omptarget_nvptx_Queue<omptarget_nvptx_SimpleThreadPrivateContext, OMP_STATE_COUNT> omptarget_nvptx_device_simpleState[MAX_SM]; // Pointer to this team's OpenMP state object __device__ __shared__ omptarget_nvptx_ThreadPrivateContext *omptarget_nvptx_threadPrivateContext; __device__ __shared__ omptarget_nvptx_SimpleThreadPrivateContext *omptarget_nvptx_simpleThreadPrivateContext; __device__ __shared__ void *omptarget_nvptx_simpleGlobalData; //////////////////////////////////////////////////////////////////////////////// // The team master sets the outlined parallel function in this variable to // communicate with the workers. Since it is in shared memory, there is one // copy of these variables for each kernel, instance, and team. //////////////////////////////////////////////////////////////////////////////// volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn; //////////////////////////////////////////////////////////////////////////////// // OpenMP kernel execution parameters //////////////////////////////////////////////////////////////////////////////// __device__ __shared__ uint32_t execution_param; //////////////////////////////////////////////////////////////////////////////// // Data sharing state //////////////////////////////////////////////////////////////////////////////// __device__ __shared__ DataSharingStateTy DataSharingState; //////////////////////////////////////////////////////////////////////////////// // Scratchpad for teams reduction. //////////////////////////////////////////////////////////////////////////////// __device__ __shared__ void *ReductionScratchpadPtr; //////////////////////////////////////////////////////////////////////////////// // Data sharing related variables. //////////////////////////////////////////////////////////////////////////////// __device__ __shared__ omptarget_nvptx_SharedArgs omptarget_nvptx_globalArgs;
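This translation unit only declares file-scope __device__ and __shared__ objects that the rest of the runtime refers to by name. A __device__ global such as omptarget_device_environment is not reachable through an ordinary host pointer; host code normally initializes it through the symbol API. A minimal, self-contained illustration of that pattern, where DeviceEnv, g_env and show_env are hypothetical stand-ins rather than libomptarget types:

#include <cuda_runtime.h>
#include <stdio.h>

struct DeviceEnv { int debugLevel; int numDevices; };

// File-scope device global, analogous in shape to omptarget_device_environment.
__device__ DeviceEnv g_env;

__global__ void show_env()
{
    if (threadIdx.x == 0 && blockIdx.x == 0)
        printf("debugLevel=%d numDevices=%d\n", g_env.debugLevel, g_env.numDevices);
}

int main()
{
    DeviceEnv host = {1, 4};
    // Copy by symbol: the device global has no host-visible address of its own.
    cudaMemcpyToSymbol(g_env, &host, sizeof(host));
    show_env<<<1, 32>>>();
    cudaDeviceSynchronize();
    return 0;
}

The __shared__ declarations in the file (execution_param, DataSharingState, and so on) follow a different rule: they are per-block and live only for the duration of a kernel, so they are initialized from device code rather than from the host.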
6e89c94385eacac75b0b9ffa3428c2d311ab848a.hip
// !!! This is a file automatically generated by hipify!!! # include "PixelTracklet.cuh" #ifdef __HIPCC__ #define CUDA_CONST_VAR __device__ #endif //#ifdef CACHE_ALLOC #include "allocate.h" //#endif void SDL::createPixelTrackletsInUnifiedMemory(struct pixelTracklets& pixelTrackletsInGPU, unsigned int maxPixelTracklets) { #ifdef CACHE_ALLOC hipStream_t stream =0; pixelTrackletsInGPU.segmentIndices = (unsigned int*)cms::cuda::allocate_managed(maxPixelTracklets * sizeof(unsigned int) * 2,stream); pixelTrackletsInGPU.lowerModuleIndices = (unsigned int*)cms::cuda::allocate_managed(maxPixelTracklets * sizeof(unsigned int) * 2,stream);//split up to avoid runtime error of exceeding max byte allocation at a time pixelTrackletsInGPU.nPixelTracklets = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); pixelTrackletsInGPU.zOut = (float*)cms::cuda::allocate_managed(maxPixelTracklets * sizeof(float) * 4,stream); pixelTrackletsInGPU.betaIn = (float*)cms::cuda::allocate_managed(maxPixelTracklets * sizeof(float) * 3,stream); #else hipMallocManaged(&pixelTrackletsInGPU.segmentIndices, 2 * maxPixelTracklets * sizeof(unsigned int)); hipMallocManaged(&pixelTrackletsInGPU.lowerModuleIndices, 2 * maxPixelTracklets * sizeof(unsigned int)); hipMallocManaged(&pixelTrackletsInGPU.nPixelTracklets, sizeof(unsigned int)); hipMallocManaged(&pixelTrackletsInGPU.zOut, maxPixelTracklets *4* sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.betaIn, maxPixelTracklets *3* sizeof(float)); #ifdef CUT_VALUE_DEBUG hipMallocManaged(&pixelTrackletsInGPU.zLo, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.zHi, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.zLoPointed, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.zHiPointed, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.sdlCut, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.betaInCut, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.betaOutCut, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.deltaBetaCut, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.rtLo, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.rtHi, maxPixelTracklets * sizeof(float)); hipMallocManaged(&pixelTrackletsInGPU.kZ, maxPixelTracklets * sizeof(float)); #endif #endif pixelTrackletsInGPU.rtOut = pixelTrackletsInGPU.zOut + maxPixelTracklets; pixelTrackletsInGPU.deltaPhiPos = pixelTrackletsInGPU.zOut + maxPixelTracklets * 2; pixelTrackletsInGPU.deltaPhi = pixelTrackletsInGPU.zOut + maxPixelTracklets * 3; pixelTrackletsInGPU.betaOut = pixelTrackletsInGPU.betaIn + maxPixelTracklets; pixelTrackletsInGPU.pt_beta = pixelTrackletsInGPU.betaIn + maxPixelTracklets * 2; hipMemset(pixelTrackletsInGPU.nPixelTracklets, 0, sizeof(unsigned int)); } void SDL::createPixelTrackletsInExplicitMemory(struct pixelTracklets& pixelTrackletsInGPU, unsigned int maxPixelTracklets) { #ifdef CACHE_ALLOC hipStream_t stream = 0; int dev; hipGetDevice(&dev); pixelTrackletsInGPU.segmentIndices = (unsigned int*)cms::cuda::allocate_device(dev, maxPixelTracklets * sizeof(unsigned int) * 2,stream); pixelTrackletsInGPU.lowerModuleIndices = (unsigned int*)cms::cuda::allocate_device(dev, maxPixelTracklets * sizeof(unsigned int) * 2,stream);//split up to avoid runtime error of exceeding max byte allocation at a time pixelTrackletsInGPU.nPixelTracklets = (unsigned 
int*)cms::cuda::allocate_device(dev, sizeof(unsigned int),stream); pixelTrackletsInGPU.zOut = (float*)cms::cuda::allocate_device(dev, maxPixelTracklets * sizeof(float) * 4,stream); pixelTrackletsInGPU.betaIn = (float*)cms::cuda::allocate_device(dev, maxPixelTracklets * sizeof(float) * 3,stream); #else hipMalloc(&pixelTrackletsInGPU.segmentIndices, 2 * maxPixelTracklets * sizeof(unsigned int)); hipMalloc(&pixelTrackletsInGPU.lowerModuleIndices, 2 * maxPixelTracklets * sizeof(unsigned int)); hipMalloc(&pixelTrackletsInGPU.nPixelTracklets, sizeof(unsigned int)); hipMalloc(&pixelTrackletsInGPU.zOut, maxPixelTracklets *4* sizeof(float)); hipMalloc(&pixelTrackletsInGPU.betaIn, maxPixelTracklets *3* sizeof(float)); #endif pixelTrackletsInGPU.rtOut = pixelTrackletsInGPU.zOut + maxPixelTracklets; pixelTrackletsInGPU.deltaPhiPos = pixelTrackletsInGPU.zOut + maxPixelTracklets * 2; pixelTrackletsInGPU.deltaPhi = pixelTrackletsInGPU.zOut + maxPixelTracklets * 3; pixelTrackletsInGPU.betaOut = pixelTrackletsInGPU.betaIn + maxPixelTracklets; pixelTrackletsInGPU.pt_beta = pixelTrackletsInGPU.betaIn + maxPixelTracklets * 2; hipMemset(pixelTrackletsInGPU.nPixelTracklets, 0, sizeof(unsigned int)); } #ifdef CUT_VALUE_DEBUG __device__ void SDL::addPixelTrackletToMemory(struct pixelTracklets& pixelTrackletsInGPU, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, unsigned int innerInnerLowerModuleIndex, unsigned int innerOuterLowerModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, float& zOut, float& rtOut, float& deltaPhiPos, float& deltaPhi, float& betaIn, float& betaOut, float pt_beta, float& zLo, float& zHi, float& rtLo, float& rtHi, float& zLoPointed, float& zHiPointed, float& sdlCut, float& betaInCut, float& betaOutCut, float& deltaBetaCut, float& kZ, unsigned int pixelTrackletIndex) #else __device__ void SDL::addPixelTrackletToMemory(struct pixelTracklets& pixelTrackletsInGPU, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, unsigned int innerInnerLowerModuleIndex, unsigned int innerOuterLowerModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, float& zOut, float& rtOut, float& deltaPhiPos, float& deltaPhi, float& betaIn, float& betaOut, float pt_beta, unsigned int pixelTrackletIndex) #endif { pixelTrackletsInGPU.segmentIndices[2 * pixelTrackletIndex] = innerSegmentIndex; pixelTrackletsInGPU.segmentIndices[2 * pixelTrackletIndex + 1] = outerSegmentIndex; pixelTrackletsInGPU.lowerModuleIndices[2 * pixelTrackletIndex] = outerInnerLowerModuleIndex; pixelTrackletsInGPU.lowerModuleIndices[2 * pixelTrackletIndex + 1] = outerOuterLowerModuleIndex; pixelTrackletsInGPU.zOut[pixelTrackletIndex] = zOut; pixelTrackletsInGPU.rtOut[pixelTrackletIndex] = rtOut; pixelTrackletsInGPU.deltaPhiPos[pixelTrackletIndex] = deltaPhiPos; pixelTrackletsInGPU.deltaPhi[pixelTrackletIndex] = deltaPhi; pixelTrackletsInGPU.betaIn[pixelTrackletIndex] = betaIn; pixelTrackletsInGPU.betaOut[pixelTrackletIndex] = betaOut; pixelTrackletsInGPU.pt_beta[pixelTrackletIndex] = pt_beta; #ifdef CUT_VALUE_DEBUG pixelTrackletsInGPU.zLo[pixelTrackletIndex] = zLo; pixelTrackletsInGPU.zHi[pixelTrackletIndex] = zHi; pixelTrackletsInGPU.rtLo[pixelTrackletIndex] = rtLo; pixelTrackletsInGPU.rtHi[pixelTrackletIndex] = rtHi; pixelTrackletsInGPU.zLoPointed[pixelTrackletIndex] = zLoPointed; pixelTrackletsInGPU.zHiPointed[pixelTrackletIndex] = zHiPointed; pixelTrackletsInGPU.sdlCut[pixelTrackletIndex] = sdlCut; 
pixelTrackletsInGPU.betaInCut[pixelTrackletIndex] = betaInCut; pixelTrackletsInGPU.betaOutCut[pixelTrackletIndex] = betaOutCut; pixelTrackletsInGPU.deltaBetaCut[pixelTrackletIndex] = deltaBetaCut; pixelTrackletsInGPU.kZ[pixelTrackletIndex] = kZ; #endif } void SDL::pixelTracklets::freeMemoryCache() { #ifdef Explicit_Tracklet int dev; hipGetDevice(&dev); cms::cuda::free_device(dev,segmentIndices); cms::cuda::free_device(dev,lowerModuleIndices); cms::cuda::free_device(dev,zOut); cms::cuda::free_device(dev,betaIn); cms::cuda::free_device(dev,nPixelTracklets); #else cms::cuda::free_managed(segmentIndices); cms::cuda::free_managed(lowerModuleIndices); cms::cuda::free_managed(zOut); cms::cuda::free_managed(betaIn); cms::cuda::free_managed(nPixelTracklets); #endif } void SDL::pixelTracklets::freeMemory() { hipFree(segmentIndices); hipFree(lowerModuleIndices); hipFree(nPixelTracklets); hipFree(zOut); hipFree(betaIn); #ifdef CUT_VALUE_DEBUG hipFree(zLo); hipFree(zHi); hipFree(rtLo); hipFree(rtHi); hipFree(zLoPointed); hipFree(zHiPointed); hipFree(sdlCut); hipFree(betaInCut); hipFree(betaOutCut); hipFree(deltaBetaCut); hipFree(kZ); #endif } SDL::pixelTracklets::pixelTracklets() { segmentIndices = nullptr; lowerModuleIndices = nullptr; nPixelTracklets = nullptr; zOut = nullptr; rtOut = nullptr; deltaPhiPos = nullptr; deltaPhi = nullptr; betaIn = nullptr; betaOut = nullptr; pt_beta = nullptr; #ifdef CUT_VALUE_DEBUG zLo = nullptr; zHi = nullptr; rtLo = nullptr; rtHi = nullptr; zLoPointed = nullptr; zHiPointed = nullptr; sdlCut = nullptr; betaInCut = nullptr; betaOutCut = nullptr; deltaBetaCut = nullptr; kZ = nullptr; #endif } SDL::pixelTracklets::~pixelTracklets() { } __device__ bool SDL::runPixelTrackletDefaultAlgo(struct modules& modulesInGPU, struct hits& hitsInGPU, struct miniDoublets& mdsInGPU, struct segments& segmentsInGPU, unsigned int innerInnerLowerModuleIndex, unsigned int innerOuterLowerModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, float& zOut, float& rtOut, float& deltaPhiPos, float& deltaPhi, float& betaIn, float& betaOut, float& pt_beta, float& zLo, float& zHi, float& rtLo, float& rtHi, float& zLoPointed, float& zHiPointed, float& sdlCut, float& betaInCut, float& betaOutCut, float& deltaBetaCut, float& kZ, unsigned int N_MAX_SEGMENTS_PER_MODULE) { bool pass = false; zLo = -999; zHi = -999; rtLo = -999; rtHi = -999; zLoPointed = -999; zHiPointed = -999; kZ = -999; betaInCut = -999; short outerInnerLowerModuleSubdet = modulesInGPU.subdets[outerInnerLowerModuleIndex]; short outerOuterLowerModuleSubdet = modulesInGPU.subdets[outerOuterLowerModuleIndex]; if(outerInnerLowerModuleSubdet == SDL::Barrel and outerOuterLowerModuleSubdet == SDL::Barrel) { pass = runTrackletDefaultAlgoPPBB(modulesInGPU, hitsInGPU, mdsInGPU, segmentsInGPU, innerInnerLowerModuleIndex, outerInnerLowerModuleIndex, outerOuterLowerModuleIndex, innerSegmentIndex, outerSegmentIndex,zOut,rtOut,deltaPhiPos,deltaPhi,betaIn,betaOut,pt_beta,N_MAX_SEGMENTS_PER_MODULE, zLo, zHi, zLoPointed, zHiPointed, sdlCut, betaOutCut, deltaBetaCut); } else if(outerInnerLowerModuleSubdet == SDL::Barrel and outerOuterLowerModuleSubdet == SDL::Endcap) { pass = runTrackletDefaultAlgoPPBB(modulesInGPU, hitsInGPU, mdsInGPU, segmentsInGPU, innerInnerLowerModuleIndex, outerInnerLowerModuleIndex, outerOuterLowerModuleIndex, innerSegmentIndex, outerSegmentIndex,zOut,rtOut,deltaPhiPos,deltaPhi,betaIn,betaOut,pt_beta,N_MAX_SEGMENTS_PER_MODULE, 
zLo, zHi, zLoPointed, zHiPointed, sdlCut, betaOutCut, deltaBetaCut); } else if(outerInnerLowerModuleSubdet == SDL::Endcap and outerOuterLowerModuleSubdet == SDL::Endcap) { pass = runTrackletDefaultAlgoPPEE(modulesInGPU, hitsInGPU, mdsInGPU, segmentsInGPU, innerInnerLowerModuleIndex, outerInnerLowerModuleIndex, outerOuterLowerModuleIndex, innerSegmentIndex, outerSegmentIndex,zOut,rtOut,deltaPhiPos,deltaPhi,betaIn,betaOut,pt_beta,N_MAX_SEGMENTS_PER_MODULE, zLo, rtLo, rtHi, sdlCut, betaInCut, betaOutCut, deltaBetaCut, kZ); } return pass; } __device__ bool SDL::runTrackletDefaultAlgoPPBB(struct modules& modulesInGPU, struct hits& hitsInGPU, struct miniDoublets& mdsInGPU ,struct segments& segmentsInGPU, unsigned int pixelModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, float& zOut, float& rtOut, float& dPhiPos, float& dPhi, float& betaIn, float& betaOut, float& pt_beta, unsigned int N_MAX_SEGMENTS_PER_MODULE, float& zLo, float& zHi, float& zLoPointed, float& zHiPointed, float& sdlCut, float& betaOutCut, float& deltaBetaCut) // pixel to BB and BE segments { bool pass = true; bool isPS_OutLo = (modulesInGPU.moduleType[outerInnerLowerModuleIndex] == SDL::PS); unsigned int innerInnerAnchorHitIndex = segmentsInGPU.innerMiniDoubletAnchorHitIndices[innerSegmentIndex]; unsigned int outerInnerAnchorHitIndex = segmentsInGPU.innerMiniDoubletAnchorHitIndices[outerSegmentIndex]; unsigned int innerOuterAnchorHitIndex = segmentsInGPU.outerMiniDoubletAnchorHitIndices[innerSegmentIndex]; unsigned int outerOuterAnchorHitIndex= segmentsInGPU.outerMiniDoubletAnchorHitIndices[outerSegmentIndex]; if(fabsf(deltaPhi(hitsInGPU.xs[innerOuterAnchorHitIndex], hitsInGPU.ys[innerOuterAnchorHitIndex], hitsInGPU.zs[innerOuterAnchorHitIndex], hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerInnerAnchorHitIndex])) > M_PI/2.) { pass = false; } unsigned int pixelSegmentArrayIndex = innerSegmentIndex - (pixelModuleIndex * N_MAX_SEGMENTS_PER_MODULE); float ptIn = segmentsInGPU.ptIn[pixelSegmentArrayIndex]; float ptSLo = ptIn; float px = segmentsInGPU.px[pixelSegmentArrayIndex]; float py = segmentsInGPU.py[pixelSegmentArrayIndex]; float pz = segmentsInGPU.pz[pixelSegmentArrayIndex]; float ptErr = segmentsInGPU.ptErr[pixelSegmentArrayIndex]; float etaErr = segmentsInGPU.etaErr[pixelSegmentArrayIndex]; ptSLo = fmaxf(PTCUT, ptSLo - 10.0f*fmaxf(ptErr, 0.005f*ptSLo)); ptSLo = fminf(10.0f, ptSLo); float rt_InLo = hitsInGPU.rts[innerInnerAnchorHitIndex]; float rt_InUp = hitsInGPU.rts[innerOuterAnchorHitIndex]; float rt_OutLo = hitsInGPU.rts[outerInnerAnchorHitIndex]; float z_InLo = hitsInGPU.zs[innerInnerAnchorHitIndex]; float z_InUp = hitsInGPU.zs[innerOuterAnchorHitIndex]; float z_OutLo = hitsInGPU.zs[outerInnerAnchorHitIndex]; float rt_InOut = hitsInGPU.rts[innerOuterAnchorHitIndex]; float z_InOut = hitsInGPU.zs[innerOuterAnchorHitIndex]; float alpha1GeV_OutLo = asinf(fminf(rt_OutLo * k2Rinv1GeVf / ptCut, sinAlphaMax)); float rtRatio_OutLoInLo = rt_OutLo / rt_InLo; // Outer segment beginning rt divided by inner segment beginning rt; const float rtRatio_OutLoInOut = rt_OutLo / rt_InOut; // Outer segment beginning rt divided by inner segment beginning rt; float dzDrtScale = tanf(alpha1GeV_OutLo) / alpha1GeV_OutLo; // The track can bend in r-z plane slightly const float zpitch_InLo = 0.05f; const float zpitch_InOut = 0.05f; float zpitch_OutLo = (isPS_OutLo ? 
pixelPSZpitch : strip2SZpitch); float zGeom = zpitch_InLo + zpitch_OutLo; zHi = z_InOut + (z_InOut + deltaZLum) * (rtRatio_OutLoInOut - 1.f) * (z_InOut < 0.f ? 1.f : dzDrtScale) + (zpitch_InOut + zpitch_OutLo); zLo = z_InOut + (z_InOut - deltaZLum) * (rtRatio_OutLoInOut - 1.f) * (z_InOut > 0.f ? 1.f : dzDrtScale) - (zpitch_InOut + zpitch_OutLo); //slope-correction only on outer end if (not (z_OutLo >= zLo and z_OutLo <= zHi)) { pass = false; } const float coshEta = sqrtf(ptIn * ptIn + pz * pz) / ptIn; // const float drt_OutLo_InLo = (rt_OutLo - rt_InLo); const float drt_OutLo_InUp = (rt_OutLo - rt_InUp); const float invRt_InLo = 1. / rt_InLo; const float r3_InLo = sqrtf(z_InLo * z_InLo + rt_InLo * rt_InLo); const float r3_InUp = sqrtf(z_InUp * z_InUp + rt_InUp * rt_InUp); float drt_InSeg = hitsInGPU.rts[innerOuterAnchorHitIndex] - hitsInGPU.rts[innerInnerAnchorHitIndex]; float dz_InSeg = hitsInGPU.zs[innerOuterAnchorHitIndex] - hitsInGPU.zs[innerInnerAnchorHitIndex]; float dr3_InSeg = sqrtf(hitsInGPU.rts[innerOuterAnchorHitIndex] * hitsInGPU.rts[innerOuterAnchorHitIndex] + hitsInGPU.zs[innerOuterAnchorHitIndex] * hitsInGPU.zs[innerOuterAnchorHitIndex]) - sqrtf(hitsInGPU.rts[innerInnerAnchorHitIndex] * hitsInGPU.rts[innerInnerAnchorHitIndex] + hitsInGPU.zs[innerInnerAnchorHitIndex] * hitsInGPU.zs[innerInnerAnchorHitIndex]); const float sdlThetaMulsF = 0.015f * sqrt(0.1f + 0.2 * (rt_OutLo - rt_InUp) / 50.f) * sqrt(r3_InUp / rt_InUp); const float sdlMuls = sdlThetaMulsF * 3.f / ptCut * 4.f; // will need a better guess than x4? float dzErr = drt_OutLo_InUp*etaErr*coshEta; //FIXME: check with the calc in the endcap dzErr *= dzErr; dzErr += 0.03f*0.03f; // pixel size x2. ... random for now dzErr *= 9.f; //3 sigma dzErr += sdlMuls*sdlMuls*drt_OutLo_InUp*drt_OutLo_InUp/3.f*coshEta*coshEta;//sloppy dzErr += zGeom*zGeom; dzErr = sqrtf(dzErr); const float dzDrIn = pz / ptIn; const float zWindow = dzErr / drt_InSeg * drt_OutLo_InUp + zGeom; const float dzMean = dzDrIn * drt_OutLo_InUp * (1.f + drt_OutLo_InUp * drt_OutLo_InUp * 4 * k2Rinv1GeVf * k2Rinv1GeVf / ptIn / ptIn / 24.f); // with curved path correction // Constructing upper and lower bound zLoPointed = z_InUp + dzMean - zWindow; zHiPointed = z_InUp + dzMean + zWindow; if (not (z_OutLo >= zLoPointed and z_OutLo <= zHiPointed)) { pass = false; } const float sdlPVoff = 0.1f / rt_OutLo; sdlCut = alpha1GeV_OutLo + sqrtf(sdlMuls * sdlMuls + sdlPVoff * sdlPVoff); dPhiPos = deltaPhi(hitsInGPU.xs[innerOuterAnchorHitIndex], hitsInGPU.ys[innerOuterAnchorHitIndex], hitsInGPU.zs[innerOuterAnchorHitIndex], hitsInGPU.xs[outerOuterAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex]); //no dphipos cut float midPointX = (hitsInGPU.xs[innerInnerAnchorHitIndex] + hitsInGPU.xs[outerInnerAnchorHitIndex])/2; float midPointY = (hitsInGPU.ys[innerInnerAnchorHitIndex] + hitsInGPU.ys[outerInnerAnchorHitIndex])/2; float midPointZ = (hitsInGPU.zs[innerInnerAnchorHitIndex] + hitsInGPU.zs[outerInnerAnchorHitIndex])/2; float diffX = hitsInGPU.xs[outerInnerAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]; float diffY = hitsInGPU.ys[outerInnerAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex] ; float diffZ = hitsInGPU.zs[outerInnerAnchorHitIndex] - hitsInGPU.zs[innerInnerAnchorHitIndex]; dPhi = deltaPhi(midPointX, midPointY, midPointZ, diffX, diffY, diffZ); if (not (fabsf(dPhi) <= sdlCut)) { pass = false; } float alpha_InLo = segmentsInGPU.dPhiChanges[innerSegmentIndex]; float alpha_OutLo = 
segmentsInGPU.dPhiChanges[outerSegmentIndex]; bool isEC_lastLayer = modulesInGPU.subdets[outerOuterLowerModuleIndex] == SDL::Endcap and modulesInGPU.moduleType[outerOuterLowerModuleIndex] == SDL::TwoS; //unsigned int outerOuterEdgeIndex = hitsInGPU.edge2SMap[outerOuterAnchorHitIndex]; //POTENTIAL NUCLEAR GANDHI unsigned int outerOuterEdgeIndex = outerOuterAnchorHitIndex; float alpha_OutUp,alpha_OutUp_highEdge,alpha_OutUp_lowEdge; alpha_OutUp = deltaPhi(hitsInGPU.xs[outerOuterAnchorHitIndex],hitsInGPU.ys[outerOuterAnchorHitIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); alpha_OutUp_highEdge = alpha_OutUp; alpha_OutUp_lowEdge = alpha_OutUp; float tl_axis_x = hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; float tl_axis_y = hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; float tl_axis_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; float tl_axis_highEdge_x = tl_axis_x; float tl_axis_highEdge_y = tl_axis_y; float tl_axis_highEdge_z = tl_axis_z; float tl_axis_lowEdge_x = tl_axis_x; float tl_axis_lowEdge_y = tl_axis_y; float tl_axis_lowEdge_z = tl_axis_z; betaIn = -deltaPhi(px, py, pz, tl_axis_x, tl_axis_y, tl_axis_z); float betaInRHmin = betaIn; float betaInRHmax = betaIn; betaOut = -alpha_OutUp + deltaPhi(hitsInGPU.xs[outerOuterAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_x, tl_axis_y, tl_axis_z); float betaOutRHmin = betaOut; float betaOutRHmax = betaOut; if(isEC_lastLayer) { alpha_OutUp_highEdge = deltaPhi(hitsInGPU.highEdgeXs[outerOuterEdgeIndex],hitsInGPU.highEdgeYs[outerOuterEdgeIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.highEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.highEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); alpha_OutUp_lowEdge = deltaPhi(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex],hitsInGPU.lowEdgeYs[outerOuterEdgeIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); tl_axis_highEdge_x = hitsInGPU.highEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; tl_axis_highEdge_y = hitsInGPU.highEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; tl_axis_highEdge_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; tl_axis_lowEdge_x = hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; tl_axis_lowEdge_y = hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; tl_axis_lowEdge_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; betaOutRHmin = -alpha_OutUp_highEdge + deltaPhi(hitsInGPU.highEdgeXs[outerOuterEdgeIndex], hitsInGPU.highEdgeYs[outerOuterEdgeIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_highEdge_x, tl_axis_highEdge_y, tl_axis_highEdge_z); betaOutRHmax = -alpha_OutUp_lowEdge + deltaPhi(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex], 
hitsInGPU.lowEdgeYs[outerOuterEdgeIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_lowEdge_x, tl_axis_lowEdge_y, tl_axis_lowEdge_z); } //beta computation float drt_tl_axis = sqrtf(tl_axis_x * tl_axis_x + tl_axis_y * tl_axis_y); float drt_tl_lowEdge = sqrtf(tl_axis_lowEdge_x * tl_axis_lowEdge_x + tl_axis_lowEdge_y * tl_axis_lowEdge_y); float drt_tl_highEdge = sqrtf(tl_axis_highEdge_x * tl_axis_highEdge_x + tl_axis_highEdge_y * tl_axis_highEdge_y); //innerOuterAnchor - innerInnerAnchor const float rt_InSeg = sqrtf((hitsInGPU.xs[innerOuterAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]) * (hitsInGPU.xs[innerOuterAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]) + (hitsInGPU.ys[innerOuterAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex]) * (hitsInGPU.ys[innerOuterAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex])); //no betaIn cut for the pixels float betaAv = 0.5f * (betaIn + betaOut); pt_beta = ptIn; const float pt_betaMax = 7.0f; int lIn = 0; int lOut = isEC_lastLayer ? 11 : 5; float sdOut_dr = sqrtf((hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex]) * (hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex]) + (hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex]) * (hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex])); float sdOut_d = hitsInGPU.rts[outerOuterAnchorHitIndex] - hitsInGPU.rts[outerInnerAnchorHitIndex]; const float diffDr = fabsf(rt_InSeg - sdOut_dr) / fabsf(rt_InSeg + sdOut_dr); runDeltaBetaIterations(betaIn, betaOut, betaAv, pt_beta, rt_InSeg, sdOut_dr, drt_tl_axis, lIn); const float betaInMMSF = (fabsf(betaInRHmin + betaInRHmax) > 0) ? (2.f * betaIn / fabsf(betaInRHmin + betaInRHmax)) : 0.; //mean value of min,max is the old betaIn const float betaOutMMSF = (fabsf(betaOutRHmin + betaOutRHmax) > 0) ? (2.f * betaOut / fabsf(betaOutRHmin + betaOutRHmax)) : 0.; betaInRHmin *= betaInMMSF; betaInRHmax *= betaInMMSF; betaOutRHmin *= betaOutMMSF; betaOutRHmax *= betaOutMMSF; const float dBetaMuls = sdlThetaMulsF * 4.f / fminf(fabsf(pt_beta), pt_betaMax); //need to confirm the range-out value of 7 GeV float sdIn_rt = hitsInGPU.rts[innerOuterAnchorHitIndex]; float sdOut_rt = hitsInGPU.rts[outerInnerAnchorHitIndex]; float sdIn_z = hitsInGPU.zs[innerOuterAnchorHitIndex]; float sdOut_z = hitsInGPU.zs[outerInnerAnchorHitIndex]; const float alphaInAbsReg = fmaxf(fabsf(alpha_InLo), asinf(fminf(sdIn_rt * k2Rinv1GeVf / 3.0f, sinAlphaMax))); const float alphaOutAbsReg = fmaxf(fabsf(alpha_OutLo), asinf(fminf(sdOut_rt * k2Rinv1GeVf / 3.0f, sinAlphaMax))); const float dBetaInLum = lIn < 11 ? 0.0f : fabsf(alphaInAbsReg*deltaZLum / sdIn_z); const float dBetaOutLum = lOut < 11 ? 
0.0f : fabsf(alphaOutAbsReg*deltaZLum / sdOut_z); const float dBetaLum2 = (dBetaInLum + dBetaOutLum) * (dBetaInLum + dBetaOutLum); const float sinDPhi = sinf(dPhi); const float dBetaRIn2 = 0; // TODO-RH float dBetaROut = 0; if(isEC_lastLayer) { dBetaROut = (sqrtf(hitsInGPU.highEdgeXs[outerOuterEdgeIndex] * hitsInGPU.highEdgeXs[outerOuterEdgeIndex] + hitsInGPU.highEdgeYs[outerOuterEdgeIndex] * hitsInGPU.highEdgeYs[outerOuterEdgeIndex]) - sqrtf(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] * hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] + hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] * hitsInGPU.lowEdgeYs[outerOuterEdgeIndex])) * sinDPhi / drt_tl_axis; } const float dBetaROut2 = dBetaROut * dBetaROut; betaOutCut = asinf(fminf(drt_tl_axis*k2Rinv1GeVf / ptCut, sinAlphaMax)) //FIXME: need faster version + (0.02f / sdOut_d) + sqrtf(dBetaLum2 + dBetaMuls*dBetaMuls); //Cut #6: The real beta cut if (not (fabsf(betaOut) < betaOutCut)) { pass = false; } const float pt_betaOut = drt_tl_axis * k2Rinv1GeVf / sin(betaOut); const float dBetaRes = 0.02f / fminf(sdOut_d, drt_InSeg); const float dBetaCut2 = (dBetaRes*dBetaRes * 2.0f + dBetaMuls * dBetaMuls + dBetaLum2 + dBetaRIn2 + dBetaROut2 + 0.25 * (fabsf(betaInRHmin - betaInRHmax) + fabsf(betaOutRHmin - betaOutRHmax)) * (fabsf(betaInRHmin - betaInRHmax) + fabsf(betaOutRHmin - betaOutRHmax))); float dBeta = betaIn - betaOut; deltaBetaCut = sqrtf(dBetaCut2); if (not (dBeta * dBeta <= dBetaCut2)) { //printf("dBeta2 = %f, dBetaCut2 = %f\n",dBeta * dBeta, dBetaCut2); pass = false; } return pass; } __device__ bool SDL::runTrackletDefaultAlgoPPEE(struct modules& modulesInGPU, struct hits& hitsInGPU, struct miniDoublets& mdsInGPU ,struct segments& segmentsInGPU, unsigned int pixelModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, float& zOut, float& rtOut, float& deltaPhiPos, float& dPhi, float& betaIn, float& betaOut, float& pt_beta, unsigned int N_MAX_SEGMENTS_PER_MODULE, float& zLo, float& rtLo, float& rtHi, float& sdlCut, float& betaInCut, float& betaOutCut, float& deltaBetaCut, float& kZ) // pixel to EE segments { bool pass = true; bool isPS_OutLo = (modulesInGPU.moduleType[outerInnerLowerModuleIndex] == SDL::PS); unsigned int innerInnerAnchorHitIndex = segmentsInGPU.innerMiniDoubletAnchorHitIndices[innerSegmentIndex]; unsigned int outerInnerAnchorHitIndex = segmentsInGPU.innerMiniDoubletAnchorHitIndices[outerSegmentIndex]; unsigned int innerOuterAnchorHitIndex = segmentsInGPU.outerMiniDoubletAnchorHitIndices[innerSegmentIndex]; unsigned int outerOuterAnchorHitIndex= segmentsInGPU.outerMiniDoubletAnchorHitIndices[outerSegmentIndex]; unsigned int pixelSegmentArrayIndex = innerSegmentIndex - (pixelModuleIndex * N_MAX_SEGMENTS_PER_MODULE); float ptIn = segmentsInGPU.ptIn[pixelSegmentArrayIndex]; float ptSLo = ptIn; float px = segmentsInGPU.px[pixelSegmentArrayIndex]; float py = segmentsInGPU.py[pixelSegmentArrayIndex]; float pz = segmentsInGPU.pz[pixelSegmentArrayIndex]; float ptErr = segmentsInGPU.ptErr[pixelSegmentArrayIndex]; float etaErr = segmentsInGPU.etaErr[pixelSegmentArrayIndex]; ptSLo = fmaxf(PTCUT, ptSLo - 10.0f*fmaxf(ptErr, 0.005f*ptSLo)); ptSLo = fminf(10.0f, ptSLo); float rtIn = hitsInGPU.rts[innerOuterAnchorHitIndex]; rtOut = hitsInGPU.rts[outerInnerAnchorHitIndex]; float zIn = hitsInGPU.zs[innerOuterAnchorHitIndex]; zOut = hitsInGPU.zs[outerInnerAnchorHitIndex]; float rtOut_o_rtIn = rtOut/rtIn; const float zpitch_InLo = 0.05f; float zpitch_OutLo = 
(isPS_OutLo ? pixelPSZpitch : strip2SZpitch); float zGeom = zpitch_InLo + zpitch_OutLo; const float sdlSlope = asinf(fminf(rtOut * k2Rinv1GeVf / ptCut, sinAlphaMax)); const float dzDrtScale = tanf(sdlSlope) / sdlSlope;//FIXME: need approximate value zLo = zIn + (zIn - deltaZLum) * (rtOut_o_rtIn - 1.f) * (zIn > 0.f ? 1.f : dzDrtScale) - zGeom; //slope-correction only on outer end if (not (zIn * zOut > 0)) { pass = false; } const float dLum = copysignf(deltaZLum, zIn); bool isOutSgInnerMDPS = modulesInGPU.moduleType[outerInnerLowerModuleIndex] == SDL::PS; const float rtGeom1 = isOutSgInnerMDPS ? pixelPSZpitch : strip2SZpitch;//FIXME: make this chosen by configuration for lay11,12 full PS const float zGeom1 = copysignf(zGeom, zIn); //used in B-E region rtLo = rtIn * (1.f + (zOut - zIn - zGeom1) / (zIn + zGeom1 + dLum) / dzDrtScale) - rtGeom1; //slope correction only on the lower end if (not (rtOut >= rtLo)) { pass = false; } float zInForHi = zIn - zGeom1 - dLum; if (zInForHi * zIn < 0) zInForHi = copysignf(0.1f, zIn); rtHi = rtIn * (1.f + (zOut - zIn + zGeom1) / zInForHi) + rtGeom1; // Cut #2: rt condition if (not (rtOut >= rtLo and rtOut <= rtHi)) { pass = false; } const float rt_InUp = hitsInGPU.rts[innerOuterAnchorHitIndex]; const float rt_OutLo = hitsInGPU.rts[outerInnerAnchorHitIndex]; const float z_InUp = hitsInGPU.zs[innerOuterAnchorHitIndex]; const float dzOutInAbs = fabsf(zOut - zIn); const float coshEta = hypotf(ptIn, pz) / ptIn; const float multDzDr = dzOutInAbs*coshEta/(coshEta*coshEta - 1.f); const float r3_InUp = sqrtf(z_InUp * z_InUp + rt_InUp * rt_InUp); const float sdlThetaMulsF = 0.015f * sqrtf(0.1f + 0.2 * (rt_OutLo - rtIn) / 50.f) * sqrtf(r3_InUp / rtIn); const float sdlMuls = sdlThetaMulsF * 3.f / ptCut * 4.f; // will need a better guess than x4? float drtErr = etaErr*multDzDr; drtErr *= drtErr; drtErr += 0.03f*0.03f; // pixel size x2. ... random for now drtErr *= 9.f; //3 sigma drtErr += sdlMuls*sdlMuls*multDzDr*multDzDr/3.f*coshEta*coshEta;//sloppy: relative muls is 1/3 of total muls drtErr = sqrtf(drtErr); const float drtDzIn = fabsf(ptIn / pz);//all tracks are out-going in endcaps? 
const float drt_OutLo_InUp = (rt_OutLo - rt_InUp); // drOutIn const float rtWindow = drtErr + rtGeom1; const float drtMean = drtDzIn * dzOutInAbs * (1.f - drt_OutLo_InUp * drt_OutLo_InUp * 4 * k2Rinv1GeVf * k2Rinv1GeVf / ptIn / ptIn / 24.f); // with curved path correction const float rtLo_point = rtIn + drtMean - rtWindow; const float rtHi_point = rtIn + drtMean + rtWindow; // Cut #3: rt-z pointed if (not (rtOut >= rtLo_point and rtOut <= rtHi_point)) { pass = false; } const float alpha1GeV_OutLo = asinf(fminf(rt_OutLo * k2Rinv1GeVf / ptCut, sinAlphaMax)); const float sdlPVoff = 0.1f / rt_OutLo; sdlCut = alpha1GeV_OutLo + sqrtf(sdlMuls * sdlMuls + sdlPVoff * sdlPVoff); deltaPhiPos = deltaPhi(hitsInGPU.xs[innerOuterAnchorHitIndex], hitsInGPU.ys[innerOuterAnchorHitIndex], hitsInGPU.zs[innerOuterAnchorHitIndex], hitsInGPU.xs[outerOuterAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex]); //no deltaphipos cut float midPointX = (hitsInGPU.xs[innerInnerAnchorHitIndex] + hitsInGPU.xs[outerInnerAnchorHitIndex])/2; float midPointY = (hitsInGPU.ys[innerInnerAnchorHitIndex] + hitsInGPU.ys[outerInnerAnchorHitIndex])/2; float midPointZ = (hitsInGPU.zs[innerInnerAnchorHitIndex] + hitsInGPU.zs[outerInnerAnchorHitIndex])/2; float diffX = (-hitsInGPU.xs[innerInnerAnchorHitIndex] + hitsInGPU.xs[outerInnerAnchorHitIndex]); float diffY = (-hitsInGPU.ys[innerInnerAnchorHitIndex] + hitsInGPU.ys[outerInnerAnchorHitIndex]); float diffZ = (-hitsInGPU.zs[innerInnerAnchorHitIndex] + hitsInGPU.zs[outerInnerAnchorHitIndex]); dPhi = deltaPhi(midPointX, midPointY, midPointZ, diffX, diffY, diffZ); // Cut #5: deltaPhiChange if (not (fabsf(dPhi) <= sdlCut)) { pass = false; } float alpha_InLo = segmentsInGPU.dPhiChanges[innerSegmentIndex]; float alpha_OutLo = segmentsInGPU.dPhiChanges[outerSegmentIndex]; bool isEC_lastLayer = modulesInGPU.subdets[outerOuterLowerModuleIndex] == SDL::Endcap and modulesInGPU.moduleType[outerOuterLowerModuleIndex] == SDL::TwoS; unsigned int outerOuterEdgeIndex = outerOuterAnchorHitIndex; float alpha_OutUp,alpha_OutUp_highEdge,alpha_OutUp_lowEdge; alpha_OutUp = deltaPhi(hitsInGPU.xs[outerOuterAnchorHitIndex],hitsInGPU.ys[outerOuterAnchorHitIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); alpha_OutUp_highEdge = alpha_OutUp; alpha_OutUp_lowEdge = alpha_OutUp; float tl_axis_x = hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; float tl_axis_y = hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; float tl_axis_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; float tl_axis_highEdge_x = tl_axis_x; float tl_axis_highEdge_y = tl_axis_y; float tl_axis_highEdge_z = tl_axis_z; float tl_axis_lowEdge_x = tl_axis_x; float tl_axis_lowEdge_y = tl_axis_y; float tl_axis_lowEdge_z = tl_axis_z; betaIn = -deltaPhi(px, py, pz, tl_axis_x, tl_axis_y, tl_axis_z); float betaInRHmin = betaIn; float betaInRHmax = betaIn; betaOut = -alpha_OutUp + deltaPhi(hitsInGPU.xs[outerOuterAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_x, tl_axis_y, tl_axis_z); float betaOutRHmin = betaOut; float betaOutRHmax = betaOut; if(isEC_lastLayer) { alpha_OutUp_highEdge = 
deltaPhi(hitsInGPU.highEdgeXs[outerOuterEdgeIndex],hitsInGPU.highEdgeYs[outerOuterEdgeIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.highEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.highEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); alpha_OutUp_lowEdge = deltaPhi(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex],hitsInGPU.lowEdgeYs[outerOuterEdgeIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); tl_axis_highEdge_x = hitsInGPU.highEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; tl_axis_highEdge_y = hitsInGPU.highEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; tl_axis_highEdge_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; tl_axis_lowEdge_x = hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; tl_axis_lowEdge_y = hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; tl_axis_lowEdge_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; betaOutRHmin = -alpha_OutUp_highEdge + deltaPhi(hitsInGPU.highEdgeXs[outerOuterEdgeIndex], hitsInGPU.highEdgeYs[outerOuterEdgeIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_highEdge_x, tl_axis_highEdge_y, tl_axis_highEdge_z); betaOutRHmax = -alpha_OutUp_lowEdge + deltaPhi(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex], hitsInGPU.lowEdgeYs[outerOuterEdgeIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_lowEdge_x, tl_axis_lowEdge_y, tl_axis_lowEdge_z); } //beta computation float drt_tl_axis = sqrtf(tl_axis_x * tl_axis_x + tl_axis_y * tl_axis_y); float drt_tl_lowEdge = sqrtf(tl_axis_lowEdge_x * tl_axis_lowEdge_x + tl_axis_lowEdge_y * tl_axis_lowEdge_y); float drt_tl_highEdge = sqrtf(tl_axis_highEdge_x * tl_axis_highEdge_x + tl_axis_highEdge_y * tl_axis_highEdge_y); //no betaIn cut for the pixels const float rt_InSeg = sqrtf((hitsInGPU.xs[innerOuterAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]) * (hitsInGPU.xs[innerOuterAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]) + (hitsInGPU.ys[innerOuterAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex]) * (hitsInGPU.ys[innerOuterAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex])); float betaAv = 0.5f * (betaIn + betaOut); pt_beta = ptIn; const float pt_betaMax = 7.0f; int lIn = 0; int lOut = isEC_lastLayer ? 11 : 5; float sdOut_dr = sqrtf((hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex]) * (hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex]) + (hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex]) * (hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex])); float sdOut_d = hitsInGPU.rts[outerOuterAnchorHitIndex] - hitsInGPU.rts[outerInnerAnchorHitIndex]; const float diffDr = fabsf(rt_InSeg - sdOut_dr) / fabsf(rt_InSeg + sdOut_dr); runDeltaBetaIterations(betaIn, betaOut, betaAv, pt_beta, rt_InSeg, sdOut_dr, drt_tl_axis, lIn); const float betaInMMSF = (fabsf(betaInRHmin + betaInRHmax) > 0) ? 
(2.f * betaIn / fabsf(betaInRHmin + betaInRHmax)) : 0.; //mean value of min,max is the old betaIn const float betaOutMMSF = (fabsf(betaOutRHmin + betaOutRHmax) > 0) ? (2.f * betaOut / fabsf(betaOutRHmin + betaOutRHmax)) : 0.; betaInRHmin *= betaInMMSF; betaInRHmax *= betaInMMSF; betaOutRHmin *= betaOutMMSF; betaOutRHmax *= betaOutMMSF; const float dBetaMuls = sdlThetaMulsF * 4.f / fminf(fabsf(pt_beta), pt_betaMax); //need to confirm the range-out value of 7 GeV float sdIn_rt = hitsInGPU.rts[innerOuterAnchorHitIndex]; float sdOut_rt = hitsInGPU.rts[outerInnerAnchorHitIndex]; float sdIn_z = hitsInGPU.zs[innerOuterAnchorHitIndex]; float sdOut_z = hitsInGPU.zs[outerInnerAnchorHitIndex]; const float alphaInAbsReg = fmaxf(fabsf(alpha_InLo), asinf(fminf(sdIn_rt * k2Rinv1GeVf / 3.0f, sinAlphaMax))); const float alphaOutAbsReg = fmaxf(fabsf(alpha_OutLo), asinf(fminf(sdOut_rt * k2Rinv1GeVf / 3.0f, sinAlphaMax))); const float dBetaInLum = lIn < 11 ? 0.0f : fabsf(alphaInAbsReg*deltaZLum / sdIn_z); const float dBetaOutLum = lOut < 11 ? 0.0f : fabsf(alphaOutAbsReg*deltaZLum / sdOut_z); const float dBetaLum2 = (dBetaInLum + dBetaOutLum) * (dBetaInLum + dBetaOutLum); const float sinDPhi = sinf(dPhi); const float dBetaRIn2 = 0; // TODO-RH float dBetaROut = 0; if(isEC_lastLayer) { dBetaROut = (sqrtf(hitsInGPU.highEdgeXs[outerOuterEdgeIndex] * hitsInGPU.highEdgeXs[outerOuterEdgeIndex] + hitsInGPU.highEdgeYs[outerOuterEdgeIndex] * hitsInGPU.highEdgeYs[outerOuterEdgeIndex]) - sqrtf(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] * hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] + hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] * hitsInGPU.lowEdgeYs[outerOuterEdgeIndex])) * sinDPhi / drt_tl_axis; } const float dBetaROut2 = dBetaROut * dBetaROut; betaOutCut = asinf(fminf(drt_tl_axis*k2Rinv1GeVf / ptCut, sinAlphaMax)) //FIXME: need faster version + (0.02f / sdOut_d) + sqrtf(dBetaLum2 + dBetaMuls*dBetaMuls); //Cut #6: The real beta cut if (not (fabsf(betaOut) < betaOutCut)) { pass = false; } const float pt_betaOut = drt_tl_axis * k2Rinv1GeVf / sin(betaOut); float drt_InSeg = hitsInGPU.rts[innerOuterAnchorHitIndex] - hitsInGPU.rts[innerInnerAnchorHitIndex]; const float dBetaRes = 0.02f / fminf(sdOut_d, drt_InSeg); const float dBetaCut2 = (dBetaRes*dBetaRes * 2.0f + dBetaMuls * dBetaMuls + dBetaLum2 + dBetaRIn2 + dBetaROut2 + 0.25 * (fabsf(betaInRHmin - betaInRHmax) + fabsf(betaOutRHmin - betaOutRHmax)) * (fabsf(betaInRHmin - betaInRHmax) + fabsf(betaOutRHmin - betaOutRHmax))); float dBeta = betaIn - betaOut; deltaBetaCut = sqrtf(dBetaCut2); if (not (dBeta * dBeta <= dBetaCut2)) { pass = false; } return pass; }
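Both the barrel (PPBB) and endcap (PPEE) pixel-tracklet routines above finish with the same delta-beta selection. Restated as a formula, read directly off the code (the dBetaRIn2 term is fixed to zero in the source, so it is dropped here), a candidate passes when

\[
(\beta_{\mathrm{in}}-\beta_{\mathrm{out}})^2 \;\le\; 2\,\delta\beta_{\mathrm{res}}^2 + \delta\beta_{\mathrm{muls}}^2 + \delta\beta_{\mathrm{lum}}^2 + \delta\beta_{R,\mathrm{out}}^2 + \tfrac{1}{4}\bigl(|\beta_{\mathrm{in}}^{\min}-\beta_{\mathrm{in}}^{\max}| + |\beta_{\mathrm{out}}^{\min}-\beta_{\mathrm{out}}^{\max}|\bigr)^2,
\qquad
\delta\beta_{\mathrm{res}} = \frac{0.02}{\min(\texttt{sdOut\_d},\ \texttt{drt\_InSeg})},
\]

and the stored deltaBetaCut is the square root of the right-hand side.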
6e89c94385eacac75b0b9ffa3428c2d311ab848a.cu
# include "PixelTracklet.cuh" #ifdef __CUDACC__ #define CUDA_CONST_VAR __device__ #endif //#ifdef CACHE_ALLOC #include "allocate.h" //#endif void SDL::createPixelTrackletsInUnifiedMemory(struct pixelTracklets& pixelTrackletsInGPU, unsigned int maxPixelTracklets) { #ifdef CACHE_ALLOC cudaStream_t stream =0; pixelTrackletsInGPU.segmentIndices = (unsigned int*)cms::cuda::allocate_managed(maxPixelTracklets * sizeof(unsigned int) * 2,stream); pixelTrackletsInGPU.lowerModuleIndices = (unsigned int*)cms::cuda::allocate_managed(maxPixelTracklets * sizeof(unsigned int) * 2,stream);//split up to avoid runtime error of exceeding max byte allocation at a time pixelTrackletsInGPU.nPixelTracklets = (unsigned int*)cms::cuda::allocate_managed(sizeof(unsigned int),stream); pixelTrackletsInGPU.zOut = (float*)cms::cuda::allocate_managed(maxPixelTracklets * sizeof(float) * 4,stream); pixelTrackletsInGPU.betaIn = (float*)cms::cuda::allocate_managed(maxPixelTracklets * sizeof(float) * 3,stream); #else cudaMallocManaged(&pixelTrackletsInGPU.segmentIndices, 2 * maxPixelTracklets * sizeof(unsigned int)); cudaMallocManaged(&pixelTrackletsInGPU.lowerModuleIndices, 2 * maxPixelTracklets * sizeof(unsigned int)); cudaMallocManaged(&pixelTrackletsInGPU.nPixelTracklets, sizeof(unsigned int)); cudaMallocManaged(&pixelTrackletsInGPU.zOut, maxPixelTracklets *4* sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.betaIn, maxPixelTracklets *3* sizeof(float)); #ifdef CUT_VALUE_DEBUG cudaMallocManaged(&pixelTrackletsInGPU.zLo, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.zHi, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.zLoPointed, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.zHiPointed, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.sdlCut, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.betaInCut, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.betaOutCut, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.deltaBetaCut, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.rtLo, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.rtHi, maxPixelTracklets * sizeof(float)); cudaMallocManaged(&pixelTrackletsInGPU.kZ, maxPixelTracklets * sizeof(float)); #endif #endif pixelTrackletsInGPU.rtOut = pixelTrackletsInGPU.zOut + maxPixelTracklets; pixelTrackletsInGPU.deltaPhiPos = pixelTrackletsInGPU.zOut + maxPixelTracklets * 2; pixelTrackletsInGPU.deltaPhi = pixelTrackletsInGPU.zOut + maxPixelTracklets * 3; pixelTrackletsInGPU.betaOut = pixelTrackletsInGPU.betaIn + maxPixelTracklets; pixelTrackletsInGPU.pt_beta = pixelTrackletsInGPU.betaIn + maxPixelTracklets * 2; cudaMemset(pixelTrackletsInGPU.nPixelTracklets, 0, sizeof(unsigned int)); } void SDL::createPixelTrackletsInExplicitMemory(struct pixelTracklets& pixelTrackletsInGPU, unsigned int maxPixelTracklets) { #ifdef CACHE_ALLOC cudaStream_t stream = 0; int dev; cudaGetDevice(&dev); pixelTrackletsInGPU.segmentIndices = (unsigned int*)cms::cuda::allocate_device(dev, maxPixelTracklets * sizeof(unsigned int) * 2,stream); pixelTrackletsInGPU.lowerModuleIndices = (unsigned int*)cms::cuda::allocate_device(dev, maxPixelTracklets * sizeof(unsigned int) * 2,stream);//split up to avoid runtime error of exceeding max byte allocation at a time pixelTrackletsInGPU.nPixelTracklets = (unsigned int*)cms::cuda::allocate_device(dev, sizeof(unsigned int),stream); 
pixelTrackletsInGPU.zOut = (float*)cms::cuda::allocate_device(dev, maxPixelTracklets * sizeof(float) * 4,stream); pixelTrackletsInGPU.betaIn = (float*)cms::cuda::allocate_device(dev, maxPixelTracklets * sizeof(float) * 3,stream); #else cudaMalloc(&pixelTrackletsInGPU.segmentIndices, 2 * maxPixelTracklets * sizeof(unsigned int)); cudaMalloc(&pixelTrackletsInGPU.lowerModuleIndices, 2 * maxPixelTracklets * sizeof(unsigned int)); cudaMalloc(&pixelTrackletsInGPU.nPixelTracklets, sizeof(unsigned int)); cudaMalloc(&pixelTrackletsInGPU.zOut, maxPixelTracklets *4* sizeof(float)); cudaMalloc(&pixelTrackletsInGPU.betaIn, maxPixelTracklets *3* sizeof(float)); #endif pixelTrackletsInGPU.rtOut = pixelTrackletsInGPU.zOut + maxPixelTracklets; pixelTrackletsInGPU.deltaPhiPos = pixelTrackletsInGPU.zOut + maxPixelTracklets * 2; pixelTrackletsInGPU.deltaPhi = pixelTrackletsInGPU.zOut + maxPixelTracklets * 3; pixelTrackletsInGPU.betaOut = pixelTrackletsInGPU.betaIn + maxPixelTracklets; pixelTrackletsInGPU.pt_beta = pixelTrackletsInGPU.betaIn + maxPixelTracklets * 2; cudaMemset(pixelTrackletsInGPU.nPixelTracklets, 0, sizeof(unsigned int)); } #ifdef CUT_VALUE_DEBUG __device__ void SDL::addPixelTrackletToMemory(struct pixelTracklets& pixelTrackletsInGPU, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, unsigned int innerInnerLowerModuleIndex, unsigned int innerOuterLowerModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, float& zOut, float& rtOut, float& deltaPhiPos, float& deltaPhi, float& betaIn, float& betaOut, float pt_beta, float& zLo, float& zHi, float& rtLo, float& rtHi, float& zLoPointed, float& zHiPointed, float& sdlCut, float& betaInCut, float& betaOutCut, float& deltaBetaCut, float& kZ, unsigned int pixelTrackletIndex) #else __device__ void SDL::addPixelTrackletToMemory(struct pixelTracklets& pixelTrackletsInGPU, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, unsigned int innerInnerLowerModuleIndex, unsigned int innerOuterLowerModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, float& zOut, float& rtOut, float& deltaPhiPos, float& deltaPhi, float& betaIn, float& betaOut, float pt_beta, unsigned int pixelTrackletIndex) #endif { pixelTrackletsInGPU.segmentIndices[2 * pixelTrackletIndex] = innerSegmentIndex; pixelTrackletsInGPU.segmentIndices[2 * pixelTrackletIndex + 1] = outerSegmentIndex; pixelTrackletsInGPU.lowerModuleIndices[2 * pixelTrackletIndex] = outerInnerLowerModuleIndex; pixelTrackletsInGPU.lowerModuleIndices[2 * pixelTrackletIndex + 1] = outerOuterLowerModuleIndex; pixelTrackletsInGPU.zOut[pixelTrackletIndex] = zOut; pixelTrackletsInGPU.rtOut[pixelTrackletIndex] = rtOut; pixelTrackletsInGPU.deltaPhiPos[pixelTrackletIndex] = deltaPhiPos; pixelTrackletsInGPU.deltaPhi[pixelTrackletIndex] = deltaPhi; pixelTrackletsInGPU.betaIn[pixelTrackletIndex] = betaIn; pixelTrackletsInGPU.betaOut[pixelTrackletIndex] = betaOut; pixelTrackletsInGPU.pt_beta[pixelTrackletIndex] = pt_beta; #ifdef CUT_VALUE_DEBUG pixelTrackletsInGPU.zLo[pixelTrackletIndex] = zLo; pixelTrackletsInGPU.zHi[pixelTrackletIndex] = zHi; pixelTrackletsInGPU.rtLo[pixelTrackletIndex] = rtLo; pixelTrackletsInGPU.rtHi[pixelTrackletIndex] = rtHi; pixelTrackletsInGPU.zLoPointed[pixelTrackletIndex] = zLoPointed; pixelTrackletsInGPU.zHiPointed[pixelTrackletIndex] = zHiPointed; pixelTrackletsInGPU.sdlCut[pixelTrackletIndex] = sdlCut; pixelTrackletsInGPU.betaInCut[pixelTrackletIndex] = betaInCut; 
pixelTrackletsInGPU.betaOutCut[pixelTrackletIndex] = betaOutCut; pixelTrackletsInGPU.deltaBetaCut[pixelTrackletIndex] = deltaBetaCut; pixelTrackletsInGPU.kZ[pixelTrackletIndex] = kZ; #endif } void SDL::pixelTracklets::freeMemoryCache() { #ifdef Explicit_Tracklet int dev; cudaGetDevice(&dev); cms::cuda::free_device(dev,segmentIndices); cms::cuda::free_device(dev,lowerModuleIndices); cms::cuda::free_device(dev,zOut); cms::cuda::free_device(dev,betaIn); cms::cuda::free_device(dev,nPixelTracklets); #else cms::cuda::free_managed(segmentIndices); cms::cuda::free_managed(lowerModuleIndices); cms::cuda::free_managed(zOut); cms::cuda::free_managed(betaIn); cms::cuda::free_managed(nPixelTracklets); #endif } void SDL::pixelTracklets::freeMemory() { cudaFree(segmentIndices); cudaFree(lowerModuleIndices); cudaFree(nPixelTracklets); cudaFree(zOut); cudaFree(betaIn); #ifdef CUT_VALUE_DEBUG cudaFree(zLo); cudaFree(zHi); cudaFree(rtLo); cudaFree(rtHi); cudaFree(zLoPointed); cudaFree(zHiPointed); cudaFree(sdlCut); cudaFree(betaInCut); cudaFree(betaOutCut); cudaFree(deltaBetaCut); cudaFree(kZ); #endif } SDL::pixelTracklets::pixelTracklets() { segmentIndices = nullptr; lowerModuleIndices = nullptr; nPixelTracklets = nullptr; zOut = nullptr; rtOut = nullptr; deltaPhiPos = nullptr; deltaPhi = nullptr; betaIn = nullptr; betaOut = nullptr; pt_beta = nullptr; #ifdef CUT_VALUE_DEBUG zLo = nullptr; zHi = nullptr; rtLo = nullptr; rtHi = nullptr; zLoPointed = nullptr; zHiPointed = nullptr; sdlCut = nullptr; betaInCut = nullptr; betaOutCut = nullptr; deltaBetaCut = nullptr; kZ = nullptr; #endif } SDL::pixelTracklets::~pixelTracklets() { } __device__ bool SDL::runPixelTrackletDefaultAlgo(struct modules& modulesInGPU, struct hits& hitsInGPU, struct miniDoublets& mdsInGPU, struct segments& segmentsInGPU, unsigned int innerInnerLowerModuleIndex, unsigned int innerOuterLowerModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, float& zOut, float& rtOut, float& deltaPhiPos, float& deltaPhi, float& betaIn, float& betaOut, float& pt_beta, float& zLo, float& zHi, float& rtLo, float& rtHi, float& zLoPointed, float& zHiPointed, float& sdlCut, float& betaInCut, float& betaOutCut, float& deltaBetaCut, float& kZ, unsigned int N_MAX_SEGMENTS_PER_MODULE) { bool pass = false; zLo = -999; zHi = -999; rtLo = -999; rtHi = -999; zLoPointed = -999; zHiPointed = -999; kZ = -999; betaInCut = -999; short outerInnerLowerModuleSubdet = modulesInGPU.subdets[outerInnerLowerModuleIndex]; short outerOuterLowerModuleSubdet = modulesInGPU.subdets[outerOuterLowerModuleIndex]; if(outerInnerLowerModuleSubdet == SDL::Barrel and outerOuterLowerModuleSubdet == SDL::Barrel) { pass = runTrackletDefaultAlgoPPBB(modulesInGPU, hitsInGPU, mdsInGPU, segmentsInGPU, innerInnerLowerModuleIndex, outerInnerLowerModuleIndex, outerOuterLowerModuleIndex, innerSegmentIndex, outerSegmentIndex,zOut,rtOut,deltaPhiPos,deltaPhi,betaIn,betaOut,pt_beta,N_MAX_SEGMENTS_PER_MODULE, zLo, zHi, zLoPointed, zHiPointed, sdlCut, betaOutCut, deltaBetaCut); } else if(outerInnerLowerModuleSubdet == SDL::Barrel and outerOuterLowerModuleSubdet == SDL::Endcap) { pass = runTrackletDefaultAlgoPPBB(modulesInGPU, hitsInGPU, mdsInGPU, segmentsInGPU, innerInnerLowerModuleIndex, outerInnerLowerModuleIndex, outerOuterLowerModuleIndex, innerSegmentIndex, outerSegmentIndex,zOut,rtOut,deltaPhiPos,deltaPhi,betaIn,betaOut,pt_beta,N_MAX_SEGMENTS_PER_MODULE, zLo, zHi, zLoPointed, zHiPointed, sdlCut, 
betaOutCut, deltaBetaCut); } else if(outerInnerLowerModuleSubdet == SDL::Endcap and outerOuterLowerModuleSubdet == SDL::Endcap) { pass = runTrackletDefaultAlgoPPEE(modulesInGPU, hitsInGPU, mdsInGPU, segmentsInGPU, innerInnerLowerModuleIndex, outerInnerLowerModuleIndex, outerOuterLowerModuleIndex, innerSegmentIndex, outerSegmentIndex,zOut,rtOut,deltaPhiPos,deltaPhi,betaIn,betaOut,pt_beta,N_MAX_SEGMENTS_PER_MODULE, zLo, rtLo, rtHi, sdlCut, betaInCut, betaOutCut, deltaBetaCut, kZ); } return pass; } __device__ bool SDL::runTrackletDefaultAlgoPPBB(struct modules& modulesInGPU, struct hits& hitsInGPU, struct miniDoublets& mdsInGPU ,struct segments& segmentsInGPU, unsigned int pixelModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, float& zOut, float& rtOut, float& dPhiPos, float& dPhi, float& betaIn, float& betaOut, float& pt_beta, unsigned int N_MAX_SEGMENTS_PER_MODULE, float& zLo, float& zHi, float& zLoPointed, float& zHiPointed, float& sdlCut, float& betaOutCut, float& deltaBetaCut) // pixel to BB and BE segments { bool pass = true; bool isPS_OutLo = (modulesInGPU.moduleType[outerInnerLowerModuleIndex] == SDL::PS); unsigned int innerInnerAnchorHitIndex = segmentsInGPU.innerMiniDoubletAnchorHitIndices[innerSegmentIndex]; unsigned int outerInnerAnchorHitIndex = segmentsInGPU.innerMiniDoubletAnchorHitIndices[outerSegmentIndex]; unsigned int innerOuterAnchorHitIndex = segmentsInGPU.outerMiniDoubletAnchorHitIndices[innerSegmentIndex]; unsigned int outerOuterAnchorHitIndex= segmentsInGPU.outerMiniDoubletAnchorHitIndices[outerSegmentIndex]; if(fabsf(deltaPhi(hitsInGPU.xs[innerOuterAnchorHitIndex], hitsInGPU.ys[innerOuterAnchorHitIndex], hitsInGPU.zs[innerOuterAnchorHitIndex], hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerInnerAnchorHitIndex])) > M_PI/2.) { pass = false; } unsigned int pixelSegmentArrayIndex = innerSegmentIndex - (pixelModuleIndex * N_MAX_SEGMENTS_PER_MODULE); float ptIn = segmentsInGPU.ptIn[pixelSegmentArrayIndex]; float ptSLo = ptIn; float px = segmentsInGPU.px[pixelSegmentArrayIndex]; float py = segmentsInGPU.py[pixelSegmentArrayIndex]; float pz = segmentsInGPU.pz[pixelSegmentArrayIndex]; float ptErr = segmentsInGPU.ptErr[pixelSegmentArrayIndex]; float etaErr = segmentsInGPU.etaErr[pixelSegmentArrayIndex]; ptSLo = fmaxf(PTCUT, ptSLo - 10.0f*fmaxf(ptErr, 0.005f*ptSLo)); ptSLo = fminf(10.0f, ptSLo); float rt_InLo = hitsInGPU.rts[innerInnerAnchorHitIndex]; float rt_InUp = hitsInGPU.rts[innerOuterAnchorHitIndex]; float rt_OutLo = hitsInGPU.rts[outerInnerAnchorHitIndex]; float z_InLo = hitsInGPU.zs[innerInnerAnchorHitIndex]; float z_InUp = hitsInGPU.zs[innerOuterAnchorHitIndex]; float z_OutLo = hitsInGPU.zs[outerInnerAnchorHitIndex]; float rt_InOut = hitsInGPU.rts[innerOuterAnchorHitIndex]; float z_InOut = hitsInGPU.zs[innerOuterAnchorHitIndex]; float alpha1GeV_OutLo = asinf(fminf(rt_OutLo * k2Rinv1GeVf / ptCut, sinAlphaMax)); float rtRatio_OutLoInLo = rt_OutLo / rt_InLo; // Outer segment beginning rt divided by inner segment beginning rt; const float rtRatio_OutLoInOut = rt_OutLo / rt_InOut; // Outer segment beginning rt divided by inner segment beginning rt; float dzDrtScale = tanf(alpha1GeV_OutLo) / alpha1GeV_OutLo; // The track can bend in r-z plane slightly const float zpitch_InLo = 0.05f; const float zpitch_InOut = 0.05f; float zpitch_OutLo = (isPS_OutLo ? 
pixelPSZpitch : strip2SZpitch); float zGeom = zpitch_InLo + zpitch_OutLo; zHi = z_InOut + (z_InOut + deltaZLum) * (rtRatio_OutLoInOut - 1.f) * (z_InOut < 0.f ? 1.f : dzDrtScale) + (zpitch_InOut + zpitch_OutLo); zLo = z_InOut + (z_InOut - deltaZLum) * (rtRatio_OutLoInOut - 1.f) * (z_InOut > 0.f ? 1.f : dzDrtScale) - (zpitch_InOut + zpitch_OutLo); //slope-correction only on outer end if (not (z_OutLo >= zLo and z_OutLo <= zHi)) { pass = false; } const float coshEta = sqrtf(ptIn * ptIn + pz * pz) / ptIn; // const float drt_OutLo_InLo = (rt_OutLo - rt_InLo); const float drt_OutLo_InUp = (rt_OutLo - rt_InUp); const float invRt_InLo = 1. / rt_InLo; const float r3_InLo = sqrtf(z_InLo * z_InLo + rt_InLo * rt_InLo); const float r3_InUp = sqrtf(z_InUp * z_InUp + rt_InUp * rt_InUp); float drt_InSeg = hitsInGPU.rts[innerOuterAnchorHitIndex] - hitsInGPU.rts[innerInnerAnchorHitIndex]; float dz_InSeg = hitsInGPU.zs[innerOuterAnchorHitIndex] - hitsInGPU.zs[innerInnerAnchorHitIndex]; float dr3_InSeg = sqrtf(hitsInGPU.rts[innerOuterAnchorHitIndex] * hitsInGPU.rts[innerOuterAnchorHitIndex] + hitsInGPU.zs[innerOuterAnchorHitIndex] * hitsInGPU.zs[innerOuterAnchorHitIndex]) - sqrtf(hitsInGPU.rts[innerInnerAnchorHitIndex] * hitsInGPU.rts[innerInnerAnchorHitIndex] + hitsInGPU.zs[innerInnerAnchorHitIndex] * hitsInGPU.zs[innerInnerAnchorHitIndex]); const float sdlThetaMulsF = 0.015f * sqrt(0.1f + 0.2 * (rt_OutLo - rt_InUp) / 50.f) * sqrt(r3_InUp / rt_InUp); const float sdlMuls = sdlThetaMulsF * 3.f / ptCut * 4.f; // will need a better guess than x4? float dzErr = drt_OutLo_InUp*etaErr*coshEta; //FIXME: check with the calc in the endcap dzErr *= dzErr; dzErr += 0.03f*0.03f; // pixel size x2. ... random for now dzErr *= 9.f; //3 sigma dzErr += sdlMuls*sdlMuls*drt_OutLo_InUp*drt_OutLo_InUp/3.f*coshEta*coshEta;//sloppy dzErr += zGeom*zGeom; dzErr = sqrtf(dzErr); const float dzDrIn = pz / ptIn; const float zWindow = dzErr / drt_InSeg * drt_OutLo_InUp + zGeom; const float dzMean = dzDrIn * drt_OutLo_InUp * (1.f + drt_OutLo_InUp * drt_OutLo_InUp * 4 * k2Rinv1GeVf * k2Rinv1GeVf / ptIn / ptIn / 24.f); // with curved path correction // Constructing upper and lower bound zLoPointed = z_InUp + dzMean - zWindow; zHiPointed = z_InUp + dzMean + zWindow; if (not (z_OutLo >= zLoPointed and z_OutLo <= zHiPointed)) { pass = false; } const float sdlPVoff = 0.1f / rt_OutLo; sdlCut = alpha1GeV_OutLo + sqrtf(sdlMuls * sdlMuls + sdlPVoff * sdlPVoff); dPhiPos = deltaPhi(hitsInGPU.xs[innerOuterAnchorHitIndex], hitsInGPU.ys[innerOuterAnchorHitIndex], hitsInGPU.zs[innerOuterAnchorHitIndex], hitsInGPU.xs[outerOuterAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex]); //no dphipos cut float midPointX = (hitsInGPU.xs[innerInnerAnchorHitIndex] + hitsInGPU.xs[outerInnerAnchorHitIndex])/2; float midPointY = (hitsInGPU.ys[innerInnerAnchorHitIndex] + hitsInGPU.ys[outerInnerAnchorHitIndex])/2; float midPointZ = (hitsInGPU.zs[innerInnerAnchorHitIndex] + hitsInGPU.zs[outerInnerAnchorHitIndex])/2; float diffX = hitsInGPU.xs[outerInnerAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]; float diffY = hitsInGPU.ys[outerInnerAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex] ; float diffZ = hitsInGPU.zs[outerInnerAnchorHitIndex] - hitsInGPU.zs[innerInnerAnchorHitIndex]; dPhi = deltaPhi(midPointX, midPointY, midPointZ, diffX, diffY, diffZ); if (not (fabsf(dPhi) <= sdlCut)) { pass = false; } float alpha_InLo = segmentsInGPU.dPhiChanges[innerSegmentIndex]; float alpha_OutLo = 
segmentsInGPU.dPhiChanges[outerSegmentIndex]; bool isEC_lastLayer = modulesInGPU.subdets[outerOuterLowerModuleIndex] == SDL::Endcap and modulesInGPU.moduleType[outerOuterLowerModuleIndex] == SDL::TwoS; //unsigned int outerOuterEdgeIndex = hitsInGPU.edge2SMap[outerOuterAnchorHitIndex]; //POTENTIAL NUCLEAR GANDHI unsigned int outerOuterEdgeIndex = outerOuterAnchorHitIndex; float alpha_OutUp,alpha_OutUp_highEdge,alpha_OutUp_lowEdge; alpha_OutUp = deltaPhi(hitsInGPU.xs[outerOuterAnchorHitIndex],hitsInGPU.ys[outerOuterAnchorHitIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); alpha_OutUp_highEdge = alpha_OutUp; alpha_OutUp_lowEdge = alpha_OutUp; float tl_axis_x = hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; float tl_axis_y = hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; float tl_axis_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; float tl_axis_highEdge_x = tl_axis_x; float tl_axis_highEdge_y = tl_axis_y; float tl_axis_highEdge_z = tl_axis_z; float tl_axis_lowEdge_x = tl_axis_x; float tl_axis_lowEdge_y = tl_axis_y; float tl_axis_lowEdge_z = tl_axis_z; betaIn = -deltaPhi(px, py, pz, tl_axis_x, tl_axis_y, tl_axis_z); float betaInRHmin = betaIn; float betaInRHmax = betaIn; betaOut = -alpha_OutUp + deltaPhi(hitsInGPU.xs[outerOuterAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_x, tl_axis_y, tl_axis_z); float betaOutRHmin = betaOut; float betaOutRHmax = betaOut; if(isEC_lastLayer) { alpha_OutUp_highEdge = deltaPhi(hitsInGPU.highEdgeXs[outerOuterEdgeIndex],hitsInGPU.highEdgeYs[outerOuterEdgeIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.highEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.highEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); alpha_OutUp_lowEdge = deltaPhi(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex],hitsInGPU.lowEdgeYs[outerOuterEdgeIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); tl_axis_highEdge_x = hitsInGPU.highEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; tl_axis_highEdge_y = hitsInGPU.highEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; tl_axis_highEdge_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; tl_axis_lowEdge_x = hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; tl_axis_lowEdge_y = hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; tl_axis_lowEdge_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; betaOutRHmin = -alpha_OutUp_highEdge + deltaPhi(hitsInGPU.highEdgeXs[outerOuterEdgeIndex], hitsInGPU.highEdgeYs[outerOuterEdgeIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_highEdge_x, tl_axis_highEdge_y, tl_axis_highEdge_z); betaOutRHmax = -alpha_OutUp_lowEdge + deltaPhi(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex], 
hitsInGPU.lowEdgeYs[outerOuterEdgeIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_lowEdge_x, tl_axis_lowEdge_y, tl_axis_lowEdge_z); } //beta computation float drt_tl_axis = sqrtf(tl_axis_x * tl_axis_x + tl_axis_y * tl_axis_y); float drt_tl_lowEdge = sqrtf(tl_axis_lowEdge_x * tl_axis_lowEdge_x + tl_axis_lowEdge_y * tl_axis_lowEdge_y); float drt_tl_highEdge = sqrtf(tl_axis_highEdge_x * tl_axis_highEdge_x + tl_axis_highEdge_y * tl_axis_highEdge_y); //innerOuterAnchor - innerInnerAnchor const float rt_InSeg = sqrtf((hitsInGPU.xs[innerOuterAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]) * (hitsInGPU.xs[innerOuterAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]) + (hitsInGPU.ys[innerOuterAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex]) * (hitsInGPU.ys[innerOuterAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex])); //no betaIn cut for the pixels float betaAv = 0.5f * (betaIn + betaOut); pt_beta = ptIn; const float pt_betaMax = 7.0f; int lIn = 0; int lOut = isEC_lastLayer ? 11 : 5; float sdOut_dr = sqrtf((hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex]) * (hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex]) + (hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex]) * (hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex])); float sdOut_d = hitsInGPU.rts[outerOuterAnchorHitIndex] - hitsInGPU.rts[outerInnerAnchorHitIndex]; const float diffDr = fabsf(rt_InSeg - sdOut_dr) / fabsf(rt_InSeg + sdOut_dr); runDeltaBetaIterations(betaIn, betaOut, betaAv, pt_beta, rt_InSeg, sdOut_dr, drt_tl_axis, lIn); const float betaInMMSF = (fabsf(betaInRHmin + betaInRHmax) > 0) ? (2.f * betaIn / fabsf(betaInRHmin + betaInRHmax)) : 0.; //mean value of min,max is the old betaIn const float betaOutMMSF = (fabsf(betaOutRHmin + betaOutRHmax) > 0) ? (2.f * betaOut / fabsf(betaOutRHmin + betaOutRHmax)) : 0.; betaInRHmin *= betaInMMSF; betaInRHmax *= betaInMMSF; betaOutRHmin *= betaOutMMSF; betaOutRHmax *= betaOutMMSF; const float dBetaMuls = sdlThetaMulsF * 4.f / fminf(fabsf(pt_beta), pt_betaMax); //need to confirm the range-out value of 7 GeV float sdIn_rt = hitsInGPU.rts[innerOuterAnchorHitIndex]; float sdOut_rt = hitsInGPU.rts[outerInnerAnchorHitIndex]; float sdIn_z = hitsInGPU.zs[innerOuterAnchorHitIndex]; float sdOut_z = hitsInGPU.zs[outerInnerAnchorHitIndex]; const float alphaInAbsReg = fmaxf(fabsf(alpha_InLo), asinf(fminf(sdIn_rt * k2Rinv1GeVf / 3.0f, sinAlphaMax))); const float alphaOutAbsReg = fmaxf(fabsf(alpha_OutLo), asinf(fminf(sdOut_rt * k2Rinv1GeVf / 3.0f, sinAlphaMax))); const float dBetaInLum = lIn < 11 ? 0.0f : fabsf(alphaInAbsReg*deltaZLum / sdIn_z); const float dBetaOutLum = lOut < 11 ? 
0.0f : fabsf(alphaOutAbsReg*deltaZLum / sdOut_z); const float dBetaLum2 = (dBetaInLum + dBetaOutLum) * (dBetaInLum + dBetaOutLum); const float sinDPhi = sinf(dPhi); const float dBetaRIn2 = 0; // TODO-RH float dBetaROut = 0; if(isEC_lastLayer) { dBetaROut = (sqrtf(hitsInGPU.highEdgeXs[outerOuterEdgeIndex] * hitsInGPU.highEdgeXs[outerOuterEdgeIndex] + hitsInGPU.highEdgeYs[outerOuterEdgeIndex] * hitsInGPU.highEdgeYs[outerOuterEdgeIndex]) - sqrtf(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] * hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] + hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] * hitsInGPU.lowEdgeYs[outerOuterEdgeIndex])) * sinDPhi / drt_tl_axis; } const float dBetaROut2 = dBetaROut * dBetaROut; betaOutCut = asinf(fminf(drt_tl_axis*k2Rinv1GeVf / ptCut, sinAlphaMax)) //FIXME: need faster version + (0.02f / sdOut_d) + sqrtf(dBetaLum2 + dBetaMuls*dBetaMuls); //Cut #6: The real beta cut if (not (fabsf(betaOut) < betaOutCut)) { pass = false; } const float pt_betaOut = drt_tl_axis * k2Rinv1GeVf / sin(betaOut); const float dBetaRes = 0.02f / fminf(sdOut_d, drt_InSeg); const float dBetaCut2 = (dBetaRes*dBetaRes * 2.0f + dBetaMuls * dBetaMuls + dBetaLum2 + dBetaRIn2 + dBetaROut2 + 0.25 * (fabsf(betaInRHmin - betaInRHmax) + fabsf(betaOutRHmin - betaOutRHmax)) * (fabsf(betaInRHmin - betaInRHmax) + fabsf(betaOutRHmin - betaOutRHmax))); float dBeta = betaIn - betaOut; deltaBetaCut = sqrtf(dBetaCut2); if (not (dBeta * dBeta <= dBetaCut2)) { //printf("dBeta2 = %f, dBetaCut2 = %f\n",dBeta * dBeta, dBetaCut2); pass = false; } return pass; } __device__ bool SDL::runTrackletDefaultAlgoPPEE(struct modules& modulesInGPU, struct hits& hitsInGPU, struct miniDoublets& mdsInGPU ,struct segments& segmentsInGPU, unsigned int pixelModuleIndex, unsigned int outerInnerLowerModuleIndex, unsigned int outerOuterLowerModuleIndex, unsigned int innerSegmentIndex, unsigned int outerSegmentIndex, float& zOut, float& rtOut, float& deltaPhiPos, float& dPhi, float& betaIn, float& betaOut, float& pt_beta, unsigned int N_MAX_SEGMENTS_PER_MODULE, float& zLo, float& rtLo, float& rtHi, float& sdlCut, float& betaInCut, float& betaOutCut, float& deltaBetaCut, float& kZ) // pixel to EE segments { bool pass = true; bool isPS_OutLo = (modulesInGPU.moduleType[outerInnerLowerModuleIndex] == SDL::PS); unsigned int innerInnerAnchorHitIndex = segmentsInGPU.innerMiniDoubletAnchorHitIndices[innerSegmentIndex]; unsigned int outerInnerAnchorHitIndex = segmentsInGPU.innerMiniDoubletAnchorHitIndices[outerSegmentIndex]; unsigned int innerOuterAnchorHitIndex = segmentsInGPU.outerMiniDoubletAnchorHitIndices[innerSegmentIndex]; unsigned int outerOuterAnchorHitIndex= segmentsInGPU.outerMiniDoubletAnchorHitIndices[outerSegmentIndex]; unsigned int pixelSegmentArrayIndex = innerSegmentIndex - (pixelModuleIndex * N_MAX_SEGMENTS_PER_MODULE); float ptIn = segmentsInGPU.ptIn[pixelSegmentArrayIndex]; float ptSLo = ptIn; float px = segmentsInGPU.px[pixelSegmentArrayIndex]; float py = segmentsInGPU.py[pixelSegmentArrayIndex]; float pz = segmentsInGPU.pz[pixelSegmentArrayIndex]; float ptErr = segmentsInGPU.ptErr[pixelSegmentArrayIndex]; float etaErr = segmentsInGPU.etaErr[pixelSegmentArrayIndex]; ptSLo = fmaxf(PTCUT, ptSLo - 10.0f*fmaxf(ptErr, 0.005f*ptSLo)); ptSLo = fminf(10.0f, ptSLo); float rtIn = hitsInGPU.rts[innerOuterAnchorHitIndex]; rtOut = hitsInGPU.rts[outerInnerAnchorHitIndex]; float zIn = hitsInGPU.zs[innerOuterAnchorHitIndex]; zOut = hitsInGPU.zs[outerInnerAnchorHitIndex]; float rtOut_o_rtIn = rtOut/rtIn; const float zpitch_InLo = 0.05f; float zpitch_OutLo = 
(isPS_OutLo ? pixelPSZpitch : strip2SZpitch); float zGeom = zpitch_InLo + zpitch_OutLo; const float sdlSlope = asinf(fminf(rtOut * k2Rinv1GeVf / ptCut, sinAlphaMax)); const float dzDrtScale = tanf(sdlSlope) / sdlSlope;//FIXME: need approximate value zLo = zIn + (zIn - deltaZLum) * (rtOut_o_rtIn - 1.f) * (zIn > 0.f ? 1.f : dzDrtScale) - zGeom; //slope-correction only on outer end if (not (zIn * zOut > 0)) { pass = false; } const float dLum = copysignf(deltaZLum, zIn); bool isOutSgInnerMDPS = modulesInGPU.moduleType[outerInnerLowerModuleIndex] == SDL::PS; const float rtGeom1 = isOutSgInnerMDPS ? pixelPSZpitch : strip2SZpitch;//FIXME: make this chosen by configuration for lay11,12 full PS const float zGeom1 = copysignf(zGeom, zIn); //used in B-E region rtLo = rtIn * (1.f + (zOut - zIn - zGeom1) / (zIn + zGeom1 + dLum) / dzDrtScale) - rtGeom1; //slope correction only on the lower end if (not (rtOut >= rtLo)) { pass = false; } float zInForHi = zIn - zGeom1 - dLum; if (zInForHi * zIn < 0) zInForHi = copysignf(0.1f, zIn); rtHi = rtIn * (1.f + (zOut - zIn + zGeom1) / zInForHi) + rtGeom1; // Cut #2: rt condition if (not (rtOut >= rtLo and rtOut <= rtHi)) { pass = false; } const float rt_InUp = hitsInGPU.rts[innerOuterAnchorHitIndex]; const float rt_OutLo = hitsInGPU.rts[outerInnerAnchorHitIndex]; const float z_InUp = hitsInGPU.zs[innerOuterAnchorHitIndex]; const float dzOutInAbs = fabsf(zOut - zIn); const float coshEta = hypotf(ptIn, pz) / ptIn; const float multDzDr = dzOutInAbs*coshEta/(coshEta*coshEta - 1.f); const float r3_InUp = sqrtf(z_InUp * z_InUp + rt_InUp * rt_InUp); const float sdlThetaMulsF = 0.015f * sqrtf(0.1f + 0.2 * (rt_OutLo - rtIn) / 50.f) * sqrtf(r3_InUp / rtIn); const float sdlMuls = sdlThetaMulsF * 3.f / ptCut * 4.f; // will need a better guess than x4? float drtErr = etaErr*multDzDr; drtErr *= drtErr; drtErr += 0.03f*0.03f; // pixel size x2. ... random for now drtErr *= 9.f; //3 sigma drtErr += sdlMuls*sdlMuls*multDzDr*multDzDr/3.f*coshEta*coshEta;//sloppy: relative muls is 1/3 of total muls drtErr = sqrtf(drtErr); const float drtDzIn = fabsf(ptIn / pz);//all tracks are out-going in endcaps? 
const float drt_OutLo_InUp = (rt_OutLo - rt_InUp); // drOutIn const float rtWindow = drtErr + rtGeom1; const float drtMean = drtDzIn * dzOutInAbs * (1.f - drt_OutLo_InUp * drt_OutLo_InUp * 4 * k2Rinv1GeVf * k2Rinv1GeVf / ptIn / ptIn / 24.f); // with curved path correction const float rtLo_point = rtIn + drtMean - rtWindow; const float rtHi_point = rtIn + drtMean + rtWindow; // Cut #3: rt-z pointed if (not (rtOut >= rtLo_point and rtOut <= rtHi_point)) { pass = false; } const float alpha1GeV_OutLo = asinf(fminf(rt_OutLo * k2Rinv1GeVf / ptCut, sinAlphaMax)); const float sdlPVoff = 0.1f / rt_OutLo; sdlCut = alpha1GeV_OutLo + sqrtf(sdlMuls * sdlMuls + sdlPVoff * sdlPVoff); deltaPhiPos = deltaPhi(hitsInGPU.xs[innerOuterAnchorHitIndex], hitsInGPU.ys[innerOuterAnchorHitIndex], hitsInGPU.zs[innerOuterAnchorHitIndex], hitsInGPU.xs[outerOuterAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex]); //no deltaphipos cut float midPointX = (hitsInGPU.xs[innerInnerAnchorHitIndex] + hitsInGPU.xs[outerInnerAnchorHitIndex])/2; float midPointY = (hitsInGPU.ys[innerInnerAnchorHitIndex] + hitsInGPU.ys[outerInnerAnchorHitIndex])/2; float midPointZ = (hitsInGPU.zs[innerInnerAnchorHitIndex] + hitsInGPU.zs[outerInnerAnchorHitIndex])/2; float diffX = (-hitsInGPU.xs[innerInnerAnchorHitIndex] + hitsInGPU.xs[outerInnerAnchorHitIndex]); float diffY = (-hitsInGPU.ys[innerInnerAnchorHitIndex] + hitsInGPU.ys[outerInnerAnchorHitIndex]); float diffZ = (-hitsInGPU.zs[innerInnerAnchorHitIndex] + hitsInGPU.zs[outerInnerAnchorHitIndex]); dPhi = deltaPhi(midPointX, midPointY, midPointZ, diffX, diffY, diffZ); // Cut #5: deltaPhiChange if (not (fabsf(dPhi) <= sdlCut)) { pass = false; } float alpha_InLo = segmentsInGPU.dPhiChanges[innerSegmentIndex]; float alpha_OutLo = segmentsInGPU.dPhiChanges[outerSegmentIndex]; bool isEC_lastLayer = modulesInGPU.subdets[outerOuterLowerModuleIndex] == SDL::Endcap and modulesInGPU.moduleType[outerOuterLowerModuleIndex] == SDL::TwoS; unsigned int outerOuterEdgeIndex = outerOuterAnchorHitIndex; float alpha_OutUp,alpha_OutUp_highEdge,alpha_OutUp_lowEdge; alpha_OutUp = deltaPhi(hitsInGPU.xs[outerOuterAnchorHitIndex],hitsInGPU.ys[outerOuterAnchorHitIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); alpha_OutUp_highEdge = alpha_OutUp; alpha_OutUp_lowEdge = alpha_OutUp; float tl_axis_x = hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; float tl_axis_y = hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; float tl_axis_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; float tl_axis_highEdge_x = tl_axis_x; float tl_axis_highEdge_y = tl_axis_y; float tl_axis_highEdge_z = tl_axis_z; float tl_axis_lowEdge_x = tl_axis_x; float tl_axis_lowEdge_y = tl_axis_y; float tl_axis_lowEdge_z = tl_axis_z; betaIn = -deltaPhi(px, py, pz, tl_axis_x, tl_axis_y, tl_axis_z); float betaInRHmin = betaIn; float betaInRHmax = betaIn; betaOut = -alpha_OutUp + deltaPhi(hitsInGPU.xs[outerOuterAnchorHitIndex], hitsInGPU.ys[outerOuterAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_x, tl_axis_y, tl_axis_z); float betaOutRHmin = betaOut; float betaOutRHmax = betaOut; if(isEC_lastLayer) { alpha_OutUp_highEdge = 
deltaPhi(hitsInGPU.highEdgeXs[outerOuterEdgeIndex],hitsInGPU.highEdgeYs[outerOuterEdgeIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.highEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.highEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); alpha_OutUp_lowEdge = deltaPhi(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex],hitsInGPU.lowEdgeYs[outerOuterEdgeIndex],hitsInGPU.zs[outerOuterAnchorHitIndex],hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex], hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex], hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[outerInnerAnchorHitIndex]); tl_axis_highEdge_x = hitsInGPU.highEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; tl_axis_highEdge_y = hitsInGPU.highEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; tl_axis_highEdge_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; tl_axis_lowEdge_x = hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] - hitsInGPU.xs[innerOuterAnchorHitIndex]; tl_axis_lowEdge_y = hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] - hitsInGPU.ys[innerOuterAnchorHitIndex]; tl_axis_lowEdge_z = hitsInGPU.zs[outerOuterAnchorHitIndex] - hitsInGPU.zs[innerOuterAnchorHitIndex]; betaOutRHmin = -alpha_OutUp_highEdge + deltaPhi(hitsInGPU.highEdgeXs[outerOuterEdgeIndex], hitsInGPU.highEdgeYs[outerOuterEdgeIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_highEdge_x, tl_axis_highEdge_y, tl_axis_highEdge_z); betaOutRHmax = -alpha_OutUp_lowEdge + deltaPhi(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex], hitsInGPU.lowEdgeYs[outerOuterEdgeIndex], hitsInGPU.zs[outerOuterAnchorHitIndex], tl_axis_lowEdge_x, tl_axis_lowEdge_y, tl_axis_lowEdge_z); } //beta computation float drt_tl_axis = sqrtf(tl_axis_x * tl_axis_x + tl_axis_y * tl_axis_y); float drt_tl_lowEdge = sqrtf(tl_axis_lowEdge_x * tl_axis_lowEdge_x + tl_axis_lowEdge_y * tl_axis_lowEdge_y); float drt_tl_highEdge = sqrtf(tl_axis_highEdge_x * tl_axis_highEdge_x + tl_axis_highEdge_y * tl_axis_highEdge_y); //no betaIn cut for the pixels const float rt_InSeg = sqrtf((hitsInGPU.xs[innerOuterAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]) * (hitsInGPU.xs[innerOuterAnchorHitIndex] - hitsInGPU.xs[innerInnerAnchorHitIndex]) + (hitsInGPU.ys[innerOuterAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex]) * (hitsInGPU.ys[innerOuterAnchorHitIndex] - hitsInGPU.ys[innerInnerAnchorHitIndex])); float betaAv = 0.5f * (betaIn + betaOut); pt_beta = ptIn; const float pt_betaMax = 7.0f; int lIn = 0; int lOut = isEC_lastLayer ? 11 : 5; float sdOut_dr = sqrtf((hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex]) * (hitsInGPU.xs[outerOuterAnchorHitIndex] - hitsInGPU.xs[outerInnerAnchorHitIndex]) + (hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex]) * (hitsInGPU.ys[outerOuterAnchorHitIndex] - hitsInGPU.ys[outerInnerAnchorHitIndex])); float sdOut_d = hitsInGPU.rts[outerOuterAnchorHitIndex] - hitsInGPU.rts[outerInnerAnchorHitIndex]; const float diffDr = fabsf(rt_InSeg - sdOut_dr) / fabsf(rt_InSeg + sdOut_dr); runDeltaBetaIterations(betaIn, betaOut, betaAv, pt_beta, rt_InSeg, sdOut_dr, drt_tl_axis, lIn); const float betaInMMSF = (fabsf(betaInRHmin + betaInRHmax) > 0) ? 
(2.f * betaIn / fabsf(betaInRHmin + betaInRHmax)) : 0.; //mean value of min,max is the old betaIn const float betaOutMMSF = (fabsf(betaOutRHmin + betaOutRHmax) > 0) ? (2.f * betaOut / fabsf(betaOutRHmin + betaOutRHmax)) : 0.; betaInRHmin *= betaInMMSF; betaInRHmax *= betaInMMSF; betaOutRHmin *= betaOutMMSF; betaOutRHmax *= betaOutMMSF; const float dBetaMuls = sdlThetaMulsF * 4.f / fminf(fabsf(pt_beta), pt_betaMax); //need to confirm the range-out value of 7 GeV float sdIn_rt = hitsInGPU.rts[innerOuterAnchorHitIndex]; float sdOut_rt = hitsInGPU.rts[outerInnerAnchorHitIndex]; float sdIn_z = hitsInGPU.zs[innerOuterAnchorHitIndex]; float sdOut_z = hitsInGPU.zs[outerInnerAnchorHitIndex]; const float alphaInAbsReg = fmaxf(fabsf(alpha_InLo), asinf(fminf(sdIn_rt * k2Rinv1GeVf / 3.0f, sinAlphaMax))); const float alphaOutAbsReg = fmaxf(fabsf(alpha_OutLo), asinf(fminf(sdOut_rt * k2Rinv1GeVf / 3.0f, sinAlphaMax))); const float dBetaInLum = lIn < 11 ? 0.0f : fabsf(alphaInAbsReg*deltaZLum / sdIn_z); const float dBetaOutLum = lOut < 11 ? 0.0f : fabsf(alphaOutAbsReg*deltaZLum / sdOut_z); const float dBetaLum2 = (dBetaInLum + dBetaOutLum) * (dBetaInLum + dBetaOutLum); const float sinDPhi = sinf(dPhi); const float dBetaRIn2 = 0; // TODO-RH float dBetaROut = 0; if(isEC_lastLayer) { dBetaROut = (sqrtf(hitsInGPU.highEdgeXs[outerOuterEdgeIndex] * hitsInGPU.highEdgeXs[outerOuterEdgeIndex] + hitsInGPU.highEdgeYs[outerOuterEdgeIndex] * hitsInGPU.highEdgeYs[outerOuterEdgeIndex]) - sqrtf(hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] * hitsInGPU.lowEdgeXs[outerOuterEdgeIndex] + hitsInGPU.lowEdgeYs[outerOuterEdgeIndex] * hitsInGPU.lowEdgeYs[outerOuterEdgeIndex])) * sinDPhi / drt_tl_axis; } const float dBetaROut2 = dBetaROut * dBetaROut; betaOutCut = asinf(fminf(drt_tl_axis*k2Rinv1GeVf / ptCut, sinAlphaMax)) //FIXME: need faster version + (0.02f / sdOut_d) + sqrtf(dBetaLum2 + dBetaMuls*dBetaMuls); //Cut #6: The real beta cut if (not (fabsf(betaOut) < betaOutCut)) { pass = false; } const float pt_betaOut = drt_tl_axis * k2Rinv1GeVf / sin(betaOut); float drt_InSeg = hitsInGPU.rts[innerOuterAnchorHitIndex] - hitsInGPU.rts[innerInnerAnchorHitIndex]; const float dBetaRes = 0.02f / fminf(sdOut_d, drt_InSeg); const float dBetaCut2 = (dBetaRes*dBetaRes * 2.0f + dBetaMuls * dBetaMuls + dBetaLum2 + dBetaRIn2 + dBetaROut2 + 0.25 * (fabsf(betaInRHmin - betaInRHmax) + fabsf(betaOutRHmin - betaOutRHmax)) * (fabsf(betaInRHmin - betaInRHmax) + fabsf(betaOutRHmin - betaOutRHmax))); float dBeta = betaIn - betaOut; deltaBetaCut = sqrtf(dBetaCut2); if (not (dBeta * dBeta <= dBetaCut2)) { pass = false; } return pass; }
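The .cu file above is the CUDA original of the hipified tracklet code whose tail closes just before its filename; the translation is a mechanical rename of the runtime API (cudaMalloc, cudaMemset, cudaFree, cudaGetDevice and the managed variants become their hip* counterparts), while the pointer arithmetic that carves zOut into the rtOut/deltaPhiPos/deltaPhi views is untouched. A minimal compilable sketch of that pattern on the HIP side, using a stand-in struct rather than SDL::pixelTracklets (illustrative only, not a quote from the generated twin):

#include <hip/hip_runtime.h>

// Stand-in for the relevant members of the tracklet container (hypothetical name).
struct pixelTrackletsSketch {
  unsigned int* nPixelTracklets;
  float* zOut; float* rtOut; float* deltaPhiPos; float* deltaPhi;
};

void createSketch(pixelTrackletsSketch& t, unsigned int maxPixelTracklets) {
  hipMalloc(&t.nPixelTracklets, sizeof(unsigned int));          // was cudaMalloc
  hipMalloc(&t.zOut, maxPixelTracklets * 4 * sizeof(float));    // one slab holding four float arrays
  t.rtOut       = t.zOut + maxPixelTracklets;                   // view carving, unchanged by hipify
  t.deltaPhiPos = t.zOut + maxPixelTracklets * 2;
  t.deltaPhi    = t.zOut + maxPixelTracklets * 3;
  hipMemset(t.nPixelTracklets, 0, sizeof(unsigned int));        // was cudaMemset
}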
8ce2daf5db8d34a66aa497223a3902b3c8ace096.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "d_rms_error.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
          BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      double *m = NULL;
      hipMalloc(&m, XSIZE*YSIZE);
      double *c = NULL;
      hipMalloc(&c, XSIZE*YSIZE);
      double *error_sum_arr = NULL;
      hipMalloc(&error_sum_arr, XSIZE*YSIZE);
      point_t *d_data = NULL;
      hipMalloc(&d_data, XSIZE*YSIZE);
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);
      hipLaunchKernelGGL((d_rms_error), dim3(gridBlock), dim3(threadBlock), 0, 0, m, c, error_sum_arr, d_data);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((d_rms_error), dim3(gridBlock), dim3(threadBlock), 0, 0, m, c, error_sum_arr, d_data);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((d_rms_error), dim3(gridBlock), dim3(threadBlock), 0, 0, m, c, error_sum_arr, d_data);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
    }
  }
}
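One note on the generated harness above: the device is synchronized only once, right after the first launch, so the timed loop of 1000 launches mostly measures enqueue overhead plus whatever execution happens to overlap the interval, rather than total kernel time. A hedged variant of just the timing section (same identifiers as the file above; not part of the generated code) that makes the interval cover execution as well:

auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
  hipLaunchKernelGGL(d_rms_error, gridBlock, threadBlock, 0, 0, m, c, error_sum_arr, d_data);
}
hipDeviceSynchronize();  // drain the stream so end - start includes kernel execution
auto end = steady_clock::now();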
8ce2daf5db8d34a66aa497223a3902b3c8ace096.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "d_rms_error.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
          BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      double *m = NULL;
      cudaMalloc(&m, XSIZE*YSIZE);
      double *c = NULL;
      cudaMalloc(&c, XSIZE*YSIZE);
      double *error_sum_arr = NULL;
      cudaMalloc(&error_sum_arr, XSIZE*YSIZE);
      point_t *d_data = NULL;
      cudaMalloc(&d_data, XSIZE*YSIZE);
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);
      d_rms_error<<<gridBlock,threadBlock>>>(m, c, error_sum_arr, d_data);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        d_rms_error<<<gridBlock,threadBlock>>>(m, c, error_sum_arr, d_data);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        d_rms_error<<<gridBlock,threadBlock>>>(m, c, error_sum_arr, d_data);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
    }
  }
}
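Placed side by side, the .cu file above and its .hip twin two entries up differ mainly in the launch statement: hipify rewrites the triple-chevron launch into hipLaunchKernelGGL with explicit dynamic-shared-memory and stream arguments (both 0 here). The two equivalent forms, using the kernel and arguments from these files:

// CUDA original
d_rms_error<<<gridBlock, threadBlock>>>(m, c, error_sum_arr, d_data);
// HIP output: grid, block, shared-memory bytes, stream, then the kernel arguments
hipLaunchKernelGGL(d_rms_error, gridBlock, threadBlock, 0, 0, m, c, error_sum_arr, d_data);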
76e76d13929add187b74d87b767e18a642c43372.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include <doctest.h> #include <taskflow/taskflow.hpp> #include <taskflow/cudaflow.hpp> constexpr float eps = 0.0001f; // -------------------------------------------------------- // Testcase: add2 // -------------------------------------------------------- template <typename T, typename F> void add2() { //const unsigned N = 1<<20; for(size_t N=1; N<=(1<<20); N <<= 1) { tf::Taskflow taskflow; tf::Executor executor; T v1 = ::rand() % 100; T v2 = ::rand() % 100; T v3 = v1 + v2; std::vector<T> hx, hy; T* dx {nullptr}; T* dy {nullptr}; // allocate x auto allocate_x = taskflow.emplace([&]() { hx.resize(N, v1); REQUIRE(hipMalloc(&dx, N*sizeof(T)) == hipSuccess); }).name("allocate_x"); // allocate y auto allocate_y = taskflow.emplace([&]() { hy.resize(N, v2); REQUIRE(hipMalloc(&dy, N*sizeof(T)) == hipSuccess); }).name("allocate_y"); // axpy auto cudaflow = taskflow.emplace([&](F& cf) { auto h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x"); auto h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y"); auto d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x"); auto d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y"); //auto kernel = cf.add(dx, N, dx, dy); auto kernel = cf.transform( dx, dx+N, [] __device__ (T& v1, T& v2) { return v1 + v2; }, dx, dy ); kernel.succeed(h2d_x, h2d_y) .precede(d2h_x, d2h_y); }).name("saxpy"); cudaflow.succeed(allocate_x, allocate_y); // Add a verification task auto verifier = taskflow.emplace([&](){ for (size_t i = 0; i < N; i++) { REQUIRE(::fabs(hx[i] - v3) < eps); } }).succeed(cudaflow).name("verify"); // free memory auto deallocate_x = taskflow.emplace([&](){ REQUIRE(hipFree(dx) == hipSuccess); }).name("deallocate_x"); auto deallocate_y = taskflow.emplace([&](){ REQUIRE(hipFree(dy) == hipSuccess); }).name("deallocate_y"); verifier.precede(deallocate_x, deallocate_y); executor.run(taskflow).wait(); } } TEST_CASE("add2.int" * doctest::timeout(300)) { add2<int, tf::cudaFlow>(); } TEST_CASE("add2.float" * doctest::timeout(300)) { add2<float, tf::cudaFlow>(); } TEST_CASE("add2.double" * doctest::timeout(300)) { add2<double, tf::cudaFlow>(); } TEST_CASE("capture_add2.int" * doctest::timeout(300)) { add2<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add2.float" * doctest::timeout(300)) { add2<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add2.double" * doctest::timeout(300)) { add2<double, tf::cudaFlowCapturer>(); } // -------------------------------------------------------- // Testcase: add3 // -------------------------------------------------------- template <typename T, typename F> void add3() { //const unsigned N = 1<<20; for(size_t N=1; N<=(1<<20); N <<= 1) { tf::Taskflow taskflow; tf::Executor executor; T v1 = ::rand() % 100; T v2 = ::rand() % 100; T v3 = ::rand() % 100; T v4 = v1 + v2 + v3; std::vector<T> hx, hy, hz; T* dx {nullptr}; T* dy {nullptr}; T* dz {nullptr}; // allocate x auto allocate_x = taskflow.emplace([&]() { hx.resize(N, v1); REQUIRE(hipMalloc(&dx, N*sizeof(T)) == hipSuccess); }).name("allocate_x"); // allocate y auto allocate_y = taskflow.emplace([&]() { hy.resize(N, v2); REQUIRE(hipMalloc(&dy, N*sizeof(T)) == hipSuccess); }).name("allocate_y"); // allocate z auto allocate_z = taskflow.emplace([&]() { hz.resize(N, v3); REQUIRE(hipMalloc(&dz, N*sizeof(T)) == hipSuccess); }).name("allocate_y"); // saxpy auto cudaflow = taskflow.emplace([&](F& cf) { auto h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x"); auto h2d_y = cf.copy(dy, 
hy.data(), N).name("h2d_y"); auto h2d_z = cf.copy(dz, hz.data(), N).name("h2d_z"); auto d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x"); auto d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y"); auto d2h_z = cf.copy(hz.data(), dz, N).name("d2h_z"); //auto kernel = cf.add(dx, N, dx, dy, dz); auto kernel = cf.transform( dx, dx+N, [] __device__ (T& v1, T& v2, T& v3) { return v1 + v2 + v3; }, dx, dy, dz ); kernel.succeed(h2d_x, h2d_y, h2d_z) .precede(d2h_x, d2h_y, d2h_z); }).name("saxpy"); cudaflow.succeed(allocate_x, allocate_y, allocate_z); // Add a verification task auto verifier = taskflow.emplace([&](){ for (size_t i = 0; i < N; i++) { REQUIRE(::fabs(hx[i] - v4) < eps); } }).succeed(cudaflow).name("verify"); // free memory auto deallocate_x = taskflow.emplace([&](){ REQUIRE(hipFree(dx) == hipSuccess); }).name("deallocate_x"); auto deallocate_y = taskflow.emplace([&](){ REQUIRE(hipFree(dy) == hipSuccess); }).name("deallocate_y"); auto deallocate_z = taskflow.emplace([&](){ REQUIRE(hipFree(dz) == hipSuccess); }).name("deallocate_z"); verifier.precede(deallocate_x, deallocate_y, deallocate_z); executor.run(taskflow).wait(); } } TEST_CASE("add3.int" * doctest::timeout(300)) { add3<int, tf::cudaFlow>(); } TEST_CASE("add3.float" * doctest::timeout(300)) { add3<float, tf::cudaFlow>(); } TEST_CASE("add3.double" * doctest::timeout(300)) { add3<double, tf::cudaFlow>(); } TEST_CASE("capture_add3.int" * doctest::timeout(300)) { add3<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add3.float" * doctest::timeout(300)) { add3<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add3.double" * doctest::timeout(300)) { add3<double, tf::cudaFlowCapturer>(); } // -------------------------------------------------------- // Testcase: multiply2 // -------------------------------------------------------- template <typename T, typename F> void multiply2() { //const unsigned N = 1<<20; for(size_t N=1; N<=(1<<20); N <<= 1) { tf::Taskflow taskflow; tf::Executor executor; T v1 = ::rand() % 100; T v2 = ::rand() % 100; T v3 = v1 * v2; std::vector<T> hx, hy; T* dx {nullptr}; T* dy {nullptr}; // allocate x auto allocate_x = taskflow.emplace([&]() { hx.resize(N, v1); REQUIRE(hipMalloc(&dx, N*sizeof(T)) == hipSuccess); }).name("allocate_x"); // allocate y auto allocate_y = taskflow.emplace([&]() { hy.resize(N, v2); REQUIRE(hipMalloc(&dy, N*sizeof(T)) == hipSuccess); }).name("allocate_y"); // saxpy auto cudaflow = taskflow.emplace([&](F& cf) { auto h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x"); auto h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y"); auto d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x"); auto d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y"); //auto kernel = cf.multiply(dx, N, dx, dy); auto kernel = cf.transform( dx, dx+N, [] __device__ (T& v1, T& v2) { return v1 * v2; }, dx, dy ); kernel.succeed(h2d_x, h2d_y) .precede(d2h_x, d2h_y); }).name("saxpy"); cudaflow.succeed(allocate_x, allocate_y); // Add a verification task auto verifier = taskflow.emplace([&](){ for (size_t i = 0; i < N; i++) { REQUIRE(::fabs(hx[i] - v3) < eps); } }).succeed(cudaflow).name("verify"); // free memory auto deallocate_x = taskflow.emplace([&](){ REQUIRE(hipFree(dx) == hipSuccess); }).name("deallocate_x"); auto deallocate_y = taskflow.emplace([&](){ REQUIRE(hipFree(dy) == hipSuccess); }).name("deallocate_y"); verifier.precede(deallocate_x, deallocate_y); executor.run(taskflow).wait(); } } TEST_CASE("multiply2.int" * doctest::timeout(300)) { multiply2<int, tf::cudaFlow>(); } TEST_CASE("multiply2.float" * doctest::timeout(300)) { 
multiply2<float, tf::cudaFlow>(); } TEST_CASE("multiply2.double" * doctest::timeout(300)) { multiply2<double, tf::cudaFlow>(); } TEST_CASE("capture_multiply2.int" * doctest::timeout(300)) { multiply2<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_multiply2.float" * doctest::timeout(300)) { multiply2<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_multiply2.double" * doctest::timeout(300)) { multiply2<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // for_each // ---------------------------------------------------------------------------- template <typename T, typename F> void for_each() { for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T* cpu = nullptr; T* gpu = nullptr; auto cputask = taskflow.emplace([&](){ cpu = static_cast<T*>(std::calloc(n, sizeof(T))); REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(cpu, gpu, n); auto h2d = cf.copy(gpu, cpu, n); auto kernel = cf.for_each( gpu, gpu+n, [] __device__ (T& val) { val = 65536; } ); h2d.precede(kernel); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); for(int i=0; i<n; i++) { REQUIRE(::fabs(cpu[i] - (T)65536) < eps); } std::free(cpu); REQUIRE(hipFree(gpu) == hipSuccess); } } TEST_CASE("for_each.int" * doctest::timeout(300)) { for_each<int, tf::cudaFlow>(); } TEST_CASE("for_each.float" * doctest::timeout(300)) { for_each<float, tf::cudaFlow>(); } TEST_CASE("for_each.double" * doctest::timeout(300)) { for_each<double, tf::cudaFlow>(); } TEST_CASE("capture_for_each.int" * doctest::timeout(300)) { for_each<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_for_each.float" * doctest::timeout(300)) { for_each<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_for_each.double" * doctest::timeout(300)) { for_each<double, tf::cudaFlowCapturer>(); } // -------------------------------------------------------- // Testcase: for_each_index // -------------------------------------------------------- template <typename T, typename F> void for_each_index() { for(int n=10; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T* cpu = nullptr; T* gpu = nullptr; auto cputask = taskflow.emplace([&](){ cpu = static_cast<T*>(std::calloc(n, sizeof(T))); REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess); }); auto gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(cpu, gpu, n); auto h2d = cf.copy(gpu, cpu, n); //auto kernel = cf.for_each_index(gpu, n, [] __device__ (T& value){ value = 17; }); auto kernel1 = cf.for_each_index( 0, n, 2, [gpu] __device__ (int i) { gpu[i] = 17; } ); auto kernel2 = cf.for_each_index( 1, n, 2, [=] __device__ (int i) { gpu[i] = -17; } ); h2d.precede(kernel1, kernel2); d2h.succeed(kernel1, kernel2); }); cputask.precede(gputask); executor.run(taskflow).wait(); for(int i=0; i<n; i++) { if(i % 2 == 0) { REQUIRE(::fabs(cpu[i] - (T)17) < eps); } else { REQUIRE(::fabs(cpu[i] - (T)(-17)) < eps); } } std::free(cpu); REQUIRE(hipFree(gpu) == hipSuccess); } } TEST_CASE("for_each_index.int" * doctest::timeout(300)) { for_each_index<int, tf::cudaFlow>(); } TEST_CASE("for_each_index.float" * doctest::timeout(300)) { for_each_index<float, tf::cudaFlow>(); } TEST_CASE("for_each_index.double" * doctest::timeout(300)) { for_each_index<double, tf::cudaFlow>(); } TEST_CASE("capture_for_each_index.int" * doctest::timeout(300)) { for_each_index<int, tf::cudaFlowCapturer>(); } 
TEST_CASE("capture_for_each_index.float" * doctest::timeout(300)) { for_each_index<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_for_each_index.double" * doctest::timeout(300)) { for_each_index<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // transform // ---------------------------------------------------------------------------- template <typename F> void transform() { for(unsigned n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; int* htgt = nullptr; int* tgt = nullptr; int* hsrc1 = nullptr; int* src1 = nullptr; float* hsrc2 = nullptr; float* src2 = nullptr; double* hsrc3 = nullptr; double* src3 = nullptr; auto htgttask = taskflow.emplace([&](){ htgt = static_cast<int*>(std::calloc(n, sizeof(int))); hsrc1 = static_cast<int*>(std::calloc(n, sizeof(int))); hsrc2 = static_cast<float*>(std::calloc(n, sizeof(float))); hsrc3 = static_cast<double*>(std::calloc(n, sizeof(double))); REQUIRE(hipMalloc(&tgt, n*sizeof(int)) == hipSuccess); REQUIRE(hipMalloc(&src1, n*sizeof(int)) == hipSuccess); REQUIRE(hipMalloc(&src2, n*sizeof(float)) == hipSuccess); REQUIRE(hipMalloc(&src3, n*sizeof(double)) == hipSuccess); }); auto gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(htgt, tgt, n); auto d2h3 = cf.copy(hsrc3, src3, n); auto d2h2 = cf.copy(hsrc2, src2, n); auto d2h1 = cf.copy(hsrc1, src1, n); auto kernel = cf.transform( tgt, tgt+n, [] __device__ (int& v1, float& v2, double& v3) -> int { v1 = 1; v2 = 3.0f; v3 = 5.0; return 17; }, src1, src2, src3 ); auto h2d = cf.copy(tgt, htgt, n); h2d.precede(kernel); kernel.precede(d2h, d2h1, d2h2, d2h3); }); htgttask.precede(gputask); executor.run(taskflow).wait(); for(unsigned i=0; i<n; ++i) { REQUIRE(htgt[i] == 17); REQUIRE(hsrc1[i] == 1); REQUIRE(::fabs(hsrc2[i] - 3.0f) < eps); REQUIRE(::fabs(hsrc3[i] - 5.0) < eps); } std::free(htgt); std::free(hsrc1); std::free(hsrc2); std::free(hsrc3); REQUIRE(hipFree(tgt) == hipSuccess); REQUIRE(hipFree(src1) == hipSuccess); REQUIRE(hipFree(src2) == hipSuccess); REQUIRE(hipFree(src3) == hipSuccess); } } TEST_CASE("transform" * doctest::timeout(300)) { transform<tf::cudaFlow>(); } TEST_CASE("capture_transform" * doctest::timeout(300) ) { transform<tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void reduce() { for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess); REQUIRE(hipMalloc(&res, 1*sizeof(T)) == hipSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.reduce( gpu, gpu+n, res, [] __device__ (T a, T b) mutable { return a + b; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(::fabs(sum-sol+1000) < 0.0001); REQUIRE(hipFree(gpu) == hipSuccess); REQUIRE(hipFree(res) == hipSuccess); } } TEST_CASE("reduce.int" * doctest::timeout(300)) { reduce<int, tf::cudaFlow>(); } TEST_CASE("reduce.float" * doctest::timeout(300)) { reduce<float, 
tf::cudaFlow>(); } TEST_CASE("reduce.double" * doctest::timeout(300)) { reduce<double, tf::cudaFlow>(); } TEST_CASE("capture_reduce.int" * doctest::timeout(300)) { reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_reduce.float" * doctest::timeout(300)) { reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_reduce.double" * doctest::timeout(300)) { reduce<double, tf::cudaFlow>(); } // ---------------------------------------------------------------------------- // uninitialized_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void uninitialized_reduce() { for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess); REQUIRE(hipMalloc(&res, 1*sizeof(T)) == hipSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.uninitialized_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(::fabs(sum-sol) < 0.0001); REQUIRE(hipFree(gpu) == hipSuccess); REQUIRE(hipFree(res) == hipSuccess); } } TEST_CASE("uninitialized_reduce.int" * doctest::timeout(300)) { uninitialized_reduce<int, tf::cudaFlow>(); } TEST_CASE("uninitialized_reduce.float" * doctest::timeout(300)) { uninitialized_reduce<float, tf::cudaFlow>(); } TEST_CASE("uninitialized_reduce.double" * doctest::timeout(300)) { uninitialized_reduce<double, tf::cudaFlow>(); } TEST_CASE("capture_uninitialized_reduce.int" * doctest::timeout(300)) { uninitialized_reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_uninitialized_reduce.float" * doctest::timeout(300)) { uninitialized_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_uninitialized_reduce.double" * doctest::timeout(300)) { uninitialized_reduce<double, tf::cudaFlow>(); } // ---------------------------------------------------------------------------- // transform_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void transform_reduce() { tf::Executor executor; for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess); REQUIRE(hipMalloc(&res, 1*sizeof(T)) == hipSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.transform_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; }, [] __device__ (T a) { return a + 1; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(::fabs(sum+n+1000-sol) < 0.0001); REQUIRE(hipFree(gpu) == hipSuccess); REQUIRE(hipFree(res) == hipSuccess); } } TEST_CASE("capture_transform_reduce.int" * doctest::timeout(300)) { transform_reduce<int, tf::cudaFlowCapturer>(); } 
TEST_CASE("capture_transform_reduce.float" * doctest::timeout(300)) { transform_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_transform_reduce.double" * doctest::timeout(300)) { transform_reduce<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // transform_uninitialized_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void transform_uninitialized_reduce() { tf::Executor executor; for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(hipMalloc(&gpu, n*sizeof(T)) == hipSuccess); REQUIRE(hipMalloc(&res, 1*sizeof(T)) == hipSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.transform_uninitialized_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; }, [] __device__ (T a) { return a + 1; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(::fabs(sum+n-sol) < 0.0001); REQUIRE(hipFree(gpu) == hipSuccess); REQUIRE(hipFree(res) == hipSuccess); } } TEST_CASE("capture_transform_uninitialized_reduce.int" * doctest::timeout(300)) { transform_uninitialized_reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_transform_uninitialized_reduce.float" * doctest::timeout(300)) { transform_uninitialized_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_transform_uninitialized_reduce.double" * doctest::timeout(300)) { transform_uninitialized_reduce<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // scan // ---------------------------------------------------------------------------- template <typename T, typename F> void scan() { tf::Executor executor; tf::Taskflow taskflow; for(int N=1; N<=1234567; N = N*2 + 1) { taskflow.clear(); auto data1 = tf::cuda_malloc_shared<int>(N); auto data2 = tf::cuda_malloc_shared<int>(N); auto scan1 = tf::cuda_malloc_shared<int>(N); auto scan2 = tf::cuda_malloc_shared<int>(N); // initialize the data for(int i=0; i<N; i++) { data1[i] = i; data2[i] = i; } // perform reduction taskflow.emplace([&](F& cudaflow){ // inclusive scan cudaflow.inclusive_scan( data1, data1+N, scan1, [] __device__ (int a, int b){ return a+b; } ); // exclusive scan cudaflow.exclusive_scan( data2, data2+N, scan2, [] __device__ (int a, int b){ return a+b; } ); }); executor.run(taskflow).wait(); // inspect for(int i=1; i<N; i++) { if(scan1[i] != scan1[i-1] + data1[i]) { throw std::runtime_error("incorrect inclusive scan result"); } if(scan2[i] != scan2[i-1] + data2[i-1]) { throw std::runtime_error("incorrect exclusive scan result"); } } REQUIRE(hipFree(data1) == hipSuccess); REQUIRE(hipFree(data2) == hipSuccess); REQUIRE(hipFree(scan1) == hipSuccess); REQUIRE(hipFree(scan2) == hipSuccess); } } TEST_CASE("capture_scan.int" * doctest::timeout(300)) { scan<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_scan.float" * doctest::timeout(300)) { scan<float, tf::cudaFlowCapturer>(); } /*// -------------------------------------------------------------------------- // row-major transpose // 
---------------------------------------------------------------------------- // Disable for now - better to use cublasFlowCapturer template <typename T> __global__ void verify(const T* din_mat, const T* dout_mat, bool* check, size_t rows, size_t cols) { size_t tid = blockDim.x * blockIdx.x + threadIdx.x; size_t size = rows * cols; for(; tid < size; tid += gridDim.x * blockDim.x) { if(din_mat[tid] != dout_mat[tid / cols + (tid % cols) * rows]) { *check = false; return; } } } template <typename T> void transpose() { tf::Executor executor; for(size_t rows = 1; rows <= 7999; rows*=2+3) { for(size_t cols = 1; cols <= 8021; cols*=3+5) { tf::Taskflow taskflow; std::vector<T> hinput_mat(rows * cols); std::generate_n(hinput_mat.begin(), rows * cols, [](){ return ::rand(); }); T* dinput_mat {nullptr}; T* doutput_mat {nullptr}; bool* check {nullptr}; //allocate auto allocate = taskflow.emplace([&]() { REQUIRE(hipMalloc(&dinput_mat, (rows * cols) * sizeof(T)) == hipSuccess); REQUIRE(hipMalloc(&doutput_mat, (rows * cols) * sizeof(T)) == hipSuccess); REQUIRE(hipMallocManaged(&check, sizeof(bool)) == hipSuccess); *check = true; }).name("allocate"); //transpose auto cudaflow = taskflow.emplace([&](tf::cudaFlow& cf) { auto h2d_input_t = cf.copy(dinput_mat, hinput_mat.data(), rows * cols).name("h2d"); auto kernel_t = tf::cudaBLAF(cf).transpose( dinput_mat, doutput_mat, rows, cols ); auto verify_t = cf.kernel( 32, 512, 0, verify<T>, dinput_mat, doutput_mat, check, rows, cols ); h2d_input_t.precede(kernel_t); kernel_t.precede(verify_t); }).name("transpose"); //free memory auto deallocate = taskflow.emplace([&](){ REQUIRE(hipFree(dinput_mat) == hipSuccess); REQUIRE(hipFree(doutput_mat) == hipSuccess); }).name("deallocate"); allocate.precede(cudaflow); cudaflow.precede(deallocate); executor.run(taskflow).wait(); REQUIRE(*check); } } } TEST_CASE("transpose.int" * doctest::timeout(300) ) { transpose<int>(); } TEST_CASE("transpose.float" * doctest::timeout(300) ) { transpose<float>(); } TEST_CASE("transpose.double" * doctest::timeout(300) ) { transpose<double>(); } // ---------------------------------------------------------------------------- // row-major matrix multiplication // ---------------------------------------------------------------------------- template <typename T> void matmul() { tf::Taskflow taskflow; tf::Executor executor; std::vector<T> a, b, c; for(int m=1; m<=1992; m=2*m+1) { for(int k=1; k<=1012; k=2*k+3) { for(int n=1; n<=1998; n=2*n+8) { taskflow.clear(); T* ha {nullptr}; T* hb {nullptr}; T* hc {nullptr}; T* da {nullptr}; T* db {nullptr}; T* dc {nullptr}; T val_a = ::rand()%5-1; T val_b = ::rand()%7-3; auto hosta = taskflow.emplace([&](){ a.resize(m*k); std::fill_n(a.begin(), m*k, val_a); ha = a.data(); REQUIRE(hipMalloc(&da, m*k*sizeof(T)) == hipSuccess); }).name("ha"); auto hostb = taskflow.emplace([&](){ b.resize(k*n); std::fill_n(b.begin(), k*n, val_b); hb = b.data(); REQUIRE(hipMalloc(&db, k*n*sizeof(T)) == hipSuccess); }).name("hb"); auto hostc = taskflow.emplace([&](){ c.resize(m*n); hc = c.data(); REQUIRE(hipMalloc(&dc, m*n*sizeof(T)) == hipSuccess); }).name("hc"); auto cuda = taskflow.emplace([&](tf::cudaFlow& cf){ auto pa = cf.copy(da, ha, m*k); auto pb = cf.copy(db, hb, k*n); auto op = tf::cudaBLAF(cf).matmul( da, db, dc, m, k, n ).name("op"); auto cc = cf.copy(hc, dc, m*n).name("cc"); op.precede(cc).succeed(pa, pb); }); cuda.succeed(hosta, hostb, hostc); executor.run(taskflow).wait(); int ans = val_a*val_b*k; for(const auto& x : c) { REQUIRE((int)x == ans); } REQUIRE(hipFree(da) 
== hipSuccess); REQUIRE(hipFree(db) == hipSuccess); REQUIRE(hipFree(dc) == hipSuccess); } } } } TEST_CASE("matmul.int" * doctest::timeout(300) ) { matmul<int>(); } TEST_CASE("matmul.float" * doctest::timeout(300) ) { matmul<float>(); } TEST_CASE("matmul.double" * doctest::timeout(300) ) { matmul<double>(); }*/
76e76d13929add187b74d87b767e18a642c43372.cu
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include <doctest.h> #include <taskflow/taskflow.hpp> #include <taskflow/cudaflow.hpp> constexpr float eps = 0.0001f; // -------------------------------------------------------- // Testcase: add2 // -------------------------------------------------------- template <typename T, typename F> void add2() { //const unsigned N = 1<<20; for(size_t N=1; N<=(1<<20); N <<= 1) { tf::Taskflow taskflow; tf::Executor executor; T v1 = ::rand() % 100; T v2 = ::rand() % 100; T v3 = v1 + v2; std::vector<T> hx, hy; T* dx {nullptr}; T* dy {nullptr}; // allocate x auto allocate_x = taskflow.emplace([&]() { hx.resize(N, v1); REQUIRE(cudaMalloc(&dx, N*sizeof(T)) == cudaSuccess); }).name("allocate_x"); // allocate y auto allocate_y = taskflow.emplace([&]() { hy.resize(N, v2); REQUIRE(cudaMalloc(&dy, N*sizeof(T)) == cudaSuccess); }).name("allocate_y"); // axpy auto cudaflow = taskflow.emplace([&](F& cf) { auto h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x"); auto h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y"); auto d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x"); auto d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y"); //auto kernel = cf.add(dx, N, dx, dy); auto kernel = cf.transform( dx, dx+N, [] __device__ (T& v1, T& v2) { return v1 + v2; }, dx, dy ); kernel.succeed(h2d_x, h2d_y) .precede(d2h_x, d2h_y); }).name("saxpy"); cudaflow.succeed(allocate_x, allocate_y); // Add a verification task auto verifier = taskflow.emplace([&](){ for (size_t i = 0; i < N; i++) { REQUIRE(std::fabs(hx[i] - v3) < eps); } }).succeed(cudaflow).name("verify"); // free memory auto deallocate_x = taskflow.emplace([&](){ REQUIRE(cudaFree(dx) == cudaSuccess); }).name("deallocate_x"); auto deallocate_y = taskflow.emplace([&](){ REQUIRE(cudaFree(dy) == cudaSuccess); }).name("deallocate_y"); verifier.precede(deallocate_x, deallocate_y); executor.run(taskflow).wait(); } } TEST_CASE("add2.int" * doctest::timeout(300)) { add2<int, tf::cudaFlow>(); } TEST_CASE("add2.float" * doctest::timeout(300)) { add2<float, tf::cudaFlow>(); } TEST_CASE("add2.double" * doctest::timeout(300)) { add2<double, tf::cudaFlow>(); } TEST_CASE("capture_add2.int" * doctest::timeout(300)) { add2<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add2.float" * doctest::timeout(300)) { add2<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add2.double" * doctest::timeout(300)) { add2<double, tf::cudaFlowCapturer>(); } // -------------------------------------------------------- // Testcase: add3 // -------------------------------------------------------- template <typename T, typename F> void add3() { //const unsigned N = 1<<20; for(size_t N=1; N<=(1<<20); N <<= 1) { tf::Taskflow taskflow; tf::Executor executor; T v1 = ::rand() % 100; T v2 = ::rand() % 100; T v3 = ::rand() % 100; T v4 = v1 + v2 + v3; std::vector<T> hx, hy, hz; T* dx {nullptr}; T* dy {nullptr}; T* dz {nullptr}; // allocate x auto allocate_x = taskflow.emplace([&]() { hx.resize(N, v1); REQUIRE(cudaMalloc(&dx, N*sizeof(T)) == cudaSuccess); }).name("allocate_x"); // allocate y auto allocate_y = taskflow.emplace([&]() { hy.resize(N, v2); REQUIRE(cudaMalloc(&dy, N*sizeof(T)) == cudaSuccess); }).name("allocate_y"); // allocate z auto allocate_z = taskflow.emplace([&]() { hz.resize(N, v3); REQUIRE(cudaMalloc(&dz, N*sizeof(T)) == cudaSuccess); }).name("allocate_y"); // saxpy auto cudaflow = taskflow.emplace([&](F& cf) { auto h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x"); auto h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y"); auto h2d_z = cf.copy(dz, hz.data(), 
N).name("h2d_z"); auto d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x"); auto d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y"); auto d2h_z = cf.copy(hz.data(), dz, N).name("d2h_z"); //auto kernel = cf.add(dx, N, dx, dy, dz); auto kernel = cf.transform( dx, dx+N, [] __device__ (T& v1, T& v2, T& v3) { return v1 + v2 + v3; }, dx, dy, dz ); kernel.succeed(h2d_x, h2d_y, h2d_z) .precede(d2h_x, d2h_y, d2h_z); }).name("saxpy"); cudaflow.succeed(allocate_x, allocate_y, allocate_z); // Add a verification task auto verifier = taskflow.emplace([&](){ for (size_t i = 0; i < N; i++) { REQUIRE(std::fabs(hx[i] - v4) < eps); } }).succeed(cudaflow).name("verify"); // free memory auto deallocate_x = taskflow.emplace([&](){ REQUIRE(cudaFree(dx) == cudaSuccess); }).name("deallocate_x"); auto deallocate_y = taskflow.emplace([&](){ REQUIRE(cudaFree(dy) == cudaSuccess); }).name("deallocate_y"); auto deallocate_z = taskflow.emplace([&](){ REQUIRE(cudaFree(dz) == cudaSuccess); }).name("deallocate_z"); verifier.precede(deallocate_x, deallocate_y, deallocate_z); executor.run(taskflow).wait(); } } TEST_CASE("add3.int" * doctest::timeout(300)) { add3<int, tf::cudaFlow>(); } TEST_CASE("add3.float" * doctest::timeout(300)) { add3<float, tf::cudaFlow>(); } TEST_CASE("add3.double" * doctest::timeout(300)) { add3<double, tf::cudaFlow>(); } TEST_CASE("capture_add3.int" * doctest::timeout(300)) { add3<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add3.float" * doctest::timeout(300)) { add3<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_add3.double" * doctest::timeout(300)) { add3<double, tf::cudaFlowCapturer>(); } // -------------------------------------------------------- // Testcase: multiply2 // -------------------------------------------------------- template <typename T, typename F> void multiply2() { //const unsigned N = 1<<20; for(size_t N=1; N<=(1<<20); N <<= 1) { tf::Taskflow taskflow; tf::Executor executor; T v1 = ::rand() % 100; T v2 = ::rand() % 100; T v3 = v1 * v2; std::vector<T> hx, hy; T* dx {nullptr}; T* dy {nullptr}; // allocate x auto allocate_x = taskflow.emplace([&]() { hx.resize(N, v1); REQUIRE(cudaMalloc(&dx, N*sizeof(T)) == cudaSuccess); }).name("allocate_x"); // allocate y auto allocate_y = taskflow.emplace([&]() { hy.resize(N, v2); REQUIRE(cudaMalloc(&dy, N*sizeof(T)) == cudaSuccess); }).name("allocate_y"); // saxpy auto cudaflow = taskflow.emplace([&](F& cf) { auto h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x"); auto h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y"); auto d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x"); auto d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y"); //auto kernel = cf.multiply(dx, N, dx, dy); auto kernel = cf.transform( dx, dx+N, [] __device__ (T& v1, T& v2) { return v1 * v2; }, dx, dy ); kernel.succeed(h2d_x, h2d_y) .precede(d2h_x, d2h_y); }).name("saxpy"); cudaflow.succeed(allocate_x, allocate_y); // Add a verification task auto verifier = taskflow.emplace([&](){ for (size_t i = 0; i < N; i++) { REQUIRE(std::fabs(hx[i] - v3) < eps); } }).succeed(cudaflow).name("verify"); // free memory auto deallocate_x = taskflow.emplace([&](){ REQUIRE(cudaFree(dx) == cudaSuccess); }).name("deallocate_x"); auto deallocate_y = taskflow.emplace([&](){ REQUIRE(cudaFree(dy) == cudaSuccess); }).name("deallocate_y"); verifier.precede(deallocate_x, deallocate_y); executor.run(taskflow).wait(); } } TEST_CASE("multiply2.int" * doctest::timeout(300)) { multiply2<int, tf::cudaFlow>(); } TEST_CASE("multiply2.float" * doctest::timeout(300)) { multiply2<float, tf::cudaFlow>(); } 
TEST_CASE("multiply2.double" * doctest::timeout(300)) { multiply2<double, tf::cudaFlow>(); } TEST_CASE("capture_multiply2.int" * doctest::timeout(300)) { multiply2<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_multiply2.float" * doctest::timeout(300)) { multiply2<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_multiply2.double" * doctest::timeout(300)) { multiply2<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // for_each // ---------------------------------------------------------------------------- template <typename T, typename F> void for_each() { for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T* cpu = nullptr; T* gpu = nullptr; auto cputask = taskflow.emplace([&](){ cpu = static_cast<T*>(std::calloc(n, sizeof(T))); REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(cpu, gpu, n); auto h2d = cf.copy(gpu, cpu, n); auto kernel = cf.for_each( gpu, gpu+n, [] __device__ (T& val) { val = 65536; } ); h2d.precede(kernel); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); for(int i=0; i<n; i++) { REQUIRE(std::fabs(cpu[i] - (T)65536) < eps); } std::free(cpu); REQUIRE(cudaFree(gpu) == cudaSuccess); } } TEST_CASE("for_each.int" * doctest::timeout(300)) { for_each<int, tf::cudaFlow>(); } TEST_CASE("for_each.float" * doctest::timeout(300)) { for_each<float, tf::cudaFlow>(); } TEST_CASE("for_each.double" * doctest::timeout(300)) { for_each<double, tf::cudaFlow>(); } TEST_CASE("capture_for_each.int" * doctest::timeout(300)) { for_each<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_for_each.float" * doctest::timeout(300)) { for_each<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_for_each.double" * doctest::timeout(300)) { for_each<double, tf::cudaFlowCapturer>(); } // -------------------------------------------------------- // Testcase: for_each_index // -------------------------------------------------------- template <typename T, typename F> void for_each_index() { for(int n=10; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T* cpu = nullptr; T* gpu = nullptr; auto cputask = taskflow.emplace([&](){ cpu = static_cast<T*>(std::calloc(n, sizeof(T))); REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); }); auto gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(cpu, gpu, n); auto h2d = cf.copy(gpu, cpu, n); //auto kernel = cf.for_each_index(gpu, n, [] __device__ (T& value){ value = 17; }); auto kernel1 = cf.for_each_index( 0, n, 2, [gpu] __device__ (int i) { gpu[i] = 17; } ); auto kernel2 = cf.for_each_index( 1, n, 2, [=] __device__ (int i) { gpu[i] = -17; } ); h2d.precede(kernel1, kernel2); d2h.succeed(kernel1, kernel2); }); cputask.precede(gputask); executor.run(taskflow).wait(); for(int i=0; i<n; i++) { if(i % 2 == 0) { REQUIRE(std::fabs(cpu[i] - (T)17) < eps); } else { REQUIRE(std::fabs(cpu[i] - (T)(-17)) < eps); } } std::free(cpu); REQUIRE(cudaFree(gpu) == cudaSuccess); } } TEST_CASE("for_each_index.int" * doctest::timeout(300)) { for_each_index<int, tf::cudaFlow>(); } TEST_CASE("for_each_index.float" * doctest::timeout(300)) { for_each_index<float, tf::cudaFlow>(); } TEST_CASE("for_each_index.double" * doctest::timeout(300)) { for_each_index<double, tf::cudaFlow>(); } TEST_CASE("capture_for_each_index.int" * doctest::timeout(300)) { for_each_index<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_for_each_index.float" * 
doctest::timeout(300)) { for_each_index<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_for_each_index.double" * doctest::timeout(300)) { for_each_index<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // transform // ---------------------------------------------------------------------------- template <typename F> void transform() { for(unsigned n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; int* htgt = nullptr; int* tgt = nullptr; int* hsrc1 = nullptr; int* src1 = nullptr; float* hsrc2 = nullptr; float* src2 = nullptr; double* hsrc3 = nullptr; double* src3 = nullptr; auto htgttask = taskflow.emplace([&](){ htgt = static_cast<int*>(std::calloc(n, sizeof(int))); hsrc1 = static_cast<int*>(std::calloc(n, sizeof(int))); hsrc2 = static_cast<float*>(std::calloc(n, sizeof(float))); hsrc3 = static_cast<double*>(std::calloc(n, sizeof(double))); REQUIRE(cudaMalloc(&tgt, n*sizeof(int)) == cudaSuccess); REQUIRE(cudaMalloc(&src1, n*sizeof(int)) == cudaSuccess); REQUIRE(cudaMalloc(&src2, n*sizeof(float)) == cudaSuccess); REQUIRE(cudaMalloc(&src3, n*sizeof(double)) == cudaSuccess); }); auto gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(htgt, tgt, n); auto d2h3 = cf.copy(hsrc3, src3, n); auto d2h2 = cf.copy(hsrc2, src2, n); auto d2h1 = cf.copy(hsrc1, src1, n); auto kernel = cf.transform( tgt, tgt+n, [] __device__ (int& v1, float& v2, double& v3) -> int { v1 = 1; v2 = 3.0f; v3 = 5.0; return 17; }, src1, src2, src3 ); auto h2d = cf.copy(tgt, htgt, n); h2d.precede(kernel); kernel.precede(d2h, d2h1, d2h2, d2h3); }); htgttask.precede(gputask); executor.run(taskflow).wait(); for(unsigned i=0; i<n; ++i) { REQUIRE(htgt[i] == 17); REQUIRE(hsrc1[i] == 1); REQUIRE(std::fabs(hsrc2[i] - 3.0f) < eps); REQUIRE(std::fabs(hsrc3[i] - 5.0) < eps); } std::free(htgt); std::free(hsrc1); std::free(hsrc2); std::free(hsrc3); REQUIRE(cudaFree(tgt) == cudaSuccess); REQUIRE(cudaFree(src1) == cudaSuccess); REQUIRE(cudaFree(src2) == cudaSuccess); REQUIRE(cudaFree(src3) == cudaSuccess); } } TEST_CASE("transform" * doctest::timeout(300)) { transform<tf::cudaFlow>(); } TEST_CASE("capture_transform" * doctest::timeout(300) ) { transform<tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void reduce() { for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&res, 1*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.reduce( gpu, gpu+n, res, [] __device__ (T a, T b) mutable { return a + b; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(std::fabs(sum-sol+1000) < 0.0001); REQUIRE(cudaFree(gpu) == cudaSuccess); REQUIRE(cudaFree(res) == cudaSuccess); } } TEST_CASE("reduce.int" * doctest::timeout(300)) { reduce<int, tf::cudaFlow>(); } TEST_CASE("reduce.float" * doctest::timeout(300)) { reduce<float, 
tf::cudaFlow>(); } TEST_CASE("reduce.double" * doctest::timeout(300)) { reduce<double, tf::cudaFlow>(); } TEST_CASE("capture_reduce.int" * doctest::timeout(300)) { reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_reduce.float" * doctest::timeout(300)) { reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_reduce.double" * doctest::timeout(300)) { reduce<double, tf::cudaFlow>(); } // ---------------------------------------------------------------------------- // uninitialized_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void uninitialized_reduce() { for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; tf::Executor executor; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&res, 1*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.uninitialized_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(std::fabs(sum-sol) < 0.0001); REQUIRE(cudaFree(gpu) == cudaSuccess); REQUIRE(cudaFree(res) == cudaSuccess); } } TEST_CASE("uninitialized_reduce.int" * doctest::timeout(300)) { uninitialized_reduce<int, tf::cudaFlow>(); } TEST_CASE("uninitialized_reduce.float" * doctest::timeout(300)) { uninitialized_reduce<float, tf::cudaFlow>(); } TEST_CASE("uninitialized_reduce.double" * doctest::timeout(300)) { uninitialized_reduce<double, tf::cudaFlow>(); } TEST_CASE("capture_uninitialized_reduce.int" * doctest::timeout(300)) { uninitialized_reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_uninitialized_reduce.float" * doctest::timeout(300)) { uninitialized_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_uninitialized_reduce.double" * doctest::timeout(300)) { uninitialized_reduce<double, tf::cudaFlow>(); } // ---------------------------------------------------------------------------- // transform_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void transform_reduce() { tf::Executor executor; for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&res, 1*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.transform_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; }, [] __device__ (T a) { return a + 1; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(std::fabs(sum+n+1000-sol) < 0.0001); REQUIRE(cudaFree(gpu) == cudaSuccess); REQUIRE(cudaFree(res) == cudaSuccess); } } TEST_CASE("capture_transform_reduce.int" * doctest::timeout(300)) { transform_reduce<int, 
tf::cudaFlowCapturer>(); } TEST_CASE("capture_transform_reduce.float" * doctest::timeout(300)) { transform_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_transform_reduce.double" * doctest::timeout(300)) { transform_reduce<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // transform_uninitialized_reduce // ---------------------------------------------------------------------------- template <typename T, typename F> void transform_uninitialized_reduce() { tf::Executor executor; for(int n=1; n<=1234567; n = n*2 + 1) { tf::Taskflow taskflow; T sum = 0; std::vector<T> cpu(n); for(auto& i : cpu) { i = ::rand()%100-50; sum += i; } T sol; T* gpu = nullptr; T* res = nullptr; auto cputask = taskflow.emplace([&](){ REQUIRE(cudaMalloc(&gpu, n*sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&res, 1*sizeof(T)) == cudaSuccess); }); tf::Task gputask; gputask = taskflow.emplace([&](F& cf) { auto d2h = cf.copy(&sol, res, 1); auto h2d = cf.copy(gpu, cpu.data(), n); auto set = cf.single_task([res] __device__ () mutable { *res = 1000; }); auto kernel = cf.transform_uninitialized_reduce( gpu, gpu+n, res, [] __device__ (T a, T b) { return a + b; }, [] __device__ (T a) { return a + 1; } ); kernel.succeed(h2d, set); d2h.succeed(kernel); }); cputask.precede(gputask); executor.run(taskflow).wait(); REQUIRE(std::fabs(sum+n-sol) < 0.0001); REQUIRE(cudaFree(gpu) == cudaSuccess); REQUIRE(cudaFree(res) == cudaSuccess); } } TEST_CASE("capture_transform_uninitialized_reduce.int" * doctest::timeout(300)) { transform_uninitialized_reduce<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_transform_uninitialized_reduce.float" * doctest::timeout(300)) { transform_uninitialized_reduce<float, tf::cudaFlowCapturer>(); } TEST_CASE("capture_transform_uninitialized_reduce.double" * doctest::timeout(300)) { transform_uninitialized_reduce<double, tf::cudaFlowCapturer>(); } // ---------------------------------------------------------------------------- // scan // ---------------------------------------------------------------------------- template <typename T, typename F> void scan() { tf::Executor executor; tf::Taskflow taskflow; for(int N=1; N<=1234567; N = N*2 + 1) { taskflow.clear(); auto data1 = tf::cuda_malloc_shared<int>(N); auto data2 = tf::cuda_malloc_shared<int>(N); auto scan1 = tf::cuda_malloc_shared<int>(N); auto scan2 = tf::cuda_malloc_shared<int>(N); // initialize the data for(int i=0; i<N; i++) { data1[i] = i; data2[i] = i; } // perform reduction taskflow.emplace([&](F& cudaflow){ // inclusive scan cudaflow.inclusive_scan( data1, data1+N, scan1, [] __device__ (int a, int b){ return a+b; } ); // exclusive scan cudaflow.exclusive_scan( data2, data2+N, scan2, [] __device__ (int a, int b){ return a+b; } ); }); executor.run(taskflow).wait(); // inspect for(int i=1; i<N; i++) { if(scan1[i] != scan1[i-1] + data1[i]) { throw std::runtime_error("incorrect inclusive scan result"); } if(scan2[i] != scan2[i-1] + data2[i-1]) { throw std::runtime_error("incorrect exclusive scan result"); } } REQUIRE(cudaFree(data1) == cudaSuccess); REQUIRE(cudaFree(data2) == cudaSuccess); REQUIRE(cudaFree(scan1) == cudaSuccess); REQUIRE(cudaFree(scan2) == cudaSuccess); } } TEST_CASE("capture_scan.int" * doctest::timeout(300)) { scan<int, tf::cudaFlowCapturer>(); } TEST_CASE("capture_scan.float" * doctest::timeout(300)) { scan<float, tf::cudaFlowCapturer>(); } /*// -------------------------------------------------------------------------- // row-major transpose // 
---------------------------------------------------------------------------- // Disable for now - better to use cublasFlowCapturer template <typename T> __global__ void verify(const T* din_mat, const T* dout_mat, bool* check, size_t rows, size_t cols) { size_t tid = blockDim.x * blockIdx.x + threadIdx.x; size_t size = rows * cols; for(; tid < size; tid += gridDim.x * blockDim.x) { if(din_mat[tid] != dout_mat[tid / cols + (tid % cols) * rows]) { *check = false; return; } } } template <typename T> void transpose() { tf::Executor executor; for(size_t rows = 1; rows <= 7999; rows*=2+3) { for(size_t cols = 1; cols <= 8021; cols*=3+5) { tf::Taskflow taskflow; std::vector<T> hinput_mat(rows * cols); std::generate_n(hinput_mat.begin(), rows * cols, [](){ return ::rand(); }); T* dinput_mat {nullptr}; T* doutput_mat {nullptr}; bool* check {nullptr}; //allocate auto allocate = taskflow.emplace([&]() { REQUIRE(cudaMalloc(&dinput_mat, (rows * cols) * sizeof(T)) == cudaSuccess); REQUIRE(cudaMalloc(&doutput_mat, (rows * cols) * sizeof(T)) == cudaSuccess); REQUIRE(cudaMallocManaged(&check, sizeof(bool)) == cudaSuccess); *check = true; }).name("allocate"); //transpose auto cudaflow = taskflow.emplace([&](tf::cudaFlow& cf) { auto h2d_input_t = cf.copy(dinput_mat, hinput_mat.data(), rows * cols).name("h2d"); auto kernel_t = tf::cudaBLAF(cf).transpose( dinput_mat, doutput_mat, rows, cols ); auto verify_t = cf.kernel( 32, 512, 0, verify<T>, dinput_mat, doutput_mat, check, rows, cols ); h2d_input_t.precede(kernel_t); kernel_t.precede(verify_t); }).name("transpose"); //free memory auto deallocate = taskflow.emplace([&](){ REQUIRE(cudaFree(dinput_mat) == cudaSuccess); REQUIRE(cudaFree(doutput_mat) == cudaSuccess); }).name("deallocate"); allocate.precede(cudaflow); cudaflow.precede(deallocate); executor.run(taskflow).wait(); REQUIRE(*check); } } } TEST_CASE("transpose.int" * doctest::timeout(300) ) { transpose<int>(); } TEST_CASE("transpose.float" * doctest::timeout(300) ) { transpose<float>(); } TEST_CASE("transpose.double" * doctest::timeout(300) ) { transpose<double>(); } // ---------------------------------------------------------------------------- // row-major matrix multiplication // ---------------------------------------------------------------------------- template <typename T> void matmul() { tf::Taskflow taskflow; tf::Executor executor; std::vector<T> a, b, c; for(int m=1; m<=1992; m=2*m+1) { for(int k=1; k<=1012; k=2*k+3) { for(int n=1; n<=1998; n=2*n+8) { taskflow.clear(); T* ha {nullptr}; T* hb {nullptr}; T* hc {nullptr}; T* da {nullptr}; T* db {nullptr}; T* dc {nullptr}; T val_a = ::rand()%5-1; T val_b = ::rand()%7-3; auto hosta = taskflow.emplace([&](){ a.resize(m*k); std::fill_n(a.begin(), m*k, val_a); ha = a.data(); REQUIRE(cudaMalloc(&da, m*k*sizeof(T)) == cudaSuccess); }).name("ha"); auto hostb = taskflow.emplace([&](){ b.resize(k*n); std::fill_n(b.begin(), k*n, val_b); hb = b.data(); REQUIRE(cudaMalloc(&db, k*n*sizeof(T)) == cudaSuccess); }).name("hb"); auto hostc = taskflow.emplace([&](){ c.resize(m*n); hc = c.data(); REQUIRE(cudaMalloc(&dc, m*n*sizeof(T)) == cudaSuccess); }).name("hc"); auto cuda = taskflow.emplace([&](tf::cudaFlow& cf){ auto pa = cf.copy(da, ha, m*k); auto pb = cf.copy(db, hb, k*n); auto op = tf::cudaBLAF(cf).matmul( da, db, dc, m, k, n ).name("op"); auto cc = cf.copy(hc, dc, m*n).name("cc"); op.precede(cc).succeed(pa, pb); }); cuda.succeed(hosta, hostb, hostc); executor.run(taskflow).wait(); int ans = val_a*val_b*k; for(const auto& x : c) { REQUIRE((int)x == ans); } 
REQUIRE(cudaFree(da) == cudaSuccess); REQUIRE(cudaFree(db) == cudaSuccess); REQUIRE(cudaFree(dc) == cudaSuccess); } } } } TEST_CASE("matmul.int" * doctest::timeout(300) ) { matmul<int>(); } TEST_CASE("matmul.float" * doctest::timeout(300) ) { matmul<float>(); } TEST_CASE("matmul.double" * doctest::timeout(300) ) { matmul<double>(); }*/
af88cd28126e7fe0e802ddb1424b74867e8eaf88.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kernel(int k, int m, int n, float* searchPoints, float* referencePoints, int* indices) { int minIndex; float minSquareSum, diff, squareSum; int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < m) { minSquareSum = -1; // Iterate over all reference points for (int nInd = 0; nInd < n; nInd++) { squareSum = 0; for (int kInd = 0; kInd < k; kInd++) { diff = searchPoints[k * tid + kInd] - referencePoints[k * nInd + kInd]; squareSum += (diff * diff); } if (minSquareSum < 0 || squareSum < minSquareSum) { minSquareSum = squareSum; minIndex = nInd; } } indices[tid] = minIndex; } }
af88cd28126e7fe0e802ddb1424b74867e8eaf88.cu
#include "includes.h" __global__ void kernel(int k, int m, int n, float* searchPoints, float* referencePoints, int* indices) { int minIndex; float minSquareSum, diff, squareSum; int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < m) { minSquareSum = -1; // Iterate over all reference points for (int nInd = 0; nInd < n; nInd++) { squareSum = 0; for (int kInd = 0; kInd < k; kInd++) { diff = searchPoints[k * tid + kInd] - referencePoints[k * nInd + kInd]; squareSum += (diff * diff); } if (minSquareSum < 0 || squareSum < minSquareSum) { minSquareSum = squareSum; minIndex = nInd; } } indices[tid] = minIndex; } }
7fe1159a0726d84452439380e4752284eee2aea6.hip
// !!! This is a file automatically generated by hipify!!! #include "raft_radon_gpu.h" #include "raft_radon_gpu_function.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #define INIC -1.0 /* Autor: Joao Carlos Cerqueira email: [email protected] */ extern "C" { void raft_radon_slantstack_gpu(float* h_output, float* h_input, int sizeImage, int nrays, int nangles) { float *d_output, *d_input; // Allocate GPU buffers for the output sinogram hipMalloc(&d_output, sizeof(float) * nrays * nangles); // Allocate GPU memory for input image and copy hipMalloc(&d_input, sizeof(float) * sizeImage * sizeImage); hipMemcpy(d_input, h_input, sizeof(float) * sizeImage * sizeImage, hipMemcpyHostToDevice); raft_radon_gpu_function(d_output, d_input, sizeImage, nrays, nangles, 0.0); // Copy output vector from GPU buffer to host memory. hipMemcpy(h_output, d_output, sizeof(float) * nrays * nangles, hipMemcpyDeviceToHost); hipFree(d_input); hipFree(d_output); hipDeviceReset(); } }
7fe1159a0726d84452439380e4752284eee2aea6.cu
#include "raft_radon_gpu.h" #include "raft_radon_gpu_function.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #define INIC -1.0 /* Autor: Joao Carlos Cerqueira email: [email protected] */ extern "C" { void raft_radon_slantstack_gpu(float* h_output, float* h_input, int sizeImage, int nrays, int nangles) { float *d_output, *d_input; // Allocate GPU buffers for the output sinogram cudaMalloc(&d_output, sizeof(float) * nrays * nangles); // Allocate GPU memory for input image and copy cudaMalloc(&d_input, sizeof(float) * sizeImage * sizeImage); cudaMemcpy(d_input, h_input, sizeof(float) * sizeImage * sizeImage, cudaMemcpyHostToDevice); raft_radon_gpu_function(d_output, d_input, sizeImage, nrays, nangles, 0.0); // Copy output vector from GPU buffer to host memory. cudaMemcpy(h_output, d_output, sizeof(float) * nrays * nangles, cudaMemcpyDeviceToHost); cudaFree(d_input); cudaFree(d_output); cudaDeviceReset(); } }