Dataset columns (string length range per column):

  hip_filename    string, 5 to 84 characters
  hip_content     string, 79 to 9.69M characters
  cuda_filename   string, 4 to 83 characters
  cuda_content    string, 19 to 9.69M characters
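The four columns pair each hipify-generated HIP source file with its original CUDA source. As a rough illustration only (the field names below simply mirror the column names; no such struct ships with the dataset), one record could be modeled in C++ as:

#include <string>

// Hypothetical record type mirroring the four dataset columns; illustrative only.
struct HipCudaPair {
  std::string hip_filename;   // 5 to 84 characters
  std::string hip_content;    // hipified source, up to ~9.69M characters
  std::string cuda_filename;  // 4 to 83 characters
  std::string cuda_content;   // original CUDA source, up to ~9.69M characters
};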
4eb375430b0126862c763c4ce5cc79de69eb954c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/op_kernel.h" using namespace tensorflow; using CPUDevice = Eigen::ThreadPoolDevice; using GPUDevice = Eigen::GpuDevice; #define SH_PER_TB 32768 REGISTER_OP("Bspmm") .Attr("adjoint_a: bool") .Attr("adjoint_b: bool") .Attr("TI: list(type)") .Attr("TV: list(type)") .Input("sp_ids: TI") .Input("sp_values: TV") .Input("sp_shape: TI") .Input("rhs: TV") .Output("out: TV") .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { int numTensor = c->num_outputs(); for (int i = 0; i < numTensor; ++i) { ::tensorflow::shape_inference::ShapeHandle sp_shape_shape; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(numTensor * 2 + i, &sp_shape_shape)); c->set_output(i, c->Matrix(c->Dim(sp_shape_shape, 0), c->Dim(c->input(numTensor * 3 + i), 1))); } return Status::OK(); }); REGISTER_OP("Bspmdt") .Attr("adjoint_a: bool") .Attr("adjoint_b: bool") .Attr("TI: list(type)") .Attr("TV: list(type)") .Input("sp_ids: TI") .Input("sp_values: TV") .Input("sp_shape: TI") .Input("rhs: float") .Output("out: TV") .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { int numTensor = c->num_outputs(); for (int i = 0; i < numTensor; ++i) { ::tensorflow::shape_inference::ShapeHandle sp_shape_shape; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(numTensor * 2 + i, &sp_shape_shape)); c->set_output(i, c->Matrix(c->Dim(sp_shape_shape, 0), c->Dim(c->input(numTensor * 3), 1))); } return Status::OK(); }); /* CUDA kernel: Initialize output tensors (matrices) */ template <typename idType, typename valType> __global__ void BatchedInitOutputs(valType **d_out, const idType* __restrict__ d_outRows, const idType nvector) { int target = blockIdx.y; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= d_outRows[target] * nvector) return; d_out[target][i] = 0; } /* CUDA kernel: Compute sparse tensor x dense matrix of batched tensors (matrices) One CUDA kernel for all SpMMs One thread block for one SpMM One subTB (1, 2, ..., 32 threads) for one non-zero */ template <typename idType, typename valType, int subTB, int adjoint_a> __global__ void BatchedSpMM(const idType* __restrict__ d_outNnz, idType **d_sp_ids, valType **d_sp_values, valType **d_rhs, valType **d_out, const idType nvector) { idType targetTensor = blockIdx.y; idType i = blockIdx.x * blockDim.x + threadIdx.x; idType targetNnz = i / subTB; if (targetNnz >= d_outNnz[targetTensor]) return; idType row_id = d_sp_ids[targetTensor][2 * targetNnz + adjoint_a] * nvector; idType column_id = d_sp_ids[targetTensor][2 * targetNnz + (1 - adjoint_a)] * nvector; valType val = d_sp_values[targetTensor][targetNnz]; for (idType localN = i & (subTB - 1); localN < nvector; localN += subTB) { atomicAdd(d_out[targetTensor] + row_id + localN, val * d_rhs[targetTensor][column_id + localN]); } } /* CUDA kernel: Compute sparse tensor x dense matrix of batched tensors (matrices) One CUDA kernel for all SpMMs One thread block for one SpMM One subTB (1, 2, ..., 32 threads) for one non-zero */ template <typename idType, typename valType, int sh_size, int subTB, int adjoint_a> __global__ void BatchedSpMM_small(const idType* __restrict__ d_outRows, const idType* __restrict__ d_nnz, idType **d_sp_ids, valType **d_sp_values, valType **d_rhs, valType **d_out, const idType outColumn) { const idType targetTensor = blockIdx.x; __shared__ valType tmp_out[sh_size]; const idType r 
= d_outRows[targetTensor]; const idType i = threadIdx.x; for (idType j = i; j < r * outColumn; j += blockDim.x) { tmp_out[j] = 0; } __syncthreads(); for (idType targetNnz = i / subTB; targetNnz < d_nnz[targetTensor]; targetNnz += (blockDim.x / subTB)) { idType row_id = d_sp_ids[targetTensor][(targetNnz << 1) + adjoint_a] * outColumn; idType column_id = d_sp_ids[targetTensor][(targetNnz << 1) + (1 - adjoint_a)] * outColumn; valType val = d_sp_values[targetTensor][targetNnz]; for (idType localN = i & (subTB - 1); localN < outColumn; localN += subTB) { atomicAdd(tmp_out + row_id + localN, val * d_rhs[targetTensor][column_id + localN]); } } __syncthreads(); for (idType j = i; j < r * outColumn; j += blockDim.x) { d_out[targetTensor][j] = tmp_out[j]; } } /* CUDA kernel: Compute sparse tensor x dense matrix of batched tensors (matrices) One CUDA kernel for all SpMMs One thread block for one partition One subTB (1, 2, ..., 32 threads) for one non-zero */ template <typename idType, typename valType, int max_size, int subTB, int adjoint_a> __global__ void BatchedSpMM_partition(const idType* __restrict__ d_outRows, const idType* __restrict__ d_nnz, const idType partition, const idType nPartition, const idType partition_bit, idType **d_sp_ids, valType **d_sp_values, valType **d_rhs, valType **d_out, const idType nvector) { __shared__ valType tmp_out[max_size]; const idType targetTensor = blockIdx.x / nPartition; const idType targetPartition = blockIdx.x % nPartition; const idType offset = targetPartition << partition_bit; const idType nrow = d_outRows[targetTensor]; const idType nnz = d_nnz[targetTensor]; const idType p = (targetPartition == nPartition - 1)? (nvector - partition * (nPartition - 1)) : partition; const idType i = threadIdx.x; for (idType j = i; j < (nrow << partition_bit); j += blockDim.x) { tmp_out[j] = 0; } __syncthreads(); for (idType targetNnz = i / subTB; targetNnz < nnz; targetNnz += (blockDim.x / subTB)) { idType row_id = d_sp_ids[targetTensor][(targetNnz << 1) + adjoint_a] << partition_bit; idType column_id = d_sp_ids[targetTensor][(targetNnz << 1) + (1 - adjoint_a)] * nvector + offset; valType val = d_sp_values[targetTensor][targetNnz]; for (idType localN = i & (subTB - 1); localN < p; localN += subTB) { atomicAdd(tmp_out + row_id + localN, val * d_rhs[targetTensor][column_id + localN]); } } __syncthreads(); for (idType j = i / subTB; j < nrow; j += (blockDim.x / subTB)) { for (idType k = i & (subTB -1); k < p; k += subTB) { d_out[targetTensor][j * nvector + offset + k] = tmp_out[(j << partition_bit) + k]; } } } template <typename idType, typename valType, int adjoint_a> void spmms_batched_coo_static(const idType *outRows, const idType *nnz, const idType *d_outRows, const idType *d_nnz, idType **d_sp_ids, valType **d_sp_values, valType **d_d_x, valType **d_d_y, const idType nvector, const idType batch) { const idType max_size = SH_PER_TB / sizeof(valType); idType max_nrow = 0; for (idType i = 0; i < batch; ++i) { if (max_nrow < outRows[i]) { max_nrow = outRows[i]; } } const int bs = 1024; // Any output matrix in batch can be placed on shared memory if (max_nrow * nvector <= max_size) { // Launch a kernel with appropriate subTB for nvector if (nvector > 16) { hipLaunchKernelGGL(( BatchedSpMM_small<idType, valType, max_size, 32, adjoint_a>), dim3(batch), dim3(bs), 0, 0, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 8) { hipLaunchKernelGGL(( BatchedSpMM_small<idType, valType, max_size, 16, adjoint_a>), dim3(batch), dim3(bs), 0, 0, 
d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 4) { hipLaunchKernelGGL(( BatchedSpMM_small<idType, valType, max_size, 8, adjoint_a>), dim3(batch), dim3(bs), 0, 0, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 2) { hipLaunchKernelGGL(( BatchedSpMM_small<idType, valType, max_size, 4, adjoint_a>), dim3(batch), dim3(bs), 0, 0, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 1) { hipLaunchKernelGGL(( BatchedSpMM_small<idType, valType, max_size, 2, adjoint_a>), dim3(batch), dim3(bs), 0, 0, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector == 1) { hipLaunchKernelGGL(( BatchedSpMM_small<idType, valType, max_size, 1, adjoint_a>), dim3(batch), dim3(bs), 0, 0, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else { return; } } else if (max_nrow > max_size) { /* Initialization */ idType max_thread = 0; for (idType i = 0; i < batch; ++i) { if (max_thread < outRows[i]) { max_thread = outRows[i]; } } max_thread *= nvector; idType gs = (max_thread + bs - 1) / bs; hipLaunchKernelGGL(( BatchedInitOutputs), dim3(dim3(gs, batch)), dim3(bs), 0, 0, d_d_y, d_outRows, nvector); /* Batched Spmm Kernel */ max_thread = 0; for (idType i = 0; i < batch; ++i) { if (max_thread < nnz[i]) { max_thread = nnz[i]; } } if (nvector > 16) { max_thread *= 32; gs = (max_thread + bs - 1) / bs; hipLaunchKernelGGL(( BatchedSpMM<idType, valType, 32, adjoint_a>), dim3(dim3(gs, batch)), dim3(bs), 0, 0, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 8) { max_thread *= 16; gs = (max_thread + bs - 1) / bs; hipLaunchKernelGGL(( BatchedSpMM<idType, valType, 16, adjoint_a>), dim3(dim3(gs, batch)), dim3(bs), 0, 0, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 4) { max_thread *= 8; gs = (max_thread + bs - 1) / bs; hipLaunchKernelGGL(( BatchedSpMM<idType, valType, 8, adjoint_a>), dim3(dim3(gs, batch)), dim3(bs), 0, 0, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 2) { max_thread *= 4; gs = (max_thread + bs - 1) / bs; hipLaunchKernelGGL(( BatchedSpMM<idType, valType, 4, adjoint_a>), dim3(dim3(gs, batch)), dim3(bs), 0, 0, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 1) { max_thread *= 2; gs = (max_thread + bs - 1) / bs; hipLaunchKernelGGL(( BatchedSpMM<idType, valType, 2, adjoint_a>), dim3(dim3(gs, batch)), dim3(bs), 0, 0, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector == 1) { gs = (max_thread + bs - 1) / bs; hipLaunchKernelGGL(( BatchedSpMM<idType, valType, 1, adjoint_a>), dim3(dim3(gs, batch)), dim3(bs), 0, 0, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else { return; } } else { idType p_bit = 0; while (((max_nrow << (p_bit + 1)) <= max_size) && ((1 << (p_bit + 1)) <= nvector)) { p_bit++; } idType p = 1 << p_bit; idType nPartition = (nvector + p - 1) / p; // One thread block for one partition if (nvector > 16) { hipLaunchKernelGGL(( BatchedSpMM_partition<idType, valType, max_size, 32, adjoint_a>), dim3(nPartition * batch), dim3(bs), 0, 0, d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 8) { hipLaunchKernelGGL(( BatchedSpMM_partition<idType, valType, max_size, 16, adjoint_a>), dim3(nPartition * batch), dim3(bs), 0, 0, d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 4) { hipLaunchKernelGGL(( 
BatchedSpMM_partition<idType, valType, max_size, 8, adjoint_a>), dim3(nPartition * batch), dim3(bs), 0, 0, d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 2) { hipLaunchKernelGGL(( BatchedSpMM_partition<idType, valType, max_size, 4, adjoint_a>), dim3(nPartition * batch), dim3(bs), 0, 0, d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 1) { hipLaunchKernelGGL(( BatchedSpMM_partition<idType, valType, max_size, 2, adjoint_a>), dim3(nPartition * batch), dim3(bs), 0, 0, d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector == 1) { hipLaunchKernelGGL(( BatchedSpMM_partition<idType, valType, max_size, 1, adjoint_a>), dim3(nPartition * batch), dim3(bs), 0, 0, d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else { return; } } } template <typename Device, typename idType, typename valType, bool ADJ_A, bool ADJ_B> struct BspmmFunctor { void operator()(OpKernelContext* context, Device &d, idType numTensor, idType *outRows, idType *outColumns, idType *nnz, idType **sp_ids, valType **sp_values, valType **rhs, valType **out); }; /* Functor for CPU */ template <typename idType, typename valType, bool ADJ_A, bool ADJ_B> struct BspmmFunctor<CPUDevice, idType, valType, ADJ_A, ADJ_B> { void operator()(OpKernelContext* context, const CPUDevice &d, idType numTensor, idType *outRows, idType *outColumns, idType *nnz, idType **sp_ids, valType **sp_values, valType **rhs, valType **out) { /* Initialization */ for (idType t = 0; t < numTensor; ++t) { for (idType i = 0; i < outRows[t] * outColumns[t]; ++i) { out[t][i] = 0; } } /* SpMM */ const int sp_inc = (ADJ_A)? 1 : 0; for (idType t = 0; t < numTensor; ++t) { for (idType i = 0; i < nnz[t]; ++i) { idType row_id = sp_ids[t][2 * i + sp_inc]; idType column_id = sp_ids[t][2 * i + (1 - sp_inc)]; valType val = sp_values[t][i]; for (idType j = 0; j < outColumns[t]; ++j) { idType r = (ADJ_B)? 
j * outRows[t] + column_id : column_id * outColumns[t] + j; out[t][row_id * outColumns[t] + j] += val * rhs[t][r]; } } } } }; /* Functor for GPU */ template <typename idType, typename valType, bool ADJ_A, bool ADJ_B> struct BspmmFunctor<GPUDevice, idType, valType, ADJ_A, ADJ_B> { void operator()(OpKernelContext* context, const GPUDevice &d, idType numTensor, idType *outRows, idType *outColumns, idType *nnz, idType **sp_ids, valType **sp_values, valType **rhs, valType **out) { /* Memory allocation and copyHtD */ const TensorShape s({numTensor}); Tensor d_outRows_t, d_outColumns_t, d_nnz_t, d_sp_ids_t, d_sp_values_t, d_rhs_t, d_out_t; context->allocate_temp(DT_INT64, s, &d_outRows_t); context->allocate_temp(DT_INT64, s, &d_outColumns_t); context->allocate_temp(DT_INT64, s, &d_nnz_t); context->allocate_temp(DT_INT64, s, &d_sp_ids_t); context->allocate_temp(DT_INT64, s, &d_sp_values_t); context->allocate_temp(DT_INT64, s, &d_rhs_t); context->allocate_temp(DT_INT64, s, &d_out_t); idType *d_outRows, *d_outColumns, *d_nnz; idType **d_sp_ids; valType **d_sp_values, **d_rhs, **d_out; d_outRows = (idType *)(d_outRows_t.vec<int64>().data()); d_outColumns = (idType *)(d_outColumns_t.vec<int64>().data()); d_nnz = (idType *)(d_nnz_t.vec<int64>().data()); d_sp_ids = (idType **)(d_sp_ids_t.vec<int64>().data()); d_sp_values = (valType **)(d_sp_values_t.vec<int64>().data()); d_rhs = (valType **)(d_rhs_t.vec<int64>().data()); d_out = (valType **)(d_out_t.vec<int64>().data()); hipMemcpyAsync(d_outRows, outRows, sizeof(idType) * numTensor, hipMemcpyHostToDevice); hipMemcpyAsync(d_outColumns, outColumns, sizeof(idType) * numTensor, hipMemcpyHostToDevice); hipMemcpyAsync(d_nnz, nnz, sizeof(idType) * numTensor, hipMemcpyHostToDevice); hipMemcpyAsync(d_sp_ids, sp_ids, sizeof(idType*) * numTensor, hipMemcpyHostToDevice); hipMemcpyAsync(d_sp_values, sp_values, sizeof(valType*) * numTensor, hipMemcpyHostToDevice); hipMemcpyAsync(d_rhs, rhs, sizeof(valType*) * numTensor, hipMemcpyHostToDevice); hipMemcpyAsync(d_out, out, sizeof(valType*) * numTensor, hipMemcpyHostToDevice); hipDeviceSynchronize(); if (ADJ_A == false) { spmms_batched_coo_static<idType, valType, 0>(outRows, nnz, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_rhs, d_out, outColumns[0], numTensor); } else { spmms_batched_coo_static<idType, valType, 1>(outRows, nnz, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_rhs, d_out, outColumns[0], numTensor); } } }; template <typename Device, typename idType, typename valType> class BspmmOp : public OpKernel { public: explicit BspmmOp(OpKernelConstruction* context) : OpKernel(context) { // Grab the attributes OP_REQUIRES_OK(context, context->GetAttr("adjoint_a", &adjoint_a)); OP_REQUIRES_OK(context, context->GetAttr("adjoint_b", &adjoint_b)); } void Compute(OpKernelContext* context) override { // Grab the input tensor OpInputList sp_shape_list(context, 0, 0); OpInputList sp_ids_list(context, 0, 0); OpInputList sp_values_list(context, 0, 0); OpInputList rhs_list(context, 0, 0); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_shape"), &sp_shape_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_ids"), &sp_ids_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_values"), &sp_values_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("rhs"), &rhs_list)); // int numTensor = n_tensor; int numTensor = rhs_list.size(); OpOutputList olist(context, 0, numTensor); Tensor** output_tensor = new Tensor*[numTensor]; idType *outColumns = new idType[numTensor]; idType *outRows = new 
idType[numTensor]; idType *nnz = new idType[numTensor]; idType **sp_ids = new idType*[numTensor]; valType **sp_values = new valType*[numTensor]; valType **rhs = new valType*[numTensor]; valType **output = new valType*[numTensor]; for (int i = 0; i < numTensor; ++i) { auto sp_shape_t = (sp_shape_list[i]).vec<idType>(); auto rhs_shape_t = (rhs_list[i]).shape(); outRows[i] = (adjoint_a)? sp_shape_t(1) : sp_shape_t(0); outColumns[i] = (adjoint_b)? rhs_shape_t.dim_size(0) : rhs_shape_t.dim_size(1); nnz[i] = (sp_values_list[i]).shape().dim_size(0); /* Create an output tensor */ TensorShape output_shape({outRows[i], outColumns[i]}); olist.allocate(i, output_shape, output_tensor + i); sp_ids[i] = (idType *)((sp_ids_list[i]).matrix<idType>().data()); sp_values[i] = (valType *)((sp_values_list[i]).vec<valType>().data()); rhs[i] = (valType *)((rhs_list[i]).matrix<valType>().data()); output[i] = (valType *)((output_tensor[i])->matrix<valType>().data()); } // Execute Batched SpMM #define GENERATE_ADJOOINT_PAIR(ADJ_A, ADJ_B) \ if (adjoint_a == ADJ_A && adjoint_b == ADJ_B) { \ BspmmFunctor<Device, idType, valType, ADJ_A, ADJ_B>()(context, context->eigen_device<Device>(), numTensor, outRows, outColumns, nnz, sp_ids, sp_values, rhs, output); \ } GENERATE_ADJOOINT_PAIR(false, false); GENERATE_ADJOOINT_PAIR(true, false); GENERATE_ADJOOINT_PAIR(false, true); GENERATE_ADJOOINT_PAIR(true, true); delete[] outRows; delete[] outColumns; delete[] nnz; delete[] sp_ids; delete[] sp_values; delete[] rhs; delete[] output; delete[] output_tensor; } private: bool adjoint_a; bool adjoint_b; }; template <typename Device, typename idType, typename valType> class BspmdtOp : public OpKernel { public: explicit BspmdtOp(OpKernelConstruction* context) : OpKernel(context) { // Grab the attributes OP_REQUIRES_OK(context, context->GetAttr("adjoint_a", &adjoint_a)); OP_REQUIRES_OK(context, context->GetAttr("adjoint_b", &adjoint_b)); } void Compute(OpKernelContext* context) override { // Grab the input tensor OpInputList sp_shape_list(context, 0, 0); OpInputList sp_ids_list(context, 0, 0); OpInputList sp_values_list(context, 0, 0); const Tensor *rhs_t; OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_shape"), &sp_shape_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_ids"), &sp_ids_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_values"), &sp_values_list)); OP_REQUIRES_OK(context, context->input(StringPiece("rhs"), &rhs_t)); // int numTensor = n_tensor; int numTensor = sp_shape_list.size(); OpOutputList olist(context, 0, numTensor); Tensor** output_tensor = new Tensor*[numTensor]; idType *outColumns = new idType[numTensor]; idType *outRows = new idType[numTensor]; idType *nnz = new idType[numTensor]; idType **sp_ids = new idType*[numTensor]; valType **sp_values = new valType*[numTensor]; valType **rhs = new valType*[numTensor]; valType **output = new valType*[numTensor]; for (int i = 0; i < numTensor; ++i) { auto sp_shape_t = (sp_shape_list[i]).vec<idType>(); auto rhs_shape_t = rhs_t->shape(); outRows[i] = (adjoint_a)? sp_shape_t(1) : sp_shape_t(0); outColumns[i] = (adjoint_b)? rhs_shape_t.dim_size(0) : rhs_shape_t.dim_size(1); nnz[i] = (sp_values_list[i]).shape().dim_size(0); /* Create an output tensor */ TensorShape output_shape({outRows[i], outColumns[i]}); olist.allocate(i, output_shape, output_tensor + i); sp_ids[i] = (idType *)((sp_ids_list[i]).matrix<idType>().data()); sp_values[i] = (valType *)((sp_values_list[i]).vec<valType>().data()); idType rhsRow = (adjoint_b)? 
rhs_shape_t.dim_size(1) : rhs_shape_t.dim_size(0); rhs[i] = (valType *)(rhs_t->matrix<valType>().data() + i * (rhsRow / numTensor) * outColumns[i]); output[i] = (valType *)((output_tensor[i])->matrix<valType>().data()); } // Execute Batched SpMM #define GENERATE_ADJOOINT_PAIR(ADJ_A, ADJ_B) \ if (adjoint_a == ADJ_A && adjoint_b == ADJ_B) { \ BspmmFunctor<Device, idType, valType, ADJ_A, ADJ_B>()(context, context->eigen_device<Device>(), numTensor, outRows, outColumns, nnz, sp_ids, sp_values, rhs, output); \ } GENERATE_ADJOOINT_PAIR(false, false); GENERATE_ADJOOINT_PAIR(true, false); GENERATE_ADJOOINT_PAIR(false, true); GENERATE_ADJOOINT_PAIR(true, true); delete[] outRows; delete[] outColumns; delete[] nnz; delete[] sp_ids; delete[] sp_values; delete[] rhs; delete[] output; delete[] output_tensor; } private: bool adjoint_a; bool adjoint_b; }; #define REGISTER_BSPMM_CPU(idType, valType) \ REGISTER_KERNEL_BUILDER(Name("Bspmm").Device(DEVICE_CPU) \ .TypeConstraint<idType>("TI") \ .TypeConstraint<valType>("TV") \ .HostMemory("sp_shape"), \ BspmmOp<CPUDevice, idType, valType>); #define REGISTER_BSPMM_GPU(idType, valType) \ REGISTER_KERNEL_BUILDER(Name("Bspmm").Device(DEVICE_GPU) \ .TypeConstraint<idType>("TI") \ .TypeConstraint<valType>("TV") \ .HostMemory("sp_shape"), \ BspmmOp<GPUDevice, idType, valType>); REGISTER_BSPMM_CPU(int32, float) // REGISTER_BSPMM_CPU(int32, double) REGISTER_BSPMM_CPU(int64, float) // REGISTER_BSPMM_CPU(int64, double) REGISTER_BSPMM_GPU(int32, float) // REGISTER_BSPMM_GPU(int32, double) REGISTER_BSPMM_GPU(int64, float) // REGISTER_BSPMM_GPU(int64, double) #define REGISTER_BSPMDT_CPU(idType, valType) \ REGISTER_KERNEL_BUILDER(Name("Bspmdt").Device(DEVICE_CPU) \ .TypeConstraint<idType>("TI") \ .TypeConstraint<valType>("TV") \ .HostMemory("sp_shape"), \ BspmdtOp<CPUDevice, idType, valType>); #define REGISTER_BSPMDT_GPU(idType, valType) \ REGISTER_KERNEL_BUILDER(Name("Bspmdt").Device(DEVICE_GPU) \ .TypeConstraint<idType>("TI") \ .TypeConstraint<valType>("TV") \ .HostMemory("sp_shape"), \ BspmdtOp<GPUDevice, idType, valType>); REGISTER_BSPMDT_CPU(int32, float) // REGISTER_BSPMDT_CPU(int32, double) REGISTER_BSPMDT_CPU(int64, float) // REGISTER_BSPMDT_CPU(int64, double) REGISTER_BSPMDT_GPU(int32, float) // REGISTER_BSPMDT_GPU(int32, double) REGISTER_BSPMDT_GPU(int64, float) // REGISTER_BSPMDT_GPU(int64, double)
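The .hip/.cu pair above differs mainly in kernel-launch syntax: hipify rewrites every CUDA triple-chevron launch into hipLaunchKernelGGL and prepends the "hip/hip_runtime.h" include. A minimal, self-contained sketch of that mapping (the scale kernel below is illustrative, not taken from the files above):

#include "hip/hip_runtime.h"

// Illustrative kernel; not part of the dataset files above.
__global__ void scale(float *x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float *d_x, float a, int n) {
  const int bs = 256;
  const int gs = (n + bs - 1) / bs;
  // CUDA form:  scale<<<gs, bs>>>(d_x, a, n);
  // HIP form produced by hipify (grid, block, shared-mem bytes, stream, args...):
  hipLaunchKernelGGL(scale, dim3(gs), dim3(bs), 0, 0, d_x, a, n);
}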
4eb375430b0126862c763c4ce5cc79de69eb954c.cu
#include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/op_kernel.h" using namespace tensorflow; using CPUDevice = Eigen::ThreadPoolDevice; using GPUDevice = Eigen::GpuDevice; #define SH_PER_TB 32768 REGISTER_OP("Bspmm") .Attr("adjoint_a: bool") .Attr("adjoint_b: bool") .Attr("TI: list(type)") .Attr("TV: list(type)") .Input("sp_ids: TI") .Input("sp_values: TV") .Input("sp_shape: TI") .Input("rhs: TV") .Output("out: TV") .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { int numTensor = c->num_outputs(); for (int i = 0; i < numTensor; ++i) { ::tensorflow::shape_inference::ShapeHandle sp_shape_shape; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(numTensor * 2 + i, &sp_shape_shape)); c->set_output(i, c->Matrix(c->Dim(sp_shape_shape, 0), c->Dim(c->input(numTensor * 3 + i), 1))); } return Status::OK(); }); REGISTER_OP("Bspmdt") .Attr("adjoint_a: bool") .Attr("adjoint_b: bool") .Attr("TI: list(type)") .Attr("TV: list(type)") .Input("sp_ids: TI") .Input("sp_values: TV") .Input("sp_shape: TI") .Input("rhs: float") .Output("out: TV") .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { int numTensor = c->num_outputs(); for (int i = 0; i < numTensor; ++i) { ::tensorflow::shape_inference::ShapeHandle sp_shape_shape; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(numTensor * 2 + i, &sp_shape_shape)); c->set_output(i, c->Matrix(c->Dim(sp_shape_shape, 0), c->Dim(c->input(numTensor * 3), 1))); } return Status::OK(); }); /* CUDA kernel: Initialize output tensors (matrices) */ template <typename idType, typename valType> __global__ void BatchedInitOutputs(valType **d_out, const idType* __restrict__ d_outRows, const idType nvector) { int target = blockIdx.y; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= d_outRows[target] * nvector) return; d_out[target][i] = 0; } /* CUDA kernel: Compute sparse tensor x dense matrix of batched tensors (matrices) One CUDA kernel for all SpMMs One thread block for one SpMM One subTB (1, 2, ..., 32 threads) for one non-zero */ template <typename idType, typename valType, int subTB, int adjoint_a> __global__ void BatchedSpMM(const idType* __restrict__ d_outNnz, idType **d_sp_ids, valType **d_sp_values, valType **d_rhs, valType **d_out, const idType nvector) { idType targetTensor = blockIdx.y; idType i = blockIdx.x * blockDim.x + threadIdx.x; idType targetNnz = i / subTB; if (targetNnz >= d_outNnz[targetTensor]) return; idType row_id = d_sp_ids[targetTensor][2 * targetNnz + adjoint_a] * nvector; idType column_id = d_sp_ids[targetTensor][2 * targetNnz + (1 - adjoint_a)] * nvector; valType val = d_sp_values[targetTensor][targetNnz]; for (idType localN = i & (subTB - 1); localN < nvector; localN += subTB) { atomicAdd(d_out[targetTensor] + row_id + localN, val * d_rhs[targetTensor][column_id + localN]); } } /* CUDA kernel: Compute sparse tensor x dense matrix of batched tensors (matrices) One CUDA kernel for all SpMMs One thread block for one SpMM One subTB (1, 2, ..., 32 threads) for one non-zero */ template <typename idType, typename valType, int sh_size, int subTB, int adjoint_a> __global__ void BatchedSpMM_small(const idType* __restrict__ d_outRows, const idType* __restrict__ d_nnz, idType **d_sp_ids, valType **d_sp_values, valType **d_rhs, valType **d_out, const idType outColumn) { const idType targetTensor = blockIdx.x; __shared__ valType tmp_out[sh_size]; const idType r = d_outRows[targetTensor]; const idType i = threadIdx.x; for (idType j = i; j < r * 
outColumn; j += blockDim.x) { tmp_out[j] = 0; } __syncthreads(); for (idType targetNnz = i / subTB; targetNnz < d_nnz[targetTensor]; targetNnz += (blockDim.x / subTB)) { idType row_id = d_sp_ids[targetTensor][(targetNnz << 1) + adjoint_a] * outColumn; idType column_id = d_sp_ids[targetTensor][(targetNnz << 1) + (1 - adjoint_a)] * outColumn; valType val = d_sp_values[targetTensor][targetNnz]; for (idType localN = i & (subTB - 1); localN < outColumn; localN += subTB) { atomicAdd(tmp_out + row_id + localN, val * d_rhs[targetTensor][column_id + localN]); } } __syncthreads(); for (idType j = i; j < r * outColumn; j += blockDim.x) { d_out[targetTensor][j] = tmp_out[j]; } } /* CUDA kernel: Compute sparse tensor x dense matrix of batched tensors (matrices) One CUDA kernel for all SpMMs One thread block for one partition One subTB (1, 2, ..., 32 threads) for one non-zero */ template <typename idType, typename valType, int max_size, int subTB, int adjoint_a> __global__ void BatchedSpMM_partition(const idType* __restrict__ d_outRows, const idType* __restrict__ d_nnz, const idType partition, const idType nPartition, const idType partition_bit, idType **d_sp_ids, valType **d_sp_values, valType **d_rhs, valType **d_out, const idType nvector) { __shared__ valType tmp_out[max_size]; const idType targetTensor = blockIdx.x / nPartition; const idType targetPartition = blockIdx.x % nPartition; const idType offset = targetPartition << partition_bit; const idType nrow = d_outRows[targetTensor]; const idType nnz = d_nnz[targetTensor]; const idType p = (targetPartition == nPartition - 1)? (nvector - partition * (nPartition - 1)) : partition; const idType i = threadIdx.x; for (idType j = i; j < (nrow << partition_bit); j += blockDim.x) { tmp_out[j] = 0; } __syncthreads(); for (idType targetNnz = i / subTB; targetNnz < nnz; targetNnz += (blockDim.x / subTB)) { idType row_id = d_sp_ids[targetTensor][(targetNnz << 1) + adjoint_a] << partition_bit; idType column_id = d_sp_ids[targetTensor][(targetNnz << 1) + (1 - adjoint_a)] * nvector + offset; valType val = d_sp_values[targetTensor][targetNnz]; for (idType localN = i & (subTB - 1); localN < p; localN += subTB) { atomicAdd(tmp_out + row_id + localN, val * d_rhs[targetTensor][column_id + localN]); } } __syncthreads(); for (idType j = i / subTB; j < nrow; j += (blockDim.x / subTB)) { for (idType k = i & (subTB -1); k < p; k += subTB) { d_out[targetTensor][j * nvector + offset + k] = tmp_out[(j << partition_bit) + k]; } } } template <typename idType, typename valType, int adjoint_a> void spmms_batched_coo_static(const idType *outRows, const idType *nnz, const idType *d_outRows, const idType *d_nnz, idType **d_sp_ids, valType **d_sp_values, valType **d_d_x, valType **d_d_y, const idType nvector, const idType batch) { const idType max_size = SH_PER_TB / sizeof(valType); idType max_nrow = 0; for (idType i = 0; i < batch; ++i) { if (max_nrow < outRows[i]) { max_nrow = outRows[i]; } } const int bs = 1024; // Any output matrix in batch can be placed on shared memory if (max_nrow * nvector <= max_size) { // Launch a kernel with appropriate subTB for nvector if (nvector > 16) { BatchedSpMM_small<idType, valType, max_size, 32, adjoint_a><<<batch, bs>>>(d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 8) { BatchedSpMM_small<idType, valType, max_size, 16, adjoint_a><<<batch, bs>>>(d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 4) { BatchedSpMM_small<idType, valType, max_size, 8, adjoint_a><<<batch, 
bs>>>(d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 2) { BatchedSpMM_small<idType, valType, max_size, 4, adjoint_a><<<batch, bs>>>(d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 1) { BatchedSpMM_small<idType, valType, max_size, 2, adjoint_a><<<batch, bs>>>(d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector == 1) { BatchedSpMM_small<idType, valType, max_size, 1, adjoint_a><<<batch, bs>>>(d_outRows, d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else { return; } } else if (max_nrow > max_size) { /* Initialization */ idType max_thread = 0; for (idType i = 0; i < batch; ++i) { if (max_thread < outRows[i]) { max_thread = outRows[i]; } } max_thread *= nvector; idType gs = (max_thread + bs - 1) / bs; BatchedInitOutputs<<<dim3(gs, batch), bs>>>(d_d_y, d_outRows, nvector); /* Batched Spmm Kernel */ max_thread = 0; for (idType i = 0; i < batch; ++i) { if (max_thread < nnz[i]) { max_thread = nnz[i]; } } if (nvector > 16) { max_thread *= 32; gs = (max_thread + bs - 1) / bs; BatchedSpMM<idType, valType, 32, adjoint_a><<<dim3(gs, batch), bs>>> (d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 8) { max_thread *= 16; gs = (max_thread + bs - 1) / bs; BatchedSpMM<idType, valType, 16, adjoint_a><<<dim3(gs, batch), bs>>> (d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 4) { max_thread *= 8; gs = (max_thread + bs - 1) / bs; BatchedSpMM<idType, valType, 8, adjoint_a><<<dim3(gs, batch), bs>>> (d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 2) { max_thread *= 4; gs = (max_thread + bs - 1) / bs; BatchedSpMM<idType, valType, 4, adjoint_a><<<dim3(gs, batch), bs>>> (d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 1) { max_thread *= 2; gs = (max_thread + bs - 1) / bs; BatchedSpMM<idType, valType, 2, adjoint_a><<<dim3(gs, batch), bs>>> (d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector == 1) { gs = (max_thread + bs - 1) / bs; BatchedSpMM<idType, valType, 1, adjoint_a><<<dim3(gs, batch), bs>>> (d_nnz, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else { return; } } else { idType p_bit = 0; while (((max_nrow << (p_bit + 1)) <= max_size) && ((1 << (p_bit + 1)) <= nvector)) { p_bit++; } idType p = 1 << p_bit; idType nPartition = (nvector + p - 1) / p; // One thread block for one partition if (nvector > 16) { BatchedSpMM_partition<idType, valType, max_size, 32, adjoint_a><<<nPartition * batch, bs>>>(d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 8) { BatchedSpMM_partition<idType, valType, max_size, 16, adjoint_a><<<nPartition * batch, bs>>>(d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 4) { BatchedSpMM_partition<idType, valType, max_size, 8, adjoint_a><<<nPartition * batch, bs>>>(d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 2) { BatchedSpMM_partition<idType, valType, max_size, 4, adjoint_a><<<nPartition * batch, bs>>>(d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector > 1) { BatchedSpMM_partition<idType, valType, max_size, 2, adjoint_a><<<nPartition * batch, bs>>>(d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else if (nvector == 1) { BatchedSpMM_partition<idType, valType, max_size, 1, 
adjoint_a><<<nPartition * batch, bs>>>(d_outRows, d_nnz, p, nPartition, p_bit, d_sp_ids, d_sp_values, d_d_x, d_d_y, nvector); } else { return; } } } template <typename Device, typename idType, typename valType, bool ADJ_A, bool ADJ_B> struct BspmmFunctor { void operator()(OpKernelContext* context, Device &d, idType numTensor, idType *outRows, idType *outColumns, idType *nnz, idType **sp_ids, valType **sp_values, valType **rhs, valType **out); }; /* Functor for CPU */ template <typename idType, typename valType, bool ADJ_A, bool ADJ_B> struct BspmmFunctor<CPUDevice, idType, valType, ADJ_A, ADJ_B> { void operator()(OpKernelContext* context, const CPUDevice &d, idType numTensor, idType *outRows, idType *outColumns, idType *nnz, idType **sp_ids, valType **sp_values, valType **rhs, valType **out) { /* Initialization */ for (idType t = 0; t < numTensor; ++t) { for (idType i = 0; i < outRows[t] * outColumns[t]; ++i) { out[t][i] = 0; } } /* SpMM */ const int sp_inc = (ADJ_A)? 1 : 0; for (idType t = 0; t < numTensor; ++t) { for (idType i = 0; i < nnz[t]; ++i) { idType row_id = sp_ids[t][2 * i + sp_inc]; idType column_id = sp_ids[t][2 * i + (1 - sp_inc)]; valType val = sp_values[t][i]; for (idType j = 0; j < outColumns[t]; ++j) { idType r = (ADJ_B)? j * outRows[t] + column_id : column_id * outColumns[t] + j; out[t][row_id * outColumns[t] + j] += val * rhs[t][r]; } } } } }; /* Functor for GPU */ template <typename idType, typename valType, bool ADJ_A, bool ADJ_B> struct BspmmFunctor<GPUDevice, idType, valType, ADJ_A, ADJ_B> { void operator()(OpKernelContext* context, const GPUDevice &d, idType numTensor, idType *outRows, idType *outColumns, idType *nnz, idType **sp_ids, valType **sp_values, valType **rhs, valType **out) { /* Memory allocation and copyHtD */ const TensorShape s({numTensor}); Tensor d_outRows_t, d_outColumns_t, d_nnz_t, d_sp_ids_t, d_sp_values_t, d_rhs_t, d_out_t; context->allocate_temp(DT_INT64, s, &d_outRows_t); context->allocate_temp(DT_INT64, s, &d_outColumns_t); context->allocate_temp(DT_INT64, s, &d_nnz_t); context->allocate_temp(DT_INT64, s, &d_sp_ids_t); context->allocate_temp(DT_INT64, s, &d_sp_values_t); context->allocate_temp(DT_INT64, s, &d_rhs_t); context->allocate_temp(DT_INT64, s, &d_out_t); idType *d_outRows, *d_outColumns, *d_nnz; idType **d_sp_ids; valType **d_sp_values, **d_rhs, **d_out; d_outRows = (idType *)(d_outRows_t.vec<int64>().data()); d_outColumns = (idType *)(d_outColumns_t.vec<int64>().data()); d_nnz = (idType *)(d_nnz_t.vec<int64>().data()); d_sp_ids = (idType **)(d_sp_ids_t.vec<int64>().data()); d_sp_values = (valType **)(d_sp_values_t.vec<int64>().data()); d_rhs = (valType **)(d_rhs_t.vec<int64>().data()); d_out = (valType **)(d_out_t.vec<int64>().data()); cudaMemcpyAsync(d_outRows, outRows, sizeof(idType) * numTensor, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_outColumns, outColumns, sizeof(idType) * numTensor, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_nnz, nnz, sizeof(idType) * numTensor, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_sp_ids, sp_ids, sizeof(idType*) * numTensor, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_sp_values, sp_values, sizeof(valType*) * numTensor, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_rhs, rhs, sizeof(valType*) * numTensor, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_out, out, sizeof(valType*) * numTensor, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); if (ADJ_A == false) { spmms_batched_coo_static<idType, valType, 0>(outRows, nnz, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_rhs, d_out, outColumns[0], numTensor); } else { 
spmms_batched_coo_static<idType, valType, 1>(outRows, nnz, d_outRows, d_nnz, d_sp_ids, d_sp_values, d_rhs, d_out, outColumns[0], numTensor); } } }; template <typename Device, typename idType, typename valType> class BspmmOp : public OpKernel { public: explicit BspmmOp(OpKernelConstruction* context) : OpKernel(context) { // Grab the attributes OP_REQUIRES_OK(context, context->GetAttr("adjoint_a", &adjoint_a)); OP_REQUIRES_OK(context, context->GetAttr("adjoint_b", &adjoint_b)); } void Compute(OpKernelContext* context) override { // Grab the input tensor OpInputList sp_shape_list(context, 0, 0); OpInputList sp_ids_list(context, 0, 0); OpInputList sp_values_list(context, 0, 0); OpInputList rhs_list(context, 0, 0); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_shape"), &sp_shape_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_ids"), &sp_ids_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_values"), &sp_values_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("rhs"), &rhs_list)); // int numTensor = n_tensor; int numTensor = rhs_list.size(); OpOutputList olist(context, 0, numTensor); Tensor** output_tensor = new Tensor*[numTensor]; idType *outColumns = new idType[numTensor]; idType *outRows = new idType[numTensor]; idType *nnz = new idType[numTensor]; idType **sp_ids = new idType*[numTensor]; valType **sp_values = new valType*[numTensor]; valType **rhs = new valType*[numTensor]; valType **output = new valType*[numTensor]; for (int i = 0; i < numTensor; ++i) { auto sp_shape_t = (sp_shape_list[i]).vec<idType>(); auto rhs_shape_t = (rhs_list[i]).shape(); outRows[i] = (adjoint_a)? sp_shape_t(1) : sp_shape_t(0); outColumns[i] = (adjoint_b)? rhs_shape_t.dim_size(0) : rhs_shape_t.dim_size(1); nnz[i] = (sp_values_list[i]).shape().dim_size(0); /* Create an output tensor */ TensorShape output_shape({outRows[i], outColumns[i]}); olist.allocate(i, output_shape, output_tensor + i); sp_ids[i] = (idType *)((sp_ids_list[i]).matrix<idType>().data()); sp_values[i] = (valType *)((sp_values_list[i]).vec<valType>().data()); rhs[i] = (valType *)((rhs_list[i]).matrix<valType>().data()); output[i] = (valType *)((output_tensor[i])->matrix<valType>().data()); } // Execute Batched SpMM #define GENERATE_ADJOOINT_PAIR(ADJ_A, ADJ_B) \ if (adjoint_a == ADJ_A && adjoint_b == ADJ_B) { \ BspmmFunctor<Device, idType, valType, ADJ_A, ADJ_B>()(context, context->eigen_device<Device>(), numTensor, outRows, outColumns, nnz, sp_ids, sp_values, rhs, output); \ } GENERATE_ADJOOINT_PAIR(false, false); GENERATE_ADJOOINT_PAIR(true, false); GENERATE_ADJOOINT_PAIR(false, true); GENERATE_ADJOOINT_PAIR(true, true); delete[] outRows; delete[] outColumns; delete[] nnz; delete[] sp_ids; delete[] sp_values; delete[] rhs; delete[] output; delete[] output_tensor; } private: bool adjoint_a; bool adjoint_b; }; template <typename Device, typename idType, typename valType> class BspmdtOp : public OpKernel { public: explicit BspmdtOp(OpKernelConstruction* context) : OpKernel(context) { // Grab the attributes OP_REQUIRES_OK(context, context->GetAttr("adjoint_a", &adjoint_a)); OP_REQUIRES_OK(context, context->GetAttr("adjoint_b", &adjoint_b)); } void Compute(OpKernelContext* context) override { // Grab the input tensor OpInputList sp_shape_list(context, 0, 0); OpInputList sp_ids_list(context, 0, 0); OpInputList sp_values_list(context, 0, 0); const Tensor *rhs_t; OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_shape"), &sp_shape_list)); OP_REQUIRES_OK(context, 
context->input_list(StringPiece("sp_ids"), &sp_ids_list)); OP_REQUIRES_OK(context, context->input_list(StringPiece("sp_values"), &sp_values_list)); OP_REQUIRES_OK(context, context->input(StringPiece("rhs"), &rhs_t)); // int numTensor = n_tensor; int numTensor = sp_shape_list.size(); OpOutputList olist(context, 0, numTensor); Tensor** output_tensor = new Tensor*[numTensor]; idType *outColumns = new idType[numTensor]; idType *outRows = new idType[numTensor]; idType *nnz = new idType[numTensor]; idType **sp_ids = new idType*[numTensor]; valType **sp_values = new valType*[numTensor]; valType **rhs = new valType*[numTensor]; valType **output = new valType*[numTensor]; for (int i = 0; i < numTensor; ++i) { auto sp_shape_t = (sp_shape_list[i]).vec<idType>(); auto rhs_shape_t = rhs_t->shape(); outRows[i] = (adjoint_a)? sp_shape_t(1) : sp_shape_t(0); outColumns[i] = (adjoint_b)? rhs_shape_t.dim_size(0) : rhs_shape_t.dim_size(1); nnz[i] = (sp_values_list[i]).shape().dim_size(0); /* Create an output tensor */ TensorShape output_shape({outRows[i], outColumns[i]}); olist.allocate(i, output_shape, output_tensor + i); sp_ids[i] = (idType *)((sp_ids_list[i]).matrix<idType>().data()); sp_values[i] = (valType *)((sp_values_list[i]).vec<valType>().data()); idType rhsRow = (adjoint_b)? rhs_shape_t.dim_size(1) : rhs_shape_t.dim_size(0); rhs[i] = (valType *)(rhs_t->matrix<valType>().data() + i * (rhsRow / numTensor) * outColumns[i]); output[i] = (valType *)((output_tensor[i])->matrix<valType>().data()); } // Execute Batched SpMM #define GENERATE_ADJOOINT_PAIR(ADJ_A, ADJ_B) \ if (adjoint_a == ADJ_A && adjoint_b == ADJ_B) { \ BspmmFunctor<Device, idType, valType, ADJ_A, ADJ_B>()(context, context->eigen_device<Device>(), numTensor, outRows, outColumns, nnz, sp_ids, sp_values, rhs, output); \ } GENERATE_ADJOOINT_PAIR(false, false); GENERATE_ADJOOINT_PAIR(true, false); GENERATE_ADJOOINT_PAIR(false, true); GENERATE_ADJOOINT_PAIR(true, true); delete[] outRows; delete[] outColumns; delete[] nnz; delete[] sp_ids; delete[] sp_values; delete[] rhs; delete[] output; delete[] output_tensor; } private: bool adjoint_a; bool adjoint_b; }; #define REGISTER_BSPMM_CPU(idType, valType) \ REGISTER_KERNEL_BUILDER(Name("Bspmm").Device(DEVICE_CPU) \ .TypeConstraint<idType>("TI") \ .TypeConstraint<valType>("TV") \ .HostMemory("sp_shape"), \ BspmmOp<CPUDevice, idType, valType>); #define REGISTER_BSPMM_GPU(idType, valType) \ REGISTER_KERNEL_BUILDER(Name("Bspmm").Device(DEVICE_GPU) \ .TypeConstraint<idType>("TI") \ .TypeConstraint<valType>("TV") \ .HostMemory("sp_shape"), \ BspmmOp<GPUDevice, idType, valType>); REGISTER_BSPMM_CPU(int32, float) // REGISTER_BSPMM_CPU(int32, double) REGISTER_BSPMM_CPU(int64, float) // REGISTER_BSPMM_CPU(int64, double) REGISTER_BSPMM_GPU(int32, float) // REGISTER_BSPMM_GPU(int32, double) REGISTER_BSPMM_GPU(int64, float) // REGISTER_BSPMM_GPU(int64, double) #define REGISTER_BSPMDT_CPU(idType, valType) \ REGISTER_KERNEL_BUILDER(Name("Bspmdt").Device(DEVICE_CPU) \ .TypeConstraint<idType>("TI") \ .TypeConstraint<valType>("TV") \ .HostMemory("sp_shape"), \ BspmdtOp<CPUDevice, idType, valType>); #define REGISTER_BSPMDT_GPU(idType, valType) \ REGISTER_KERNEL_BUILDER(Name("Bspmdt").Device(DEVICE_GPU) \ .TypeConstraint<idType>("TI") \ .TypeConstraint<valType>("TV") \ .HostMemory("sp_shape"), \ BspmdtOp<GPUDevice, idType, valType>); REGISTER_BSPMDT_CPU(int32, float) // REGISTER_BSPMDT_CPU(int32, double) REGISTER_BSPMDT_CPU(int64, float) // REGISTER_BSPMDT_CPU(int64, double) REGISTER_BSPMDT_GPU(int32, float) // 
REGISTER_BSPMDT_GPU(int32, double) REGISTER_BSPMDT_GPU(int64, float) // REGISTER_BSPMDT_GPU(int64, double)
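Beyond kernel launches, the same pair shows the host-side runtime calls renamed one-for-one (cudaMemcpyAsync to hipMemcpyAsync, cudaDeviceSynchronize to hipDeviceSynchronize) with unchanged argument lists. A small sketch under that assumption, with an illustrative helper name and no error checking:

#include "hip/hip_runtime.h"

// Sketch of the host-to-device pointer-array upload seen in the GPU functor above;
// error handling omitted, helper name is illustrative.
void upload_batch_pointers(float **d_ptrs, float **h_ptrs, int numTensor) {
  // CUDA form:  cudaMemcpyAsync(d_ptrs, h_ptrs, sizeof(float*) * numTensor, cudaMemcpyHostToDevice);
  //             cudaDeviceSynchronize();
  // HIP form:
  hipMemcpyAsync(d_ptrs, h_ptrs, sizeof(float*) * numTensor, hipMemcpyHostToDevice);
  hipDeviceSynchronize();
}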
186f8f54462f70221b3e016115d3aa9008c3a00b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020 NVIDIA Corporation. * Copyright (c) 2018-2020 Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #include "coordinate_map.hpp" #include "coordinate_map_key.hpp" #include "coordinate_map_manager.hpp" #include "errors.hpp" #include "types.hpp" #include "utils.hpp" #include "convolution_kernel.cuh" #include "kernel_map.cuh" // Ninja #include "convolution_transpose_cpu.cpp" #include <ATen/hip/HIPUtils.h> #include <pybind11/pybind11.h> #include <torch/extension.h> namespace minkowski { template <typename coordinate_type, template <typename C> class TemplatedAllocator> at::Tensor ConvolutionTransposeForwardGPU( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool generate_new_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) { ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous"); ASSERT(kernel.is_contiguous(), "kernel must be contiguous"); ASSERT(in_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(kernel.is_cuda(), "kernel must be CUDA"); ASSERT(at::cuda::check_device({in_feat, kernel}), "in_feat and kernel must be on the same device"); ASSERT(in_feat.scalar_type() == kernel.scalar_type(), "type mismatch"); ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim()); ASSERT(kernel.dim() == 3, "kernel.dim():", kernel.dim()); ASSERT(in_feat.size(1) == kernel.size(1), "Input feature size and kernel size mismatch"); // TODO kernel volume assertion. 
// create out coordinate map // TODO: custom upsampling coordinate_map_key_type in_key = p_in_map_key->get_key(); ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND); ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size", in_feat.size(0), "!=", p_map_manager->size(in_key)); if (!p_out_map_key->is_key_set()) { auto map_it = p_map_manager->find(p_in_map_key->get_key()); ASSERT(map_it != p_map_manager->map_end(), ERROR_MAP_NOT_FOUND); auto const &in_map = (*map_it).second; auto out_tensor_stride = detail::stride_tensor_stride( in_map.get_tensor_stride(), kernel_stride, true /* is_transpose */); auto kernel_region = cpu_kernel_region<coordinate_type>( region_type, // in_map.coordinate_size(), // out_tensor_stride.data(), // kernel_size.data(), // kernel_dilation.data(), // 0, // volume offset.data_ptr<coordinate_type>(), offset.size(0), true // is_transpose ); coordinate_map_key_type out_key = std::get<0>(p_map_manager->stride_region( in_key, kernel_region, out_tensor_stride, generate_new_coordinates)); LOG_DEBUG("ConvolutionTranspose out key:", out_key); p_out_map_key->set_key(out_key); } auto const &in_out = p_map_manager->kernel_map(p_in_map_key, // p_out_map_key, // kernel_size, // kernel_stride, // kernel_dilation, // region_type, // offset, // true /* is_transpose */, // false /* is_pool */); #ifdef DEBUG LOG_DEBUG("Transposed kernel map in_maps:", in_out.out_maps.begin() - in_out.in_maps.begin()); #endif auto const out_nrows = p_map_manager->size(p_out_map_key->get_key()); at::Tensor out_feat = torch::zeros({out_nrows, kernel.size(2)}, in_feat.options()); LOG_DEBUG("In feat:", in_feat.size(0), "x", in_feat.size(1), "-> out feat", out_feat.size(0), "x", out_feat.size(1)); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); AT_DISPATCH_FLOATING_TYPES( in_feat.scalar_type(), "convolution_transpose_forward_gpu", [&] { TemplatedAllocator<char> byte_allocator; ConvolutionForwardKernelGPU<scalar_t, default_types::index_type, TemplatedAllocator<char>>( in_feat.template data_ptr<scalar_t>(), // in_feat.size(1), // out_feat.template data_ptr<scalar_t>(), // out_feat.size(1), // kernel.template data_ptr<scalar_t>(), // in_out, // in_feat.size(0), // out_nrows, // byte_allocator, // p_map_manager->algorithm(), // convolution_mode, handle, stream); }); return out_feat; } template <typename coordinate_type, template <typename C> class TemplatedAllocator> std::pair<at::Tensor, at::Tensor> ConvolutionTransposeBackwardGPU( at::Tensor const &in_feat, // at::Tensor const &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) { ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous"); ASSERT(grad_out_feat.is_contiguous(), "grad_out_feat must be contiguous"); ASSERT(kernel.is_contiguous(), "kernel must be contiguous"); ASSERT(in_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(grad_out_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(kernel.is_cuda(), "kernel must be CUDA"); ASSERT(at::cuda::check_device({in_feat, grad_out_feat, kernel}), "in_feat, 
grad_out_feat, kernel must be on the same device"); ASSERT(in_feat.scalar_type() == kernel.scalar_type(), "type mismatch"); ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch"); ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim()); ASSERT(grad_out_feat.dim() == 2, "grad_out_feat.dim():", grad_out_feat.dim()); ASSERT(kernel.dim() == 3, "kernel.dim():", kernel.dim()); ASSERT(in_feat.size(1) == kernel.size(1), "Input feature size and kernel size mismatch"); coordinate_map_key_type in_key = p_in_map_key->get_key(); ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND); coordinate_map_key_type out_key = p_out_map_key->get_key(); ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND); auto const &in_out = p_map_manager->kernel_map( p_in_map_key, // p_out_map_key, // kernel_size, // kernel_stride, // kernel_dilation, // region_type, // offset, true /* is_transpose */, false /* is_pool */); at::Tensor grad_in_feat = torch::zeros({in_feat.size(0), in_feat.size(1)}, in_feat.options()); at::Tensor grad_kernel = torch::zeros( {kernel.size(0), kernel.size(1), kernel.size(2)}, kernel.options()); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); AT_DISPATCH_FLOATING_TYPES( in_feat.scalar_type(), "convolution_transpose_backward_gpu", [&] { TemplatedAllocator<char> byte_allocator; ConvolutionBackwardKernelGPU<scalar_t, default_types::index_type, TemplatedAllocator<char>>( in_feat.template data_ptr<scalar_t>(), // grad_in_feat.template data_ptr<scalar_t>(), // in_feat.size(1), // grad_out_feat.template data_ptr<scalar_t>(), // grad_out_feat.size(1), // kernel.template data_ptr<scalar_t>(), // grad_kernel.template data_ptr<scalar_t>(), // in_out, // in_feat.size(0), // grad_out_feat.size(0), // byte_allocator, // p_map_manager->algorithm(), // convolution_mode, // handle, stream); }); return std::make_pair(grad_in_feat, grad_kernel); } // Forward // default_allocator template at::Tensor ConvolutionTransposeForwardGPU<default_types::dcoordinate_type, detail::default_allocator>( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool generate_new_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator> *p_map_manager); // c10_allocator template at::Tensor ConvolutionTransposeForwardGPU<default_types::dcoordinate_type, detail::c10_allocator>( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool generate_new_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator> *p_map_manager); // Backward // default_allocator template std::pair<at::Tensor, at::Tensor> ConvolutionTransposeBackwardGPU<default_types::dcoordinate_type, detail::default_allocator>( at::Tensor const &in_feat, // at::Tensor 
const &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator> *p_map_manager); // c10_allocator template std::pair<at::Tensor, at::Tensor> ConvolutionTransposeBackwardGPU<default_types::dcoordinate_type, detail::c10_allocator>( at::Tensor const &in_feat, // at::Tensor const &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator> *p_map_manager); } // end namespace minkowski
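In this second pair the translation also touches the BLAS and stream plumbing: cublasHandle_t and cublasSetStream become hipblasHandle_t and hipblasSetStream, cudaStream_t becomes hipStream_t, and at::cuda::getCurrentCUDAStream() becomes at::hip::getCurrentHIPStreamMasqueradingAsCUDA(). A standalone sketch of the handle/stream setup outside of ATen (header path and function name are assumptions, error handling omitted):

#include <hip/hip_runtime.h>
#include <hipblas.h>  // assumed header name; some installs use <hipblas/hipblas.h>

// Sketch of the handle/stream binding seen in the file above; not the Minkowski Engine code itself.
void bind_blas_to_stream() {
  hipStream_t stream;                // CUDA: cudaStream_t
  hipStreamCreate(&stream);

  hipblasHandle_t handle;            // CUDA: cublasHandle_t
  hipblasCreate(&handle);            // CUDA: cublasCreate
  hipblasSetStream(handle, stream);  // CUDA: cublasSetStream

  // ... enqueue hipBLAS calls on `stream` here ...

  hipblasDestroy(handle);
  hipStreamDestroy(stream);
}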
186f8f54462f70221b3e016115d3aa9008c3a00b.cu
/* * Copyright (c) 2020 NVIDIA Corporation. * Copyright (c) 2018-2020 Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #include "coordinate_map.hpp" #include "coordinate_map_key.hpp" #include "coordinate_map_manager.hpp" #include "errors.hpp" #include "types.hpp" #include "utils.hpp" #include "convolution_kernel.cuh" #include "kernel_map.cuh" // Ninja #include "convolution_transpose_cpu.cpp" #include <ATen/cuda/CUDAUtils.h> #include <pybind11/pybind11.h> #include <torch/extension.h> namespace minkowski { template <typename coordinate_type, template <typename C> class TemplatedAllocator> at::Tensor ConvolutionTransposeForwardGPU( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool generate_new_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) { ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous"); ASSERT(kernel.is_contiguous(), "kernel must be contiguous"); ASSERT(in_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(kernel.is_cuda(), "kernel must be CUDA"); ASSERT(at::cuda::check_device({in_feat, kernel}), "in_feat and kernel must be on the same device"); ASSERT(in_feat.scalar_type() == kernel.scalar_type(), "type mismatch"); ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim()); ASSERT(kernel.dim() == 3, "kernel.dim():", kernel.dim()); ASSERT(in_feat.size(1) == kernel.size(1), "Input feature size and kernel size mismatch"); // TODO kernel volume assertion. 
// create out coordinate map // TODO: custom upsampling coordinate_map_key_type in_key = p_in_map_key->get_key(); ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND); ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size", in_feat.size(0), "!=", p_map_manager->size(in_key)); if (!p_out_map_key->is_key_set()) { auto map_it = p_map_manager->find(p_in_map_key->get_key()); ASSERT(map_it != p_map_manager->map_end(), ERROR_MAP_NOT_FOUND); auto const &in_map = (*map_it).second; auto out_tensor_stride = detail::stride_tensor_stride( in_map.get_tensor_stride(), kernel_stride, true /* is_transpose */); auto kernel_region = cpu_kernel_region<coordinate_type>( region_type, // in_map.coordinate_size(), // out_tensor_stride.data(), // kernel_size.data(), // kernel_dilation.data(), // 0, // volume offset.data_ptr<coordinate_type>(), offset.size(0), true // is_transpose ); coordinate_map_key_type out_key = std::get<0>(p_map_manager->stride_region( in_key, kernel_region, out_tensor_stride, generate_new_coordinates)); LOG_DEBUG("ConvolutionTranspose out key:", out_key); p_out_map_key->set_key(out_key); } auto const &in_out = p_map_manager->kernel_map(p_in_map_key, // p_out_map_key, // kernel_size, // kernel_stride, // kernel_dilation, // region_type, // offset, // true /* is_transpose */, // false /* is_pool */); #ifdef DEBUG LOG_DEBUG("Transposed kernel map in_maps:", in_out.out_maps.begin() - in_out.in_maps.begin()); #endif auto const out_nrows = p_map_manager->size(p_out_map_key->get_key()); at::Tensor out_feat = torch::zeros({out_nrows, kernel.size(2)}, in_feat.options()); LOG_DEBUG("In feat:", in_feat.size(0), "x", in_feat.size(1), "-> out feat", out_feat.size(0), "x", out_feat.size(1)); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); AT_DISPATCH_FLOATING_TYPES( in_feat.scalar_type(), "convolution_transpose_forward_gpu", [&] { TemplatedAllocator<char> byte_allocator; ConvolutionForwardKernelGPU<scalar_t, default_types::index_type, TemplatedAllocator<char>>( in_feat.template data_ptr<scalar_t>(), // in_feat.size(1), // out_feat.template data_ptr<scalar_t>(), // out_feat.size(1), // kernel.template data_ptr<scalar_t>(), // in_out, // in_feat.size(0), // out_nrows, // byte_allocator, // p_map_manager->algorithm(), // convolution_mode, handle, stream); }); return out_feat; } template <typename coordinate_type, template <typename C> class TemplatedAllocator> std::pair<at::Tensor, at::Tensor> ConvolutionTransposeBackwardGPU( at::Tensor const &in_feat, // at::Tensor const &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) { ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous"); ASSERT(grad_out_feat.is_contiguous(), "grad_out_feat must be contiguous"); ASSERT(kernel.is_contiguous(), "kernel must be contiguous"); ASSERT(in_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(grad_out_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(kernel.is_cuda(), "kernel must be CUDA"); ASSERT(at::cuda::check_device({in_feat, grad_out_feat, kernel}), "in_feat, grad_out_feat, 
kernel must be on the same device"); ASSERT(in_feat.scalar_type() == kernel.scalar_type(), "type mismatch"); ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch"); ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim()); ASSERT(grad_out_feat.dim() == 2, "grad_out_feat.dim():", grad_out_feat.dim()); ASSERT(kernel.dim() == 3, "kernel.dim():", kernel.dim()); ASSERT(in_feat.size(1) == kernel.size(1), "Input feature size and kernel size mismatch"); coordinate_map_key_type in_key = p_in_map_key->get_key(); ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND); coordinate_map_key_type out_key = p_out_map_key->get_key(); ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND); auto const &in_out = p_map_manager->kernel_map( p_in_map_key, // p_out_map_key, // kernel_size, // kernel_stride, // kernel_dilation, // region_type, // offset, true /* is_transpose */, false /* is_pool */); at::Tensor grad_in_feat = torch::zeros({in_feat.size(0), in_feat.size(1)}, in_feat.options()); at::Tensor grad_kernel = torch::zeros( {kernel.size(0), kernel.size(1), kernel.size(2)}, kernel.options()); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); AT_DISPATCH_FLOATING_TYPES( in_feat.scalar_type(), "convolution_transpose_backward_gpu", [&] { TemplatedAllocator<char> byte_allocator; ConvolutionBackwardKernelGPU<scalar_t, default_types::index_type, TemplatedAllocator<char>>( in_feat.template data_ptr<scalar_t>(), // grad_in_feat.template data_ptr<scalar_t>(), // in_feat.size(1), // grad_out_feat.template data_ptr<scalar_t>(), // grad_out_feat.size(1), // kernel.template data_ptr<scalar_t>(), // grad_kernel.template data_ptr<scalar_t>(), // in_out, // in_feat.size(0), // grad_out_feat.size(0), // byte_allocator, // p_map_manager->algorithm(), // convolution_mode, // handle, stream); }); return std::make_pair(grad_in_feat, grad_kernel); } // Forward // default_allocator template at::Tensor ConvolutionTransposeForwardGPU<default_types::dcoordinate_type, detail::default_allocator>( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool generate_new_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator> *p_map_manager); // c10_allocator template at::Tensor ConvolutionTransposeForwardGPU<default_types::dcoordinate_type, detail::c10_allocator>( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool generate_new_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator> *p_map_manager); // Backward // default_allocator template std::pair<at::Tensor, at::Tensor> ConvolutionTransposeBackwardGPU<default_types::dcoordinate_type, detail::default_allocator>( at::Tensor const &in_feat, // at::Tensor const &grad_out_feat, // 
at::Tensor const &kernel,                           //
    default_types::stride_type const &kernel_size,     //
    default_types::stride_type const &kernel_stride,   //
    default_types::stride_type const &kernel_dilation, //
    RegionType::Type const region_type,                //
    at::Tensor const &offset,                          //
    ConvolutionMode::Type const convolution_mode,      //
    CoordinateMapKey *p_in_map_key,                     //
    CoordinateMapKey *p_out_map_key,                    //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::default_allocator> *p_map_manager);

// c10_allocator
template std::pair<at::Tensor, at::Tensor>
ConvolutionTransposeBackwardGPU<default_types::dcoordinate_type,
                                detail::c10_allocator>(
    at::Tensor const &in_feat,                          //
    at::Tensor const &grad_out_feat,                    //
    at::Tensor const &kernel,                           //
    default_types::stride_type const &kernel_size,     //
    default_types::stride_type const &kernel_stride,   //
    default_types::stride_type const &kernel_dilation, //
    RegionType::Type const region_type,                //
    at::Tensor const &offset,                          //
    ConvolutionMode::Type const convolution_mode,      //
    CoordinateMapKey *p_in_map_key,                     //
    CoordinateMapKey *p_out_map_key,                    //
    gpu_manager_type<default_types::dcoordinate_type,
                     detail::c10_allocator> *p_map_manager);

} // end namespace minkowski
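The file above closes with explicit instantiations of ConvolutionTransposeForwardGPU and ConvolutionTransposeBackwardGPU for the default and c10 allocator backends, so the CUDA definitions can stay in this translation unit while other objects link against them. A minimal sketch of that pattern is shown below; all names in it are hypothetical stand-ins, not the MinkowskiEngine API.

#include <cstddef>
#include <cstdlib>

template <typename T> struct default_alloc {
  T *allocate(std::size_t n) { return static_cast<T *>(std::malloc(n * sizeof(T))); }
  void deallocate(T *p, std::size_t) { std::free(p); }
};

template <typename T> struct pooled_alloc {   // stand-in for a caching allocator such as c10's
  T *allocate(std::size_t n) { return static_cast<T *>(std::malloc(n * sizeof(T))); }
  void deallocate(T *p, std::size_t) { std::free(p); }
};

// The definition lives in the source file...
template <typename coordinate_type, template <typename C> class TemplatedAllocator>
std::size_t workspace_bytes(std::size_t n) {
  TemplatedAllocator<char> byte_allocator;   // same idiom as the dispatch lambdas above
  char *scratch = byte_allocator.allocate(n);
  byte_allocator.deallocate(scratch, n);
  return n;
}

// ...and is compiled once per supported backend, mirroring the
// default_allocator / c10_allocator instantiations at the end of the file.
template std::size_t workspace_bytes<int, default_alloc>(std::size_t);
template std::size_t workspace_bytes<int, pooled_alloc>(std::size_t);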
42baa492ab2832e02b14f20609046b9f62cb3eb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zlat2c.cu mixed zc -> ds, Sat Nov 15 19:53:59 2014 @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #define BLK_X 64 #define BLK_Y 32 // TODO get rid of global variable! static __device__ int flag = 0; /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlag2s and zlaset. */ __global__ void dlat2s_lower( int n, const double *A, int lda, float *SA, int ldsa, double rmax ) { double tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( ((tmp) < neg_rmax) || ((tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || ((tmp) < neg_rmax) || ((tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = (float)( tmp ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { tmp = A[j*lda]; if ( ((tmp) < neg_rmax) || ((tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || ((tmp) < neg_rmax) || ((tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = (float)( tmp ); } } } } /* Similar to dlat2s_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlag2s and zlaset. */ __global__ void dlat2s_upper( int n, const double *A, int lda, float *SA, int ldsa, double rmax ) { double tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( ((tmp) < neg_rmax) || ((tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || ((tmp) < neg_rmax) || ((tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = (float)( tmp ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { tmp = A[j*lda]; if ( ((tmp) < neg_rmax) || ((tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || ((tmp) < neg_rmax) || ((tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = (float)( tmp ); } } } } } /** Purpose ------- DLAT2S converts a double-real matrix, A, to a single-real matrix, SA. RMAX is the overflow for the single-real arithmetic. DLAT2S checks that all the entries of A are between -RMAX and RMAX. If not, the conversion is aborted and a flag is raised. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. 
- = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A DOUBLE PRECISION array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA SINGLE PRECISION array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value - = 1: an entry of the matrix A is greater than the SINGLE PRECISION overflow threshold, in this case, the content of SA on exit is unspecified. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlat2s_q( magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaFloat_ptr SA, magma_int_t ldsa, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } double rmax = (double)lapackf77_slamch("O"); dim3 threads( BLK_X, 1 ); dim3 grid( (n+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y ); hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0 if (uplo == MagmaLower) hipLaunchKernelGGL(( dlat2s_lower), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, ldsa, rmax); else if (uplo == MagmaUpper) hipLaunchKernelGGL(( dlat2s_upper), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, ldsa, rmax); hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag } /** @see magmablas_dlat2s_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlat2s( magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaFloat_ptr SA, magma_int_t ldsa, magma_int_t *info ) { magmablas_dlat2s_q( uplo, n, A, lda, SA, ldsa, info, magma_stream ); }
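The documentation block above spells out the dlat2s contract: choose the lower or upper triangle, supply the leading dimensions, and check info for single-precision overflow. The usage sketch below is based only on that signature; it assumes a working MAGMA installation and that the magma_dmalloc/magma_smalloc helpers are available from magma.h, and it is illustrative rather than production code.

#include <cstdio>
#include "magma.h"

int main() {
  magma_init();
  const magma_int_t n = 512, lda = n, ldsa = n;
  magmaDouble_ptr dA;
  magmaFloat_ptr dSA;
  magma_dmalloc(&dA, lda * n);     // device buffers for A (double) and SA (float)
  magma_smalloc(&dSA, ldsa * n);
  // ... fill dA here (e.g. by copying an n-by-n matrix from the host) ...
  magma_int_t info = 0;
  magmablas_dlat2s(MagmaLower, n, dA, lda, dSA, ldsa, &info);
  if (info != 0)
    printf("dlat2s: an entry of A exceeded the single-precision range (info=%d)\n", (int)info);
  magma_free(dA);
  magma_free(dSA);
  magma_finalize();
  return 0;
}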
42baa492ab2832e02b14f20609046b9f62cb3eb5.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zlat2c.cu mixed zc -> ds, Sat Nov 15 19:53:59 2014 @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #define BLK_X 64 #define BLK_Y 32 // TODO get rid of global variable! static __device__ int flag = 0; /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlag2s and zlaset. */ __global__ void dlat2s_lower( int n, const double *A, int lda, float *SA, int ldsa, double rmax ) { double tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( ((tmp) < neg_rmax) || ((tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || ((tmp) < neg_rmax) || ((tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = (float)( tmp ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { tmp = A[j*lda]; if ( ((tmp) < neg_rmax) || ((tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || ((tmp) < neg_rmax) || ((tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = (float)( tmp ); } } } } /* Similar to dlat2s_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlag2s and zlaset. */ __global__ void dlat2s_upper( int n, const double *A, int lda, float *SA, int ldsa, double rmax ) { double tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( ((tmp) < neg_rmax) || ((tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || ((tmp) < neg_rmax) || ((tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = (float)( tmp ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { tmp = A[j*lda]; if ( ((tmp) < neg_rmax) || ((tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || ((tmp) < neg_rmax) || ((tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = (float)( tmp ); } } } } } /** Purpose ------- DLAT2S converts a double-real matrix, A, to a single-real matrix, SA. RMAX is the overflow for the single-real arithmetic. DLAT2S checks that all the entries of A are between -RMAX and RMAX. If not, the conversion is aborted and a flag is raised. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. 
@param[in] A DOUBLE PRECISION array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA SINGLE PRECISION array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value - = 1: an entry of the matrix A is greater than the SINGLE PRECISION overflow threshold, in this case, the content of SA on exit is unspecified. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlat2s_q( magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaFloat_ptr SA, magma_int_t ldsa, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } double rmax = (double)lapackf77_slamch("O"); dim3 threads( BLK_X, 1 ); dim3 grid( (n+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y ); cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0 if (uplo == MagmaLower) dlat2s_lower<<< grid, threads, 0, queue >>> (n, A, lda, SA, ldsa, rmax); else if (uplo == MagmaUpper) dlat2s_upper<<< grid, threads, 0, queue >>> (n, A, lda, SA, ldsa, rmax); cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag } /** @see magmablas_dlat2s_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlat2s( magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaFloat_ptr SA, magma_int_t ldsa, magma_int_t *info ) { magmablas_dlat2s_q( uplo, n, A, lda, SA, ldsa, info, magma_stream ); }
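dlat2s reports overflow through a file-scope __device__ int that is zeroed with cudaMemcpyToSymbol before the launch and read back with cudaMemcpyFromSymbol afterwards (the .hip twin above uses the hip* equivalents). The following self-contained sketch shows just that idiom, independent of MAGMA:

#include <cstdio>
#include <cuda_runtime.h>

static __device__ int d_flag = 0;

// Sets the flag if any element falls outside [-rmax, rmax] while converting
// double -> float; the racy write is benign because every writer stores 1.
__global__ void to_float_checked(int n, const double *x, float *y, double rmax) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    double v = x[i];
    if (v < -rmax || v > rmax) d_flag = 1;
    y[i] = (float)v;
  }
}

int main() {
  const int n = 1 << 16;
  double *x; float *y;
  cudaMalloc(&x, n * sizeof(double));
  cudaMalloc(&y, n * sizeof(float));
  cudaMemset(x, 0, n * sizeof(double));
  int info = 0;
  cudaMemcpyToSymbol(d_flag, &info, sizeof(info));        // flag = 0
  to_float_checked<<<(n + 255) / 256, 256>>>(n, x, y, 3.4e38);
  cudaMemcpyFromSymbol(&info, d_flag, sizeof(info));      // info = flag
  printf("overflow flag = %d\n", info);
  cudaFree(x);
  cudaFree(y);
  return 0;
}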
6c5181bcbe1ee09c26549c2a5170195257b5caab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <string> #include <stdio.h> #include <math.h> #include <vector> #include <time.h> using namespace std; __global__ void tryy(float *d_engrec,float *d_xrec,float *d_yrec, float *d_xx, float *d_yy, float *d_engg, float *d_inx, int blocks){ int is,il; int count2; int globalIndex= (blockIdx.x * blocks) + threadIdx.x; is= d_inx[globalIndex]; il= d_inx[globalIndex+1]; count2=is; for(int j=is;j<il;j++){ if((d_yy[j]<(d_yy[j+1]-1))||(d_yy[j]==d_yy[j+1]&& d_xx[j]<(d_xx[j+1]-2))){ d_xrec[count2]=d_xx[j]; d_yrec[count2]=d_yy[j]; d_engrec[count2]=d_engg[j]; count2++; } //================================================double else if (((d_yy[j]== d_yy[j+1]) && (d_xx[j+1]== (d_xx[j]+1)) && ((d_xx[j+2]>d_xx[j+1]+1))) || ((((d_yy[j+2]>d_yy[j+1]+1)) && ( ((d_yy[j+1]+1== d_yy[j+1]) && (d_xx[j]== (d_xx[j+1]))) )))) { d_engrec[count2]= d_engg[j]+ d_engg[j+1]; if(d_xx[j]== d_xx[j+1] ){ if(d_engg[j]> d_engg[j+1]){ d_xrec[count2]=d_xx[j]; d_yrec[count2]=d_yy[j]; } else{ d_xrec[count2]=d_xx[j+1]; d_yrec[count2]=d_yy[j+1]; }} if(d_yy[j]== d_yy[j+1]){ if(d_engg[j]> d_engg[j+1]){ d_yrec[count2]=d_yy[j]; d_xrec[count2]=d_xx[j]; } else{ d_yrec[count2]=d_yy[j+1]; d_xrec[count2]=d_xx[j+1]; }} j++; count2++; } ///========================================triple event recombination else if ((d_yy[j]==d_yy[j+2]&& d_yy[j+3]>(d_yy[j+2]+1))||( d_yy[j]==(d_yy[j+2]+1)&& d_xx[j]<(d_xx[j+3]+1))) { d_engrec[count2]= d_engg[j]+ d_engg[j+1]+ d_engg[j+2]; d_yrec[count2]=d_yy[j+2]; d_xrec[count2]=d_xx[j+2]; j++; j++; count2++; } ///==========================================quad event recombination else if(d_yy[j]== d_yy[j+1] && d_yy[j+2]== d_yy[j+3]/*&& y[o]<(y[o+4]-1)*/&&d_xx[j]==(d_xx[j+1]+1) ) { d_engrec[count2]= d_engg[j]+ d_engg[j+1]+ d_engg[j+2]+ d_engg[j+3]; d_yrec[count2]=d_yy[j+2]; d_xrec[count2]=d_xx[j+2]; // if ((eng.at(o)>eng.at(o+1))&&(eng.at(o)>eng.at(o+2))&&(eng.at(o)>eng.at(o+3))){ // x_rec.push_back(x.at(o)); // y_rec.push_back(y.at(o)); // } // if ((eng.at(o+1)>eng.at(o))&&(eng.at(o+1)>eng.at(o+2))&&(eng.at(o+1)>eng.at(o+3))){ // x_rec.push_back(x.at(o+1)); // y_rec.push_back(y.at(o+1)); // } // if ((eng.at(o+2)>eng.at(o+1))&&(eng.at(o+2)>eng.at(o))&&(eng.at(o+2)>eng.at(o+3))){ // x_rec.push_back(x.at(o+2)); // y_rec.push_back(y.at(o+2)); // } // if ((eng.at(o+3)>eng.at(o+1))&&(eng.at(o+3)>eng.at(o+2))&&(eng.at(o+3)>eng.at(o))){ // x_rec.push_back(x.at(o+3)); // y_rec.push_back(y.at(o+3)); // } //cout << "quad"<< endl; //cout << x_rec.at(o)<< y_rec.at(o)<<endl; j++; j++; j++; count2++; } }} int main(){ ifstream file( "c12_siegen_19072017_01", ios::in ); string part1,part2; string dd= "HT"; string dd2= "SF"; int num1, num2, num3; int numb=0 ; int subnumb=0 ; int nframe=0; int cou=0; if( !file ) cerr << "Cant open " << endl; while( file >> part1 ) { if (part1 == dd){ numb++; } if (part1 == dd2){ nframe++; } } //=========================================================================================================================== float frameIndexr[nframe+1];//x[numb], y[numb] , eng[numb], float *x= new float[numb]; float *y= new float[numb]; float *eng= new float[numb]; frameIndexr[0]=0; int cou2=1; int rf=1000; //cout<<"i am here "<< numb<<endl; ifstream file2( "c12_siegen_19072017_01", ios::in ); while( file2 >> part2 >> num1 >> num2>> num3 ) { if (cou2>rf)break; if (part2 == dd){ x[cou]= num1; y[cou]=num2; eng[cou]=num3; // cout<<eng[cou]<<endl; cou++; subnumb++; } if (part2 == dd2){ 
frameIndexr[cou2]=frameIndexr[cou2-1]+subnumb; //cout<<frameIndexr[cou2]<<endl; subnumb=0; cou2++; } } //=================================================================================== int sizeFrame=nframe+1; //cout<<" "<<sizeFrame<<" "<< nframe<<endl; //int x[numb],y[numb],eng[numb],frameIndex[sizeFrame]; // for (int i=0 ; i<numb ; i++){ // x[i]=xr[i]; // y[i]=yr[i]; // eng[i]=engr[i]; // } // int count=0; // for (int i2=1 ; i2<sizeFrame ; i2++){ // count=count+frameIndexr[i2-1]; // frameIndexr[i2]=count; // //cout<<frameIndex[i2]<<endl; // } const int data_bytes= numb* sizeof(float); //the required memory const int data_bytes_2= sizeFrame * sizeof(float); ///===========================Declaration=============================== // int h_engres[numb]; // CPU array for results // int h_xres[numb]; // int h_yres[numb]; //cout<<"i am here "<<endl; //=====================declaration of GPU float *d_yin; float *d_xin; float *d_engin; float *d_engres; float *d_xres; float *d_yres; float *d_ind; ///=================== allocate GPU mem=============== hipMalloc((void **) &d_engin, data_bytes); hipMalloc((void **) &d_engres, data_bytes); hipMalloc((void **) &d_xres, data_bytes); hipMalloc((void **) &d_yres, data_bytes); hipMalloc((void **) &d_xin, data_bytes); hipMalloc((void **) &d_yin, data_bytes); hipMalloc((void **) &d_ind, data_bytes_2); ///================== define number of blocks with constant 1024 threads per block=========== int nthreadsperblock=32; //number of threads per block int nblock; //number of blocks if(sizeFrame%nthreadsperblock == 0){ nblock=sizeFrame/nthreadsperblock; } else{nblock=(sizeFrame/nthreadsperblock)+1;} //cout<< nblock << " "<< nthreadsperblock<<endl; ///===================== copy the data to the GPU============= hipMemcpy(d_xin, x, data_bytes, hipMemcpyHostToDevice); hipMemcpy(d_yin, y, data_bytes, hipMemcpyHostToDevice); hipMemcpy(d_engin, eng, data_bytes, hipMemcpyHostToDevice); hipMemcpy(d_ind,frameIndexr, data_bytes_2, hipMemcpyHostToDevice); clock_t tG0=clock();hipLaunchKernelGGL(( tryy), dim3(nblock),dim3(nthreadsperblock), 0, 0, d_engres,d_xres,d_yres,d_xin,d_yin,d_engin,d_ind,nthreadsperblock); hipMemcpy(eng,d_engres, data_bytes, hipMemcpyDeviceToHost); hipMemcpy(x,d_xres, data_bytes, hipMemcpyDeviceToHost); hipMemcpy(y,d_yres, data_bytes, hipMemcpyDeviceToHost); clock_t tGf=clock(); int single=0; for (int i2=0 ; i2<numb ; i2++){ if(eng[i2]>0){ //cout<<eng[i2]<<" "<<x[i2]<<" "<<y[i2]<<endl; single++; }} ///=====================================================CPU================================================================================================= //========================================================================================================================================================== int frame[384][384]={{}}; int bg[384][384]={{}}; vector<int> xc; vector<int> yc; vector<int> engc; vector<int> x_rec; vector<int> y_rec; vector<int> eng_rec; clock_t t1=clock(); numb=0; nframe=0; int thres =50; ifstream file3( "c12_siegen_19072017_01", ios::in ); if( !file3 ){ cerr << "Cant open " << endl; } while( file3 >> part1 >> num1 >> num2>> num3 ) {if (nframe>rf)break; if (part1 == dd){ xc.push_back( num1); yc.push_back( num2); engc.push_back( num3); numb++;} if (part1 == dd2){ nframe++; for (int k2=0;k2<384;k2++){ for(int j2=0;j2<384;j2++){ frame[j2][k2]=0; }} ///================================starting recombination ====================================================================== for (int i=0;i<xc.size();i++)///filling the frame 
matrix { frame[xc[i]][yc[i]]=engc[i]; bg[xc[i]][yc[i]]=50; } for (int kk=1;kk<384;kk++){ for(int jj=1;jj<384;jj++){ int k= jj, j=kk; if (frame[j][k]>bg[j][k]){ ///================================single======================= if(frame[j+1][k]<bg[j+1][k] && frame[j][k+1]<bg[j][k+1] &&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j-1][k] ){ x_rec.push_back(j); y_rec.push_back(k); eng_rec.push_back(frame[j][k]); frame[j][k]=0; } ///================================double======================= /////==========horizontal double============================================ else if(frame[j+1][k]>bg[j+1][k] &&frame[j+2][k]<bg[j+2][k]&&frame[j][k+1]<bg[j][k+1] &&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j][k-1]&&frame[j+1][k+1]<bg[j+1][k+1]&&frame[j+1][k-1]<bg[j+1][k-1] ) { eng_rec.push_back((frame[j][k]+frame[j+1][k])); if(frame[j][k]>frame[j+1][k]){ x_rec.push_back(j); y_rec.push_back(k); } else{ x_rec.push_back(j+1); y_rec.push_back(k); } frame[j][k]=0; frame[j+1][k]=0;} ////===============================vertical double ======================================== else if(frame[j][k+1]>bg[j][k+1]&&frame[j+1][k]<bg[j+1][k] &&frame[j][k+2]<bg[j][k+2] && frame[j+1][k+1]<bg[j+1][k+1]&&frame[j-1][k]<bg[j-1][k]&&frame[j-1][k+1]<bg[j-1][k+1]&&frame[j][k-1]<bg[j][k-1]) { eng_rec.push_back((frame[j][k]+frame[j][k+1])); if(frame[j][k]>frame[j][k+1]){ x_rec.push_back(j); y_rec.push_back(k); } else{ x_rec.push_back(j); y_rec.push_back(k+1); } frame[j][k]=0; frame[j][k+1]=0;} ///================================quadrad======================= else if(frame[j+1][k]>bg[j+1][k]&&frame[j+1][k+1]>bg[j+1][k+1]&&frame[j][k+1]>bg[j][k+1]&&frame[j+2][k]<bg[j+2][k]&&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j][k-1]&&frame[j+1][k-1]<bg[j+1][k-1] && frame[j+2][k+1]<bg[j+2][k+1] && frame[j-1][k+1]<bg[j-1][k+1] && frame[j][k+2]<bg[j][k+2] && frame[j+1][k+2]<bg[j+1][k+2] ) { eng_rec.push_back((frame[j][k]+frame[j][k+1]+frame[j+1][k]+frame[j+1][k+1])); if(frame[j][k]>frame[j+1][k]&&frame[j][k]>frame[j][k+1]&&frame[j][k]>frame[j+1][k+1]){ x_rec.push_back(j); y_rec.push_back(k); } else if(frame[j+1][k]>frame[j][k]&&frame[j+1][k]>frame[j][k+1]&&frame[j+1][k]>frame[j+1][k+1]){ x_rec.push_back(j+1); y_rec.push_back(k);} else if(frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j+1][k]&&frame[j][k+1]>frame[j+1][k+1]){ x_rec.push_back(j); y_rec.push_back(k+1); } else{ x_rec.push_back(j+1); y_rec.push_back(k+1); } //cout<< frame[j][k]<<" "<<frame[j][k+1]<<" "<<frame[j+1][k]<<" "<<frame[j+1][k+1]<<endl; frame[j][k]=0; frame[j][k+1]=0; frame[j+1][k]=0; frame[j+1][k+1]=0; } //================================================================== ///================================triple L======================= else if(frame[j+1][k+1]>thres && frame[j][k+1]>thres &&frame[j+1][k]<thres&&frame[j][k+2]<thres&&frame[j+1][k+2]<thres&&frame[j][k-1]<thres&&frame[j-1][k]<thres&&frame[j-1][k+1]<thres&&frame[j+2][k+1]<thres&&frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j+1][k+1]) { eng_rec.push_back((frame[j][k]+frame[j][k+1]+frame[j+1][k+1])); x_rec.push_back(j); y_rec.push_back(k+1); frame[j][k]=0; frame[j][k+1]=0; frame[j+1][k+1]=0; } ///============================triple J======================================================== else if (frame[j-1][k+1]>thres && frame[j][k+1]>thres&&frame[j+1][k]<thres &&frame[j-1][k]<thres&&frame[j][k-1]<thres&&frame[j-2][k+1]<thres&&frame[j-1][k+2]<thres &&frame[j][k+2]<thres&&frame[j+1][k+1]<thres&&frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j-1][k+1] ) { 
eng_rec.push_back((frame[j][k]+frame[j-1][k+1]+frame[j][k+1])); x_rec.push_back(j); y_rec.push_back(k+1); frame[j][k]=0; frame[j-1][k+1]=0; frame[j][k+1]=0; } ///================================== triple F =================================== else if(frame[j][k+1]>thres &&frame[j+1][k]>thres&&frame[j+2][k]<thres &&frame[j][k+2]<thres&&frame[j+1][k+1]<thres&&frame[j][k-1]<thres&& frame[j+1][k-1]<thres&&frame[j-1][k]<thres&&frame[j-1][k+1]<thres&&frame[j][k]>frame[j+1][k]&&frame[j][k]>frame[j][k+1]) { eng_rec.push_back((frame[j][k]+frame[j+1][k]+frame[j][k+1])); x_rec.push_back(j); y_rec.push_back(k); frame[j][k]=0; frame[j][k+1]=0; frame[j+1][k]=0; } ///====================================== triple 7 ==================================================== else if(frame[j+1][k]>thres &&frame[j+1][k+1]>thres&&frame[j-1][k]<thres&&frame[j][k-1]<thres&&frame[j][k+1]<thres&&frame[j+1][k+2]<thres&&frame[j+1][k-1]<thres &&frame[j+2][k]<thres &&frame[j+2][k+1]<thres&&frame[j+1][k]>frame[j][k]&&frame[j+1][k]>frame[j+1][k+1] ) { eng_rec.push_back((frame[j][k]+frame[j+1][k+1]+frame[j+1][k])); x_rec.push_back(j+1); y_rec.push_back(k); frame[j][k]=0; frame[j+1][k]=0; frame[j+1][k+1]=0; } }}} xc.clear(); yc.clear(); engc.clear(); }} clock_t t=clock(); cout<<"The total number of frames= "<<nframe<<endl; cout<<"The total number of frames= "<<cou2<<endl; float gpu_time =((float)(tGf-tG0))/(CLOCKS_PER_SEC); printf ("The GPU (%f sec).\n",gpu_time); float cpu_time =((float)(t-t1))/(CLOCKS_PER_SEC); printf ("The CPU (%f sec).\n",cpu_time); float speed_up = (cpu_time/gpu_time)/75; printf ("SU (%f ).\n", ceil(speed_up)); hipFree(d_yin); hipFree(d_xin); hipFree(d_engin); hipFree(d_engres); hipFree(d_xres); hipFree(d_yres); hipFree(d_ind); delete[] x; delete[] y; delete[] eng; return 0 ; }
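The host code in the file above assigns one GPU thread per detector frame by building frameIndexr as a running sum of the per-frame hit counts, so thread f processes hits in the half-open range [frameIndexr[f], frameIndexr[f+1]). A small standalone sketch of that indexing scheme (with made-up hit counts, not the detector file format) follows:

#include <cstdio>
#include <vector>

int main() {
  // Hits per frame, as they would be counted while parsing the "SF" records.
  std::vector<int> hits_per_frame = {3, 0, 5, 2};

  // Exclusive prefix sum: frame_index[f] is the offset of frame f's first hit,
  // and frame_index.back() is the total number of hits.
  std::vector<int> frame_index(hits_per_frame.size() + 1, 0);
  for (std::size_t f = 0; f < hits_per_frame.size(); ++f)
    frame_index[f + 1] = frame_index[f] + hits_per_frame[f];

  for (std::size_t f = 0; f < hits_per_frame.size(); ++f)
    printf("frame %zu -> hits [%d, %d)\n", f, frame_index[f], frame_index[f + 1]);
  return 0;
}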
6c5181bcbe1ee09c26549c2a5170195257b5caab.cu
#include <iostream> #include <fstream> #include <string> #include <stdio.h> #include <math.h> #include <vector> #include <time.h> using namespace std; __global__ void tryy(float *d_engrec,float *d_xrec,float *d_yrec, float *d_xx, float *d_yy, float *d_engg, float *d_inx, int blocks){ int is,il; int count2; int globalIndex= (blockIdx.x * blocks) + threadIdx.x; is= d_inx[globalIndex]; il= d_inx[globalIndex+1]; count2=is; for(int j=is;j<il;j++){ if((d_yy[j]<(d_yy[j+1]-1))||(d_yy[j]==d_yy[j+1]&& d_xx[j]<(d_xx[j+1]-2))){ d_xrec[count2]=d_xx[j]; d_yrec[count2]=d_yy[j]; d_engrec[count2]=d_engg[j]; count2++; } //================================================double else if (((d_yy[j]== d_yy[j+1]) && (d_xx[j+1]== (d_xx[j]+1)) && ((d_xx[j+2]>d_xx[j+1]+1))) || ((((d_yy[j+2]>d_yy[j+1]+1)) && ( ((d_yy[j+1]+1== d_yy[j+1]) && (d_xx[j]== (d_xx[j+1]))) )))) { d_engrec[count2]= d_engg[j]+ d_engg[j+1]; if(d_xx[j]== d_xx[j+1] ){ if(d_engg[j]> d_engg[j+1]){ d_xrec[count2]=d_xx[j]; d_yrec[count2]=d_yy[j]; } else{ d_xrec[count2]=d_xx[j+1]; d_yrec[count2]=d_yy[j+1]; }} if(d_yy[j]== d_yy[j+1]){ if(d_engg[j]> d_engg[j+1]){ d_yrec[count2]=d_yy[j]; d_xrec[count2]=d_xx[j]; } else{ d_yrec[count2]=d_yy[j+1]; d_xrec[count2]=d_xx[j+1]; }} j++; count2++; } ///========================================triple event recombination else if ((d_yy[j]==d_yy[j+2]&& d_yy[j+3]>(d_yy[j+2]+1))||( d_yy[j]==(d_yy[j+2]+1)&& d_xx[j]<(d_xx[j+3]+1))) { d_engrec[count2]= d_engg[j]+ d_engg[j+1]+ d_engg[j+2]; d_yrec[count2]=d_yy[j+2]; d_xrec[count2]=d_xx[j+2]; j++; j++; count2++; } ///==========================================quad event recombination else if(d_yy[j]== d_yy[j+1] && d_yy[j+2]== d_yy[j+3]/*&& y[o]<(y[o+4]-1)*/&&d_xx[j]==(d_xx[j+1]+1) ) { d_engrec[count2]= d_engg[j]+ d_engg[j+1]+ d_engg[j+2]+ d_engg[j+3]; d_yrec[count2]=d_yy[j+2]; d_xrec[count2]=d_xx[j+2]; // if ((eng.at(o)>eng.at(o+1))&&(eng.at(o)>eng.at(o+2))&&(eng.at(o)>eng.at(o+3))){ // x_rec.push_back(x.at(o)); // y_rec.push_back(y.at(o)); // } // if ((eng.at(o+1)>eng.at(o))&&(eng.at(o+1)>eng.at(o+2))&&(eng.at(o+1)>eng.at(o+3))){ // x_rec.push_back(x.at(o+1)); // y_rec.push_back(y.at(o+1)); // } // if ((eng.at(o+2)>eng.at(o+1))&&(eng.at(o+2)>eng.at(o))&&(eng.at(o+2)>eng.at(o+3))){ // x_rec.push_back(x.at(o+2)); // y_rec.push_back(y.at(o+2)); // } // if ((eng.at(o+3)>eng.at(o+1))&&(eng.at(o+3)>eng.at(o+2))&&(eng.at(o+3)>eng.at(o))){ // x_rec.push_back(x.at(o+3)); // y_rec.push_back(y.at(o+3)); // } //cout << "quad"<< endl; //cout << x_rec.at(o)<< y_rec.at(o)<<endl; j++; j++; j++; count2++; } }} int main(){ ifstream file( "c12_siegen_19072017_01", ios::in ); string part1,part2; string dd= "HT"; string dd2= "SF"; int num1, num2, num3; int numb=0 ; int subnumb=0 ; int nframe=0; int cou=0; if( !file ) cerr << "Cant open " << endl; while( file >> part1 ) { if (part1 == dd){ numb++; } if (part1 == dd2){ nframe++; } } //=========================================================================================================================== float frameIndexr[nframe+1];//x[numb], y[numb] , eng[numb], float *x= new float[numb]; float *y= new float[numb]; float *eng= new float[numb]; frameIndexr[0]=0; int cou2=1; int rf=1000; //cout<<"i am here "<< numb<<endl; ifstream file2( "c12_siegen_19072017_01", ios::in ); while( file2 >> part2 >> num1 >> num2>> num3 ) { if (cou2>rf)break; if (part2 == dd){ x[cou]= num1; y[cou]=num2; eng[cou]=num3; // cout<<eng[cou]<<endl; cou++; subnumb++; } if (part2 == dd2){ frameIndexr[cou2]=frameIndexr[cou2-1]+subnumb; //cout<<frameIndexr[cou2]<<endl; 
subnumb=0; cou2++; } } //=================================================================================== int sizeFrame=nframe+1; //cout<<" "<<sizeFrame<<" "<< nframe<<endl; //int x[numb],y[numb],eng[numb],frameIndex[sizeFrame]; // for (int i=0 ; i<numb ; i++){ // x[i]=xr[i]; // y[i]=yr[i]; // eng[i]=engr[i]; // } // int count=0; // for (int i2=1 ; i2<sizeFrame ; i2++){ // count=count+frameIndexr[i2-1]; // frameIndexr[i2]=count; // //cout<<frameIndex[i2]<<endl; // } const int data_bytes= numb* sizeof(float); //the required memory const int data_bytes_2= sizeFrame * sizeof(float); ///===========================Declaration=============================== // int h_engres[numb]; // CPU array for results // int h_xres[numb]; // int h_yres[numb]; //cout<<"i am here "<<endl; //=====================declaration of GPU float *d_yin; float *d_xin; float *d_engin; float *d_engres; float *d_xres; float *d_yres; float *d_ind; ///=================== allocate GPU mem=============== cudaMalloc((void **) &d_engin, data_bytes); cudaMalloc((void **) &d_engres, data_bytes); cudaMalloc((void **) &d_xres, data_bytes); cudaMalloc((void **) &d_yres, data_bytes); cudaMalloc((void **) &d_xin, data_bytes); cudaMalloc((void **) &d_yin, data_bytes); cudaMalloc((void **) &d_ind, data_bytes_2); ///================== define number of blocks with constant 1024 threads per block=========== int nthreadsperblock=32; //number of threads per block int nblock; //number of blocks if(sizeFrame%nthreadsperblock == 0){ nblock=sizeFrame/nthreadsperblock; } else{nblock=(sizeFrame/nthreadsperblock)+1;} //cout<< nblock << " "<< nthreadsperblock<<endl; ///===================== copy the data to the GPU============= cudaMemcpy(d_xin, x, data_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_yin, y, data_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_engin, eng, data_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_ind,frameIndexr, data_bytes_2, cudaMemcpyHostToDevice); clock_t tG0=clock(); tryy<<<nblock,nthreadsperblock>>>(d_engres,d_xres,d_yres,d_xin,d_yin,d_engin,d_ind,nthreadsperblock); cudaMemcpy(eng,d_engres, data_bytes, cudaMemcpyDeviceToHost); cudaMemcpy(x,d_xres, data_bytes, cudaMemcpyDeviceToHost); cudaMemcpy(y,d_yres, data_bytes, cudaMemcpyDeviceToHost); clock_t tGf=clock(); int single=0; for (int i2=0 ; i2<numb ; i2++){ if(eng[i2]>0){ //cout<<eng[i2]<<" "<<x[i2]<<" "<<y[i2]<<endl; single++; }} ///=====================================================CPU================================================================================================= //========================================================================================================================================================== int frame[384][384]={{}}; int bg[384][384]={{}}; vector<int> xc; vector<int> yc; vector<int> engc; vector<int> x_rec; vector<int> y_rec; vector<int> eng_rec; clock_t t1=clock(); numb=0; nframe=0; int thres =50; ifstream file3( "c12_siegen_19072017_01", ios::in ); if( !file3 ){ cerr << "Cant open " << endl; } while( file3 >> part1 >> num1 >> num2>> num3 ) {if (nframe>rf)break; if (part1 == dd){ xc.push_back( num1); yc.push_back( num2); engc.push_back( num3); numb++;} if (part1 == dd2){ nframe++; for (int k2=0;k2<384;k2++){ for(int j2=0;j2<384;j2++){ frame[j2][k2]=0; }} ///================================starting recombination ====================================================================== for (int i=0;i<xc.size();i++)///filling the frame matrix { frame[xc[i]][yc[i]]=engc[i]; bg[xc[i]][yc[i]]=50; } for (int kk=1;kk<384;kk++){ for(int 
jj=1;jj<384;jj++){ int k= jj, j=kk; if (frame[j][k]>bg[j][k]){ ///================================single======================= if(frame[j+1][k]<bg[j+1][k] && frame[j][k+1]<bg[j][k+1] &&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j-1][k] ){ x_rec.push_back(j); y_rec.push_back(k); eng_rec.push_back(frame[j][k]); frame[j][k]=0; } ///================================double======================= /////==========horizontal double============================================ else if(frame[j+1][k]>bg[j+1][k] &&frame[j+2][k]<bg[j+2][k]&&frame[j][k+1]<bg[j][k+1] &&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j][k-1]&&frame[j+1][k+1]<bg[j+1][k+1]&&frame[j+1][k-1]<bg[j+1][k-1] ) { eng_rec.push_back((frame[j][k]+frame[j+1][k])); if(frame[j][k]>frame[j+1][k]){ x_rec.push_back(j); y_rec.push_back(k); } else{ x_rec.push_back(j+1); y_rec.push_back(k); } frame[j][k]=0; frame[j+1][k]=0;} ////===============================vertical double ======================================== else if(frame[j][k+1]>bg[j][k+1]&&frame[j+1][k]<bg[j+1][k] &&frame[j][k+2]<bg[j][k+2] && frame[j+1][k+1]<bg[j+1][k+1]&&frame[j-1][k]<bg[j-1][k]&&frame[j-1][k+1]<bg[j-1][k+1]&&frame[j][k-1]<bg[j][k-1]) { eng_rec.push_back((frame[j][k]+frame[j][k+1])); if(frame[j][k]>frame[j][k+1]){ x_rec.push_back(j); y_rec.push_back(k); } else{ x_rec.push_back(j); y_rec.push_back(k+1); } frame[j][k]=0; frame[j][k+1]=0;} ///================================quadrad======================= else if(frame[j+1][k]>bg[j+1][k]&&frame[j+1][k+1]>bg[j+1][k+1]&&frame[j][k+1]>bg[j][k+1]&&frame[j+2][k]<bg[j+2][k]&&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j][k-1]&&frame[j+1][k-1]<bg[j+1][k-1] && frame[j+2][k+1]<bg[j+2][k+1] && frame[j-1][k+1]<bg[j-1][k+1] && frame[j][k+2]<bg[j][k+2] && frame[j+1][k+2]<bg[j+1][k+2] ) { eng_rec.push_back((frame[j][k]+frame[j][k+1]+frame[j+1][k]+frame[j+1][k+1])); if(frame[j][k]>frame[j+1][k]&&frame[j][k]>frame[j][k+1]&&frame[j][k]>frame[j+1][k+1]){ x_rec.push_back(j); y_rec.push_back(k); } else if(frame[j+1][k]>frame[j][k]&&frame[j+1][k]>frame[j][k+1]&&frame[j+1][k]>frame[j+1][k+1]){ x_rec.push_back(j+1); y_rec.push_back(k);} else if(frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j+1][k]&&frame[j][k+1]>frame[j+1][k+1]){ x_rec.push_back(j); y_rec.push_back(k+1); } else{ x_rec.push_back(j+1); y_rec.push_back(k+1); } //cout<< frame[j][k]<<" "<<frame[j][k+1]<<" "<<frame[j+1][k]<<" "<<frame[j+1][k+1]<<endl; frame[j][k]=0; frame[j][k+1]=0; frame[j+1][k]=0; frame[j+1][k+1]=0; } //================================================================== ///================================triple L======================= else if(frame[j+1][k+1]>thres && frame[j][k+1]>thres &&frame[j+1][k]<thres&&frame[j][k+2]<thres&&frame[j+1][k+2]<thres&&frame[j][k-1]<thres&&frame[j-1][k]<thres&&frame[j-1][k+1]<thres&&frame[j+2][k+1]<thres&&frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j+1][k+1]) { eng_rec.push_back((frame[j][k]+frame[j][k+1]+frame[j+1][k+1])); x_rec.push_back(j); y_rec.push_back(k+1); frame[j][k]=0; frame[j][k+1]=0; frame[j+1][k+1]=0; } ///============================triple J======================================================== else if (frame[j-1][k+1]>thres && frame[j][k+1]>thres&&frame[j+1][k]<thres &&frame[j-1][k]<thres&&frame[j][k-1]<thres&&frame[j-2][k+1]<thres&&frame[j-1][k+2]<thres &&frame[j][k+2]<thres&&frame[j+1][k+1]<thres&&frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j-1][k+1] ) { eng_rec.push_back((frame[j][k]+frame[j-1][k+1]+frame[j][k+1])); x_rec.push_back(j); y_rec.push_back(k+1); frame[j][k]=0; frame[j-1][k+1]=0; frame[j][k+1]=0; } 
///================================== triple F =================================== else if(frame[j][k+1]>thres &&frame[j+1][k]>thres&&frame[j+2][k]<thres &&frame[j][k+2]<thres&&frame[j+1][k+1]<thres&&frame[j][k-1]<thres&& frame[j+1][k-1]<thres&&frame[j-1][k]<thres&&frame[j-1][k+1]<thres&&frame[j][k]>frame[j+1][k]&&frame[j][k]>frame[j][k+1]) { eng_rec.push_back((frame[j][k]+frame[j+1][k]+frame[j][k+1])); x_rec.push_back(j); y_rec.push_back(k); frame[j][k]=0; frame[j][k+1]=0; frame[j+1][k]=0; } ///====================================== triple 7 ==================================================== else if(frame[j+1][k]>thres &&frame[j+1][k+1]>thres&&frame[j-1][k]<thres&&frame[j][k-1]<thres&&frame[j][k+1]<thres&&frame[j+1][k+2]<thres&&frame[j+1][k-1]<thres &&frame[j+2][k]<thres &&frame[j+2][k+1]<thres&&frame[j+1][k]>frame[j][k]&&frame[j+1][k]>frame[j+1][k+1] ) { eng_rec.push_back((frame[j][k]+frame[j+1][k+1]+frame[j+1][k])); x_rec.push_back(j+1); y_rec.push_back(k); frame[j][k]=0; frame[j+1][k]=0; frame[j+1][k+1]=0; } }}} xc.clear(); yc.clear(); engc.clear(); }} clock_t t=clock(); cout<<"The total number of frames= "<<nframe<<endl; cout<<"The total number of frames= "<<cou2<<endl; float gpu_time =((float)(tGf-tG0))/(CLOCKS_PER_SEC); printf ("The GPU (%f sec).\n",gpu_time); float cpu_time =((float)(t-t1))/(CLOCKS_PER_SEC); printf ("The CPU (%f sec).\n",cpu_time); float speed_up = (cpu_time/gpu_time)/75; printf ("SU (%f ).\n", ceil(speed_up)); cudaFree(d_yin); cudaFree(d_xin); cudaFree(d_engin); cudaFree(d_engres); cudaFree(d_xres); cudaFree(d_yres); cudaFree(d_ind); delete[] x; delete[] y; delete[] eng; return 0 ; }
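In the file above, nblock is rounded up so that nblock * nthreadsperblock covers sizeFrame, which means the grid can contain more threads than there are frames when sizeFrame is not a multiple of 32. The standard way to handle that remainder is an explicit bounds guard on the global index, sketched below with a hypothetical per-frame kernel (not the tryy kernel itself):

#include <cstdio>
#include <cuda_runtime.h>

// Each thread owns one frame and reads its hit range from the index array.
__global__ void per_frame(const int *frame_index, int n_frames, int *out) {
  int f = blockIdx.x * blockDim.x + threadIdx.x;
  if (f >= n_frames) return;                            // guard for the rounded-up grid
  out[f] = frame_index[f + 1] - frame_index[f];         // e.g. number of hits in this frame
}

int main() {
  const int n_frames = 1000, threads = 32;
  const int blocks = (n_frames + threads - 1) / threads;  // round up, as in main() above

  int h_index[n_frames + 1];
  for (int f = 0; f <= n_frames; ++f) h_index[f] = 4 * f; // dummy data: 4 hits per frame

  int *d_index, *d_out;
  cudaMalloc(&d_index, (n_frames + 1) * sizeof(int));
  cudaMalloc(&d_out, n_frames * sizeof(int));
  cudaMemcpy(d_index, h_index, (n_frames + 1) * sizeof(int), cudaMemcpyHostToDevice);

  per_frame<<<blocks, threads>>>(d_index, n_frames, d_out);
  cudaDeviceSynchronize();

  int first = 0;
  cudaMemcpy(&first, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  printf("hits in frame 0: %d\n", first);
  cudaFree(d_index);
  cudaFree(d_out);
  return 0;
}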
dae34ab69e5e92af1457b2bcf1132a4351c24229.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_lists.hpp> #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/copy_if_else.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/scalar/scalar.hpp> #include <rmm/cuda_stream_view.hpp> template <typename T> struct CopyTest : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypesWithoutFixedPoint); #define wrapper cudf::test::fixed_width_column_wrapper TYPED_TEST(CopyTest, CopyIfElseTestShort) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 1}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 1, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 6, 6}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestManyNulls) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{{1, 0, 0, 0, 0, 0, 1}, {1, 1, 1, 1, 1, 1, 0}}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5}, {1, 1, 1, 1, 1, 1, 1}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1}); wrapper<T, int32_t> expected_w({5, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } struct copy_if_else_tiny_grid_functor { template <typename T, typename Filter, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr> std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, Filter filter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // output std::unique_ptr<cudf::column> out = cudf::allocate_like(lhs, lhs.size(), cudf::mask_allocation_policy::RETAIN, mr); // device views auto lhs_view = cudf::column_device_view::create(lhs); auto rhs_view = cudf::column_device_view::create(rhs); auto lhs_iter = cudf::detail::make_pair_iterator<T>(*lhs_view); auto rhs_iter = cudf::detail::make_pair_iterator<T>(*rhs_view); auto out_dv = cudf::mutable_column_device_view::create(*out); // call the kernel with an artificially small grid hipLaunchKernelGGL(( cudf::detail::copy_if_else_kernel<32, T, decltype(lhs_iter), decltype(rhs_iter), Filter, false>) , dim3(1), dim3(32), 0, stream.value(), lhs_iter, rhs_iter, filter, *out_dv, nullptr); return out; } template <typename T, typename Filter, std::enable_if_t<not cudf::is_fixed_width<T>()>* = nullptr> std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, Filter filter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { 
CUDF_FAIL("Unexpected test execution"); } }; std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& boolean_mask) { auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask); cudf::column_device_view bool_mask_device = *bool_mask_device_p; auto filter = [bool_mask_device] __device__(cudf::size_type i) { return bool_mask_device.element<bool>(i); }; return cudf::type_dispatcher(lhs.type(), copy_if_else_tiny_grid_functor{}, lhs, rhs, filter, rmm::cuda_stream_default, rmm::mr::get_current_device_resource()); } TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid) { using T = TypeParam; // make sure we span at least 2 warps int num_els = 64; bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestLong) { using T = TypeParam; // make sure we span at least 2 warps int num_els = 64; bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); bool lhs_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, lhs_v); bool rhs_v[] = {1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}, rhs_v); bool exp_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, exp_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, 
mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{}; wrapper<T> lhs_w{}; wrapper<T> rhs_w{}; wrapper<T> expected_w{}; auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 0}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity2) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 1, 1, 0}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity3) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 1}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity4) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseBadInputLength) { using T = TypeParam; // mask length mismatch { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } // column length mismatch { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } } template <typename T> struct CopyTestNumeric : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTestNumeric, cudf::test::NumericTypes); TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarColumn) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); cudf::numeric_scalar<T> lhs_w(5); const auto rhs = cudf::test::make_type_param_vector<T>({6, 6, 6, 6}); bool rhs_v[] = {1, 0, 1, 1}; wrapper<T> rhs_w(rhs.begin(), rhs.end(), rhs_v); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5}); wrapper<T> expected_w(expected.begin(), expected.end(), rhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestNumeric, CopyIfElseTestColumnScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; bool mask_v[] = {1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 
num_els, mask_v); const auto lhs = cudf::test::make_type_param_vector<T>({5, 5, 5, 5}); bool lhs_v[] = {0, 1, 1, 1}; wrapper<T> lhs_w(lhs.begin(), lhs.end(), lhs_v); cudf::numeric_scalar<T> rhs_w(6); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 6}); wrapper<T> expected_w(expected.begin(), expected.end(), lhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); cudf::numeric_scalar<T> lhs_w(5); cudf::numeric_scalar<T> rhs_w(6, false); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5}); wrapper<T> expected_w(expected.begin(), expected.end(), mask); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } template <typename T> struct create_chrono_scalar { template <typename ChronoT = T, typename... Args> typename std::enable_if_t< std::is_same<typename cudf::is_timestamp_t<ChronoT>::type, std::true_type>::value, cudf::timestamp_scalar<ChronoT>> operator()(Args&&... args) const { return cudf::timestamp_scalar<T>(std::forward<Args>(args)...); } template <typename ChronoT = T, typename... Args> typename std::enable_if_t< std::is_same<typename cudf::is_duration_t<ChronoT>::type, std::true_type>::value, cudf::duration_scalar<ChronoT>> operator()(Args&&... args) const { return cudf::duration_scalar<T>(std::forward<Args>(args)...); } }; template <typename T> struct CopyTestChrono : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTestChrono, cudf::test::ChronoTypes); TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarColumn) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true); bool rhs_v[] = {1, 0, 1, 1}; wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, rhs_v); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, rhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestChrono, CopyIfElseTestColumnScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); bool lhs_v[] = {0, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, lhs_v); auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), true); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, lhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true); auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), false); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, mask); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } struct CopyTestUntyped : public cudf::test::BaseFixture { }; TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch) { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1}; wrapper<float> lhs_w{5, 5, 5, 
5}; wrapper<int32_t> rhs_w{6, 6, 6, 6}; EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } struct StringsCopyIfElseTest : public cudf::test::BaseFixture { }; TEST_F(StringsCopyIfElseTest, CopyIfElse) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_strings1{"eee", "bb", "", "aa", "bbb", ""}; cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), valids); std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {1, 1, 0, 1, 0, 1}; bool mask_v[] = {1, 1, 1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v); auto results = cudf::copy_if_else(strings1, strings2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx) { if (mask[idx] and mask_v[idx]) h_expected.push_back(h_strings1[idx]); else h_expected.push_back(h_strings2[idx]); } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseScalarColumn) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar strings1{h_string1[0]}; std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {1, 0, 1, 0, 1, 0}; bool mask_v[] = {1, 1, 1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v); auto results = cudf::copy_if_else(strings1, strings2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) { if (mask[idx] and mask_v[idx]) { h_expected.push_back(h_string1[0]); } else { h_expected.push_back(h_strings2[idx]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseColumnScalar) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar strings1{h_string1[0]}; std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {0, 1, 1, 1, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6); auto results = cudf::copy_if_else(strings2, strings1, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) { if (mask[idx]) { h_expected.push_back(h_strings2[idx]); } else { h_expected.push_back(h_string1[0]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseScalarScalar) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? 
true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar string1{h_string1[0]}; std::vector<const char*> h_string2{"aaa"}; cudf::string_scalar string2{h_string2[0], false}; constexpr cudf::size_type mask_size = 6; bool mask[] = {1, 0, 1, 0, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + mask_size); auto results = cudf::copy_if_else(string1, string2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(mask_size); ++idx) { if (mask[idx]) { h_expected.push_back(h_string1[0]); } else { h_expected.push_back(h_string2[0]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } template <typename T> struct FixedPointTypes : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(FixedPointTypes, cudf::test::FixedPointTypes); TYPED_TEST(FixedPointTypes, FixedPointSimple) { using namespace numeric; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0}; auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}}; auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-2}}; auto const expected = fp_wrapper{{0, 220, 330, 440, 0, 0}, scale_type{-2}}; auto const result = cudf::copy_if_else(a, b, mask); CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } TYPED_TEST(FixedPointTypes, FixedPointLarge) { using namespace numeric; using namespace cudf::test; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto a = thrust::make_counting_iterator(-1000); auto b = thrust::make_constant_iterator(0); auto m = make_counting_transform_iterator(-1000, [](int i) { return i > 0; }); auto e = make_counting_transform_iterator(-1000, [](int i) { return ::max(0, i); }); auto const mask = cudf::test::fixed_width_column_wrapper<bool>(m, m + 2000); auto const A = fp_wrapper{a, a + 2000, scale_type{-3}}; auto const B = fp_wrapper{b, b + 2000, scale_type{-3}}; auto const expected = fp_wrapper{e, e + 2000, scale_type{-3}}; auto const result = cudf::copy_if_else(A, B, mask); CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } TYPED_TEST(FixedPointTypes, FixedPointScaleMismatch) { using namespace numeric; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0}; auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}}; auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-1}}; EXPECT_THROW(cudf::copy_if_else(a, b, mask), cudf::logic_error); }
dae34ab69e5e92af1457b2bcf1132a4351c24229.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_lists.hpp> #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/copy_if_else.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/scalar/scalar.hpp> #include <rmm/cuda_stream_view.hpp> template <typename T> struct CopyTest : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypesWithoutFixedPoint); #define wrapper cudf::test::fixed_width_column_wrapper TYPED_TEST(CopyTest, CopyIfElseTestShort) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 1}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 1, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 6, 6}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestManyNulls) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{{1, 0, 0, 0, 0, 0, 1}, {1, 1, 1, 1, 1, 1, 0}}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5}, {1, 1, 1, 1, 1, 1, 1}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1}); wrapper<T, int32_t> expected_w({5, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } struct copy_if_else_tiny_grid_functor { template <typename T, typename Filter, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr> std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, Filter filter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // output std::unique_ptr<cudf::column> out = cudf::allocate_like(lhs, lhs.size(), cudf::mask_allocation_policy::RETAIN, mr); // device views auto lhs_view = cudf::column_device_view::create(lhs); auto rhs_view = cudf::column_device_view::create(rhs); auto lhs_iter = cudf::detail::make_pair_iterator<T>(*lhs_view); auto rhs_iter = cudf::detail::make_pair_iterator<T>(*rhs_view); auto out_dv = cudf::mutable_column_device_view::create(*out); // call the kernel with an artificially small grid cudf::detail::copy_if_else_kernel<32, T, decltype(lhs_iter), decltype(rhs_iter), Filter, false> <<<1, 32, 0, stream.value()>>>(lhs_iter, rhs_iter, filter, *out_dv, nullptr); return out; } template <typename T, typename Filter, std::enable_if_t<not cudf::is_fixed_width<T>()>* = nullptr> std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, Filter filter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL("Unexpected test execution"); } }; std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs, 
cudf::column_view const& rhs, cudf::column_view const& boolean_mask) { auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask); cudf::column_device_view bool_mask_device = *bool_mask_device_p; auto filter = [bool_mask_device] __device__(cudf::size_type i) { return bool_mask_device.element<bool>(i); }; return cudf::type_dispatcher(lhs.type(), copy_if_else_tiny_grid_functor{}, lhs, rhs, filter, rmm::cuda_stream_default, rmm::mr::get_current_device_resource()); } TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid) { using T = TypeParam; // make sure we span at least 2 warps int num_els = 64; bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestLong) { using T = TypeParam; // make sure we span at least 2 warps int num_els = 64; bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); bool lhs_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, lhs_v); bool rhs_v[] = {1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}, rhs_v); bool exp_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, exp_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs) { using 
T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{}; wrapper<T> lhs_w{}; wrapper<T> rhs_w{}; wrapper<T> expected_w{}; auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 0}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity2) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 1, 1, 0}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity3) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 1}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity4) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseBadInputLength) { using T = TypeParam; // mask length mismatch { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } // column length mismatch { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } } template <typename T> struct CopyTestNumeric : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTestNumeric, cudf::test::NumericTypes); TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarColumn) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); cudf::numeric_scalar<T> lhs_w(5); const auto rhs = cudf::test::make_type_param_vector<T>({6, 6, 6, 6}); bool rhs_v[] = {1, 0, 1, 1}; wrapper<T> rhs_w(rhs.begin(), rhs.end(), rhs_v); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5}); wrapper<T> expected_w(expected.begin(), expected.end(), rhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestNumeric, CopyIfElseTestColumnScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; bool mask_v[] = {1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els, mask_v); const auto lhs = cudf::test::make_type_param_vector<T>({5, 5, 5, 5}); bool lhs_v[] = {0, 1, 1, 1}; wrapper<T> 
lhs_w(lhs.begin(), lhs.end(), lhs_v); cudf::numeric_scalar<T> rhs_w(6); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 6}); wrapper<T> expected_w(expected.begin(), expected.end(), lhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); cudf::numeric_scalar<T> lhs_w(5); cudf::numeric_scalar<T> rhs_w(6, false); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5}); wrapper<T> expected_w(expected.begin(), expected.end(), mask); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } template <typename T> struct create_chrono_scalar { template <typename ChronoT = T, typename... Args> typename std::enable_if_t< std::is_same<typename cudf::is_timestamp_t<ChronoT>::type, std::true_type>::value, cudf::timestamp_scalar<ChronoT>> operator()(Args&&... args) const { return cudf::timestamp_scalar<T>(std::forward<Args>(args)...); } template <typename ChronoT = T, typename... Args> typename std::enable_if_t< std::is_same<typename cudf::is_duration_t<ChronoT>::type, std::true_type>::value, cudf::duration_scalar<ChronoT>> operator()(Args&&... args) const { return cudf::duration_scalar<T>(std::forward<Args>(args)...); } }; template <typename T> struct CopyTestChrono : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTestChrono, cudf::test::ChronoTypes); TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarColumn) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true); bool rhs_v[] = {1, 0, 1, 1}; wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, rhs_v); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, rhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestChrono, CopyIfElseTestColumnScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); bool lhs_v[] = {0, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, lhs_v); auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), true); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, lhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true); auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), false); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, mask); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } struct CopyTestUntyped : public cudf::test::BaseFixture { }; TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch) { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1}; wrapper<float> lhs_w{5, 5, 5, 5}; wrapper<int32_t> rhs_w{6, 6, 6, 6}; EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } struct 
StringsCopyIfElseTest : public cudf::test::BaseFixture { }; TEST_F(StringsCopyIfElseTest, CopyIfElse) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_strings1{"eee", "bb", "", "aa", "bbb", "ééé"}; cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), valids); std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {1, 1, 0, 1, 0, 1}; bool mask_v[] = {1, 1, 1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v); auto results = cudf::copy_if_else(strings1, strings2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx) { if (mask[idx] and mask_v[idx]) h_expected.push_back(h_strings1[idx]); else h_expected.push_back(h_strings2[idx]); } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseScalarColumn) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar strings1{h_string1[0]}; std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {1, 0, 1, 0, 1, 0}; bool mask_v[] = {1, 1, 1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v); auto results = cudf::copy_if_else(strings1, strings2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) { if (mask[idx] and mask_v[idx]) { h_expected.push_back(h_string1[0]); } else { h_expected.push_back(h_strings2[idx]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseColumnScalar) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar strings1{h_string1[0]}; std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {0, 1, 1, 1, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6); auto results = cudf::copy_if_else(strings2, strings1, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) { if (mask[idx]) { h_expected.push_back(h_strings2[idx]); } else { h_expected.push_back(h_string1[0]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseScalarScalar) { auto valids = cudf::test::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? 
true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar string1{h_string1[0]}; std::vector<const char*> h_string2{"aaa"}; cudf::string_scalar string2{h_string2[0], false}; constexpr cudf::size_type mask_size = 6; bool mask[] = {1, 0, 1, 0, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + mask_size); auto results = cudf::copy_if_else(string1, string2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(mask_size); ++idx) { if (mask[idx]) { h_expected.push_back(h_string1[0]); } else { h_expected.push_back(h_string2[0]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } template <typename T> struct FixedPointTypes : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(FixedPointTypes, cudf::test::FixedPointTypes); TYPED_TEST(FixedPointTypes, FixedPointSimple) { using namespace numeric; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0}; auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}}; auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-2}}; auto const expected = fp_wrapper{{0, 220, 330, 440, 0, 0}, scale_type{-2}}; auto const result = cudf::copy_if_else(a, b, mask); CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } TYPED_TEST(FixedPointTypes, FixedPointLarge) { using namespace numeric; using namespace cudf::test; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto a = thrust::make_counting_iterator(-1000); auto b = thrust::make_constant_iterator(0); auto m = make_counting_transform_iterator(-1000, [](int i) { return i > 0; }); auto e = make_counting_transform_iterator(-1000, [](int i) { return std::max(0, i); }); auto const mask = cudf::test::fixed_width_column_wrapper<bool>(m, m + 2000); auto const A = fp_wrapper{a, a + 2000, scale_type{-3}}; auto const B = fp_wrapper{b, b + 2000, scale_type{-3}}; auto const expected = fp_wrapper{e, e + 2000, scale_type{-3}}; auto const result = cudf::copy_if_else(A, B, mask); CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } TYPED_TEST(FixedPointTypes, FixedPointScaleMismatch) { using namespace numeric; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0}; auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}}; auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-1}}; EXPECT_THROW(cudf::copy_if_else(a, b, mask), cudf::logic_error); }
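The tests above exercise every overload of cudf::copy_if_else (column/column, scalar/column, column/scalar, scalar/scalar, strings, and fixed-point). As a minimal sketch of the call pattern outside the GTest fixtures, using only headers and helpers already included in the file above: element i of the output comes from lhs where the boolean mask is true and from rhs otherwise, and mismatched sizes or types throw cudf::logic_error.

#include <cudf/copying.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <memory>

// Minimal usage sketch (not part of the test suite): picks 5 where the mask is
// true and 6 where it is false, producing {5, 6, 6, 5}.
std::unique_ptr<cudf::column> copy_if_else_example()
{
  cudf::test::fixed_width_column_wrapper<bool> mask{1, 0, 0, 1};
  cudf::test::fixed_width_column_wrapper<int32_t> lhs{5, 5, 5, 5};
  cudf::test::fixed_width_column_wrapper<int32_t> rhs{6, 6, 6, 6};
  // The wrappers convert implicitly to cudf::column_view; the result is a new column.
  return cudf::copy_if_else(lhs, rhs, mask);
}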
LinearAlgebraKernels.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LinearAlgebraKernels.cuh"

__global__ void transpose_kernel(double* src, double* dest, size_t src_M, size_t src_N) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r < src_M && c < src_N) {
        auto srcInd = columnMajorZeroIndex(r, c, src_M, src_N);
        auto destInd = columnMajorZeroIndex(c, r, src_N, src_M);
        dest[destInd] = src[srcInd];
    }
};

__global__ void find_column_maxes_kernel(double* src, double* dest, size_t src_M, size_t src_N) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < src_N) {
        double* cell = dest + columnMajorZeroIndex(0, c, 1, src_N);
        // Track the largest absolute value in the column. Start from 0.0 (not -DBL_MAX,
        // whose absolute value would dominate every comparison) and advance the row index.
        *cell = 0.0;
        for (size_t r = 0; r < src_M; ++r) {
            double* src_val = src + columnMajorZeroIndex(r, c, src_M, src_N);
            *cell = fmax(fabs(*src_val), fabs(*cell));
        }
    }
}

__global__ void column_normalize_kernel(double* matrix, double* scaling, size_t mat_M, size_t mat_N) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r < mat_M && c < mat_N) {
        auto mat_ind = columnMajorZeroIndex(r, c, mat_M, mat_N);
        auto scale_ind = columnMajorZeroIndex(0, c, 1, mat_N);
        matrix[mat_ind] = matrix[mat_ind] / scaling[scale_ind];
    }
}

__global__ void set_ones_kernel(double* src, size_t N) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < N) {
        src[c] = 1.0;
    }
}

__device__ size_t lookup_ind(size_t* src, size_t sz, size_t lookup_ind) {
    size_t start = 0, end = sz - 1;
    while (start < end - 1) {
        size_t mid = (start + end) / 2;
        if (src[mid] < lookup_ind) {
            end = mid;
        } else if (src[mid] > lookup_ind) {
            start = mid;
        } else {
            return mid;
        }
    }
    return start;
}

__global__ void get_matrix_squared_kernel(double* matrix, size_t M, size_t N, double* matrix_squared, size_t* lookup) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    int width = (N * N + N) / 2;
    if (r < M && c < width) {
        size_t right_col_ind = width - 1 - c;
        size_t primary_col_ind = lookup_ind(lookup, N, right_col_ind);
        size_t secondary_col_ind = lookup[primary_col_ind] - right_col_ind;
        size_t mat_sq_ind = columnMajorZeroIndex(r, c, M, width),
               primary_ind = columnMajorZeroIndex(r, primary_col_ind, M, N),
               secondary_ind = columnMajorZeroIndex(r, secondary_col_ind, M, N);
        matrix_squared[mat_sq_ind] = matrix[primary_ind] * matrix[secondary_ind];
    }
}
LinearAlgebraKernels.cu
#include "LinearAlgebraKernels.cuh" __global__ void transpose_kernel(double* src, double* dest, size_t src_M, size_t src_N) { int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; if (r < src_M && c < src_N) { auto srcInd = columnMajorZeroIndex(r, c, src_M, src_N); auto destInd = columnMajorZeroIndex(c, r, src_N, src_M); dest[destInd] = src[srcInd]; } }; __global__ void find_column_maxes_kernel(double* src, double* dest, size_t src_M, size_t src_N) { int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < src_N) { double* cell = dest + columnMajorZeroIndex(0, c, 1, src_N); *cell = -DBL_MAX; for (size_t r = 0; r < src_M; ++src_M) { double* src_val = src + columnMajorZeroIndex(r, c, src_M, src_N); *cell = fmax(fabs(*src_val), fabs(*cell)); } } } __global__ void column_normalize_kernel(double* matrix, double* scaling, size_t mat_M, size_t mat_N) { int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; if (r < mat_M && c < mat_N) { auto mat_ind = columnMajorZeroIndex(r, c, mat_M, mat_N); auto scale_ind = columnMajorZeroIndex(0, c, 1, mat_N); matrix[mat_ind] = matrix[mat_ind] / scaling[scale_ind]; } } __global__ void set_ones_kernel(double* src, size_t N) { int c = blockIdx.x * blockDim.x + threadIdx.x; if (c < N) { src[c] = 1.0; } } __device__ size_t lookup_ind(size_t* src, size_t sz, size_t lookup_ind) { size_t start = 0, end = sz - 1; while (start < end - 1) { size_t mid = (start + end) / 2; if (src[mid] < lookup_ind) { end = mid; } else if (src[mid] > lookup_ind) { start = mid; } else { return mid; } } return start; } __global__ void get_matrix_squared_kernel(double* matrix, size_t M, size_t N, double* matrix_squared, size_t* lookup) { int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; int width = (N * N + N) / 2; if (r < M && c < width) { size_t right_col_ind = width - 1 - c; size_t primary_col_ind = lookup_ind(lookup, N, right_col_ind); size_t secondary_col_ind = lookup[primary_col_ind] - right_col_ind; size_t mat_sq_ind = columnMajorZeroIndex(r, c, M, width), primary_ind = columnMajorZeroIndex(r, primary_col_ind, M, N), secondary_ind = columnMajorZeroIndex(r, secondary_col_ind, M, N); matrix_squared[mat_sq_ind] = matrix[primary_ind] * matrix[secondary_ind]; } }
e554a47c721d8dee14f78647f281958e19ecaf24.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHReduceApplyUtils.cuh> #include <assert.h> #include <stdlib.h> // Maximum size per grid dimension that we assume (compute capability >= 2.0) #define MAX_GRID_SIZE 65535LL void THCCheckTensorDims(THCState* state, THCudaTensor* tensor, int arg) { int64_t dims = THCudaTensor_nDimensionLegacyAll(state, tensor); THArgCheck(dims <= MAX_CUTORCH_DIMS, arg, CUTORCH_DIM_WARNING); } bool THC_getGridFromTiles(ptrdiff_t gridTiles, dim3& grid) { if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) { return false; } int64_t gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; int64_t gridY = 1; int64_t gridZ = 1; if (gridTiles > MAX_GRID_SIZE) { gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE); gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; if (gridTiles > MAX_GRID_SIZE) { gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE); gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; } } grid = dim3(gridX, gridY, gridZ); return true; }
e554a47c721d8dee14f78647f281958e19ecaf24.cu
#include <THC/THCReduceApplyUtils.cuh> #include <assert.h> #include <stdlib.h> // Maximum size per grid dimension that we assume (compute capability >= 2.0) #define MAX_GRID_SIZE 65535LL void THCCheckTensorDims(THCState* state, THCudaTensor* tensor, int arg) { int64_t dims = THCudaTensor_nDimensionLegacyAll(state, tensor); THArgCheck(dims <= MAX_CUTORCH_DIMS, arg, CUTORCH_DIM_WARNING); } bool THC_getGridFromTiles(ptrdiff_t gridTiles, dim3& grid) { if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) { return false; } int64_t gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; int64_t gridY = 1; int64_t gridZ = 1; if (gridTiles > MAX_GRID_SIZE) { gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE); gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; if (gridTiles > MAX_GRID_SIZE) { gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE); gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; } } grid = dim3(gridX, gridY, gridZ); return true; }
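THC_getGridFromTiles folds an arbitrarily large tile count into at most three grid dimensions, each capped at MAX_GRID_SIZE, and reports failure only beyond MAX_GRID_SIZE^3 tiles. The sketch below is an illustration of calling it, not part of the original file; for example, 100000 tiles with MAX_GRID_SIZE = 65535 come back as grid = (65535, 2, 1), i.e. 131070 blocks, and the kernel is then expected to bounds-check its linear block id against the real tile count.

// Illustrative caller; THC_getGridFromTiles and dim3 come from the includes above.
void grid_from_tiles_example(ptrdiff_t totalTiles)
{
    dim3 grid;
    if (!THC_getGridFromTiles(totalTiles, grid)) {
        return;  // more than MAX_GRID_SIZE^3 tiles: the caller must split the work
    }
    // launch a kernel with <<<grid, block>>> here, guarding against overshoot inside it
}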
796d7676369ffcb9e354c54b89cf1fb43b8ac01e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <limits.h> //#include <hip/hip_runtime.h> #include <omp.h> #include <time.h> #include "config.h" #include "cuda-util.h" #include "mtgp-1.1/mtgp32-cuda.h" #include "billionga.h" struct termination_criteria { int max_iteration_count; }; inline void termination_criteria_init(struct termination_criteria *term_state, int max_iteration_count) { term_state->max_iteration_count = max_iteration_count; } inline int termination_criteria_eval(struct termination_criteria *term_state, struct bga_state *problem_state, int iteration_count, float fitness_sample_avg) { return ((iteration_count == term_state->max_iteration_count) || (fitness_sample_avg >= problem_state->max_prob_sum)); } void timming_start(timespec &ts) { clock_gettime(CLOCK_REALTIME, &ts); } double timming_end(timespec &ts) { timespec ts_end; clock_gettime(CLOCK_REALTIME, &ts_end); double elapsed; elapsed = ((ts_end.tv_sec - ts.tv_sec) * 1000000.0) + ((ts_end.tv_nsec - ts.tv_nsec) / 1000.0); return elapsed / 1000; } int main(int argc, char **argv) { double cputime; float gputime; #if defined(MACRO_TIMMING) timespec full_start; timming_start(full_start); #endif timespec stats_timer; timming_start(stats_timer); if (argc != 5) { fprintf(stdout, "Wrong! RFM!\n\nUsage: %s <problem size> <max iteration> <prng vector size> <gpu device>\n(where 1 <= problem size <= %ld and problem_size can be divided by 8)\n\n", argv[0], LONG_MAX); return EXIT_FAILURE; } #if defined(INFO) || defined(DEBUG) fprintf(stdout, "[INFO] === Starting... ===============================\n"); #endif #if defined(MACRO_TIMMING) timespec init1_start; timming_start(init1_start); #endif long problem_size; problem_size = atol(argv[1]); int max_iteration_count = atoi(argv[2]); struct termination_criteria term_state; termination_criteria_init(&term_state, max_iteration_count); fprintf(stdout, "[INFO] Cantidad de iteraciones %d.\n", max_iteration_count); // === GPU. int number_gpus = 0; hipGetDeviceCount(&number_gpus); if (number_gpus < 1) { fprintf(stderr, "[ERROR] No CUDA capable devices were detected.\n"); exit(EXIT_FAILURE); } int starting_gpu_device = atoi(argv[4]); assert(starting_gpu_device >= 0 && starting_gpu_device < number_gpus); #if defined(MACRO_TIMMING) cputime = timming_end(full_start); fprintf(stdout, "[TIME] Init (1) processing time: %f (microseconds)\n", cputime); #endif // === PRNG. int prng_vector_size = atoi(argv[3]); unsigned int prng_seeds[4] = {3822712292, 495793398, 4202624243, 3503457871}; // generated with: od -vAn -N4 -tu4 < /dev/urandom // === OpenMP int nthreads = omp_get_max_threads(); //omp_get_num_threads(); //#if defined(INFO) || defined(DEBUG) fprintf(stdout, "[INFO] Number of threads %d.\n", nthreads); //#endif //assert(nthreads <= 4); assert(nthreads <= number_gpus); // === Inicializacin del cGA struct bga_state problem_state; bga_initialization(&problem_state, problem_size, nthreads, NUMBER_OF_SAMPLES); #pragma omp parallel // private(th_id) { int current_iteration = 0; int th_id = omp_get_thread_num(); int th_device = (starting_gpu_device + th_id) % number_gpus; ccudaSetDevice(th_device); #if defined(MACRO_TIMMING) hipEvent_t start; hipEvent_t end; ccudaEventCreate(&start); ccudaEventCreate(&end); ccudaEventRecord(start, 0); #endif #if defined(INFO) || defined(DEBUG) fprintf(stdout, "[INFO] Thread %d using device %d.\n", th_id, th_device); #endif assert(omp_get_num_threads() == nthreads); // === Inicializacin del Mersenne Twister. 
mtgp32_status mt_status; mtgp32_initialize(&mt_status, prng_vector_size, prng_seeds[th_id]); // === Inicializacin del BillionGA. bga_initialize_thread(&problem_state, th_id); #if defined(MACRO_TIMMING) ccudaEventRecord(end, 0); ccudaEventSynchronize(end); ccudaEventElapsedTime(&gputime, start, end); fprintf(stdout, "[TIME] Init (2) processing time: %f (ms)\n", gputime); #endif #if defined(DEBUG) #pragma omp barrier if (th_id == 0) bga_show_prob_vector_state(&problem_state); #pragma omp barrier #endif long current_acc_prob = 0; long aux; float fitness_sample_avg = 0, probability_avg = 0, avg_fitness_porcentage = 0; long fitness_sample_a = 0, fitness_sample_b = 0; fprintf(stdout, "iter"); #if defined(DEBUG) fprintf(stdout, ",avg. prob., abs. value,abs. improv."); #endif fprintf(stdout, ",f1,f2,avg f1 f2"); #if defined(DEBUG) fprintf(stdout, ",gt 75,gt 50,lt 50,lt 25"); #endif fprintf(stdout, ",time\n"); while (!termination_criteria_eval(&term_state, &problem_state, current_iteration, fitness_sample_avg)) { int display_stats; display_stats = current_iteration % SHOW_UPDATE_EVERY; if (th_id == 0) { if (display_stats == 0) { fprintf(stdout, "%d", current_iteration); #if defined(DEBUG) aux = bga_get_full_accumulated_prob(&problem_state); probability_avg = (float)(aux * 100.0 / problem_state.max_prob_sum); fprintf(stdout, ",%.4f", probability_avg); fprintf(stdout, ",%ld", aux); fprintf(stdout, ",%ld", aux - current_acc_prob); #endif avg_fitness_porcentage = (fitness_sample_avg / problem_size) * 100; fprintf(stdout, ",%ld,%ld,%.4f,%.4f", fitness_sample_a, fitness_sample_b, fitness_sample_avg, avg_fitness_porcentage); current_acc_prob = aux; #if defined(DEBUG) aux = bga_get_part_stats_prob(&problem_state, th_id, 1, (POPULATION_SIZE >> 1) + (POPULATION_SIZE >> 2)) * nthreads; fprintf(stdout, ",%ld", aux); aux = bga_get_part_stats_prob(&problem_state, th_id, 1, POPULATION_SIZE >> 1) * nthreads; fprintf(stdout, ",%ld", aux); aux = bga_get_part_stats_prob(&problem_state, th_id, -1, POPULATION_SIZE >> 1) * nthreads; fprintf(stdout, ",%ld", aux); aux = bga_get_part_stats_prob(&problem_state, th_id, -1, POPULATION_SIZE >> 2) * nthreads; fprintf(stdout, ",%ld", aux); #endif fprintf(stdout, ",%f", timming_end(stats_timer)); fprintf(stdout, "\n"); } } current_iteration++; #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(start, 0); } #endif bga_model_sampling_mt(&problem_state, &mt_status, th_id); #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(end, 0); ccudaEventSynchronize(end); ccudaEventElapsedTime(&gputime, start, end); fprintf(stdout, "[TIME] Sampling processing time: %f (ms)\n", gputime); } #endif #if defined(DEBUG) #pragma omp barrier #endif #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(start, 0); } #endif bga_compute_sample_part_fitness(&problem_state, th_id); #if defined(DEBUG) #pragma omp barrier if (th_id == 0) { bga_show_samples(&problem_state); } #pragma omp barrier #endif #if defined(FULL_FITNESS_UPDATE) #pragma omp barrier if (th_id == 0) { bga_compute_sample_full_fitness(&problem_state); } #pragma omp barrier #endif #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(end, 0); ccudaEventSynchronize(end); ccudaEventElapsedTime(&gputime, start, end); fprintf(stdout, "[TIME] Eval processing time: %f (ms)\n", gputime); } #endif #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(start, 0); } #endif bga_model_update(&problem_state, th_id); #if defined(MACRO_TIMMING) if (display_stats == 0) { 
ccudaEventRecord(end, 0); ccudaEventSynchronize(end); ccudaEventElapsedTime(&gputime, start, end); fprintf(stdout, "[TIME] Update processing time: %f (ms)\n", gputime); } #endif #if defined(FULL_FITNESS_UPDATE) fitness_sample_a = problem_state.samples_fitness[0]; fitness_sample_b = problem_state.samples_fitness[1]; #endif #if defined(PARTIAL_FITNESS_UPDATE) fitness_sample_a = problem_state.samples_vector_fitness[0][th_id]; fitness_sample_b = problem_state.samples_vector_fitness[1][th_id]; #endif fitness_sample_avg = (float)(fitness_sample_a + fitness_sample_b) / 2.0; } long aux0[4], aux1[4], aux2[4], aux3[4], aux4[4]; aux1[0] = aux2[0] = aux3[0] = aux4[0] = 0; aux1[1] = aux2[1] = aux3[1] = aux4[1] = 0; aux1[2] = aux2[2] = aux3[2] = aux4[2] = 0; aux1[3] = aux2[3] = aux3[3] = aux4[3] = 0; aux1[th_id] = bga_get_part_stats_prob(&problem_state, th_id, 1, (POPULATION_SIZE >> 1) + (POPULATION_SIZE >> 2)); aux2[th_id] = bga_get_part_stats_prob(&problem_state, th_id, 1, POPULATION_SIZE >> 1); aux3[th_id] = bga_get_part_stats_prob(&problem_state, th_id, -1, POPULATION_SIZE >> 1); aux4[th_id] = bga_get_part_stats_prob(&problem_state, th_id, -1, POPULATION_SIZE >> 2); bga_get_part_accumulated_prob(&problem_state, th_id); #pragma omp barrier if (th_id == 0) { long final_acc_prob = bga_get_full_accumulated_prob(&problem_state); fprintf(stdout, ">>>>\n"); fprintf(stdout, "iter,avg. prob., abs. value,abs. improv.,gt 75,gt 50,lt 50,lt 25\n"); fprintf(stdout, "%d,%.4f,%ld,%ld,%ld,%ld,%ld\n",current_iteration, (double)(final_acc_prob * 100) / (double)problem_state.max_prob_sum, final_acc_prob, aux1[0]+aux1[1]+aux1[2]+aux1[3], aux2[0]+aux2[1]+aux2[2]+aux2[3], aux3[0]+aux3[1]+aux3[2]+aux3[3], aux4[0]+aux4[1]+aux4[2]+aux4[3]); } // === Libero la memoria del Mersenne Twister. mtgp32_free(&mt_status); #if defined(MACRO_TIMMING) ccudaEventDestroy(start); ccudaEventDestroy(end); #endif } #if defined(MACRO_TIMMING) cputime = timming_end(full_start); fprintf(stdout, "[TIME] Total processing time: %f (microseconds)\n", cputime); #endif // === Libero la memoria del cGA. bga_free(&problem_state); return EXIT_SUCCESS; }
796d7676369ffcb9e354c54b89cf1fb43b8ac01e.cu
#include <stdio.h> #include <stdlib.h> #include <limits.h> //#include <cuda.h> #include <omp.h> #include <time.h> #include "config.h" #include "cuda-util.h" #include "mtgp-1.1/mtgp32-cuda.h" #include "billionga.h" struct termination_criteria { int max_iteration_count; }; inline void termination_criteria_init(struct termination_criteria *term_state, int max_iteration_count) { term_state->max_iteration_count = max_iteration_count; } inline int termination_criteria_eval(struct termination_criteria *term_state, struct bga_state *problem_state, int iteration_count, float fitness_sample_avg) { return ((iteration_count == term_state->max_iteration_count) || (fitness_sample_avg >= problem_state->max_prob_sum)); } void timming_start(timespec &ts) { clock_gettime(CLOCK_REALTIME, &ts); } double timming_end(timespec &ts) { timespec ts_end; clock_gettime(CLOCK_REALTIME, &ts_end); double elapsed; elapsed = ((ts_end.tv_sec - ts.tv_sec) * 1000000.0) + ((ts_end.tv_nsec - ts.tv_nsec) / 1000.0); return elapsed / 1000; } int main(int argc, char **argv) { double cputime; float gputime; #if defined(MACRO_TIMMING) timespec full_start; timming_start(full_start); #endif timespec stats_timer; timming_start(stats_timer); if (argc != 5) { fprintf(stdout, "Wrong! RFM!\n\nUsage: %s <problem size> <max iteration> <prng vector size> <gpu device>\n(where 1 <= problem size <= %ld and problem_size can be divided by 8)\n\n", argv[0], LONG_MAX); return EXIT_FAILURE; } #if defined(INFO) || defined(DEBUG) fprintf(stdout, "[INFO] === Starting... ===============================\n"); #endif #if defined(MACRO_TIMMING) timespec init1_start; timming_start(init1_start); #endif long problem_size; problem_size = atol(argv[1]); int max_iteration_count = atoi(argv[2]); struct termination_criteria term_state; termination_criteria_init(&term_state, max_iteration_count); fprintf(stdout, "[INFO] Cantidad de iteraciones %d.\n", max_iteration_count); // === GPU. int number_gpus = 0; cudaGetDeviceCount(&number_gpus); if (number_gpus < 1) { fprintf(stderr, "[ERROR] No CUDA capable devices were detected.\n"); exit(EXIT_FAILURE); } int starting_gpu_device = atoi(argv[4]); assert(starting_gpu_device >= 0 && starting_gpu_device < number_gpus); #if defined(MACRO_TIMMING) cputime = timming_end(full_start); fprintf(stdout, "[TIME] Init (1) processing time: %f (microseconds)\n", cputime); #endif // === PRNG. int prng_vector_size = atoi(argv[3]); unsigned int prng_seeds[4] = {3822712292, 495793398, 4202624243, 3503457871}; // generated with: od -vAn -N4 -tu4 < /dev/urandom // === OpenMP int nthreads = omp_get_max_threads(); //omp_get_num_threads(); //#if defined(INFO) || defined(DEBUG) fprintf(stdout, "[INFO] Number of threads %d.\n", nthreads); //#endif //assert(nthreads <= 4); assert(nthreads <= number_gpus); // === Inicialización del cGA struct bga_state problem_state; bga_initialization(&problem_state, problem_size, nthreads, NUMBER_OF_SAMPLES); #pragma omp parallel // private(th_id) { int current_iteration = 0; int th_id = omp_get_thread_num(); int th_device = (starting_gpu_device + th_id) % number_gpus; ccudaSetDevice(th_device); #if defined(MACRO_TIMMING) cudaEvent_t start; cudaEvent_t end; ccudaEventCreate(&start); ccudaEventCreate(&end); ccudaEventRecord(start, 0); #endif #if defined(INFO) || defined(DEBUG) fprintf(stdout, "[INFO] Thread %d using device %d.\n", th_id, th_device); #endif assert(omp_get_num_threads() == nthreads); // === Inicialización del Mersenne Twister. 
mtgp32_status mt_status; mtgp32_initialize(&mt_status, prng_vector_size, prng_seeds[th_id]); // === Inicialización del BillionGA. bga_initialize_thread(&problem_state, th_id); #if defined(MACRO_TIMMING) ccudaEventRecord(end, 0); ccudaEventSynchronize(end); ccudaEventElapsedTime(&gputime, start, end); fprintf(stdout, "[TIME] Init (2) processing time: %f (ms)\n", gputime); #endif #if defined(DEBUG) #pragma omp barrier if (th_id == 0) bga_show_prob_vector_state(&problem_state); #pragma omp barrier #endif long current_acc_prob = 0; long aux; float fitness_sample_avg = 0, probability_avg = 0, avg_fitness_porcentage = 0; long fitness_sample_a = 0, fitness_sample_b = 0; fprintf(stdout, "iter"); #if defined(DEBUG) fprintf(stdout, ",avg. prob., abs. value,abs. improv."); #endif fprintf(stdout, ",f1,f2,avg f1 f2"); #if defined(DEBUG) fprintf(stdout, ",gt 75,gt 50,lt 50,lt 25"); #endif fprintf(stdout, ",time\n"); while (!termination_criteria_eval(&term_state, &problem_state, current_iteration, fitness_sample_avg)) { int display_stats; display_stats = current_iteration % SHOW_UPDATE_EVERY; if (th_id == 0) { if (display_stats == 0) { fprintf(stdout, "%d", current_iteration); #if defined(DEBUG) aux = bga_get_full_accumulated_prob(&problem_state); probability_avg = (float)(aux * 100.0 / problem_state.max_prob_sum); fprintf(stdout, ",%.4f", probability_avg); fprintf(stdout, ",%ld", aux); fprintf(stdout, ",%ld", aux - current_acc_prob); #endif avg_fitness_porcentage = (fitness_sample_avg / problem_size) * 100; fprintf(stdout, ",%ld,%ld,%.4f,%.4f", fitness_sample_a, fitness_sample_b, fitness_sample_avg, avg_fitness_porcentage); current_acc_prob = aux; #if defined(DEBUG) aux = bga_get_part_stats_prob(&problem_state, th_id, 1, (POPULATION_SIZE >> 1) + (POPULATION_SIZE >> 2)) * nthreads; fprintf(stdout, ",%ld", aux); aux = bga_get_part_stats_prob(&problem_state, th_id, 1, POPULATION_SIZE >> 1) * nthreads; fprintf(stdout, ",%ld", aux); aux = bga_get_part_stats_prob(&problem_state, th_id, -1, POPULATION_SIZE >> 1) * nthreads; fprintf(stdout, ",%ld", aux); aux = bga_get_part_stats_prob(&problem_state, th_id, -1, POPULATION_SIZE >> 2) * nthreads; fprintf(stdout, ",%ld", aux); #endif fprintf(stdout, ",%f", timming_end(stats_timer)); fprintf(stdout, "\n"); } } current_iteration++; #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(start, 0); } #endif bga_model_sampling_mt(&problem_state, &mt_status, th_id); #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(end, 0); ccudaEventSynchronize(end); ccudaEventElapsedTime(&gputime, start, end); fprintf(stdout, "[TIME] Sampling processing time: %f (ms)\n", gputime); } #endif #if defined(DEBUG) #pragma omp barrier #endif #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(start, 0); } #endif bga_compute_sample_part_fitness(&problem_state, th_id); #if defined(DEBUG) #pragma omp barrier if (th_id == 0) { bga_show_samples(&problem_state); } #pragma omp barrier #endif #if defined(FULL_FITNESS_UPDATE) #pragma omp barrier if (th_id == 0) { bga_compute_sample_full_fitness(&problem_state); } #pragma omp barrier #endif #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(end, 0); ccudaEventSynchronize(end); ccudaEventElapsedTime(&gputime, start, end); fprintf(stdout, "[TIME] Eval processing time: %f (ms)\n", gputime); } #endif #if defined(MACRO_TIMMING) if (display_stats == 0) { ccudaEventRecord(start, 0); } #endif bga_model_update(&problem_state, th_id); #if defined(MACRO_TIMMING) if (display_stats == 0) { 
ccudaEventRecord(end, 0); ccudaEventSynchronize(end); ccudaEventElapsedTime(&gputime, start, end); fprintf(stdout, "[TIME] Update processing time: %f (ms)\n", gputime); } #endif #if defined(FULL_FITNESS_UPDATE) fitness_sample_a = problem_state.samples_fitness[0]; fitness_sample_b = problem_state.samples_fitness[1]; #endif #if defined(PARTIAL_FITNESS_UPDATE) fitness_sample_a = problem_state.samples_vector_fitness[0][th_id]; fitness_sample_b = problem_state.samples_vector_fitness[1][th_id]; #endif fitness_sample_avg = (float)(fitness_sample_a + fitness_sample_b) / 2.0; } long aux0[4], aux1[4], aux2[4], aux3[4], aux4[4]; aux1[0] = aux2[0] = aux3[0] = aux4[0] = 0; aux1[1] = aux2[1] = aux3[1] = aux4[1] = 0; aux1[2] = aux2[2] = aux3[2] = aux4[2] = 0; aux1[3] = aux2[3] = aux3[3] = aux4[3] = 0; aux1[th_id] = bga_get_part_stats_prob(&problem_state, th_id, 1, (POPULATION_SIZE >> 1) + (POPULATION_SIZE >> 2)); aux2[th_id] = bga_get_part_stats_prob(&problem_state, th_id, 1, POPULATION_SIZE >> 1); aux3[th_id] = bga_get_part_stats_prob(&problem_state, th_id, -1, POPULATION_SIZE >> 1); aux4[th_id] = bga_get_part_stats_prob(&problem_state, th_id, -1, POPULATION_SIZE >> 2); bga_get_part_accumulated_prob(&problem_state, th_id); #pragma omp barrier if (th_id == 0) { long final_acc_prob = bga_get_full_accumulated_prob(&problem_state); fprintf(stdout, ">>>>\n"); fprintf(stdout, "iter,avg. prob., abs. value,abs. improv.,gt 75,gt 50,lt 50,lt 25\n"); fprintf(stdout, "%d,%.4f,%ld,%ld,%ld,%ld,%ld\n",current_iteration, (double)(final_acc_prob * 100) / (double)problem_state.max_prob_sum, final_acc_prob, aux1[0]+aux1[1]+aux1[2]+aux1[3], aux2[0]+aux2[1]+aux2[2]+aux2[3], aux3[0]+aux3[1]+aux3[2]+aux3[3], aux4[0]+aux4[1]+aux4[2]+aux4[3]); } // === Libero la memoria del Mersenne Twister. mtgp32_free(&mt_status); #if defined(MACRO_TIMMING) ccudaEventDestroy(start); ccudaEventDestroy(end); #endif } #if defined(MACRO_TIMMING) cputime = timming_end(full_start); fprintf(stdout, "[TIME] Total processing time: %f (microseconds)\n", cputime); #endif // === Libero la memoria del cGA. bga_free(&problem_state); return EXIT_SUCCESS; }
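The driver above binds one OpenMP thread to one GPU (thread id plus the starting device, modulo the device count) and then runs the sampling/evaluation/update loop on that device. A stripped-down sketch of just that binding pattern, with the BillionGA-specific work replaced by a placeholder comment:

#include <cuda_runtime.h>
#include <omp.h>
#include <cstdio>

// One host thread per GPU; each thread selects its device once and keeps it
// for the lifetime of the parallel region.
void run_one_thread_per_gpu(int starting_device)
{
    int num_gpus = 0;
    cudaGetDeviceCount(&num_gpus);
    if (num_gpus < 1) return;  // mirror the device check done in main() above

    #pragma omp parallel num_threads(num_gpus)
    {
        int tid = omp_get_thread_num();
        int dev = (starting_device + tid) % num_gpus;
        cudaSetDevice(dev);
        printf("thread %d -> device %d\n", tid, dev);
        // per-thread slice of the computation goes here
    }
}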
c73698431ecb5592d9971ca00ddd03994a6f28f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This file includes all the functions required to calculate the hermite interpolating polynomial and its gradients, which is the essence of the high accuracy of the current advection scheme. */ #ifndef _HermiteCUDA_h #define _HermiteCUDA_h __global__ void devicetodevicecopy(double *dphi, double *dpsix, double *dpsiy, double *mphi, double *mpsix, double *mpsiy, unsigned int nx, unsigned int TileSize) { unsigned int bx = blockIdx.x; unsigned int by = blockIdx.y; unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int index_x = bx * TileSize + tx; unsigned int index_y = by * TileSize + ty; unsigned int indexToWrite = index_y * nx + index_x; mphi[indexToWrite] = dphi[indexToWrite]; mpsix[indexToWrite] = dpsix[indexToWrite]; mpsiy[indexToWrite] = dpsiy[indexToWrite]; } __device__ double basepolynomial(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){ double bpx = 0.0, bpy = 0.0; double etax = (x - xo)/dx; double etay = (y - yo)/dy; switch(2 * alphax + vx + 1){ // Switching between base polynomials for x case 1: bpx = 1 - 3 * pow(etax,2) + 2 * pow(etax,3); break; case 2: bpx = -2 * pow(etax,3) + 3 * pow(etax,2); break; case 3: bpx = pow(etax,3) - 2 * pow(etax,2) + etax; break; case 4: bpx = pow(etax,3) - pow(etax,2); break; } switch(2 * alphay + vy + 1){ // Switching between base polynomials for y case 1: bpy = 1 - 3 * pow(etay,2) + 2 * pow(etay,3); break; case 2: bpy = -2 * pow(etay,3) + 3 * pow(etay,2); break; case 3: bpy = pow(etay,3) - 2 * pow(etay,2) + etay; break; case 4: bpy = pow(etay,3) - pow(etay,2); break; } double result = bpx * bpy; return result; } __device__ double gradbpx(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){ double bpx = 0.0, bpy = 0.0; double etax = (x - xo)/dx; double etay = (y - yo)/dy; switch(2 * alphax + vx + 1){ // Switching between base polynomials for x case 1: bpx = - 6 * etax + 6 * pow(etax,2); break; case 2: bpx = -6 * pow(etax,2) + 6 * etax; break; case 3: bpx = 3 * pow(etax,2) - 4 * etax + 1; break; case 4: bpx = 3 * pow(etax,2) - 2 * etax; break; } switch(2 * alphay + vy + 1){ // Switching between base polynomials for y case 1: bpy = 1 - 3 * pow(etay,2) + 2 * pow(etay,3); break; case 2: bpy = -2 * pow(etay,3) + 3 * pow(etay,2); break; case 3: bpy = pow(etay,3) - 2 * pow(etay,2) + etay; break; case 4: bpy = pow(etay,3) - pow(etay,2); break; } double result = bpx * bpy; return result; } __device__ double gradbpy(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){ double bpx = 0.0, bpy = 0.0; double etax = (x - xo)/dx; double etay = (y - yo)/dy; switch(2 * alphax + vx + 1){ // Switching between base polynomials for x case 1: bpx = 1 - 3 * pow(etax,2) + 2 * pow(etax,3); break; case 2: bpx = -2 * pow(etax,3) + 3 * pow(etax,2); break; case 3: bpx = pow(etax,3) - 2 * pow(etax,2) + etax; break; case 4: bpx = pow(etax,3) - pow(etax,2); break; } switch(2 * alphay + vy + 1){ // Switching between base polynomials for y case 1: bpy = - 6 * etay + 6 * pow(etay,2); break; case 2: bpy = -6 * pow(etay,2) + 6 * etay; break; case 3: bpy = 3 * pow(etay,2) - 4 * etay + 1; break; case 4: bpy = 3 * pow(etay,2) - 2 * etay; break; } double result = bpx * bpy; return result; } __device__ double hp(double phi[4], double psix[4], double psiy[4], double psixy[4], double x, double y, double xo, double yo, double dx, 
double dy){ double H = 0, delta[4], d; int alphax, alphay, vx, vy; //double bp; //Base Polynomial Section for(int i = 0; i < 4; i++){ switch (i){ case 0:{ alphax = 0; alphay = 0; memcpy(delta, phi, sizeof(delta)); break; } case 1:{ alphax = 0; alphay = 1; memcpy(delta, psiy, sizeof(delta)); break; } case 2:{ alphax = 1; alphay = 0; memcpy(delta, psix, sizeof(delta)); break; } case 3:{ alphax = 1; alphay = 1; memcpy(delta, psixy, sizeof(delta)); break; } } for(int j = 0; j < 4; j++){ switch (j){ case 0:{ vx = 0; vy = 0; d = delta[0]; break; } case 1:{ vx = 0; vy = 1; d = delta[2]; break; } case 2:{ vx = 1; vy = 0; d = delta[1]; break; } case 3:{ vx = 1; vy = 1; d = delta[3]; break; } } //bp = basepolynomial(x, y, alphax, alphay, vx, vy, dx, dy, xo, yo); H = H + pow(dx, alphax) * pow(dy, alphay) * basepolynomial(x, y, alphax, alphay, vx, vy, dx, dy, xo, yo) * d; } } return H; } __device__ double hermx(double* phi, double* psix, double* psiy, double* psixy, double xadv, double yadv, double xo, double yo, double dx, double dy){ double gradientx = 0, delta[4], d; int alphax, alphay, vx, vy; //double bp; //Base Polynomial Section for(int i = 0; i < 4; i++){ switch (i){ case 0:{ alphax = 0; alphay = 0; memcpy(delta, phi, sizeof(delta)); break; } case 1:{ alphax = 0; alphay = 1; memcpy(delta, psiy, sizeof(delta)); break; } case 2:{ alphax = 1; alphay = 0; memcpy(delta, psix, sizeof(delta)); break; } case 3:{ alphax = 1; alphay = 1; memcpy(delta, psixy, sizeof(delta)); break; } } for(int j = 0; j < 4; j++){ switch (j){ case 0:{ vx = 0; vy = 0; d = delta[0]; break; } case 1:{ vx = 0; vy = 1; d = delta[2]; break; } case 2:{ vx = 1; vy = 0; d = delta[1]; break; } case 3:{ vx = 1; vy = 1; d = delta[3]; break; } } //bp = gradbpx(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo); gradientx = gradientx + pow(dx, alphax) * pow(dy, alphay) * gradbpx(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo) * d * (1/dx); } } return gradientx; } __device__ double hermy(double* phi, double* psix, double* psiy, double* psixy, double xadv, double yadv, double xo, double yo, double dx, double dy){ double gradienty = 0, d, delta[4]; int alphax, alphay, vx, vy; //double bp; //Base Polynomial Section for(int i = 0; i < 4; i++){ switch (i){ case 0:{ alphax = 0; alphay = 0; memcpy(delta, phi, sizeof(delta)); break; } case 1:{ alphax = 0; alphay = 1; memcpy(delta, psiy, sizeof(delta)); break; } case 2:{ alphax = 1; alphay = 0; memcpy(delta, psix, sizeof(delta)); break; } case 3:{ alphax = 1; alphay = 1; memcpy(delta, psixy, sizeof(delta)); break; } } for(int j = 0; j < 4; j++){ switch (j){ case 0:{ vx = 0; vy = 0; d = delta[0]; break; } case 1:{ vx = 0; vy = 1; d = delta[2]; break; } case 2:{ vx = 1; vy = 0; d = delta[1]; break; } case 3:{ vx = 1; vy = 1; d = delta[3]; break; } } //bp = gradbpy(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo); gradienty = gradienty + pow(dx, alphax) * pow(dy, alphay) * gradbpy(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo) * d * (1/dy); } } return gradienty; } __device__ double hp1D(double phi[2], double psix[2], double x, double xo, double dx){ double H = 0; double etax = (x - xo)/dx; H = (1 - 3*pow(etax,2) + 2 * pow(etax,3)) * phi[0] + (3 * pow(etax,2) - 2 * pow(etax,3)) * phi[1] + (etax + pow(etax,3) - 2 * pow(etax,2)) * psix[0] + (pow(etax,3) - pow(etax,2)) * psix[1]; return H; } #endif
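The four cases in basepolynomial are the standard cubic Hermite basis functions on the unit interval, so the interpolant reproduces the nodal values exactly and weights the derivative data through the eta-scaled basis terms. A small host-only sketch of the 1D formula used by hp1D (the function name hermite1d and the check in main are illustrative, not part of the file):

#include <cstdio>

// 1D cubic Hermite interpolant mirroring hp1D, with eta = (x - xo)/dx in [0, 1].
// At eta = 0 it returns phi[0]; at eta = 1 it returns phi[1]; the psix terms
// multiply the derivative basis functions (derivatives taken with respect to eta).
double hermite1d(const double phi[2], const double psix[2], double x, double xo, double dx)
{
    double e = (x - xo) / dx;
    return (1 - 3 * e * e + 2 * e * e * e) * phi[0]
         + (3 * e * e - 2 * e * e * e) * phi[1]
         + (e - 2 * e * e + e * e * e) * psix[0]
         + (e * e * e - e * e) * psix[1];
}

int main()
{
    double phi[2] = {1.0, 2.0}, psix[2] = {0.5, -0.25};
    printf("%f %f\n", hermite1d(phi, psix, 0.0, 0.0, 1.0),   // prints 1.000000
                      hermite1d(phi, psix, 1.0, 0.0, 1.0));  // prints 2.000000
    return 0;
}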
c73698431ecb5592d9971ca00ddd03994a6f28f5.cu
/* This file includes all the functions required to calculate the hermite interpolating polynomial and its gradients, which is the essence of the high accuracy of the current advection scheme. */ #ifndef _HermiteCUDA_h #define _HermiteCUDA_h __global__ void devicetodevicecopy(double *dphi, double *dpsix, double *dpsiy, double *mphi, double *mpsix, double *mpsiy, unsigned int nx, unsigned int TileSize) { unsigned int bx = blockIdx.x; unsigned int by = blockIdx.y; unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int index_x = bx * TileSize + tx; unsigned int index_y = by * TileSize + ty; unsigned int indexToWrite = index_y * nx + index_x; mphi[indexToWrite] = dphi[indexToWrite]; mpsix[indexToWrite] = dpsix[indexToWrite]; mpsiy[indexToWrite] = dpsiy[indexToWrite]; } __device__ double basepolynomial(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){ double bpx = 0.0, bpy = 0.0; double etax = (x - xo)/dx; double etay = (y - yo)/dy; switch(2 * alphax + vx + 1){ // Switching between base polynomials for x case 1: bpx = 1 - 3 * pow(etax,2) + 2 * pow(etax,3); break; case 2: bpx = -2 * pow(etax,3) + 3 * pow(etax,2); break; case 3: bpx = pow(etax,3) - 2 * pow(etax,2) + etax; break; case 4: bpx = pow(etax,3) - pow(etax,2); break; } switch(2 * alphay + vy + 1){ // Switching between base polynomials for y case 1: bpy = 1 - 3 * pow(etay,2) + 2 * pow(etay,3); break; case 2: bpy = -2 * pow(etay,3) + 3 * pow(etay,2); break; case 3: bpy = pow(etay,3) - 2 * pow(etay,2) + etay; break; case 4: bpy = pow(etay,3) - pow(etay,2); break; } double result = bpx * bpy; return result; } __device__ double gradbpx(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){ double bpx = 0.0, bpy = 0.0; double etax = (x - xo)/dx; double etay = (y - yo)/dy; switch(2 * alphax + vx + 1){ // Switching between base polynomials for x case 1: bpx = - 6 * etax + 6 * pow(etax,2); break; case 2: bpx = -6 * pow(etax,2) + 6 * etax; break; case 3: bpx = 3 * pow(etax,2) - 4 * etax + 1; break; case 4: bpx = 3 * pow(etax,2) - 2 * etax; break; } switch(2 * alphay + vy + 1){ // Switching between base polynomials for y case 1: bpy = 1 - 3 * pow(etay,2) + 2 * pow(etay,3); break; case 2: bpy = -2 * pow(etay,3) + 3 * pow(etay,2); break; case 3: bpy = pow(etay,3) - 2 * pow(etay,2) + etay; break; case 4: bpy = pow(etay,3) - pow(etay,2); break; } double result = bpx * bpy; return result; } __device__ double gradbpy(double x, double y, int alphax, int alphay, int vx, int vy, double dx, double dy, double xo, double yo){ double bpx = 0.0, bpy = 0.0; double etax = (x - xo)/dx; double etay = (y - yo)/dy; switch(2 * alphax + vx + 1){ // Switching between base polynomials for x case 1: bpx = 1 - 3 * pow(etax,2) + 2 * pow(etax,3); break; case 2: bpx = -2 * pow(etax,3) + 3 * pow(etax,2); break; case 3: bpx = pow(etax,3) - 2 * pow(etax,2) + etax; break; case 4: bpx = pow(etax,3) - pow(etax,2); break; } switch(2 * alphay + vy + 1){ // Switching between base polynomials for y case 1: bpy = - 6 * etay + 6 * pow(etay,2); break; case 2: bpy = -6 * pow(etay,2) + 6 * etay; break; case 3: bpy = 3 * pow(etay,2) - 4 * etay + 1; break; case 4: bpy = 3 * pow(etay,2) - 2 * etay; break; } double result = bpx * bpy; return result; } __device__ double hp(double phi[4], double psix[4], double psiy[4], double psixy[4], double x, double y, double xo, double yo, double dx, double dy){ double H = 0, delta[4], d; int alphax, alphay, vx, vy; //double bp; //Base 
Polynomial Section for(int i = 0; i < 4; i++){ switch (i){ case 0:{ alphax = 0; alphay = 0; memcpy(delta, phi, sizeof(delta)); break; } case 1:{ alphax = 0; alphay = 1; memcpy(delta, psiy, sizeof(delta)); break; } case 2:{ alphax = 1; alphay = 0; memcpy(delta, psix, sizeof(delta)); break; } case 3:{ alphax = 1; alphay = 1; memcpy(delta, psixy, sizeof(delta)); break; } } for(int j = 0; j < 4; j++){ switch (j){ case 0:{ vx = 0; vy = 0; d = delta[0]; break; } case 1:{ vx = 0; vy = 1; d = delta[2]; break; } case 2:{ vx = 1; vy = 0; d = delta[1]; break; } case 3:{ vx = 1; vy = 1; d = delta[3]; break; } } //bp = basepolynomial(x, y, alphax, alphay, vx, vy, dx, dy, xo, yo); H = H + pow(dx, alphax) * pow(dy, alphay) * basepolynomial(x, y, alphax, alphay, vx, vy, dx, dy, xo, yo) * d; } } return H; } __device__ double hermx(double* phi, double* psix, double* psiy, double* psixy, double xadv, double yadv, double xo, double yo, double dx, double dy){ double gradientx = 0, delta[4], d; int alphax, alphay, vx, vy; //double bp; //Base Polynomial Section for(int i = 0; i < 4; i++){ switch (i){ case 0:{ alphax = 0; alphay = 0; memcpy(delta, phi, sizeof(delta)); break; } case 1:{ alphax = 0; alphay = 1; memcpy(delta, psiy, sizeof(delta)); break; } case 2:{ alphax = 1; alphay = 0; memcpy(delta, psix, sizeof(delta)); break; } case 3:{ alphax = 1; alphay = 1; memcpy(delta, psixy, sizeof(delta)); break; } } for(int j = 0; j < 4; j++){ switch (j){ case 0:{ vx = 0; vy = 0; d = delta[0]; break; } case 1:{ vx = 0; vy = 1; d = delta[2]; break; } case 2:{ vx = 1; vy = 0; d = delta[1]; break; } case 3:{ vx = 1; vy = 1; d = delta[3]; break; } } //bp = gradbpx(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo); gradientx = gradientx + pow(dx, alphax) * pow(dy, alphay) * gradbpx(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo) * d * (1/dx); } } return gradientx; } __device__ double hermy(double* phi, double* psix, double* psiy, double* psixy, double xadv, double yadv, double xo, double yo, double dx, double dy){ double gradienty = 0, d, delta[4]; int alphax, alphay, vx, vy; //double bp; //Base Polynomial Section for(int i = 0; i < 4; i++){ switch (i){ case 0:{ alphax = 0; alphay = 0; memcpy(delta, phi, sizeof(delta)); break; } case 1:{ alphax = 0; alphay = 1; memcpy(delta, psiy, sizeof(delta)); break; } case 2:{ alphax = 1; alphay = 0; memcpy(delta, psix, sizeof(delta)); break; } case 3:{ alphax = 1; alphay = 1; memcpy(delta, psixy, sizeof(delta)); break; } } for(int j = 0; j < 4; j++){ switch (j){ case 0:{ vx = 0; vy = 0; d = delta[0]; break; } case 1:{ vx = 0; vy = 1; d = delta[2]; break; } case 2:{ vx = 1; vy = 0; d = delta[1]; break; } case 3:{ vx = 1; vy = 1; d = delta[3]; break; } } //bp = gradbpy(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo); gradienty = gradienty + pow(dx, alphax) * pow(dy, alphay) * gradbpy(xadv, yadv, alphax, alphay, vx, vy, dx, dy, xo, yo) * d * (1/dy); } } return gradienty; } __device__ double hp1D(double phi[2], double psix[2], double x, double xo, double dx){ double H = 0; double etax = (x - xo)/dx; H = (1 - 3*pow(etax,2) + 2 * pow(etax,3)) * phi[0] + (3 * pow(etax,2) - 2 * pow(etax,3)) * phi[1] + (etax + pow(etax,3) - 2 * pow(etax,2)) * psix[0] + (pow(etax,3) - pow(etax,2)) * psix[1]; return H; } #endif
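For reference, the four switch cases in basepolynomial (selected by 2*alpha + v + 1) are the cubic Hermite basis functions on the unit interval, and hp evaluates their tensor product scaled by dx^alphax * dy^alphay, with gradbpx/gradbpy supplying the eta-derivatives used by hermx/hermy. The LaTeX below only restates what the code above computes, written with eta = (x - xo)/dx:

% Cubic Hermite basis selected by switch(2*alpha + v + 1), with \eta = (x - x_o)/\Delta x
\begin{aligned}
b_{0,0}(\eta) &= 1 - 3\eta^{2} + 2\eta^{3}, &\quad b_{0,1}(\eta) &= 3\eta^{2} - 2\eta^{3},\\
b_{1,0}(\eta) &= \eta - 2\eta^{2} + \eta^{3}, &\quad b_{1,1}(\eta) &= \eta^{3} - \eta^{2},
\end{aligned}
\qquad
H(x,y) = \sum_{\alpha_x,\alpha_y,v_x,v_y \in \{0,1\}}
  \Delta x^{\alpha_x}\,\Delta y^{\alpha_y}\,
  b_{\alpha_x,v_x}(\eta_x)\, b_{\alpha_y,v_y}(\eta_y)\, \delta^{(\alpha_x,\alpha_y)}_{(v_x,v_y)}

Here the coefficients delta are the nodal values phi, psix, psiy, psixy that hp reads out per corner, and hermx/hermy return dH/dx and dH/dy by differentiating the x- or y-factor and applying the extra 1/dx or 1/dy seen in the code.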
e515b6cdd93fac75f99b2d22cfe8c46f81f70a7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <optix_world.h> RT_CALLABLE_PROGRAM float3 scale_color(float3 input_color, float multiplier) { return multiplier * input_color; } // Stubs only needed for sm_1x #if __CUDA_ARCH__ < 200 __global__ void scale_color_stub() { (void) scale_color( make_float3(0,0,0), 0 ); } #endif rtDeclareVariable(uint2, launch_index, rtLaunchIndex, ); rtDeclareVariable(uint2, launch_dim, rtLaunchDim, ); rtDeclareVariable(float, scale,,); RT_CALLABLE_PROGRAM float3 checker_color(float3 input_color, float multiplier) { uint2 tile_size = make_uint2(launch_dim.x / 5, launch_dim.y / 5); if ((launch_index.x/tile_size.x + launch_index.y/tile_size.y) % 2 == 0) return input_color * multiplier; else return input_color * scale; } // Stubs only needed for sm_1x #if __CUDA_ARCH__ < 200 __global__ void checker_color_stub() { (void) checker_color( make_float3(0,0,0), 0 ); } #endif RT_CALLABLE_PROGRAM float3 wavey_color(float3 input_color, float multiplier) { uint2 tile_size = make_uint2(launch_dim.x / 5, launch_dim.y / 5); if (((int)(launch_index.x+10*sinf(launch_index.y/10.f))/tile_size.x + launch_index.y/tile_size.y) % 2 == 0) return input_color * multiplier; else return input_color * scale; } // Stubs only needed for sm_1x #if __CUDA_ARCH__ < 200 __global__ void wavey_color_stub() { (void) wavey_color( make_float3(0,0,0), 0 ); } #endif RT_CALLABLE_PROGRAM float3 return_same_color(float3 input_color) { return input_color; } // Stubs only needed for sm_1x #if __CUDA_ARCH__ < 200 __global__ void return_same_color_stub() { (void) return_same_color( make_float3(0,0,0) ); } #endif
e515b6cdd93fac75f99b2d22cfe8c46f81f70a7d.cu
#include <optix_world.h> RT_CALLABLE_PROGRAM float3 scale_color(float3 input_color, float multiplier) { return multiplier * input_color; } // Stubs only needed for sm_1x #if __CUDA_ARCH__ < 200 __global__ void scale_color_stub() { (void) scale_color( make_float3(0,0,0), 0 ); } #endif rtDeclareVariable(uint2, launch_index, rtLaunchIndex, ); rtDeclareVariable(uint2, launch_dim, rtLaunchDim, ); rtDeclareVariable(float, scale,,); RT_CALLABLE_PROGRAM float3 checker_color(float3 input_color, float multiplier) { uint2 tile_size = make_uint2(launch_dim.x / 5, launch_dim.y / 5); if ((launch_index.x/tile_size.x + launch_index.y/tile_size.y) % 2 == 0) return input_color * multiplier; else return input_color * scale; } // Stubs only needed for sm_1x #if __CUDA_ARCH__ < 200 __global__ void checker_color_stub() { (void) checker_color( make_float3(0,0,0), 0 ); } #endif RT_CALLABLE_PROGRAM float3 wavey_color(float3 input_color, float multiplier) { uint2 tile_size = make_uint2(launch_dim.x / 5, launch_dim.y / 5); if (((int)(launch_index.x+10*sinf(launch_index.y/10.f))/tile_size.x + launch_index.y/tile_size.y) % 2 == 0) return input_color * multiplier; else return input_color * scale; } // Stubs only needed for sm_1x #if __CUDA_ARCH__ < 200 __global__ void wavey_color_stub() { (void) wavey_color( make_float3(0,0,0), 0 ); } #endif RT_CALLABLE_PROGRAM float3 return_same_color(float3 input_color) { return input_color; } // Stubs only needed for sm_1x #if __CUDA_ARCH__ < 200 __global__ void return_same_color_stub() { (void) return_same_color( make_float3(0,0,0) ); } #endif
da32b74b07b7fd820f7dc13179d020d5d51bd035.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// C++ 17 Includes:
// Project Includes:
// Defines:
__global__ void sum_dynamic_kernel(const int* pIn, int* pOut, size_t numInts)
{
    extern __shared__ int ps[]; // Automatically points to our shared memory array

    // Load shared memory: each thread loads up to two elements (the launch is
    // expected to use roughly numInts / 2 threads); thread 0 also picks up the
    // trailing element when numInts is odd.
    ps[threadIdx.x] = pIn[threadIdx.x];
    if (threadIdx.x + blockDim.x < numInts)
        ps[threadIdx.x + blockDim.x] = pIn[threadIdx.x + blockDim.x];
    if (0 == threadIdx.x && 1 == (1 & numInts))
        ps[numInts - 1] = pIn[numInts - 1];

    // Tree reduction in shared memory. Every thread runs the same number of
    // loop iterations so __syncthreads() is never reached divergently; threads
    // outside the current width simply skip the add instead of returning early.
    size_t prevNumThreads{numInts};
    for (size_t numThreads{blockDim.x}; numThreads > 0; numThreads >>= 1)
    {
        __syncthreads();
        if (threadIdx.x < numThreads)
            ps[threadIdx.x] += ps[threadIdx.x + numThreads];
        // Fold the orphaned last element into ps[0] exactly once (thread 0 only)
        // whenever the previous reduction width was odd.
        if (0 == threadIdx.x && 1 == (prevNumThreads & 1))
            ps[0] += ps[prevNumThreads - 1];
        prevNumThreads = numThreads;
    }

    if (0 == threadIdx.x)
        *pOut = ps[0];
}
da32b74b07b7fd820f7dc13179d020d5d51bd035.cu
#include "includes.h"
// C++ 17 Includes:
// Project Includes:
// Defines:
__global__ void sum_dynamic_kernel(const int* pIn, int* pOut, size_t numInts)
{
    extern __shared__ int ps[]; // Automatically points to our shared memory array

    // Load shared memory: each thread loads up to two elements (the launch is
    // expected to use roughly numInts / 2 threads); thread 0 also picks up the
    // trailing element when numInts is odd.
    ps[threadIdx.x] = pIn[threadIdx.x];
    if (threadIdx.x + blockDim.x < numInts)
        ps[threadIdx.x + blockDim.x] = pIn[threadIdx.x + blockDim.x];
    if (0 == threadIdx.x && 1 == (1 & numInts))
        ps[numInts - 1] = pIn[numInts - 1];

    // Tree reduction in shared memory. Every thread runs the same number of
    // loop iterations so __syncthreads() is never reached divergently; threads
    // outside the current width simply skip the add instead of returning early.
    size_t prevNumThreads{numInts};
    for (size_t numThreads{blockDim.x}; numThreads > 0; numThreads >>= 1)
    {
        __syncthreads();
        if (threadIdx.x < numThreads)
            ps[threadIdx.x] += ps[threadIdx.x + numThreads];
        // Fold the orphaned last element into ps[0] exactly once (thread 0 only)
        // whenever the previous reduction width was odd.
        if (0 == threadIdx.x && 1 == (prevNumThreads & 1))
            ps[0] += ps[prevNumThreads - 1];
        prevNumThreads = numThreads;
    }

    if (0 == threadIdx.x)
        *pOut = ps[0];
}
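sum_dynamic_kernel relies on the caller to size the extern __shared__ array and to launch roughly numInts / 2 threads so each thread can load two elements. Below is a minimal host-side driver sketch, assuming device buffers d_in/d_out and an even numInts of 1000 (none of this appears in the file above); the third launch-configuration argument is the dynamic shared-memory byte count that backs ps[].

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical driver for sum_dynamic_kernel, illustrative only.
int main()
{
    const size_t numInts = 1000;              // assumed element count
    int h_in[numInts], h_out = 0;
    for (size_t i = 0; i < numInts; ++i) h_in[i] = 1;

    int *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, numInts * sizeof(int));
    cudaMalloc(&d_out, sizeof(int));
    cudaMemcpy(d_in, h_in, numInts * sizeof(int), cudaMemcpyHostToDevice);

    // One block; each thread handles two inputs, so numInts / 2 threads.
    // numInts * sizeof(int) bytes of dynamic shared memory back ps[].
    const unsigned threads = static_cast<unsigned>(numInts / 2);
    sum_dynamic_kernel<<<1, threads, numInts * sizeof(int)>>>(d_in, d_out, numInts);

    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("sum = %d\n", h_out);              // expected: 1000
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}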
878cf5da956b3c2e30d8452be1115fc9c00be7bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THH/THH.h> #include "sparse_kernel.h" extern THCState *state; __global__ void spmv_backward_matrix_kernel( const int* p_cooRow, const int* p_csrCol, const float* p_vector, const float* p_grad_output, float* p_grad_matrix, const int rows, const int cols, const int nnz) { const int64_t idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nnz) { int row = p_cooRow[idx]; int col = p_csrCol[idx]; p_grad_matrix[idx] = p_grad_output[row]*p_vector[col]; } } void spmv_backward_matrix_cuda( const int* p_cooRow, const int* p_csrCol, const float* p_vector, const float* p_grad_output, float* p_grad_matrix, const int rows, const int cols, const int nnz) { const int64_t block_sz = 512; const int64_t nblocks = (nnz + block_sz - 1) / block_sz; hipLaunchKernelGGL(( spmv_backward_matrix_kernel), dim3(nblocks), dim3(block_sz), 0, THCState_getCurrentStream(state), p_cooRow, p_csrCol, p_vector, p_grad_output, p_grad_matrix, rows, cols, nnz); THCudaCheck(hipPeekAtLastError()); } __global__ void spadd_backward_kernel( const int* p_csr_rowA, const int* p_csr_colA, float* p_gradA, const int nnzA, const int* p_csr_rowB, const int* p_csr_colB, float* p_gradB, const int nnzB, const int* p_coo_rowC, const int* p_csr_colC, const float* p_gradC, const int nnzC, const float alpha, const float beta, const int rows, const int cols) { const int64_t idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nnzC) { int row = p_coo_rowC[idx]; int col = p_csr_colC[idx]; int ptrA = p_csr_rowA[row]; int endA = p_csr_rowA[row+1]; while(ptrA < endA && p_csr_colA[ptrA] < col) { ++ptrA; } if (ptrA < endA && p_csr_colA[ptrA] == col) { // update gradient p_gradA[ptrA] = p_gradC[idx]; } int ptrB = p_csr_rowB[row]; int endB = p_csr_rowB[row+1]; while(ptrB < endB && p_csr_colB[ptrB] < col) { ++ptrB; } if (ptrB < endB && p_csr_colB[ptrB] == col) { // update gradient p_gradB[ptrB] = p_gradC[idx]; } } } void spadd_backward_cuda( const int* p_csr_rowA, const int* p_csr_colA, float* p_gradA, const int nnzA, const int* p_csr_rowB, const int* p_csr_colB, float* p_gradB, const int nnzB, const int* p_coo_rowC, const int* p_csr_colC, const float* p_gradC, const int nnzC, const float alpha, const float beta, const int rows, const int cols) { const int64_t block_sz = 512; const int64_t nblocks = (nnzC + block_sz - 1) / block_sz; hipLaunchKernelGGL(( spadd_backward_kernel), dim3(nblocks), dim3(block_sz), 0, THCState_getCurrentStream(state), p_csr_rowA, p_csr_colA, p_gradA, nnzA, p_csr_rowB, p_csr_colB, p_gradB, nnzB, p_coo_rowC, p_csr_colC, p_gradC, nnzC, alpha, beta, rows, cols); THCudaCheck(hipPeekAtLastError()); } __global__ void matmul_preserve_sparsity_kernel( const int* p_csr_row1, const int* p_csr_col1, const float* p_data1, const int* p_csr_row2, const int* p_csr_col2, const float* p_data2, const int* p_coo_row_out, const int* p_coo_col_out, float* p_out, const int nnz_out) { const int64_t idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nnz_out) { int row = p_coo_row_out[idx]; int col = p_coo_col_out[idx]; int ptr1 = p_csr_row1[row]; int ptr2 = p_csr_row2[col]; int end1 = p_csr_row1[row+1]; int end2 = p_csr_row2[col+1]; float sum = 0.0f; while(ptr1 < end1 && ptr2 < end2) { int col1 = p_csr_col1[ptr1]; int col2 = p_csr_col2[ptr2]; if( col1 == col2 ) { sum += p_data1[ptr1]*p_data2[ptr2]; ++ptr1; ++ptr2; } else if (col1 < col2){ ++ptr1; } else { ++ptr2; } } p_out[idx] = sum; } } /** * in1.in2(T) */ void matmul_preserve_sparsity_cuda( const int* 
p_csr_row1, const int* p_csr_col1, const float* p_data1, const int* p_csr_row2, const int* p_csr_col2, const float* p_data2, const int* p_coo_row_out, const int* p_coo_col_out, float* p_out, const int nnz_out) { const int64_t block_sz = 512; const int64_t nblocks = (nnz_out + block_sz - 1) / block_sz; hipLaunchKernelGGL(( matmul_preserve_sparsity_kernel), dim3(nblocks), dim3(block_sz), 0, THCState_getCurrentStream(state), p_csr_row1, p_csr_col1, p_data1, p_csr_row2, p_csr_col2, p_data2, p_coo_row_out, p_coo_col_out, p_out, nnz_out); THCudaCheck(hipPeekAtLastError()); }
878cf5da956b3c2e30d8452be1115fc9c00be7bc.cu
#include <THC/THC.h> #include "sparse_kernel.h" extern THCState *state; __global__ void spmv_backward_matrix_kernel( const int* p_cooRow, const int* p_csrCol, const float* p_vector, const float* p_grad_output, float* p_grad_matrix, const int rows, const int cols, const int nnz) { const int64_t idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nnz) { int row = p_cooRow[idx]; int col = p_csrCol[idx]; p_grad_matrix[idx] = p_grad_output[row]*p_vector[col]; } } void spmv_backward_matrix_cuda( const int* p_cooRow, const int* p_csrCol, const float* p_vector, const float* p_grad_output, float* p_grad_matrix, const int rows, const int cols, const int nnz) { const int64_t block_sz = 512; const int64_t nblocks = (nnz + block_sz - 1) / block_sz; spmv_backward_matrix_kernel<<<nblocks, block_sz, 0, THCState_getCurrentStream(state)>>>( p_cooRow, p_csrCol, p_vector, p_grad_output, p_grad_matrix, rows, cols, nnz); THCudaCheck(cudaPeekAtLastError()); } __global__ void spadd_backward_kernel( const int* p_csr_rowA, const int* p_csr_colA, float* p_gradA, const int nnzA, const int* p_csr_rowB, const int* p_csr_colB, float* p_gradB, const int nnzB, const int* p_coo_rowC, const int* p_csr_colC, const float* p_gradC, const int nnzC, const float alpha, const float beta, const int rows, const int cols) { const int64_t idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nnzC) { int row = p_coo_rowC[idx]; int col = p_csr_colC[idx]; int ptrA = p_csr_rowA[row]; int endA = p_csr_rowA[row+1]; while(ptrA < endA && p_csr_colA[ptrA] < col) { ++ptrA; } if (ptrA < endA && p_csr_colA[ptrA] == col) { // update gradient p_gradA[ptrA] = p_gradC[idx]; } int ptrB = p_csr_rowB[row]; int endB = p_csr_rowB[row+1]; while(ptrB < endB && p_csr_colB[ptrB] < col) { ++ptrB; } if (ptrB < endB && p_csr_colB[ptrB] == col) { // update gradient p_gradB[ptrB] = p_gradC[idx]; } } } void spadd_backward_cuda( const int* p_csr_rowA, const int* p_csr_colA, float* p_gradA, const int nnzA, const int* p_csr_rowB, const int* p_csr_colB, float* p_gradB, const int nnzB, const int* p_coo_rowC, const int* p_csr_colC, const float* p_gradC, const int nnzC, const float alpha, const float beta, const int rows, const int cols) { const int64_t block_sz = 512; const int64_t nblocks = (nnzC + block_sz - 1) / block_sz; spadd_backward_kernel<<<nblocks, block_sz, 0, THCState_getCurrentStream(state)>>>( p_csr_rowA, p_csr_colA, p_gradA, nnzA, p_csr_rowB, p_csr_colB, p_gradB, nnzB, p_coo_rowC, p_csr_colC, p_gradC, nnzC, alpha, beta, rows, cols); THCudaCheck(cudaPeekAtLastError()); } __global__ void matmul_preserve_sparsity_kernel( const int* p_csr_row1, const int* p_csr_col1, const float* p_data1, const int* p_csr_row2, const int* p_csr_col2, const float* p_data2, const int* p_coo_row_out, const int* p_coo_col_out, float* p_out, const int nnz_out) { const int64_t idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nnz_out) { int row = p_coo_row_out[idx]; int col = p_coo_col_out[idx]; int ptr1 = p_csr_row1[row]; int ptr2 = p_csr_row2[col]; int end1 = p_csr_row1[row+1]; int end2 = p_csr_row2[col+1]; float sum = 0.0f; while(ptr1 < end1 && ptr2 < end2) { int col1 = p_csr_col1[ptr1]; int col2 = p_csr_col2[ptr2]; if( col1 == col2 ) { sum += p_data1[ptr1]*p_data2[ptr2]; ++ptr1; ++ptr2; } else if (col1 < col2){ ++ptr1; } else { ++ptr2; } } p_out[idx] = sum; } } /** * in1.in2(T) */ void matmul_preserve_sparsity_cuda( const int* p_csr_row1, const int* p_csr_col1, const float* p_data1, const int* p_csr_row2, const int* p_csr_col2, const float* p_data2, const int* p_coo_row_out, 
const int* p_coo_col_out, float* p_out, const int nnz_out) { const int64_t block_sz = 512; const int64_t nblocks = (nnz_out + block_sz - 1) / block_sz; matmul_preserve_sparsity_kernel<<<nblocks, block_sz, 0, THCState_getCurrentStream(state)>>>( p_csr_row1, p_csr_col1, p_data1, p_csr_row2, p_csr_col2, p_data2, p_coo_row_out, p_coo_col_out, p_out, nnz_out); THCudaCheck(cudaPeekAtLastError()); }
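matmul_preserve_sparsity_kernel computes each requested output entry as the dot product of one CSR row of the first operand with one CSR row of the second (an A.B(T) entry), walking the two sorted column lists with a two-pointer merge. A host-side reference of the same merge, with hypothetical names and toy data:

#include <cstdio>
#include <vector>

// Dot product of two sparse rows whose column indices are sorted ascending,
// mirroring the two-pointer loop in matmul_preserve_sparsity_kernel.
float sparse_row_dot(const std::vector<int>& colsA, const std::vector<float>& valsA,
                     const std::vector<int>& colsB, const std::vector<float>& valsB)
{
    float sum = 0.0f;
    size_t i = 0, j = 0;
    while (i < colsA.size() && j < colsB.size()) {
        if (colsA[i] == colsB[j])     { sum += valsA[i] * valsB[j]; ++i; ++j; }
        else if (colsA[i] < colsB[j]) { ++i; }   // advance the smaller column
        else                          { ++j; }
    }
    return sum;
}

int main()
{
    // Row of A has columns {0, 2, 5}; row of B has columns {2, 3, 5}.
    float d = sparse_row_dot({0, 2, 5}, {1.f, 2.f, 3.f}, {2, 3, 5}, {4.f, 5.f, 6.f});
    printf("%f\n", d);  // 2*4 + 3*6 = 26
    return 0;
}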
4b10bf783be2bf2735e5d8813668dd80897ec2a7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "copyRow.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *in = NULL; hipMalloc(&in, XSIZE*YSIZE); int *out = NULL; hipMalloc(&out, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( copyRow), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,nx,ny); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( copyRow), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( copyRow), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4b10bf783be2bf2735e5d8813668dd80897ec2a7.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "copyRow.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); int *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); copyRow<<<gridBlock,threadBlock>>>(in,out,nx,ny); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { copyRow<<<gridBlock,threadBlock>>>(in,out,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { copyRow<<<gridBlock,threadBlock>>>(in,out,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
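The harness above pads iXSIZE/iYSIZE up to multiples of the block dimensions with increment loops and then divides; the conventional equivalent is ceil division. Note also that cudaMalloc takes a byte count, so sizing an int buffer normally multiplies by sizeof(int), whereas the calls above pass XSIZE*YSIZE bytes. The sketch below is a hedged restatement of that setup, not a drop-in patch for the file.

#include <cuda_runtime.h>

// Illustrative launch-setup helper (hypothetical name), showing ceil division
// and byte-accurate allocation sizes for an int buffer.
void setup_launch(int XSIZE, int YSIZE, int BLOCKX, int BLOCKY)
{
    int *in = nullptr, *out = nullptr;
    cudaMalloc(&in,  (size_t)XSIZE * YSIZE * sizeof(int));
    cudaMalloc(&out, (size_t)XSIZE * YSIZE * sizeof(int));

    dim3 threadBlock(BLOCKX, BLOCKY);
    dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX,   // ceil(XSIZE / BLOCKX)
                   (YSIZE + BLOCKY - 1) / BLOCKY);  // ceil(YSIZE / BLOCKY)
    // copyRow<<<gridBlock, threadBlock>>>(in, out, nx, ny); would follow here.

    cudaFree(in);
    cudaFree(out);
}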
f0a13c8103d85becaaf1d207e8f99aeef38642d8.hip
// !!! This is a file automatically generated by hipify!!! #include "generator.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "log.h" #include "chunk.h" #include "simplex.cuh" #include "generator_config.h" #include "generator_cpu.h" static uint8_t* gpuChunkBlocks; static uint32_t* gpuTerrainHeightMap; __constant__ uint8_t terrainConst[CONFIG_TERRAIN_HEIGHT]; __device__ int generate_terrain_height(int worldX, int worldZ) { float simplexValue = repeaterSimplex(make_float3(static_cast<float>(worldX) / 64.0f, 0.0f, static_cast<float>(worldZ) / 64.0f), 1.0f, CONFIG_SEED_BASE, 3, 3.0f, 0.25f); simplexValue = ((simplexValue + 1.0f) / 2.0f); return (int)floorf(simplexValue * CONFIG_TERRAIN_HEIGHT) + 1; } __device__ uint32_t hashInt3(int x, int y, int z) { return (x * 607495) + (y * 359609) + (z * 654846); } __global__ void kernel_generator_fillChunk_nonOpt_dim3(uint8_t* chunkData, int3 worldPosition) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int idz = threadIdx.z + blockDim.z * blockIdx.z; int worldX = idx + worldPosition.x; int worldY = idy + worldPosition.y; int worldZ = idz + worldPosition.z; int height = generate_terrain_height(worldX, worldZ); uint8_t block = 0; if (worldY == height) block = 3; else if (worldY < height && worldY > height - 3) block = 2; else if (worldY < height) block = 1; chunkData[CHUNK_OFFSET(idx, idy, idz)] = block; } __global__ void kernel_generator_fillChunk_dim3(uint8_t* chunkData, int3 worldPosition) { __shared__ int terrainHeight[CHUNK_SIZE * CHUNK_SIZE]; int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int idz = threadIdx.z + blockDim.z * blockIdx.z; terrainHeight[idx + CHUNK_SIZE * idz] = 0; __syncthreads(); int worldX = idx + worldPosition.x; int worldY = idy + worldPosition.y; int worldZ = idz + worldPosition.z; int height = 0; if (terrainHeight[idx + CHUNK_SIZE * idz] == 0) { terrainHeight[idx + CHUNK_SIZE * idz] = height = generate_terrain_height(worldX, worldZ); } else { height = terrainHeight[idx + CHUNK_SIZE * idz]; } if (worldY > height) { return; } float cave = repeaterSimplex(make_float3(static_cast<float>(worldX) / 64.0f, static_cast<float>(worldY) / 64.0f, static_cast<float>(worldZ) / 64.0f), 1.0f, CONFIG_SEED_BASE, 3, 3.0f, 0.25f); cave = ((cave + 1.0f) / 2.0f); if (cave >= CONFIG_CAVE_THRESHOLD) { return; } int offset = height - worldY; chunkData[CHUNK_OFFSET(idx, idy, idz)] = terrainConst[offset]; } __global__ void kernel_generator_fillChunk_memHeightmap_dim3(uint8_t* chunkData, int3 worldPosition, uint32_t* terrainHeight) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int idz = threadIdx.z + blockDim.z * blockIdx.z; int worldX = idx + worldPosition.x; int worldY = idy + worldPosition.y; int worldZ = idz + worldPosition.z; int height = 0; if (terrainHeight[idx + CHUNK_SIZE * idz] == 0) { terrainHeight[idx + CHUNK_SIZE * idz] = height = generate_terrain_height(worldX, worldZ); } else { height = terrainHeight[idx + CHUNK_SIZE * idz]; } if (worldY > height) { return; } float cave = repeaterSimplex(make_float3(static_cast<float>(worldX) / 64.0f, static_cast<float>(worldY) / 64.0f, static_cast<float>(worldZ) / 64.0f), 1.0f, CONFIG_SEED_BASE, 3, 3.0f, 0.25f); cave = ((cave + 1.0f) / 2.0f); if (cave >= CONFIG_CAVE_THRESHOLD) { return; } int offset = height - worldY; chunkData[CHUNK_OFFSET(idx, idy, idz)] 
= terrainConst[offset]; } #define O BLOCK_LOG #define L BLOCK_LEAVES #define TREE_TEMPLATE_SIZE 5, 7, 5 uint8_t* gpuTreeTemplate; uint8_t treeTemplate[] = { // 0 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, O, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, O, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, O, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3 L, L, L, L, L, L, L, L, L, L, L, L, O, L, L, L, L, L, L, L, L, L, L, L, L, // 4 L, L, L, L, L, L, L, L, L, L, L, L, O, L, L, L, L, L, L, L, L, L, L, L, L, // 5 0, 0, 0, 0, 0, 0, 0, L, 0, 0, 0, L, O, L, 0, 0, 0, L, 0, 0, 0, 0, 0, 0, 0, // 6 0, 0, 0, 0, 0, 0, 0, L, 0, 0, 0, L, L, L, 0, 0, 0, L, 0, 0, 0, 0, 0, 0, 0, }; __global__ void kernel_decorator_trees(uint8_t* chunkData, int3 worldPosition, uint8_t* templateData, int3 templateSize) { hiprandState_t randBase; int idx = threadIdx.x + blockDim.x * blockIdx.x; uint32_t seed = hashInt3(worldPosition.x, CONFIG_SEED_BASE, worldPosition.z); hiprand_init(seed, idx, 0, &randBase); int testX = clamp((int)floorf(hiprand_uniform(&randBase) * (CHUNK_SIZE - 4)) + 2, 0, CHUNK_SIZE - 1); int testZ = clamp((int)floorf(hiprand_uniform(&randBase) * (CHUNK_SIZE - 4)) + 2, 0, CHUNK_SIZE - 1); int height = generate_terrain_height(testX + worldPosition.x, testZ + worldPosition.z); int treeBottom = height + 1; int treeTop = treeBottom + templateSize.y; if ((treeTop < worldPosition.y && treeBottom < worldPosition.y) || (treeTop >= worldPosition.y + CHUNK_SIZE && treeBottom >= worldPosition.y + CHUNK_SIZE)) { return; } int treeX = testX - 2; int treeZ = testZ - 2; for (int templateY = 0; templateY < templateSize.y; templateY++) { int blockY = treeBottom + templateY - worldPosition.y; if (blockY < 0) continue; if (blockY >= CHUNK_SIZE) break; for (int templateX = 0; templateX < templateSize.x; templateX++) { for (int templateZ = 0; templateZ < templateSize.z; templateZ++) { uint8_t templateBlock = templateData[templateX + templateZ * templateSize.x + templateY * templateSize.x * templateSize.z]; if (templateBlock == 0) { continue; } chunkData[CHUNK_OFFSET(treeX + templateX, blockY, treeZ + templateZ)] = templateBlock; } } } } void cuda_generate_init() { cpu_generate_init(); CUDA_CHECK(hipSetDevice(0)); CUDA_CHECK(hipMalloc((void**)&gpuChunkBlocks, CHUNK_BLOCKS)); CUDA_CHECK(hipMalloc((void**)&gpuTreeTemplate, sizeof(treeTemplate))); CUDA_CHECK(hipMemcpy(gpuTreeTemplate, treeTemplate, sizeof(treeTemplate), hipMemcpyKind::hipMemcpyHostToDevice)); // Konstans memória uint8_t terrainData[CONFIG_TERRAIN_HEIGHT] = { 0 }; for (int x = 0; x < CONFIG_TERRAIN_HEIGHT; x++) { if (x == 0) { terrainData[x] = BLOCK_GRASS; } else if (x < CONFIG_DIRT_HEIGHT + 1) { terrainData[x] = BLOCK_DIRT; } else { terrainData[x] = BLOCK_STONE; } } CUDA_CHECK(hipMemcpyToSymbol(terrainConst, terrainData, sizeof(uint8_t) * CONFIG_TERRAIN_HEIGHT, 0, hipMemcpyKind::hipMemcpyHostToDevice)); // Magasság térkép CUDA_CHECK(hipMalloc((void**)&gpuTerrainHeightMap, CHUNK_SIZE * CHUNK_SIZE * sizeof(uint32_t))); } void cuda_generate_clean() { cpu_generate_clean(); CUDA_CHECK(hipFree(gpuChunkBlocks)); CUDA_CHECK(hipFree(gpuTreeTemplate)); } // Első: ~18ms // Többi átlag: 0.9ms void cuda_generate_chunk(Chunk* chunk) { #ifndef GEN_ON_GPU cpu_generate_chunk(chunk); return; #endif CUDA_CHECK(hipMemset(gpuChunkBlocks, 0, CHUNK_BLOCKS)); CUDA_CHECK(hipMemset(gpuTerrainHeightMap, 0, CHUNK_SIZE * CHUNK_SIZE * sizeof(uint32_t))); int3 chunkPos = chunk->getChunkPosition(); vec3 worldPosition = vec3(chunkPos.x, chunkPos.y,
chunkPos.z) * (float)CHUNK_SIZE; dim3 block(4, 4, 4); dim3 grid(4, 4, 4); // Time hipEvent_t start, stop; CUDA_CHECK(hipEventCreate(&start)); CUDA_CHECK(hipEventRecord(start, 0)); int3 worldBlockPos = make_int3((int)worldPosition.x, (int)worldPosition.y, (int)worldPosition.z); // Eredeti CUDA: #ifdef GEN_NAIV kernel_generator_fillChunk_nonOpt_dim3 << <grid, block >> > (gpuChunkBlocks, worldBlockPos); #endif // Shared memória CUDA: #ifdef GEN_CONST_MEM_SHARED_MEM kernel_generator_fillChunk_dim3 << <grid, block >> > (gpuChunkBlocks, worldBlockPos); #endif // GPU memória magasság térkép: #ifdef GEN_HEIGHTMAP kernel_generator_fillChunk_memHeightmap_dim3 << <grid, block >> > (gpuChunkBlocks, worldBlockPos, gpuTerrainHeightMap); #endif // Dekorálás (túl optimalizálatlan) /*if (CONFIG_NUM_TREES > 0) { kernel_decorator_trees << <1, CONFIG_NUM_TREES >> > (gpuChunkBlocks, worldBlockPos, gpuTreeTemplate, make_int3(TREE_TEMPLATE_SIZE)); }*/ // Time CUDA_CHECK(hipEventCreate(&stop)); CUDA_CHECK(hipEventRecord(stop, 0)); CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipEventSynchronize(stop)); float elapsed; CUDA_CHECK(hipEventElapsedTime(&elapsed, start, stop)); printf("Generate time: %f ms\n", elapsed); CUDA_CHECK(hipMemcpy(chunk->blocks, gpuChunkBlocks, CHUNK_BLOCKS, hipMemcpyKind::hipMemcpyDeviceToHost)); CUDA_CHECK(hipDeviceSynchronize()); }
f0a13c8103d85becaaf1d207e8f99aeef38642d8.cu
#include "generator.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <curand.h> #include <curand_kernel.h> #include "log.h" #include "chunk.h" #include "simplex.cuh" #include "generator_config.h" #include "generator_cpu.h" static uint8_t* gpuChunkBlocks; static uint32_t* gpuTerrainHeightMap; __constant__ uint8_t terrainConst[CONFIG_TERRAIN_HEIGHT]; __device__ int generate_terrain_height(int worldX, int worldZ) { float simplexValue = repeaterSimplex(make_float3(static_cast<float>(worldX) / 64.0f, 0.0f, static_cast<float>(worldZ) / 64.0f), 1.0f, CONFIG_SEED_BASE, 3, 3.0f, 0.25f); simplexValue = ((simplexValue + 1.0f) / 2.0f); return (int)floorf(simplexValue * CONFIG_TERRAIN_HEIGHT) + 1; } __device__ uint32_t hashInt3(int x, int y, int z) { return (x * 607495) + (y * 359609) + (z * 654846); } __global__ void kernel_generator_fillChunk_nonOpt_dim3(uint8_t* chunkData, int3 worldPosition) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int idz = threadIdx.z + blockDim.z * blockIdx.z; int worldX = idx + worldPosition.x; int worldY = idy + worldPosition.y; int worldZ = idz + worldPosition.z; int height = generate_terrain_height(worldX, worldZ); uint8_t block = 0; if (worldY == height) block = 3; else if (worldY < height && worldY > height - 3) block = 2; else if (worldY < height) block = 1; chunkData[CHUNK_OFFSET(idx, idy, idz)] = block; } __global__ void kernel_generator_fillChunk_dim3(uint8_t* chunkData, int3 worldPosition) { __shared__ int terrainHeight[CHUNK_SIZE * CHUNK_SIZE]; int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int idz = threadIdx.z + blockDim.z * blockIdx.z; terrainHeight[idx + CHUNK_SIZE * idz] = 0; __syncthreads(); int worldX = idx + worldPosition.x; int worldY = idy + worldPosition.y; int worldZ = idz + worldPosition.z; int height = 0; if (terrainHeight[idx + CHUNK_SIZE * idz] == 0) { terrainHeight[idx + CHUNK_SIZE * idz] = height = generate_terrain_height(worldX, worldZ); } else { height = terrainHeight[idx + CHUNK_SIZE * idz]; } if (worldY > height) { return; } float cave = repeaterSimplex(make_float3(static_cast<float>(worldX) / 64.0f, static_cast<float>(worldY) / 64.0f, static_cast<float>(worldZ) / 64.0f), 1.0f, CONFIG_SEED_BASE, 3, 3.0f, 0.25f); cave = ((cave + 1.0f) / 2.0f); if (cave >= CONFIG_CAVE_THRESHOLD) { return; } int offset = height - worldY; chunkData[CHUNK_OFFSET(idx, idy, idz)] = terrainConst[offset]; } __global__ void kernel_generator_fillChunk_memHeightmap_dim3(uint8_t* chunkData, int3 worldPosition, uint32_t* terrainHeight) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int idz = threadIdx.z + blockDim.z * blockIdx.z; int worldX = idx + worldPosition.x; int worldY = idy + worldPosition.y; int worldZ = idz + worldPosition.z; int height = 0; if (terrainHeight[idx + CHUNK_SIZE * idz] == 0) { terrainHeight[idx + CHUNK_SIZE * idz] = height = generate_terrain_height(worldX, worldZ); } else { height = terrainHeight[idx + CHUNK_SIZE * idz]; } if (worldY > height) { return; } float cave = repeaterSimplex(make_float3(static_cast<float>(worldX) / 64.0f, static_cast<float>(worldY) / 64.0f, static_cast<float>(worldZ) / 64.0f), 1.0f, CONFIG_SEED_BASE, 3, 3.0f, 0.25f); cave = ((cave + 1.0f) / 2.0f); if (cave >= CONFIG_CAVE_THRESHOLD) { return; } int offset = height - worldY; chunkData[CHUNK_OFFSET(idx, idy, idz)] = terrainConst[offset]; } #define O BLOCK_LOG #define L BLOCK_LEAVES #define 
TREE_TEMPLATE_SIZE 5, 7, 5 uint8_t* gpuTreeTemplate; uint8_t treeTemplate[] = { // 0 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, O, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 1 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, O, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, O, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3 L, L, L, L, L, L, L, L, L, L, L, L, O, L, L, L, L, L, L, L, L, L, L, L, L, // 4 L, L, L, L, L, L, L, L, L, L, L, L, O, L, L, L, L, L, L, L, L, L, L, L, L, // 5 0, 0, 0, 0, 0, 0, 0, L, 0, 0, 0, L, O, L, 0, 0, 0, L, 0, 0, 0, 0, 0, 0, 0, // 6 0, 0, 0, 0, 0, 0, 0, L, 0, 0, 0, L, L, L, 0, 0, 0, L, 0, 0, 0, 0, 0, 0, 0, }; __global__ void kernel_decorator_trees(uint8_t* chunkData, int3 worldPosition, uint8_t* templateData, int3 templateSize) { curandState randBase; int idx = threadIdx.x + blockDim.x * blockIdx.x; uint32_t seed = hashInt3(worldPosition.x, CONFIG_SEED_BASE, worldPosition.z); curand_init(seed, idx, 0, &randBase); int testX = clamp((int)floorf(curand_uniform(&randBase) * (CHUNK_SIZE - 4)) + 2, 0, CHUNK_SIZE - 1); int testZ = clamp((int)floorf(curand_uniform(&randBase) * (CHUNK_SIZE - 4)) + 2, 0, CHUNK_SIZE - 1); int height = generate_terrain_height(testX + worldPosition.x, testZ + worldPosition.z); int treeBottom = height + 1; int treeTop = treeBottom + templateSize.y; if ((treeTop < worldPosition.y && treeBottom < worldPosition.y) || (treeTop >= worldPosition.y + CHUNK_SIZE && treeBottom >= worldPosition.y + CHUNK_SIZE)) { return; } int treeX = testX - 2; int treeZ = testZ - 2; for (int templateY = 0; templateY < templateSize.y; templateY++) { int blockY = treeBottom + templateY - worldPosition.y; if (blockY < 0) continue; if (blockY >= CHUNK_SIZE) break; for (int templateX = 0; templateX < templateSize.x; templateX++) { for (int templateZ = 0; templateZ < templateSize.z; templateZ++) { uint8_t templateBlock = templateData[templateX + templateZ * templateSize.x + templateY * templateSize.x * templateSize.z]; if (templateBlock == 0) { continue; } chunkData[CHUNK_OFFSET(treeX + templateX, blockY, treeZ + templateZ)] = templateBlock; } } } } void cuda_generate_init() { cpu_generate_init(); CUDA_CHECK(cudaSetDevice(0)); CUDA_CHECK(cudaMalloc((void**)&gpuChunkBlocks, CHUNK_BLOCKS)); CUDA_CHECK(cudaMalloc((void**)&gpuTreeTemplate, sizeof(treeTemplate))); CUDA_CHECK(cudaMemcpy(gpuTreeTemplate, treeTemplate, sizeof(treeTemplate), cudaMemcpyKind::cudaMemcpyHostToDevice)); // Konstans memória uint8_t terrainData[CONFIG_TERRAIN_HEIGHT] = { 0 }; for (int x = 0; x < CONFIG_TERRAIN_HEIGHT; x++) { if (x == 0) { terrainData[x] = BLOCK_GRASS; } else if (x < CONFIG_DIRT_HEIGHT + 1) { terrainData[x] = BLOCK_DIRT; } else { terrainData[x] = BLOCK_STONE; } } CUDA_CHECK(cudaMemcpyToSymbol(terrainConst, terrainData, sizeof(uint8_t) * CONFIG_TERRAIN_HEIGHT, 0, cudaMemcpyKind::cudaMemcpyHostToDevice)); // Magasság térkép CUDA_CHECK(cudaMalloc((void**)&gpuTerrainHeightMap, CHUNK_SIZE * CHUNK_SIZE * sizeof(uint32_t))); } void cuda_generate_clean() { cpu_generate_clean(); CUDA_CHECK(cudaFree(gpuChunkBlocks)); CUDA_CHECK(cudaFree(gpuTreeTemplate)); } // Első: ~18ms // Többi átlag: 0.9ms void cuda_generate_chunk(Chunk* chunk) { #ifndef GEN_ON_GPU cpu_generate_chunk(chunk); return; #endif CUDA_CHECK(cudaMemset(gpuChunkBlocks, 0, CHUNK_BLOCKS)); CUDA_CHECK(cudaMemset(gpuTerrainHeightMap, 0, CHUNK_SIZE * CHUNK_SIZE * sizeof(uint32_t))); int3 chunkPos = chunk->getChunkPosition(); vec3 worldPosition = vec3(chunkPos.x, chunkPos.y, chunkPos.z) * (float)CHUNK_SIZE; dim3 block(4, 4, 4); dim3 grid(4, 4, 
4); // Time cudaEvent_t start, stop; CUDA_CHECK(cudaEventCreate(&start)); CUDA_CHECK(cudaEventRecord(start, 0)); int3 worldBlockPos = make_int3((int)worldPosition.x, (int)worldPosition.y, (int)worldPosition.z); // Eredeti CUDA: #ifdef GEN_NAIV kernel_generator_fillChunk_nonOpt_dim3 << <grid, block >> > (gpuChunkBlocks, worldBlockPos); #endif // Shared memória CUDA: #ifdef GEN_CONST_MEM_SHARED_MEM kernel_generator_fillChunk_dim3 << <grid, block >> > (gpuChunkBlocks, worldBlockPos); #endif // GPU memória magasság térkép: #ifdef GEN_HEIGHTMAP kernel_generator_fillChunk_memHeightmap_dim3 << <grid, block >> > (gpuChunkBlocks, worldBlockPos, gpuTerrainHeightMap); #endif // Dekorálás (túl optimalizálatlan) /*if (CONFIG_NUM_TREES > 0) { kernel_decorator_trees << <1, CONFIG_NUM_TREES >> > (gpuChunkBlocks, worldBlockPos, gpuTreeTemplate, make_int3(TREE_TEMPLATE_SIZE)); }*/ // Time CUDA_CHECK(cudaEventCreate(&stop)); CUDA_CHECK(cudaEventRecord(stop, 0)); CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaEventSynchronize(stop)); float elapsed; CUDA_CHECK(cudaEventElapsedTime(&elapsed, start, stop)); printf("Generate time: %f ms\n", elapsed); CUDA_CHECK(cudaMemcpy(chunk->blocks, gpuChunkBlocks, CHUNK_BLOCKS, cudaMemcpyKind::cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaDeviceSynchronize()); }
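In kernel_generator_fillChunk_dim3 above, every thread of an (x,z) column tests terrainHeight[idx + CHUNK_SIZE * idz] == 0 and may recompute the height redundantly (each recomputation yields the same value, so this costs extra work rather than correctness). A common alternative is to let one thread per column fill the shared entry and synchronize before use. The kernel below is a self-contained illustration of that pattern only; height_fn, BS, N and the flat output indexing are made up for the sketch and are not part of the project.

#include <cstdint>
#include <cuda_runtime.h>

#define BS 4   // assumed block edge, matching dim3 block(4, 4, 4) above
#define N  16  // assumed chunk edge (4x4x4 grid of 4x4x4 blocks)

__device__ int height_fn(int wx, int wz) { return (7 * wx + 3 * wz) % N; }  // stand-in

__global__ void fill_with_cached_height(uint8_t* out)
{
    __shared__ int colHeight[BS][BS];
    int x = blockIdx.x * BS + threadIdx.x;
    int y = blockIdx.y * BS + threadIdx.y;
    int z = blockIdx.z * BS + threadIdx.z;

    if (threadIdx.y == 0)                                 // one thread per (x,z) column
        colHeight[threadIdx.z][threadIdx.x] = height_fn(x, z);
    __syncthreads();                                      // publish the cached heights

    int height = colHeight[threadIdx.z][threadIdx.x];
    out[(z * N + y) * N + x] = (y <= height) ? 1 : 0;     // fill at and below the surface
}
// launch: fill_with_cached_height<<<dim3(4,4,4), dim3(4,4,4)>>>(d_out);
// where d_out is a device buffer of N*N*N bytes.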
a51b0cf52c9be8fafc3d781488fa78b87f5ca9dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017, The OctNet authors // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "octnet/gpu/loss.h" #include "octnet/gpu/gpu.h" #include <cstdio> #include <cstdlib> #include <thrust/execution_policy.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> struct octree_plus_float2_bfcn : public thrust::binary_function<float2, float2, float2> { __host__ __device__ float2 operator()(float2 in1, float2 in2) { float2 ret; ret.x = in1.x + in2.x; ret.y = in1.y + in2.y; return ret; } }; struct octree_mse_loss_from_leaf_idx : public thrust::unary_function<int, ot_data_t> { const octree input; const octree target; octree_mse_loss_from_leaf_idx(const octree input_, const octree target_) : input(input_), target(target_) { } __host__ __device__ ot_data_t operator()(const int leaf_idx) { const int grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* tree = octree_get_tree(&input, grid_idx); // const int cum_n_leafs = n_leafs_upto(&input, grid_idx); const int cum_n_leafs = input.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // const ot_data_t* in = input.data_ptrs[grid_idx] + data_idx * input.feature_size; const ot_data_t* in = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // const ot_data_t* ta = target.data_ptrs[grid_idx] + data_idx * target.feature_size; const ot_data_t* ta = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 
8 : 1)); ot_data_t sum = 0; for(int f = 0; f < input.feature_size; ++f) { const ot_data_t z = in[f] - ta[f]; sum += vol * z * z; } return sum; } }; extern "C" ot_data_t octree_mse_loss_gpu(const octree* input, const octree* target, bool size_average, bool check) { if(!octree_equal_shape(input, target)) { printf("[ERROR] mse_loss - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] mse_loss - tree structure of inputs do not match\n"); exit(-1); } thrust::counting_iterator<int> iter(0); ot_data_t output = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_mse_loss_from_leaf_idx(*input, *target), ot_data_t(0), thrust::plus<ot_data_t>()); if(size_average) { output = output / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } return output; } __global__ void kernel_mse_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* tree = octree_get_tree(&grad, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad, grid_idx); const int cum_n_leafs = grad.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // ot_data_t* grad_data = grad.data_ptrs[grid_idx] + data_idx * grad.feature_size; ot_data_t* grad_data = octree_get_data(&grad, grid_idx) + data_idx * grad.feature_size; // ot_data_t* input_data = input.data_ptrs[grid_idx] + data_idx * input.feature_size; ot_data_t* input_data = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // ot_data_t* target_data = target.data_ptrs[grid_idx] + data_idx * target.feature_size; ot_data_t* target_data = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 
8 : 1)); for(int f = 0; f < input.feature_size; ++f) { const ot_data_t z = input_data[f] - target_data[f]; grad_data[f] = vol * norm * z; } } } extern "C" void octree_mse_loss_bwd_gpu(const octree* input, const octree* target, bool size_average, bool check, octree* grad) { if(!octree_equal_shape(input, target)) { printf("[ERROR] mse_loss_bwd - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] mse_loss_bwd - tree structure of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 2.0; if(size_average) { norm = norm / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); hipLaunchKernelGGL(( kernel_mse_loss_bwd), dim3(GET_BLOCKS(grad->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0, *grad, grad->n_leafs, *input, *target, norm ); CUDA_POST_KERNEL_CHECK; } struct octree_mse_ds_loss_from_leaf_idx : public thrust::unary_function<int, ot_data_t> { const octree input; const octree target; octree_mse_ds_loss_from_leaf_idx(const octree input_, const octree target_) : input(input_), target(target_) { } __host__ __device__ ot_data_t operator()(const int leaf_idx) { const int in_grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* in_tree = octree_get_tree(&input, in_grid_idx); int in_data_idx = leaf_idx - input.prefix_leafs[in_grid_idx]; int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&input, in_grid_idx, in_bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); ot_data_t grid_out = 0; for(int d = ds; d < (ds+width); ++d) { for(int h = hs; h < (hs+width); ++h) { for(int w = ws; w < (ws+width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int ta_grid_idx = octree_grid_idx(&target, n, gd,gh,gw); const ot_tree_t* ta_tree = octree_get_tree(&target, ta_grid_idx); int ta_bit_idx = tree_bit_idx(ta_tree, bd,bh,bw); int ta_data_idx = tree_data_idx(ta_tree, ta_bit_idx, target.feature_size); const ot_data_t* ta_data = octree_get_data(&target, ta_grid_idx); for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = input.data[leaf_idx * input.feature_size + f]; ot_data_t y = ta_data[ta_data_idx + f]; grid_out += (x-y) * (x-y); } } } } return grid_out; } }; extern "C" ot_data_t octree_mse_ds_loss_gpu(const octree* input, const octree* target, bool size_average) { if(!octree_equal_shape(input, target)) { printf("[ERROR] mse_ds_loss - shape of inputs do not match\n"); exit(-1); } thrust::counting_iterator<int> iter(0); ot_data_t output = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_mse_ds_loss_from_leaf_idx(*input, *target), ot_data_t(0), thrust::plus<ot_data_t>()); if(size_average) { output = output / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } return output; } __global__ void kernel_mse_ds_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int in_grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* in_tree = octree_get_tree(&input, in_grid_idx); int in_data_idx = leaf_idx - input.prefix_leafs[in_grid_idx]; int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx); int n,ds,hs,ws; int depth = 
octree_ind_to_dense_ind(&input, in_grid_idx, in_bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); for(int f = 0; f < grad.feature_size; ++f) { grad.data[leaf_idx * grad.feature_size + f] = 0; } for(int d = ds; d < (ds+width); ++d) { for(int h = hs; h < (hs+width); ++h) { for(int w = ws; w < (ws+width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int ta_grid_idx = octree_grid_idx(&target, n, gd,gh,gw); const ot_tree_t* ta_tree = octree_get_tree(&target, ta_grid_idx); int ta_bit_idx = tree_bit_idx(ta_tree, bd,bh,bw); int ta_data_idx = tree_data_idx(ta_tree, ta_bit_idx, target.feature_size); const ot_data_t* ta_data = octree_get_data(&target, ta_grid_idx); for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = input.data[leaf_idx * input.feature_size + f]; ot_data_t y = ta_data[ta_data_idx + f]; grad.data[leaf_idx * grad.feature_size + f] += norm * (x-y); } } } } } } extern "C" void octree_mse_loss_ds_bwd_gpu(const octree* input, const octree* target, bool size_average, octree* grad) { if(!octree_equal_shape(input, target)) { printf("[ERROR] mse_ds_loss_bwd - shape of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 2.0; if(size_average) { norm = norm / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); hipLaunchKernelGGL(( kernel_mse_ds_loss_bwd), dim3(GET_BLOCKS(grad->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0, *grad, grad->n_leafs, *input, *target, norm ); CUDA_POST_KERNEL_CHECK; } struct octree_nll_loss_from_leaf_idx : public thrust::unary_function<int, float2> { const octree input; const octree target; const ot_data_t* weights; const int class_base; octree_nll_loss_from_leaf_idx(const octree input_, const octree target_, const ot_data_t* weights_, int class_base_) : input(input_), target(target_), weights(weights_), class_base(class_base_) { } __host__ __device__ float2 operator()(const int leaf_idx) { const int grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* tree = octree_get_tree(&input, grid_idx); // const int cum_n_leafs = n_leafs_upto(&input, grid_idx); const int cum_n_leafs = input.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // const ot_data_t* in = input.data_ptrs[grid_idx] + data_idx * input.feature_size; const ot_data_t* in = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // const ot_data_t* ta = target.data_ptrs[grid_idx] + data_idx * target.feature_size; const ot_data_t* ta = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 
8 : 1)); int cur_target = round(ta[0]) - class_base; assert(cur_target >= 0 && cur_target < input.feature_size); ot_data_t weight = vol * weights[cur_target]; // if(cur_target >= input.feature_size || cur_target <= 0) // printf(" ERROR cur_target=%d, weight=%f\n", cur_target, weight); float2 ret_val; ret_val.x = -weight * in[cur_target]; ret_val.y = weight; return ret_val; } }; extern "C" void octree_nll_loss_gpu(const octree* input, const octree* target, const ot_data_t* weights, int class_base, bool size_average, bool check, ot_data_t* output, ot_data_t* total_weight) { if(input->n != target->n || input->grid_depth != target->grid_depth || input->grid_height != target->grid_height || input->grid_width != target->grid_width || 1 != target->feature_size) { printf("[ERROR] nll_loss - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] nll_loss - tree structure of inputs do not match\n"); exit(-1); } float2 init; init.x = 0; init.y = 0; thrust::counting_iterator<int> iter(0); float2 res = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_nll_loss_from_leaf_idx(*input, *target, weights, class_base), init, octree_plus_float2_bfcn()); output[0] = res.x; total_weight[0] = res.y; if(size_average && total_weight[0] != 0) { output[0] /= total_weight[0]; } } __global__ void kernel_nll_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const ot_data_t* weights, const ot_data_t norm, const int class_base) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* tree = octree_get_tree(&grad, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad, grid_idx); const int cum_n_leafs = grad.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // ot_data_t* grad_data = grad.data_ptrs[grid_idx] + data_idx * grad.feature_size; ot_data_t* grad_data = octree_get_data(&grad, grid_idx) + data_idx * grad.feature_size; // ot_data_t* target_data = target.data_ptrs[grid_idx] + data_idx * target.feature_size; ot_data_t* target_data = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 
8 : 1)); int cur_target = round(target_data[0]) - class_base; assert(cur_target >= 0 && cur_target < input.feature_size); for(int f = 0; f < grad.feature_size; ++f) { grad_data[f] = 0; } grad_data[cur_target] = vol * -weights[cur_target] * norm; } } extern "C" void octree_nll_loss_bwd_gpu(const octree* input, const octree* target, const ot_data_t* weights, const ot_data_t total_weight, int class_base, bool size_average, bool check, octree* grad) { if(input->n != target->n || input->grid_depth != target->grid_depth || input->grid_height != target->grid_height || input->grid_width != target->grid_width || 1 != target->feature_size) { printf("[ERROR] nll_loss_bwd - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] nll_loss_bwd - tree structure of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 1.0; if(size_average) { norm /= total_weight; } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); hipLaunchKernelGGL(( kernel_nll_loss_bwd), dim3(GET_BLOCKS(grad->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0, *grad, grad->n_leafs, *input, *target, weights, norm, class_base ); CUDA_POST_KERNEL_CHECK; } #define EPS 1e-12 struct octree_bce_loss_from_leaf_idx : public thrust::unary_function<int, ot_data_t> { const octree input; const octree target; octree_bce_loss_from_leaf_idx(const octree input_, const octree target_) : input(input_), target(target_) { } __host__ __device__ ot_data_t operator()(const int leaf_idx) { const int grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* tree = octree_get_tree(&input, grid_idx); // const int cum_n_leafs = n_leafs_upto(&input, grid_idx); const int cum_n_leafs = input.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // const ot_data_t* in = input.data_ptrs[grid_idx] + data_idx * input.feature_size; const ot_data_t* in = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // const ot_data_t* ta = target.data_ptrs[grid_idx] + data_idx * target.feature_size; const ot_data_t* ta = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 8 : 1)); ot_data_t sum = 0; for(int f = 0; f < input.feature_size; ++f) { const ot_data_t x = in[f]; const ot_data_t y = ta[f]; sum -= vol *(log(x + EPS) * y + log(1. - x + EPS) * (1. 
- y)); } return sum; } }; extern "C" void octree_bce_loss_gpu(const octree* input, const octree* target, bool size_average, bool check, ot_data_t* output, ot_data_t* total_weight) { if(!octree_equal_shape(input, target)) { printf("[ERROR] bce_loss - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] bce_loss - tree structure of inputs do not match\n"); exit(-1); } thrust::counting_iterator<int> iter(0); *output = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_bce_loss_from_leaf_idx(*input, *target), ot_data_t(0), thrust::plus<ot_data_t>()); if(size_average) { *total_weight = octree_num_blocks(input) * input->feature_size * 8 * 8 * 8; *output = (*output) / (*total_weight) ; } else { *total_weight = 1; } } __global__ void kernel_bce_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* tree = octree_get_tree(&grad, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad, grid_idx); const int cum_n_leafs = grad.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // ot_data_t* grad_data = grad.data_ptrs[grid_idx] + data_idx * grad.feature_size; ot_data_t* grad_data = octree_get_data(&grad, grid_idx) + data_idx * grad.feature_size; // ot_data_t* input_data = input.data_ptrs[grid_idx] + data_idx * input.feature_size; ot_data_t* input_data = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // ot_data_t* target_data = target.data_ptrs[grid_idx] + data_idx * target.feature_size; ot_data_t* target_data = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 8 : 1)); for(int f = 0; f < input.feature_size; ++f) { const ot_data_t x = input_data[f]; const ot_data_t y = target_data[f]; grad_data[f] = - vol * norm * (y - x) / ((1. 
- x + EPS) * (x + EPS)); } } } extern "C" void octree_bce_loss_bwd_gpu(const octree* input, const octree* target, bool size_average, bool check, octree* grad) { if(!octree_equal_shape(input, target)) { printf("[ERROR] bce_loss_bwd - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] bce_loss_bwd - tree structure of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 1.0; if(size_average) { norm = norm / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); hipLaunchKernelGGL(( kernel_bce_loss_bwd), dim3(GET_BLOCKS(grad->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0, *grad, grad->n_leafs, *input, *target, norm ); CUDA_POST_KERNEL_CHECK; } struct octree_bce_dense_loss_from_leaf_idx : public thrust::unary_function<int, ot_data_t> { const octree input; const ot_data_t* target; octree_bce_dense_loss_from_leaf_idx(const octree input_, const ot_data_t* target_) : input(input_), target(target_) { } __host__ __device__ ot_data_t operator()(const int leaf_idx) { const int grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* tree = octree_get_tree(&input, grid_idx); // const int cum_n_leafs = n_leafs_upto(&input, grid_idx); const int cum_n_leafs = input.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // const ot_data_t* estimate = input.data_ptrs[grid_idx] + data_idx * input.feature_size; const ot_data_t* estimate = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; int n,ds,hs,ws; const int depth = octree_ind_to_dense_ind(&input, grid_idx, bit_idx, &n, &ds,&hs,&ws); const int size = width_from_depth(depth); const int dense_depth = 8 * input.grid_depth; const int dense_height = 8 * input.grid_height; const int dense_width = 8 * input.grid_width; // printf("leaf_idx=%d, n=%d, ds=%d,hs=%d,ws=%d, size=%d\n", leaf_idx, n, ds,hs,ws, size); ot_data_t out = 0; for(int d = ds; d < (ds+size); ++d) { for(int h = hs; h < (hs+size); ++h) { for(int w = ws; w < (ws+size); ++w) { for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = estimate[f]; ot_data_t y = target[(((n * input.feature_size + f) * dense_depth + d) * dense_height + h) * dense_width + w]; // printf(" x=%f, y=%f\n", x, y); out += (log(x + EPS) * y + log(1. - x + EPS) * (1. 
- y)); } } } } return -out; } }; extern "C" void octree_bce_dense_loss_gpu(const octree* input, const ot_data_t* target, bool size_average, ot_data_t* output, ot_data_t* total_weight) { thrust::counting_iterator<int> iter(0); *output = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_bce_dense_loss_from_leaf_idx(*input, target), ot_data_t(0), thrust::plus<ot_data_t>()); if(size_average) { *total_weight = octree_num_blocks(input) * input->feature_size * 8 * 8 * 8; *output = (*output) / (*total_weight) ; } else { *total_weight = 1; } } __global__ void kernel_bce_dense_loss_bwd(octree grad, int n_leafs, const octree input, const ot_data_t* target, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* tree = octree_get_tree(&grad, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad, grid_idx); const int cum_n_leafs = grad.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // ot_data_t* grad_data = grad.data_ptrs[grid_idx] + data_idx * grad.feature_size; ot_data_t* grad_data = octree_get_data(&grad, grid_idx) + data_idx * grad.feature_size; // ot_data_t* estimate = input.data_ptrs[grid_idx] + data_idx * input.feature_size; ot_data_t* estimate = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; int n,ds,hs,ws; const int depth = octree_ind_to_dense_ind(&input, grid_idx, bit_idx, &n, &ds,&hs,&ws); const int size = width_from_depth(depth); const int dense_depth = 8 * input.grid_depth; const int dense_height = 8 * input.grid_height; const int dense_width = 8 * input.grid_width; for(int f = 0; f < input.feature_size; ++f) { grad_data[f] = 0; } for(int d = ds; d < (ds+size); ++d) { for(int h = hs; h < (hs+size); ++h) { for(int w = ws; w < (ws+size); ++w) { for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = estimate[f]; ot_data_t y = target[(((n * input.feature_size + f) * dense_depth + d) * dense_height + h) * dense_width + w]; grad_data[f] -= norm * (y - x) / ((1. 
- x + EPS) * (x + EPS)); } } } } } } extern "C" void octree_bce_dense_loss_bwd_gpu(const octree* input, const ot_data_t* target, bool size_average, octree* grad) { octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 1.0; if(size_average) { norm = norm / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); hipLaunchKernelGGL(( kernel_bce_dense_loss_bwd), dim3(GET_BLOCKS(grad->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0, *grad, grad->n_leafs, *input, target, norm ); CUDA_POST_KERNEL_CHECK; } struct octree_bce_ds_loss_from_leaf_idx : public thrust::unary_function<int, float2> { const octree input; const octree target; const octree weights; bool use_weights; octree_bce_ds_loss_from_leaf_idx(const octree input_, const octree target_, const octree weights_, bool use_weights_) : input(input_), target(target_), weights(weights_), use_weights(use_weights_) { } __host__ __device__ float2 operator()(const int leaf_idx) { const int in_grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* in_tree = octree_get_tree(&input, in_grid_idx); int in_data_idx = leaf_idx - input.prefix_leafs[in_grid_idx]; int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&input, in_grid_idx, in_bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); ot_data_t grid_out = 0; ot_data_t grid_weight = 0; for(int d = ds; d < (ds+width); ++d) { for(int h = hs; h < (hs+width); ++h) { for(int w = ws; w < (ws+width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int ta_grid_idx = octree_grid_idx(&target, n, gd,gh,gw); const ot_tree_t* ta_tree = octree_get_tree(&target, ta_grid_idx); int ta_bit_idx = tree_bit_idx(ta_tree, bd,bh,bw); int ta_data_idx = tree_data_idx(ta_tree, ta_bit_idx, target.feature_size); const ot_data_t* ta_data = octree_get_data(&target, ta_grid_idx); const ot_data_t* we_data = use_weights ? octree_get_data(&weights, ta_grid_idx) : 0; for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = input.data[leaf_idx * input.feature_size + f]; ot_data_t y = ta_data[ta_data_idx + f]; ot_data_t w = use_weights ? we_data[ta_data_idx + f] : 1; grid_out += w * (log(x + EPS) * y + log(1. - x + EPS) * (1. - y)); grid_weight += w; } } } } float2 ret; ret.x = -grid_out; ret.y = grid_weight; return ret; } }; extern "C" void octree_bce_ds_loss_gpu(const octree* input, const octree* target, const octree* weights, bool size_average, ot_data_t* output, ot_data_t* total_weight) { if(!octree_equal_shape(input, target)) { printf("[ERROR] bce_ds_loss - shape of inputs do not match\n"); exit(-1); } if(!octree_equal_shape(input, weights)) { printf("[ERROR] bce_ds_loss - shape of inputs do not match\n"); exit(-1); } float2 init; init.x = 0; init.y = 0; thrust::counting_iterator<int> iter(0); bool use_weights = weights != 0; float2 ret = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_bce_ds_loss_from_leaf_idx(*input, *target, use_weights ? 
*weights : *target, use_weights), init, octree_plus_float2_bfcn()); if(ret.y > 0) { if(size_average) { *total_weight = ret.y; *output = ret.x / ret.y ; } else { *output = ret.x; *total_weight = 1; } } else { *output = 0; *total_weight = 1; } } __global__ void kernel_bce_ds_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const octree weights, bool use_weights, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int in_grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* in_tree = octree_get_tree(&input, in_grid_idx); int in_data_idx = leaf_idx - input.prefix_leafs[in_grid_idx]; int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&input, in_grid_idx, in_bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); for(int f = 0; f < grad.feature_size; ++f) { grad.data[leaf_idx * grad.feature_size + f] = 0; } for(int d = ds; d < (ds+width); ++d) { for(int h = hs; h < (hs+width); ++h) { for(int w = ws; w < (ws+width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int ta_grid_idx = octree_grid_idx(&target, n, gd,gh,gw); const ot_tree_t* ta_tree = octree_get_tree(&target, ta_grid_idx); int ta_bit_idx = tree_bit_idx(ta_tree, bd,bh,bw); int ta_data_idx = tree_data_idx(ta_tree, ta_bit_idx, target.feature_size); const ot_data_t* ta_data = octree_get_data(&target, ta_grid_idx); const ot_data_t* we_data = use_weights ? octree_get_data(&weights, ta_grid_idx) : 0; for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = input.data[leaf_idx * input.feature_size + f]; ot_data_t y = ta_data[ta_data_idx + f]; ot_data_t w = use_weights ? we_data[ta_data_idx + f] : 1; grad.data[leaf_idx * grad.feature_size + f] -= norm * w * (y - x) / ((1. - x + EPS) * (x + EPS)); } } } } } } extern "C" void octree_bce_ds_loss_bwd_gpu(const octree* input, const octree* target, const octree* weights, bool size_average, ot_data_t total_weight, octree* grad) { if(!octree_equal_shape(input, target)) { printf("[ERROR] bce_ds_loss_bwd - shape of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 1.0; if(size_average && total_weight > 0) { norm = norm / total_weight; } bool use_weights = weights != 0; octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); hipLaunchKernelGGL(( kernel_bce_ds_loss_bwd), dim3(GET_BLOCKS(grad->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0, *grad, grad->n_leafs, *input, *target, use_weights ? *weights : *target, use_weights, norm ); CUDA_POST_KERNEL_CHECK; } struct dense_bce_loss_fcn : public thrust::unary_function<int, float2> { const ot_data_t* input; const ot_data_t* target; const ot_data_t* weights; dense_bce_loss_fcn(const ot_data_t* input_, const ot_data_t* target_, const ot_data_t* weights_) : input(input_), target(target_), weights(weights_) { } __host__ __device__ float2 operator()(const int idx) { ot_data_t w = weights != 0 ? weights[idx] : 1; ot_data_t x = input[idx]; ot_data_t y = target[idx]; float2 ret; ret.x = w * -( (log(x + EPS) * y + log(1. - x + EPS) * (1. 
- y)) ); ret.y = w; return ret; } }; extern "C" void dense_bce_loss_gpu(const ot_data_t* input, const ot_data_t* target, const ot_data_t* weights, ot_size_t N, ot_data_t* output, ot_data_t* total_weight) { float2 init; init.x = 0; init.y = 0; thrust::counting_iterator<int> iter(0); float2 result = thrust::transform_reduce( thrust::device, iter, iter + N, dense_bce_loss_fcn(input, target, weights), init, octree_plus_float2_bfcn()); if(result.y > 0) { *output = result.x / result.y; *total_weight = result.y; } else { *output = 0; *total_weight = 1; } } __global__ void kernel_dense_bce_loss_bwd(ot_data_t* grad, ot_size_t N, const ot_data_t* input, const ot_data_t* target, const ot_data_t* weights, ot_data_t norm) { CUDA_KERNEL_LOOP(idx, N) { ot_data_t x = input[idx]; ot_data_t y = target[idx]; ot_data_t w = weights != 0 ? weights[idx] : 1; w *= norm; grad[idx] = w * -(y - x) / ((1. - x + EPS) * (x + EPS)); } } extern "C" void dense_bce_loss_bwd_gpu(const ot_data_t* input, const ot_data_t* target, const ot_data_t* weights, ot_size_t N, ot_data_t total_weight, ot_data_t* grad) { ot_data_t norm = total_weight > 0 ? 1.0 / total_weight : 0; hipLaunchKernelGGL(( kernel_dense_bce_loss_bwd), dim3(GET_BLOCKS(N)), dim3(CUDA_NUM_THREADS), 0, 0, grad, N, input, target, weights, norm ); CUDA_POST_KERNEL_CHECK; } #undef EPS
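The following is an illustrative host-side sketch, not part of the OctNet sources above: it shows how the forward/backward BCE loss pair defined in this file might be driven. example_bce_loss_step is a hypothetical helper; the octrees are assumed to be constructed, filled, and freed elsewhere, and the headers included at the top of the file are assumed to be available.

// Hypothetical driver for the octree BCE loss (sketch only, not library code).
void example_bce_loss_step(const octree* prediction, const octree* label,
                           octree* grad, bool size_average) {
  ot_data_t loss = 0;          // scalar loss returned on the host
  ot_data_t total_weight = 0;  // normalization constant used when size_average is true

  // Forward pass: reduces -vol * (y*log(x) + (1-y)*log(1-x)) over all leaf cells.
  octree_bce_loss_gpu(prediction, label, size_average, /*check=*/true, &loss, &total_weight);

  // Backward pass: writes d(loss)/d(prediction) into grad with the same normalization.
  octree_bce_loss_bwd_gpu(prediction, label, size_average, /*check=*/true, grad);

  printf("bce loss = %f (total weight %f)\n", loss, total_weight);
}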
a51b0cf52c9be8fafc3d781488fa78b87f5ca9dd.cu
// Copyright (c) 2017, The OctNet authors // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "octnet/gpu/loss.h" #include "octnet/gpu/gpu.h" #include <cstdio> #include <cstdlib> #include <thrust/execution_policy.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> struct octree_plus_float2_bfcn : public thrust::binary_function<float2, float2, float2> { __host__ __device__ float2 operator()(float2 in1, float2 in2) { float2 ret; ret.x = in1.x + in2.x; ret.y = in1.y + in2.y; return ret; } }; struct octree_mse_loss_from_leaf_idx : public thrust::unary_function<int, ot_data_t> { const octree input; const octree target; octree_mse_loss_from_leaf_idx(const octree input_, const octree target_) : input(input_), target(target_) { } __host__ __device__ ot_data_t operator()(const int leaf_idx) { const int grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* tree = octree_get_tree(&input, grid_idx); // const int cum_n_leafs = n_leafs_upto(&input, grid_idx); const int cum_n_leafs = input.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // const ot_data_t* in = input.data_ptrs[grid_idx] + data_idx * input.feature_size; const ot_data_t* in = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // const ot_data_t* ta = target.data_ptrs[grid_idx] + data_idx * target.feature_size; const ot_data_t* ta = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 
8 : 1)); ot_data_t sum = 0; for(int f = 0; f < input.feature_size; ++f) { const ot_data_t z = in[f] - ta[f]; sum += vol * z * z; } return sum; } }; extern "C" ot_data_t octree_mse_loss_gpu(const octree* input, const octree* target, bool size_average, bool check) { if(!octree_equal_shape(input, target)) { printf("[ERROR] mse_loss - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] mse_loss - tree structure of inputs do not match\n"); exit(-1); } thrust::counting_iterator<int> iter(0); ot_data_t output = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_mse_loss_from_leaf_idx(*input, *target), ot_data_t(0), thrust::plus<ot_data_t>()); if(size_average) { output = output / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } return output; } __global__ void kernel_mse_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* tree = octree_get_tree(&grad, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad, grid_idx); const int cum_n_leafs = grad.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // ot_data_t* grad_data = grad.data_ptrs[grid_idx] + data_idx * grad.feature_size; ot_data_t* grad_data = octree_get_data(&grad, grid_idx) + data_idx * grad.feature_size; // ot_data_t* input_data = input.data_ptrs[grid_idx] + data_idx * input.feature_size; ot_data_t* input_data = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // ot_data_t* target_data = target.data_ptrs[grid_idx] + data_idx * target.feature_size; ot_data_t* target_data = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 
8 : 1)); for(int f = 0; f < input.feature_size; ++f) { const ot_data_t z = input_data[f] - target_data[f]; grad_data[f] = vol * norm * z; } } } extern "C" void octree_mse_loss_bwd_gpu(const octree* input, const octree* target, bool size_average, bool check, octree* grad) { if(!octree_equal_shape(input, target)) { printf("[ERROR] mse_loss_bwd - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] mse_loss_bwd - tree structure of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 2.0; if(size_average) { norm = norm / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); kernel_mse_loss_bwd<<<GET_BLOCKS(grad->n_leafs), CUDA_NUM_THREADS>>>( *grad, grad->n_leafs, *input, *target, norm ); CUDA_POST_KERNEL_CHECK; } struct octree_mse_ds_loss_from_leaf_idx : public thrust::unary_function<int, ot_data_t> { const octree input; const octree target; octree_mse_ds_loss_from_leaf_idx(const octree input_, const octree target_) : input(input_), target(target_) { } __host__ __device__ ot_data_t operator()(const int leaf_idx) { const int in_grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* in_tree = octree_get_tree(&input, in_grid_idx); int in_data_idx = leaf_idx - input.prefix_leafs[in_grid_idx]; int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&input, in_grid_idx, in_bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); ot_data_t grid_out = 0; for(int d = ds; d < (ds+width); ++d) { for(int h = hs; h < (hs+width); ++h) { for(int w = ws; w < (ws+width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int ta_grid_idx = octree_grid_idx(&target, n, gd,gh,gw); const ot_tree_t* ta_tree = octree_get_tree(&target, ta_grid_idx); int ta_bit_idx = tree_bit_idx(ta_tree, bd,bh,bw); int ta_data_idx = tree_data_idx(ta_tree, ta_bit_idx, target.feature_size); const ot_data_t* ta_data = octree_get_data(&target, ta_grid_idx); for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = input.data[leaf_idx * input.feature_size + f]; ot_data_t y = ta_data[ta_data_idx + f]; grid_out += (x-y) * (x-y); } } } } return grid_out; } }; extern "C" ot_data_t octree_mse_ds_loss_gpu(const octree* input, const octree* target, bool size_average) { if(!octree_equal_shape(input, target)) { printf("[ERROR] mse_ds_loss - shape of inputs do not match\n"); exit(-1); } thrust::counting_iterator<int> iter(0); ot_data_t output = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_mse_ds_loss_from_leaf_idx(*input, *target), ot_data_t(0), thrust::plus<ot_data_t>()); if(size_average) { output = output / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } return output; } __global__ void kernel_mse_ds_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int in_grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* in_tree = octree_get_tree(&input, in_grid_idx); int in_data_idx = leaf_idx - input.prefix_leafs[in_grid_idx]; int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&input, in_grid_idx, 
in_bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); for(int f = 0; f < grad.feature_size; ++f) { grad.data[leaf_idx * grad.feature_size + f] = 0; } for(int d = ds; d < (ds+width); ++d) { for(int h = hs; h < (hs+width); ++h) { for(int w = ws; w < (ws+width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int ta_grid_idx = octree_grid_idx(&target, n, gd,gh,gw); const ot_tree_t* ta_tree = octree_get_tree(&target, ta_grid_idx); int ta_bit_idx = tree_bit_idx(ta_tree, bd,bh,bw); int ta_data_idx = tree_data_idx(ta_tree, ta_bit_idx, target.feature_size); const ot_data_t* ta_data = octree_get_data(&target, ta_grid_idx); for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = input.data[leaf_idx * input.feature_size + f]; ot_data_t y = ta_data[ta_data_idx + f]; grad.data[leaf_idx * grad.feature_size + f] += norm * (x-y); } } } } } } extern "C" void octree_mse_loss_ds_bwd_gpu(const octree* input, const octree* target, bool size_average, octree* grad) { if(!octree_equal_shape(input, target)) { printf("[ERROR] mse_ds_loss_bwd - shape of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 2.0; if(size_average) { norm = norm / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); kernel_mse_ds_loss_bwd<<<GET_BLOCKS(grad->n_leafs), CUDA_NUM_THREADS>>>( *grad, grad->n_leafs, *input, *target, norm ); CUDA_POST_KERNEL_CHECK; } struct octree_nll_loss_from_leaf_idx : public thrust::unary_function<int, float2> { const octree input; const octree target; const ot_data_t* weights; const int class_base; octree_nll_loss_from_leaf_idx(const octree input_, const octree target_, const ot_data_t* weights_, int class_base_) : input(input_), target(target_), weights(weights_), class_base(class_base_) { } __host__ __device__ float2 operator()(const int leaf_idx) { const int grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* tree = octree_get_tree(&input, grid_idx); // const int cum_n_leafs = n_leafs_upto(&input, grid_idx); const int cum_n_leafs = input.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // const ot_data_t* in = input.data_ptrs[grid_idx] + data_idx * input.feature_size; const ot_data_t* in = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // const ot_data_t* ta = target.data_ptrs[grid_idx] + data_idx * target.feature_size; const ot_data_t* ta = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 
8 : 1)); int cur_target = round(ta[0]) - class_base; assert(cur_target >= 0 && cur_target < input.feature_size); ot_data_t weight = vol * weights[cur_target]; // if(cur_target >= input.feature_size || cur_target <= 0) // printf(" ERROR cur_target=%d, weight=%f\n", cur_target, weight); float2 ret_val; ret_val.x = -weight * in[cur_target]; ret_val.y = weight; return ret_val; } }; extern "C" void octree_nll_loss_gpu(const octree* input, const octree* target, const ot_data_t* weights, int class_base, bool size_average, bool check, ot_data_t* output, ot_data_t* total_weight) { if(input->n != target->n || input->grid_depth != target->grid_depth || input->grid_height != target->grid_height || input->grid_width != target->grid_width || 1 != target->feature_size) { printf("[ERROR] nll_loss - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] nll_loss - tree structure of inputs do not match\n"); exit(-1); } float2 init; init.x = 0; init.y = 0; thrust::counting_iterator<int> iter(0); float2 res = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_nll_loss_from_leaf_idx(*input, *target, weights, class_base), init, octree_plus_float2_bfcn()); output[0] = res.x; total_weight[0] = res.y; if(size_average && total_weight[0] != 0) { output[0] /= total_weight[0]; } } __global__ void kernel_nll_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const ot_data_t* weights, const ot_data_t norm, const int class_base) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* tree = octree_get_tree(&grad, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad, grid_idx); const int cum_n_leafs = grad.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // ot_data_t* grad_data = grad.data_ptrs[grid_idx] + data_idx * grad.feature_size; ot_data_t* grad_data = octree_get_data(&grad, grid_idx) + data_idx * grad.feature_size; // ot_data_t* target_data = target.data_ptrs[grid_idx] + data_idx * target.feature_size; ot_data_t* target_data = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 
8 : 1)); int cur_target = round(target_data[0]) - class_base; assert(cur_target >= 0 && cur_target < input.feature_size); for(int f = 0; f < grad.feature_size; ++f) { grad_data[f] = 0; } grad_data[cur_target] = vol * -weights[cur_target] * norm; } } extern "C" void octree_nll_loss_bwd_gpu(const octree* input, const octree* target, const ot_data_t* weights, const ot_data_t total_weight, int class_base, bool size_average, bool check, octree* grad) { if(input->n != target->n || input->grid_depth != target->grid_depth || input->grid_height != target->grid_height || input->grid_width != target->grid_width || 1 != target->feature_size) { printf("[ERROR] nll_loss_bwd - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] nll_loss_bwd - tree structure of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 1.0; if(size_average) { norm /= total_weight; } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); kernel_nll_loss_bwd<<<GET_BLOCKS(grad->n_leafs), CUDA_NUM_THREADS>>>( *grad, grad->n_leafs, *input, *target, weights, norm, class_base ); CUDA_POST_KERNEL_CHECK; } #define EPS 1e-12 struct octree_bce_loss_from_leaf_idx : public thrust::unary_function<int, ot_data_t> { const octree input; const octree target; octree_bce_loss_from_leaf_idx(const octree input_, const octree target_) : input(input_), target(target_) { } __host__ __device__ ot_data_t operator()(const int leaf_idx) { const int grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* tree = octree_get_tree(&input, grid_idx); // const int cum_n_leafs = n_leafs_upto(&input, grid_idx); const int cum_n_leafs = input.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // const ot_data_t* in = input.data_ptrs[grid_idx] + data_idx * input.feature_size; const ot_data_t* in = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // const ot_data_t* ta = target.data_ptrs[grid_idx] + data_idx * target.feature_size; const ot_data_t* ta = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 8 : 1)); ot_data_t sum = 0; for(int f = 0; f < input.feature_size; ++f) { const ot_data_t x = in[f]; const ot_data_t y = ta[f]; sum -= vol *(log(x + EPS) * y + log(1. - x + EPS) * (1. 
- y)); } return sum; } }; extern "C" void octree_bce_loss_gpu(const octree* input, const octree* target, bool size_average, bool check, ot_data_t* output, ot_data_t* total_weight) { if(!octree_equal_shape(input, target)) { printf("[ERROR] bce_loss - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] bce_loss - tree structure of inputs do not match\n"); exit(-1); } thrust::counting_iterator<int> iter(0); *output = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_bce_loss_from_leaf_idx(*input, *target), ot_data_t(0), thrust::plus<ot_data_t>()); if(size_average) { *total_weight = octree_num_blocks(input) * input->feature_size * 8 * 8 * 8; *output = (*output) / (*total_weight) ; } else { *total_weight = 1; } } __global__ void kernel_bce_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* tree = octree_get_tree(&grad, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad, grid_idx); const int cum_n_leafs = grad.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // ot_data_t* grad_data = grad.data_ptrs[grid_idx] + data_idx * grad.feature_size; ot_data_t* grad_data = octree_get_data(&grad, grid_idx) + data_idx * grad.feature_size; // ot_data_t* input_data = input.data_ptrs[grid_idx] + data_idx * input.feature_size; ot_data_t* input_data = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; // ot_data_t* target_data = target.data_ptrs[grid_idx] + data_idx * target.feature_size; ot_data_t* target_data = octree_get_data(&target, grid_idx) + data_idx * target.feature_size; const int depth = depth_from_bit_idx(bit_idx); const ot_data_t vol = depth == 0 ? 512 : (depth == 1 ? 64 : (depth == 2 ? 8 : 1)); for(int f = 0; f < input.feature_size; ++f) { const ot_data_t x = input_data[f]; const ot_data_t y = target_data[f]; grad_data[f] = - vol * norm * (y - x) / ((1. 
- x + EPS) * (x + EPS)); } } } extern "C" void octree_bce_loss_bwd_gpu(const octree* input, const octree* target, bool size_average, bool check, octree* grad) { if(!octree_equal_shape(input, target)) { printf("[ERROR] bce_loss_bwd - shape of inputs do not match\n"); exit(-1); } if(check && !octree_equal_trees_gpu(input, target)) { printf("[ERROR] bce_loss_bwd - tree structure of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 1.0; if(size_average) { norm = norm / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); kernel_bce_loss_bwd<<<GET_BLOCKS(grad->n_leafs), CUDA_NUM_THREADS>>>( *grad, grad->n_leafs, *input, *target, norm ); CUDA_POST_KERNEL_CHECK; } struct octree_bce_dense_loss_from_leaf_idx : public thrust::unary_function<int, ot_data_t> { const octree input; const ot_data_t* target; octree_bce_dense_loss_from_leaf_idx(const octree input_, const ot_data_t* target_) : input(input_), target(target_) { } __host__ __device__ ot_data_t operator()(const int leaf_idx) { const int grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* tree = octree_get_tree(&input, grid_idx); // const int cum_n_leafs = n_leafs_upto(&input, grid_idx); const int cum_n_leafs = input.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // const ot_data_t* estimate = input.data_ptrs[grid_idx] + data_idx * input.feature_size; const ot_data_t* estimate = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; int n,ds,hs,ws; const int depth = octree_ind_to_dense_ind(&input, grid_idx, bit_idx, &n, &ds,&hs,&ws); const int size = width_from_depth(depth); const int dense_depth = 8 * input.grid_depth; const int dense_height = 8 * input.grid_height; const int dense_width = 8 * input.grid_width; // printf("leaf_idx=%d, n=%d, ds=%d,hs=%d,ws=%d, size=%d\n", leaf_idx, n, ds,hs,ws, size); ot_data_t out = 0; for(int d = ds; d < (ds+size); ++d) { for(int h = hs; h < (hs+size); ++h) { for(int w = ws; w < (ws+size); ++w) { for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = estimate[f]; ot_data_t y = target[(((n * input.feature_size + f) * dense_depth + d) * dense_height + h) * dense_width + w]; // printf(" x=%f, y=%f\n", x, y); out += (log(x + EPS) * y + log(1. - x + EPS) * (1. 
- y)); } } } } return -out; } }; extern "C" void octree_bce_dense_loss_gpu(const octree* input, const ot_data_t* target, bool size_average, ot_data_t* output, ot_data_t* total_weight) { thrust::counting_iterator<int> iter(0); *output = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_bce_dense_loss_from_leaf_idx(*input, target), ot_data_t(0), thrust::plus<ot_data_t>()); if(size_average) { *total_weight = octree_num_blocks(input) * input->feature_size * 8 * 8 * 8; *output = (*output) / (*total_weight) ; } else { *total_weight = 1; } } __global__ void kernel_bce_dense_loss_bwd(octree grad, int n_leafs, const octree input, const ot_data_t* target, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* tree = octree_get_tree(&grad, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad, grid_idx); const int cum_n_leafs = grad.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(tree, data_idx); // ot_data_t* grad_data = grad.data_ptrs[grid_idx] + data_idx * grad.feature_size; ot_data_t* grad_data = octree_get_data(&grad, grid_idx) + data_idx * grad.feature_size; // ot_data_t* estimate = input.data_ptrs[grid_idx] + data_idx * input.feature_size; ot_data_t* estimate = octree_get_data(&input, grid_idx) + data_idx * input.feature_size; int n,ds,hs,ws; const int depth = octree_ind_to_dense_ind(&input, grid_idx, bit_idx, &n, &ds,&hs,&ws); const int size = width_from_depth(depth); const int dense_depth = 8 * input.grid_depth; const int dense_height = 8 * input.grid_height; const int dense_width = 8 * input.grid_width; for(int f = 0; f < input.feature_size; ++f) { grad_data[f] = 0; } for(int d = ds; d < (ds+size); ++d) { for(int h = hs; h < (hs+size); ++h) { for(int w = ws; w < (ws+size); ++w) { for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = estimate[f]; ot_data_t y = target[(((n * input.feature_size + f) * dense_depth + d) * dense_height + h) * dense_width + w]; grad_data[f] -= norm * (y - x) / ((1. 
- x + EPS) * (x + EPS)); } } } } } } extern "C" void octree_bce_dense_loss_bwd_gpu(const octree* input, const ot_data_t* target, bool size_average, octree* grad) { octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 1.0; if(size_average) { norm = norm / (octree_num_blocks(input) * input->feature_size * 8 * 8 * 8); } octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); kernel_bce_dense_loss_bwd<<<GET_BLOCKS(grad->n_leafs), CUDA_NUM_THREADS>>>( *grad, grad->n_leafs, *input, target, norm ); CUDA_POST_KERNEL_CHECK; } struct octree_bce_ds_loss_from_leaf_idx : public thrust::unary_function<int, float2> { const octree input; const octree target; const octree weights; bool use_weights; octree_bce_ds_loss_from_leaf_idx(const octree input_, const octree target_, const octree weights_, bool use_weights_) : input(input_), target(target_), weights(weights_), use_weights(use_weights_) { } __host__ __device__ float2 operator()(const int leaf_idx) { const int in_grid_idx = leaf_idx_to_grid_idx(&input, leaf_idx); const ot_tree_t* in_tree = octree_get_tree(&input, in_grid_idx); int in_data_idx = leaf_idx - input.prefix_leafs[in_grid_idx]; int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&input, in_grid_idx, in_bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); ot_data_t grid_out = 0; ot_data_t grid_weight = 0; for(int d = ds; d < (ds+width); ++d) { for(int h = hs; h < (hs+width); ++h) { for(int w = ws; w < (ws+width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int ta_grid_idx = octree_grid_idx(&target, n, gd,gh,gw); const ot_tree_t* ta_tree = octree_get_tree(&target, ta_grid_idx); int ta_bit_idx = tree_bit_idx(ta_tree, bd,bh,bw); int ta_data_idx = tree_data_idx(ta_tree, ta_bit_idx, target.feature_size); const ot_data_t* ta_data = octree_get_data(&target, ta_grid_idx); const ot_data_t* we_data = use_weights ? octree_get_data(&weights, ta_grid_idx) : 0; for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = input.data[leaf_idx * input.feature_size + f]; ot_data_t y = ta_data[ta_data_idx + f]; ot_data_t w = use_weights ? we_data[ta_data_idx + f] : 1; grid_out += w * (log(x + EPS) * y + log(1. - x + EPS) * (1. - y)); grid_weight += w; } } } } float2 ret; ret.x = -grid_out; ret.y = grid_weight; return ret; } }; extern "C" void octree_bce_ds_loss_gpu(const octree* input, const octree* target, const octree* weights, bool size_average, ot_data_t* output, ot_data_t* total_weight) { if(!octree_equal_shape(input, target)) { printf("[ERROR] bce_ds_loss - shape of inputs do not match\n"); exit(-1); } if(!octree_equal_shape(input, weights)) { printf("[ERROR] bce_ds_loss - shape of inputs do not match\n"); exit(-1); } float2 init; init.x = 0; init.y = 0; thrust::counting_iterator<int> iter(0); bool use_weights = weights != 0; float2 ret = thrust::transform_reduce( thrust::device, iter, iter + input->n_leafs, octree_bce_ds_loss_from_leaf_idx(*input, *target, use_weights ? 
*weights : *target, use_weights), init, octree_plus_float2_bfcn()); if(ret.y > 0) { if(size_average) { *total_weight = ret.y; *output = ret.x / ret.y ; } else { *output = ret.x; *total_weight = 1; } } else { *output = 0; *total_weight = 1; } } __global__ void kernel_bce_ds_loss_bwd(octree grad, int n_leafs, const octree input, const octree target, const octree weights, bool use_weights, const ot_data_t norm) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int in_grid_idx = grad.data[leaf_idx * grad.feature_size]; const ot_tree_t* in_tree = octree_get_tree(&input, in_grid_idx); int in_data_idx = leaf_idx - input.prefix_leafs[in_grid_idx]; int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx); int n,ds,hs,ws; int depth = octree_ind_to_dense_ind(&input, in_grid_idx, in_bit_idx, &n, &ds,&hs,&ws); int width = width_from_depth(depth); for(int f = 0; f < grad.feature_size; ++f) { grad.data[leaf_idx * grad.feature_size + f] = 0; } for(int d = ds; d < (ds+width); ++d) { for(int h = hs; h < (hs+width); ++h) { for(int w = ws; w < (ws+width); ++w) { int gd = d / 8; int gh = h / 8; int gw = w / 8; int bd = d % 8; int bh = h % 8; int bw = w % 8; int ta_grid_idx = octree_grid_idx(&target, n, gd,gh,gw); const ot_tree_t* ta_tree = octree_get_tree(&target, ta_grid_idx); int ta_bit_idx = tree_bit_idx(ta_tree, bd,bh,bw); int ta_data_idx = tree_data_idx(ta_tree, ta_bit_idx, target.feature_size); const ot_data_t* ta_data = octree_get_data(&target, ta_grid_idx); const ot_data_t* we_data = use_weights ? octree_get_data(&weights, ta_grid_idx) : 0; for(int f = 0; f < input.feature_size; ++f) { ot_data_t x = input.data[leaf_idx * input.feature_size + f]; ot_data_t y = ta_data[ta_data_idx + f]; ot_data_t w = use_weights ? we_data[ta_data_idx + f] : 1; grad.data[leaf_idx * grad.feature_size + f] -= norm * w * (y - x) / ((1. - x + EPS) * (x + EPS)); } } } } } } extern "C" void octree_bce_ds_loss_bwd_gpu(const octree* input, const octree* target, const octree* weights, bool size_average, ot_data_t total_weight, octree* grad) { if(!octree_equal_shape(input, target)) { printf("[ERROR] bce_ds_loss_bwd - shape of inputs do not match\n"); exit(-1); } octree_cpy_scalars(input, grad); octree_resize_as_gpu(input, grad); octree_cpy_trees_gpu_gpu(input, grad); octree_cpy_prefix_leafs_gpu_gpu(input, grad); ot_data_t norm = 1.0; if(size_average && total_weight > 0) { norm = norm / total_weight; } bool use_weights = weights != 0; octree_leaf_idx_to_grid_idx_gpu(grad, grad->feature_size, grad->data_capacity, grad->data); kernel_bce_ds_loss_bwd<<<GET_BLOCKS(grad->n_leafs), CUDA_NUM_THREADS>>>( *grad, grad->n_leafs, *input, *target, use_weights ? *weights : *target, use_weights, norm ); CUDA_POST_KERNEL_CHECK; } struct dense_bce_loss_fcn : public thrust::unary_function<int, float2> { const ot_data_t* input; const ot_data_t* target; const ot_data_t* weights; dense_bce_loss_fcn(const ot_data_t* input_, const ot_data_t* target_, const ot_data_t* weights_) : input(input_), target(target_), weights(weights_) { } __host__ __device__ float2 operator()(const int idx) { ot_data_t w = weights != 0 ? weights[idx] : 1; ot_data_t x = input[idx]; ot_data_t y = target[idx]; float2 ret; ret.x = w * -( (log(x + EPS) * y + log(1. - x + EPS) * (1. 
- y)) ); ret.y = w; return ret; } }; extern "C" void dense_bce_loss_gpu(const ot_data_t* input, const ot_data_t* target, const ot_data_t* weights, ot_size_t N, ot_data_t* output, ot_data_t* total_weight) { float2 init; init.x = 0; init.y = 0; thrust::counting_iterator<int> iter(0); float2 result = thrust::transform_reduce( thrust::device, iter, iter + N, dense_bce_loss_fcn(input, target, weights), init, octree_plus_float2_bfcn()); if(result.y > 0) { *output = result.x / result.y; *total_weight = result.y; } else { *output = 0; *total_weight = 1; } } __global__ void kernel_dense_bce_loss_bwd(ot_data_t* grad, ot_size_t N, const ot_data_t* input, const ot_data_t* target, const ot_data_t* weights, ot_data_t norm) { CUDA_KERNEL_LOOP(idx, N) { ot_data_t x = input[idx]; ot_data_t y = target[idx]; ot_data_t w = weights != 0 ? weights[idx] : 1; w *= norm; grad[idx] = w * -(y - x) / ((1. - x + EPS) * (x + EPS)); } } extern "C" void dense_bce_loss_bwd_gpu(const ot_data_t* input, const ot_data_t* target, const ot_data_t* weights, ot_size_t N, ot_data_t total_weight, ot_data_t* grad) { ot_data_t norm = total_weight > 0 ? 1.0 / total_weight : 0; kernel_dense_bce_loss_bwd<<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>( grad, N, input, target, weights, norm ); CUDA_POST_KERNEL_CHECK; } #undef EPS
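As an aside, here is a minimal sketch of the dense (non-octree) BCE pair defined at the end of this file; it is not part of the OctNet sources. The buffers d_pred, d_label and d_grad are assumed to be device arrays of length N allocated by the caller, and passing a null weights pointer weights every element by 1, as the kernels above do.

// Hypothetical driver for the dense BCE loss on flat device buffers (sketch only).
void example_dense_bce_loss(const ot_data_t* d_pred, const ot_data_t* d_label,
                            ot_size_t N, ot_data_t* d_grad) {
  ot_data_t loss = 0;
  ot_data_t total_weight = 0;

  // Forward pass; the returned total_weight is the sum of the per-element weights (here N).
  dense_bce_loss_gpu(d_pred, d_label, /*weights=*/0, N, &loss, &total_weight);

  // Backward pass reuses total_weight from the forward pass as the gradient normalizer.
  dense_bce_loss_bwd_gpu(d_pred, d_label, /*weights=*/0, N, total_weight, d_grad);

  printf("dense bce loss = %f\n", loss);
}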
33a687e65943d58c80e9b2b7dd43c48c7c26dd72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <comm_quda.h> #include <unitarization_links.h> #include <pgauge_monte.h> #include <random_quda.h> #include <hipcub/hipcub.hpp> #include <index_helper.cuh> #ifndef PI #define PI 3.1415926535897932384626433832795 // pi #endif #ifndef PII #define PII 6.2831853071795864769252867665590 // 2 * pi #endif namespace quda { #ifdef GPU_GAUGE_ALG template <typename Gauge> struct InitGaugeColdArg { int threads; // number of active threads required int X[4]; // grid dimensions Gauge dataOr; InitGaugeColdArg(const Gauge &dataOr, const cudaGaugeField &data) : dataOr(dataOr) { for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = X[0] * X[1] * X[2] * X[3]; } }; template<typename Float, typename Gauge, int NCOLORS> __global__ void compute_InitGauge_ColdStart(InitGaugeColdArg<Gauge> arg){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= arg.threads ) return; typedef typename ComplexTypeId<Float>::Type Cmplx; int parity = 0; if ( idx >= arg.threads / 2 ) { parity = 1; idx -= arg.threads / 2; } Matrix<Cmplx,NCOLORS> U; setIdentity(&U); for ( int d = 0; d < 4; d++ ) arg.dataOr.save((Float*)(U.data),idx, d, parity); } template<typename Float, typename Gauge, int NCOLORS> class InitGaugeCold : Tunable { InitGaugeColdArg<Gauge> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: InitGaugeCold(InitGaugeColdArg<Gauge> &arg) : arg(arg) { } ~InitGaugeCold () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); compute_InitGauge_ColdStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg); //hipDeviceSynchronize(); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } std::string paramString(const TuneParam &param) const { std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")"; ps << "shared=" << param.shared_bytes; return ps.str(); } void preTune(){ } void postTune(){ } long long flops() const { return 0; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return 0; } //no accounting the reduction!!!! 
}; template<typename Float, int NCOLORS, typename Gauge> void InitGaugeField( Gauge dataOr, cudaGaugeField& data) { InitGaugeColdArg<Gauge> initarg(dataOr, data); InitGaugeCold<Float, Gauge, NCOLORS> init(initarg); init.apply(0); checkCudaError(); } template<typename Float> void InitGaugeField( cudaGaugeField& data) { if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } /** @brief Perform a cold start to the gauge field, identity SU(3) matrix, also fills the ghost links in multi-GPU case (no need to exchange data) * * @param[in,out] data Gauge field */ void InitGaugeField( cudaGaugeField& data) { if ( data.Precision() == QUDA_SINGLE_PRECISION ) { InitGaugeField<float> (data); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { InitGaugeField<double>(data); } else { errorQuda("Precision %d not supported", data.Precision()); } } template <typename Gauge> struct InitGaugeHotArg { int threads; // number of active threads required int X[4]; // grid dimensions RNG rngstate; #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; InitGaugeHotArg(const Gauge &dataOr, const cudaGaugeField &data, RNG &rngstate) : dataOr(dataOr), rngstate(rngstate) { #ifdef MULTI_GPU for ( int dir = 0; dir < 4; ++dir ) { border[dir] = data.R()[dir]; X[dir] = data.X()[dir] - border[dir] * 2; } #else for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; #endif //the optimal number of RNG states in rngstate array must be equal to half the lattice volume //this number is the same used in heatbath... 
threads = X[0] * X[1] * X[2] * X[3] >> 1; } }; template<typename Cmplx> __device__ __host__ static inline typename RealTypeId<Cmplx>::Type Abs2(const Cmplx & a){ return a.x * a.x + a.y * a.y; } template <typename Float> __host__ __device__ static inline void reunit_link( Matrix<typename ComplexTypeId<Float>::Type,3> &U ){ typedef typename ComplexTypeId<Float>::Type Cmplx; Cmplx t2 = makeComplex((Float)0.0, (Float)0.0); Float t1 = 0.0; //first normalize first row //sum of squares of row #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(0, c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(0,c) *= t1; //6 #pragma unroll for ( int c = 0; c < 3; c++ ) t2 += Conj(U(0,c)) * U(1,c); //24 #pragma unroll for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c); //24 //normalize second row //sum of squares of row t1 = 0.0; #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(1,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(1, c) *= t1; //6 //Reconstruct lat row U(2,0) = Conj(U(0,1) * U(1,2) - U(0,2) * U(1,1)); U(2,1) = Conj(U(0,2) * U(1,0) - U(0,0) * U(1,2)); U(2,2) = Conj(U(0,0) * U(1,1) - U(0,1) * U(1,0)); //42 //T=130 } /** @brief Generate the four random real elements of the SU(2) matrix @param localstate CURAND rng state @return four real numbers of the SU(2) matrix */ template <class T> __device__ static inline Matrix<T,2> randomSU2(cuRNGState& localState){ Matrix<T,2> a; T aabs, ctheta, stheta, phi; a(0,0) = Random<T>(localState, (T)-1.0, (T)1.0); aabs = sqrt( 1.0 - a(0,0) * a(0,0)); ctheta = Random<T>(localState, (T)-1.0, (T)1.0); phi = PII * Random<T>(localState); stheta = ( hiprand(&localState) & 1 ? 1 : -1 ) * sqrt( (T)1.0 - ctheta * ctheta ); a(0,1) = aabs * stheta * cos( phi ); a(1,0) = aabs * stheta * sin( phi ); a(1,1) = aabs * ctheta; return a; } /** @brief Update the SU(Nc) link with the new SU(2) matrix, link <- u * link @param u SU(2) matrix represented by four real numbers @param link SU(Nc) matrix @param id indices */ template <class T, int NCOLORS> __host__ __device__ static inline void mul_block_sun( Matrix<T,2> u, Matrix<typename ComplexTypeId<T>::Type,NCOLORS> &link, int2 id ){ typename ComplexTypeId<T>::Type tmp; for ( int j = 0; j < NCOLORS; j++ ) { tmp = makeComplex( u(0,0), u(1,1) ) * link(id.x, j) + makeComplex( u(1,0), u(0,1) ) * link(id.y, j); link(id.y, j) = makeComplex(-u(1,0), u(0,1) ) * link(id.x, j) + makeComplex( u(0,0),-u(1,1) ) * link(id.y, j); link(id.x, j) = tmp; } } /** @brief Calculate the SU(2) index block in the SU(Nc) matrix @param block number to calculate the index's, the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2. @return Returns two index's in int2 type, accessed by .x and .y. 
*/ template<int NCOLORS> __host__ __device__ static inline int2 IndexBlock(int block){ int2 id; int i1; int found = 0; int del_i = 0; int index = -1; while ( del_i < (NCOLORS - 1) && found == 0 ) { del_i++; for ( i1 = 0; i1 < (NCOLORS - del_i); i1++ ) { index++; if ( index == block ) { found = 1; break; } } } id.y = i1 + del_i; id.x = i1; return id; } /** @brief Generate a SU(Nc) random matrix @param localstate CURAND rng state @return SU(Nc) matrix */ template <class Float, int NCOLORS> __device__ inline Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> randomize( cuRNGState& localState ){ Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> U; for ( int i = 0; i < NCOLORS; i++ ) for ( int j = 0; j < NCOLORS; j++ ) U(i,j) = makeComplex( (Float)(Random<Float>(localState) - 0.5), (Float)(Random<Float>(localState) - 0.5)); reunit_link<Float>(U); return U; /*setIdentity(&U); for( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) { Matrix<Float,2> rr = randomSU2<Float>(localState); int2 id = IndexBlock<NCOLORS>( block ); mul_block_sun<Float, NCOLORS>(rr, U, id); //U = block_su2_to_su3<Float>( U, a00, a01, a10, a11, block ); } return U;*/ } template<typename Float, typename Gauge, int NCOLORS> __global__ void compute_InitGauge_HotStart(InitGaugeHotArg<Gauge> arg){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= arg.threads ) return; typedef typename ComplexTypeId<Float>::Type Cmplx; #ifdef MULTI_GPU int X[4], x[4]; for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; for ( int dr = 0; dr < 4; ++dr ) X[dr] += 2 * arg.border[dr]; int id = idx; cuRNGState localState = arg.rngstate.State()[ id ]; #else cuRNGState localState = arg.rngstate.State()[ idx ]; #endif for ( int parity = 0; parity < 2; parity++ ) { #ifdef MULTI_GPU getCoords(x, id, arg.X, parity); for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr]; idx = linkIndex(x,X); #endif for ( int d = 0; d < 4; d++ ) { Matrix<Cmplx,NCOLORS> U; U = randomize<Float, NCOLORS>(localState); arg.dataOr.save((Float*)(U.data),idx, d, parity); } } #ifdef MULTI_GPU arg.rngstate.State()[ id ] = localState; #else arg.rngstate.State()[ idx ] = localState; #endif } template<typename Float, typename Gauge, int NCOLORS> class InitGaugeHot : Tunable { InitGaugeHotArg<Gauge> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: InitGaugeHot(InitGaugeHotArg<Gauge> &arg) : arg(arg) { } ~InitGaugeHot () { } void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); compute_InitGauge_HotStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg); //hipDeviceSynchronize(); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } std::string paramString(const TuneParam &param) const { std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")"; ps << "shared=" << param.shared_bytes; return ps.str(); } void preTune(){ arg.rngstate.backup(); } void postTune(){ arg.rngstate.restore(); } long long flops() const { return 0; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return 0; } //no accounting the reduction!!!! }; template<typename Float, int NCOLORS, typename Gauge> void InitGaugeField( Gauge dataOr, cudaGaugeField& data, RNG &rngstate) { InitGaugeHotArg<Gauge> initarg(dataOr, data, rngstate); InitGaugeHot<Float, Gauge, NCOLORS> init(initarg); init.apply(0); checkCudaError(); hipDeviceSynchronize(); data.exchangeExtendedGhost(data.R(),false); /*hipDeviceSynchronize(); const double unitarize_eps = 1e-14; const double max_error = 1e-10; const int reunit_allow_svd = 1; const int reunit_svd_only = 0; const double svd_rel_error = 1e-6; const double svd_abs_error = 1e-6; setUnitarizeLinksConstants(unitarize_eps, max_error, reunit_allow_svd, reunit_svd_only, svd_rel_error, svd_abs_error); int num_failures=0; int* num_failures_dev; hipMalloc((void**)&num_failures_dev, sizeof(int)); hipMemset(num_failures_dev, 0, sizeof(int)); if(num_failures_dev == NULL) errorQuda("hipMalloc failed for dev_pointer\n"); unitarizeLinksQuda(data, num_failures_dev); hipMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost); if(num_failures>0){ hipFree(num_failures_dev); errorQuda("Error in the unitarization\n"); exit(1); } hipFree(num_failures_dev);*/ } template<typename Float> void InitGaugeField( cudaGaugeField& data, RNG &rngstate) { if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } #endif // GPU_GAUGE_ALG /** @brief Perform a hot start to the gauge field, random SU(3) matrix, followed by reunitarization, also exchange borders links in multi-GPU case. 
* * @param[in,out] data Gauge field * @param[in,out] rngstate state of the CURAND random number generator */ void InitGaugeField( cudaGaugeField& data, RNG &rngstate) { #ifdef GPU_GAUGE_ALG if ( data.Precision() == QUDA_SINGLE_PRECISION ) { InitGaugeField<float> (data, rngstate); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { InitGaugeField<double>(data, rngstate); } else { errorQuda("Precision %d not supported", data.Precision()); } #else errorQuda("Pure gauge code has not been built"); #endif } }
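// The IndexBlock helper defined earlier in this file enumerates the SU(2)
// subgroup blocks of an SU(Nc) matrix. Below is a minimal standalone host
// sketch (not taken from the QUDA sources above) that reproduces the same
// enumeration so the ordering is easy to inspect: pairs are visited by
// increasing separation y - x, i.e. for NCOLORS = 3 the order is
// (0,1), (1,2), (0,2). The names Int2 and index_block_demo are invented
// for illustration only.
#include <cstdio>

struct Int2 { int x, y; };

template <int NCOLORS>
Int2 index_block_demo(int block) {
  Int2 id = {0, 0};
  int i1 = 0, found = 0, del_i = 0, index = -1;
  while (del_i < (NCOLORS - 1) && found == 0) {
    del_i++;
    for (i1 = 0; i1 < (NCOLORS - del_i); i1++) {
      index++;
      if (index == block) { found = 1; break; }
    }
  }
  id.y = i1 + del_i;  // larger of the two row indices
  id.x = i1;          // smaller of the two row indices
  return id;
}

int main() {
  // SU(3) has 3 * (3 - 1) / 2 = 3 blocks; expected output: (0,1) (1,2) (0,2).
  for (int b = 0; b < 3; b++) {
    Int2 id = index_block_demo<3>(b);
    printf("block %d -> rows (%d,%d)\n", b, id.x, id.y);
  }
  return 0;
}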
33a687e65943d58c80e9b2b7dd43c48c7c26dd72.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <launch_kernel.cuh> #include <comm_quda.h> #include <unitarization_links.h> #include <pgauge_monte.h> #include <random_quda.h> #include <cub/cub.cuh> #include <index_helper.cuh> #ifndef PI #define PI 3.1415926535897932384626433832795 // pi #endif #ifndef PII #define PII 6.2831853071795864769252867665590 // 2 * pi #endif namespace quda { #ifdef GPU_GAUGE_ALG template <typename Gauge> struct InitGaugeColdArg { int threads; // number of active threads required int X[4]; // grid dimensions Gauge dataOr; InitGaugeColdArg(const Gauge &dataOr, const cudaGaugeField &data) : dataOr(dataOr) { for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; threads = X[0] * X[1] * X[2] * X[3]; } }; template<typename Float, typename Gauge, int NCOLORS> __global__ void compute_InitGauge_ColdStart(InitGaugeColdArg<Gauge> arg){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= arg.threads ) return; typedef typename ComplexTypeId<Float>::Type Cmplx; int parity = 0; if ( idx >= arg.threads / 2 ) { parity = 1; idx -= arg.threads / 2; } Matrix<Cmplx,NCOLORS> U; setIdentity(&U); for ( int d = 0; d < 4; d++ ) arg.dataOr.save((Float*)(U.data),idx, d, parity); } template<typename Float, typename Gauge, int NCOLORS> class InitGaugeCold : Tunable { InitGaugeColdArg<Gauge> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } //bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.threads; } public: InitGaugeCold(InitGaugeColdArg<Gauge> &arg) : arg(arg) { } ~InitGaugeCold () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); compute_InitGauge_ColdStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg); //cudaDeviceSynchronize(); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } std::string paramString(const TuneParam &param) const { std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")"; ps << "shared=" << param.shared_bytes; return ps.str(); } void preTune(){ } void postTune(){ } long long flops() const { return 0; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return 0; } //no accounting the reduction!!!! 
}; template<typename Float, int NCOLORS, typename Gauge> void InitGaugeField( Gauge dataOr, cudaGaugeField& data) { InitGaugeColdArg<Gauge> initarg(dataOr, data); InitGaugeCold<Float, Gauge, NCOLORS> init(initarg); init.apply(0); checkCudaError(); } template<typename Float> void InitGaugeField( cudaGaugeField& data) { if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } /** @brief Perform a cold start to the gauge field, identity SU(3) matrix, also fills the ghost links in multi-GPU case (no need to exchange data) * * @param[in,out] data Gauge field */ void InitGaugeField( cudaGaugeField& data) { if ( data.Precision() == QUDA_SINGLE_PRECISION ) { InitGaugeField<float> (data); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { InitGaugeField<double>(data); } else { errorQuda("Precision %d not supported", data.Precision()); } } template <typename Gauge> struct InitGaugeHotArg { int threads; // number of active threads required int X[4]; // grid dimensions RNG rngstate; #ifdef MULTI_GPU int border[4]; #endif Gauge dataOr; InitGaugeHotArg(const Gauge &dataOr, const cudaGaugeField &data, RNG &rngstate) : dataOr(dataOr), rngstate(rngstate) { #ifdef MULTI_GPU for ( int dir = 0; dir < 4; ++dir ) { border[dir] = data.R()[dir]; X[dir] = data.X()[dir] - border[dir] * 2; } #else for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir]; #endif //the optimal number of RNG states in rngstate array must be equal to half the lattice volume //this number is the same used in heatbath... 
threads = X[0] * X[1] * X[2] * X[3] >> 1; } }; template<typename Cmplx> __device__ __host__ static inline typename RealTypeId<Cmplx>::Type Abs2(const Cmplx & a){ return a.x * a.x + a.y * a.y; } template <typename Float> __host__ __device__ static inline void reunit_link( Matrix<typename ComplexTypeId<Float>::Type,3> &U ){ typedef typename ComplexTypeId<Float>::Type Cmplx; Cmplx t2 = makeComplex((Float)0.0, (Float)0.0); Float t1 = 0.0; //first normalize first row //sum of squares of row #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(0, c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(0,c) *= t1; //6 #pragma unroll for ( int c = 0; c < 3; c++ ) t2 += Conj(U(0,c)) * U(1,c); //24 #pragma unroll for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c); //24 //normalize second row //sum of squares of row t1 = 0.0; #pragma unroll for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(1,c)); t1 = (Float)1.0 / sqrt(t1); //14 //used to normalize row #pragma unroll for ( int c = 0; c < 3; c++ ) U(1, c) *= t1; //6 //Reconstruct lat row U(2,0) = Conj(U(0,1) * U(1,2) - U(0,2) * U(1,1)); U(2,1) = Conj(U(0,2) * U(1,0) - U(0,0) * U(1,2)); U(2,2) = Conj(U(0,0) * U(1,1) - U(0,1) * U(1,0)); //42 //T=130 } /** @brief Generate the four random real elements of the SU(2) matrix @param localstate CURAND rng state @return four real numbers of the SU(2) matrix */ template <class T> __device__ static inline Matrix<T,2> randomSU2(cuRNGState& localState){ Matrix<T,2> a; T aabs, ctheta, stheta, phi; a(0,0) = Random<T>(localState, (T)-1.0, (T)1.0); aabs = sqrt( 1.0 - a(0,0) * a(0,0)); ctheta = Random<T>(localState, (T)-1.0, (T)1.0); phi = PII * Random<T>(localState); stheta = ( curand(&localState) & 1 ? 1 : -1 ) * sqrt( (T)1.0 - ctheta * ctheta ); a(0,1) = aabs * stheta * cos( phi ); a(1,0) = aabs * stheta * sin( phi ); a(1,1) = aabs * ctheta; return a; } /** @brief Update the SU(Nc) link with the new SU(2) matrix, link <- u * link @param u SU(2) matrix represented by four real numbers @param link SU(Nc) matrix @param id indices */ template <class T, int NCOLORS> __host__ __device__ static inline void mul_block_sun( Matrix<T,2> u, Matrix<typename ComplexTypeId<T>::Type,NCOLORS> &link, int2 id ){ typename ComplexTypeId<T>::Type tmp; for ( int j = 0; j < NCOLORS; j++ ) { tmp = makeComplex( u(0,0), u(1,1) ) * link(id.x, j) + makeComplex( u(1,0), u(0,1) ) * link(id.y, j); link(id.y, j) = makeComplex(-u(1,0), u(0,1) ) * link(id.x, j) + makeComplex( u(0,0),-u(1,1) ) * link(id.y, j); link(id.x, j) = tmp; } } /** @brief Calculate the SU(2) index block in the SU(Nc) matrix @param block number to calculate the index's, the total number of blocks is NCOLORS * ( NCOLORS - 1) / 2. @return Returns two index's in int2 type, accessed by .x and .y. 
*/ template<int NCOLORS> __host__ __device__ static inline int2 IndexBlock(int block){ int2 id; int i1; int found = 0; int del_i = 0; int index = -1; while ( del_i < (NCOLORS - 1) && found == 0 ) { del_i++; for ( i1 = 0; i1 < (NCOLORS - del_i); i1++ ) { index++; if ( index == block ) { found = 1; break; } } } id.y = i1 + del_i; id.x = i1; return id; } /** @brief Generate a SU(Nc) random matrix @param localstate CURAND rng state @return SU(Nc) matrix */ template <class Float, int NCOLORS> __device__ inline Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> randomize( cuRNGState& localState ){ Matrix<typename ComplexTypeId<Float>::Type,NCOLORS> U; for ( int i = 0; i < NCOLORS; i++ ) for ( int j = 0; j < NCOLORS; j++ ) U(i,j) = makeComplex( (Float)(Random<Float>(localState) - 0.5), (Float)(Random<Float>(localState) - 0.5)); reunit_link<Float>(U); return U; /*setIdentity(&U); for( int block = 0; block < NCOLORS * ( NCOLORS - 1) / 2; block++ ) { Matrix<Float,2> rr = randomSU2<Float>(localState); int2 id = IndexBlock<NCOLORS>( block ); mul_block_sun<Float, NCOLORS>(rr, U, id); //U = block_su2_to_su3<Float>( U, a00, a01, a10, a11, block ); } return U;*/ } template<typename Float, typename Gauge, int NCOLORS> __global__ void compute_InitGauge_HotStart(InitGaugeHotArg<Gauge> arg){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx >= arg.threads ) return; typedef typename ComplexTypeId<Float>::Type Cmplx; #ifdef MULTI_GPU int X[4], x[4]; for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; for ( int dr = 0; dr < 4; ++dr ) X[dr] += 2 * arg.border[dr]; int id = idx; cuRNGState localState = arg.rngstate.State()[ id ]; #else cuRNGState localState = arg.rngstate.State()[ idx ]; #endif for ( int parity = 0; parity < 2; parity++ ) { #ifdef MULTI_GPU getCoords(x, id, arg.X, parity); for ( int dr = 0; dr < 4; ++dr ) x[dr] += arg.border[dr]; idx = linkIndex(x,X); #endif for ( int d = 0; d < 4; d++ ) { Matrix<Cmplx,NCOLORS> U; U = randomize<Float, NCOLORS>(localState); arg.dataOr.save((Float*)(U.data),idx, d, parity); } } #ifdef MULTI_GPU arg.rngstate.State()[ id ] = localState; #else arg.rngstate.State()[ idx ] = localState; #endif } template<typename Float, typename Gauge, int NCOLORS> class InitGaugeHot : Tunable { InitGaugeHotArg<Gauge> arg; mutable char aux_string[128]; // used as a label in the autotuner private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool tuneSharedBytes() const { return false; } // Don't tune shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.threads; } public: InitGaugeHot(InitGaugeHotArg<Gauge> &arg) : arg(arg) { } ~InitGaugeHot () { } void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); compute_InitGauge_HotStart<Float, Gauge, NCOLORS><< < tp.grid,tp.block >> > (arg); //cudaDeviceSynchronize(); } TuneKey tuneKey() const { std::stringstream vol; vol << arg.X[0] << "x"; vol << arg.X[1] << "x"; vol << arg.X[2] << "x"; vol << arg.X[3]; sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float)); return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string); } std::string paramString(const TuneParam &param) const { std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")"; ps << "shared=" << param.shared_bytes; return ps.str(); } void preTune(){ arg.rngstate.backup(); } void postTune(){ arg.rngstate.restore(); } long long flops() const { return 0; } // Only correct if there is no link reconstruction, no cub reduction accounted also long long bytes() const { return 0; } //no accounting the reduction!!!! }; template<typename Float, int NCOLORS, typename Gauge> void InitGaugeField( Gauge dataOr, cudaGaugeField& data, RNG &rngstate) { InitGaugeHotArg<Gauge> initarg(dataOr, data, rngstate); InitGaugeHot<Float, Gauge, NCOLORS> init(initarg); init.apply(0); checkCudaError(); cudaDeviceSynchronize(); data.exchangeExtendedGhost(data.R(),false); /*cudaDeviceSynchronize(); const double unitarize_eps = 1e-14; const double max_error = 1e-10; const int reunit_allow_svd = 1; const int reunit_svd_only = 0; const double svd_rel_error = 1e-6; const double svd_abs_error = 1e-6; setUnitarizeLinksConstants(unitarize_eps, max_error, reunit_allow_svd, reunit_svd_only, svd_rel_error, svd_abs_error); int num_failures=0; int* num_failures_dev; cudaMalloc((void**)&num_failures_dev, sizeof(int)); cudaMemset(num_failures_dev, 0, sizeof(int)); if(num_failures_dev == NULL) errorQuda("cudaMalloc failed for dev_pointer\n"); unitarizeLinksQuda(data, num_failures_dev); cudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost); if(num_failures>0){ cudaFree(num_failures_dev); errorQuda("Error in the unitarization\n"); exit(1); } cudaFree(num_failures_dev);*/ } template<typename Float> void InitGaugeField( cudaGaugeField& data, RNG &rngstate) { if ( data.isNative() ) { if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge; InitGaugeField<Float, 3>(Gauge(data), data, rngstate); } else { errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct()); } } else { errorQuda("Invalid Gauge Order\n"); } } #endif // GPU_GAUGE_ALG /** @brief Perform a hot start to the gauge field, random SU(3) matrix, followed by reunitarization, also exchange borders links in multi-GPU case. 
* * @param[in,out] data Gauge field * @param[in,out] rngstate state of the CURAND random number generator */ void InitGaugeField( cudaGaugeField& data, RNG &rngstate) { #ifdef GPU_GAUGE_ALG if ( data.Precision() == QUDA_SINGLE_PRECISION ) { InitGaugeField<float> (data, rngstate); } else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) { InitGaugeField<double>(data, rngstate); } else { errorQuda("Precision %d not supported", data.Precision()); } #else errorQuda("Pure gauge code has not been built"); #endif } }
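// randomize() above fills a link with uniform noise and relies on reunit_link()
// to project it back onto SU(3): normalize row 0, orthogonalize row 1 against
// row 0 and normalize it, then rebuild row 2 as the conjugated cross product of
// the first two rows. The following host-only sketch (not taken from the QUDA
// sources above) spells out the same projection with std::complex; the array
// layout and the name reunitarize_su3_demo are assumptions made for illustration.
#include <cmath>
#include <complex>

using cplx = std::complex<double>;

void reunitarize_su3_demo(cplx U[3][3]) {
  // Normalize row 0.
  double n0 = 0.0;
  for (int c = 0; c < 3; c++) n0 += std::norm(U[0][c]);   // |z|^2 summed over the row
  n0 = 1.0 / std::sqrt(n0);
  for (int c = 0; c < 3; c++) U[0][c] *= n0;

  // Subtract the row-0 component from row 1, then normalize row 1.
  cplx proj(0.0, 0.0);
  for (int c = 0; c < 3; c++) proj += std::conj(U[0][c]) * U[1][c];
  for (int c = 0; c < 3; c++) U[1][c] -= proj * U[0][c];
  double n1 = 0.0;
  for (int c = 0; c < 3; c++) n1 += std::norm(U[1][c]);
  n1 = 1.0 / std::sqrt(n1);
  for (int c = 0; c < 3; c++) U[1][c] *= n1;

  // Row 2 is fixed by unitarity and det(U) = 1: conj(row0 x row1).
  U[2][0] = std::conj(U[0][1] * U[1][2] - U[0][2] * U[1][1]);
  U[2][1] = std::conj(U[0][2] * U[1][0] - U[0][0] * U[1][2]);
  U[2][2] = std::conj(U[0][0] * U[1][1] - U[0][1] * U[1][0]);
}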
062fdc08d1fe31cc1caa8172660fc349c39d7e06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cuh" #include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh" __global__ void Dihedral14LJForceWithDirectCFKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, const VECTOR *boxlength, const int *a_14, const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, const float *LJ_type_A, const float *LJ_type_B, VECTOR *frc) { int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; if (dihedral_14_i < dihedral_14_numbers) { int int_x; int int_y; int int_z; UINT_VECTOR_LJ_TYPE r1, r2; VECTOR dr; float dr_abs; float dr2; float dr_1; float dr_2; float dr_4; float dr_8; float dr_14; float frc_abs = 0.; VECTOR temp_frc; int x, y; int atom_pair_LJ_type; int atom_i = a_14[dihedral_14_i]; int atom_j = b_14[dihedral_14_i]; r1 = uint_crd[atom_i]; r2 = uint_crd[atom_j]; int_x = r2.uint_x - r1.uint_x; int_y = r2.uint_y - r1.uint_y; int_z = r2.uint_z - r1.uint_z; dr.x = boxlength[0].x * int_x; dr.y = boxlength[0].y * int_y; dr.z = boxlength[0].z * int_z; dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; dr_2 = 1.0 / dr2; dr_4 = dr_2 * dr_2; dr_8 = dr_4 * dr_4; dr_14 = dr_8 * dr_4 * dr_2; dr_abs = norm3df(dr.x, dr.y, dr.z); dr_1 = 1. 
/ dr_abs; float charge_i = r1.charge; float charge_j = r2.charge; float frc_cf_abs; frc_cf_abs = cf_scale_factor[dihedral_14_i] * dr_2 * dr_1; frc_cf_abs = -charge_i * charge_j * frc_cf_abs; // LJ y = (r2.LJ_type - r1.LJ_type); x = y >> 31; y = (y ^ x) - x; x = r2.LJ_type + r1.LJ_type; r2.LJ_type = (x + y) >> 1; x = (x - y) >> 1; atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; frc_abs = -LJ_type_A[atom_pair_LJ_type] * dr_14 + LJ_type_B[atom_pair_LJ_type] * dr_8; frc_abs *= lj_scale_factor[dihedral_14_i]; frc_abs += frc_cf_abs; temp_frc.x = frc_abs * dr.x; temp_frc.y = frc_abs * dr.y; temp_frc.z = frc_abs * dr.z; atomicAdd(&frc[atom_j].x, -temp_frc.x); atomicAdd(&frc[atom_j].y, -temp_frc.y); atomicAdd(&frc[atom_j].z, -temp_frc.z); atomicAdd(&frc[atom_i].x, temp_frc.x); atomicAdd(&frc[atom_i].y, temp_frc.y); atomicAdd(&frc[atom_i].z, temp_frc.z); } } void Dihedral14LJForceWithDirectCF(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, const float *LJ_type_A, const float *LJ_type_B, float *frc_f, hipStream_t stream) { size_t thread_per_block = 128; size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128); UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL; Cuda_Malloc_Safely(reinterpret_cast<void **>(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers); UNSIGNED_INT_VECTOR *uint_crd = const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); hipLaunchKernelGGL(( Copy_Crd_To_New_Crd_Start), dim3(ceilf(static_cast<float>(atom_numbers) / 32)), dim3(32), 0, stream, atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); hipStreamSynchronize(stream); VECTOR *boxlength = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(boxlength_f)); hipLaunchKernelGGL(( Reset_List), dim3(ceilf(static_cast<float>(3. * atom_numbers) / 128)), dim3(128), 0, stream, 3 * atom_numbers, frc_f, 0.); VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f)); hipLaunchKernelGGL(( Dihedral14LJForceWithDirectCFKernel), dim3(block_per_grid), dim3(thread_per_block), 0, stream, dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, cf_scale_factor, LJ_type_A, LJ_type_B, frc); return; } void Dihedral14LJForceWithDirectCF(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, const float *LJ_type_A, const float *LJ_type_B, float *frc_f, hipStream_t stream);
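// The kernel above looks up the Lennard-Jones A/B coefficients with a
// branch-free triangular index over the unordered pair of atom LJ types:
// |diff| is computed with a sign-mask trick, max/min are recovered from the
// sum and |diff|, and the pair (min, max) maps to max*(max+1)/2 + min in a
// flat triangular table. A standalone sketch of that indexing (not taken from
// the MindSpore sources above); the function names are invented for illustration.
#include <cassert>

__host__ __device__ inline int lj_pair_index_branchless(int ta, int tb) {
  int y = tb - ta;
  int x = y >> 31;          // 0 if y >= 0, -1 if y < 0 (arithmetic shift)
  y = (y ^ x) - x;          // |tb - ta|
  x = tb + ta;
  int vmax = (x + y) >> 1;  // max(ta, tb)
  int vmin = (x - y) >> 1;  // min(ta, tb)
  return (vmax * (vmax + 1) >> 1) + vmin;
}

__host__ __device__ inline int lj_pair_index_plain(int ta, int tb) {
  int vmax = ta > tb ? ta : tb;
  int vmin = ta > tb ? tb : ta;
  return vmax * (vmax + 1) / 2 + vmin;
}

int main() {
  // Both forms agree and are symmetric in their arguments.
  for (int a = 0; a < 8; a++)
    for (int b = 0; b < 8; b++) {
      assert(lj_pair_index_branchless(a, b) == lj_pair_index_plain(a, b));
      assert(lj_pair_index_branchless(a, b) == lj_pair_index_branchless(b, a));
    }
  return 0;
}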
062fdc08d1fe31cc1caa8172660fc349c39d7e06.cu
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/sponge/nb14/dihedral_14_lj_force_with_direct_cf_impl.cuh" #include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh" __global__ void Dihedral14LJForceWithDirectCFKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd, const VECTOR *boxlength, const int *a_14, const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, const float *LJ_type_A, const float *LJ_type_B, VECTOR *frc) { int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x; if (dihedral_14_i < dihedral_14_numbers) { int int_x; int int_y; int int_z; UINT_VECTOR_LJ_TYPE r1, r2; VECTOR dr; float dr_abs; float dr2; float dr_1; float dr_2; float dr_4; float dr_8; float dr_14; float frc_abs = 0.; VECTOR temp_frc; int x, y; int atom_pair_LJ_type; int atom_i = a_14[dihedral_14_i]; int atom_j = b_14[dihedral_14_i]; r1 = uint_crd[atom_i]; r2 = uint_crd[atom_j]; int_x = r2.uint_x - r1.uint_x; int_y = r2.uint_y - r1.uint_y; int_z = r2.uint_z - r1.uint_z; dr.x = boxlength[0].x * int_x; dr.y = boxlength[0].y * int_y; dr.z = boxlength[0].z * int_z; dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; dr_2 = 1.0 / dr2; dr_4 = dr_2 * dr_2; dr_8 = dr_4 * dr_4; dr_14 = dr_8 * dr_4 * dr_2; dr_abs = norm3df(dr.x, dr.y, dr.z); dr_1 = 1. 
/ dr_abs; float charge_i = r1.charge; float charge_j = r2.charge; float frc_cf_abs; frc_cf_abs = cf_scale_factor[dihedral_14_i] * dr_2 * dr_1; frc_cf_abs = -charge_i * charge_j * frc_cf_abs; // LJ y = (r2.LJ_type - r1.LJ_type); x = y >> 31; y = (y ^ x) - x; x = r2.LJ_type + r1.LJ_type; r2.LJ_type = (x + y) >> 1; x = (x - y) >> 1; atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x; frc_abs = -LJ_type_A[atom_pair_LJ_type] * dr_14 + LJ_type_B[atom_pair_LJ_type] * dr_8; frc_abs *= lj_scale_factor[dihedral_14_i]; frc_abs += frc_cf_abs; temp_frc.x = frc_abs * dr.x; temp_frc.y = frc_abs * dr.y; temp_frc.z = frc_abs * dr.z; atomicAdd(&frc[atom_j].x, -temp_frc.x); atomicAdd(&frc[atom_j].y, -temp_frc.y); atomicAdd(&frc[atom_j].z, -temp_frc.z); atomicAdd(&frc[atom_i].x, temp_frc.x); atomicAdd(&frc[atom_i].y, temp_frc.y); atomicAdd(&frc[atom_i].z, temp_frc.z); } } void Dihedral14LJForceWithDirectCF(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, const float *LJ_type_A, const float *LJ_type_B, float *frc_f, cudaStream_t stream) { size_t thread_per_block = 128; size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128); UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL; Cuda_Malloc_Safely(reinterpret_cast<void **>(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers); UNSIGNED_INT_VECTOR *uint_crd = const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f)); Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>( atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge); cudaStreamSynchronize(stream); VECTOR *boxlength = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(boxlength_f)); Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.); VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f)); Dihedral14LJForceWithDirectCFKernel<<<block_per_grid, thread_per_block, 0, stream>>>( dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, cf_scale_factor, LJ_type_A, LJ_type_B, frc); return; } void Dihedral14LJForceWithDirectCF(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f, const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14, const int *b_14, const float *lj_scale_factor, const float *cf_scale_factor, const float *LJ_type_A, const float *LJ_type_B, float *frc_f, cudaStream_t stream);
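// The .hip/.cu pair above differs mainly in launch syntax: hipify rewrites the
// CUDA triple-chevron launch into a hipLaunchKernelGGL call carrying the same
// grid, block, dynamic shared memory and stream arguments. A minimal sketch
// (invented example, not taken from the sources above) showing the two
// equivalent forms side by side.
#include <cuda_runtime.h>

__global__ void scale_kernel(float *data, float alpha, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= alpha;
}

void launch_scale(float *d_data, float alpha, int n, cudaStream_t stream) {
  dim3 block(128);
  dim3 grid((n + block.x - 1) / block.x);   // round up so all n elements are covered
  // CUDA form, as written in the .cu file:
  scale_kernel<<<grid, block, 0, stream>>>(d_data, alpha, n);
  // Equivalent HIP form emitted by hipify (with cudaStream_t renamed hipStream_t):
  //   hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_data, alpha, n);
}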
70fcc7dab3d7bbaf1489ac348c49490741a93806.hip
// !!! This is a file automatically generated by hipify!!! // // auto-generated by op2.py // //global constants #ifndef MAX_CONST_SIZE #define MAX_CONST_SIZE 128 #endif __constant__ float CFL_cuda; __constant__ float EPS_cuda; __constant__ float g_cuda; __constant__ float Mn_cuda; __constant__ int spherical_cuda; __constant__ float Radius_cuda; //header #include "op_lib_cpp.h" #include "op_cuda_rt_support.h" #include "op_cuda_reduction.h" void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ if (!OP_hybrid_gpu) return; if (!strcmp(name,"CFL")) { cutilSafeCall(hipMemcpyToSymbol(CFL_cuda, dat, dim*size)); } else if (!strcmp(name,"EPS")) { cutilSafeCall(hipMemcpyToSymbol(EPS_cuda, dat, dim*size)); } else if (!strcmp(name,"g")) { cutilSafeCall(hipMemcpyToSymbol(g_cuda, dat, dim*size)); } else if (!strcmp(name,"Mn")) { cutilSafeCall(hipMemcpyToSymbol(Mn_cuda, dat, dim*size)); } else if (!strcmp(name,"spherical")) { cutilSafeCall(hipMemcpyToSymbol(spherical_cuda, dat, dim*size)); } else if (!strcmp(name,"Radius")) { cutilSafeCall(hipMemcpyToSymbol(Radius_cuda, dat, dim*size)); } else { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "EvolveValuesRK2_1_kernel.cu" #include "EvolveValuesRK2_2_kernel.cu" #include "Friction_manning_kernel.cu" #include "simulation_1_kernel.cu" #include "incConst_kernel.cu" #include "initEta_formula_kernel.cu" #include "initU_formula_kernel.cu" #include "initV_formula_kernel.cu" #include "applyConst_kernel.cu" #include "initBathymetry_large_kernel.hip" #include "initBathyRelative_formula_kernel.cu" #include "initBathymetry_formula_kernel.cu" #include "zero_bathy_kernel.cu" #include "initBathymetry_update_kernel.cu" #include "initBore_select_kernel.cu" #include "initGaussianLandslide_kernel.cu" #include "values_operation2_kernel.cu" #include "getTotalVol_kernel.cu" #include "getMaxElevation_kernel.cu" #include "getMaxSpeed_kernel.cu" #include "gatherLocations_kernel.cu" #include "toOutputs_kernel.cu" #include "computeGradient_kernel.cu" #include "limiter_kernel.cu" #include "computeFluxes_kernel.cu" #include "Timestep_kernel.cu" #include "NumericalFluxes_kernel.cu" #include "computeFluxes_sph_kernel.cu" #include "NumericalFluxes_sph_kernel.cu"
70fcc7dab3d7bbaf1489ac348c49490741a93806.cu
// // auto-generated by op2.py // //global constants #ifndef MAX_CONST_SIZE #define MAX_CONST_SIZE 128 #endif __constant__ float CFL_cuda; __constant__ float EPS_cuda; __constant__ float g_cuda; __constant__ float Mn_cuda; __constant__ int spherical_cuda; __constant__ float Radius_cuda; //header #include "op_lib_cpp.h" #include "op_cuda_rt_support.h" #include "op_cuda_reduction.h" void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ if (!OP_hybrid_gpu) return; if (!strcmp(name,"CFL")) { cutilSafeCall(cudaMemcpyToSymbol(CFL_cuda, dat, dim*size)); } else if (!strcmp(name,"EPS")) { cutilSafeCall(cudaMemcpyToSymbol(EPS_cuda, dat, dim*size)); } else if (!strcmp(name,"g")) { cutilSafeCall(cudaMemcpyToSymbol(g_cuda, dat, dim*size)); } else if (!strcmp(name,"Mn")) { cutilSafeCall(cudaMemcpyToSymbol(Mn_cuda, dat, dim*size)); } else if (!strcmp(name,"spherical")) { cutilSafeCall(cudaMemcpyToSymbol(spherical_cuda, dat, dim*size)); } else if (!strcmp(name,"Radius")) { cutilSafeCall(cudaMemcpyToSymbol(Radius_cuda, dat, dim*size)); } else { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "EvolveValuesRK2_1_kernel.cu" #include "EvolveValuesRK2_2_kernel.cu" #include "Friction_manning_kernel.cu" #include "simulation_1_kernel.cu" #include "incConst_kernel.cu" #include "initEta_formula_kernel.cu" #include "initU_formula_kernel.cu" #include "initV_formula_kernel.cu" #include "applyConst_kernel.cu" #include "initBathymetry_large_kernel.cu" #include "initBathyRelative_formula_kernel.cu" #include "initBathymetry_formula_kernel.cu" #include "zero_bathy_kernel.cu" #include "initBathymetry_update_kernel.cu" #include "initBore_select_kernel.cu" #include "initGaussianLandslide_kernel.cu" #include "values_operation2_kernel.cu" #include "getTotalVol_kernel.cu" #include "getMaxElevation_kernel.cu" #include "getMaxSpeed_kernel.cu" #include "gatherLocations_kernel.cu" #include "toOutputs_kernel.cu" #include "computeGradient_kernel.cu" #include "limiter_kernel.cu" #include "computeFluxes_kernel.cu" #include "Timestep_kernel.cu" #include "NumericalFluxes_kernel.cu" #include "computeFluxes_sph_kernel.cu" #include "NumericalFluxes_sph_kernel.cu"
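// op_decl_const_char above copies host data into named __constant__ variables;
// the only change hipify makes is renaming the runtime call (cudaMemcpyToSymbol
// becomes hipMemcpyToSymbol), while the __constant__ declarations stay the same.
// A self-contained sketch of that pattern (not taken from the OP2 sources
// above); the constant name gravity_demo is invented for illustration.
#include <cuda_runtime.h>

__constant__ float gravity_demo;

__global__ void apply_gravity(float *vel, float dt, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) vel[i] -= gravity_demo * dt;   // kernels read constant memory directly
}

void set_gravity(const float *host_value) {
  // CUDA form; the hipified file would call hipMemcpyToSymbol(gravity_demo, ...).
  cudaMemcpyToSymbol(gravity_demo, host_value, sizeof(float));
}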
e37ead726ead8f41531bff95994775b45675f919.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel1_ba2 [8][2]; static int dims_update_halo_kernel1_ba2_h [8][2] = {0}; //user function __device__ inline void update_halo_kernel1_ba2_gpu(ACC<double> &density0, ACC<double> &density1, ACC<double> &energy0, ACC<double> &energy1, ACC<double> &pressure, ACC<double> &viscosity, ACC<double> &soundspeed, const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(0,0,3); if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(0,0,3); if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(0,0,3); if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(0,0,3); if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(0,0,3); if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(0,0,3); if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(0,0,3); } __global__ void ops_update_halo_kernel1_ba2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[0][0] * dims_update_halo_kernel1_ba2[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[1][0] * dims_update_halo_kernel1_ba2[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[2][0] * dims_update_halo_kernel1_ba2[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[3][0] * dims_update_halo_kernel1_ba2[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[4][0] * dims_update_halo_kernel1_ba2[4][1]; arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[5][0] * dims_update_halo_kernel1_ba2[5][1]; arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[6][0] * dims_update_halo_kernel1_ba2[6][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel1_ba2[0][0], dims_update_halo_kernel1_ba2[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel1_ba2[1][0], dims_update_halo_kernel1_ba2[1][1], arg1); ACC<double> argp2(dims_update_halo_kernel1_ba2[2][0], dims_update_halo_kernel1_ba2[2][1], arg2); ACC<double> argp3(dims_update_halo_kernel1_ba2[3][0], dims_update_halo_kernel1_ba2[3][1], arg3); ACC<double> argp4(dims_update_halo_kernel1_ba2[4][0], dims_update_halo_kernel1_ba2[4][1], arg4); ACC<double> argp5(dims_update_halo_kernel1_ba2[5][0], dims_update_halo_kernel1_ba2[5][1], arg5); ACC<double> argp6(dims_update_halo_kernel1_ba2[6][0], dims_update_halo_kernel1_ba2[6][1], arg6); update_halo_kernel1_ba2_gpu(argp0, argp1, argp2, argp3, argp4, argp5, argp6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_ba2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, 
ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_update_halo_kernel1_ba2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif //Timing double t1,t2,c1,c2; ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,8,range,20)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(20,"update_halo_kernel1_ba2"); OPS_kernels[20].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != dims_update_halo_kernel1_ba2_h[0][0] || ydim0 != dims_update_halo_kernel1_ba2_h[0][1] || xdim1 != dims_update_halo_kernel1_ba2_h[1][0] || ydim1 != dims_update_halo_kernel1_ba2_h[1][1] || xdim2 != dims_update_halo_kernel1_ba2_h[2][0] || ydim2 != dims_update_halo_kernel1_ba2_h[2][1] || xdim3 != dims_update_halo_kernel1_ba2_h[3][0] || ydim3 != dims_update_halo_kernel1_ba2_h[3][1] || xdim4 != dims_update_halo_kernel1_ba2_h[4][0] || ydim4 != dims_update_halo_kernel1_ba2_h[4][1] || xdim5 != dims_update_halo_kernel1_ba2_h[5][0] || ydim5 != dims_update_halo_kernel1_ba2_h[5][1] || xdim6 != dims_update_halo_kernel1_ba2_h[6][0] || ydim6 != dims_update_halo_kernel1_ba2_h[6][1]) { dims_update_halo_kernel1_ba2_h[0][0] = xdim0; dims_update_halo_kernel1_ba2_h[0][1] = ydim0; dims_update_halo_kernel1_ba2_h[1][0] = xdim1; dims_update_halo_kernel1_ba2_h[1][1] = ydim1; dims_update_halo_kernel1_ba2_h[2][0] = xdim2; dims_update_halo_kernel1_ba2_h[2][1] = ydim2; dims_update_halo_kernel1_ba2_h[3][0] = xdim3; dims_update_halo_kernel1_ba2_h[3][1] = ydim3; dims_update_halo_kernel1_ba2_h[4][0] = xdim4; dims_update_halo_kernel1_ba2_h[4][1] = ydim4; dims_update_halo_kernel1_ba2_h[5][0] = xdim5; dims_update_halo_kernel1_ba2_h[5][1] = ydim5; dims_update_halo_kernel1_ba2_h[6][0] = xdim6; dims_update_halo_kernel1_ba2_h[6][1] = ydim6; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_ba2, dims_update_halo_kernel1_ba2_h, sizeof(dims_update_halo_kernel1_ba2))); } int *arg7h = (int *)arg7.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); 
consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[20].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel1_ba2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double 
*)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[20].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[20].mpi_time += t2-t1; OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_ba2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 20; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 20; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg*)malloc(8*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_ba2_execute; if (OPS_diags > 1) { ops_timing_realloc(20,"update_halo_kernel1_ba2"); } ops_enqueue_kernel(desc); } #endif
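// The generated wrapper above sizes a 3D grid with the ceiling division
// (n - 1) / block + 1 and has every thread form the flattened offset
// x + y*xdim + z*xdim*ydim before a bounds check against the loop range.
// A standalone sketch of that launch pattern (not taken from the OPS sources
// above); the kernel and variable names are invented for illustration.
__global__ void fill3d(double *field, int xdim, int ydim,
                       int size0, int size1, int size2, double value) {
  int ix = blockIdx.x * blockDim.x + threadIdx.x;
  int iy = blockIdx.y * blockDim.y + threadIdx.y;
  int iz = blockIdx.z * blockDim.z + threadIdx.z;
  if (ix < size0 && iy < size1 && iz < size2)
    field[ix + iy * xdim + iz * xdim * ydim] = value;   // row-major 3D offset
}

void launch_fill3d(double *d_field, int xdim, int ydim,
                   int size0, int size1, int size2, double value) {
  const int bx = 32, by = 4, bz = 2;
  dim3 block(bx, by, bz);
  dim3 grid((size0 - 1) / bx + 1,    // rounds up; matches the wrapper's grid sizing
            (size1 - 1) / by + 1,
            (size2 - 1) / bz + 1);
  if (size0 > 0 && size1 > 0 && size2 > 0)   // the wrapper guards empty ranges the same way
    fill3d<<<grid, block>>>(d_field, xdim, ydim, size0, size1, size2, value);
}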
e37ead726ead8f41531bff95994775b45675f919.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel1_ba2 [8][2]; static int dims_update_halo_kernel1_ba2_h [8][2] = {0}; //user function __device__ inline void update_halo_kernel1_ba2_gpu(ACC<double> &density0, ACC<double> &density1, ACC<double> &energy0, ACC<double> &energy1, ACC<double> &pressure, ACC<double> &viscosity, ACC<double> &soundspeed, const int* fields) { if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(0,0,3); if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(0,0,3); if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(0,0,3); if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(0,0,3); if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(0,0,3); if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(0,0,3); if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(0,0,3); } __global__ void ops_update_halo_kernel1_ba2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, double* __restrict arg6, const int* __restrict arg7, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[0][0] * dims_update_halo_kernel1_ba2[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[1][0] * dims_update_halo_kernel1_ba2[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[2][0] * dims_update_halo_kernel1_ba2[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[3][0] * dims_update_halo_kernel1_ba2[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[4][0] * dims_update_halo_kernel1_ba2[4][1]; arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[5][0] * dims_update_halo_kernel1_ba2[5][1]; arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_ba2[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_ba2[6][0] * dims_update_halo_kernel1_ba2[6][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel1_ba2[0][0], dims_update_halo_kernel1_ba2[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel1_ba2[1][0], dims_update_halo_kernel1_ba2[1][1], arg1); ACC<double> argp2(dims_update_halo_kernel1_ba2[2][0], dims_update_halo_kernel1_ba2[2][1], arg2); ACC<double> argp3(dims_update_halo_kernel1_ba2[3][0], dims_update_halo_kernel1_ba2[3][1], arg3); ACC<double> argp4(dims_update_halo_kernel1_ba2[4][0], dims_update_halo_kernel1_ba2[4][1], arg4); ACC<double> argp5(dims_update_halo_kernel1_ba2[5][0], dims_update_halo_kernel1_ba2[5][1], arg5); ACC<double> argp6(dims_update_halo_kernel1_ba2[6][0], dims_update_halo_kernel1_ba2[6][1], arg6); update_halo_kernel1_ba2_gpu(argp0, argp1, argp2, argp3, argp4, argp5, argp6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_ba2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void 
ops_par_loop_update_halo_kernel1_ba2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif //Timing double t1,t2,c1,c2; ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,8,range,20)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(20,"update_halo_kernel1_ba2"); OPS_kernels[20].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != dims_update_halo_kernel1_ba2_h[0][0] || ydim0 != dims_update_halo_kernel1_ba2_h[0][1] || xdim1 != dims_update_halo_kernel1_ba2_h[1][0] || ydim1 != dims_update_halo_kernel1_ba2_h[1][1] || xdim2 != dims_update_halo_kernel1_ba2_h[2][0] || ydim2 != dims_update_halo_kernel1_ba2_h[2][1] || xdim3 != dims_update_halo_kernel1_ba2_h[3][0] || ydim3 != dims_update_halo_kernel1_ba2_h[3][1] || xdim4 != dims_update_halo_kernel1_ba2_h[4][0] || ydim4 != dims_update_halo_kernel1_ba2_h[4][1] || xdim5 != dims_update_halo_kernel1_ba2_h[5][0] || ydim5 != dims_update_halo_kernel1_ba2_h[5][1] || xdim6 != dims_update_halo_kernel1_ba2_h[6][0] || ydim6 != dims_update_halo_kernel1_ba2_h[6][1]) { dims_update_halo_kernel1_ba2_h[0][0] = xdim0; dims_update_halo_kernel1_ba2_h[0][1] = ydim0; dims_update_halo_kernel1_ba2_h[1][0] = xdim1; dims_update_halo_kernel1_ba2_h[1][1] = ydim1; dims_update_halo_kernel1_ba2_h[2][0] = xdim2; dims_update_halo_kernel1_ba2_h[2][1] = ydim2; dims_update_halo_kernel1_ba2_h[3][0] = xdim3; dims_update_halo_kernel1_ba2_h[3][1] = ydim3; dims_update_halo_kernel1_ba2_h[4][0] = xdim4; dims_update_halo_kernel1_ba2_h[4][1] = ydim4; dims_update_halo_kernel1_ba2_h[5][0] = xdim5; dims_update_halo_kernel1_ba2_h[5][1] = ydim5; dims_update_halo_kernel1_ba2_h[6][0] = xdim6; dims_update_halo_kernel1_ba2_h[6][1] = ydim6; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_ba2, dims_update_halo_kernel1_ba2_h, sizeof(dims_update_halo_kernel1_ba2))); } int *arg7h = (int *)arg7.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + 
consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args,8,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[20].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel1_ba2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size); 
cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[20].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); ops_set_halo_dirtybit3(&args[6],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[20].mpi_time += t2-t1; OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[20].transfer += ops_compute_transfer(dim, start, end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_ba2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 20; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 20; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg*)malloc(8*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_ba2_execute; if (OPS_diags > 1) { ops_timing_realloc(20,"update_halo_kernel1_ba2"); } ops_enqueue_kernel(desc); } #endif
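// Note how the wrapper only re-uploads dims_update_halo_kernel1_ba2 to
// __constant__ memory when one of the dat sizes changed, keeping a host-side
// shadow array (..._h) of the last upload. A short sketch of that caching
// pattern (not taken from the OPS sources above); the variable names are
// invented for illustration.
#include <cuda_runtime.h>

__constant__ int dims_demo[2];          // device-side copy read by the kernels
static int dims_demo_h[2] = {-1, -1};   // host-side shadow of the last upload

void update_dims_if_needed(int xdim, int ydim) {
  if (xdim != dims_demo_h[0] || ydim != dims_demo_h[1]) {
    dims_demo_h[0] = xdim;
    dims_demo_h[1] = ydim;
    // One small transfer per size change instead of one per kernel launch.
    cudaMemcpyToSymbol(dims_demo, dims_demo_h, sizeof(dims_demo));
  }
}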
40bf62797d83977871b222c910773c480d2328bd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void MultiplyAdd(float *d_Result, float *d_Data, int width, int height)
{
  const int x = __mul24(blockIdx.x, 16) + threadIdx.x;
  const int y = __mul24(blockIdx.y, 16) + threadIdx.y;
  int p = __mul24(y, width) + x;

  if (x < width && y < height)
    d_Result[p] = d_ConstantA[0] * d_Data[p] + d_ConstantB[0];
  __syncthreads();
}
40bf62797d83977871b222c910773c480d2328bd.cu
#include "includes.h" __global__ void MultiplyAdd(float *d_Result, float *d_Data, int width, int height) { const int x = __mul24(blockIdx.x, 16) + threadIdx.x; const int y = __mul24(blockIdx.y, 16) + threadIdx.y; int p = __mul24(y, width) + x; if (x<width && y<height) d_Result[p] = d_ConstantA[0]*d_Data[p] + d_ConstantB[0]; __syncthreads(); }
a3258fe12aa5177dcd2647be4d04b11bd08a8937.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14) { for (int i=0; i < var_1; ++i) { if (comp <= var_3 * -1.0111E-42f) { for (int i=0; i < var_2; ++i) { comp += +1.9573E35f * var_4 + (var_5 - (-0.0f - var_6)); float tmp_1 = +1.9881E-37f; comp = tmp_1 - (-1.8594E-43f * var_7 + -1.4813E-43f / -1.4732E34f); if (comp <= +1.7059E-37f + (-1.1710E-23f / -1.6079E-37f * (var_8 + -0.0f - var_9))) { float tmp_2 = -1.9125E19f * (-1.7493E-44f / (var_10 / var_11)); float tmp_3 = -0.0f * (+1.3786E-41f * sqrtf((+1.9976E-42f + fabsf((var_12 + +1.0464E-10f * floorf(sinf(-1.8859E-42f))))))); comp += tmp_3 * tmp_2 * var_13 * sqrtf(-1.5827E-3f + +1.0035E-35f / -0.0f / (var_14 / -1.1672E34f)); } } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15); hipDeviceSynchronize(); return 0; }
a3258fe12aa5177dcd2647be4d04b11bd08a8937.cu
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void compute(float comp, int var_1, int var_2, float var_3, float var_4,
                        float var_5, float var_6, float var_7, float var_8, float var_9,
                        float var_10, float var_11, float var_12, float var_13, float var_14) {
  for (int i = 0; i < var_1; ++i) {
    if (comp <= var_3 * -1.0111E-42f) {
      for (int i = 0; i < var_2; ++i) {
        comp += +1.9573E35f * var_4 + (var_5 - (-0.0f - var_6));
        float tmp_1 = +1.9881E-37f;
        comp = tmp_1 - (-1.8594E-43f * var_7 + -1.4813E-43f / -1.4732E34f);
        if (comp <= +1.7059E-37f + (-1.1710E-23f / -1.6079E-37f * (var_8 + -0.0f - var_9))) {
          float tmp_2 = -1.9125E19f * (-1.7493E-44f / (var_10 / var_11));
          float tmp_3 = -0.0f * (+1.3786E-41f * sqrtf((+1.9976E-42f + fabsf((var_12 + +1.0464E-10f * floorf(sinf(-1.8859E-42f)))))));
          comp += tmp_3 * tmp_2 * var_13 * sqrtf(-1.5827E-3f + +1.0035E-35f / -0.0f / (var_14 / -1.1672E34f));
        }
      }
    }
  }
  printf("%.17g\n", comp);
}

float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float) * 10);
  for (int i = 0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);

  compute<<<1, 1>>>(tmp_1, tmp_2, tmp_3, tmp_4, tmp_5, tmp_6, tmp_7, tmp_8,
                    tmp_9, tmp_10, tmp_11, tmp_12, tmp_13, tmp_14, tmp_15);
  cudaDeviceSynchronize();

  return 0;
}
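The .hip and .cu versions of this generated test differ only in the runtime header and the kernel-launch syntax. The sketch below illustrates that mapping with a self-contained toy kernel; the kernel, guard, and helper names are assumptions for illustration and are not taken from the files above.

// Toy kernel and launcher showing the launch-syntax mapping hipify applies.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

__global__ void scaleSketch(float *data, float s, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

void launchScaleSketch(float *d_data, float s, int n)
{
  dim3 grid((n + 255) / 256), block(256);
#ifdef __HIPCC__
  // HIP form emitted by hipify: kernel, grid, block, shared-mem bytes, stream, args...
  hipLaunchKernelGGL(scaleSketch, grid, block, 0, 0, d_data, s, n);
#else
  // Original CUDA triple-chevron form with the same implicit arguments.
  scaleSketch<<<grid, block, 0, 0>>>(d_data, s, n);
#endif
}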
807b4c4fceb6487540e33b253b762c0850a80d41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_t1; int xdim0_update_halo_kernel1_t1_h = -1; __constant__ int ydim0_update_halo_kernel1_t1; int ydim0_update_halo_kernel1_t1_h = -1; __constant__ int xdim1_update_halo_kernel1_t1; int xdim1_update_halo_kernel1_t1_h = -1; __constant__ int ydim1_update_halo_kernel1_t1; int ydim1_update_halo_kernel1_t1_h = -1; __constant__ int xdim2_update_halo_kernel1_t1; int xdim2_update_halo_kernel1_t1_h = -1; __constant__ int ydim2_update_halo_kernel1_t1; int ydim2_update_halo_kernel1_t1_h = -1; __constant__ int xdim3_update_halo_kernel1_t1; int xdim3_update_halo_kernel1_t1_h = -1; __constant__ int ydim3_update_halo_kernel1_t1; int ydim3_update_halo_kernel1_t1_h = -1; __constant__ int xdim4_update_halo_kernel1_t1; int xdim4_update_halo_kernel1_t1_h = -1; __constant__ int ydim4_update_halo_kernel1_t1; int ydim4_update_halo_kernel1_t1_h = -1; __constant__ int xdim5_update_halo_kernel1_t1; int xdim5_update_halo_kernel1_t1_h = -1; __constant__ int ydim5_update_halo_kernel1_t1; int ydim5_update_halo_kernel1_t1_h = -1; __constant__ int xdim6_update_halo_kernel1_t1; int xdim6_update_halo_kernel1_t1_h = -1; __constant__ int ydim6_update_halo_kernel1_t1; int ydim6_update_halo_kernel1_t1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel1_t1 * (y) + \ xdim0_update_halo_kernel1_t1 * ydim0_update_halo_kernel1_t1 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel1_t1 * (y) + \ xdim1_update_halo_kernel1_t1 * ydim1_update_halo_kernel1_t1 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_update_halo_kernel1_t1 * (y) + \ xdim2_update_halo_kernel1_t1 * ydim2_update_halo_kernel1_t1 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_update_halo_kernel1_t1 * (y) + \ xdim3_update_halo_kernel1_t1 * ydim3_update_halo_kernel1_t1 * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_update_halo_kernel1_t1 * (y) + \ xdim4_update_halo_kernel1_t1 * ydim4_update_halo_kernel1_t1 * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_update_halo_kernel1_t1 * (y) + \ xdim5_update_halo_kernel1_t1 * ydim5_update_halo_kernel1_t1 * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_update_halo_kernel1_t1 * (y) + \ xdim6_update_halo_kernel1_t1 * ydim6_update_halo_kernel1_t1 * (z)) // user function __device__ inline void update_halo_kernel1_t1(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed, const int *fields) { if (fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, -1, 0)]; if (fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, -1, 0)]; if (fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, -1, 0)]; if (fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, -1, 0)]; if (fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, -1, 0)]; if (fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, -1, 0)]; if (fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, -1, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_t1(double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, double *__restrict 
arg3, double *__restrict arg4, double *__restrict arg5, double *__restrict arg6, const int *__restrict arg7, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim0_update_halo_kernel1_t1 * ydim0_update_halo_kernel1_t1; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim1_update_halo_kernel1_t1 * ydim1_update_halo_kernel1_t1; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim2_update_halo_kernel1_t1 * ydim2_update_halo_kernel1_t1; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim3_update_halo_kernel1_t1 * ydim3_update_halo_kernel1_t1; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim4_update_halo_kernel1_t1 * ydim4_update_halo_kernel1_t1; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim5_update_halo_kernel1_t1 * ydim5_update_halo_kernel1_t1; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim6_update_halo_kernel1_t1 * ydim6_update_halo_kernel1_t1; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_t1(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 8, range, 59)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(59, "update_halo_kernel1_t1"); OPS_kernels[59].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel1_t1_h || ydim0 != ydim0_update_halo_kernel1_t1_h || xdim1 != 
xdim1_update_halo_kernel1_t1_h || ydim1 != ydim1_update_halo_kernel1_t1_h || xdim2 != xdim2_update_halo_kernel1_t1_h || ydim2 != ydim2_update_halo_kernel1_t1_h || xdim3 != xdim3_update_halo_kernel1_t1_h || ydim3 != ydim3_update_halo_kernel1_t1_h || xdim4 != xdim4_update_halo_kernel1_t1_h || ydim4 != ydim4_update_halo_kernel1_t1_h || xdim5 != xdim5_update_halo_kernel1_t1_h || ydim5 != ydim5_update_halo_kernel1_t1_h || xdim6 != xdim6_update_halo_kernel1_t1_h || ydim6 != ydim6_update_halo_kernel1_t1_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int)); xdim0_update_halo_kernel1_t1_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel1_t1, &ydim0, sizeof(int)); ydim0_update_halo_kernel1_t1_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int)); xdim1_update_halo_kernel1_t1_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel1_t1, &ydim1, sizeof(int)); ydim1_update_halo_kernel1_t1_h = ydim1; hipMemcpyToSymbol(xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int)); xdim2_update_halo_kernel1_t1_h = xdim2; hipMemcpyToSymbol(ydim2_update_halo_kernel1_t1, &ydim2, sizeof(int)); ydim2_update_halo_kernel1_t1_h = ydim2; hipMemcpyToSymbol(xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int)); xdim3_update_halo_kernel1_t1_h = xdim3; hipMemcpyToSymbol(ydim3_update_halo_kernel1_t1, &ydim3, sizeof(int)); ydim3_update_halo_kernel1_t1_h = ydim3; hipMemcpyToSymbol(xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int)); xdim4_update_halo_kernel1_t1_h = xdim4; hipMemcpyToSymbol(ydim4_update_halo_kernel1_t1, &ydim4, sizeof(int)); ydim4_update_halo_kernel1_t1_h = ydim4; hipMemcpyToSymbol(xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int)); xdim5_update_halo_kernel1_t1_h = xdim5; hipMemcpyToSymbol(ydim5_update_halo_kernel1_t1, &ydim5, sizeof(int)); ydim5_update_halo_kernel1_t1_h = ydim5; hipMemcpyToSymbol(xdim6_update_halo_kernel1_t1, &xdim6, sizeof(int)); xdim6_update_halo_kernel1_t1_h = xdim6; hipMemcpyToSymbol(ydim6_update_halo_kernel1_t1, &ydim6, sizeof(int)); ydim6_update_halo_kernel1_t1_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; char *p_a[8]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = 
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; ops_H_D_exchanges_device(args, 8); 
ops_halo_exchanges(args, 8, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[59].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel1_t1), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[59].time += t1 - t2; } ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[5], range); ops_set_halo_dirtybit3(&args[6], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[59].mpi_time += t2 - t1; OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg6); } }
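The OPS_ACC macros in the kernel above flatten a relative (x, y, z) stencil offset into a 1D offset using per-dataset pitches held in __constant__ memory. A minimal standalone sketch of that indexing, under hypothetical names:

// Flatten a stencil offset (x, y, z), relative to the thread's base element,
// into a 1D offset using the padded x/y extents of the dataset - the role the
// xdimN_* / ydimN_* __constant__ values play in the generated kernel above.
__host__ __device__ inline int acc3d(int x, int y, int z, int xdim, int ydim)
{
  return x + xdim * y + xdim * ydim * z;
}

// Example: the halo copy density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,-1,0)]
// corresponds, for a pointer already advanced to the thread's element, to
//   density0[acc3d(0, 0, 0, xdim0, ydim0)] = density0[acc3d(0, -1, 0, xdim0, ydim0)];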
807b4c4fceb6487540e33b253b762c0850a80d41.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_t1; int xdim0_update_halo_kernel1_t1_h = -1; __constant__ int ydim0_update_halo_kernel1_t1; int ydim0_update_halo_kernel1_t1_h = -1; __constant__ int xdim1_update_halo_kernel1_t1; int xdim1_update_halo_kernel1_t1_h = -1; __constant__ int ydim1_update_halo_kernel1_t1; int ydim1_update_halo_kernel1_t1_h = -1; __constant__ int xdim2_update_halo_kernel1_t1; int xdim2_update_halo_kernel1_t1_h = -1; __constant__ int ydim2_update_halo_kernel1_t1; int ydim2_update_halo_kernel1_t1_h = -1; __constant__ int xdim3_update_halo_kernel1_t1; int xdim3_update_halo_kernel1_t1_h = -1; __constant__ int ydim3_update_halo_kernel1_t1; int ydim3_update_halo_kernel1_t1_h = -1; __constant__ int xdim4_update_halo_kernel1_t1; int xdim4_update_halo_kernel1_t1_h = -1; __constant__ int ydim4_update_halo_kernel1_t1; int ydim4_update_halo_kernel1_t1_h = -1; __constant__ int xdim5_update_halo_kernel1_t1; int xdim5_update_halo_kernel1_t1_h = -1; __constant__ int ydim5_update_halo_kernel1_t1; int ydim5_update_halo_kernel1_t1_h = -1; __constant__ int xdim6_update_halo_kernel1_t1; int xdim6_update_halo_kernel1_t1_h = -1; __constant__ int ydim6_update_halo_kernel1_t1; int ydim6_update_halo_kernel1_t1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel1_t1 * (y) + \ xdim0_update_halo_kernel1_t1 * ydim0_update_halo_kernel1_t1 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel1_t1 * (y) + \ xdim1_update_halo_kernel1_t1 * ydim1_update_halo_kernel1_t1 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_update_halo_kernel1_t1 * (y) + \ xdim2_update_halo_kernel1_t1 * ydim2_update_halo_kernel1_t1 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_update_halo_kernel1_t1 * (y) + \ xdim3_update_halo_kernel1_t1 * ydim3_update_halo_kernel1_t1 * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_update_halo_kernel1_t1 * (y) + \ xdim4_update_halo_kernel1_t1 * ydim4_update_halo_kernel1_t1 * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_update_halo_kernel1_t1 * (y) + \ xdim5_update_halo_kernel1_t1 * ydim5_update_halo_kernel1_t1 * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_update_halo_kernel1_t1 * (y) + \ xdim6_update_halo_kernel1_t1 * ydim6_update_halo_kernel1_t1 * (z)) // user function __device__ inline void update_halo_kernel1_t1(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed, const int *fields) { if (fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, -1, 0)]; if (fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, -1, 0)]; if (fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, -1, 0)]; if (fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, -1, 0)]; if (fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, -1, 0)]; if (fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, -1, 0)]; if (fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, -1, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_t1(double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, double *__restrict arg3, double *__restrict arg4, double *__restrict arg5, double *__restrict arg6, const 
int *__restrict arg7, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim0_update_halo_kernel1_t1 * ydim0_update_halo_kernel1_t1; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim1_update_halo_kernel1_t1 * ydim1_update_halo_kernel1_t1; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim2_update_halo_kernel1_t1 * ydim2_update_halo_kernel1_t1; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim3_update_halo_kernel1_t1 * ydim3_update_halo_kernel1_t1; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim4_update_halo_kernel1_t1 * ydim4_update_halo_kernel1_t1; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim5_update_halo_kernel1_t1 * ydim5_update_halo_kernel1_t1; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_t1 + idx_z * 1 * 1 * xdim6_update_halo_kernel1_t1 * ydim6_update_halo_kernel1_t1; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_t1(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 8, range, 59)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(59, "update_halo_kernel1_t1"); OPS_kernels[59].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel1_t1_h || ydim0 != ydim0_update_halo_kernel1_t1_h || xdim1 != xdim1_update_halo_kernel1_t1_h || ydim1 != ydim1_update_halo_kernel1_t1_h || xdim2 != 
xdim2_update_halo_kernel1_t1_h || ydim2 != ydim2_update_halo_kernel1_t1_h || xdim3 != xdim3_update_halo_kernel1_t1_h || ydim3 != ydim3_update_halo_kernel1_t1_h || xdim4 != xdim4_update_halo_kernel1_t1_h || ydim4 != ydim4_update_halo_kernel1_t1_h || xdim5 != xdim5_update_halo_kernel1_t1_h || ydim5 != ydim5_update_halo_kernel1_t1_h || xdim6 != xdim6_update_halo_kernel1_t1_h || ydim6 != ydim6_update_halo_kernel1_t1_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int)); xdim0_update_halo_kernel1_t1_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel1_t1, &ydim0, sizeof(int)); ydim0_update_halo_kernel1_t1_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int)); xdim1_update_halo_kernel1_t1_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel1_t1, &ydim1, sizeof(int)); ydim1_update_halo_kernel1_t1_h = ydim1; cudaMemcpyToSymbol(xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int)); xdim2_update_halo_kernel1_t1_h = xdim2; cudaMemcpyToSymbol(ydim2_update_halo_kernel1_t1, &ydim2, sizeof(int)); ydim2_update_halo_kernel1_t1_h = ydim2; cudaMemcpyToSymbol(xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int)); xdim3_update_halo_kernel1_t1_h = xdim3; cudaMemcpyToSymbol(ydim3_update_halo_kernel1_t1, &ydim3, sizeof(int)); ydim3_update_halo_kernel1_t1_h = ydim3; cudaMemcpyToSymbol(xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int)); xdim4_update_halo_kernel1_t1_h = xdim4; cudaMemcpyToSymbol(ydim4_update_halo_kernel1_t1, &ydim4, sizeof(int)); ydim4_update_halo_kernel1_t1_h = ydim4; cudaMemcpyToSymbol(xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int)); xdim5_update_halo_kernel1_t1_h = xdim5; cudaMemcpyToSymbol(ydim5_update_halo_kernel1_t1, &ydim5, sizeof(int)); ydim5_update_halo_kernel1_t1_h = ydim5; cudaMemcpyToSymbol(xdim6_update_halo_kernel1_t1, &xdim6, sizeof(int)); xdim6_update_halo_kernel1_t1_h = xdim6; cudaMemcpyToSymbol(ydim6_update_halo_kernel1_t1, &ydim6, sizeof(int)); ydim6_update_halo_kernel1_t1_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; char *p_a[8]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else 
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); 
OPS_kernels[59].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel1_t1<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[59].time += t1 - t2; } ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[5], range); ops_set_halo_dirtybit3(&args[6], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[59].mpi_time += t2 - t1; OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg6); } }
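Both versions of this host stub upload the dataset extents to __constant__ symbols only when they differ from the cached *_h values, so repeated launches with unchanged sizes skip the symbol copies. A small sketch of that caching idiom, with hypothetical names:

#include <cuda_runtime.h>

__constant__ int xdim_sketch;   // extent read by the device kernel
static int xdim_sketch_h = -1;  // host-side cache of the last uploaded value

// Upload the extent only when it changed since the previous launch, mirroring
// the xdim*_update_halo_kernel1_t1_h comparisons in the host stub above.
static void setXdimIfChanged(int xdim)
{
  if (xdim != xdim_sketch_h) {
    cudaMemcpyToSymbol(xdim_sketch, &xdim, sizeof(int));
    xdim_sketch_h = xdim;
  }
}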
eeb4be89b4ceb4b40954bd00ba8e85105b72e129.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstring> #include <glm/gtc/matrix_inverse.hpp> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <stb_image.h> #include "thirdparty/tiny_obj_loader.h" #include "scene.h" const dim3 IMAGE_PROCESS_BLOCK_SIZE(16, 16, 1); const size_t Background::BACKGROUND_MATERIAL_INDEX = std::numeric_limits<size_t>::max(); const size_t Background::COLOR_RAMP_MATERIAL_INDEX = std::numeric_limits<size_t>::max() - 1; #define LOAD_UINT8_TEXTURE 1 #if LOAD_UINT8_TEXTURE using STBPixelType = stbi_uc; #define STBI_LOAD stbi_load #else // LOAD_UINT8_TEXTURE using STBPixelType = float; #define STBI_LOAD stbi_loadf #endif // LOAD_UINT8_TEXTURE #define DEBUG_BVH_CONSTRUCTION 0//1 #if BUILD_BVH_FOR_TRIMESH void buildBVH_CPU_NaiveRecursive(BoundingVolumeHierarchy<Triangle>& BVH, Triangle* geoms, int geomStart, int geomEnd, int rootIdx, float expand, int level = 0) { if (geomStart > geomEnd) { // No triangles. return; } else if (geomStart == geomEnd) { // Leaf node. BVH.nodesArray[rootIdx].box = BBox::getLocalBoundingBox(geoms[geomStart], expand); #if SORT_BEFORE_BUILD_BVH geoms[geomStart].triangleid = geomStart; // Because you have already sorted triangles, reassign triangleId. #endif // SORT_BEFORE_BUILD_BVH BVH.nodesArray[rootIdx].geomIdx = geoms[geomStart].triangleid; BVH.nodesArray[rootIdx].leftSubtreeIdx = -1; BVH.nodesArray[rootIdx].rightSubtreeIdx = -1; #if DEBUG_BVH_CONSTRUCTION BBox box = BVH.nodesArray[rootIdx].box; printf("==Leaf: nodes[%d, %d].box={<%f,%f,%f>, <%f,%f,%f>} with geoms leaf[%d]->%d\n", rootIdx, level, box.minP.x, box.minP.y, box.minP.z, box.maxP.x, box.maxP.y, box.maxP.z, geomStart, geoms[geomStart].triangleid); #endif // DEBUG_BVH_CONSTRUCTION return; } static struct GeomSortPredicate { GeomSortPredicate(i32 axis) : axis(axis) {} bool operator()(const Triangle& t1, const Triangle& t2) const { return BBox::getLocalBoundingBox(t1).getCenter()[axis] < BBox::getLocalBoundingBox(t2).getCenter()[axis]; } i32 axis = 0; } const predicates[]{ GeomSortPredicate(0), GeomSortPredicate(1), GeomSortPredicate(2) }; // geoms is unused after that so just sort if necessary. 
BBox box = BBox::getLocalBoundingBox(geoms[geomStart], expand); for (int i = geomStart + 1; i <= geomEnd; ++i) { #if DEBUG_BVH_CONSTRUCTION > 1 printf("--For i = %d: nodes[%d, %d].box={<%f,%f,%f>, <%f,%f,%f>} with geoms[%d:%d]\n", i - 1, rootIdx, level, box.minP.x, box.minP.y, box.minP.z, box.maxP.x, box.maxP.y, box.maxP.z, geomStart, i - 1); #endif // DEBUG_BVH_CONSTRUCTION box += BBox::getLocalBoundingBox(geoms[i], expand); } #if DEBUG_BVH_CONSTRUCTION printf("nodes[%d, %d].box={<%f,%f,%f>, <%f,%f,%f>} with geoms[%d:%d]\n", rootIdx, level, box.minP.x, box.minP.y, box.minP.z, box.maxP.x, box.maxP.y, box.maxP.z, geomStart, geomEnd); #endif // DEBUG_BVH_CONSTRUCTION i32 axis = box.getMaxDistAxis(); std::sort(geoms + geomStart, geoms + geomEnd + 1, predicates[axis]); int geomMiddle = geomStart + ((geomEnd - geomStart) >> 1); int leftRoot = (rootIdx << 1) + 1; int rightRoot = (rootIdx << 1) + 2; BVH.nodesArray[rootIdx].box = box; BVH.nodesArray[rootIdx].geomIdx = -1; BVH.nodesArray[rootIdx].leftSubtreeIdx = leftRoot; BVH.nodesArray[rootIdx].rightSubtreeIdx = rightRoot; buildBVH_CPU_NaiveRecursive(BVH, geoms, geomStart, geomMiddle, leftRoot, expand, level + 1); buildBVH_CPU_NaiveRecursive(BVH, geoms, geomMiddle + 1, geomEnd, rightRoot, expand, level + 1); } template<> __host__ void BoundingVolumeHierarchy<Triangle>::buildBVH_CPU(Triangle* geoms, int geomNum, float expand) { if (geomNum == 0) { return; } nodeNum = (geomNum << 1) - 1; i32 maxNodeNum = 1; treeHeight = 0; for (i32 i = 2; i - 2 < nodeNum; i <<= 1) { // 0(2)| 1(3), 2(4)| 3(5), 4(6), 5(7), 6(8)| ... ++treeHeight; maxNodeNum <<= 1; } nodeNum = (1 << (treeHeight + 1)) - 1; BoundingVolumeHierarchy<Triangle> BVHCPU(*this); //std::vector<BVHNode> nodesCPU(nodeNum); //BVHCPU.nodesArray = nodesCPU.data(); //BVHCPU.nodesArray = new BVHNode[nodeNum]; hipHostMalloc(&BVHCPU.nodesArray, sizeof(BVHNode) * nodeNum); hipMalloc(&nodesArray, sizeof(BVHNode) * nodeNum); hipDeviceSynchronize(); for (size_t i = 0; i < nodeNum; ++i) { BVHCPU.nodesArray[i] = BVHNode(); } buildBVH_CPU_NaiveRecursive(BVHCPU, geoms, 0, geomNum - 1, 0, expand); hipMemcpy(nodesArray, BVHCPU.nodesArray, sizeof(BVHNode) * nodeNum, hipMemcpyHostToDevice); hipHostFree(BVHCPU.nodesArray); hipDeviceSynchronize(); //delete[] BVHCPU.nodesArray; printf("Initialize BVH with %d nodes, %d leaves, with height %d.\n", nodeNum, geomNum, treeHeight); checkCUDAError("buildBVH"); } #endif // BUILD_BVH_FOR_TRIMESH __global__ void kernInvGammaCorrection(glm::vec3* dst, STBPixelType* src, int x, int y, int channel) { int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; if (idxX < x && idxY < y) { int index = Texture2D<glm::vec3>::index2Dto1D(glm::ivec2(x, y), idxY, idxX); glm::vec3 color; #pragma unroll for (int c = 0; c < channel && c < 3; ++c) { #if LOAD_UINT8_TEXTURE float comp = glm::clamp(src[index * channel + c] / 255.f, 0.f, 1.f); #else // LOAD_UINT8_TEXTURE float comp = glm::clamp(src[index * channel + c], 0.f, 1.f); #endif // LOAD_UINT8_TEXTURE comp = powf(comp, 2.2); color[c] = glm::clamp(comp, 0.f, 1.f); } dst[index] = color; } } __global__ void kernGammaCorrection(Texture2D<glm::vec3> image) { int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; if (idxX < image.size.x && idxY < image.size.y) { int index = Texture2D<glm::vec3>::index2Dto1D(glm::ivec2(image.size.x, image.size.y), idxY, idxX); glm::vec3 srcColor = image.buffer[index]; glm::vec3 dstColor; dstColor.r = powf(glm::clamp(srcColor.r, 
0.f, 1.f), 1.f / 2.2f); dstColor.g = powf(glm::clamp(srcColor.g, 0.f, 1.f), 1.f / 2.2f); dstColor.b = powf(glm::clamp(srcColor.b, 0.f, 1.f), 1.f / 2.2f); image.buffer[index] = dstColor; } } void Scene::addTextureToLoad(size_t id, i64 varOffset, const std::string& filename) { auto& pkg = texturePackage; auto it = pkg.dstToAddrMap.find(id); std::unordered_map<i64, std::string>* map_ptr = nullptr; if (it != pkg.dstToAddrMap.end()) { map_ptr = &it->second; } else { pkg.dstToAddrMap[id] = std::unordered_map<i64, std::string>(); map_ptr = &pkg.dstToAddrMap[id]; } (*map_ptr)[varOffset] = filename; } Texture2D<glm::vec3> Scene::loadTexture(const std::string& filename) { std::cout << "Loading Texture from " << filename << "..." << std::endl; auto& pkg = texturePackage; auto it = pkg.idMap.find(filename); if (it != pkg.idMap.end()) { std::cout << "Texture " << filename << " is already exist.\n" << std::endl; return pkg.resources[it->second]; } pkg.idMap[filename] = pkg.resources.size(); pkg.resources.emplace_back(); auto& res = pkg.resources.back(); //Texture2D<glm::vec3>& tex = textureBuffers[i]; int x, y, channel; stbi_set_flip_vertically_on_load(1); std::string extension = utilityCore::getFileExtension(filename); if (stricmp(extension.c_str(), "hdr") == 0) { float* imageCPU = stbi_loadf(filename.c_str(), &x, &y, &channel, 0); if (!imageCPU) { std::cout << "Texture " << filename << ": failed to load.\n" << std::endl; } hipMalloc(&res.buffer, sizeof(glm::vec3) * x * y); hipMemcpy(res.buffer, imageCPU, sizeof(float) * x * y * channel, hipMemcpyHostToDevice); hipDeviceSynchronize(); stbi_image_free(imageCPU); } else { STBPixelType* imageCPU = STBI_LOAD(filename.c_str(), &x, &y, &channel, 0); if (!imageCPU) { std::cout << "Texture " << filename << ": failed to load.\n" << std::endl; } hipMalloc(&res.buffer, sizeof(glm::vec3) * x * y); STBPixelType* imageGPU; hipMalloc(&imageGPU, sizeof(STBPixelType) * x * y * channel); hipMemcpy(imageGPU, imageCPU, sizeof(STBPixelType) * x * y * channel, hipMemcpyHostToDevice); dim3 blockCount((x + IMAGE_PROCESS_BLOCK_SIZE.x - 1) / IMAGE_PROCESS_BLOCK_SIZE.x, (y + IMAGE_PROCESS_BLOCK_SIZE.y - 1) / IMAGE_PROCESS_BLOCK_SIZE.y, 1); hipLaunchKernelGGL(( kernInvGammaCorrection), dim3(blockCount), dim3(IMAGE_PROCESS_BLOCK_SIZE), 0, 0, res.buffer, imageGPU, x, y, channel); checkCUDAError("kernInvGammaCorrection"); hipFree(imageGPU); hipDeviceSynchronize(); stbi_image_free(imageCPU); } res.size.x = x; res.size.y = y; checkCUDAError("hipFree imageGPU"); std::cout << "Texture " << filename << '<' << x << ',' << y << ',' << channel << "> created.\n" << std::endl; return res; } void Scene::initTextures() { for (auto& materialToTexturePair : texturePackage.dstToAddrMap) { size_t materialId = materialToTexturePair.first; switch (materialId) { case Background::BACKGROUND_MATERIAL_INDEX: { for (auto& textureFilePair : materialToTexturePair.second) { const std::string& filename = textureFilePair.second; Texture2D<glm::vec3>* texture_ptr = utilityCore::getPtrInStruct<Texture2D<glm::vec3>>(&background, textureFilePair.first); *texture_ptr = loadTexture(filename); } } break; case Background::COLOR_RAMP_MATERIAL_INDEX: { for (auto& textureFilePair : materialToTexturePair.second) { const std::string& filename = textureFilePair.second; Texture2D<glm::vec3>* texture_ptr = utilityCore::getPtrInStruct<Texture2D<glm::vec3>>(&rampMap, textureFilePair.first); *texture_ptr = loadTexture(filename); } } break; default: { Material& material = materials[materialId]; for (auto& textureFilePair : 
materialToTexturePair.second) { const std::string& filename = textureFilePair.second; Texture2D<glm::vec3>* texture_ptr = utilityCore::getPtrInStruct<Texture2D<glm::vec3>>(&material, textureFilePair.first); *texture_ptr = loadTexture(filename); } } break; } } } void Scene::freeTextures() { for (auto& tex : texturePackage.resources) { hipFree(tex.buffer); } hipDeviceSynchronize(); checkCUDAError("hipFree textures"); } void Scene::addModelToLoad(size_t id, i64 varOffset, const std::string& filename) { auto& pkg = modelPackage; auto it = pkg.dstToAddrMap.find(id); std::unordered_map<i64, std::string>* map_ptr = nullptr; if (it != pkg.dstToAddrMap.end()) { map_ptr = &it->second; } else { pkg.dstToAddrMap[id] = std::unordered_map<i64, std::string>(); map_ptr = &pkg.dstToAddrMap[id]; } (*map_ptr)[varOffset] = filename; } TriMesh Scene::loadModelObj(const std::string& filename) { std::cout << "Loading Model from " << filename << "..." << std::endl; auto& pkg = modelPackage; auto it = pkg.idMap.find(filename); if (it != pkg.idMap.end()) { std::cout << "Model " << filename << " is already exist.\n" << std::endl; return pkg.resources[it->second]; } pkg.idMap[filename] = pkg.resources.size(); pkg.resources.emplace_back(); auto& res = pkg.resources.back(); std::vector<tinyobj::shape_t> shapes; std::vector<tinyobj::material_t> materials; tinyobj::LoadObj(shapes, materials, filename.c_str()); res.triangleNum = 0; for (tinyobj::shape_t& shape : shapes) { res.triangleNum += shape.mesh.indices.size() / 3; } std::vector<Triangle> tris(res.triangleNum); size_t triIdx = 0; for (tinyobj::shape_t& shape : shapes) { for (int i = 0; i < shape.mesh.indices.size(); i += 3) { auto& tri = tris[triIdx]; tri.triangleid = triIdx; tri.twoSided = true; //memcpy(tri.position, &shape.mesh.positions[i0 * 3], sizeof(tri.position)); //memcpy(tri.normal, &shape.mesh.normals[i0 * 3], sizeof(tri.normal)); //memcpy(tri.uv0, &shape.mesh.texcoords[i0 * 2], sizeof(tri.uv0)); size_t i0 = shape.mesh.indices[i]; size_t i1 = shape.mesh.indices[i + 1]; size_t i2 = shape.mesh.indices[i + 2]; memcpy(&tri.pos0, &shape.mesh.positions[i0 * 3], sizeof(tri.pos0)); memcpy(&tri.pos1, &shape.mesh.positions[i1 * 3], sizeof(tri.pos1)); memcpy(&tri.pos2, &shape.mesh.positions[i2 * 3], sizeof(tri.pos2)); if (i0 * 3 < shape.mesh.normals.size()) { memcpy(&tri.nrm0, &shape.mesh.normals[i0 * 3], sizeof(tri.nrm0)); } if (i1 * 3 < shape.mesh.normals.size()) { memcpy(&tri.nrm1, &shape.mesh.normals[i1 * 3], sizeof(tri.nrm1)); } if (i2 * 3 < shape.mesh.normals.size()) { memcpy(&tri.nrm2, &shape.mesh.normals[i2 * 3], sizeof(tri.nrm2)); } if (i0 * 2 < shape.mesh.texcoords.size()) { memcpy(&tri.uv00, &shape.mesh.texcoords[i0 * 2], sizeof(tri.uv00)); } if (i1 * 2 < shape.mesh.texcoords.size()) { memcpy(&tri.uv01, &shape.mesh.texcoords[i1 * 2], sizeof(tri.uv01)); } if (i2 * 2 < shape.mesh.texcoords.size()) { memcpy(&tri.uv02, &shape.mesh.texcoords[i2 * 2], sizeof(tri.uv02)); } ++triIdx; } } #if BUILD_BVH_FOR_TRIMESH #if SORT_BEFORE_BUILD_BVH res.localBVH.buildBVH_CPU(tris.data(), tris.size(), FLT_EPSILON); #endif // SORT_BEFORE_BUILD_BVH #endif // BUILD_BVH_FOR_TRIMESH hipMalloc(&res.triangles, sizeof(Triangle) * res.triangleNum); hipMemcpy(res.triangles, tris.data(), sizeof(Triangle) * res.triangleNum, hipMemcpyHostToDevice); hipDeviceSynchronize(); checkCUDAError("loadModelObj"); #if BUILD_BVH_FOR_TRIMESH #if !SORT_BEFORE_BUILD_BVH res.localBVH.buildBVH_CPU(tris.data(), tris.size(), FLT_EPSILON); #endif // SORT_BEFORE_BUILD_BVH #endif // BUILD_BVH_FOR_TRIMESH 
std::cout << "Model " << filename << '<' << res.triangleNum << "> created.\n" << std::endl; return res; } void Scene::initModels() { for(auto& geomToModelPair : modelPackage.dstToAddrMap) { size_t geomId = geomToModelPair.first; Geom& geom = geoms[geomId]; for (auto& modelFilePair : geomToModelPair.second) { const std::string& filename = modelFilePair.second; TriMesh* model_ptr = utilityCore::getPtrInStruct<TriMesh>(&geom, modelFilePair.first); std::string extension = utilityCore::getFileExtension(filename); if (stricmp("obj", extension.c_str()) == 0) { *model_ptr = loadModelObj(filename); } //TODO: Other model format. } //TODO: Build BVH if necessary. } } void Scene::freeModels() { for (auto& mdl : modelPackage.resources) { hipFree(mdl.triangles); } hipDeviceSynchronize(); checkCUDAError("hipFree models"); } void Scene::initGBuffer() { dev_frameBuffer.size = state.camera.resolution; hipMalloc(&dev_frameBuffer.buffer, sizeof(glm::vec3) * dev_frameBuffer.size.x * dev_frameBuffer.size.y); dev_GBuffer.size = state.camera.resolution; hipMalloc(&dev_GBuffer.buffer, sizeof(GBufferData) * dev_GBuffer.size.x * dev_GBuffer.size.y); hipDeviceSynchronize(); std::cout << "Initialized frame buffer " << dev_frameBuffer.size.x << " x " << dev_frameBuffer.size.y << std::endl; std::cout << "Initialized G-buffer " << dev_GBuffer.size.x << " x " << dev_GBuffer.size.y << std::endl; checkCUDAError("hipMalloc GBuffer"); } void Scene::freeGBuffer() { hipFree(dev_GBuffer.buffer); hipFree(dev_frameBuffer.buffer); hipDeviceSynchronize(); checkCUDAError("hipFree GBuffer"); } namespace PostProcessGPU { #if !PREGATHER_FINAL_IMAGE __global__ void dividedByIter(Texture2D<glm::vec3> dst, Texture2D<glm::vec3> src, float iter) { int idxX = blockDim.x * blockIdx.x + threadIdx.x; int idxY = blockDim.y * blockIdx.y + threadIdx.y; if (idxX < dst.size.x && idxY < dst.size.y) { dst.setPixelByHW(idxY, idxX, src.getPixelByHW(idxY, idxX) / iter); } } #endif // PREGATHER_FINAL_IMAGE extern __global__ void postProcess_ColorRamp( Texture2D<glm::vec3> imageTexture, Texture2D<GBufferData> gBuffer, Texture2D<glm::vec3> rampTexture); extern __global__ void postProcess_OutlineByStencil( Texture2D<glm::vec3> imageTexture, Texture2D<GBufferData> gBuffer, int stencilId, glm::vec3 outlineColor, int outlineWidth); } glm::vec3* Scene::postProcessGPU(glm::vec3* dev_image, PathSegment* dev_paths, const dim3 blocksPerGrid2d, const dim3 blockSize2d, int iter) const { Texture2D<glm::vec3> imageTexture; imageTexture.buffer = dev_image; imageTexture.size = dev_GBuffer.size; #if !PREGATHER_FINAL_IMAGE hipLaunchKernelGGL(( PostProcessGPU::dividedByIter), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, dev_frameBuffer, imageTexture, iter); #else // PREGATHER_FINAL_IMAGE hipMemcpy(dev_frameBuffer.buffer, dev_image, sizeof(glm::vec3) * dev_GBuffer.size.x * dev_GBuffer.size.y, hipMemcpyDeviceToDevice); #endif // PREGATHER_FINAL_IMAGE for (size_t i = 0; i < postprocesses.size(); ++i) { auto& pppair = postprocesses[i]; if (!pppair.second) { continue; } PostProcessType pptype = pppair.first; switch (pptype) { case PostProcessType::COLOR_RAMP: if (rampMap.isReadable()) { hipLaunchKernelGGL(( PostProcessGPU::postProcess_ColorRamp), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, dev_frameBuffer, dev_GBuffer, rampMap); } break; case PostProcessType::OUTLINE_BY_STENCIL: { int stencilId = ppToStencilMap.at(i); auto colorWidthPair = stencilOutlineColorWidths.at(stencilId); //printf("%d<%f,%f,%f>%d\n", stencilId, colorWidthPair.first.r, colorWidthPair.first.g, 
colorWidthPair.first.b, colorWidthPair.second); hipLaunchKernelGGL(( PostProcessGPU::postProcess_OutlineByStencil), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, dev_frameBuffer, dev_GBuffer, stencilId, colorWidthPair.first, colorWidthPair.second); } break; } checkCUDAError(("postprocess" + std::to_string(i)).c_str()); } hipLaunchKernelGGL(( kernGammaCorrection), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, dev_frameBuffer); checkCUDAError("gamma correction"); return dev_frameBuffer.buffer; } __global__ void PostProcessGPU::postProcess_ColorRamp(Texture2D<glm::vec3> imageTexture, Texture2D<GBufferData> gBuffer, Texture2D<glm::vec3> rampTexture) { int idxX = blockDim.x * blockIdx.x + threadIdx.x; int idxY = blockDim.y * blockIdx.y + threadIdx.y; if (idxX < imageTexture.size.x && idxY < imageTexture.size.y) { //GBufferData gBufferData = gBuffer.getPixelByHW(idxY, idxX); glm::vec3 color = imageTexture.getPixelByHW(idxY, idxX); color = glm::clamp(color, 0.f, 1.f); glm::vec3 ramp; ramp.r = rampTexture.getPixelByUV(color.r, 0.5f).r; ramp.g = rampTexture.getPixelByUV(color.g, 0.5f).g; ramp.b = rampTexture.getPixelByUV(color.b, 0.5f).b; //printf("ramp of <%f,%f,%f> is <%f,%f,%f>\n", color.r, color.g, color.b, ramp.r, ramp.g, ramp.b); int index = Texture2D<glm::vec3>::index2Dto1D(imageTexture.size, idxY, idxX); imageTexture.buffer[index] = ramp;//glm::clamp(ramp, 0.f, 1.f); } } __global__ void PostProcessGPU::postProcess_OutlineByStencil(Texture2D<glm::vec3> imageTexture, Texture2D<GBufferData> gBuffer, int stencilId, glm::vec3 outlineColor, int outlineWidth) { int idxX = blockDim.x * blockIdx.x + threadIdx.x; int idxY = blockDim.y * blockIdx.y + threadIdx.y; if (idxX < imageTexture.size.x && idxY < imageTexture.size.y) { GBufferData data = gBuffer.getPixelByHW(idxY, idxX); if (data.stencilId == stencilId) { return; } //printf("%d,%d stencil = %d\n", idxX, idxY, data.stencilId); for (int y = glm::max(0, idxY - outlineWidth); y <= glm::min(imageTexture.size.y - 1, idxY + outlineWidth); ++y) { for (int x = glm::max(0, idxX - outlineWidth); x <= glm::min(imageTexture.size.x - 1, idxX + outlineWidth); ++x) { GBufferData data1 = gBuffer.getPixelByHW(y, x); if (data1.stencilId == stencilId) { imageTexture.setPixelByHW(idxY, idxX, outlineColor); return; } } } } }
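buildBVH_CPU above stores the hierarchy as an implicit complete binary tree: the children of node i sit at 2*i+1 and 2*i+2, and the node array is rounded up to 2^(treeHeight+1) - 1 entries. A brief sketch of that layout, using hypothetical helper names:

// Implicit complete-binary-tree layout used by the BVH above: child slots are
// computed from the parent index (the code stores leftSubtreeIdx/rightSubtreeIdx
// explicitly, but they are always derived this way).
inline int leftChild(int i)  { return (i << 1) + 1; }
inline int rightChild(int i) { return (i << 1) + 2; }

// A tree of the given height (root at level 0) occupies 2^(height+1) - 1 slots,
// which is how nodeNum is rounded up before nodesArray is allocated.
inline int nodeCountForHeight(int height) { return (1 << (height + 1)) - 1; }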
eeb4be89b4ceb4b40954bd00ba8e85105b72e129.cu
#include <iostream> #include <cstring> #include <glm/gtc/matrix_inverse.hpp> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <stb_image.h> #include "thirdparty/tiny_obj_loader.h" #include "scene.h" const dim3 IMAGE_PROCESS_BLOCK_SIZE(16, 16, 1); const size_t Background::BACKGROUND_MATERIAL_INDEX = std::numeric_limits<size_t>::max(); const size_t Background::COLOR_RAMP_MATERIAL_INDEX = std::numeric_limits<size_t>::max() - 1; #define LOAD_UINT8_TEXTURE 1 #if LOAD_UINT8_TEXTURE using STBPixelType = stbi_uc; #define STBI_LOAD stbi_load #else // LOAD_UINT8_TEXTURE using STBPixelType = float; #define STBI_LOAD stbi_loadf #endif // LOAD_UINT8_TEXTURE #define DEBUG_BVH_CONSTRUCTION 0//1 #if BUILD_BVH_FOR_TRIMESH void buildBVH_CPU_NaiveRecursive(BoundingVolumeHierarchy<Triangle>& BVH, Triangle* geoms, int geomStart, int geomEnd, int rootIdx, float expand, int level = 0) { if (geomStart > geomEnd) { // No triangles. return; } else if (geomStart == geomEnd) { // Leaf node. BVH.nodesArray[rootIdx].box = BBox::getLocalBoundingBox(geoms[geomStart], expand); #if SORT_BEFORE_BUILD_BVH geoms[geomStart].triangleid = geomStart; // Because you have already sorted triangles, reassign triangleId. #endif // SORT_BEFORE_BUILD_BVH BVH.nodesArray[rootIdx].geomIdx = geoms[geomStart].triangleid; BVH.nodesArray[rootIdx].leftSubtreeIdx = -1; BVH.nodesArray[rootIdx].rightSubtreeIdx = -1; #if DEBUG_BVH_CONSTRUCTION BBox box = BVH.nodesArray[rootIdx].box; printf("==Leaf: nodes[%d, %d].box={<%f,%f,%f>, <%f,%f,%f>} with geoms leaf[%d]->%d\n", rootIdx, level, box.minP.x, box.minP.y, box.minP.z, box.maxP.x, box.maxP.y, box.maxP.z, geomStart, geoms[geomStart].triangleid); #endif // DEBUG_BVH_CONSTRUCTION return; } static struct GeomSortPredicate { GeomSortPredicate(i32 axis) : axis(axis) {} bool operator()(const Triangle& t1, const Triangle& t2) const { return BBox::getLocalBoundingBox(t1).getCenter()[axis] < BBox::getLocalBoundingBox(t2).getCenter()[axis]; } i32 axis = 0; } const predicates[]{ GeomSortPredicate(0), GeomSortPredicate(1), GeomSortPredicate(2) }; // geoms is unused after that so just sort if necessary. 
BBox box = BBox::getLocalBoundingBox(geoms[geomStart], expand); for (int i = geomStart + 1; i <= geomEnd; ++i) { #if DEBUG_BVH_CONSTRUCTION > 1 printf("--For i = %d: nodes[%d, %d].box={<%f,%f,%f>, <%f,%f,%f>} with geoms[%d:%d]\n", i - 1, rootIdx, level, box.minP.x, box.minP.y, box.minP.z, box.maxP.x, box.maxP.y, box.maxP.z, geomStart, i - 1); #endif // DEBUG_BVH_CONSTRUCTION box += BBox::getLocalBoundingBox(geoms[i], expand); } #if DEBUG_BVH_CONSTRUCTION printf("nodes[%d, %d].box={<%f,%f,%f>, <%f,%f,%f>} with geoms[%d:%d]\n", rootIdx, level, box.minP.x, box.minP.y, box.minP.z, box.maxP.x, box.maxP.y, box.maxP.z, geomStart, geomEnd); #endif // DEBUG_BVH_CONSTRUCTION i32 axis = box.getMaxDistAxis(); std::sort(geoms + geomStart, geoms + geomEnd + 1, predicates[axis]); int geomMiddle = geomStart + ((geomEnd - geomStart) >> 1); int leftRoot = (rootIdx << 1) + 1; int rightRoot = (rootIdx << 1) + 2; BVH.nodesArray[rootIdx].box = box; BVH.nodesArray[rootIdx].geomIdx = -1; BVH.nodesArray[rootIdx].leftSubtreeIdx = leftRoot; BVH.nodesArray[rootIdx].rightSubtreeIdx = rightRoot; buildBVH_CPU_NaiveRecursive(BVH, geoms, geomStart, geomMiddle, leftRoot, expand, level + 1); buildBVH_CPU_NaiveRecursive(BVH, geoms, geomMiddle + 1, geomEnd, rightRoot, expand, level + 1); } template<> __host__ void BoundingVolumeHierarchy<Triangle>::buildBVH_CPU(Triangle* geoms, int geomNum, float expand) { if (geomNum == 0) { return; } nodeNum = (geomNum << 1) - 1; i32 maxNodeNum = 1; treeHeight = 0; for (i32 i = 2; i - 2 < nodeNum; i <<= 1) { // 0(2)| 1(3), 2(4)| 3(5), 4(6), 5(7), 6(8)| ... ++treeHeight; maxNodeNum <<= 1; } nodeNum = (1 << (treeHeight + 1)) - 1; BoundingVolumeHierarchy<Triangle> BVHCPU(*this); //std::vector<BVHNode> nodesCPU(nodeNum); //BVHCPU.nodesArray = nodesCPU.data(); //BVHCPU.nodesArray = new BVHNode[nodeNum]; cudaMallocHost(&BVHCPU.nodesArray, sizeof(BVHNode) * nodeNum); cudaMalloc(&nodesArray, sizeof(BVHNode) * nodeNum); cudaDeviceSynchronize(); for (size_t i = 0; i < nodeNum; ++i) { BVHCPU.nodesArray[i] = BVHNode(); } buildBVH_CPU_NaiveRecursive(BVHCPU, geoms, 0, geomNum - 1, 0, expand); cudaMemcpy(nodesArray, BVHCPU.nodesArray, sizeof(BVHNode) * nodeNum, cudaMemcpyHostToDevice); cudaFreeHost(BVHCPU.nodesArray); cudaDeviceSynchronize(); //delete[] BVHCPU.nodesArray; printf("Initialize BVH with %d nodes, %d leaves, with height %d.\n", nodeNum, geomNum, treeHeight); checkCUDAError("buildBVH"); } #endif // BUILD_BVH_FOR_TRIMESH __global__ void kernInvGammaCorrection(glm::vec3* dst, STBPixelType* src, int x, int y, int channel) { int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; if (idxX < x && idxY < y) { int index = Texture2D<glm::vec3>::index2Dto1D(glm::ivec2(x, y), idxY, idxX); glm::vec3 color; #pragma unroll for (int c = 0; c < channel && c < 3; ++c) { #if LOAD_UINT8_TEXTURE float comp = glm::clamp(src[index * channel + c] / 255.f, 0.f, 1.f); #else // LOAD_UINT8_TEXTURE float comp = glm::clamp(src[index * channel + c], 0.f, 1.f); #endif // LOAD_UINT8_TEXTURE comp = powf(comp, 2.2); color[c] = glm::clamp(comp, 0.f, 1.f); } dst[index] = color; } } __global__ void kernGammaCorrection(Texture2D<glm::vec3> image) { int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; if (idxX < image.size.x && idxY < image.size.y) { int index = Texture2D<glm::vec3>::index2Dto1D(glm::ivec2(image.size.x, image.size.y), idxY, idxX); glm::vec3 srcColor = image.buffer[index]; glm::vec3 dstColor; dstColor.r = 
powf(glm::clamp(srcColor.r, 0.f, 1.f), 1.f / 2.2f); dstColor.g = powf(glm::clamp(srcColor.g, 0.f, 1.f), 1.f / 2.2f); dstColor.b = powf(glm::clamp(srcColor.b, 0.f, 1.f), 1.f / 2.2f); image.buffer[index] = dstColor; } } void Scene::addTextureToLoad(size_t id, i64 varOffset, const std::string& filename) { auto& pkg = texturePackage; auto it = pkg.dstToAddrMap.find(id); std::unordered_map<i64, std::string>* map_ptr = nullptr; if (it != pkg.dstToAddrMap.end()) { map_ptr = &it->second; } else { pkg.dstToAddrMap[id] = std::unordered_map<i64, std::string>(); map_ptr = &pkg.dstToAddrMap[id]; } (*map_ptr)[varOffset] = filename; } Texture2D<glm::vec3> Scene::loadTexture(const std::string& filename) { std::cout << "Loading Texture from " << filename << "..." << std::endl; auto& pkg = texturePackage; auto it = pkg.idMap.find(filename); if (it != pkg.idMap.end()) { std::cout << "Texture " << filename << " is already exist.\n" << std::endl; return pkg.resources[it->second]; } pkg.idMap[filename] = pkg.resources.size(); pkg.resources.emplace_back(); auto& res = pkg.resources.back(); //Texture2D<glm::vec3>& tex = textureBuffers[i]; int x, y, channel; stbi_set_flip_vertically_on_load(1); std::string extension = utilityCore::getFileExtension(filename); if (stricmp(extension.c_str(), "hdr") == 0) { float* imageCPU = stbi_loadf(filename.c_str(), &x, &y, &channel, 0); if (!imageCPU) { std::cout << "Texture " << filename << ": failed to load.\n" << std::endl; } cudaMalloc(&res.buffer, sizeof(glm::vec3) * x * y); cudaMemcpy(res.buffer, imageCPU, sizeof(float) * x * y * channel, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); stbi_image_free(imageCPU); } else { STBPixelType* imageCPU = STBI_LOAD(filename.c_str(), &x, &y, &channel, 0); if (!imageCPU) { std::cout << "Texture " << filename << ": failed to load.\n" << std::endl; } cudaMalloc(&res.buffer, sizeof(glm::vec3) * x * y); STBPixelType* imageGPU; cudaMalloc(&imageGPU, sizeof(STBPixelType) * x * y * channel); cudaMemcpy(imageGPU, imageCPU, sizeof(STBPixelType) * x * y * channel, cudaMemcpyHostToDevice); dim3 blockCount((x + IMAGE_PROCESS_BLOCK_SIZE.x - 1) / IMAGE_PROCESS_BLOCK_SIZE.x, (y + IMAGE_PROCESS_BLOCK_SIZE.y - 1) / IMAGE_PROCESS_BLOCK_SIZE.y, 1); kernInvGammaCorrection<<<blockCount, IMAGE_PROCESS_BLOCK_SIZE>>>(res.buffer, imageGPU, x, y, channel); checkCUDAError("kernInvGammaCorrection"); cudaFree(imageGPU); cudaDeviceSynchronize(); stbi_image_free(imageCPU); } res.size.x = x; res.size.y = y; checkCUDAError("cudaFree imageGPU"); std::cout << "Texture " << filename << '<' << x << ',' << y << ',' << channel << "> created.\n" << std::endl; return res; } void Scene::initTextures() { for (auto& materialToTexturePair : texturePackage.dstToAddrMap) { size_t materialId = materialToTexturePair.first; switch (materialId) { case Background::BACKGROUND_MATERIAL_INDEX: { for (auto& textureFilePair : materialToTexturePair.second) { const std::string& filename = textureFilePair.second; Texture2D<glm::vec3>* texture_ptr = utilityCore::getPtrInStruct<Texture2D<glm::vec3>>(&background, textureFilePair.first); *texture_ptr = loadTexture(filename); } } break; case Background::COLOR_RAMP_MATERIAL_INDEX: { for (auto& textureFilePair : materialToTexturePair.second) { const std::string& filename = textureFilePair.second; Texture2D<glm::vec3>* texture_ptr = utilityCore::getPtrInStruct<Texture2D<glm::vec3>>(&rampMap, textureFilePair.first); *texture_ptr = loadTexture(filename); } } break; default: { Material& material = materials[materialId]; for (auto& textureFilePair : 
materialToTexturePair.second) { const std::string& filename = textureFilePair.second; Texture2D<glm::vec3>* texture_ptr = utilityCore::getPtrInStruct<Texture2D<glm::vec3>>(&material, textureFilePair.first); *texture_ptr = loadTexture(filename); } } break; } } } void Scene::freeTextures() { for (auto& tex : texturePackage.resources) { cudaFree(tex.buffer); } cudaDeviceSynchronize(); checkCUDAError("cudaFree textures"); } void Scene::addModelToLoad(size_t id, i64 varOffset, const std::string& filename) { auto& pkg = modelPackage; auto it = pkg.dstToAddrMap.find(id); std::unordered_map<i64, std::string>* map_ptr = nullptr; if (it != pkg.dstToAddrMap.end()) { map_ptr = &it->second; } else { pkg.dstToAddrMap[id] = std::unordered_map<i64, std::string>(); map_ptr = &pkg.dstToAddrMap[id]; } (*map_ptr)[varOffset] = filename; } TriMesh Scene::loadModelObj(const std::string& filename) { std::cout << "Loading Model from " << filename << "..." << std::endl; auto& pkg = modelPackage; auto it = pkg.idMap.find(filename); if (it != pkg.idMap.end()) { std::cout << "Model " << filename << " is already exist.\n" << std::endl; return pkg.resources[it->second]; } pkg.idMap[filename] = pkg.resources.size(); pkg.resources.emplace_back(); auto& res = pkg.resources.back(); std::vector<tinyobj::shape_t> shapes; std::vector<tinyobj::material_t> materials; tinyobj::LoadObj(shapes, materials, filename.c_str()); res.triangleNum = 0; for (tinyobj::shape_t& shape : shapes) { res.triangleNum += shape.mesh.indices.size() / 3; } std::vector<Triangle> tris(res.triangleNum); size_t triIdx = 0; for (tinyobj::shape_t& shape : shapes) { for (int i = 0; i < shape.mesh.indices.size(); i += 3) { auto& tri = tris[triIdx]; tri.triangleid = triIdx; tri.twoSided = true; //memcpy(tri.position, &shape.mesh.positions[i0 * 3], sizeof(tri.position)); //memcpy(tri.normal, &shape.mesh.normals[i0 * 3], sizeof(tri.normal)); //memcpy(tri.uv0, &shape.mesh.texcoords[i0 * 2], sizeof(tri.uv0)); size_t i0 = shape.mesh.indices[i]; size_t i1 = shape.mesh.indices[i + 1]; size_t i2 = shape.mesh.indices[i + 2]; memcpy(&tri.pos0, &shape.mesh.positions[i0 * 3], sizeof(tri.pos0)); memcpy(&tri.pos1, &shape.mesh.positions[i1 * 3], sizeof(tri.pos1)); memcpy(&tri.pos2, &shape.mesh.positions[i2 * 3], sizeof(tri.pos2)); if (i0 * 3 < shape.mesh.normals.size()) { memcpy(&tri.nrm0, &shape.mesh.normals[i0 * 3], sizeof(tri.nrm0)); } if (i1 * 3 < shape.mesh.normals.size()) { memcpy(&tri.nrm1, &shape.mesh.normals[i1 * 3], sizeof(tri.nrm1)); } if (i2 * 3 < shape.mesh.normals.size()) { memcpy(&tri.nrm2, &shape.mesh.normals[i2 * 3], sizeof(tri.nrm2)); } if (i0 * 2 < shape.mesh.texcoords.size()) { memcpy(&tri.uv00, &shape.mesh.texcoords[i0 * 2], sizeof(tri.uv00)); } if (i1 * 2 < shape.mesh.texcoords.size()) { memcpy(&tri.uv01, &shape.mesh.texcoords[i1 * 2], sizeof(tri.uv01)); } if (i2 * 2 < shape.mesh.texcoords.size()) { memcpy(&tri.uv02, &shape.mesh.texcoords[i2 * 2], sizeof(tri.uv02)); } ++triIdx; } } #if BUILD_BVH_FOR_TRIMESH #if SORT_BEFORE_BUILD_BVH res.localBVH.buildBVH_CPU(tris.data(), tris.size(), FLT_EPSILON); #endif // SORT_BEFORE_BUILD_BVH #endif // BUILD_BVH_FOR_TRIMESH cudaMalloc(&res.triangles, sizeof(Triangle) * res.triangleNum); cudaMemcpy(res.triangles, tris.data(), sizeof(Triangle) * res.triangleNum, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); checkCUDAError("loadModelObj"); #if BUILD_BVH_FOR_TRIMESH #if !SORT_BEFORE_BUILD_BVH res.localBVH.buildBVH_CPU(tris.data(), tris.size(), FLT_EPSILON); #endif // SORT_BEFORE_BUILD_BVH #endif // 
BUILD_BVH_FOR_TRIMESH std::cout << "Model " << filename << '<' << res.triangleNum << "> created.\n" << std::endl; return res; } void Scene::initModels() { for(auto& geomToModelPair : modelPackage.dstToAddrMap) { size_t geomId = geomToModelPair.first; Geom& geom = geoms[geomId]; for (auto& modelFilePair : geomToModelPair.second) { const std::string& filename = modelFilePair.second; TriMesh* model_ptr = utilityCore::getPtrInStruct<TriMesh>(&geom, modelFilePair.first); std::string extension = utilityCore::getFileExtension(filename); if (stricmp("obj", extension.c_str()) == 0) { *model_ptr = loadModelObj(filename); } //TODO: Other model format. } //TODO: Build BVH if necessary. } } void Scene::freeModels() { for (auto& mdl : modelPackage.resources) { cudaFree(mdl.triangles); } cudaDeviceSynchronize(); checkCUDAError("cudaFree models"); } void Scene::initGBuffer() { dev_frameBuffer.size = state.camera.resolution; cudaMalloc(&dev_frameBuffer.buffer, sizeof(glm::vec3) * dev_frameBuffer.size.x * dev_frameBuffer.size.y); dev_GBuffer.size = state.camera.resolution; cudaMalloc(&dev_GBuffer.buffer, sizeof(GBufferData) * dev_GBuffer.size.x * dev_GBuffer.size.y); cudaDeviceSynchronize(); std::cout << "Initialized frame buffer " << dev_frameBuffer.size.x << " x " << dev_frameBuffer.size.y << std::endl; std::cout << "Initialized G-buffer " << dev_GBuffer.size.x << " x " << dev_GBuffer.size.y << std::endl; checkCUDAError("cudaMalloc GBuffer"); } void Scene::freeGBuffer() { cudaFree(dev_GBuffer.buffer); cudaFree(dev_frameBuffer.buffer); cudaDeviceSynchronize(); checkCUDAError("cudaFree GBuffer"); } namespace PostProcessGPU { #if !PREGATHER_FINAL_IMAGE __global__ void dividedByIter(Texture2D<glm::vec3> dst, Texture2D<glm::vec3> src, float iter) { int idxX = blockDim.x * blockIdx.x + threadIdx.x; int idxY = blockDim.y * blockIdx.y + threadIdx.y; if (idxX < dst.size.x && idxY < dst.size.y) { dst.setPixelByHW(idxY, idxX, src.getPixelByHW(idxY, idxX) / iter); } } #endif // PREGATHER_FINAL_IMAGE extern __global__ void postProcess_ColorRamp( Texture2D<glm::vec3> imageTexture, Texture2D<GBufferData> gBuffer, Texture2D<glm::vec3> rampTexture); extern __global__ void postProcess_OutlineByStencil( Texture2D<glm::vec3> imageTexture, Texture2D<GBufferData> gBuffer, int stencilId, glm::vec3 outlineColor, int outlineWidth); } glm::vec3* Scene::postProcessGPU(glm::vec3* dev_image, PathSegment* dev_paths, const dim3 blocksPerGrid2d, const dim3 blockSize2d, int iter) const { Texture2D<glm::vec3> imageTexture; imageTexture.buffer = dev_image; imageTexture.size = dev_GBuffer.size; #if !PREGATHER_FINAL_IMAGE PostProcessGPU::dividedByIter<<<blocksPerGrid2d, blockSize2d>>>(dev_frameBuffer, imageTexture, iter); #else // PREGATHER_FINAL_IMAGE cudaMemcpy(dev_frameBuffer.buffer, dev_image, sizeof(glm::vec3) * dev_GBuffer.size.x * dev_GBuffer.size.y, cudaMemcpyDeviceToDevice); #endif // PREGATHER_FINAL_IMAGE for (size_t i = 0; i < postprocesses.size(); ++i) { auto& pppair = postprocesses[i]; if (!pppair.second) { continue; } PostProcessType pptype = pppair.first; switch (pptype) { case PostProcessType::COLOR_RAMP: if (rampMap.isReadable()) { PostProcessGPU::postProcess_ColorRamp<<<blocksPerGrid2d, blockSize2d>>>(dev_frameBuffer, dev_GBuffer, rampMap); } break; case PostProcessType::OUTLINE_BY_STENCIL: { int stencilId = ppToStencilMap.at(i); auto colorWidthPair = stencilOutlineColorWidths.at(stencilId); //printf("%d<%f,%f,%f>%d\n", stencilId, colorWidthPair.first.r, colorWidthPair.first.g, colorWidthPair.first.b, 
colorWidthPair.second); PostProcessGPU::postProcess_OutlineByStencil<<<blocksPerGrid2d, blockSize2d>>>(dev_frameBuffer, dev_GBuffer, stencilId, colorWidthPair.first, colorWidthPair.second); } break; } checkCUDAError(("postprocess" + std::to_string(i)).c_str()); } kernGammaCorrection<<<blocksPerGrid2d, blockSize2d>>>(dev_frameBuffer); checkCUDAError("gamma correction"); return dev_frameBuffer.buffer; } __global__ void PostProcessGPU::postProcess_ColorRamp(Texture2D<glm::vec3> imageTexture, Texture2D<GBufferData> gBuffer, Texture2D<glm::vec3> rampTexture) { int idxX = blockDim.x * blockIdx.x + threadIdx.x; int idxY = blockDim.y * blockIdx.y + threadIdx.y; if (idxX < imageTexture.size.x && idxY < imageTexture.size.y) { //GBufferData gBufferData = gBuffer.getPixelByHW(idxY, idxX); glm::vec3 color = imageTexture.getPixelByHW(idxY, idxX); color = glm::clamp(color, 0.f, 1.f); glm::vec3 ramp; ramp.r = rampTexture.getPixelByUV(color.r, 0.5f).r; ramp.g = rampTexture.getPixelByUV(color.g, 0.5f).g; ramp.b = rampTexture.getPixelByUV(color.b, 0.5f).b; //printf("ramp of <%f,%f,%f> is <%f,%f,%f>\n", color.r, color.g, color.b, ramp.r, ramp.g, ramp.b); int index = Texture2D<glm::vec3>::index2Dto1D(imageTexture.size, idxY, idxX); imageTexture.buffer[index] = ramp;//glm::clamp(ramp, 0.f, 1.f); } } __global__ void PostProcessGPU::postProcess_OutlineByStencil(Texture2D<glm::vec3> imageTexture, Texture2D<GBufferData> gBuffer, int stencilId, glm::vec3 outlineColor, int outlineWidth) { int idxX = blockDim.x * blockIdx.x + threadIdx.x; int idxY = blockDim.y * blockIdx.y + threadIdx.y; if (idxX < imageTexture.size.x && idxY < imageTexture.size.y) { GBufferData data = gBuffer.getPixelByHW(idxY, idxX); if (data.stencilId == stencilId) { return; } //printf("%d,%d stencil = %d\n", idxX, idxY, data.stencilId); for (int y = glm::max(0, idxY - outlineWidth); y <= glm::min(imageTexture.size.y - 1, idxY + outlineWidth); ++y) { for (int x = glm::max(0, idxX - outlineWidth); x <= glm::min(imageTexture.size.x - 1, idxX + outlineWidth); ++x) { GBufferData data1 = gBuffer.getPixelByHW(y, x); if (data1.stencilId == stencilId) { imageTexture.setPixelByHW(idxY, idxX, outlineColor); return; } } } } }
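buildBVH_CPU earlier in this file lays its nodes out as an implicit complete binary tree (left child of node i at 2*i+1, right child at 2*i+2) and rounds the allocation up to 2^(treeHeight+1) - 1 slots. The snippet below is a minimal standalone sketch of that sizing arithmetic only; the function name and the sample leaf counts are illustrative and not part of the file above.

#include <cstdio>

// Mirrors the node-array sizing in buildBVH_CPU: an implicit complete binary
// tree needs 2^(height+1) - 1 slots, where the height is derived from the
// full-binary-tree node count 2*leafCount - 1.
static void completeTreeSize(int leafCount) {
    int nodeNum = (leafCount << 1) - 1;      // full binary tree with leafCount leaves
    int treeHeight = 0;
    for (int i = 2; i - 2 < nodeNum; i <<= 1) {
        ++treeHeight;
    }
    nodeNum = (1 << (treeHeight + 1)) - 1;   // round up to a complete tree
    std::printf("%d leaves -> height %d, %d node slots\n", leafCount, treeHeight, nodeNum);
}

int main() {
    completeTreeSize(1);   // 1 leaf   -> height 1, 3 slots
    completeTreeSize(5);   // 5 leaves -> height 3, 15 slots
    completeTreeSize(8);   // 8 leaves -> height 4, 31 slots
    return 0;
}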
7783ddf89a4cef0b060a805f433bfa32f4b77c9b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } void alloc_on_device(float *, float*, void **, void **, int); void free_on_device(void *, void*); void copy_from_device(void *, float*, int); void fprint_mat(FILE *, float *, int); void a_x_mais_y_host(int, float *, float*, int); float* init(int); __global__ void a_x_mais_y_device(int, float* , float*, int); __global__ void a_x_mais_y_device_coalesce(int, float*, float*, int); const int SEQUENTIAL = 1; const int CUDA = 2; const int CUDA_COALESCE = 4; /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char ** argv) { if(argc < 4) { printf("Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\n\ algoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n", argv[0]); return -1; } const char * mode = (argc > 1 ? argv[1] : ""); const int vector_size = (argc > 2 ? atoi(argv[2]) : 0); const int block_size = (argc > 3 ? atoi(argv[3]) : 0); const int debug = argc > 4 ? 
strcmp(argv[4], "debug") == 0 : 0; srand(time(NULL)); int state = -1; float *x = init(vector_size); float *y = init(vector_size); float *x_device, *y_device; int n_blocks = (vector_size + (block_size - 1)) / block_size; int shared_memory = block_size * 2 * sizeof(float); float a = 0.0f; FILE *f_a; FILE *f_x; FILE *f_y; FILE *f_axy; if(argc > 4) { a = atof(argv[4]); } else { a = (float) (rand() % vector_size); } if(strncmp(mode, "s", 1) == 0) { state = SEQUENTIAL; if(debug) { f_a = fopen("y_s.txt", "w"); f_x = fopen("x_s.txt", "w"); f_y = fopen("y_s.txt", "w"); f_axy = fopen("axy_s.txt", "w"); } } else if(strlen(mode) == 1 && strncmp(mode, "c", 1) == 0) { state = CUDA; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_c.txt", "w"); f_y = fopen("y_c.txt", "w"); f_axy = fopen("axy_c.txt", "w"); } } else if(strncmp(mode, "cc", 2) == 0) { state = CUDA_COALESCE; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_cc.txt", "w"); f_y = fopen("y_cc.txt", "w"); f_axy = fopen("axy_cc.txt", "w"); } } if(debug) { fprintf(f_a, "%f", a); fprint_mat(f_x, x, vector_size); fprint_mat(f_y, y, vector_size); } if(state == SEQUENTIAL) { a_x_mais_y_host(a, x, y, vector_size); } else { alloc_on_device(x, y, (void**)&x_device, (void**) &y_device, vector_size); if(state == CUDA) { hipLaunchKernelGGL(( a_x_mais_y_device), dim3(n_blocks), dim3(block_size), 0, 0, a, x_device, y_device, vector_size); } else if(state == CUDA_COALESCE) { hipLaunchKernelGGL(( a_x_mais_y_device_coalesce), dim3(n_blocks), dim3(block_size), shared_memory, 0, a, x_device, y_device, vector_size); } copy_from_device(y_device, y, vector_size); } if(debug) { fprint_mat(f_axy, y, vector_size); fclose(f_x); fclose(f_y); fclose(f_axy); fclose(f_a); } free(x); free(y); return 0; } void fprint_mat(FILE *f, float *v, int n) { for(int i = 0; i < n-1; i++) { fprintf(f, "%f ", v[i]); } fprintf(f, "%f", v[n-1]); } void alloc_on_device(float *x, float *y, void **x_ptr, void **y_ptr, int n) { size_t size = sizeof(float) * n; CUDA_CHECK_RETURN(hipMalloc(x_ptr, size)); CUDA_CHECK_RETURN(hipMalloc(y_ptr, size)); CUDA_CHECK_RETURN(hipMemcpy(*x_ptr, x, size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(*y_ptr, y, size, hipMemcpyHostToDevice)); } void copy_from_device(void *v_ptr, float *v, int n) { hipMemcpy(v, v_ptr, sizeof(float) * n, hipMemcpyDeviceToHost); } void free_on_device(void *x_ptr, void *y_ptr) { hipFree(x_ptr); hipFree(y_ptr); } void a_x_mais_y_host(int a, float *x, float* y, int n) { for(int i = 0; i < n; ++i) { y[i] = (a * x[i]) + y[i]; } } float* init(int n) { float *v = (float*) malloc(sizeof(float) * n); for(int i = 0; i < n; ++i) { v[i] = (float) i; } return v; } //////////////////////////////////////////////////////////////////////////////// __global__ void a_x_mais_y_device(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void a_x_mais_y_device_coalesce(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; int ti = threadIdx.x; extern __shared__ float sharedX[]; float *sharedY = &sharedX[0] + blockDim.x; if(i < n) { sharedX[ti] = x[i]; sharedY[ti] = y[i]; y[i] = a * sharedX[ti] + sharedY[ti]; } }
7783ddf89a4cef0b060a805f433bfa32f4b77c9b.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } void alloc_on_device(float *, float*, void **, void **, int); void free_on_device(void *, void*); void copy_from_device(void *, float*, int); void fprint_mat(FILE *, float *, int); void a_x_mais_y_host(int, float *, float*, int); float* init(int); __global__ void a_x_mais_y_device(int, float* , float*, int); __global__ void a_x_mais_y_device_coalesce(int, float*, float*, int); const int SEQUENTIAL = 1; const int CUDA = 2; const int CUDA_COALESCE = 4; /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char ** argv) { if(argc < 4) { printf("Uso: %s <algoritmo> <tamanho_vetor> <tamanho_bloco> <a opcional> <debug opcional>\n\ algoritmo:\n\ts (Sequencial)\n\tc (Cuda nao coalescente)\n\tcc (Cuda coalescente)\n", argv[0]); return -1; } const char * mode = (argc > 1 ? argv[1] : ""); const int vector_size = (argc > 2 ? atoi(argv[2]) : 0); const int block_size = (argc > 3 ? atoi(argv[3]) : 0); const int debug = argc > 4 ? 
strcmp(argv[4], "debug") == 0 : 0; srand(time(NULL)); int state = -1; float *x = init(vector_size); float *y = init(vector_size); float *x_device, *y_device; int n_blocks = (vector_size + (block_size - 1)) / block_size; int shared_memory = block_size * 2 * sizeof(float); float a = 0.0f; FILE *f_a; FILE *f_x; FILE *f_y; FILE *f_axy; if(argc > 4) { a = atof(argv[4]); } else { a = (float) (rand() % vector_size); } if(strncmp(mode, "s", 1) == 0) { state = SEQUENTIAL; if(debug) { f_a = fopen("y_s.txt", "w"); f_x = fopen("x_s.txt", "w"); f_y = fopen("y_s.txt", "w"); f_axy = fopen("axy_s.txt", "w"); } } else if(strlen(mode) == 1 && strncmp(mode, "c", 1) == 0) { state = CUDA; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_c.txt", "w"); f_y = fopen("y_c.txt", "w"); f_axy = fopen("axy_c.txt", "w"); } } else if(strncmp(mode, "cc", 2) == 0) { state = CUDA_COALESCE; if(debug) { f_a = fopen("y_c.txt", "w"); f_x = fopen("x_cc.txt", "w"); f_y = fopen("y_cc.txt", "w"); f_axy = fopen("axy_cc.txt", "w"); } } if(debug) { fprintf(f_a, "%f", a); fprint_mat(f_x, x, vector_size); fprint_mat(f_y, y, vector_size); } if(state == SEQUENTIAL) { a_x_mais_y_host(a, x, y, vector_size); } else { alloc_on_device(x, y, (void**)&x_device, (void**) &y_device, vector_size); if(state == CUDA) { a_x_mais_y_device<<<n_blocks, block_size>>>(a, x_device, y_device, vector_size); } else if(state == CUDA_COALESCE) { a_x_mais_y_device_coalesce<<<n_blocks, block_size, shared_memory>>>(a, x_device, y_device, vector_size); } copy_from_device(y_device, y, vector_size); } if(debug) { fprint_mat(f_axy, y, vector_size); fclose(f_x); fclose(f_y); fclose(f_axy); fclose(f_a); } free(x); free(y); return 0; } void fprint_mat(FILE *f, float *v, int n) { for(int i = 0; i < n-1; i++) { fprintf(f, "%f ", v[i]); } fprintf(f, "%f", v[n-1]); } void alloc_on_device(float *x, float *y, void **x_ptr, void **y_ptr, int n) { size_t size = sizeof(float) * n; CUDA_CHECK_RETURN(cudaMalloc(x_ptr, size)); CUDA_CHECK_RETURN(cudaMalloc(y_ptr, size)); CUDA_CHECK_RETURN(cudaMemcpy(*x_ptr, x, size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(*y_ptr, y, size, cudaMemcpyHostToDevice)); } void copy_from_device(void *v_ptr, float *v, int n) { cudaMemcpy(v, v_ptr, sizeof(float) * n, cudaMemcpyDeviceToHost); } void free_on_device(void *x_ptr, void *y_ptr) { cudaFree(x_ptr); cudaFree(y_ptr); } void a_x_mais_y_host(int a, float *x, float* y, int n) { for(int i = 0; i < n; ++i) { y[i] = (a * x[i]) + y[i]; } } float* init(int n) { float *v = (float*) malloc(sizeof(float) * n); for(int i = 0; i < n; ++i) { v[i] = (float) i; } return v; } //////////////////////////////////////////////////////////////////////////////// __global__ void a_x_mais_y_device(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void a_x_mais_y_device_coalesce(int a, float* x, float* y, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; int ti = threadIdx.x; extern __shared__ float sharedX[]; float *sharedY = &sharedX[0] + blockDim.x; if(i < n) { sharedX[ti] = x[i]; sharedY[ti] = y[i]; y[i] = a * sharedX[ti] + sharedY[ti]; } }
600735f432bb984e5d4b8837545d15a48312c43a.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#define N 10000000
#define MAX_ERR 1e-6

__global__ void vector_add(float *out, float *a, float *b, int n) {
    for(int i = 0; i < n; i ++){
        out[i] = a[i] + b[i];
    }
}

int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;

    // Allocate host memory
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);

    // Initialize host arrays
    for(int i = 0; i < N; i++){
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // Allocate device memory
    hipMalloc((void**)&d_a, sizeof(float) * N);
    hipMalloc((void**)&d_b, sizeof(float) * N);
    hipMalloc((void**)&d_out, sizeof(float) * N);

    // Transfer data from host to device memory
    hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);

    // Executing kernel
    hipLaunchKernelGGL(( vector_add), dim3(1),dim3(1), 0, 0, d_out, d_a, d_b, N);

    // Transfer data back to host memory
    hipMemcpy(out, d_out, sizeof(float) * N, hipMemcpyDeviceToHost);

    // Verification
    for(int i = 0; i < N; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    printf("out[0] = %f\n", out[0]);
    printf("PASSED\n");

    // Deallocate device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_out);

    // Deallocate host memory
    free(a);
    free(b);
    free(out);
}
600735f432bb984e5d4b8837545d15a48312c43a.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 10000000
#define MAX_ERR 1e-6

__global__ void vector_add(float *out, float *a, float *b, int n) {
    for(int i = 0; i < n; i ++){
        out[i] = a[i] + b[i];
    }
}

int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;

    // Allocate host memory
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);

    // Initialize host arrays
    for(int i = 0; i < N; i++){
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // Allocate device memory
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);

    // Transfer data from host to device memory
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);

    // Executing kernel
    vector_add<<<1,1>>>(d_out, d_a, d_b, N);

    // Transfer data back to host memory
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);

    // Verification
    for(int i = 0; i < N; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    printf("out[0] = %f\n", out[0]);
    printf("PASSED\n");

    // Deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);

    // Deallocate host memory
    free(a);
    free(b);
    free(out);
}
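The pair above launches vector_add with <<<1,1>>>, so a single thread loops over all N elements. A minimal sketch of the usual one-element-per-thread variant is shown below; the kernel name and block size are illustrative and not part of the dataset pair.

__global__ void vector_add_parallel(float *out, const float *a, const float *b, int n) {
    // One element per thread, with a guard for the last partial block.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        out[i] = a[i] + b[i];
    }
}

// Launch with enough 256-thread blocks to cover N elements:
//     int threads = 256;
//     int blocks  = (N + threads - 1) / threads;
//     vector_add_parallel<<<blocks, threads>>>(d_out, d_a, d_b, N);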
PSROIPooling.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THH.h" #include <algorithm> #include <cfloat> #include "common.h" #include "gpu_util_hip.cuh" using std::max; using std::min; template <typename Dtype> __global__ void PSROIPoolingForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, // total number of channels for one image, e.g. C*N*N const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const int output_dim, // the number of class, e.g. C Dtype* top_data, int* mapping_channel) { // DEBUG //printf("[INIT c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_rois[0], bottom_rois[1], bottom_rois[2], bottom_rois[3], bottom_rois[4]); // DEBUG //printf("[INIT-DATA c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_data[0], bottom_data[1], bottom_data[2], bottom_data[3], bottom_data[4]); CUDA_KERNEL_LOOP(index, nthreads){ // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; // DEBUG //printf("[c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_rois[0], bottom_rois[1], bottom_rois[2], bottom_rois[3], bottom_rois[4]); // DEBUG //printf("spatial_scale=%.3f\n", spatial_scale); int roi_batch_ind = bottom_rois[0] - 1; // -1 is due to the Lua/C conversion //Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; //Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; //Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; //Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(bottom_rois[3] + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(bottom_rois[4] + 1.) 
* spatial_scale; bool roi_is_empty = (roi_end_h <= roi_start_h) || (roi_end_w <= roi_start_w); // DEBUG //printf("[hs=%.2f,ws=%.2f,he=%.2f,we=%.2f]\n", roi_start_h, roi_start_w, roi_end_h, roi_end_w); // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = roi_is_empty || (hend <= hstart) || (wend <= wstart); // DEBUG //printf("[%d,%d,%d,%d]\n", wstart+1, hstart+1, wend, hend); int gw = pw; int gh = ph; int c = ctop * pooled_width * pooled_height + gh * pooled_width + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } // DEBUG //if (is_empty) { // printf("empty\n"); //} else { // printf("non-empty\n"); //} Dtype bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. : out_sum/bin_area; mapping_channel[index] = c; } } extern "C" void PSROIPooling_updateOutput(THCState *state, THCudaTensor *output, THCudaTensor *indices, THCudaTensor *data, THCudaTensor* rois, int height, int width, int pooled_height, int pooled_width, int output_dim, double spatial_scale) { // DEBUG //printf("PSROIPooling_updateOutput, spatial_scale=%.3f\n", spatial_scale); //printf("PSROIPooling_updateOutput, height=%d\n", height); //printf("PSROIPooling_updateOutput, width=%d\n", width); //printf("PSROIPooling_updateOutput, pooled_height=%d\n", pooled_height); //printf("PSROIPooling_updateOutput, pooled_width=%d\n", pooled_width); //printf("PSROIPooling_updateOutput, output_dim=%d\n", output_dim); THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; int channels = pooled_height * pooled_width * output_dim; THCudaTensor_zero(state, output); THCudaTensor_zero(state, indices); THCudaTensor_resize4d(state, output, num_rois, output_dim, pooled_height, pooled_width); THCudaTensor_resize4d(state, indices, num_rois, output_dim, pooled_height, pooled_width); long count = THCudaTensor_nElement(state, output); PSROIPoolingForward<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >> >(count, THCudaTensor_data(state, data), spatial_scale, channels, height, width, pooled_height, pooled_width, THCudaTensor_data(state, rois), output_dim, THCudaTensor_data(state, output), (int*)THCudaTensor_data(state, indices)); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in PSROIPooling_updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } } template <typename Dtype> __global__ void PSROIPoolingBackwardAtomic( const int 
nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0] - 1; // -1 is due to the Lua/C conversion //Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; //Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; //Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; //Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(bottom_rois[3] + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(bottom_rois[4] + 1.) * spatial_scale; bool roi_is_empty = (roi_end_h <= roi_start_h) || (roi_end_w <= roi_start_w); // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = roi_is_empty || (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype bin_area = (hend - hstart)*(wend - wstart); Dtype diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } extern "C" void PSROIPooling_updateGradInputAtomic(THCState *state, THCudaTensor *gradInput, THCudaTensor *gradOutput, THCudaTensor *data, THCudaTensor* rois, THCudaTensor *indices, int height, int width, int pooled_height, int pooled_width, int output_dim, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; int channels = pooled_height * pooled_width * output_dim; THCudaTensor_resizeAs(state, gradInput, data); THCudaTensor_zero(state, gradInput); long count = THCudaTensor_nElement(state, gradOutput); PSROIPoolingBackwardAtomic<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >> >(count, THCudaTensor_data(state, gradOutput), (int*)THCudaTensor_data(state, indices), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim, THCudaTensor_data(state, gradInput), THCudaTensor_data(state, rois)); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in PSROIPooling_updateGradInputAtomic: %s\n", hipGetErrorString(err)); THError("aborting"); } }
PSROIPooling.cu
#include "THC.h" #include <algorithm> #include <cfloat> #include "common.h" #include "gpu_util.cuh" using std::max; using std::min; template <typename Dtype> __global__ void PSROIPoolingForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, // total number of channels for one image, e.g. C*N*N const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const int output_dim, // the number of class, e.g. C Dtype* top_data, int* mapping_channel) { // DEBUG //printf("[INIT c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_rois[0], bottom_rois[1], bottom_rois[2], bottom_rois[3], bottom_rois[4]); // DEBUG //printf("[INIT-DATA c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_data[0], bottom_data[1], bottom_data[2], bottom_data[3], bottom_data[4]); CUDA_KERNEL_LOOP(index, nthreads){ // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; // DEBUG //printf("[c1=%.2f,c2=%.2f,c3=%.2f,c4=%.2f,c5=%.2f]\n", bottom_rois[0], bottom_rois[1], bottom_rois[2], bottom_rois[3], bottom_rois[4]); // DEBUG //printf("spatial_scale=%.3f\n", spatial_scale); int roi_batch_ind = bottom_rois[0] - 1; // -1 is due to the Lua/C conversion //Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; //Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; //Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; //Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(bottom_rois[3] + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(bottom_rois[4] + 1.) 
* spatial_scale; bool roi_is_empty = (roi_end_h <= roi_start_h) || (roi_end_w <= roi_start_w); // DEBUG //printf("[hs=%.2f,ws=%.2f,he=%.2f,we=%.2f]\n", roi_start_h, roi_start_w, roi_end_h, roi_end_w); // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = roi_is_empty || (hend <= hstart) || (wend <= wstart); // DEBUG //printf("[%d,%d,%d,%d]\n", wstart+1, hstart+1, wend, hend); int gw = pw; int gh = ph; int c = ctop * pooled_width * pooled_height + gh * pooled_width + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; out_sum += bottom_data[bottom_index]; } } // DEBUG //if (is_empty) { // printf("empty\n"); //} else { // printf("non-empty\n"); //} Dtype bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? 0. : out_sum/bin_area; mapping_channel[index] = c; } } extern "C" void PSROIPooling_updateOutput(THCState *state, THCudaTensor *output, THCudaTensor *indices, THCudaTensor *data, THCudaTensor* rois, int height, int width, int pooled_height, int pooled_width, int output_dim, double spatial_scale) { // DEBUG //printf("PSROIPooling_updateOutput, spatial_scale=%.3f\n", spatial_scale); //printf("PSROIPooling_updateOutput, height=%d\n", height); //printf("PSROIPooling_updateOutput, width=%d\n", width); //printf("PSROIPooling_updateOutput, pooled_height=%d\n", pooled_height); //printf("PSROIPooling_updateOutput, pooled_width=%d\n", pooled_width); //printf("PSROIPooling_updateOutput, output_dim=%d\n", output_dim); THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; int channels = pooled_height * pooled_width * output_dim; THCudaTensor_zero(state, output); THCudaTensor_zero(state, indices); THCudaTensor_resize4d(state, output, num_rois, output_dim, pooled_height, pooled_width); THCudaTensor_resize4d(state, indices, num_rois, output_dim, pooled_height, pooled_width); long count = THCudaTensor_nElement(state, output); PSROIPoolingForward<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >> >(count, THCudaTensor_data(state, data), spatial_scale, channels, height, width, pooled_height, pooled_width, THCudaTensor_data(state, rois), output_dim, THCudaTensor_data(state, output), (int*)THCudaTensor_data(state, indices)); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in PSROIPooling_updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } } template <typename Dtype> __global__ void PSROIPoolingBackwardAtomic( const 
int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0] - 1; // -1 is due to the Lua/C conversion //Dtype roi_start_w = static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale; //Dtype roi_start_h = static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale; //Dtype roi_end_w = static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale; //Dtype roi_end_h = static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = static_cast<Dtype>(bottom_rois[3] + 1.) * spatial_scale; Dtype roi_end_h = static_cast<Dtype>(bottom_rois[4] + 1.) * spatial_scale; bool roi_is_empty = (roi_end_h <= roi_start_h) || (roi_end_w <= roi_start_w); // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 Dtype roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); int hstart = floor(static_cast<Dtype>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<Dtype>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = roi_is_empty || (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype bin_area = (hend - hstart)*(wend - wstart); Dtype diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } extern "C" void PSROIPooling_updateGradInputAtomic(THCState *state, THCudaTensor *gradInput, THCudaTensor *gradOutput, THCudaTensor *data, THCudaTensor* rois, THCudaTensor *indices, int height, int width, int pooled_height, int pooled_width, int output_dim, double spatial_scale) { THAssert(THCudaTensor_nDimension(state, data) == 4); THAssert(THCudaTensor_nDimension(state, rois) == 2 && rois->size[1] == 5); THAssert(THCudaTensor_isContiguous(state, data)); THAssert(THCudaTensor_isContiguous(state, rois)); long num_rois = rois->size[0]; int channels = pooled_height * pooled_width * output_dim; THCudaTensor_resizeAs(state, gradInput, data); THCudaTensor_zero(state, gradInput); long count = THCudaTensor_nElement(state, gradOutput); PSROIPoolingBackwardAtomic<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >> >(count, THCudaTensor_data(state, gradOutput), (int*)THCudaTensor_data(state, indices), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim, THCudaTensor_data(state, gradInput), THCudaTensor_data(state, rois)); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in PSROIPooling_updateGradInputAtomic: %s\n", cudaGetErrorString(err)); THError("aborting"); } }
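PSROIPoolingForward above maps each output cell (n, ctop, ph, pw) to the position-sensitive input channel c = ctop*pooled_height*pooled_width + ph*pooled_width + pw and averages the feature map over that cell's spatial bin. Below is a small host-side sketch of just the bin-index arithmetic with one worked example; the function name and the sample ROI are illustrative, not taken from the file.

#include <algorithm>
#include <cmath>
#include <cstdio>

// Host-side mirror of the per-bin index math in PSROIPoolingForward for one
// output cell (ph, pw). ROI coordinates are in image space; the feature map
// is height x width cells at the given spatial_scale.
void psroiBin(float x1, float y1, float x2, float y2,
              float spatial_scale, int pooled_h, int pooled_w,
              int ph, int pw, int height, int width) {
    float roi_start_w = x1 * spatial_scale;
    float roi_start_h = y1 * spatial_scale;
    float roi_end_w   = (x2 + 1.f) * spatial_scale;
    float roi_end_h   = (y2 + 1.f) * spatial_scale;
    float roi_w = std::max(roi_end_w - roi_start_w, 0.1f);  // avoid zero-sized ROIs
    float roi_h = std::max(roi_end_h - roi_start_h, 0.1f);
    float bin_w = roi_w / pooled_w;
    float bin_h = roi_h / pooled_h;
    int hstart = std::min(std::max((int)std::floor(ph * bin_h + roi_start_h), 0), height);
    int hend   = std::min(std::max((int)std::ceil((ph + 1) * bin_h + roi_start_h), 0), height);
    int wstart = std::min(std::max((int)std::floor(pw * bin_w + roi_start_w), 0), width);
    int wend   = std::min(std::max((int)std::ceil((pw + 1) * bin_w + roi_start_w), 0), width);
    std::printf("bin (%d,%d): rows [%d,%d), cols [%d,%d)\n", ph, pw, hstart, hend, wstart, wend);
}

int main() {
    // ROI (16,16)-(111,79) on a 1/16-scale 50x50 feature map, pooled to 3x3:
    // bin (1,2) covers rows [2,4) and cols [5,7), i.e. a 2x2 average window,
    // read from position-sensitive channel c = ctop*9 + 1*3 + 2.
    psroiBin(16, 16, 111, 79, 1.f / 16.f, 3, 3, /*ph=*/1, /*pw=*/2, 50, 50);
    return 0;
}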
86f4da5ba770adcb725e0972952dbc3d5b9b0772.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #define DATATYPE int #define SMEMSIZE 512 #define REP 128 texture <int,1,hipReadModeElementType> texref1; texture <int,1,hipReadModeElementType> texref2; __global__ void texture_order_1(double *time,DATATYPE *out,int its) { DATATYPE p,q=threadIdx.x; double time_tmp=0.0; unsigned int start_time=0,stop_time=0; unsigned int i,j; for (i=0;i<its;i++) { __syncthreads(); start_time=clock(); #pragma unroll for (j=0;j<REP;j++) { p=tex1Dfetch(texref1,q); q=tex1Dfetch(texref2,p); } stop_time=clock(); time_tmp+=(stop_time-start_time); } time_tmp=time_tmp/REP/its; out[blockDim.x*blockIdx.x+threadIdx.x] = p+q; time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp; } int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx) { int its=30; //int blocks=1,threads=32; DATATYPE *d_in1,*d_in2; hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE); hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE); hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice); hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice); hipBindTexture(NULL,texref1,d_in1,sizeof(DATATYPE)*SMEMSIZE); hipBindTexture(NULL,texref2,d_in2,sizeof(DATATYPE)*SMEMSIZE); double *h_time,*d_time; DATATYPE *d_out; h_time=(double*)malloc(sizeof(double)*blocks*threads); hipMalloc((void**)&d_time,sizeof(double)*blocks*threads); hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads); hipLaunchKernelGGL(( texture_order_1), dim3(blocks),dim3(threads), 0, 0, d_time,d_out,its); hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost); double avert=0.0,maxt=0.0,mint=99999.9; int nn=0; for (int i=0;i<blocks;i++) { for (int j=0;j<threads;j+=32) { avert+=h_time[i*threads+j]; nn++; if (maxt<h_time[i*threads+j]) { maxt=h_time[i*threads+j]; } if (mint>h_time[i*threads+j]) { mint=h_time[i*threads+j]; } } } avert/=nn; printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt); hipUnbindTexture(texref1); hipUnbindTexture(texref2); hipFree(d_time); hipFree(d_out); hipFree(d_in1); hipFree(d_in2); free(h_time); return 0; } void init_order(DATATYPE *a,int n) { for (int i=0;i<n;i++) { a[i]=i; } } void init_disordered_32(DATATYPE *a,int n) { DATATYPE p[32]; for (int i=0;i<32;i++) { p[i]=i; } for (int i=0;i<n;i+=32) { for (int j=0;j<32;j++) { int jj=rand()%(32-j); a[i+j]=p[jj]; for (int k=jj;k<(32-j);k++) { p[k]=p[k+1]; } } for (int j=0;j<32;j++) { p[j]=a[i+j]; a[i+j]+=i; } } } void init_disordered_512(DATATYPE *a,int n) { const int nn=n/32; DATATYPE *q=(DATATYPE*)malloc(sizeof(DATATYPE)*nn); DATATYPE *b=(DATATYPE*)malloc(sizeof(DATATYPE)*n); init_order(q,nn); /* for (int j=0;j<nn;j++) { printf("%d\t",q[j]); } printf("\n"); */ for (int i=0;i<n;i+=nn) { for (int j=0;j<nn;j++) { int jj=rand()%(nn-j); b[i+j]=q[jj]; for (int k=jj;k<(nn-j);k++) { q[k]=q[k+1]; } } for (int j=0;j<nn;j++) { q[j]=b[i+j]; } } /* for (int i=0;i<n;i+=nn) { for (int j=0;j<nn;j++) { printf("%d\t",b[i+j]); } printf("\n"); } */ DATATYPE p[32]; for (int i=0;i<32;i++) { p[i]=i; } for (int i=0;i<32;i++) { for (int j=0;j<nn;j++) { a[j*32+i]=b[i*nn+j]*32+p[i]; } } free(q); free(b); } int main() { DATATYPE *h_in1,*h_in2,*h_in3; h_in1=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE); h_in2=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE); h_in3=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE); init_order(h_in1,SMEMSIZE); init_disordered_32(h_in2,SMEMSIZE); init_disordered_512(h_in3,SMEMSIZE); /* for (int 
i=0;i<SMEMSIZE;i+=32) { for (int j=0;j<32;j++) { printf("%d\t",h_in3[i+j]); } printf("\n"); } */ printf("blocks\t threads\t aver \t min \t max \t(clocks)\n"); //main_test(1,32,h_in1,h_in1,1); //main_test(1,32,h_in2,h_in2,2); //main_test(1,32,h_in3,h_in3,3); //main_test(1,512,h_in1,h_in1,1); //main_test(1,512,h_in2,h_in2,2); //main_test(1,512,h_in3,h_in3,3); /* for (int i=0;i<=1;i+=32) { int blocks=i; if (i==0) { blocks=1; } for (int j=0;j<=512;j+=32) { int threads=j; if (j==0) { threads=1; } main_test(blocks,threads,h_in1,h_in1,1); main_test(blocks,threads,h_in2,h_in2,2); main_test(blocks,threads,h_in3,h_in3,3); } } */ for (int i=0;i<=1024;i+=32) { int blocks=i; if (i==0) { blocks=1; } for (int j=256;j<=256;j+=32) { int threads=j; if (j==0) { threads=1; } main_test(blocks,threads,h_in1,h_in1,1); main_test(blocks,threads,h_in2,h_in2,2); main_test(blocks,threads,h_in3,h_in3,3); } } free(h_in1); free(h_in2); free(h_in3); return 0; }
86f4da5ba770adcb725e0972952dbc3d5b9b0772.cu
#include <stdlib.h> #include <stdio.h> #include <cuda_runtime.h> #define DATATYPE int #define SMEMSIZE 512 #define REP 128 texture <int,1,cudaReadModeElementType> texref1; texture <int,1,cudaReadModeElementType> texref2; __global__ void texture_order_1(double *time,DATATYPE *out,int its) { DATATYPE p,q=threadIdx.x; double time_tmp=0.0; unsigned int start_time=0,stop_time=0; unsigned int i,j; for (i=0;i<its;i++) { __syncthreads(); start_time=clock(); #pragma unroll for (j=0;j<REP;j++) { p=tex1Dfetch(texref1,q); q=tex1Dfetch(texref2,p); } stop_time=clock(); time_tmp+=(stop_time-start_time); } time_tmp=time_tmp/REP/its; out[blockDim.x*blockIdx.x+threadIdx.x] = p+q; time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp; } int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2,int xxx) { int its=30; //int blocks=1,threads=32; DATATYPE *d_in1,*d_in2; cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE); cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE); cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice); cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice); cudaBindTexture(NULL,texref1,d_in1,sizeof(DATATYPE)*SMEMSIZE); cudaBindTexture(NULL,texref2,d_in2,sizeof(DATATYPE)*SMEMSIZE); double *h_time,*d_time; DATATYPE *d_out; h_time=(double*)malloc(sizeof(double)*blocks*threads); cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads); cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads); texture_order_1<<<blocks,threads>>>(d_time,d_out,its); cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost); double avert=0.0,maxt=0.0,mint=99999.9; int nn=0; for (int i=0;i<blocks;i++) { for (int j=0;j<threads;j+=32) { avert+=h_time[i*threads+j]; nn++; if (maxt<h_time[i*threads+j]) { maxt=h_time[i*threads+j]; } if (mint>h_time[i*threads+j]) { mint=h_time[i*threads+j]; } } } avert/=nn; printf("%d\t%d\t%d\t\t%f\t%f\t%f\n",xxx, blocks,threads,avert,mint,maxt); cudaUnbindTexture(texref1); cudaUnbindTexture(texref2); cudaFree(d_time); cudaFree(d_out); cudaFree(d_in1); cudaFree(d_in2); free(h_time); return 0; } void init_order(DATATYPE *a,int n) { for (int i=0;i<n;i++) { a[i]=i; } } void init_disordered_32(DATATYPE *a,int n) { DATATYPE p[32]; for (int i=0;i<32;i++) { p[i]=i; } for (int i=0;i<n;i+=32) { for (int j=0;j<32;j++) { int jj=rand()%(32-j); a[i+j]=p[jj]; for (int k=jj;k<(32-j);k++) { p[k]=p[k+1]; } } for (int j=0;j<32;j++) { p[j]=a[i+j]; a[i+j]+=i; } } } void init_disordered_512(DATATYPE *a,int n) { const int nn=n/32; DATATYPE *q=(DATATYPE*)malloc(sizeof(DATATYPE)*nn); DATATYPE *b=(DATATYPE*)malloc(sizeof(DATATYPE)*n); init_order(q,nn); /* for (int j=0;j<nn;j++) { printf("%d\t",q[j]); } printf("\n"); */ for (int i=0;i<n;i+=nn) { for (int j=0;j<nn;j++) { int jj=rand()%(nn-j); b[i+j]=q[jj]; for (int k=jj;k<(nn-j);k++) { q[k]=q[k+1]; } } for (int j=0;j<nn;j++) { q[j]=b[i+j]; } } /* for (int i=0;i<n;i+=nn) { for (int j=0;j<nn;j++) { printf("%d\t",b[i+j]); } printf("\n"); } */ DATATYPE p[32]; for (int i=0;i<32;i++) { p[i]=i; } for (int i=0;i<32;i++) { for (int j=0;j<nn;j++) { a[j*32+i]=b[i*nn+j]*32+p[i]; } } free(q); free(b); } int main() { DATATYPE *h_in1,*h_in2,*h_in3; h_in1=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE); h_in2=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE); h_in3=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE); init_order(h_in1,SMEMSIZE); init_disordered_32(h_in2,SMEMSIZE); init_disordered_512(h_in3,SMEMSIZE); /* for (int i=0;i<SMEMSIZE;i+=32) { for (int j=0;j<32;j++) { printf("%d\t",h_in3[i+j]); } printf("\n"); } */ 
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n"); //main_test(1,32,h_in1,h_in1,1); //main_test(1,32,h_in2,h_in2,2); //main_test(1,32,h_in3,h_in3,3); //main_test(1,512,h_in1,h_in1,1); //main_test(1,512,h_in2,h_in2,2); //main_test(1,512,h_in3,h_in3,3); /* for (int i=0;i<=1;i+=32) { int blocks=i; if (i==0) { blocks=1; } for (int j=0;j<=512;j+=32) { int threads=j; if (j==0) { threads=1; } main_test(blocks,threads,h_in1,h_in1,1); main_test(blocks,threads,h_in2,h_in2,2); main_test(blocks,threads,h_in3,h_in3,3); } } */ for (int i=0;i<=1024;i+=32) { int blocks=i; if (i==0) { blocks=1; } for (int j=256;j<=256;j+=32) { int threads=j; if (j==0) { threads=1; } main_test(blocks,threads,h_in1,h_in1,1); main_test(blocks,threads,h_in2,h_in2,2); main_test(blocks,threads,h_in3,h_in3,3); } } free(h_in1); free(h_in2); free(h_in3); return 0; }
e796f64f2cb5d72471af4387a2094e8b245bf711.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::embedding(const Tensor& input, int num_entries, int out_dim, AggrMode aggr, Initializer* kernel_initializer) { //assert(config.strategies.find(name) != config.strategies.end()); //ParallelConfig pc = config.strategies[name]; //IndexSpaceT<2> task_is = IndexSpaceT<2>(get_or_create_task_is(pc)); Embedding* embed = new Embedding(*this, input, num_entries, out_dim, aggr, kernel_initializer); layers.push_back(embed); return embed->outputs[0]; } Embedding* FFModel::embedding(int num_entries, int out_dim, AggrMode aggr, Initializer* kernel_initializer) { //assert(config.strategies.find(name) != config.strategies.end()); //ParallelConfig pc = config.strategies[name]; //IndexSpaceT<2> task_is = IndexSpaceT<2>(get_or_create_task_is(pc)); Embedding* embed = new Embedding(*this, num_entries, out_dim, aggr, kernel_initializer); layers.push_back(embed); return embed; } Embedding::Embedding(FFModel& model, const Tensor& _input, //std::stirng name, int _num_entries, int outDim, AggrMode _aggr, Initializer* _kernel_initializer) : Op(model, OP_EMBEDDING, "Embed_"+std::to_string(_num_entries)+"x"+std::to_string(outDim), _input), num_entries(_num_entries), out_channels(outDim), aggr(_aggr), kernel_initializer(_kernel_initializer), profiling(model.config.profiling) { assert(_input.numDim == 2); outputs[0].numDim = 2; outputs[0].adim[0] = out_channels; outputs[0].adim[1] = inputs[0].adim[1]; weights[0].numDim = 2; weights[0].adim[0] = num_entries; weights[0].adim[1] = out_channels; numWeights = 1; } Embedding::Embedding(FFModel& model, int _num_entries, int outDim, AggrMode _aggr, Initializer* kernel_initializer) : Op(model, OP_EMBEDDING, "Embed_"+std::to_string(_num_entries)+"x"+std::to_string(outDim), 1), num_entries(_num_entries), out_channels(outDim), aggr(_aggr), profiling(model.config.profiling) { } Tensor Embedding::init_inout(FFModel& model, const Tensor& _input) { assert(_input.numDim == 2); inputs[0] = _input; create_output_and_partition(model); return outputs[0]; } /* void Embedding::add_to_model(FFModel& model) { model.layers.push_back(this); model.parameters.push_back(weights[0]); } */ void Embedding::create_weights(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); { const int dims[2] = {out_channels, num_entries}; // Embeddding weights and linear weights can be partitioned in the same way weights[0] = model.create_linear_weight<2>(this, dims, (IndexSpaceT<2>)task_is, DT_FLOAT, kernel_initializer); assert(numWeights == 1); } } void Embedding::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = 
model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); // Currently assume we can only partition over the sample dim assert(part_rect.hi[0] == part_rect.lo[0]); { const int dims[2] = {inputs[0].adim[1], out_channels}; outputs[0] = model.create_tensor<2>(dims, (IndexSpaceT<2>)task_is, DT_FLOAT); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { // Currently assert input must have the same partition // to avoid data movement assert(false); } } __host__ OpMeta* Embedding::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { return NULL; } void Embedding::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(EMBED_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Embedding)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0]: input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1]: output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); // regions[2]: weight launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void embed_forward(const int64_t* input, float* output, const float* embed, int out_dim, int in_dim, int batch_size, AggrMode aggr) { CUDA_KERNEL_LOOP(i, batch_size * out_dim) { output[i] = 0; int idx = i / out_dim; int off = i % out_dim; for (int j = 0; j < in_dim; j++) { int64_t wordIdx = input[idx * in_dim + j]; output[i] += embed[wordIdx * out_dim + off]; if (aggr == AGGR_MODE_SUM) { } else { assert(aggr == AGGR_MODE_AVG); output[i] /= in_dim; } } } } __global__ void embed_backward(const int64_t* input, const float* output, float* embed, int out_dim, int in_dim, int batch_size, AggrMode aggr) { CUDA_KERNEL_LOOP(i, batch_size * out_dim) { int idx = i / out_dim; int off = i % out_dim; float gradient; if (aggr == AGGR_MODE_SUM) { gradient = output[i]; } else { assert(aggr == AGGR_MODE_AVG); gradient = output[i] / in_dim; } for (int j = 0; j < in_dim; j++) { int64_t wordIdx = input[idx * in_dim + j]; atomicAdd(embed + wordIdx * out_dim + off, gradient); } } } /* regions[0](I): input regions[1](O): output regions[2](I): kernel */ __host__ void Embedding::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 3); assert(task->regions.size() == 3); const Embedding* embed = (Embedding*) task->args; TensorAccessorR<int64_t, 2> accInput( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> accOutput( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> accWeight( regions[2], task->regions[2], FID_DATA, ctx, runtime); // Input matches Output assert(accInput.rect.hi[1] == accOutput.rect.hi[1]); assert(accInput.rect.lo[1] == accOutput.rect.lo[1]); // Weight 
matches Output assert(accWeight.rect.hi[1] == accOutput.rect.hi[0]); assert(accWeight.rect.lo[1] == accOutput.rect.lo[0]); int in_dim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1; int out_dim = accOutput.rect.hi[0] - accOutput.rect.lo[0] + 1; int batch_size = accOutput.rect.hi[1] - accOutput.rect.lo[1] + 1; hipLaunchKernelGGL(( embed_forward), dim3(GET_BLOCKS(accOutput.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0, accInput.ptr, accOutput.ptr, accWeight.ptr, out_dim, in_dim, batch_size, embed->aggr); checkCUDA(hipDeviceSynchronize()); if (embed->profiling) { print_tensor<2, int64_t>(accInput.ptr, accInput.rect, "[Embedding:forward:input]"); print_tensor<2, float>(accWeight.ptr, accWeight.rect, "[Embedding:forward:weight]"); print_tensor<2, float>(accOutput.ptr, accOutput.rect, "[Embedding:forward:output]"); checkCUDA(hipDeviceSynchronize()); } } void Embedding::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(EMBED_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Embedding)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0]: input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1]: output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region, MAP_TO_ZC_MEMORY)); launcher.add_field(1, FID_DATA); // regions[2]: weight launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } void Embedding::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(task->regions.size() == 3); const Embedding* embed = (Embedding*) task->args; TensorAccessorR<int64_t, 2> accInput( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorR<float, 2> accOutput( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> accWeightGrad( regions[2], task->regions[2], FID_DATA, ctx, runtime, true/*readOutput*/); // Input matches Output assert(accInput.rect.hi[1] == accOutput.rect.hi[1]); assert(accInput.rect.lo[1] == accOutput.rect.lo[1]); // WeightGrad matches Output assert(accWeightGrad.rect.hi[1] - accWeightGrad.rect.lo[1] == accOutput.rect.hi[0] - accOutput.rect.lo[0]); int in_dim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1; int out_dim = accOutput.rect.hi[0] - accOutput.rect.lo[0] + 1; int batch_size = accOutput.rect.hi[1] - accOutput.rect.lo[1] + 1; // Explicitly initialize accWegihtGrad to zero to aviod calling zero_gradients() before backward() // as an optimization for DLRM //assign_kernel<<<GET_BLOCKS(accWeightGrad.rect.volume()), CUDA_NUM_THREADS>>>( // accWeightGrad.ptr, accWeightGrad.rect.volume(), 0.0f); hipLaunchKernelGGL(( embed_backward), dim3(GET_BLOCKS(accOutput.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0, accInput.ptr, accOutput.ptr, accWeightGrad.ptr, out_dim, in_dim, batch_size, embed->aggr); checkCUDA(hipDeviceSynchronize()); if (embed->profiling) { print_tensor<2, float>(accOutput.ptr, accOutput.rect, "[Embedding:backward:output_grad]"); print_tensor<2, float>(accWeightGrad.ptr, accWeightGrad.rect, "[Embedding:backward:weight_grad]"); print_tensor<2, int64_t>(accInput.ptr, 
accInput.rect, "[Embedding:backward:input]"); checkCUDA(hipDeviceSynchronize()); } } void Embedding::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(EMBED_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Embedding)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0]: input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1]: output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad, MAP_TO_ZC_MEMORY)); launcher.add_field(1, FID_DATA); // regions[2]: weight_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } bool Embedding::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { //TODO: implement measure_forward return false; }
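A note on the embed_forward kernel above: in the AGGR_MODE_AVG branch the division by in_dim sits inside the j loop, so the running sum is rescaled on every iteration rather than divided once at the end (embed_backward, by contrast, treats the AVG gradient as output[i] / in_dim, i.e. a plain mean). A minimal sketch of a corrected forward kernel follows; it reuses CUDA_KERNEL_LOOP and AggrMode from this file, and the kernel name is illustrative, not part of the original code.

__global__ void embed_forward_avg_fixed(const int64_t* input, float* output,
                                        const float* embed, int out_dim,
                                        int in_dim, int batch_size, AggrMode aggr)
{
  CUDA_KERNEL_LOOP(i, batch_size * out_dim)
  {
    int idx = i / out_dim;   // which sample in the batch
    int off = i % out_dim;   // which output channel
    float sum = 0.0f;
    for (int j = 0; j < in_dim; j++) {
      int64_t wordIdx = input[idx * in_dim + j];
      sum += embed[wordIdx * out_dim + off];
    }
    if (aggr == AGGR_MODE_AVG)
      sum /= in_dim;         // divide once, after the whole accumulation
    output[i] = sum;
  }
}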
e796f64f2cb5d72471af4387a2094e8b245bf711.cu
/* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::embedding(const Tensor& input, int num_entries, int out_dim, AggrMode aggr, Initializer* kernel_initializer) { //assert(config.strategies.find(name) != config.strategies.end()); //ParallelConfig pc = config.strategies[name]; //IndexSpaceT<2> task_is = IndexSpaceT<2>(get_or_create_task_is(pc)); Embedding* embed = new Embedding(*this, input, num_entries, out_dim, aggr, kernel_initializer); layers.push_back(embed); return embed->outputs[0]; } Embedding* FFModel::embedding(int num_entries, int out_dim, AggrMode aggr, Initializer* kernel_initializer) { //assert(config.strategies.find(name) != config.strategies.end()); //ParallelConfig pc = config.strategies[name]; //IndexSpaceT<2> task_is = IndexSpaceT<2>(get_or_create_task_is(pc)); Embedding* embed = new Embedding(*this, num_entries, out_dim, aggr, kernel_initializer); layers.push_back(embed); return embed; } Embedding::Embedding(FFModel& model, const Tensor& _input, //std::stirng name, int _num_entries, int outDim, AggrMode _aggr, Initializer* _kernel_initializer) : Op(model, OP_EMBEDDING, "Embed_"+std::to_string(_num_entries)+"x"+std::to_string(outDim), _input), num_entries(_num_entries), out_channels(outDim), aggr(_aggr), kernel_initializer(_kernel_initializer), profiling(model.config.profiling) { assert(_input.numDim == 2); outputs[0].numDim = 2; outputs[0].adim[0] = out_channels; outputs[0].adim[1] = inputs[0].adim[1]; weights[0].numDim = 2; weights[0].adim[0] = num_entries; weights[0].adim[1] = out_channels; numWeights = 1; } Embedding::Embedding(FFModel& model, int _num_entries, int outDim, AggrMode _aggr, Initializer* kernel_initializer) : Op(model, OP_EMBEDDING, "Embed_"+std::to_string(_num_entries)+"x"+std::to_string(outDim), 1), num_entries(_num_entries), out_channels(outDim), aggr(_aggr), profiling(model.config.profiling) { } Tensor Embedding::init_inout(FFModel& model, const Tensor& _input) { assert(_input.numDim == 2); inputs[0] = _input; create_output_and_partition(model); return outputs[0]; } /* void Embedding::add_to_model(FFModel& model) { model.layers.push_back(this); model.parameters.push_back(weights[0]); } */ void Embedding::create_weights(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); { const int dims[2] = {out_channels, num_entries}; // Embeddding weights and linear weights can be partitioned in the same way weights[0] = model.create_linear_weight<2>(this, dims, (IndexSpaceT<2>)task_is, DT_FLOAT, kernel_initializer); assert(numWeights == 1); } } void Embedding::create_output_and_partition(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is); // Currently 
assume we can only partition over the sample dim assert(part_rect.hi[0] == part_rect.lo[0]); { const int dims[2] = {inputs[0].adim[1], out_channels}; outputs[0] = model.create_tensor<2>(dims, (IndexSpaceT<2>)task_is, DT_FLOAT); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); if (input_rect == part_rect) { input_lps[0] = inputs[0].part; input_grad_lps[0] = inputs[0].part_grad; } else { // Currently assert input must have the same partition // to avoid data movement assert(false); } } __host__ OpMeta* Embedding::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { return NULL; } void Embedding::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(EMBED_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Embedding)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0]: input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1]: output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); // regions[2]: weight launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void embed_forward(const int64_t* input, float* output, const float* embed, int out_dim, int in_dim, int batch_size, AggrMode aggr) { CUDA_KERNEL_LOOP(i, batch_size * out_dim) { output[i] = 0; int idx = i / out_dim; int off = i % out_dim; for (int j = 0; j < in_dim; j++) { int64_t wordIdx = input[idx * in_dim + j]; output[i] += embed[wordIdx * out_dim + off]; if (aggr == AGGR_MODE_SUM) { } else { assert(aggr == AGGR_MODE_AVG); output[i] /= in_dim; } } } } __global__ void embed_backward(const int64_t* input, const float* output, float* embed, int out_dim, int in_dim, int batch_size, AggrMode aggr) { CUDA_KERNEL_LOOP(i, batch_size * out_dim) { int idx = i / out_dim; int off = i % out_dim; float gradient; if (aggr == AGGR_MODE_SUM) { gradient = output[i]; } else { assert(aggr == AGGR_MODE_AVG); gradient = output[i] / in_dim; } for (int j = 0; j < in_dim; j++) { int64_t wordIdx = input[idx * in_dim + j]; atomicAdd(embed + wordIdx * out_dim + off, gradient); } } } /* regions[0](I): input regions[1](O): output regions[2](I): kernel */ __host__ void Embedding::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { assert(regions.size() == 3); assert(task->regions.size() == 3); const Embedding* embed = (Embedding*) task->args; TensorAccessorR<int64_t, 2> accInput( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> accOutput( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> accWeight( regions[2], task->regions[2], FID_DATA, ctx, runtime); // Input matches Output assert(accInput.rect.hi[1] == accOutput.rect.hi[1]); assert(accInput.rect.lo[1] == accOutput.rect.lo[1]); // Weight matches Output assert(accWeight.rect.hi[1] == accOutput.rect.hi[0]); assert(accWeight.rect.lo[1] == 
accOutput.rect.lo[0]); int in_dim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1; int out_dim = accOutput.rect.hi[0] - accOutput.rect.lo[0] + 1; int batch_size = accOutput.rect.hi[1] - accOutput.rect.lo[1] + 1; embed_forward<<<GET_BLOCKS(accOutput.rect.volume()), CUDA_NUM_THREADS>>>( accInput.ptr, accOutput.ptr, accWeight.ptr, out_dim, in_dim, batch_size, embed->aggr); checkCUDA(cudaDeviceSynchronize()); if (embed->profiling) { print_tensor<2, int64_t>(accInput.ptr, accInput.rect, "[Embedding:forward:input]"); print_tensor<2, float>(accWeight.ptr, accWeight.rect, "[Embedding:forward:weight]"); print_tensor<2, float>(accOutput.ptr, accOutput.rect, "[Embedding:forward:output]"); checkCUDA(cudaDeviceSynchronize()); } } void Embedding::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(EMBED_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(Embedding)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0]: input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1]: output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region, MAP_TO_ZC_MEMORY)); launcher.add_field(1, FID_DATA); // regions[2]: weight launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } void Embedding::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 3); assert(task->regions.size() == 3); const Embedding* embed = (Embedding*) task->args; TensorAccessorR<int64_t, 2> accInput( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorR<float, 2> accOutput( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorW<float, 2> accWeightGrad( regions[2], task->regions[2], FID_DATA, ctx, runtime, true/*readOutput*/); // Input matches Output assert(accInput.rect.hi[1] == accOutput.rect.hi[1]); assert(accInput.rect.lo[1] == accOutput.rect.lo[1]); // WeightGrad matches Output assert(accWeightGrad.rect.hi[1] - accWeightGrad.rect.lo[1] == accOutput.rect.hi[0] - accOutput.rect.lo[0]); int in_dim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1; int out_dim = accOutput.rect.hi[0] - accOutput.rect.lo[0] + 1; int batch_size = accOutput.rect.hi[1] - accOutput.rect.lo[1] + 1; // Explicitly initialize accWegihtGrad to zero to aviod calling zero_gradients() before backward() // as an optimization for DLRM //assign_kernel<<<GET_BLOCKS(accWeightGrad.rect.volume()), CUDA_NUM_THREADS>>>( // accWeightGrad.ptr, accWeightGrad.rect.volume(), 0.0f); embed_backward<<<GET_BLOCKS(accOutput.rect.volume()), CUDA_NUM_THREADS>>>( accInput.ptr, accOutput.ptr, accWeightGrad.ptr, out_dim, in_dim, batch_size, embed->aggr); checkCUDA(cudaDeviceSynchronize()); if (embed->profiling) { print_tensor<2, float>(accOutput.ptr, accOutput.rect, "[Embedding:backward:output_grad]"); print_tensor<2, float>(accWeightGrad.ptr, accWeightGrad.rect, "[Embedding:backward:weight_grad]"); print_tensor<2, int64_t>(accInput.ptr, accInput.rect, "[Embedding:backward:input]"); checkCUDA(cudaDeviceSynchronize()); } } void Embedding::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = 
ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(EMBED_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Embedding)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0]: input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1]: output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad, MAP_TO_ZC_MEMORY)); launcher.add_field(1, FID_DATA); // regions[2]: weight_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } bool Embedding::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { //TODO: implement measure_forward return false; }
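The backward_task above leaves an assign_kernel call commented out, noting that the weight gradients are deliberately not zeroed there as a DLRM optimization. For completeness, a grid-stride fill kernel compatible with that commented-out call could look like the sketch below; the name and signature are inferred from the comment, and the real helper in cuda_helper.h may differ.

// Hypothetical reconstruction of the assign_kernel referenced in backward_task's comment.
__global__ void assign_kernel(float* ptr, size_t count, float value)
{
  for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
       i < count;
       i += (size_t)blockDim.x * gridDim.x)
    ptr[i] = value;   // grid-stride loop: any grid/block shape covers the whole buffer
}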
bfae778c3df763e487be5e8a3c640adfb387a7ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <stdio.h> #include <omp.h> #include <string> void mmul(double* C, const double* A, const double* B, long N){ #pragma omp parallel for schedule(static) collapse(2) for(int row=0; row<N; row++) { for(int col=0; col<N; col++) { double sum = 0.0f; for (long i = 0; i < N; i++) sum += A[row*N+i] * B[col+i*N]; C[row*N+col] = sum; } } } void Check_CUDA_Error(void){ hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr,"ERROR: : %s\n", hipGetErrorString(error) ); exit(-1); } } #define BLOCK_SIZE 32 //32*32 = 1024 __global__ void mmul_kernel(double* C, double* A, const double* B, long N){ int ROW = (blockIdx.y) * blockDim.y + threadIdx.y; int COL = (blockIdx.x) * blockDim.x + threadIdx.x; double tmpSum = 0.0f; if(ROW < N && COL < N) { for(int i=0; i<N; i++) { tmpSum += A[ROW*N+i]*B[i*N+COL]; } } C[ROW*N + COL] = tmpSum; } int main() { long N = (1UL<<11); // 2048 * 2048 double *A, *B, *C, *C_ref; hipHostMalloc((void**)&A, N*N* sizeof(double)); hipHostMalloc((void**)&B, N*N* sizeof(double)); hipHostMalloc((void**)&C, N*N* sizeof(double)); hipHostMalloc((void**)&C_ref, N*N* sizeof(double)); #pragma omp parallel for schedule(static) collapse(2) for (long i = 0; i < N; i++) { for (long j = 0; j < N; j++) { A[i*N+j] = 1.0/(i+1); B[i*N+j] = 2.0/(i+1); } } double tt = omp_get_wtime(); mmul(C_ref, A, B, N); printf("CPU Bandwidth = %f GB/s\n", N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); double *A_d, *B_d, *C_d; hipMalloc(&A_d, N*N*sizeof(double)); hipMalloc(&B_d, N*N*sizeof(double)); hipMalloc(&C_d, N*N*sizeof(double)); hipMemcpyAsync(A_d, A, N*N*sizeof(double), hipMemcpyHostToDevice); hipMemcpyAsync(B_d, B, N*N*sizeof(double), hipMemcpyHostToDevice); hipDeviceSynchronize(); tt = omp_get_wtime(); int Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); //printf("N=%ld, Nb=%ld\n",N,Nb); dim3 Blocks(BLOCK_SIZE, BLOCK_SIZE); dim3 Grids(Nb,Nb); hipLaunchKernelGGL(( mmul_kernel), dim3(Grids),dim3(Blocks), 0, 0, C_d, A_d, B_d, N); Check_CUDA_Error(); hipMemcpyAsync(C, C_d, N*N*sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("GPU Bandwidth = %f GB/s\n", N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); double sumdiff = 0.0f; for(int row=0; row<N; row++) { for(int col=0; col<N; col++) { double diff = fabs(C[row*N+col]-C_ref[row*N+col]); sumdiff += diff; //printf("Error[%d][%d] = %f, %f %f\n", row, col, // fabs(C[row*N+col]-C_ref[row*N+col]),C[row*N+col],C_ref[row*N+col]); } } printf("sumError = %f\n", sumdiff); hipFree(A_d); hipFree(B_d); hipFree(C_d); hipHostFree(A); hipHostFree(B); hipHostFree(C); hipHostFree(C_ref); return 0; }
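One detail of mmul_kernel above: the store C[ROW*N + COL] = tmpSum happens outside the if (ROW < N && COL < N) guard, which is only safe here because N (2048) is an exact multiple of BLOCK_SIZE (32), so no thread ever falls out of range. A guarded variant, sketched below under that observation (the kernel name is illustrative), stays correct for arbitrary N:

__global__ void mmul_kernel_guarded(double* C, const double* A, const double* B, long N)
{
  long ROW = blockIdx.y * (long)blockDim.y + threadIdx.y;
  long COL = blockIdx.x * (long)blockDim.x + threadIdx.x;
  if (ROW < N && COL < N) {
    double tmpSum = 0.0;
    for (long i = 0; i < N; i++)
      tmpSum += A[ROW*N + i] * B[i*N + COL];
    C[ROW*N + COL] = tmpSum;   // store only for in-range (ROW, COL)
  }
}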
bfae778c3df763e487be5e8a3c640adfb387a7ad.cu
#include <algorithm> #include <stdio.h> #include <omp.h> #include <string> void mmul(double* C, const double* A, const double* B, long N){ #pragma omp parallel for schedule(static) collapse(2) for(int row=0; row<N; row++) { for(int col=0; col<N; col++) { double sum = 0.0f; for (long i = 0; i < N; i++) sum += A[row*N+i] * B[col+i*N]; C[row*N+col] = sum; } } } void Check_CUDA_Error(void){ cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr,"ERROR: : %s\n", cudaGetErrorString(error) ); exit(-1); } } #define BLOCK_SIZE 32 //32*32 = 1024 __global__ void mmul_kernel(double* C, double* A, const double* B, long N){ int ROW = (blockIdx.y) * blockDim.y + threadIdx.y; int COL = (blockIdx.x) * blockDim.x + threadIdx.x; double tmpSum = 0.0f; if(ROW < N && COL < N) { for(int i=0; i<N; i++) { tmpSum += A[ROW*N+i]*B[i*N+COL]; } } C[ROW*N + COL] = tmpSum; } int main() { long N = (1UL<<11); // 2048 * 2048 double *A, *B, *C, *C_ref; cudaMallocHost((void**)&A, N*N* sizeof(double)); cudaMallocHost((void**)&B, N*N* sizeof(double)); cudaMallocHost((void**)&C, N*N* sizeof(double)); cudaMallocHost((void**)&C_ref, N*N* sizeof(double)); #pragma omp parallel for schedule(static) collapse(2) for (long i = 0; i < N; i++) { for (long j = 0; j < N; j++) { A[i*N+j] = 1.0/(i+1); B[i*N+j] = 2.0/(i+1); } } double tt = omp_get_wtime(); mmul(C_ref, A, B, N); printf("CPU Bandwidth = %f GB/s\n", N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); double *A_d, *B_d, *C_d; cudaMalloc(&A_d, N*N*sizeof(double)); cudaMalloc(&B_d, N*N*sizeof(double)); cudaMalloc(&C_d, N*N*sizeof(double)); cudaMemcpyAsync(A_d, A, N*N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpyAsync(B_d, B, N*N*sizeof(double), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); tt = omp_get_wtime(); int Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); //printf("N=%ld, Nb=%ld\n",N,Nb); dim3 Blocks(BLOCK_SIZE, BLOCK_SIZE); dim3 Grids(Nb,Nb); mmul_kernel<<<Grids,Blocks>>>(C_d, A_d, B_d, N); Check_CUDA_Error(); cudaMemcpyAsync(C, C_d, N*N*sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("GPU Bandwidth = %f GB/s\n", N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); double sumdiff = 0.0f; for(int row=0; row<N; row++) { for(int col=0; col<N; col++) { double diff = fabs(C[row*N+col]-C_ref[row*N+col]); sumdiff += diff; //printf("Error[%d][%d] = %f, %f %f\n", row, col, // fabs(C[row*N+col]-C_ref[row*N+col]),C[row*N+col],C_ref[row*N+col]); } } printf("sumError = %f\n", sumdiff); cudaFree(A_d); cudaFree(B_d); cudaFree(C_d); cudaFreeHost(A); cudaFreeHost(B); cudaFreeHost(C); cudaFreeHost(C_ref); return 0; }
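The "Bandwidth" figures printed above divide N*N*sizeof(double) bytes by the elapsed time, which measures one matrix's worth of traffic rather than the arithmetic work of the multiply. If a FLOP rate is wanted as well, a small helper along these lines (illustrative, not part of the original program) could be called with the same N and elapsed time:

#include <stdio.h>

// Report arithmetic throughput for a dense N x N matrix multiply,
// counting the conventional 2*N^3 floating-point operations.
static void report_gflops(long N, double seconds)
{
  double gflops = 2.0 * (double)N * (double)N * (double)N / seconds / 1e9;
  printf("Throughput = %f GFLOP/s (N=%ld, %.3f s)\n", gflops, N, seconds);
}

For example, report_gflops(N, omp_get_wtime() - tt) right after the device-to-host copy and synchronization would report the GPU kernel's rate.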
ampere_sparse_tensorop_gemm.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4. Sparse GEMM kernels needs to takes an additional E matrix which stores the meta data. The format of meta data is different for every data types. CUTLASS templates can automatically infer it based on input A and B. Check code below. Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers efficiently. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_sparse.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/host_uncompress.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Row Major for // Matrix A, Column Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; // Data type and layout of meta data matrix E can be inferred from template Gemm. 
using ElementInputE = typename Gemm::ElementE; using LayoutInputE = cutlass::layout::RowMajor; using ReorderedLayoutInputE = typename Gemm::LayoutE; // Blow property is defined in include/cutlass/arch/sp_mma_sm80.h // 50% Sparsity on Ampere constexpr int kSparse = Gemm::kSparse; // How many elements of A are covered per ElementE constexpr int kElementsPerElementE = Gemm::kElementsPerElementE; // The size of individual meta data constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits; int run() { const int length_m = 512; const int length_n = 512; const int length_k = 1024; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2) cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed( problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing. cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Same size as the above. The above one needs to be reordered and stored in this one. cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(2), ElementInputA(-2), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(2), ElementInputB(-2), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(2), ElementOutput(-2), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomSparseMeta( tensor_e.host_view(), 1, kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core // instructions. 
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(), {problem_size.m(), problem_size.n(), problem_size.k() / kSparse / kElementsPerElementE}); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_e_reordered.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device tensor_e_reordered.device_ref(), // <- reference to matrix E on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // uncompress tensor_a based on meta data tensor_e. We need it for reference computing. cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(), tensor_e.host_ref(), problem_size.m(), problem_size.k()); // Create instantiation for host reference gemm kernel cutlass::reference::host::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue, typename Gemm::Operator> gemm_host; // Launch host reference gemm kernel gemm_host(problem_size, alpha, tensor_a_uncompressed.host_ref(), tensor_b.host_ref(), beta, tensor_c.host_ref(), tensor_ref_d.host_ref()); // Copy output data from CUTLASS host for comparison tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { bool notSupported = false; // Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.1. // // CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." 
<< std::endl; notSupported = true; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
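To make the compression bookkeeping in run() explicit: with kSparse == 2 the compressed A operand is M x (K / 2), and the metadata E is M x (K / 2 / kElementsPerElementE), which is exactly how tensor_a and tensor_e are sized above. A tiny host helper (hedged; the function is illustrative only) that prints those derived shapes:

#include <cassert>
#include <cstdio>

// Print the compressed-A and metadata-E shapes implied by a logical M x K sparse operand.
static void print_sparse_operand_shapes(int m, int k, int sparse, int elems_per_e)
{
  assert(k % (sparse * elems_per_e) == 0);   // K must divide the compression factors evenly
  std::printf("A (compressed): %d x %d\n", m, k / sparse);
  std::printf("E (metadata)  : %d x %d\n", m, k / sparse / elems_per_e);
}

// e.g. print_sparse_operand_shapes(512, 1024, kSparse, kElementsPerElementE)
// for the problem size used in this example.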
ampere_sparse_tensorop_gemm.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4. Sparse GEMM kernels needs to takes an additional E matrix which stores the meta data. The format of meta data is different for every data types. CUTLASS templates can automatically infer it based on input A and B. Check code below. Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers efficiently. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_sparse.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/host_uncompress.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Row Major for // Matrix A, Column Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; // Data type and layout of meta data matrix E can be inferred from template Gemm. 
using ElementInputE = typename Gemm::ElementE; using LayoutInputE = cutlass::layout::RowMajor; using ReorderedLayoutInputE = typename Gemm::LayoutE; // Blow property is defined in include/cutlass/arch/sp_mma_sm80.h // 50% Sparsity on Ampere constexpr int kSparse = Gemm::kSparse; // How many elements of A are covered per ElementE constexpr int kElementsPerElementE = Gemm::kElementsPerElementE; // The size of individual meta data constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits; int run() { const int length_m = 512; const int length_n = 512; const int length_k = 1024; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2) cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed( problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing. cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Same size as the above. The above one needs to be reordered and stored in this one. cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(2), ElementInputA(-2), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(2), ElementInputB(-2), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(2), ElementOutput(-2), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomSparseMeta( tensor_e.host_view(), 1, kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core // instructions. 
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(), {problem_size.m(), problem_size.n(), problem_size.k() / kSparse / kElementsPerElementE}); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_e_reordered.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device tensor_e_reordered.device_ref(), // <- reference to matrix E on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // uncompress tensor_a based on meta data tensor_e. We need it for reference computing. cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(), tensor_e.host_ref(), problem_size.m(), problem_size.k()); // Create instantiation for host reference gemm kernel cutlass::reference::host::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue, typename Gemm::Operator> gemm_host; // Launch host reference gemm kernel gemm_host(problem_size, alpha, tensor_a_uncompressed.host_ref(), tensor_b.host_ref(), beta, tensor_c.host_ref(), tensor_ref_d.host_ref()); // Copy output data from CUTLASS host for comparison tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { bool notSupported = false; // Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.1. // // CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." 
<< std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
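The "50% Sparsity on Ampere" noted above is the 2:4 structured pattern: in the uncompressed A, each group of four consecutive values along K carries at most two nonzeros. Below is a host-side sketch of that invariant for a plain row-major buffer (sub-byte CUTLASS element types such as int4b_t would need to be read through their host views rather than a raw pointer; the function name is illustrative):

#include <cassert>
#include <cstdio>

// Verify the 2:4 structured-sparsity pattern on a row-major rows x cols buffer (cols % 4 == 0).
template <typename T>
bool check_2_to_4_pattern(const T* a, int rows, int cols)
{
  assert(cols % 4 == 0);
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; c += 4) {
      int nnz = 0;
      for (int k = 0; k < 4; ++k)
        if (a[r * cols + c + k] != T(0)) ++nnz;
      if (nnz > 2) { std::printf("2:4 violation at row %d, col %d\n", r, c); return false; }
    }
  return true;
}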
c1ec00ab6aa17e3cce2d5656b72b75b87328c82b.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hipfft.h> #include <stdio.h> #include "common.h" #include "cuda_matrix_utility.cuh" __global__ void cuda2DConvolveKernal(float* f, float* result, float* input, int input_x, int input_y, int f_x, int f_y){ uint thread_id = blockIdx.x * blockDim.x + threadIdx.x; int Px = (f_x - 1)/2; int Py = (f_y - 1)/2; extern __shared__ float shared_f[]; if(thread_id < f_x * f_y){ shared_f[thread_id] = f[thread_id]; } while(thread_id < input_x * input_y){ // TODO load f into shared memory? int x = thread_id % input_x; int y = thread_id / input_x; result[thread_id] = 0; for(int n = 0; n < f_x * f_y; n++){ int t1 = n % f_x; int t2 = n / f_x; int x_index = x - t1 + Px; int y_index = y - t2 + Py; float x_element = (x_index >= 0 && y_index >= 0) ? input[x_index + y_index * input_x] : 0; result[thread_id] += f[n] * x_element; } thread_id += blockDim.x * gridDim.x; } } __global__ void cudaMMSEEstKernal(float* dev_data, int size){ uint thread_id = blockIdx.x * blockDim.x + threadIdx.x; while(thread_id < size){ dev_data[thread_id] = 255/ (1 + expf(-0.04*(dev_data[thread_id] - 255/2))); thread_id += blockDim.x * gridDim.x; } } __global__ void cudaWeinerRxyKernal(float* x, float* y, float* Rxy, int x_w, int x_h, int k1, int k2){ uint thread_id = blockIdx.x * blockDim.x + threadIdx.x; int size = x_w * x_h; while(thread_id < size){ // TODO load to shared memory ? int w = thread_id % x_w; int h = thread_id % x_h; if ((w < x_w - abs(k1)) && (h < x_h - abs(k2))){ float y_element = ((w - k1) < 0 || (h - k2) < 0) ? 0 : y[(w - k1) + (h - k2) * x_w]; atomicAdd(Rxy, x[h*x_w + w] * y_element/(float)(256*size)); } thread_id += blockDim.x * gridDim.x; } } __global__ void cudaWeinerUpdateKernal(float* f, float* Rxy, float* Ryy, int f_w, int f_h){ uint thread_id = blockIdx.x * blockDim.x + threadIdx.x; int size = f_w * f_h; int P = (f_w - 1)/2; while(thread_id < size){ int r1 = thread_id % f_w; int r2 = thread_id / f_w; for(int n = 0; n < size; n++) { int t1 = n % f_w; int t2 = n / f_w; int x = (P + t1 - r1); int y = (P + t2 - r2); float Ryy_element = (x > 0 && y > 0) ? Ryy[y * f_w + x] : 0.0; f[thread_id] += Rxy[n] * Ryy_element; } thread_id += blockDim.x * gridDim.x; } } __global__ void cudaFIRNormalizeKernal(float* f, int f_size){ __shared__ double temp[1]; unsigned int threadId = threadIdx.x; if(threadId < f_size){ temp[0] += f[threadId]; } __syncthreads(); /* for (unsigned int s = blockDim.x/2 ; s > 0; s >> 1){ if(threadId < s){ temp[threadId] = temp[threadId] + temp[threadId + s]; __syncthreads(); } } */ if (threadId < f_size){ f[threadId] = f[threadId] / temp[0]; } } void call2DConvolveKernal(float* f, float* result, float* input, int input_x, int input_y, int f_x, int f_y){ // max threads per block is 1024, nblocks = 512 int shmem = f_x * f_y * sizeof(float); int input_size = input_x * input_y; int block_size = input_size < 1024 ? input_size : 1024; int nblocks = input_size / block_size < 512 ? input_size / block_size : 512; hipLaunchKernelGGL(( cuda2DConvolveKernal), dim3(nblocks), dim3(block_size), shmem, 0, f, result, input, input_x, input_y, f_x, f_y); } void callMMSEEstKernal(float* data, int size){ int block_size = (size < 1024) ? size : 1024; int nblocks = size/block_size < 512 ? 
size/block_size : 512; hipLaunchKernelGGL(( cudaMMSEEstKernal), dim3(nblocks), dim3(block_size), 0, 0, data, size); } void callWeinerRxyKernal(float* x, float* y, float* Rxy, int x_w, int x_h, int k1, int k2){ int size = x_w * x_h; int block_size = 32; int nblocks = 32; hipLaunchKernelGGL(( cudaWeinerRxyKernal), dim3(nblocks), dim3(block_size), 0, 0, x, y, Rxy, x_w, x_h, k1, k2); } void callWeinerUpdateKernal(float* f, float* Rxy, float* Ryy, int f_w, int f_h){ int size = f_w * f_h; int block_size = (size < 1024) ? size : 1024; int nblocks = size/block_size; hipLaunchKernelGGL(( cudaWeinerUpdateKernal), dim3(nblocks), dim3(block_size), 0, 0, f, Rxy, Ryy, f_w, f_h); } void callFIRNormalizeKernal(float* f, int size){ hipLaunchKernelGGL(( cudaFIRNormalizeKernal), dim3(2), dim3(64), 0, 0, f, size); }
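cudaFIRNormalizeKernal above has two issues worth flagging: every thread adds into the single shared element temp[0] with no synchronization or atomics, and the commented-out reduction loop never updates s (the expression s >> 1 is not an assignment); it is also launched with two blocks even though the logic assumes a single block's view of the filter. A minimal corrected sketch, assuming one block whose size is a power of two and at least f_size (the kernel name is illustrative):

__global__ void fir_normalize_fixed(float* f, int f_size)
{
  extern __shared__ float partial[];              // blockDim.x floats of dynamic shared memory
  unsigned int tid = threadIdx.x;
  partial[tid] = (tid < f_size) ? f[tid] : 0.0f;  // stage one tap per thread, pad with zeros
  __syncthreads();
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {   // tree reduction into partial[0]
    if (tid < s) partial[tid] += partial[tid + s];
    __syncthreads();
  }
  if (tid < f_size) f[tid] /= partial[0];         // partial[0] now holds the filter sum
}

Launched as a single block of, say, 64 threads with 64 * sizeof(float) bytes of dynamic shared memory (via <<<1, 64, 64 * sizeof(float)>>> or the equivalent hipLaunchKernelGGL call), this normalizes filters of up to 64 taps.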
c1ec00ab6aa17e3cce2d5656b72b75b87328c82b.cu
#include <cuda_runtime.h> #include <cufft.h> #include <stdio.h> #include "common.h" #include "cuda_matrix_utility.cuh" __global__ void cuda2DConvolveKernal(float* f, float* result, float* input, int input_x, int input_y, int f_x, int f_y){ uint thread_id = blockIdx.x * blockDim.x + threadIdx.x; int Px = (f_x - 1)/2; int Py = (f_y - 1)/2; extern __shared__ float shared_f[]; if(thread_id < f_x * f_y){ shared_f[thread_id] = f[thread_id]; } while(thread_id < input_x * input_y){ // TODO load f into shared memory? int x = thread_id % input_x; int y = thread_id / input_x; result[thread_id] = 0; for(int n = 0; n < f_x * f_y; n++){ int t1 = n % f_x; int t2 = n / f_x; int x_index = x - t1 + Px; int y_index = y - t2 + Py; float x_element = (x_index >= 0 && y_index >= 0) ? input[x_index + y_index * input_x] : 0; result[thread_id] += f[n] * x_element; } thread_id += blockDim.x * gridDim.x; } } __global__ void cudaMMSEEstKernal(float* dev_data, int size){ uint thread_id = blockIdx.x * blockDim.x + threadIdx.x; while(thread_id < size){ dev_data[thread_id] = 255/ (1 + expf(-0.04*(dev_data[thread_id] - 255/2))); thread_id += blockDim.x * gridDim.x; } } __global__ void cudaWeinerRxyKernal(float* x, float* y, float* Rxy, int x_w, int x_h, int k1, int k2){ uint thread_id = blockIdx.x * blockDim.x + threadIdx.x; int size = x_w * x_h; while(thread_id < size){ // TODO load to shared memory ? int w = thread_id % x_w; int h = thread_id % x_h; if ((w < x_w - abs(k1)) && (h < x_h - abs(k2))){ float y_element = ((w - k1) < 0 || (h - k2) < 0) ? 0 : y[(w - k1) + (h - k2) * x_w]; atomicAdd(Rxy, x[h*x_w + w] * y_element/(float)(256*size)); } thread_id += blockDim.x * gridDim.x; } } __global__ void cudaWeinerUpdateKernal(float* f, float* Rxy, float* Ryy, int f_w, int f_h){ uint thread_id = blockIdx.x * blockDim.x + threadIdx.x; int size = f_w * f_h; int P = (f_w - 1)/2; while(thread_id < size){ int r1 = thread_id % f_w; int r2 = thread_id / f_w; for(int n = 0; n < size; n++) { int t1 = n % f_w; int t2 = n / f_w; int x = (P + t1 - r1); int y = (P + t2 - r2); float Ryy_element = (x > 0 && y > 0) ? Ryy[y * f_w + x] : 0.0; f[thread_id] += Rxy[n] * Ryy_element; } thread_id += blockDim.x * gridDim.x; } } __global__ void cudaFIRNormalizeKernal(float* f, int f_size){ __shared__ double temp[1]; unsigned int threadId = threadIdx.x; if(threadId < f_size){ temp[0] += f[threadId]; } __syncthreads(); /* for (unsigned int s = blockDim.x/2 ; s > 0; s >> 1){ if(threadId < s){ temp[threadId] = temp[threadId] + temp[threadId + s]; __syncthreads(); } } */ if (threadId < f_size){ f[threadId] = f[threadId] / temp[0]; } } void call2DConvolveKernal(float* f, float* result, float* input, int input_x, int input_y, int f_x, int f_y){ // max threads per block is 1024, nblocks = 512 int shmem = f_x * f_y * sizeof(float); int input_size = input_x * input_y; int block_size = input_size < 1024 ? input_size : 1024; int nblocks = input_size / block_size < 512 ? input_size / block_size : 512; cuda2DConvolveKernal<<<nblocks, block_size, shmem>>>(f, result, input, input_x, input_y, f_x, f_y); } void callMMSEEstKernal(float* data, int size){ int block_size = (size < 1024) ? size : 1024; int nblocks = size/block_size < 512 ? 
size/block_size : 512; cudaMMSEEstKernal<<<nblocks, block_size>>>(data, size); } void callWeinerRxyKernal(float* x, float* y, float* Rxy, int x_w, int x_h, int k1, int k2){ int size = x_w * x_h; int block_size = 32; int nblocks = 32; cudaWeinerRxyKernal<<<block_size, nblocks>>>(x, y, Rxy, x_w, x_h, k1, k2); } void callWeinerUpdateKernal(float* f, float* Rxy, float* Ryy, int f_w, int f_h){ int size = f_w * f_h; int block_size = (size < 1024) ? size : 1024; int nblocks = size/block_size; cudaWeinerUpdateKernal<<<1, size>>>(f, Rxy, Ryy, f_w, f_h); } void callFIRNormalizeKernal(float* f, int size){ cudaFIRNormalizeKernal<<<2, 64>>>(f, size); }
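callWeinerUpdateKernal above computes a block_size capped at 1024 and a matching nblocks, but then launches cudaWeinerUpdateKernal with a single block of `size` threads, which exceeds the 1024-threads-per-block hardware limit whenever f_w*f_h > 1024; callWeinerRxyKernal also passes block_size and nblocks in swapped positions (harmless here only because both are 32). Since every kernel in the file already uses a grid-stride loop, a capped launch helper is enough. A small sketch (gridFor is a hypothetical name, not part of this file):

#include <cuda_runtime.h>

// Cap the block at 1024 threads and the grid at maxBlocks; the grid-stride
// loops inside the kernels cover whatever the grid does not.
static inline dim3 gridFor(int size, int block = 1024, int maxBlocks = 512) {
    int blocks = (size + block - 1) / block;       // ceiling division
    if (blocks > maxBlocks) blocks = maxBlocks;
    if (blocks < 1) blocks = 1;
    return dim3(blocks);
}

// e.g. cudaWeinerUpdateKernal<<<gridFor(f_w * f_h), 1024>>>(f, Rxy, Ryy, f_w, f_h);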
881cf0c093435d790251e900415dfa8e226da95a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "file_io.c" #include "utils.c" #include <assert.h> #include <pthread.h> #include <time.h> #define THREADS_X 16 #define THREADS_Y 16 #define BLOCKS_X 16 #define BLOCKS_Y 16 int devices = 0; //structure to send to each pthread struct arg_struct { float *cpu_distance; float *gpu_distance; unsigned int cities; float *return_pointer; int device_index; }; //function to be called by each thread void *tsp(void *arguements); int main(int argc, char * argv[]) { if(argc != 4){ fprintf(stderr, "usage: gpu_tsp cities max_devices city_distance_file\n"); } unsigned int cities = (unsigned int) atoi(argv[1]); unsigned int suggested_devices = (unsigned int) atoi(argv[2]); if(suggested_devices == 0){ suggested_devices = cities/100; } unsigned int distance_array_size = cities*cities*sizeof(float); FILE *fp=fopen(argv[3], "r"); //read the data from the file float *distance = (float *)malloc(distance_array_size); unsigned int *tour = (unsigned int *)malloc((cities+1)*sizeof(unsigned int *)); read_files(fp, distance, tour, cities); fclose(fp); //create thread structures struct arg_struct args[8]; pthread_t threads[8]; //get number of devices available CUDA_CALL(hipGetDeviceCount (&devices)); float *gpu_distance; //if suggested devices is lesser than max devices available, then change it if(suggested_devices < devices){ devices = suggested_devices; } //we are using a max of 8 pthreads, and each pthread is mapped to a device, so code can accomadate a max of 8 devices if(devices > 8){ devices = 8; } struct timespec start, end; double time_usec = 0.0; clock_gettime(CLOCK_MONOTONIC, &start); //run a thread for each device for(int i = 0; i < devices; i++){ CUDA_CALL(hipSetDevice(i)); CUDA_CALL(hipMalloc(&gpu_distance, distance_array_size)); CUDA_CALL(hipMemcpy(gpu_distance, distance, distance_array_size, hipMemcpyHostToDevice)); args[i].cpu_distance = distance; args[i].cities = cities; args[i].gpu_distance= gpu_distance; args[i].return_pointer = (float *)malloc(sizeof(float)); args[i].device_index = i; if (pthread_create(&threads[i], NULL, &tsp, (void *)&args[i]) != 0) { printf("Error in creating threads, exiting program\n"); return -1; } } //wait for threads for(int i = 0; i < devices; i++){ pthread_join(threads[i], NULL); } //get global minimum based on value returned by each thread float min_val = FLT_MAX; for(int i = 0; i < devices; i++){ if(args[i].return_pointer[0] < min_val){ min_val = args[i].return_pointer[0]; } } clock_gettime(CLOCK_MONOTONIC,&end); time_usec = (((double)end.tv_sec*1000 + (double)end.tv_nsec/1000000)-((double)start.tv_sec*1000 + (double)start.tv_nsec/1000000)); printf("Time taken = %lf milliseconds\n",time_usec); printf("Global minimum value is %f\n",min_val); free(distance); free(tour); hipFree(gpu_distance); } //GPU kernel that runs 2-opt algo in parallel __global__ void two_opt(unsigned int *cycle, float *distance, unsigned int cities, float *min_val_array, unsigned int* min_index_array){ //shared array to store most negetive decrease reported by each thread __shared__ float temp_min[THREADS_Y*THREADS_X]; __shared__ float temp_min_index[THREADS_Y*THREADS_X]; float min_val = FLT_MAX; float temp_val; float min_index = -1; for(int i = blockIdx.x*blockDim.x + threadIdx.x+1; i < cities; i = i + blockDim.x*gridDim.x){ for(int j = blockIdx.y*blockDim.y + threadIdx.y+1; j < cities; j = j + blockDim.y*gridDim.y){ temp_val = distance[cycle[i]*cities + cycle[j+1]]+distance[cycle[i-1]*cities + 
cycle[j]]-distance[cycle[j]*cities + cycle[j+1]]-distance[cycle[i-1]*cities + cycle[i]]; if(temp_val < min_val && i < j){ min_val = temp_val; min_index = i*cities+j; //this is being done to save space, both i and j values can be stored and retrieved this way } } } //total threads in each block = blockDim.x*blockDim.y //id of thread in block = threadIdx.x*blockDim.x + threadIdx.y int tid = threadIdx.x*blockDim.x + threadIdx.y; int bid = blockIdx.x*gridDim.x + blockIdx.y; temp_min[tid] = min_val; temp_min_index[tid] = min_index; //now reduce the min array so that the index = 0 in array has the min value and index for(unsigned int stride = 1; stride < blockDim.x*blockDim.y; stride*=2){ __syncthreads(); if(tid %(2*stride) == 0){ if(temp_min[tid] > temp_min[tid+stride]){ temp_min[tid] = temp_min[tid+stride]; temp_min_index[tid] = temp_min_index[tid+stride]; } } } //save the min value for the block into global index for the block if(tid == 0){ min_index_array[bid] = temp_min_index[0]; min_val_array[bid] = temp_min[0]; } } void *tsp(void *arguments){ struct arg_struct *args = (struct arg_struct *)arguments; float *gpu_distance = args -> gpu_distance; float *cpu_distance = args -> cpu_distance; unsigned int cities = args -> cities; float *return_pointer = args -> return_pointer; int device_index = args -> device_index; //set the device to run on based on what was sent by master thread CUDA_CALL(hipSetDevice(device_index)); dim3 gridDim(BLOCKS_X, BLOCKS_Y); dim3 blockDim(THREADS_X, THREADS_Y); int min_index; //stores the min value reported by each block float *cpu_min_val = (float *)malloc(BLOCKS_X*BLOCKS_Y*sizeof(float)); float *gpu_min_val; CUDA_CALL(hipMalloc(&gpu_min_val, BLOCKS_X*BLOCKS_Y*sizeof(float))); //stored the i and j values(as i*cities + j) of the swap that will result in the min value unsigned int *cpu_min_index = (unsigned int *)malloc(BLOCKS_X*BLOCKS_Y*sizeof(unsigned int)); unsigned int *gpu_min_index; CUDA_CALL(hipMalloc(&gpu_min_index, BLOCKS_X*BLOCKS_Y*sizeof(unsigned int))); //stores current cycle unsigned int cycle_size = (cities+1)*sizeof(unsigned int); unsigned int *cpu_cycle = (unsigned int *)malloc(cycle_size); unsigned int *global_optimal_cycle = (unsigned int *)malloc(cycle_size); unsigned int *gpu_cycle; CUDA_CALL(hipMalloc(&gpu_cycle, cycle_size)); //run 8 streams const int num_streams = 8; hipStream_t streams[num_streams]; hipStream_t current_stream; int stream_index = 0; for (int i = 0; i < num_streams; i++) { hipStreamCreate(&streams[i]); } float global_minima = FLT_MAX; for(int i = device_index; i < cities; i = i + devices){ //allocate initial cities cycle allocate_cycle(cpu_cycle, i, cities); current_stream = streams[stream_index%num_streams]; while(true){ //get current cost float temp_cost = get_total_cost(cpu_cycle, cpu_distance, cities); //move current cycle to gpu and find best swap CUDA_CALL(hipMemcpyAsync(gpu_cycle, cpu_cycle, cycle_size, hipMemcpyHostToDevice, current_stream)); hipLaunchKernelGGL(( two_opt), dim3(gridDim), dim3(blockDim), 0, current_stream, gpu_cycle, gpu_distance, cities, gpu_min_val, gpu_min_index); //move best reported swap and most decrease of that swap to CPU CUDA_CALL(hipMemcpyAsync(cpu_min_val, gpu_min_val, BLOCKS_X*BLOCKS_Y*sizeof(float), hipMemcpyDeviceToHost,current_stream)); CUDA_CALL(hipMemcpyAsync(cpu_min_index, gpu_min_index, BLOCKS_X*BLOCKS_Y*sizeof(int), hipMemcpyDeviceToHost,current_stream)); hipStreamSynchronize(current_stream); //using this, calculated best values across blocks min_index = 
get_min_val(cpu_min_val,BLOCKS_X*BLOCKS_Y); //if the benefit of best swap is less than an increase of 0.1, we are close to minima, can exit if(cpu_min_val[min_index] >= -.1){ if(global_minima > temp_cost){ global_minima = temp_cost; memcpy(global_optimal_cycle, cpu_cycle, cycle_size); } break; } else{ //otherwise find best indices for the swap and update the cycle of cities int min_agg_index = cpu_min_index[min_index]; update_cycle(cpu_cycle, min_agg_index/cities, min_agg_index%cities); } } stream_index++; } return_pointer[0] = global_minima; hipFree(gpu_min_val); hipFree(gpu_min_index); hipFree(gpu_cycle); free(cpu_cycle); free(cpu_min_val); free(cpu_min_index); return NULL; }
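The inner double loop of two_opt above evaluates the standard 2-opt gain: reversing the segment cycle[i..j] removes edges (i-1,i) and (j,j+1) and adds (i-1,j) and (i,j+1). A host-side restatement of that delta (a sketch for clarity, not code from this project):

// Change in tour length if cycle[i..j] is reversed; negative means improvement.
static float twoOptDelta(const unsigned int* cycle, const float* dist,
                         unsigned int cities, int i, int j) {
    unsigned int a = cycle[i - 1], b = cycle[i];
    unsigned int c = cycle[j],     d = cycle[j + 1];
    return dist[a * cities + c] + dist[b * cities + d]
         - dist[a * cities + b] - dist[c * cities + d];
}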
881cf0c093435d790251e900415dfa8e226da95a.cu
#include "file_io.c" #include "utils.c" #include <assert.h> #include <pthread.h> #include <time.h> #define THREADS_X 16 #define THREADS_Y 16 #define BLOCKS_X 16 #define BLOCKS_Y 16 int devices = 0; //structure to send to each pthread struct arg_struct { float *cpu_distance; float *gpu_distance; unsigned int cities; float *return_pointer; int device_index; }; //function to be called by each thread void *tsp(void *arguements); int main(int argc, char * argv[]) { if(argc != 4){ fprintf(stderr, "usage: gpu_tsp cities max_devices city_distance_file\n"); } unsigned int cities = (unsigned int) atoi(argv[1]); unsigned int suggested_devices = (unsigned int) atoi(argv[2]); if(suggested_devices == 0){ suggested_devices = cities/100; } unsigned int distance_array_size = cities*cities*sizeof(float); FILE *fp=fopen(argv[3], "r"); //read the data from the file float *distance = (float *)malloc(distance_array_size); unsigned int *tour = (unsigned int *)malloc((cities+1)*sizeof(unsigned int *)); read_files(fp, distance, tour, cities); fclose(fp); //create thread structures struct arg_struct args[8]; pthread_t threads[8]; //get number of devices available CUDA_CALL(cudaGetDeviceCount (&devices)); float *gpu_distance; //if suggested devices is lesser than max devices available, then change it if(suggested_devices < devices){ devices = suggested_devices; } //we are using a max of 8 pthreads, and each pthread is mapped to a device, so code can accomadate a max of 8 devices if(devices > 8){ devices = 8; } struct timespec start, end; double time_usec = 0.0; clock_gettime(CLOCK_MONOTONIC, &start); //run a thread for each device for(int i = 0; i < devices; i++){ CUDA_CALL(cudaSetDevice(i)); CUDA_CALL(cudaMalloc(&gpu_distance, distance_array_size)); CUDA_CALL(cudaMemcpy(gpu_distance, distance, distance_array_size, cudaMemcpyHostToDevice)); args[i].cpu_distance = distance; args[i].cities = cities; args[i].gpu_distance= gpu_distance; args[i].return_pointer = (float *)malloc(sizeof(float)); args[i].device_index = i; if (pthread_create(&threads[i], NULL, &tsp, (void *)&args[i]) != 0) { printf("Error in creating threads, exiting program\n"); return -1; } } //wait for threads for(int i = 0; i < devices; i++){ pthread_join(threads[i], NULL); } //get global minimum based on value returned by each thread float min_val = FLT_MAX; for(int i = 0; i < devices; i++){ if(args[i].return_pointer[0] < min_val){ min_val = args[i].return_pointer[0]; } } clock_gettime(CLOCK_MONOTONIC,&end); time_usec = (((double)end.tv_sec*1000 + (double)end.tv_nsec/1000000)-((double)start.tv_sec*1000 + (double)start.tv_nsec/1000000)); printf("Time taken = %lf milliseconds\n",time_usec); printf("Global minimum value is %f\n",min_val); free(distance); free(tour); cudaFree(gpu_distance); } //GPU kernel that runs 2-opt algo in parallel __global__ void two_opt(unsigned int *cycle, float *distance, unsigned int cities, float *min_val_array, unsigned int* min_index_array){ //shared array to store most negetive decrease reported by each thread __shared__ float temp_min[THREADS_Y*THREADS_X]; __shared__ float temp_min_index[THREADS_Y*THREADS_X]; float min_val = FLT_MAX; float temp_val; float min_index = -1; for(int i = blockIdx.x*blockDim.x + threadIdx.x+1; i < cities; i = i + blockDim.x*gridDim.x){ for(int j = blockIdx.y*blockDim.y + threadIdx.y+1; j < cities; j = j + blockDim.y*gridDim.y){ temp_val = distance[cycle[i]*cities + cycle[j+1]]+distance[cycle[i-1]*cities + cycle[j]]-distance[cycle[j]*cities + cycle[j+1]]-distance[cycle[i-1]*cities + cycle[i]]; 
if(temp_val < min_val && i < j){ min_val = temp_val; min_index = i*cities+j; //this is being done to save space, both i and j values can be stored and retrieved this way } } } //total threads in each block = blockDim.x*blockDim.y //id of thread in block = threadIdx.x*blockDim.x + threadIdx.y int tid = threadIdx.x*blockDim.x + threadIdx.y; int bid = blockIdx.x*gridDim.x + blockIdx.y; temp_min[tid] = min_val; temp_min_index[tid] = min_index; //now reduce the min array so that the index = 0 in array has the min value and index for(unsigned int stride = 1; stride < blockDim.x*blockDim.y; stride*=2){ __syncthreads(); if(tid %(2*stride) == 0){ if(temp_min[tid] > temp_min[tid+stride]){ temp_min[tid] = temp_min[tid+stride]; temp_min_index[tid] = temp_min_index[tid+stride]; } } } //save the min value for the block into global index for the block if(tid == 0){ min_index_array[bid] = temp_min_index[0]; min_val_array[bid] = temp_min[0]; } } void *tsp(void *arguments){ struct arg_struct *args = (struct arg_struct *)arguments; float *gpu_distance = args -> gpu_distance; float *cpu_distance = args -> cpu_distance; unsigned int cities = args -> cities; float *return_pointer = args -> return_pointer; int device_index = args -> device_index; //set the device to run on based on what was sent by master thread CUDA_CALL(cudaSetDevice(device_index)); dim3 gridDim(BLOCKS_X, BLOCKS_Y); dim3 blockDim(THREADS_X, THREADS_Y); int min_index; //stores the min value reported by each block float *cpu_min_val = (float *)malloc(BLOCKS_X*BLOCKS_Y*sizeof(float)); float *gpu_min_val; CUDA_CALL(cudaMalloc(&gpu_min_val, BLOCKS_X*BLOCKS_Y*sizeof(float))); //stored the i and j values(as i*cities + j) of the swap that will result in the min value unsigned int *cpu_min_index = (unsigned int *)malloc(BLOCKS_X*BLOCKS_Y*sizeof(unsigned int)); unsigned int *gpu_min_index; CUDA_CALL(cudaMalloc(&gpu_min_index, BLOCKS_X*BLOCKS_Y*sizeof(unsigned int))); //stores current cycle unsigned int cycle_size = (cities+1)*sizeof(unsigned int); unsigned int *cpu_cycle = (unsigned int *)malloc(cycle_size); unsigned int *global_optimal_cycle = (unsigned int *)malloc(cycle_size); unsigned int *gpu_cycle; CUDA_CALL(cudaMalloc(&gpu_cycle, cycle_size)); //run 8 streams const int num_streams = 8; cudaStream_t streams[num_streams]; cudaStream_t current_stream; int stream_index = 0; for (int i = 0; i < num_streams; i++) { cudaStreamCreate(&streams[i]); } float global_minima = FLT_MAX; for(int i = device_index; i < cities; i = i + devices){ //allocate initial cities cycle allocate_cycle(cpu_cycle, i, cities); current_stream = streams[stream_index%num_streams]; while(true){ //get current cost float temp_cost = get_total_cost(cpu_cycle, cpu_distance, cities); //move current cycle to gpu and find best swap CUDA_CALL(cudaMemcpyAsync(gpu_cycle, cpu_cycle, cycle_size, cudaMemcpyHostToDevice, current_stream)); two_opt<<<gridDim, blockDim, 0, current_stream>>>(gpu_cycle, gpu_distance, cities, gpu_min_val, gpu_min_index); //move best reported swap and most decrease of that swap to CPU CUDA_CALL(cudaMemcpyAsync(cpu_min_val, gpu_min_val, BLOCKS_X*BLOCKS_Y*sizeof(float), cudaMemcpyDeviceToHost,current_stream)); CUDA_CALL(cudaMemcpyAsync(cpu_min_index, gpu_min_index, BLOCKS_X*BLOCKS_Y*sizeof(int), cudaMemcpyDeviceToHost,current_stream)); cudaStreamSynchronize(current_stream); //using this, calculated best values across blocks min_index = get_min_val(cpu_min_val,BLOCKS_X*BLOCKS_Y); //if the benefit of best swap is less than an increase of 0.1, we are close to minima, can 
exit if(cpu_min_val[min_index] >= -.1){ if(global_minima > temp_cost){ global_minima = temp_cost; memcpy(global_optimal_cycle, cpu_cycle, cycle_size); } break; } else{ //otherwise find best indices for the swap and update the cycle of cities int min_agg_index = cpu_min_index[min_index]; update_cycle(cpu_cycle, min_agg_index/cities, min_agg_index%cities); } } stream_index++; } return_pointer[0] = global_minima; cudaFree(gpu_min_val); cudaFree(gpu_min_index); cudaFree(gpu_cycle); free(cpu_cycle); free(cpu_min_val); free(cpu_min_index); return NULL; }
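Inside two_opt the per-block winner is found with a shared-memory tree reduction, but temp_min_index is declared float, so the packed index i*cities+j is rounded once it exceeds 2^24 (roughly 4096 cities). A sketch of the same block-level argmin that keeps the index in an int array (an illustration assuming a 256-thread block, not this project's code):

#include <cfloat>

__global__ void blockArgMin(const float* vals, int n, float* outVal, int* outIdx) {
    __shared__ float sVal[256];              // assumes blockDim.x == 256
    __shared__ int   sIdx[256];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + tid;
    sVal[tid] = (gid < n) ? vals[gid] : FLT_MAX;
    sIdx[tid] = (gid < n) ? gid : -1;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {   // reduce the minimum into lane 0
        if (tid < s && sVal[tid + s] < sVal[tid]) {
            sVal[tid] = sVal[tid + s];
            sIdx[tid] = sIdx[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0) { outVal[blockIdx.x] = sVal[0]; outIdx[blockIdx.x] = sIdx[0]; }
}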
127167208f32a00726cb4d4a06516fc5540cec3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernels_hip.cuh" #define THREADS_PER_BLOCK 256 #define BLOCK_COUNT(SIZE) ((SIZE) / THREADS_PER_BLOCK + (((SIZE) % THREADS_PER_BLOCK) ? 1 : 0)) __device__ void d_dot(const float* v1, const float* v2, float* out, int size) { __shared__ float cache[THREADS_PER_BLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; cache[threadIdx.x] = 0.f; while (i < size) { cache[threadIdx.x] += v1[i] * v2[i]; i += gridDim.x * blockDim.x; } __syncthreads(); i = THREADS_PER_BLOCK / 2; while (i > 0) { if (threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i]; __syncthreads(); i /= 2; } if (threadIdx.x == 0) atomicAdd(out, cache[0]); } __global__ void k_dot(const float* V1, const float* V2, float* V3, int size) { d_dot(V1, V2, V3, size); } __global__ void k_update(float learn_rate, float* expected, float* data, float* bias, float* weights, int size, float* result) { d_dot(data, weights, result, size); if (blockIdx.x == 0 && threadIdx.x == 0) { *result = (*bias + *result) > 0 ? 1.f : -1.f; *result = learn_rate * (*expected - *result); } } /// Compute dot product of two vectors float* dot(float* a, float* b, int size) { float* c; gpuErrchk(hipMalloc(&c, 1 * sizeof(float))); int bc = BLOCK_COUNT(size); k_dot << <bc, THREADS_PER_BLOCK >> > (a, b, c, size); return c; } /// Compute updpate value for training float* update(float learn_rate, float* expected, float* data, float* bias, float* weights, int size) { float* result; gpuErrchk(hipMalloc(&result, 1 * sizeof(float))); int bc = BLOCK_COUNT(size); k_update << <bc, THREADS_PER_BLOCK >> > (learn_rate, expected, data, bias, weights, size, result); return result; } __global__ void k_scale(float* scaler, float* vector, float* result, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; float s = *scaler; if (index < size) { result[index] += vector[index] * s; } } /// Scale vector with a given scaler and save to result void scale(float* scaler, float* vector, float* result, int size) { k_scale << <1, size >> > (scaler, vector, result, size); } __global__ void k_classify(float* data, float* weights, float* bias, float* result, int length, int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < length) { float* current = data + index * size; float prob = *bias; for (int i = 0; i < size; i++) { prob += current[i] * weights[i]; } result[index] = prob > 0 ? 1.f : -1.f; } } /// Classify input data with given weights and bias float* classify(float* data, float* weights, float* bias, int length, int size) { float* result; gpuErrchk(hipMalloc(&result, length * sizeof(float))); int bc = BLOCK_COUNT(length); k_classify << < bc, THREADS_PER_BLOCK >> > (data, weights, bias, result, length, size); return result; }
127167208f32a00726cb4d4a06516fc5540cec3e.cu
#include "kernels.cuh" #define THREADS_PER_BLOCK 256 #define BLOCK_COUNT(SIZE) ((SIZE) / THREADS_PER_BLOCK + (((SIZE) % THREADS_PER_BLOCK) ? 1 : 0)) __device__ void d_dot(const float* v1, const float* v2, float* out, int size) { __shared__ float cache[THREADS_PER_BLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; cache[threadIdx.x] = 0.f; while (i < size) { cache[threadIdx.x] += v1[i] * v2[i]; i += gridDim.x * blockDim.x; } __syncthreads(); i = THREADS_PER_BLOCK / 2; while (i > 0) { if (threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i]; __syncthreads(); i /= 2; } if (threadIdx.x == 0) atomicAdd(out, cache[0]); } __global__ void k_dot(const float* V1, const float* V2, float* V3, int size) { d_dot(V1, V2, V3, size); } __global__ void k_update(float learn_rate, float* expected, float* data, float* bias, float* weights, int size, float* result) { d_dot(data, weights, result, size); if (blockIdx.x == 0 && threadIdx.x == 0) { *result = (*bias + *result) > 0 ? 1.f : -1.f; *result = learn_rate * (*expected - *result); } } /// Compute dot product of two vectors float* dot(float* a, float* b, int size) { float* c; gpuErrchk(cudaMalloc(&c, 1 * sizeof(float))); int bc = BLOCK_COUNT(size); k_dot << <bc, THREADS_PER_BLOCK >> > (a, b, c, size); return c; } /// Compute updpate value for training float* update(float learn_rate, float* expected, float* data, float* bias, float* weights, int size) { float* result; gpuErrchk(cudaMalloc(&result, 1 * sizeof(float))); int bc = BLOCK_COUNT(size); k_update << <bc, THREADS_PER_BLOCK >> > (learn_rate, expected, data, bias, weights, size, result); return result; } __global__ void k_scale(float* scaler, float* vector, float* result, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; float s = *scaler; if (index < size) { result[index] += vector[index] * s; } } /// Scale vector with a given scaler and save to result void scale(float* scaler, float* vector, float* result, int size) { k_scale << <1, size >> > (scaler, vector, result, size); } __global__ void k_classify(float* data, float* weights, float* bias, float* result, int length, int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < length) { float* current = data + index * size; float prob = *bias; for (int i = 0; i < size; i++) { prob += current[i] * weights[i]; } result[index] = prob > 0 ? 1.f : -1.f; } } /// Classify input data with given weights and bias float* classify(float* data, float* weights, float* bias, int length, int size) { float* result; gpuErrchk(cudaMalloc(&result, length * sizeof(float))); int bc = BLOCK_COUNT(length); k_classify << < bc, THREADS_PER_BLOCK >> > (data, weights, bias, result, length, size); return result; }
1880167d60b5f85de0bc70148ba246b32032090e.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2019-2021 by XGBoost Contributors * * \file data.cu * \brief Handles setting metainfo from array interface. */ #include "xgboost/data.h" #include "xgboost/logging.h" #include "xgboost/json.h" #include "array_interface.h" #include "../common/device_helpers.cuh" #include "device_adapter_hip.cuh" #include "simple_dmatrix.h" namespace xgboost { void CopyInfoImpl(ArrayInterface column, HostDeviceVector<float>* out) { auto SetDeviceToPtr = [](void* ptr) { hipPointerAttribute_t attr; dh::safe_cuda(hipPointerGetAttributes(&attr, ptr)); int32_t ptr_device = attr.device; dh::safe_cuda(hipSetDevice(ptr_device)); return ptr_device; }; auto ptr_device = SetDeviceToPtr(column.data); out->SetDevice(ptr_device); out->Resize(column.num_rows); auto p_dst = thrust::device_pointer_cast(out->DevicePointer()); dh::LaunchN(column.num_rows, [=] __device__(size_t idx) { p_dst[idx] = column.GetElement(idx, 0); }); } namespace { auto SetDeviceToPtr(void *ptr) { hipPointerAttribute_t attr; dh::safe_cuda(hipPointerGetAttributes(&attr, ptr)); int32_t ptr_device = attr.device; dh::safe_cuda(hipSetDevice(ptr_device)); return ptr_device; } } // anonymous namespace void CopyGroupInfoImpl(ArrayInterface column, std::vector<bst_group_t>* out) { CHECK(column.type != ArrayInterface::kF4 && column.type != ArrayInterface::kF8) << "Expected integer for group info."; auto ptr_device = SetDeviceToPtr(column.data); CHECK_EQ(ptr_device, dh::CurrentDevice()); dh::TemporaryArray<bst_group_t> temp(column.num_rows); auto d_tmp = temp.data(); dh::LaunchN(column.num_rows, [=] __device__(size_t idx) { d_tmp[idx] = column.GetElement<size_t>(idx, 0); }); auto length = column.num_rows; out->resize(length + 1); out->at(0) = 0; thrust::copy(temp.data(), temp.data() + length, out->begin() + 1); std::partial_sum(out->begin(), out->end(), out->begin()); } void CopyQidImpl(ArrayInterface array_interface, std::vector<bst_group_t> *p_group_ptr) { auto &group_ptr_ = *p_group_ptr; auto it = dh::MakeTransformIterator<uint32_t>( thrust::make_counting_iterator(0ul), [array_interface] __device__(size_t i) { return array_interface.GetElement<uint32_t>(i, 0); }); dh::caching_device_vector<bool> flag(1); auto d_flag = dh::ToSpan(flag); auto d = SetDeviceToPtr(array_interface.data); dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; }); dh::LaunchN(array_interface.num_rows - 1, [=] __device__(size_t i) { if (array_interface.GetElement<uint32_t>(i, 0) > array_interface.GetElement<uint32_t>(i + 1, 0)) { d_flag[0] = false; } }); bool non_dec = true; dh::safe_cuda(hipMemcpy(&non_dec, flag.data().get(), sizeof(bool), hipMemcpyDeviceToHost)); CHECK(non_dec) << "`qid` must be sorted in increasing order along with data."; size_t bytes = 0; dh::caching_device_vector<uint32_t> out(array_interface.num_rows); dh::caching_device_vector<uint32_t> cnt(array_interface.num_rows); HostDeviceVector<int> d_num_runs_out(1, 0, d); hipcub::DeviceRunLengthEncode::Encode( nullptr, bytes, it, out.begin(), cnt.begin(), d_num_runs_out.DevicePointer(), array_interface.num_rows); dh::caching_device_vector<char> tmp(bytes); hipcub::DeviceRunLengthEncode::Encode( tmp.data().get(), bytes, it, out.begin(), cnt.begin(), d_num_runs_out.DevicePointer(), array_interface.num_rows); auto h_num_runs_out = d_num_runs_out.HostSpan()[0]; group_ptr_.clear(); group_ptr_.resize(h_num_runs_out + 1, 0); dh::XGBCachingDeviceAllocator<char> alloc; thrust::inclusive_scan(thrust::hip::par(alloc), cnt.begin(), cnt.begin() + 
h_num_runs_out, cnt.begin()); thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out, group_ptr_.begin() + 1); } namespace { // thrust::all_of tries to copy lambda function. struct AllOfOp { __device__ bool operator()(float w) { return w >= 0; } }; } // anonymous namespace void MetaInfo::SetInfo(const char * c_key, std::string const& interface_str) { Json j_interface = Json::Load({interface_str.c_str(), interface_str.size()}); auto const& j_arr = get<Array>(j_interface); CHECK_EQ(j_arr.size(), 1) << "MetaInfo: " << c_key << ". " << ArrayInterfaceErrors::Dimension(1); ArrayInterface array_interface(interface_str); std::string key{c_key}; array_interface.AsColumnVector(); CHECK(!array_interface.valid.Data()) << "Meta info " << key << " should be dense, found validity mask"; if (array_interface.num_rows == 0) { return; } if (key == "label") { CopyInfoImpl(array_interface, &labels_); } else if (key == "weight") { CopyInfoImpl(array_interface, &weights_); auto ptr = weights_.ConstDevicePointer(); auto valid = thrust::all_of(thrust::device, ptr, ptr + weights_.Size(), AllOfOp{}); CHECK(valid) << "Weights must be positive values."; } else if (key == "base_margin") { CopyInfoImpl(array_interface, &base_margin_); } else if (key == "group") { CopyGroupInfoImpl(array_interface, &group_ptr_); return; } else if (key == "qid") { CopyQidImpl(array_interface, &group_ptr_); return; } else if (key == "label_lower_bound") { CopyInfoImpl(array_interface, &labels_lower_bound_); return; } else if (key == "label_upper_bound") { CopyInfoImpl(array_interface, &labels_upper_bound_); return; } else if (key == "feature_weights") { CopyInfoImpl(array_interface, &feature_weigths); auto d_feature_weights = feature_weigths.ConstDeviceSpan(); auto valid = thrust::all_of( thrust::device, d_feature_weights.data(), d_feature_weights.data() + d_feature_weights.size(), AllOfOp{}); CHECK(valid) << "Feature weight must be greater than 0."; return; } else { LOG(FATAL) << "Unknown metainfo: " << key; } } template <typename AdapterT> DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread, const std::string& cache_prefix) { CHECK_EQ(cache_prefix.size(), 0) << "Device memory construction is not currently supported with external " "memory."; return new data::SimpleDMatrix(adapter, missing, nthread); } template DMatrix* DMatrix::Create<data::CudfAdapter>( data::CudfAdapter* adapter, float missing, int nthread, const std::string& cache_prefix); template DMatrix* DMatrix::Create<data::CupyAdapter>( data::CupyAdapter* adapter, float missing, int nthread, const std::string& cache_prefix); } // namespace xgboost
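CopyInfoImpl and its helpers above select the device by asking the runtime which GPU owns the incoming pointer before touching the data (hipPointerGetAttributes in this HIP variant). The same idiom in isolation (a sketch, not XGBoost's exact helper; deviceOf is a hypothetical name):

#include <cuda_runtime.h>

// Return the device that owns a CUDA allocation and make it current.
int deviceOf(const void* ptr) {
    cudaPointerAttributes attr{};
    cudaPointerGetAttributes(&attr, ptr);   // error checking elided
    cudaSetDevice(attr.device);
    return attr.device;
}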
1880167d60b5f85de0bc70148ba246b32032090e.cu
/*! * Copyright 2019-2021 by XGBoost Contributors * * \file data.cu * \brief Handles setting metainfo from array interface. */ #include "xgboost/data.h" #include "xgboost/logging.h" #include "xgboost/json.h" #include "array_interface.h" #include "../common/device_helpers.cuh" #include "device_adapter.cuh" #include "simple_dmatrix.h" namespace xgboost { void CopyInfoImpl(ArrayInterface column, HostDeviceVector<float>* out) { auto SetDeviceToPtr = [](void* ptr) { cudaPointerAttributes attr; dh::safe_cuda(cudaPointerGetAttributes(&attr, ptr)); int32_t ptr_device = attr.device; dh::safe_cuda(cudaSetDevice(ptr_device)); return ptr_device; }; auto ptr_device = SetDeviceToPtr(column.data); out->SetDevice(ptr_device); out->Resize(column.num_rows); auto p_dst = thrust::device_pointer_cast(out->DevicePointer()); dh::LaunchN(column.num_rows, [=] __device__(size_t idx) { p_dst[idx] = column.GetElement(idx, 0); }); } namespace { auto SetDeviceToPtr(void *ptr) { cudaPointerAttributes attr; dh::safe_cuda(cudaPointerGetAttributes(&attr, ptr)); int32_t ptr_device = attr.device; dh::safe_cuda(cudaSetDevice(ptr_device)); return ptr_device; } } // anonymous namespace void CopyGroupInfoImpl(ArrayInterface column, std::vector<bst_group_t>* out) { CHECK(column.type != ArrayInterface::kF4 && column.type != ArrayInterface::kF8) << "Expected integer for group info."; auto ptr_device = SetDeviceToPtr(column.data); CHECK_EQ(ptr_device, dh::CurrentDevice()); dh::TemporaryArray<bst_group_t> temp(column.num_rows); auto d_tmp = temp.data(); dh::LaunchN(column.num_rows, [=] __device__(size_t idx) { d_tmp[idx] = column.GetElement<size_t>(idx, 0); }); auto length = column.num_rows; out->resize(length + 1); out->at(0) = 0; thrust::copy(temp.data(), temp.data() + length, out->begin() + 1); std::partial_sum(out->begin(), out->end(), out->begin()); } void CopyQidImpl(ArrayInterface array_interface, std::vector<bst_group_t> *p_group_ptr) { auto &group_ptr_ = *p_group_ptr; auto it = dh::MakeTransformIterator<uint32_t>( thrust::make_counting_iterator(0ul), [array_interface] __device__(size_t i) { return array_interface.GetElement<uint32_t>(i, 0); }); dh::caching_device_vector<bool> flag(1); auto d_flag = dh::ToSpan(flag); auto d = SetDeviceToPtr(array_interface.data); dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; }); dh::LaunchN(array_interface.num_rows - 1, [=] __device__(size_t i) { if (array_interface.GetElement<uint32_t>(i, 0) > array_interface.GetElement<uint32_t>(i + 1, 0)) { d_flag[0] = false; } }); bool non_dec = true; dh::safe_cuda(cudaMemcpy(&non_dec, flag.data().get(), sizeof(bool), cudaMemcpyDeviceToHost)); CHECK(non_dec) << "`qid` must be sorted in increasing order along with data."; size_t bytes = 0; dh::caching_device_vector<uint32_t> out(array_interface.num_rows); dh::caching_device_vector<uint32_t> cnt(array_interface.num_rows); HostDeviceVector<int> d_num_runs_out(1, 0, d); cub::DeviceRunLengthEncode::Encode( nullptr, bytes, it, out.begin(), cnt.begin(), d_num_runs_out.DevicePointer(), array_interface.num_rows); dh::caching_device_vector<char> tmp(bytes); cub::DeviceRunLengthEncode::Encode( tmp.data().get(), bytes, it, out.begin(), cnt.begin(), d_num_runs_out.DevicePointer(), array_interface.num_rows); auto h_num_runs_out = d_num_runs_out.HostSpan()[0]; group_ptr_.clear(); group_ptr_.resize(h_num_runs_out + 1, 0); dh::XGBCachingDeviceAllocator<char> alloc; thrust::inclusive_scan(thrust::cuda::par(alloc), cnt.begin(), cnt.begin() + h_num_runs_out, cnt.begin()); thrust::copy(cnt.begin(), cnt.begin() + 
h_num_runs_out, group_ptr_.begin() + 1); } namespace { // thrust::all_of tries to copy lambda function. struct AllOfOp { __device__ bool operator()(float w) { return w >= 0; } }; } // anonymous namespace void MetaInfo::SetInfo(const char * c_key, std::string const& interface_str) { Json j_interface = Json::Load({interface_str.c_str(), interface_str.size()}); auto const& j_arr = get<Array>(j_interface); CHECK_EQ(j_arr.size(), 1) << "MetaInfo: " << c_key << ". " << ArrayInterfaceErrors::Dimension(1); ArrayInterface array_interface(interface_str); std::string key{c_key}; array_interface.AsColumnVector(); CHECK(!array_interface.valid.Data()) << "Meta info " << key << " should be dense, found validity mask"; if (array_interface.num_rows == 0) { return; } if (key == "label") { CopyInfoImpl(array_interface, &labels_); } else if (key == "weight") { CopyInfoImpl(array_interface, &weights_); auto ptr = weights_.ConstDevicePointer(); auto valid = thrust::all_of(thrust::device, ptr, ptr + weights_.Size(), AllOfOp{}); CHECK(valid) << "Weights must be positive values."; } else if (key == "base_margin") { CopyInfoImpl(array_interface, &base_margin_); } else if (key == "group") { CopyGroupInfoImpl(array_interface, &group_ptr_); return; } else if (key == "qid") { CopyQidImpl(array_interface, &group_ptr_); return; } else if (key == "label_lower_bound") { CopyInfoImpl(array_interface, &labels_lower_bound_); return; } else if (key == "label_upper_bound") { CopyInfoImpl(array_interface, &labels_upper_bound_); return; } else if (key == "feature_weights") { CopyInfoImpl(array_interface, &feature_weigths); auto d_feature_weights = feature_weigths.ConstDeviceSpan(); auto valid = thrust::all_of( thrust::device, d_feature_weights.data(), d_feature_weights.data() + d_feature_weights.size(), AllOfOp{}); CHECK(valid) << "Feature weight must be greater than 0."; return; } else { LOG(FATAL) << "Unknown metainfo: " << key; } } template <typename AdapterT> DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread, const std::string& cache_prefix) { CHECK_EQ(cache_prefix.size(), 0) << "Device memory construction is not currently supported with external " "memory."; return new data::SimpleDMatrix(adapter, missing, nthread); } template DMatrix* DMatrix::Create<data::CudfAdapter>( data::CudfAdapter* adapter, float missing, int nthread, const std::string& cache_prefix); template DMatrix* DMatrix::Create<data::CupyAdapter>( data::CupyAdapter* adapter, float missing, int nthread, const std::string& cache_prefix); } // namespace xgboost
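CopyQidImpl above relies on the standard two-phase CUB pattern: the first DeviceRunLengthEncode::Encode call with a null temp-storage pointer only reports the workspace size, and the second call does the encoding. The pattern in isolation (a minimal sketch following the usual CUB convention, not XGBoost's code; encodeRuns is a hypothetical name):

#include <cub/cub.cuh>
#include <cuda_runtime.h>

void encodeRuns(const int* d_in, int n, int* d_unique, int* d_counts, int* d_num_runs) {
    void*  d_temp = nullptr;
    size_t temp_bytes = 0;
    // Pass 1: query the required temporary storage size.
    cub::DeviceRunLengthEncode::Encode(d_temp, temp_bytes, d_in,
                                       d_unique, d_counts, d_num_runs, n);
    cudaMalloc(&d_temp, temp_bytes);
    // Pass 2: perform the run-length encoding.
    cub::DeviceRunLengthEncode::Encode(d_temp, temp_bytes, d_in,
                                       d_unique, d_counts, d_num_runs, n);
    cudaFree(d_temp);
}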
d7cc2b29df2310f2fde791bdf1a92b5d09d9c41a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <set> #include <map> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include <cmath> using namespace std; #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 #define GLOBAL_MAX_EDGES_PER_SHARD 33554432 #define ERR 0.01 void safe_call(hipError_t ret, int line) { if(ret!=hipSuccess) { printf("Error at line %d : %s\n",line,hipGetErrorString(ret)); exit(-1); } } typedef struct __interval { int start; int end; } interval_t; typedef struct __edge { int src; int dest; int val; } edge_t; typedef struct __vertex { double val; int numInEdges; int numOutEdges; } vertex_t; typedef struct __shard { int E; int Vstart; int Vend; int * from; // int * to; int * vmap; double * updates; } shard_t; __device__ bool d_over; __global__ void reset() { d_over = false; } __device__ int binarySearch(int * arr, int l, int r, int x) { if(x < arr[l]) return l; while (l <= r) { int m = l+((r-l)/2); if (arr[m] == x) { int t = m; while(t<r && arr[t+1] == arr[t]) { t++; } return t+1; } if (arr[m] < x) l = m + 1; else r = m - 1; } return l; } __global__ void gather_pr_edge(const shard_t * shard, vertex_t * vertices, int num_vertices, int current_depth) { if(current_depth > 0) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < shard->E) { int s = shard->from[id]; double u = vertices[s].val/vertices[s].numOutEdges; shard->updates[id] = u; } } } __global__ void apply_pr_edge(const shard_t * shard, vertex_t * vertices, int num_vertices, int current_depth) { int id = blockDim.x*blockIdx.x + threadIdx.x; int vid = id + shard->Vstart; if(vid <= shard->Vend) { if(current_depth == 0) { d_over = true; vertices[vid].val = 1.0; } else { int i; double sum=0; if(id == 0) i = 0; else i = shard->vmap[id-1]; for(; i < shard->vmap[id]; i++) { sum += shard->updates[i]; } double newval = 0.15 + 0.85 * sum; if(fabs(vertices[vid].val - newval) >= ERR) { d_over=true; } vertices[vid].val = newval; } } } bool cost(const edge_t &a, const edge_t &b) { return ((a.src < b.src) || (a.src == b.src && a.dest < b.dest)); } int main(int argc, char * argv[]) { struct timeval t1,t2; static char * filename; if(argc!=2) { printf("./a.out <filename>\n"); exit(-1); } else { filename = argv[1]; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(hipSetDevice(0)); printf("Begin file reading...\n"); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j, k; fscanf(fp,"%d %d",&num_vertices,&num_edges); //We are always going to have atleast 2 shards to have double bufferring int ns = num_edges / GLOBAL_MAX_EDGES_PER_SHARD; int MAX_EDGES_PER_SHARD = (ns == 0) ? (num_edges + 1)/2 : (num_edges + 1)/(ns + 1); //We do this to balance the no of edges in the shards //Array of vectors. 
vector i contains the out edges of vertex i vector< vector<edge_t> > inEdges(num_vertices); int * prefixV = (int *) calloc(num_vertices,sizeof(int)); int s,d; // It will contain the visited status of each vertex vertex_t *vertices; vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t)); CUDA_SAFE_CALL(hipMalloc((void **)&vertices, num_vertices*sizeof(vertex_t))); //Initialise the vertices for(i=0; i<num_vertices; i++) { vertices_host[i].numInEdges = 0; vertices_host[i].numOutEdges = 0; } for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); edge_t e; e.src=s; e.dest=d; inEdges[d].push_back(e); vertices_host[s].numOutEdges++; vertices_host[d].numInEdges++; } printf("Finished file reading.\n"); printf("\nBegin interval construction...\n"); // Construction of intervals gettimeofday(&t1,NULL); int num_intervals = 0, add = 1; vector<int> startInter; prefixV[0] = inEdges[0].size(); if(prefixV[0] > MAX_EDGES_PER_SHARD) { startInter.push_back(0); num_intervals++; add = 0; } for(i=1; i<num_vertices; i++) { prefixV[i] = inEdges[i].size(); if(add==1) prefixV[i] += prefixV[i-1]; if(prefixV[i] > MAX_EDGES_PER_SHARD) { startInter.push_back(i); num_intervals++; add = 0; } else add = 1; } if(add==1) { startInter.push_back(i-1); num_intervals++; } interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t)); for(i=0; i<num_intervals; i++) { interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1); interval[i].end = startInter[i]; } gettimeofday(&t2,NULL); printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); printf("\nBegin shard construction...\n"); //Construction of shard gettimeofday(&t1,NULL); shard_t * shard = (shard_t *) malloc(num_intervals*sizeof(shard_t)); //Finding the max number of edges in a shard // We will allocate space for that many edges to each shard to maintain consistency int MAX_NUM_EDGES_SHARD = INT_MIN; int MAX_NUM_VERTICES_SHARD = INT_MIN; for(i=0; i<num_intervals; i++) { int t = prefixV[interval[i].end]; if(t > MAX_NUM_EDGES_SHARD) MAX_NUM_EDGES_SHARD = t; int q = interval[i].end-interval[i].start+1; if(q > MAX_NUM_VERTICES_SHARD) MAX_NUM_VERTICES_SHARD = q; } for(i=0; i<num_intervals; i++) { // first and last vertices in shard shard[i].Vstart = interval[i].start; shard[i].Vend = interval[i].end; shard[i].E = prefixV[interval[i].end]; shard[i].vmap = (int *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(int)); shard[i].from = (int *) malloc(MAX_NUM_EDGES_SHARD*sizeof(int)); // shard[i].to = (int *) malloc(MAX_NUM_EDGES_SHARD*sizeof(int)); shard[i].updates = (double *) malloc(MAX_NUM_EDGES_SHARD*sizeof(double)); } for(i=0; i<num_intervals; i++) { vector<edge_t> tempEdges; for(j=interval[i].start; j<=interval[i].end; j++) { for(vector<edge_t>::iterator it=inEdges[j].begin(); it!=inEdges[j].end(); ++it) tempEdges.push_back(*it); } //Sorting based on src vertex to align the edges such that the access of vertices[src] is sequential sort(tempEdges.begin(),tempEdges.end(),cost); vector< vector<edge_t> > bucket(MAX_NUM_VERTICES_SHARD); for (vector<edge_t>::iterator it = tempEdges.begin() ; it != tempEdges.end(); ++it) { bucket[(*it).dest-interval[i].start].push_back(*it); } for(j=0;j<MAX_NUM_VERTICES_SHARD;j++) { shard[i].vmap[j] = bucket[j].size(); } for(j=1;j<MAX_NUM_VERTICES_SHARD;j++) { shard[i].vmap[j] += shard[i].vmap[j-1]; } k=0; for(j=0;j<MAX_NUM_VERTICES_SHARD;j++) { for (vector<edge_t>::iterator it = bucket[j].begin() ; it != bucket[j].end(); ++it) { shard[i].from[k] = 
(*it).src; // shard[i].to[k] = (*it).dest; k++; } } } gettimeofday(&t2,NULL); printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); hipStream_t * str; hipEvent_t * start; hipEvent_t * stop; int num_evts=2; str = (hipStream_t *) malloc(num_evts * sizeof(hipStream_t)); start = (hipEvent_t *) malloc(num_evts * sizeof(hipEvent_t)); stop = (hipEvent_t *) malloc(num_evts * sizeof(hipEvent_t)); for(int i = 0; i < num_evts; i++) { CUDA_SAFE_CALL(hipStreamCreate(&(str[i]))); CUDA_SAFE_CALL(hipEventCreate(&(start[i]))); CUDA_SAFE_CALL(hipEventCreate(&(stop[i]))); } float * diff = (float *) malloc(num_intervals*sizeof(float)); double time = 0; // For vertex centric algo shard_t * shard_dev; int * vmap_dev; int * from_dev; // int * to_dev; double * updates_dev; CUDA_SAFE_CALL(hipMalloc((void **)&shard_dev, sizeof(shard_t))); CUDA_SAFE_CALL(hipMalloc((void **)&from_dev, MAX_NUM_EDGES_SHARD*sizeof(int))); // CUDA_SAFE_CALL(hipMalloc((void **)&to_dev, MAX_NUM_EDGES_SHARD*sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&vmap_dev, MAX_NUM_VERTICES_SHARD*sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&updates_dev, MAX_NUM_EDGES_SHARD*sizeof(double))); //Extra Buffer for doing double bufferring shard_t * shard_dev2; int * vmap_dev2; int * from_dev2; // int * to_dev2; double * updates_dev2; CUDA_SAFE_CALL(hipMalloc((void **)&shard_dev2, sizeof(shard_t))); CUDA_SAFE_CALL(hipMalloc((void **)&from_dev2, MAX_NUM_EDGES_SHARD*sizeof(int))); // CUDA_SAFE_CALL(hipMalloc((void **)&to_dev2, MAX_NUM_EDGES_SHARD*sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&vmap_dev2, MAX_NUM_VERTICES_SHARD*sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&updates_dev2, MAX_NUM_EDGES_SHARD*sizeof(double))); int num_of_blocks = 1; //int MAX_THREADS = MAX_NUM_VERTICES_SHARD; int MAX_THREADS = MAX_NUM_EDGES_SHARD; int num_of_threads_per_block = MAX_THREADS; if(MAX_THREADS>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); printf("Begin kernel\n"); CUDA_SAFE_CALL(hipMemcpy(vertices,vertices_host,num_vertices*sizeof(vertex_t),hipMemcpyHostToDevice)); int pingpong; bool over; k=0; gettimeofday(&t1,NULL); do { over = false; CUDA_SAFE_CALL(hipMemcpyToSymbol(d_over, &over, sizeof(bool),0, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipDeviceSynchronize()); /* GATHER PHASE BEGINS */ pingpong=0; for(i=0; i<num_intervals; i++) { if(pingpong==0) { //Copy Ping CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev, &shard[i], sizeof(shard_t),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(from_dev, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->from), &from_dev, sizeof(int *),hipMemcpyHostToDevice,str[0])); // CUDA_SAFE_CALL(hipMemcpyAsync(to_dev, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[0])); // CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->to), &to_dev, sizeof(int *),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(vmap_dev, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->vmap), &vmap_dev, sizeof(int *),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->updates), &updates_dev, sizeof(double *),hipMemcpyHostToDevice,str[0])); if(i>0) { //Process Pong CUDA_SAFE_CALL(hipEventRecord(start[1],str[1])); 
hipLaunchKernelGGL(( gather_pr_edge), dim3(grid), dim3(threads),0,str[1], shard_dev2, vertices, num_vertices, k); CUDA_SAFE_CALL(hipStreamSynchronize(str[1])); CUDA_SAFE_CALL(hipEventRecord(stop[1],str[1])); CUDA_SAFE_CALL(hipEventSynchronize(stop[1])); CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[1],stop[1])); } pingpong=1; } else { //Copy Pong CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev2, &shard[i], sizeof(shard_t),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(from_dev2, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->from), &from_dev2, sizeof(int *),hipMemcpyHostToDevice,str[1])); // CUDA_SAFE_CALL(hipMemcpyAsync(to_dev2, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[1])); // CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->to), &to_dev2, sizeof(int *),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(vmap_dev2, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->vmap), &vmap_dev2, sizeof(int *),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->updates), &updates_dev2, sizeof(double *),hipMemcpyHostToDevice,str[0])); //Process Pong CUDA_SAFE_CALL(hipEventRecord(start[0],str[0])); hipLaunchKernelGGL(( gather_pr_edge), dim3(grid), dim3(threads),0,str[0], shard_dev, vertices, num_vertices, k); CUDA_SAFE_CALL(hipStreamSynchronize(str[0])); CUDA_SAFE_CALL(hipEventRecord(stop[0],str[0])); CUDA_SAFE_CALL(hipEventSynchronize(stop[0])); CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[0],stop[0])); pingpong=0; } } if(pingpong==0) { //Process Pong CUDA_SAFE_CALL(hipEventRecord(start[1],str[1])); hipLaunchKernelGGL(( gather_pr_edge), dim3(grid), dim3(threads),0,str[1], shard_dev2, vertices, num_vertices, k); CUDA_SAFE_CALL(hipStreamSynchronize(str[1])); CUDA_SAFE_CALL(hipEventRecord(stop[1],str[1])); CUDA_SAFE_CALL(hipEventSynchronize(stop[1])); CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[1],stop[1])); } else { //Process Pong CUDA_SAFE_CALL(hipEventRecord(start[0],str[0])); hipLaunchKernelGGL(( gather_pr_edge), dim3(grid), dim3(threads),0,str[0], shard_dev, vertices, num_vertices, k); CUDA_SAFE_CALL(hipStreamSynchronize(str[0])); CUDA_SAFE_CALL(hipEventRecord(stop[0],str[0])); CUDA_SAFE_CALL(hipEventSynchronize(stop[0])); CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[1],stop[1])); } for(i=0;i<num_intervals;i++) time += diff[i]; /* GATHER PHASE ENDS */ /* APPLY PHASE BEGINS */ pingpong=0; for(i=0; i<num_intervals; i++) { if(pingpong==0) { //Copy Ping CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev, &shard[i], sizeof(shard_t),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(from_dev, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->from), &from_dev, sizeof(int *),hipMemcpyHostToDevice,str[0])); // CUDA_SAFE_CALL(hipMemcpyAsync(to_dev, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[0])); // CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->to), &to_dev, sizeof(int *),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(vmap_dev, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->vmap), &vmap_dev, sizeof(int *),hipMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev->updates), &updates_dev, sizeof(double *),hipMemcpyHostToDevice,str[0])); if(i>0) { //Process Pong 
CUDA_SAFE_CALL(hipEventRecord(start[1],str[1])); hipLaunchKernelGGL(( apply_pr_edge), dim3(grid), dim3(threads),0,str[1], shard_dev2, vertices, num_vertices, k); CUDA_SAFE_CALL(hipStreamSynchronize(str[1])); CUDA_SAFE_CALL(hipEventRecord(stop[1],str[1])); CUDA_SAFE_CALL(hipEventSynchronize(stop[1])); CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[1],stop[1])); } pingpong=1; } else { //Copy Pong CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev2, &shard[i], sizeof(shard_t),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(from_dev2, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->from), &from_dev2, sizeof(int *),hipMemcpyHostToDevice,str[1])); // CUDA_SAFE_CALL(hipMemcpyAsync(to_dev2, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[1])); // CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->to), &to_dev2, sizeof(int *),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(vmap_dev2, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->vmap), &vmap_dev2, sizeof(int *),hipMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(hipMemcpyAsync(&(shard_dev2->updates), &updates_dev2, sizeof(double *),hipMemcpyHostToDevice,str[0])); //Process Pong CUDA_SAFE_CALL(hipEventRecord(start[0],str[0])); hipLaunchKernelGGL(( apply_pr_edge), dim3(grid), dim3(threads),0,str[0], shard_dev, vertices, num_vertices, k); CUDA_SAFE_CALL(hipStreamSynchronize(str[0])); CUDA_SAFE_CALL(hipEventRecord(stop[0],str[0])); CUDA_SAFE_CALL(hipEventSynchronize(stop[0])); CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[0],stop[0])); pingpong=0; } } if(pingpong==0) { //Process Pong CUDA_SAFE_CALL(hipEventRecord(start[1],str[1])); hipLaunchKernelGGL(( apply_pr_edge), dim3(grid), dim3(threads),0,str[1], shard_dev2, vertices, num_vertices, k); CUDA_SAFE_CALL(hipStreamSynchronize(str[1])); CUDA_SAFE_CALL(hipEventRecord(stop[1],str[1])); CUDA_SAFE_CALL(hipEventSynchronize(stop[1])); CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[1],stop[1])); } else { //Process Pong CUDA_SAFE_CALL(hipEventRecord(start[0],str[0])); hipLaunchKernelGGL(( apply_pr_edge), dim3(grid), dim3(threads),0,str[0], shard_dev, vertices, num_vertices, k); CUDA_SAFE_CALL(hipStreamSynchronize(str[0])); CUDA_SAFE_CALL(hipEventRecord(stop[0],str[0])); CUDA_SAFE_CALL(hipEventSynchronize(stop[0])); CUDA_SAFE_CALL(hipEventElapsedTime(&diff[i-1],start[1],stop[1])); } for(i=0;i<num_intervals;i++) time += diff[i]; /* APPLY PHASE ENDS */ CUDA_SAFE_CALL(hipMemcpyFromSymbol(&over, d_over, sizeof(bool),0, hipMemcpyDeviceToHost)); k++; }while(over); CUDA_SAFE_CALL(hipDeviceSynchronize()); gettimeofday(&t2,NULL); printf("Time to Pagerank : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); printf("Number of iterations : %d\n",k); /* CUDA_SAFE_CALL(hipMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), hipMemcpyDeviceToHost)); for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,(int)vertices_host[i].val); }*/ printf("Time: %f ms\n",time); for(int i = 0; i < num_evts; i++) { CUDA_SAFE_CALL(hipStreamDestroy(str[i])); CUDA_SAFE_CALL(hipEventDestroy(start[i])); CUDA_SAFE_CALL(hipEventDestroy(stop[i])); } free(interval); for(i=0; i<num_intervals; i++) { free(shard[i].vmap); free(shard[i].from); // free(shard[i].to); free(shard[i].updates); } free(shard); free(vertices_host); CUDA_SAFE_CALL(hipFree(vertices)); CUDA_SAFE_CALL(hipFree(vmap_dev)); 
CUDA_SAFE_CALL(hipFree(from_dev)); // CUDA_SAFE_CALL(hipFree(to_dev)); CUDA_SAFE_CALL(hipFree(updates_dev)); CUDA_SAFE_CALL(hipFree(shard_dev)); CUDA_SAFE_CALL(hipFree(vmap_dev2)); CUDA_SAFE_CALL(hipFree(from_dev2)); // CUDA_SAFE_CALL(hipFree(to_dev2)); CUDA_SAFE_CALL(hipFree(updates_dev2)); CUDA_SAFE_CALL(hipFree(shard_dev2)); return 0; }
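In apply_pr_edge above, vmap is the shard-local inclusive prefix sum of in-edge counts, so vertex id owns updates[vmap[id-1] .. vmap[id]) and its new rank is the damped sum of those gathered contributions. A host-side restatement of that per-vertex step (a sketch for clarity, not code from this project):

// PageRank apply step for one vertex of a shard, damping factor 0.85 as in the kernel.
static double applyVertex(const double* updates, const int* vmap, int id) {
    int begin = (id == 0) ? 0 : vmap[id - 1];
    int end   = vmap[id];
    double sum = 0.0;
    for (int e = begin; e < end; ++e) sum += updates[e];
    return 0.15 + 0.85 * sum;
}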
d7cc2b29df2310f2fde791bdf1a92b5d09d9c41a.cu
#include <iostream> #include <vector> #include <set> #include <map> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include <cmath> using namespace std; #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 #define GLOBAL_MAX_EDGES_PER_SHARD 33554432 #define ERR 0.01 void safe_call(cudaError_t ret, int line) { if(ret!=cudaSuccess) { printf("Error at line %d : %s\n",line,cudaGetErrorString(ret)); exit(-1); } } typedef struct __interval { int start; int end; } interval_t; typedef struct __edge { int src; int dest; int val; } edge_t; typedef struct __vertex { double val; int numInEdges; int numOutEdges; } vertex_t; typedef struct __shard { int E; int Vstart; int Vend; int * from; // int * to; int * vmap; double * updates; } shard_t; __device__ bool d_over; __global__ void reset() { d_over = false; } __device__ int binarySearch(int * arr, int l, int r, int x) { if(x < arr[l]) return l; while (l <= r) { int m = l+((r-l)/2); if (arr[m] == x) { int t = m; while(t<r && arr[t+1] == arr[t]) { t++; } return t+1; } if (arr[m] < x) l = m + 1; else r = m - 1; } return l; } __global__ void gather_pr_edge(const shard_t * shard, vertex_t * vertices, int num_vertices, int current_depth) { if(current_depth > 0) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < shard->E) { int s = shard->from[id]; double u = vertices[s].val/vertices[s].numOutEdges; shard->updates[id] = u; } } } __global__ void apply_pr_edge(const shard_t * shard, vertex_t * vertices, int num_vertices, int current_depth) { int id = blockDim.x*blockIdx.x + threadIdx.x; int vid = id + shard->Vstart; if(vid <= shard->Vend) { if(current_depth == 0) { d_over = true; vertices[vid].val = 1.0; } else { int i; double sum=0; if(id == 0) i = 0; else i = shard->vmap[id-1]; for(; i < shard->vmap[id]; i++) { sum += shard->updates[i]; } double newval = 0.15 + 0.85 * sum; if(fabs(vertices[vid].val - newval) >= ERR) { d_over=true; } vertices[vid].val = newval; } } } bool cost(const edge_t &a, const edge_t &b) { return ((a.src < b.src) || (a.src == b.src && a.dest < b.dest)); } int main(int argc, char * argv[]) { struct timeval t1,t2; static char * filename; if(argc!=2) { printf("./a.out <filename>\n"); exit(-1); } else { filename = argv[1]; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(cudaSetDevice(0)); printf("Begin file reading...\n"); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j, k; fscanf(fp,"%d %d",&num_vertices,&num_edges); //We are always going to have atleast 2 shards to have double bufferring int ns = num_edges / GLOBAL_MAX_EDGES_PER_SHARD; int MAX_EDGES_PER_SHARD = (ns == 0) ? (num_edges + 1)/2 : (num_edges + 1)/(ns + 1); //We do this to balance the no of edges in the shards //Array of vectors. 
vector i contains the out edges of vertex i vector< vector<edge_t> > inEdges(num_vertices); int * prefixV = (int *) calloc(num_vertices,sizeof(int)); int s,d; // It will contain the visited status of each vertex vertex_t *vertices; vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t)); CUDA_SAFE_CALL(cudaMalloc((void **)&vertices, num_vertices*sizeof(vertex_t))); //Initialise the vertices for(i=0; i<num_vertices; i++) { vertices_host[i].numInEdges = 0; vertices_host[i].numOutEdges = 0; } for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); edge_t e; e.src=s; e.dest=d; inEdges[d].push_back(e); vertices_host[s].numOutEdges++; vertices_host[d].numInEdges++; } printf("Finished file reading.\n"); printf("\nBegin interval construction...\n"); // Construction of intervals gettimeofday(&t1,NULL); int num_intervals = 0, add = 1; vector<int> startInter; prefixV[0] = inEdges[0].size(); if(prefixV[0] > MAX_EDGES_PER_SHARD) { startInter.push_back(0); num_intervals++; add = 0; } for(i=1; i<num_vertices; i++) { prefixV[i] = inEdges[i].size(); if(add==1) prefixV[i] += prefixV[i-1]; if(prefixV[i] > MAX_EDGES_PER_SHARD) { startInter.push_back(i); num_intervals++; add = 0; } else add = 1; } if(add==1) { startInter.push_back(i-1); num_intervals++; } interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t)); for(i=0; i<num_intervals; i++) { interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1); interval[i].end = startInter[i]; } gettimeofday(&t2,NULL); printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); printf("\nBegin shard construction...\n"); //Construction of shard gettimeofday(&t1,NULL); shard_t * shard = (shard_t *) malloc(num_intervals*sizeof(shard_t)); //Finding the max number of edges in a shard // We will allocate space for that many edges to each shard to maintain consistency int MAX_NUM_EDGES_SHARD = INT_MIN; int MAX_NUM_VERTICES_SHARD = INT_MIN; for(i=0; i<num_intervals; i++) { int t = prefixV[interval[i].end]; if(t > MAX_NUM_EDGES_SHARD) MAX_NUM_EDGES_SHARD = t; int q = interval[i].end-interval[i].start+1; if(q > MAX_NUM_VERTICES_SHARD) MAX_NUM_VERTICES_SHARD = q; } for(i=0; i<num_intervals; i++) { // first and last vertices in shard shard[i].Vstart = interval[i].start; shard[i].Vend = interval[i].end; shard[i].E = prefixV[interval[i].end]; shard[i].vmap = (int *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(int)); shard[i].from = (int *) malloc(MAX_NUM_EDGES_SHARD*sizeof(int)); // shard[i].to = (int *) malloc(MAX_NUM_EDGES_SHARD*sizeof(int)); shard[i].updates = (double *) malloc(MAX_NUM_EDGES_SHARD*sizeof(double)); } for(i=0; i<num_intervals; i++) { vector<edge_t> tempEdges; for(j=interval[i].start; j<=interval[i].end; j++) { for(vector<edge_t>::iterator it=inEdges[j].begin(); it!=inEdges[j].end(); ++it) tempEdges.push_back(*it); } //Sorting based on src vertex to align the edges such that the access of vertices[src] is sequential sort(tempEdges.begin(),tempEdges.end(),cost); vector< vector<edge_t> > bucket(MAX_NUM_VERTICES_SHARD); for (vector<edge_t>::iterator it = tempEdges.begin() ; it != tempEdges.end(); ++it) { bucket[(*it).dest-interval[i].start].push_back(*it); } for(j=0;j<MAX_NUM_VERTICES_SHARD;j++) { shard[i].vmap[j] = bucket[j].size(); } for(j=1;j<MAX_NUM_VERTICES_SHARD;j++) { shard[i].vmap[j] += shard[i].vmap[j-1]; } k=0; for(j=0;j<MAX_NUM_VERTICES_SHARD;j++) { for (vector<edge_t>::iterator it = bucket[j].begin() ; it != bucket[j].end(); ++it) { shard[i].from[k] = 
(*it).src; // shard[i].to[k] = (*it).dest; k++; } } } gettimeofday(&t2,NULL); printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); cudaStream_t * str; cudaEvent_t * start; cudaEvent_t * stop; int num_evts=2; str = (cudaStream_t *) malloc(num_evts * sizeof(cudaStream_t)); start = (cudaEvent_t *) malloc(num_evts * sizeof(cudaEvent_t)); stop = (cudaEvent_t *) malloc(num_evts * sizeof(cudaEvent_t)); for(int i = 0; i < num_evts; i++) { CUDA_SAFE_CALL(cudaStreamCreate(&(str[i]))); CUDA_SAFE_CALL(cudaEventCreate(&(start[i]))); CUDA_SAFE_CALL(cudaEventCreate(&(stop[i]))); } float * diff = (float *) malloc(num_intervals*sizeof(float)); double time = 0; // For vertex centric algo shard_t * shard_dev; int * vmap_dev; int * from_dev; // int * to_dev; double * updates_dev; CUDA_SAFE_CALL(cudaMalloc((void **)&shard_dev, sizeof(shard_t))); CUDA_SAFE_CALL(cudaMalloc((void **)&from_dev, MAX_NUM_EDGES_SHARD*sizeof(int))); // CUDA_SAFE_CALL(cudaMalloc((void **)&to_dev, MAX_NUM_EDGES_SHARD*sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&vmap_dev, MAX_NUM_VERTICES_SHARD*sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&updates_dev, MAX_NUM_EDGES_SHARD*sizeof(double))); //Extra Buffer for doing double bufferring shard_t * shard_dev2; int * vmap_dev2; int * from_dev2; // int * to_dev2; double * updates_dev2; CUDA_SAFE_CALL(cudaMalloc((void **)&shard_dev2, sizeof(shard_t))); CUDA_SAFE_CALL(cudaMalloc((void **)&from_dev2, MAX_NUM_EDGES_SHARD*sizeof(int))); // CUDA_SAFE_CALL(cudaMalloc((void **)&to_dev2, MAX_NUM_EDGES_SHARD*sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&vmap_dev2, MAX_NUM_VERTICES_SHARD*sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&updates_dev2, MAX_NUM_EDGES_SHARD*sizeof(double))); int num_of_blocks = 1; //int MAX_THREADS = MAX_NUM_VERTICES_SHARD; int MAX_THREADS = MAX_NUM_EDGES_SHARD; int num_of_threads_per_block = MAX_THREADS; if(MAX_THREADS>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); printf("Begin kernel\n"); CUDA_SAFE_CALL(cudaMemcpy(vertices,vertices_host,num_vertices*sizeof(vertex_t),cudaMemcpyHostToDevice)); int pingpong; bool over; k=0; gettimeofday(&t1,NULL); do { over = false; CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_over, &over, sizeof(bool),0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); /* GATHER PHASE BEGINS */ pingpong=0; for(i=0; i<num_intervals; i++) { if(pingpong==0) { //Copy Ping CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev, &shard[i], sizeof(shard_t),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(from_dev, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->from), &from_dev, sizeof(int *),cudaMemcpyHostToDevice,str[0])); // CUDA_SAFE_CALL(cudaMemcpyAsync(to_dev, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[0])); // CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->to), &to_dev, sizeof(int *),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(vmap_dev, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->vmap), &vmap_dev, sizeof(int *),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->updates), &updates_dev, sizeof(double *),cudaMemcpyHostToDevice,str[0])); if(i>0) { //Process Pong 
CUDA_SAFE_CALL(cudaEventRecord(start[1],str[1])); gather_pr_edge<<<grid, threads,0,str[1]>>> (shard_dev2, vertices, num_vertices, k); CUDA_SAFE_CALL(cudaStreamSynchronize(str[1])); CUDA_SAFE_CALL(cudaEventRecord(stop[1],str[1])); CUDA_SAFE_CALL(cudaEventSynchronize(stop[1])); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[1],stop[1])); } pingpong=1; } else { //Copy Pong CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev2, &shard[i], sizeof(shard_t),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(from_dev2, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->from), &from_dev2, sizeof(int *),cudaMemcpyHostToDevice,str[1])); // CUDA_SAFE_CALL(cudaMemcpyAsync(to_dev2, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[1])); // CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->to), &to_dev2, sizeof(int *),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(vmap_dev2, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->vmap), &vmap_dev2, sizeof(int *),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->updates), &updates_dev2, sizeof(double *),cudaMemcpyHostToDevice,str[0])); //Process Pong CUDA_SAFE_CALL(cudaEventRecord(start[0],str[0])); gather_pr_edge<<<grid, threads,0,str[0]>>> (shard_dev, vertices, num_vertices, k); CUDA_SAFE_CALL(cudaStreamSynchronize(str[0])); CUDA_SAFE_CALL(cudaEventRecord(stop[0],str[0])); CUDA_SAFE_CALL(cudaEventSynchronize(stop[0])); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[0],stop[0])); pingpong=0; } } if(pingpong==0) { //Process Pong CUDA_SAFE_CALL(cudaEventRecord(start[1],str[1])); gather_pr_edge<<<grid, threads,0,str[1]>>> (shard_dev2, vertices, num_vertices, k); CUDA_SAFE_CALL(cudaStreamSynchronize(str[1])); CUDA_SAFE_CALL(cudaEventRecord(stop[1],str[1])); CUDA_SAFE_CALL(cudaEventSynchronize(stop[1])); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[1],stop[1])); } else { //Process Pong CUDA_SAFE_CALL(cudaEventRecord(start[0],str[0])); gather_pr_edge<<<grid, threads,0,str[0]>>> (shard_dev, vertices, num_vertices, k); CUDA_SAFE_CALL(cudaStreamSynchronize(str[0])); CUDA_SAFE_CALL(cudaEventRecord(stop[0],str[0])); CUDA_SAFE_CALL(cudaEventSynchronize(stop[0])); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[1],stop[1])); } for(i=0;i<num_intervals;i++) time += diff[i]; /* GATHER PHASE ENDS */ /* APPLY PHASE BEGINS */ pingpong=0; for(i=0; i<num_intervals; i++) { if(pingpong==0) { //Copy Ping CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev, &shard[i], sizeof(shard_t),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(from_dev, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->from), &from_dev, sizeof(int *),cudaMemcpyHostToDevice,str[0])); // CUDA_SAFE_CALL(cudaMemcpyAsync(to_dev, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[0])); // CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->to), &to_dev, sizeof(int *),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(vmap_dev, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->vmap), &vmap_dev, sizeof(int *),cudaMemcpyHostToDevice,str[0])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev->updates), &updates_dev, sizeof(double *),cudaMemcpyHostToDevice,str[0])); if(i>0) { //Process Pong 
CUDA_SAFE_CALL(cudaEventRecord(start[1],str[1])); apply_pr_edge<<<grid, threads,0,str[1]>>> (shard_dev2, vertices, num_vertices, k); CUDA_SAFE_CALL(cudaStreamSynchronize(str[1])); CUDA_SAFE_CALL(cudaEventRecord(stop[1],str[1])); CUDA_SAFE_CALL(cudaEventSynchronize(stop[1])); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[1],stop[1])); } pingpong=1; } else { //Copy Pong CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev2, &shard[i], sizeof(shard_t),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(from_dev2, shard[i].from, MAX_NUM_EDGES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->from), &from_dev2, sizeof(int *),cudaMemcpyHostToDevice,str[1])); // CUDA_SAFE_CALL(cudaMemcpyAsync(to_dev2, shard[i].to, MAX_NUM_EDGES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[1])); // CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->to), &to_dev2, sizeof(int *),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(vmap_dev2, shard[i].vmap, MAX_NUM_VERTICES_SHARD*sizeof(int),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->vmap), &vmap_dev2, sizeof(int *),cudaMemcpyHostToDevice,str[1])); CUDA_SAFE_CALL(cudaMemcpyAsync(&(shard_dev2->updates), &updates_dev2, sizeof(double *),cudaMemcpyHostToDevice,str[0])); //Process Pong CUDA_SAFE_CALL(cudaEventRecord(start[0],str[0])); apply_pr_edge<<<grid, threads,0,str[0]>>> (shard_dev, vertices, num_vertices, k); CUDA_SAFE_CALL(cudaStreamSynchronize(str[0])); CUDA_SAFE_CALL(cudaEventRecord(stop[0],str[0])); CUDA_SAFE_CALL(cudaEventSynchronize(stop[0])); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[0],stop[0])); pingpong=0; } } if(pingpong==0) { //Process Pong CUDA_SAFE_CALL(cudaEventRecord(start[1],str[1])); apply_pr_edge<<<grid, threads,0,str[1]>>> (shard_dev2, vertices, num_vertices, k); CUDA_SAFE_CALL(cudaStreamSynchronize(str[1])); CUDA_SAFE_CALL(cudaEventRecord(stop[1],str[1])); CUDA_SAFE_CALL(cudaEventSynchronize(stop[1])); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[1],stop[1])); } else { //Process Pong CUDA_SAFE_CALL(cudaEventRecord(start[0],str[0])); apply_pr_edge<<<grid, threads,0,str[0]>>> (shard_dev, vertices, num_vertices, k); CUDA_SAFE_CALL(cudaStreamSynchronize(str[0])); CUDA_SAFE_CALL(cudaEventRecord(stop[0],str[0])); CUDA_SAFE_CALL(cudaEventSynchronize(stop[0])); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff[i-1],start[1],stop[1])); } for(i=0;i<num_intervals;i++) time += diff[i]; /* APPLY PHASE ENDS */ CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&over, d_over, sizeof(bool),0, cudaMemcpyDeviceToHost)); k++; }while(over); CUDA_SAFE_CALL(cudaDeviceSynchronize()); gettimeofday(&t2,NULL); printf("Time to Pagerank : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); printf("Number of iterations : %d\n",k); /* CUDA_SAFE_CALL(cudaMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), cudaMemcpyDeviceToHost)); for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,(int)vertices_host[i].val); }*/ printf("Time: %f ms\n",time); for(int i = 0; i < num_evts; i++) { CUDA_SAFE_CALL(cudaStreamDestroy(str[i])); CUDA_SAFE_CALL(cudaEventDestroy(start[i])); CUDA_SAFE_CALL(cudaEventDestroy(stop[i])); } free(interval); for(i=0; i<num_intervals; i++) { free(shard[i].vmap); free(shard[i].from); // free(shard[i].to); free(shard[i].updates); } free(shard); free(vertices_host); CUDA_SAFE_CALL(cudaFree(vertices)); CUDA_SAFE_CALL(cudaFree(vmap_dev)); CUDA_SAFE_CALL(cudaFree(from_dev)); // CUDA_SAFE_CALL(cudaFree(to_dev)); 
CUDA_SAFE_CALL(cudaFree(updates_dev)); CUDA_SAFE_CALL(cudaFree(shard_dev)); CUDA_SAFE_CALL(cudaFree(vmap_dev2)); CUDA_SAFE_CALL(cudaFree(from_dev2)); // CUDA_SAFE_CALL(cudaFree(to_dev2)); CUDA_SAFE_CALL(cudaFree(updates_dev2)); CUDA_SAFE_CALL(cudaFree(shard_dev2)); return 0; }
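The PageRank program above hides shard transfer latency by ping-ponging between two CUDA streams and two sets of device buffers, copying the next shard while the current one is processed. A minimal, self-contained sketch of that overlap pattern follows; the kernel, chunk size, and names (CHECK, CHUNK, process, d_buf) are hypothetical, and pinned host memory is used because cudaMemcpyAsync generally needs it to overlap with kernel execution.

// Minimal two-stream ping-pong sketch (hypothetical kernel and sizes).
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CHECK(call)                                                         \
    do {                                                                    \
        cudaError_t e = (call);                                             \
        if (e != cudaSuccess) {                                             \
            fprintf(stderr, "CUDA error %s at line %d\n",                   \
                    cudaGetErrorString(e), __LINE__);                       \
            exit(1);                                                        \
        }                                                                   \
    } while (0)

__global__ void process(const int *chunk, int n, int *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicAdd(out, chunk[i]);   // stand-in for the real shard kernel
}

int main()
{
    const int NCHUNKS = 8, CHUNK = 1 << 20;

    int *h_data, *d_buf[2], *d_out;
    CHECK(cudaMallocHost((void **)&h_data, (size_t)NCHUNKS * CHUNK * sizeof(int))); // pinned host buffer
    CHECK(cudaMalloc((void **)&d_buf[0], CHUNK * sizeof(int)));
    CHECK(cudaMalloc((void **)&d_buf[1], CHUNK * sizeof(int)));
    CHECK(cudaMalloc((void **)&d_out, sizeof(int)));
    CHECK(cudaMemset(d_out, 0, sizeof(int)));
    for (int i = 0; i < NCHUNKS * CHUNK; ++i) h_data[i] = 1;

    cudaStream_t str[2];
    CHECK(cudaStreamCreate(&str[0]));
    CHECK(cudaStreamCreate(&str[1]));

    dim3 block(256), grid((CHUNK + 255) / 256);
    for (int c = 0; c < NCHUNKS; ++c) {
        int p = c & 1;                     // ping-pong buffer/stream index
        CHECK(cudaMemcpyAsync(d_buf[p], h_data + (size_t)c * CHUNK,
                              CHUNK * sizeof(int), cudaMemcpyHostToDevice, str[p]));
        process<<<grid, block, 0, str[p]>>>(d_buf[p], CHUNK, d_out);
        // the copy of chunk c+1 on the other stream can proceed while this kernel runs
    }
    CHECK(cudaDeviceSynchronize());

    int result = 0;
    CHECK(cudaMemcpy(&result, d_out, sizeof(int), cudaMemcpyDeviceToHost));
    printf("sum = %d (expected %d)\n", result, NCHUNKS * CHUNK);

    CHECK(cudaStreamDestroy(str[0]));
    CHECK(cudaStreamDestroy(str[1]));
    CHECK(cudaFreeHost(h_data));
    CHECK(cudaFree(d_buf[0])); CHECK(cudaFree(d_buf[1])); CHECK(cudaFree(d_out));
    return 0;
}

Within a stream the copy and kernel for one chunk stay ordered, while the opposite stream's copy for the next chunk can run concurrently, which is the effect the shard loop above gets from its explicit pingpong flag.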
03d1e1f8da3449d8cfa376c34fe9326e70ee4676.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cstdlib> #include <staggered_oprod.h> #include <tune_quda.h> #include <quda_internal.h> #include <gauge_field_order.h> #include <quda_matrix.h> namespace quda { #ifdef GPU_STAGGERED_DIRAC namespace { // anonymous #include <texture.h> } enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL}; template<typename Float, typename Output, typename InputA, typename InputB> struct StaggeredOprodArg { unsigned int length; int X[4]; unsigned int parity; unsigned int dir; unsigned int ghostOffset[4]; unsigned int displacement; KernelType kernelType; int nFace; bool partitioned[4]; InputA inA; InputB inB; Output outA; Output outB; Float coeff[2]; StaggeredOprodArg(const unsigned int parity, const unsigned int dir, const unsigned int *ghostOffset, const unsigned int displacement, const KernelType& kernelType, const int nFace, const double coeff[2], InputA& inA, InputB& inB, Output& outA, Output& outB, GaugeField& meta) : length(meta.VolumeCB()), parity(parity), dir(dir), displacement(displacement), kernelType(kernelType), nFace(nFace), inA(inA), inB(inB), outA(outA), outB(outB) { this->coeff[0] = coeff[0]; this->coeff[1] = coeff[1]; for(int i=0; i<4; ++i) this->X[i] = meta.X()[i]; for(int i=0; i<4; ++i) this->ghostOffset[i] = ghostOffset[i]; for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? true : false; } }; enum IndexType { EVEN_X = 0, EVEN_Y = 1, EVEN_Z = 2, EVEN_T = 3 }; template <IndexType idxType> static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4], const unsigned int cb_idx, const unsigned int parity, const int X[4]) { const int &LX = X[0]; const int &LY = X[1]; const int &LZ = X[2]; const int XYZ = X[2]*X[1]*X[0]; const int XY = X[1]*X[0]; idx = 2*cb_idx; int x, y, z, t; if (idxType == EVEN_X /*!(LX & 1)*/) { // X even // t = idx / XYZ; // z = (idx / XY) % Z; // y = (idx / X) % Y; // idx += (parity + t + z + y) & 1; // x = idx % X; // equivalent to the above, but with fewer divisions/mods: int aux1 = idx / LX; x = idx - aux1 * LX; int aux2 = aux1 / LY; y = aux1 - aux2 * LY; t = aux2 / LZ; z = aux2 - t * LZ; aux1 = (parity + t + z + y) & 1; x += aux1; idx += aux1; } else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even t = idx / XYZ; z = (idx / XY) % LZ; idx += (parity + t + z) & 1; y = (idx / LX) % LY; x = idx % LX; } else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even t = idx / XYZ; idx += (parity + t) & 1; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } else { idx += parity; t = idx / XYZ; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } c[0] = x; c[1] = y; c[2] = z; c[3] = t; } // Get the coordinates for the exterior kernels __device__ static void coordsFromIndex(int x[4], const unsigned int cb_idx, const int X[4], const unsigned int dir, const int displacement, const unsigned int parity) { int Xh[2] = {X[0]/2, X[1]/2}; switch(dir){ case 0: x[2] = cb_idx/Xh[1] % X[2]; x[3] = cb_idx/(Xh[1]*X[2]) % X[3]; x[0] = cb_idx/(Xh[1]*X[2]*X[3]); x[0] += (X[0] - displacement); x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1); break; case 1: x[2] = cb_idx/Xh[0] % X[2]; x[3] = cb_idx/(Xh[0]*X[2]) % X[3]; x[1] = cb_idx/(Xh[0]*X[2]*X[3]); x[1] += (X[1] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 2: x[1] = cb_idx/Xh[0] % X[1]; x[3] = cb_idx/(Xh[0]*X[1]) % X[3]; x[2] = cb_idx/(Xh[0]*X[1]*X[3]); x[2] += (X[2] - displacement); x[0] = 2*(cb_idx % Xh[0]) + 
((x[1]+x[2]+x[3]+parity)&1); break; case 3: x[1] = cb_idx/Xh[0] % X[1]; x[2] = cb_idx/(Xh[0]*X[1]) % X[2]; x[3] = cb_idx/(Xh[0]*X[1]*X[2]); x[3] += (X[3] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; } return; } __device__ __forceinline__ int neighborIndex(const unsigned int cb_idx, const int shift[4], const bool partitioned[4], const unsigned int parity, const int X[4]){ int full_idx; int x[4]; coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X); for(int dim = 0; dim<4; ++dim){ if( partitioned[dim] ) if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1; } for(int dim=0; dim<4; ++dim){ x[dim] = shift[dim] ? (x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim]; } return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1; } template<typename real, typename Output, typename InputA, typename InputB> __global__ void interiorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int gridSize = gridDim.x*blockDim.x; typedef complex<real> Complex; Complex x[3]; Complex y[3]; Complex z[3]; Matrix<Complex,3> result; Matrix<Complex,3> tempA, tempB; // input while(idx<arg.length){ arg.inA.load(x, idx); #pragma unroll for(int dim=0; dim<4; ++dim){ int shift[4] = {0,0,0,0}; shift[dim] = 1; const int first_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(first_nbr_idx >= 0){ arg.inB.load(y, first_nbr_idx); outerProd(y,x,&result); arg.outA.load(reinterpret_cast<real*>(tempA.data), idx, dim, arg.parity); result = tempA + result*arg.coeff[0]; arg.outA.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); if (arg.nFace == 3) { shift[dim] = 3; const int third_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(third_nbr_idx >= 0){ arg.inB.load(z, third_nbr_idx); outerProd(z, x, &result); arg.outB.load(reinterpret_cast<real*>(tempB.data), idx, dim, arg.parity); result = tempB + result*arg.coeff[1]; arg.outB.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); } } } } // dim idx += gridSize; } return; } // interiorOprodKernel template<int dim, typename real, typename Output, typename InputA, typename InputB> __global__ void exteriorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg) { typedef complex<real> Complex; unsigned int cb_idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int gridSize = gridDim.x*blockDim.x; Complex a[3]; Complex b[3]; Matrix<Complex,3> result; Matrix<Complex,3> inmatrix; // input Output& out = (arg.displacement == 1) ? arg.outA : arg.outB; real coeff = (arg.displacement == 1) ? 
arg.coeff[0] : arg.coeff[1]; int x[4]; while(cb_idx<arg.length){ coordsFromIndex(x, cb_idx, arg.X, arg.dir, arg.displacement, arg.parity); const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1); out.load(reinterpret_cast<real*>(inmatrix.data), bulk_cb_idx, arg.dir, arg.parity); arg.inA.load(a, bulk_cb_idx); const unsigned int ghost_idx = arg.ghostOffset[dim] + cb_idx; arg.inB.loadGhost(b, ghost_idx, arg.dir); outerProd(b,a,&result); result = inmatrix + result*coeff; out.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, arg.dir, arg.parity); cb_idx += gridSize; } return; } template<typename Float, typename Output, typename InputA, typename InputB> class StaggeredOprodField : public Tunable { private: StaggeredOprodArg<Float,Output,InputA,InputB> &arg; const GaugeField &meta; unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.outA.volumeCB; } bool tunedGridDim() const { return false; } public: StaggeredOprodField(StaggeredOprodArg<Float,Output,InputA,InputB> &arg, const GaugeField &meta) : arg(arg), meta(meta) { writeAuxString("threads=%d,prec=%lu,stride=%d",arg.length,sizeof(Complex)/2,arg.inA.Stride()); // this sets the communications pattern for the packing kernel int comms[QUDA_MAX_DIM] = { commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3) }; setPackComms(comms); } virtual ~StaggeredOprodField() {} void apply(const hipStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) { // Disable tuning for the time being TuneParam tp = tuneLaunch(*this, QUDA_TUNE_NO, getVerbosity()); if (arg.kernelType == OPROD_INTERIOR_KERNEL) { hipLaunchKernelGGL(( interiorOprodKernel), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); } else if (arg.kernelType == OPROD_EXTERIOR_KERNEL) { if (arg.dir == 0)hipLaunchKernelGGL(( exteriorOprodKernel<0>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); else if (arg.dir == 1)hipLaunchKernelGGL(( exteriorOprodKernel<1>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); else if (arg.dir == 2)hipLaunchKernelGGL(( exteriorOprodKernel<2>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); else if (arg.dir == 3)hipLaunchKernelGGL(( exteriorOprodKernel<3>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, stream, arg); } else { errorQuda("Kernel type not supported\n"); } } else { // run the CPU code errorQuda("No CPU support for staggered outer-product calculation\n"); } } // apply void preTune(){ this->arg.outA.save(); this->arg.outB.save(); } void postTune(){ this->arg.outA.load(); this->arg.outB.load(); } long long flops() const { return 0; } // FIXME long long bytes() const { return 0; } // FIXME TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux);} }; // StaggeredOprodField void exchangeGhost(int nFace, cudaColorSpinorField &a, int parity, int dag) { // need to enable packing in temporal direction to get spin-projector correct bool pack_old = getKernelPackT(); setKernelPackT(true); // first transfer src1 hipDeviceSynchronize(); MemoryLocation location[2*QUDA_MAX_DIM] = {Device, Device, Device, Device, Device, Device, Device, Device}; a.pack(nFace, 1-parity, dag, Nstream-1, location); hipDeviceSynchronize(); for(int i=3; i>=0; i--){ if(commDimPartitioned(i)){ // Initialize the host transfer from the source spinor a.gather(nFace, dag, 2*i); } // commDim(i) } // i=3,..,0 
hipDeviceSynchronize(); comm_barrier(); for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsStart(nFace, 2*i, dag); } } for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsWait(nFace, 2*i, dag); a.scatter(nFace, dag, 2*i); } } hipDeviceSynchronize(); setKernelPackT(pack_old); // restore packing state a.bufferIndex = (1 - a.bufferIndex); comm_barrier(); } template<typename Float, typename Output, typename InputA, typename InputB> void computeStaggeredOprodCuda(Output outA, Output outB, GaugeField& outFieldA, GaugeField& outFieldB, InputA& inA, InputB& inB, cudaColorSpinorField& src, const unsigned int parity, const int faceVolumeCB[4], const double coeff[2], int nFace) { unsigned int ghostOffset[4] = {0,0,0,0}; for(int dir=0; dir<4; ++dir) ghostOffset[dir] = src.GhostOffset(dir,1)/src.FieldOrder(); // offset we want is the forwards one // Create the arguments for the interior kernel StaggeredOprodArg<Float,Output,InputA,InputB> arg(parity, 0, ghostOffset, 1, OPROD_INTERIOR_KERNEL, nFace, coeff, inA, inB, outA, outB, outFieldA); StaggeredOprodField<Float,Output,InputA,InputB> oprod(arg, outFieldA); arg.kernelType = OPROD_INTERIOR_KERNEL; arg.length = src.VolumeCB(); oprod.apply(streams[Nstream-1]); for(int i=3; i>=0; i--){ if (commDimPartitioned(i)) { // update parameters for this exterior kernel arg.kernelType = OPROD_EXTERIOR_KERNEL; arg.dir = i; // First, do the one hop term { arg.displacement = 1; arg.length = faceVolumeCB[i]; oprod.apply(streams[Nstream-1]); } // Now do the 3 hop term if (nFace == 3) { arg.displacement = 3; arg.length = arg.displacement*faceVolumeCB[i]; oprod.apply(streams[Nstream-1]); } } } // i=3,..,0 checkCudaError(); } // computeStaggeredOprodCuda #endif // GPU_STAGGERED_DIRAC void computeStaggeredOprod(GaugeField& outA, GaugeField& outB, ColorSpinorField& inEven, ColorSpinorField& inOdd, const unsigned int parity, const double coeff[2], int nFace) { #ifdef GPU_STAGGERED_DIRAC if(outA.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", outA.Order()); if(outB.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", outB.Order()); if(inEven.Precision() != outA.Precision()) errorQuda("Mixed precision not supported: %d %d\n", inEven.Precision(), outA.Precision()); cudaColorSpinorField &inA = (parity&1) ? static_cast<cudaColorSpinorField&>(inOdd) : static_cast<cudaColorSpinorField&>(inEven); cudaColorSpinorField &inB = (parity&1) ? 
static_cast<cudaColorSpinorField&>(inEven) : static_cast<cudaColorSpinorField&>(inOdd); inA.allocateGhostBuffer(nFace); inB.allocateGhostBuffer(nFace); if (inEven.Precision() == QUDA_DOUBLE_PRECISION) { Spinor<double2, double2, 3, 0, 0> spinorA(inA, nFace); Spinor<double2, double2, 3, 0, 1> spinorB(inB, nFace); exchangeGhost(nFace,static_cast<cudaColorSpinorField&>(inB), parity, 0); computeStaggeredOprodCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(outA), gauge::FloatNOrder<double, 18, 2, 18>(outB), outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace); } else if (inEven.Precision() == QUDA_SINGLE_PRECISION) { Spinor<float2, float2, 3, 0, 0> spinorA(inA, nFace); Spinor<float2, float2, 3, 0, 1> spinorB(inB, nFace); exchangeGhost(nFace,static_cast<cudaColorSpinorField&>(inB), parity, 0); computeStaggeredOprodCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(outA), gauge::FloatNOrder<float, 18, 2, 18>(outB), outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace); } else { errorQuda("Unsupported precision: %d\n", inEven.Precision()); } #else // GPU_STAGGERED_DIRAC not defined errorQuda("Staggered Outer Product has not been built!"); #endif return; } // computeStaggeredOprod void computeStaggeredOprod(GaugeField *out[], ColorSpinorField& in, const double coeff[], int nFace) { if (nFace == 1) { computeStaggeredOprod(*out[0], *out[0], in.Even(), in.Odd(), 0, coeff, nFace); double coeff_[2] = {-coeff[0],0.0}; // need to multiply by -1 on odd sites computeStaggeredOprod(*out[0], *out[0], in.Even(), in.Odd(), 1, coeff_, nFace); } else if (nFace == 3) { computeStaggeredOprod(*out[0], *out[1], in.Even(), in.Odd(), 0, coeff, nFace); computeStaggeredOprod(*out[0], *out[1], in.Even(), in.Odd(), 1, coeff, nFace); } else { errorQuda("Invalid nFace=%d", nFace); } } } // namespace quda
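This .hip file and the .cu file that follows are the same QUDA staggered outer-product source before and after hipify; the most visible rewrite is the kernel launch, where triple-chevron calls become hipLaunchKernelGGL, alongside renames such as cudaStream_t to hipStream_t and cudaDeviceSynchronize to hipDeviceSynchronize. The toy program below (CUDA, with a made-up kernel and variable names) shows one launch in both forms, the HIP version as a comment, so the mapping is easy to read next to the real pair.

// One kernel launch in CUDA form and, as a comment, the hipify rewrite of the same call.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 1024;
    float *d_x;
    cudaMalloc((void **)&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);
    dim3 grid((n + 255) / 256), block(256);

    // CUDA form, as in the .cu file below:
    scale<<<grid, block, 0, stream>>>(d_x, 2.0f, n);

    // hipify rewrites the same call into the HIP form seen in the .hip file above:
    //   hipLaunchKernelGGL((scale), dim3(grid), dim3(block), 0, stream, d_x, 2.0f, n);
    // together with header and type renames (cuda_runtime.h -> hip/hip_runtime.h,
    // cudaStream_t -> hipStream_t, cudaDeviceSynchronize -> hipDeviceSynchronize, ...).

    cudaStreamSynchronize(stream);
    cudaStreamDestroy(stream);
    cudaFree(d_x);
    printf("launched scale on %d elements\n", n);
    return 0;
}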
03d1e1f8da3449d8cfa376c34fe9326e70ee4676.cu
#include <cstdio> #include <cstdlib> #include <staggered_oprod.h> #include <tune_quda.h> #include <quda_internal.h> #include <gauge_field_order.h> #include <quda_matrix.h> namespace quda { #ifdef GPU_STAGGERED_DIRAC namespace { // anonymous #include <texture.h> } enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL}; template<typename Float, typename Output, typename InputA, typename InputB> struct StaggeredOprodArg { unsigned int length; int X[4]; unsigned int parity; unsigned int dir; unsigned int ghostOffset[4]; unsigned int displacement; KernelType kernelType; int nFace; bool partitioned[4]; InputA inA; InputB inB; Output outA; Output outB; Float coeff[2]; StaggeredOprodArg(const unsigned int parity, const unsigned int dir, const unsigned int *ghostOffset, const unsigned int displacement, const KernelType& kernelType, const int nFace, const double coeff[2], InputA& inA, InputB& inB, Output& outA, Output& outB, GaugeField& meta) : length(meta.VolumeCB()), parity(parity), dir(dir), displacement(displacement), kernelType(kernelType), nFace(nFace), inA(inA), inB(inB), outA(outA), outB(outB) { this->coeff[0] = coeff[0]; this->coeff[1] = coeff[1]; for(int i=0; i<4; ++i) this->X[i] = meta.X()[i]; for(int i=0; i<4; ++i) this->ghostOffset[i] = ghostOffset[i]; for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? true : false; } }; enum IndexType { EVEN_X = 0, EVEN_Y = 1, EVEN_Z = 2, EVEN_T = 3 }; template <IndexType idxType> static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4], const unsigned int cb_idx, const unsigned int parity, const int X[4]) { const int &LX = X[0]; const int &LY = X[1]; const int &LZ = X[2]; const int XYZ = X[2]*X[1]*X[0]; const int XY = X[1]*X[0]; idx = 2*cb_idx; int x, y, z, t; if (idxType == EVEN_X /*!(LX & 1)*/) { // X even // t = idx / XYZ; // z = (idx / XY) % Z; // y = (idx / X) % Y; // idx += (parity + t + z + y) & 1; // x = idx % X; // equivalent to the above, but with fewer divisions/mods: int aux1 = idx / LX; x = idx - aux1 * LX; int aux2 = aux1 / LY; y = aux1 - aux2 * LY; t = aux2 / LZ; z = aux2 - t * LZ; aux1 = (parity + t + z + y) & 1; x += aux1; idx += aux1; } else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even t = idx / XYZ; z = (idx / XY) % LZ; idx += (parity + t + z) & 1; y = (idx / LX) % LY; x = idx % LX; } else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even t = idx / XYZ; idx += (parity + t) & 1; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } else { idx += parity; t = idx / XYZ; z = (idx / XY) % LZ; y = (idx / LX) % LY; x = idx % LX; } c[0] = x; c[1] = y; c[2] = z; c[3] = t; } // Get the coordinates for the exterior kernels __device__ static void coordsFromIndex(int x[4], const unsigned int cb_idx, const int X[4], const unsigned int dir, const int displacement, const unsigned int parity) { int Xh[2] = {X[0]/2, X[1]/2}; switch(dir){ case 0: x[2] = cb_idx/Xh[1] % X[2]; x[3] = cb_idx/(Xh[1]*X[2]) % X[3]; x[0] = cb_idx/(Xh[1]*X[2]*X[3]); x[0] += (X[0] - displacement); x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1); break; case 1: x[2] = cb_idx/Xh[0] % X[2]; x[3] = cb_idx/(Xh[0]*X[2]) % X[3]; x[1] = cb_idx/(Xh[0]*X[2]*X[3]); x[1] += (X[1] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 2: x[1] = cb_idx/Xh[0] % X[1]; x[3] = cb_idx/(Xh[0]*X[1]) % X[3]; x[2] = cb_idx/(Xh[0]*X[1]*X[3]); x[2] += (X[2] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; case 3: x[1] = cb_idx/Xh[0] % X[1]; x[2] = cb_idx/(Xh[0]*X[1]) % X[2]; x[3] = 
cb_idx/(Xh[0]*X[1]*X[2]); x[3] += (X[3] - displacement); x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1); break; } return; } __device__ __forceinline__ int neighborIndex(const unsigned int cb_idx, const int shift[4], const bool partitioned[4], const unsigned int parity, const int X[4]){ int full_idx; int x[4]; coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X); for(int dim = 0; dim<4; ++dim){ if( partitioned[dim] ) if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1; } for(int dim=0; dim<4; ++dim){ x[dim] = shift[dim] ? (x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim]; } return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1; } template<typename real, typename Output, typename InputA, typename InputB> __global__ void interiorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int gridSize = gridDim.x*blockDim.x; typedef complex<real> Complex; Complex x[3]; Complex y[3]; Complex z[3]; Matrix<Complex,3> result; Matrix<Complex,3> tempA, tempB; // input while(idx<arg.length){ arg.inA.load(x, idx); #pragma unroll for(int dim=0; dim<4; ++dim){ int shift[4] = {0,0,0,0}; shift[dim] = 1; const int first_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(first_nbr_idx >= 0){ arg.inB.load(y, first_nbr_idx); outerProd(y,x,&result); arg.outA.load(reinterpret_cast<real*>(tempA.data), idx, dim, arg.parity); result = tempA + result*arg.coeff[0]; arg.outA.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); if (arg.nFace == 3) { shift[dim] = 3; const int third_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X); if(third_nbr_idx >= 0){ arg.inB.load(z, third_nbr_idx); outerProd(z, x, &result); arg.outB.load(reinterpret_cast<real*>(tempB.data), idx, dim, arg.parity); result = tempB + result*arg.coeff[1]; arg.outB.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity); } } } } // dim idx += gridSize; } return; } // interiorOprodKernel template<int dim, typename real, typename Output, typename InputA, typename InputB> __global__ void exteriorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg) { typedef complex<real> Complex; unsigned int cb_idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int gridSize = gridDim.x*blockDim.x; Complex a[3]; Complex b[3]; Matrix<Complex,3> result; Matrix<Complex,3> inmatrix; // input Output& out = (arg.displacement == 1) ? arg.outA : arg.outB; real coeff = (arg.displacement == 1) ? 
arg.coeff[0] : arg.coeff[1]; int x[4]; while(cb_idx<arg.length){ coordsFromIndex(x, cb_idx, arg.X, arg.dir, arg.displacement, arg.parity); const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1); out.load(reinterpret_cast<real*>(inmatrix.data), bulk_cb_idx, arg.dir, arg.parity); arg.inA.load(a, bulk_cb_idx); const unsigned int ghost_idx = arg.ghostOffset[dim] + cb_idx; arg.inB.loadGhost(b, ghost_idx, arg.dir); outerProd(b,a,&result); result = inmatrix + result*coeff; out.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, arg.dir, arg.parity); cb_idx += gridSize; } return; } template<typename Float, typename Output, typename InputA, typename InputB> class StaggeredOprodField : public Tunable { private: StaggeredOprodArg<Float,Output,InputA,InputB> &arg; const GaugeField &meta; unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; } unsigned int minThreads() const { return arg.outA.volumeCB; } bool tunedGridDim() const { return false; } public: StaggeredOprodField(StaggeredOprodArg<Float,Output,InputA,InputB> &arg, const GaugeField &meta) : arg(arg), meta(meta) { writeAuxString("threads=%d,prec=%lu,stride=%d",arg.length,sizeof(Complex)/2,arg.inA.Stride()); // this sets the communications pattern for the packing kernel int comms[QUDA_MAX_DIM] = { commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3) }; setPackComms(comms); } virtual ~StaggeredOprodField() {} void apply(const cudaStream_t &stream){ if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) { // Disable tuning for the time being TuneParam tp = tuneLaunch(*this, QUDA_TUNE_NO, getVerbosity()); if (arg.kernelType == OPROD_INTERIOR_KERNEL) { interiorOprodKernel<<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); } else if (arg.kernelType == OPROD_EXTERIOR_KERNEL) { if (arg.dir == 0) exteriorOprodKernel<0><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); else if (arg.dir == 1) exteriorOprodKernel<1><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); else if (arg.dir == 2) exteriorOprodKernel<2><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); else if (arg.dir == 3) exteriorOprodKernel<3><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg); } else { errorQuda("Kernel type not supported\n"); } } else { // run the CPU code errorQuda("No CPU support for staggered outer-product calculation\n"); } } // apply void preTune(){ this->arg.outA.save(); this->arg.outB.save(); } void postTune(){ this->arg.outA.load(); this->arg.outB.load(); } long long flops() const { return 0; } // FIXME long long bytes() const { return 0; } // FIXME TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux);} }; // StaggeredOprodField void exchangeGhost(int nFace, cudaColorSpinorField &a, int parity, int dag) { // need to enable packing in temporal direction to get spin-projector correct bool pack_old = getKernelPackT(); setKernelPackT(true); // first transfer src1 cudaDeviceSynchronize(); MemoryLocation location[2*QUDA_MAX_DIM] = {Device, Device, Device, Device, Device, Device, Device, Device}; a.pack(nFace, 1-parity, dag, Nstream-1, location); cudaDeviceSynchronize(); for(int i=3; i>=0; i--){ if(commDimPartitioned(i)){ // Initialize the host transfer from the source spinor a.gather(nFace, dag, 2*i); } // commDim(i) } // i=3,..,0 cudaDeviceSynchronize(); comm_barrier(); for (int i=3; i>=0; i--) { if(commDimPartitioned(i)) { a.commsStart(nFace, 2*i, dag); } } for (int i=3; i>=0; i--) 
{ if(commDimPartitioned(i)) { a.commsWait(nFace, 2*i, dag); a.scatter(nFace, dag, 2*i); } } cudaDeviceSynchronize(); setKernelPackT(pack_old); // restore packing state a.bufferIndex = (1 - a.bufferIndex); comm_barrier(); } template<typename Float, typename Output, typename InputA, typename InputB> void computeStaggeredOprodCuda(Output outA, Output outB, GaugeField& outFieldA, GaugeField& outFieldB, InputA& inA, InputB& inB, cudaColorSpinorField& src, const unsigned int parity, const int faceVolumeCB[4], const double coeff[2], int nFace) { unsigned int ghostOffset[4] = {0,0,0,0}; for(int dir=0; dir<4; ++dir) ghostOffset[dir] = src.GhostOffset(dir,1)/src.FieldOrder(); // offset we want is the forwards one // Create the arguments for the interior kernel StaggeredOprodArg<Float,Output,InputA,InputB> arg(parity, 0, ghostOffset, 1, OPROD_INTERIOR_KERNEL, nFace, coeff, inA, inB, outA, outB, outFieldA); StaggeredOprodField<Float,Output,InputA,InputB> oprod(arg, outFieldA); arg.kernelType = OPROD_INTERIOR_KERNEL; arg.length = src.VolumeCB(); oprod.apply(streams[Nstream-1]); for(int i=3; i>=0; i--){ if (commDimPartitioned(i)) { // update parameters for this exterior kernel arg.kernelType = OPROD_EXTERIOR_KERNEL; arg.dir = i; // First, do the one hop term { arg.displacement = 1; arg.length = faceVolumeCB[i]; oprod.apply(streams[Nstream-1]); } // Now do the 3 hop term if (nFace == 3) { arg.displacement = 3; arg.length = arg.displacement*faceVolumeCB[i]; oprod.apply(streams[Nstream-1]); } } } // i=3,..,0 checkCudaError(); } // computeStaggeredOprodCuda #endif // GPU_STAGGERED_DIRAC void computeStaggeredOprod(GaugeField& outA, GaugeField& outB, ColorSpinorField& inEven, ColorSpinorField& inOdd, const unsigned int parity, const double coeff[2], int nFace) { #ifdef GPU_STAGGERED_DIRAC if(outA.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", outA.Order()); if(outB.Order() != QUDA_FLOAT2_GAUGE_ORDER) errorQuda("Unsupported output ordering: %d\n", outB.Order()); if(inEven.Precision() != outA.Precision()) errorQuda("Mixed precision not supported: %d %d\n", inEven.Precision(), outA.Precision()); cudaColorSpinorField &inA = (parity&1) ? static_cast<cudaColorSpinorField&>(inOdd) : static_cast<cudaColorSpinorField&>(inEven); cudaColorSpinorField &inB = (parity&1) ? 
static_cast<cudaColorSpinorField&>(inEven) : static_cast<cudaColorSpinorField&>(inOdd); inA.allocateGhostBuffer(nFace); inB.allocateGhostBuffer(nFace); if (inEven.Precision() == QUDA_DOUBLE_PRECISION) { Spinor<double2, double2, 3, 0, 0> spinorA(inA, nFace); Spinor<double2, double2, 3, 0, 1> spinorB(inB, nFace); exchangeGhost(nFace,static_cast<cudaColorSpinorField&>(inB), parity, 0); computeStaggeredOprodCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(outA), gauge::FloatNOrder<double, 18, 2, 18>(outB), outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace); } else if (inEven.Precision() == QUDA_SINGLE_PRECISION) { Spinor<float2, float2, 3, 0, 0> spinorA(inA, nFace); Spinor<float2, float2, 3, 0, 1> spinorB(inB, nFace); exchangeGhost(nFace,static_cast<cudaColorSpinorField&>(inB), parity, 0); computeStaggeredOprodCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(outA), gauge::FloatNOrder<float, 18, 2, 18>(outB), outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace); } else { errorQuda("Unsupported precision: %d\n", inEven.Precision()); } #else // GPU_STAGGERED_DIRAC not defined errorQuda("Staggered Outer Product has not been built!"); #endif return; } // computeStaggeredOprod void computeStaggeredOprod(GaugeField *out[], ColorSpinorField& in, const double coeff[], int nFace) { if (nFace == 1) { computeStaggeredOprod(*out[0], *out[0], in.Even(), in.Odd(), 0, coeff, nFace); double coeff_[2] = {-coeff[0],0.0}; // need to multiply by -1 on odd sites computeStaggeredOprod(*out[0], *out[0], in.Even(), in.Odd(), 1, coeff_, nFace); } else if (nFace == 3) { computeStaggeredOprod(*out[0], *out[1], in.Even(), in.Odd(), 0, coeff, nFace); computeStaggeredOprod(*out[0], *out[1], in.Even(), in.Odd(), 1, coeff, nFace); } else { errorQuda("Invalid nFace=%d", nFace); } } } // namespace quda
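Both versions of this file rely on the same even/odd (checkerboard) index arithmetic: coordsFromIndex<EVEN_X> expands a cb_idx plus a parity bit into 4-d coordinates, and the exterior kernel recovers bulk_cb_idx with (((x3*X2 + x2)*X1 + x1)*X0 + x0) >> 1. The host-only sketch below, with hypothetical lattice dimensions, round-trips that arithmetic using the straightforward form shown in the comments of coordsFromIndex; it assumes X[0] is even, as the EVEN_X branch does.

// Host-only round-trip of the checkerboard index arithmetic (hypothetical lattice dims).
#include <cstdio>
#include <cassert>

int main()
{
    const int X[4] = {8, 8, 8, 16};                       // assumes X[0] is even (EVEN_X case)
    const int volumeCB = X[0] * X[1] * X[2] * X[3] / 2;

    for (int parity = 0; parity < 2; ++parity) {
        for (int cb_idx = 0; cb_idx < volumeCB; ++cb_idx) {
            // cb_idx + parity -> coordinates, following the commented form in coordsFromIndex<EVEN_X>
            int idx = 2 * cb_idx;
            int t = idx / (X[0] * X[1] * X[2]);
            int z = (idx / (X[0] * X[1])) % X[2];
            int y = (idx / X[0]) % X[1];
            idx += (parity + t + z + y) & 1;              // shift onto the requested sublattice
            int x = idx % X[0];

            assert(((x + y + z + t) & 1) == parity);      // site parity is what was asked for

            // coordinates -> cb_idx, the bulk_cb_idx formula used in the exterior kernel
            int back = ((((t * X[2] + z) * X[1] + y) * X[0] + x) >> 1);
            assert(back == cb_idx);
        }
    }
    printf("checkerboard index round-trip OK for %d sites per parity\n", volumeCB);
    return 0;
}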
1670f947a1bc9d643bd6e7bd3bb9965664444e16.hip
// !!! This is a file automatically generated by hipify!!! /*! \file Sort.cu \author Gregory Diamos <gregory.diamos> \date Wednesday December 1, 2010 \brief The source file for the C interface to CUDA sorting routines. */ // Redfox Includes #include <redfox/nvcc/interface/RelationalAlgebraKernel.h> #include <redfox/ra/interface/ModernGPUSort.h> #include <redfox/ra/interface/Tuple.h> #include <redfox/ra/interface/moderngpu/include/kernels/mergesort.cuh> #include <stdio.h> #include <iostream> class gpu128 { public: typedef long long unsigned int type; public: type a[2]; }; class gpu256 { public: typedef long long unsigned int type; public: type a[4]; }; class gpu512 { public: typedef long long unsigned int type; public: type a[8]; }; struct compare_sort_string { __host__ __device__ bool operator()(unsigned long long int i, unsigned long long int j) { char *string1 = (char *)i; char *string2 = (char *)j; int ii = 0; while(string1[ii] != '\0' && string2[ii] != '\0') { if(string1[ii] != string2[ii]) return (string1[ii] < string2[ii]); ii++; } if(string1[ii] == '\0' && string2[ii] != '\0') return true; else return false; } }; struct compare_sort_gpu128 { typedef gpu128 type; __host__ __device__ bool operator()(type i, type j) { if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu256 { typedef gpu256 type; __host__ __device__ bool operator()(type i, type j) { if (i.a[3] != j.a[3]) return (i.a[3] < j.a[3]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu512 { typedef gpu512 type; __host__ __device__ bool operator()(type i, type j) { if (i.a[7] != j.a[7]) return (i.a[7] < j.a[7]); if (i.a[6] != j.a[6]) return (i.a[6] < j.a[6]); if (i.a[5] != j.a[5]) return (i.a[5] < j.a[5]); if (i.a[4] != j.a[4]) return (i.a[4] < j.a[4]); if (i.a[3] != j.a[3]) return (i.a[3] < j.a[3]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; namespace redfox { void check(hipError_t status) { if(status != hipSuccess) { std::cerr << hipGetErrorString(status) << "\n"; std::abort(); } } void sort_string_pair(void* key_begin, void* value_begin, unsigned long long int size, unsigned long long int key_type, unsigned long long int value_type) { // unsigned int data_key[30142]; // unsigned long long int data_value[10]; // check(hipMemcpy(data_key, (unsigned int *)key_begin, 30142*4, // hipMemcpyDeviceToHost)); // check(hipMemcpy(data_value, (unsigned long long int *)value_begin, 80, // hipMemcpyDeviceToHost)); //printf("size %llu\n", size); // for(unsigned int i = 0; i <300; ++i) // printf("%u %u \n", i, data_key[i]); // for(unsigned int i = 30141; i > 30131; --i) // printf("%u %u \n", i, data_key[i]); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float exe_time = 0.0f; hipEventRecord(start,0); // size_t freeMem, totalMem; // hipMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; // mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // // context->Start(); if (value_type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortPairs<unsigned long long int, unsigned char>((unsigned long long int*)key_begin, (unsigned char*)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortPairs<unsigned long long int, unsigned short>((unsigned long long int*)key_begin, (unsigned short*)value_begin, 
size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned long long int, unsigned int>((unsigned long long int*)key_begin, (unsigned int*)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortPairs<unsigned long long int, unsigned long long int>((unsigned long long int*)key_begin, (unsigned long long int*)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortPairs<unsigned long long int, type>((unsigned long long int*)key_begin, (type *)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortPairs<unsigned long long int, type>((unsigned long long int*)key_begin, (type *)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortPairs<unsigned long long int, type>((unsigned long long int*)key_begin, (type *)value_begin, size, compare_sort_string()/*, *context*/); } // exe_time += context->Split(); // hipMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&exe_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("sort %lf\n", exe_time); // check(hipMemcpy(data_key, (unsigned int *)key_begin, 40, // hipMemcpyDeviceToHost)); // // check(hipMemcpy(data_value, (unsigned long long int *)value_begin, 80, // hipMemcpyDeviceToHost)); // // for(unsigned int i = 0; i < 10; ++i) // printf("%u %u %llx\n", i, data_key[i], data_value[i]); } void sort_pair(void* key_begin, void* value_begin, unsigned long long int size, unsigned long long int key_type, unsigned long long int value_type) { // unsigned int data_key[30142]; // unsigned long long int data_value[10]; // check(hipMemcpy(data_key, (unsigned int *)key_begin, 30142*4, // hipMemcpyDeviceToHost)); // check(hipMemcpy(data_value, (unsigned long long int *)value_begin, 80, // hipMemcpyDeviceToHost)); //printf("size %llu\n", size); // for(unsigned int i = 0; i <300; ++i) // printf("%u %u \n", i, data_key[i]); // for(unsigned int i = 30141; i > 30131; --i) // printf("%u %u \n", i, data_key[i]); // ra::tuple::PackedNBytes<2> data_key[10]; // check(hipMemcpy(&data_key, (ra::tuple::PackedNBytes<2> *)key_begin, 160, // hipMemcpyDeviceToHost)); // for(int i = 0; i < 10; ++i) // printf("before reduce key %d %llx, %llx\n", i, data_key[i].a[0], data_key[i].a[1]); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float exe_time = 0.0f; hipEventRecord(start,0); // size_t freeMem, totalMem; // hipMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; // mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // // context->Start(); if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortPairs<unsigned char, unsigned short>((unsigned char*)key_begin, (unsigned short *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned char, unsigned int>((unsigned char*)key_begin, (unsigned int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == 
nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortPairs<unsigned char, unsigned long long int>((unsigned char*)key_begin, (unsigned long long int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortPairs<unsigned char, type>((unsigned char*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortPairs<unsigned char, type>((unsigned char*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortPairs<unsigned char, type>((unsigned char*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortPairs<unsigned short, unsigned char>((unsigned short*)key_begin, (unsigned char *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned short, unsigned int>((unsigned short*)key_begin, (unsigned int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortPairs<unsigned short, unsigned long long int>((unsigned short*)key_begin, (unsigned long long int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortPairs<unsigned short, type>((unsigned short*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortPairs<unsigned short, type>((unsigned short*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortPairs<unsigned short, type>((unsigned short*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortPairs<unsigned int, unsigned char>((unsigned int*)key_begin, (unsigned char *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortPairs<unsigned int, unsigned short>((unsigned int*)key_begin, (unsigned short *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned int, unsigned int>((unsigned int*)key_begin, (unsigned int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortPairs<unsigned int, unsigned long long int>((unsigned int*)key_begin, (unsigned long long int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortPairs<unsigned 
int, type>((unsigned int*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortPairs<unsigned int, type>((unsigned int*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortPairs<unsigned int, type>((unsigned int*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I64 && value_type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortPairs<unsigned long long int, unsigned char>((unsigned long long int*)key_begin, (unsigned char *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I64 && value_type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortPairs<unsigned long long int, unsigned short>((unsigned long long int*)key_begin, (unsigned short *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I64 && value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned long long int, unsigned int>((unsigned long long int*)key_begin, (unsigned int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I128 && value_type == nvcc::RelationalAlgebraKernel::I16) { typedef gpu128 type; mgpu::MergesortPairs<type, unsigned short>((type*)key_begin, (unsigned short *)value_begin, size, compare_sort_gpu128()/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I128 && value_type == nvcc::RelationalAlgebraKernel::I32) { typedef gpu128 type; mgpu::MergesortPairs<type, unsigned int>((type*)key_begin, (unsigned int *)value_begin, size, compare_sort_gpu128()/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I128 && value_type == nvcc::RelationalAlgebraKernel::I64) { typedef gpu128 type; mgpu::MergesortPairs<type, unsigned long long int>((type*)key_begin, (unsigned long long int *)value_begin, size, compare_sort_gpu128()/*, *context*/); } // exe_time += context->Split(); // hipMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&exe_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("sort %lf\n", exe_time); // check(hipMemcpy(data_key, (unsigned int *)key_begin, 40, // hipMemcpyDeviceToHost)); // // check(hipMemcpy(data_value, (unsigned long long int *)value_begin, 80, // hipMemcpyDeviceToHost)); // // for(unsigned int i = 0; i < 10; ++i) // printf("%u %u %llx\n", i, data_key[i], data_value[i]); } void sort_string_key(void* key_begin, unsigned long long int size, unsigned long long int type) { // unsigned int *data_key = (unsigned int *)malloc(100 * 4); // // check(hipMemcpy(data_key, (unsigned int *)key_begin, 100*4, // hipMemcpyDeviceToHost)); // for(unsigned int i = 0; i < 100; ++i) // printf("%u %x \n", i, data_key[i]); // printf("%llu %p\n", size, key_begin); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float exe_time = 0.0f; hipEventRecord(start,0); // size_t freeMem, totalMem; // hipMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; // mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // // context->Start(); mgpu::MergesortKeys<unsigned long long int>((unsigned long long int*)key_begin, size, compare_sort_string()/*, *context*/); // 
exe_time += context->Split(); // hipMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&exe_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("sort %lf\n", exe_time); // check(hipMemcpy(data_key, (unsigned char *)key_begin, 10, // hipMemcpyDeviceToHost)); // // check(hipMemcpy(data_value, (double *)value_begin, 80, // hipMemcpyDeviceToHost)); // // for(unsigned int i = 0; i < 10; ++i) // printf("%u %x %lf\n", i, data_key[i], data_value[i]); } void sort_key(void* key_begin, unsigned long long int size, unsigned long long int type) { // unsigned int *data_key = (unsigned int *)malloc(100 * 4); // // check(hipMemcpy(data_key, (unsigned int *)key_begin, 100*4, // hipMemcpyDeviceToHost)); // for(unsigned int i = 0; i < 100; ++i) // printf("%u %x \n", i, data_key[i]); // printf("%llu %p\n", size, key_begin); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float exe_time = 0.0f; hipEventRecord(start,0); // size_t freeMem, totalMem; // hipMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; // mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // // context->Start(); if (type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortKeys<unsigned char>((unsigned char*)key_begin, size/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortKeys<unsigned short>((unsigned short*)key_begin, size/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortKeys<unsigned int>((unsigned int*)key_begin, size/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortKeys<unsigned long long int>((unsigned long long int*)key_begin, size/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortKeys<type>((type *)key_begin, size, compare_sort_gpu128()/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortKeys<type>((type *)key_begin, size, compare_sort_gpu256()/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortKeys<type>((type *)key_begin, size, compare_sort_gpu512()/*, *context*/); } // exe_time += context->Split(); // hipMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&exe_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("sort %lf\n", exe_time); // check(hipMemcpy(data_key, (unsigned char *)key_begin, 10, // hipMemcpyDeviceToHost)); // // check(hipMemcpy(data_value, (double *)value_begin, 80, // hipMemcpyDeviceToHost)); // // for(unsigned int i = 0; i < 10; ++i) // printf("%u %x %lf\n", i, data_key[i], data_value[i]); } }
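sort_pair and sort_key above dispatch mgpu::MergesortPairs / MergesortKeys over every key and value width, passing comparators such as compare_sort_gpu128 for multi-word keys. The sketch below illustrates the same idea, a key-value sort with a lexicographic comparator over a two-word key, but uses Thrust instead of the bundled moderngpu fork purely so it builds on a stock CUDA toolkit; it is a stand-in for illustration, not the code path redfox actually takes.

// Key-value sort with a lexicographic comparator over a 128-bit key
// (Thrust stand-in for mgpu::MergesortPairs; the key layout mirrors gpu128 above).
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <cstdio>

struct key128 {
    unsigned long long a[2];   // a[1] is the most significant word, as in compare_sort_gpu128
};

struct compare_key128 {
    __host__ __device__ bool operator()(const key128 &i, const key128 &j) const {
        if (i.a[1] != j.a[1]) return i.a[1] < j.a[1];
        return i.a[0] < j.a[0];
    }
};

int main()
{
    const int n = 1 << 20;
    thrust::host_vector<key128> h_keys(n);
    thrust::host_vector<unsigned int> h_vals(n);
    for (int i = 0; i < n; ++i) {
        h_keys[i].a[0] = (unsigned long long)(n - i);
        h_keys[i].a[1] = (unsigned long long)(i % 3);
        h_vals[i]      = (unsigned int)i;
    }

    thrust::device_vector<key128> d_keys = h_keys;
    thrust::device_vector<unsigned int> d_vals = h_vals;

    // Sort values by key with the custom comparator
    // (same role as MergesortPairs(key_begin, value_begin, size, comp) above).
    thrust::sort_by_key(d_keys.begin(), d_keys.end(), d_vals.begin(), compare_key128());

    key128 first = d_keys[0];
    printf("smallest key = (%llu, %llu)\n", first.a[1], first.a[0]);
    return 0;
}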
1670f947a1bc9d643bd6e7bd3bb9965664444e16.cu
/*! \file Sort.cu \author Gregory Diamos <gregory.diamos> \date Wednesday December 1, 2010 \brief The source file for the C interface to CUDA sorting routines. */ // Redfox Includes #include <redfox/nvcc/interface/RelationalAlgebraKernel.h> #include <redfox/ra/interface/ModernGPUSort.h> #include <redfox/ra/interface/Tuple.h> #include <redfox/ra/interface/moderngpu/include/kernels/mergesort.cuh> #include <stdio.h> #include <iostream> class gpu128 { public: typedef long long unsigned int type; public: type a[2]; }; class gpu256 { public: typedef long long unsigned int type; public: type a[4]; }; class gpu512 { public: typedef long long unsigned int type; public: type a[8]; }; struct compare_sort_string { __host__ __device__ bool operator()(unsigned long long int i, unsigned long long int j) { char *string1 = (char *)i; char *string2 = (char *)j; int ii = 0; while(string1[ii] != '\0' && string2[ii] != '\0') { if(string1[ii] != string2[ii]) return (string1[ii] < string2[ii]); ii++; } if(string1[ii] == '\0' && string2[ii] != '\0') return true; else return false; } }; struct compare_sort_gpu128 { typedef gpu128 type; __host__ __device__ bool operator()(type i, type j) { if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu256 { typedef gpu256 type; __host__ __device__ bool operator()(type i, type j) { if (i.a[3] != j.a[3]) return (i.a[3] < j.a[3]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu512 { typedef gpu512 type; __host__ __device__ bool operator()(type i, type j) { if (i.a[7] != j.a[7]) return (i.a[7] < j.a[7]); if (i.a[6] != j.a[6]) return (i.a[6] < j.a[6]); if (i.a[5] != j.a[5]) return (i.a[5] < j.a[5]); if (i.a[4] != j.a[4]) return (i.a[4] < j.a[4]); if (i.a[3] != j.a[3]) return (i.a[3] < j.a[3]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; namespace redfox { void check(cudaError_t status) { if(status != cudaSuccess) { std::cerr << cudaGetErrorString(status) << "\n"; std::abort(); } } void sort_string_pair(void* key_begin, void* value_begin, unsigned long long int size, unsigned long long int key_type, unsigned long long int value_type) { // unsigned int data_key[30142]; // unsigned long long int data_value[10]; // check(cudaMemcpy(data_key, (unsigned int *)key_begin, 30142*4, // cudaMemcpyDeviceToHost)); // check(cudaMemcpy(data_value, (unsigned long long int *)value_begin, 80, // cudaMemcpyDeviceToHost)); //printf("size %llu\n", size); // for(unsigned int i = 0; i <300; ++i) // printf("%u %u \n", i, data_key[i]); // for(unsigned int i = 30141; i > 30131; --i) // printf("%u %u \n", i, data_key[i]); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float exe_time = 0.0f; cudaEventRecord(start,0); // size_t freeMem, totalMem; // cudaMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; // mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // // context->Start(); if (value_type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortPairs<unsigned long long int, unsigned char>((unsigned long long int*)key_begin, (unsigned char*)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortPairs<unsigned long long int, unsigned short>((unsigned long long int*)key_begin, (unsigned short*)value_begin, size, compare_sort_string()/*, *context*/); } 
else if (value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned long long int, unsigned int>((unsigned long long int*)key_begin, (unsigned int*)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortPairs<unsigned long long int, unsigned long long int>((unsigned long long int*)key_begin, (unsigned long long int*)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortPairs<unsigned long long int, type>((unsigned long long int*)key_begin, (type *)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortPairs<unsigned long long int, type>((unsigned long long int*)key_begin, (type *)value_begin, size, compare_sort_string()/*, *context*/); } else if (value_type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortPairs<unsigned long long int, type>((unsigned long long int*)key_begin, (type *)value_begin, size, compare_sort_string()/*, *context*/); } // exe_time += context->Split(); // cudaMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&exe_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("sort %lf\n", exe_time); // check(cudaMemcpy(data_key, (unsigned int *)key_begin, 40, // cudaMemcpyDeviceToHost)); // // check(cudaMemcpy(data_value, (unsigned long long int *)value_begin, 80, // cudaMemcpyDeviceToHost)); // // for(unsigned int i = 0; i < 10; ++i) // printf("%u %u %llx\n", i, data_key[i], data_value[i]); } void sort_pair(void* key_begin, void* value_begin, unsigned long long int size, unsigned long long int key_type, unsigned long long int value_type) { // unsigned int data_key[30142]; // unsigned long long int data_value[10]; // check(cudaMemcpy(data_key, (unsigned int *)key_begin, 30142*4, // cudaMemcpyDeviceToHost)); // check(cudaMemcpy(data_value, (unsigned long long int *)value_begin, 80, // cudaMemcpyDeviceToHost)); //printf("size %llu\n", size); // for(unsigned int i = 0; i <300; ++i) // printf("%u %u \n", i, data_key[i]); // for(unsigned int i = 30141; i > 30131; --i) // printf("%u %u \n", i, data_key[i]); // ra::tuple::PackedNBytes<2> data_key[10]; // check(cudaMemcpy(&data_key, (ra::tuple::PackedNBytes<2> *)key_begin, 160, // cudaMemcpyDeviceToHost)); // for(int i = 0; i < 10; ++i) // printf("before reduce key %d %llx, %llx\n", i, data_key[i].a[0], data_key[i].a[1]); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float exe_time = 0.0f; cudaEventRecord(start,0); // size_t freeMem, totalMem; // cudaMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; // mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // // context->Start(); if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortPairs<unsigned char, unsigned short>((unsigned char*)key_begin, (unsigned short *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned char, unsigned int>((unsigned char*)key_begin, (unsigned int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == 
nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortPairs<unsigned char, unsigned long long int>((unsigned char*)key_begin, (unsigned long long int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortPairs<unsigned char, type>((unsigned char*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortPairs<unsigned char, type>((unsigned char*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I8 && value_type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortPairs<unsigned char, type>((unsigned char*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortPairs<unsigned short, unsigned char>((unsigned short*)key_begin, (unsigned char *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned short, unsigned int>((unsigned short*)key_begin, (unsigned int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortPairs<unsigned short, unsigned long long int>((unsigned short*)key_begin, (unsigned long long int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortPairs<unsigned short, type>((unsigned short*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortPairs<unsigned short, type>((unsigned short*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I16 && value_type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortPairs<unsigned short, type>((unsigned short*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortPairs<unsigned int, unsigned char>((unsigned int*)key_begin, (unsigned char *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortPairs<unsigned int, unsigned short>((unsigned int*)key_begin, (unsigned short *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned int, unsigned int>((unsigned int*)key_begin, (unsigned int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortPairs<unsigned int, unsigned long long int>((unsigned int*)key_begin, (unsigned long long int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortPairs<unsigned 
int, type>((unsigned int*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortPairs<unsigned int, type>((unsigned int*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I32 && value_type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortPairs<unsigned int, type>((unsigned int*)key_begin, (type *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I64 && value_type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortPairs<unsigned long long int, unsigned char>((unsigned long long int*)key_begin, (unsigned char *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I64 && value_type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortPairs<unsigned long long int, unsigned short>((unsigned long long int*)key_begin, (unsigned short *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I64 && value_type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortPairs<unsigned long long int, unsigned int>((unsigned long long int*)key_begin, (unsigned int *)value_begin, size/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I128 && value_type == nvcc::RelationalAlgebraKernel::I16) { typedef gpu128 type; mgpu::MergesortPairs<type, unsigned short>((type*)key_begin, (unsigned short *)value_begin, size, compare_sort_gpu128()/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I128 && value_type == nvcc::RelationalAlgebraKernel::I32) { typedef gpu128 type; mgpu::MergesortPairs<type, unsigned int>((type*)key_begin, (unsigned int *)value_begin, size, compare_sort_gpu128()/*, *context*/); } else if (key_type == nvcc::RelationalAlgebraKernel::I128 && value_type == nvcc::RelationalAlgebraKernel::I64) { typedef gpu128 type; mgpu::MergesortPairs<type, unsigned long long int>((type*)key_begin, (unsigned long long int *)value_begin, size, compare_sort_gpu128()/*, *context*/); } // exe_time += context->Split(); // cudaMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&exe_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("sort %lf\n", exe_time); // check(cudaMemcpy(data_key, (unsigned int *)key_begin, 40, // cudaMemcpyDeviceToHost)); // // check(cudaMemcpy(data_value, (unsigned long long int *)value_begin, 80, // cudaMemcpyDeviceToHost)); // // for(unsigned int i = 0; i < 10; ++i) // printf("%u %u %llx\n", i, data_key[i], data_value[i]); } void sort_string_key(void* key_begin, unsigned long long int size, unsigned long long int type) { // unsigned int *data_key = (unsigned int *)malloc(100 * 4); // // check(cudaMemcpy(data_key, (unsigned int *)key_begin, 100*4, // cudaMemcpyDeviceToHost)); // for(unsigned int i = 0; i < 100; ++i) // printf("%u %x \n", i, data_key[i]); // printf("%llu %p\n", size, key_begin); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float exe_time = 0.0f; cudaEventRecord(start,0); // size_t freeMem, totalMem; // cudaMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; // mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // // context->Start(); mgpu::MergesortKeys<unsigned long long int>((unsigned long long int*)key_begin, size, compare_sort_string()/*, 
*context*/); // exe_time += context->Split(); // cudaMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&exe_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("sort %lf\n", exe_time); // check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10, // cudaMemcpyDeviceToHost)); // // check(cudaMemcpy(data_value, (double *)value_begin, 80, // cudaMemcpyDeviceToHost)); // // for(unsigned int i = 0; i < 10; ++i) // printf("%u %x %lf\n", i, data_key[i], data_value[i]); } void sort_key(void* key_begin, unsigned long long int size, unsigned long long int type) { // unsigned int *data_key = (unsigned int *)malloc(100 * 4); // // check(cudaMemcpy(data_key, (unsigned int *)key_begin, 100*4, // cudaMemcpyDeviceToHost)); // for(unsigned int i = 0; i < 100; ++i) // printf("%u %x \n", i, data_key[i]); // printf("%llu %p\n", size, key_begin); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float exe_time = 0.0f; cudaEventRecord(start,0); // size_t freeMem, totalMem; // cudaMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; // mgpu::ContextPtr context = mgpu::CreateCudaDevice(0); // // context->Start(); if (type == nvcc::RelationalAlgebraKernel::I8) { mgpu::MergesortKeys<unsigned char>((unsigned char*)key_begin, size/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I16) { mgpu::MergesortKeys<unsigned short>((unsigned short*)key_begin, size/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I32) { mgpu::MergesortKeys<unsigned int>((unsigned int*)key_begin, size/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I64) { mgpu::MergesortKeys<unsigned long long int>((unsigned long long int*)key_begin, size/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I128) { typedef gpu128 type; mgpu::MergesortKeys<type>((type *)key_begin, size, compare_sort_gpu128()/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I256) { typedef gpu256 type; mgpu::MergesortKeys<type>((type *)key_begin, size, compare_sort_gpu256()/*, *context*/); } else if (type == nvcc::RelationalAlgebraKernel::I512) { typedef gpu512 type; mgpu::MergesortKeys<type>((type *)key_begin, size, compare_sort_gpu512()/*, *context*/); } // exe_time += context->Split(); // cudaMemGetInfo(&freeMem, &totalMem); // std::cout << freeMem << "\n"; cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&exe_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("sort %lf\n", exe_time); // check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10, // cudaMemcpyDeviceToHost)); // // check(cudaMemcpy(data_value, (double *)value_begin, 80, // cudaMemcpyDeviceToHost)); // // for(unsigned int i = 0; i < 10; ++i) // printf("%u %x %lf\n", i, data_key[i], data_value[i]); } }
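A minimal host-side sketch of driving the type-tag dispatch above, assuming the same redfox headers (RelationalAlgebraKernel.h, ModernGPUSort.h) are on the include path and that ModernGPUSort.h declares sort_key with the signature defined in this file; the key count and key contents are illustrative only:

// Illustrative sketch (not part of the original Sort.cu): sort 32-bit device keys
// through redfox::sort_key, which dispatches to mgpu::MergesortKeys<unsigned int>.
#include <redfox/nvcc/interface/RelationalAlgebraKernel.h>
#include <redfox/ra/interface/ModernGPUSort.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <vector>

int main() {
    const unsigned long long n = 1 << 20;
    std::vector<unsigned int> h_keys(n);
    for (unsigned long long i = 0; i < n; ++i)
        h_keys[i] = static_cast<unsigned int>(rand());   // unsorted sample keys

    unsigned int* d_keys = 0;
    cudaMalloc(&d_keys, n * sizeof(unsigned int));
    cudaMemcpy(d_keys, h_keys.data(), n * sizeof(unsigned int), cudaMemcpyHostToDevice);

    // The I32 tag selects the mgpu::MergesortKeys<unsigned int> branch of sort_key.
    redfox::sort_key(d_keys, n, nvcc::RelationalAlgebraKernel::I32);

    cudaMemcpy(h_keys.data(), d_keys, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("smallest key after sort: %u\n", h_keys[0]);

    cudaFree(d_keys);
    return 0;
}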
103bf20991110f126fc85ec4dcef316e652ddc80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "gputimer.h" #include <stdlib.h> #include <stdio.h> #include <iostream> #include <string> #include <vector> #include <algorithm> #include "A_10.h" #include "b_10.h" #include "A_32.h" #include "b_32.h" #include "A_512.h" #include "b_512.h" #include "A_1024.h" #include "b_1024.h" #include "X_32.h" #include "X_512.h" #include "X_1024.h" #define MAX_N 2048 using namespace std; constexpr auto MAX_NUMBER_THREADS = 1024; hipError_t solveMatrixWithCuda(int numOfThreads, int dimension); void PrintMatrix(double ar[][MAX_N], int n, int m, bool isInverted); void InverseOfMatrix(double matrix[][MAX_N], int dimension); __global__ void multiplyKernel(double* MatrixA, double* vecB, double* VecSol, int dimension, int numOfThreads) { for (int i = 0; i < ((dimension+(numOfThreads-1))/numOfThreads); i++) { int j = (threadIdx.x + numOfThreads * i) + (blockIdx.x * numOfThreads); int k = dimension * j; if (j < dimension) { for (int z = 0; z < dimension; z++) { VecSol[j] += MatrixA[k + z] * vecB[z]; } } } } // Function to print the matrix. void PrintMatrix(double ar[][MAX_N], int n, int m, bool isInverted) { for (int i = 0; i < n; i++) { if (isInverted) { for (int j = n; j < m; j++) { printf("%.3f ", ar[i][j]); } } else { for (int j = 0; j < m; j++) { printf("%.3f ", ar[i][j]); } } printf("\n"); } return; } // Function to perform the inverse operation on a matrix void InverseOfMatrix(double matrix[][MAX_N], int dimension) { double temp; for (int i = 0; i < dimension; i++) { // Create an identity matrix next to the matrix we are inverting for (int j = 0; j < 2 * dimension; j++) { if (j == (i + dimension)) matrix[i][j] = 1; } } // Interchange/swap the rows for (int i = dimension - 1; i > 0; i--) { if (matrix[i - 1][0] < matrix[i][0]) for (int j = 0; j < 2 * dimension; j++) { temp = matrix[i][j]; matrix[i][j] = matrix[i - 1][j]; matrix[i - 1][j] = temp; } } // Replace a row by sum of itself and a constant multiple of another row of the matrix for (int i = 0; i < dimension; i++) { for (int j = 0; j < dimension; j++) { if (j != i) { temp = matrix[j][i] / matrix[i][i]; for (int k = 0; k < 2 * dimension; k++) { matrix[j][k] -= matrix[i][k] * temp; } } } } // Multiply each row by a nonzero integer and divide row element by the diagonal element for (int i = 0; i < dimension; i++) { temp = matrix[i][i]; for (int j = 0; j < 2 * dimension; j++) { matrix[i][j] = matrix[i][j] / temp; } } return; } int main(int argc, char* argv[]) { // Getting values for dimension and number of threads int dimension; int numOfThreads; if (argc != 3 || argv[1] == NULL || argv[2] == NULL || argv[1] == "-h" || argv[1] == "--help" || argv[1] == "--h") { cout << "ParallelMatrixSolver.exe <Dimension (n) of Matrix = 10, 32, 512, or 1024> <# threads>" << endl; return 0; } else { if (argv[1] != NULL) { dimension = stoi(argv[1]); if (!(dimension == 10 || dimension == 32 || dimension == 512 || dimension == 1024)) { cout << "Dimension must be 10, 32, 512, or 1024" << endl; return -1; } } if (argv[2] != NULL) { numOfThreads = stoi(argv[2]); } } cout << "\nDimension of A = " << dimension; cout << "\nNumber of Threads = " << numOfThreads << endl; cout << "Calculating..." 
<< endl; solveMatrixWithCuda(numOfThreads, dimension); return 0; } hipError_t solveMatrixWithCuda(int numOfThreads, int dimension) { hipError_t cudaStatus = hipError_t::cudaErrorDeviceUninitilialized; GpuTimer gpuTimer; // Struct for timing the GPU // Initialize sparse 2048 * 2048 matrices in CPU memory static double matrixA[MAX_N][MAX_N] = { 0 }; static double solutionX[MAX_N][MAX_N] = { 0 }; // Allocate from float matrices to the double matrix for (int i = 0; i < dimension; i++) { for (int j = 0; j < dimension; j++) { if (dimension == 10) { matrixA[i][j] = A_10[i][j]; } if (dimension == 32) { matrixA[i][j] = A_32[i][j]; } if (dimension == 512) { matrixA[i][j] = A_512[i][j]; } if (dimension == 1024) { matrixA[i][j] = A_1024[i][j]; } } } // As per assignment instructions, if the matrix has dimension n >= 512 we don't invert and instead multiply // the values directly with the solution matrix if (dimension == 10 || dimension == 32) { InverseOfMatrix(matrixA, dimension); } double *dev_inverseA, *dev_vectorB, *dev_solution = nullptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate memory for the input matrix, the vectorB, and then the solution cudaStatus = hipMallocManaged((void**)& dev_inverseA, dimension * dimension * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc for matrix failed!"); goto Error; } cudaStatus = hipMallocManaged((void**)& dev_vectorB, dimension * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc for vector b failed!"); goto Error; } cudaStatus = hipMallocManaged((void**)& dev_solution, dimension * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc for solution vector x failed!"); goto Error; } // Copy over values from the input matrices to the CUDA memory // Note that the index which you copy from will vary depending on // whether or not you inverted the matrix for (int i = 0; i < dimension; i++) { int k = 0; if (dimension == 10 || dimension == 32) { for (int j = dimension; j < (2 * dimension); j++) { dev_inverseA[(i * dimension) + k] = matrixA[i][j]; k++; } } if (dimension == 512 || dimension == 1024) { for (int j = 0; j < dimension; j++) { dev_inverseA[(i * dimension) + k] = matrixA[i][j]; k++; } } } for (int i = 0; i < dimension; i++) { if (dimension == 10) { dev_vectorB[i] = b_10[i][0]; } if (dimension == 32) { dev_vectorB[i] = b_32[i][0]; } if (dimension == 512) { dev_vectorB[i] = X_512[i][0]; } if (dimension == 1024) { dev_vectorB[i] = X_1024[i][0]; } } int numBlocks = ((numOfThreads + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS); int threadsPerBlock = ((numOfThreads + (numBlocks - 1)) / numBlocks); /*************************************** Parrallel Part of Execution **********************************************/ gpuTimer.Start(); multiplyKernel << <numBlocks, threadsPerBlock >> > (dev_inverseA, dev_vectorB, dev_solution, dimension, threadsPerBlock); gpuTimer.Stop(); /******************************************************************************************************************/ float timeElapsed = gpuTimer.Elapsed(); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "multiplyKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any 
errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching one of the kernels!\n", cudaStatus); goto Error; } // Copy over values back from CUDA memory and print them for (int i = 0; i < dimension; i++) { solutionX[i][0] = dev_solution[i]; } PrintMatrix(solutionX, dimension, 1, false); printf("-- Number of Threads: %d -- Dimension: %d -- Execution Time (ms): %g \n", numOfThreads, dimension, timeElapsed); Error: hipFree(dev_inverseA); hipFree(dev_vectorB); hipFree(dev_solution); return cudaStatus; }
103bf20991110f126fc85ec4dcef316e652ddc80.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "gputimer.h" #include <stdlib.h> #include <stdio.h> #include <iostream> #include <string> #include <vector> #include <algorithm> #include "A_10.h" #include "b_10.h" #include "A_32.h" #include "b_32.h" #include "A_512.h" #include "b_512.h" #include "A_1024.h" #include "b_1024.h" #include "X_32.h" #include "X_512.h" #include "X_1024.h" #define MAX_N 2048 using namespace std; constexpr auto MAX_NUMBER_THREADS = 1024; cudaError_t solveMatrixWithCuda(int numOfThreads, int dimension); void PrintMatrix(double ar[][MAX_N], int n, int m, bool isInverted); void InverseOfMatrix(double matrix[][MAX_N], int dimension); __global__ void multiplyKernel(double* MatrixA, double* vecB, double* VecSol, int dimension, int numOfThreads) { for (int i = 0; i < ((dimension+(numOfThreads-1))/numOfThreads); i++) { int j = (threadIdx.x + numOfThreads * i) + (blockIdx.x * numOfThreads); int k = dimension * j; if (j < dimension) { for (int z = 0; z < dimension; z++) { VecSol[j] += MatrixA[k + z] * vecB[z]; } } } } // Function to print the matrix. void PrintMatrix(double ar[][MAX_N], int n, int m, bool isInverted) { for (int i = 0; i < n; i++) { if (isInverted) { for (int j = n; j < m; j++) { printf("%.3f ", ar[i][j]); } } else { for (int j = 0; j < m; j++) { printf("%.3f ", ar[i][j]); } } printf("\n"); } return; } // Function to perform the inverse operation on a matrix void InverseOfMatrix(double matrix[][MAX_N], int dimension) { double temp; for (int i = 0; i < dimension; i++) { // Create an identity matrix next to the matrix we are inverting for (int j = 0; j < 2 * dimension; j++) { if (j == (i + dimension)) matrix[i][j] = 1; } } // Interchange/swap the rows for (int i = dimension - 1; i > 0; i--) { if (matrix[i - 1][0] < matrix[i][0]) for (int j = 0; j < 2 * dimension; j++) { temp = matrix[i][j]; matrix[i][j] = matrix[i - 1][j]; matrix[i - 1][j] = temp; } } // Replace a row by sum of itself and a constant multiple of another row of the matrix for (int i = 0; i < dimension; i++) { for (int j = 0; j < dimension; j++) { if (j != i) { temp = matrix[j][i] / matrix[i][i]; for (int k = 0; k < 2 * dimension; k++) { matrix[j][k] -= matrix[i][k] * temp; } } } } // Multiply each row by a nonzero integer and divide row element by the diagonal element for (int i = 0; i < dimension; i++) { temp = matrix[i][i]; for (int j = 0; j < 2 * dimension; j++) { matrix[i][j] = matrix[i][j] / temp; } } return; } int main(int argc, char* argv[]) { // Getting values for dimension and number of threads int dimension; int numOfThreads; if (argc != 3 || argv[1] == NULL || argv[2] == NULL || argv[1] == "-h" || argv[1] == "--help" || argv[1] == "--h") { cout << "ParallelMatrixSolver.exe <Dimension (n) of Matrix = 10, 32, 512, or 1024> <# threads>" << endl; return 0; } else { if (argv[1] != NULL) { dimension = stoi(argv[1]); if (!(dimension == 10 || dimension == 32 || dimension == 512 || dimension == 1024)) { cout << "Dimension must be 10, 32, 512, or 1024" << endl; return -1; } } if (argv[2] != NULL) { numOfThreads = stoi(argv[2]); } } cout << "\nDimension of A = " << dimension; cout << "\nNumber of Threads = " << numOfThreads << endl; cout << "Calculating..." 
<< endl; solveMatrixWithCuda(numOfThreads, dimension); return 0; } cudaError_t solveMatrixWithCuda(int numOfThreads, int dimension) { cudaError_t cudaStatus = cudaError_t::cudaErrorDeviceUninitilialized; GpuTimer gpuTimer; // Struct for timing the GPU // Initialize sparse 2048 * 2048 matrices in CPU memory static double matrixA[MAX_N][MAX_N] = { 0 }; static double solutionX[MAX_N][MAX_N] = { 0 }; // Allocate from float matrices to the double matrix for (int i = 0; i < dimension; i++) { for (int j = 0; j < dimension; j++) { if (dimension == 10) { matrixA[i][j] = A_10[i][j]; } if (dimension == 32) { matrixA[i][j] = A_32[i][j]; } if (dimension == 512) { matrixA[i][j] = A_512[i][j]; } if (dimension == 1024) { matrixA[i][j] = A_1024[i][j]; } } } // As per assignment instructions, if the matrix has dimension n >= 512 we don't invert and instead multiply // the values directly with the solution matrix if (dimension == 10 || dimension == 32) { InverseOfMatrix(matrixA, dimension); } double *dev_inverseA, *dev_vectorB, *dev_solution = nullptr; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate memory for the input matrix, the vectorB, and then the solution cudaStatus = cudaMallocManaged((void**)& dev_inverseA, dimension * dimension * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc for matrix failed!"); goto Error; } cudaStatus = cudaMallocManaged((void**)& dev_vectorB, dimension * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc for vector b failed!"); goto Error; } cudaStatus = cudaMallocManaged((void**)& dev_solution, dimension * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc for solution vector x failed!"); goto Error; } // Copy over values from the input matrices to the CUDA memory // Note that the index which you copy from will vary depending on // whether or not you inverted the matrix for (int i = 0; i < dimension; i++) { int k = 0; if (dimension == 10 || dimension == 32) { for (int j = dimension; j < (2 * dimension); j++) { dev_inverseA[(i * dimension) + k] = matrixA[i][j]; k++; } } if (dimension == 512 || dimension == 1024) { for (int j = 0; j < dimension; j++) { dev_inverseA[(i * dimension) + k] = matrixA[i][j]; k++; } } } for (int i = 0; i < dimension; i++) { if (dimension == 10) { dev_vectorB[i] = b_10[i][0]; } if (dimension == 32) { dev_vectorB[i] = b_32[i][0]; } if (dimension == 512) { dev_vectorB[i] = X_512[i][0]; } if (dimension == 1024) { dev_vectorB[i] = X_1024[i][0]; } } int numBlocks = ((numOfThreads + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS); int threadsPerBlock = ((numOfThreads + (numBlocks - 1)) / numBlocks); /*************************************** Parrallel Part of Execution **********************************************/ gpuTimer.Start(); multiplyKernel << <numBlocks, threadsPerBlock >> > (dev_inverseA, dev_vectorB, dev_solution, dimension, threadsPerBlock); gpuTimer.Stop(); /******************************************************************************************************************/ float timeElapsed = gpuTimer.Elapsed(); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "multiplyKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, 
and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching one of the kernels!\n", cudaStatus); goto Error; } // Copy over values back from CUDA memory and print them for (int i = 0; i < dimension; i++) { solutionX[i][0] = dev_solution[i]; } PrintMatrix(solutionX, dimension, 1, false); printf("-- Number of Threads: %d -- Dimension: %d -- Execution Time (ms): %g \n", numOfThreads, dimension, timeElapsed); Error: cudaFree(dev_inverseA); cudaFree(dev_vectorB); cudaFree(dev_solution); return cudaStatus; }
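The launch shape for multiplyKernel above is derived from the requested thread count rather than from the matrix size: numBlocks = ceil(numOfThreads / 1024), threadsPerBlock = ceil(numOfThreads / numBlocks), and each thread then loops over ceil(dimension / threadsPerBlock) row indices, accumulating one dot product of a matrix row with the vector per index j. Note that the kernel computes j = threadIdx.x + threadsPerBlock * (i + blockIdx.x), so with a single block every row is visited exactly once, while with more than one block different blocks appear to revisit the same rows. The sketch below only reproduces the host-side sizing arithmetic for a couple of sample inputs; the sample values are illustrative, not from the original:

// Illustrative reproduction of the launch-shape arithmetic in solveMatrixWithCuda.
#include <cstdio>

static void show(int numOfThreads, int dimension) {
    const int MAX_NUMBER_THREADS = 1024;
    int numBlocks = (numOfThreads + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS;
    int threadsPerBlock = (numOfThreads + (numBlocks - 1)) / numBlocks;
    int rowsPerThread = (dimension + (threadsPerBlock - 1)) / threadsPerBlock;
    printf("threads=%4d dim=%4d -> blocks=%d, threads/block=%d, rows/thread=%d\n",
           numOfThreads, dimension, numBlocks, threadsPerBlock, rowsPerThread);
}

int main() {
    show(256, 1024);   // 1 block of 256 threads, each accumulating 4 rows
    show(1500, 1024);  // 2 blocks of 750 threads, each accumulating 2 rows
    return 0;
}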
5b0930c0362f0e6ebc8921cbf2591fc491e9df0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> //execution time macro #define GET_TIME(X, Y) (((Y).tv_sec - (X).tv_sec) + ((Y).tv_nsec - (X).tv_nsec) / 1000000000.0) // threads per block #define THREADS_PER_BLOCK 1024 // __constant__ __device__ int IE_d; __constant__ __device__ int JE_d; __constant__ __device__ float cb_d; __constant__ __device__ int is_d; __constant__ __device__ float pi_d; __constant__ __device__ float freq_d; __constant__ __device__ float dt_d; __constant__ __device__ float db_d; __global__ void ezCalc ( float *ez, float *hx, float *hy ) { int i = blockIdx.y * blockDim.x + threadIdx.x; int j = blockIdx.x; if (i < IE_d) { if (j == 0) { // at x=0 if (i == 0 || i == IE_d - 1) // at x=0,y=0 ez[j * IE_d + i] = 0.0; else ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1 + JE_d) * IE_d + i] - hx[j * IE_d + i]); } else { if (i == 0 || i == IE_d - 1) ez[j * IE_d + i] = 0.0; else ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1) * IE_d + i] - hx[j * IE_d + i]); } } } __global__ void ezCalc2 ( float *ez , int n ) { int j; for (j = threadIdx.x; j < JE_d; j += blockDim.x) { ez[j * IE_d + is_d] = cos(2 * pi_d * freq_d * n * dt_d); } } __global__ void hCalc ( float *ez, float *hx, float *hy ) { int i = blockIdx.y * blockDim.x + threadIdx.x; int j = blockIdx.x; if (i < IE_d) { if (j + 1 == JE_d) hx[j * IE_d + i] = hx[j * IE_d + i] + db_d * (ez[j * IE_d + i] - ez[i]); else hx[j * IE_d + i] = hx[j * IE_d + i] + db_d * (ez[j * IE_d + i] - ez[(j + 1) * JE_d + i]); if (i == IE_d - 1) hy[j * JE_d + i] = hy[j * JE_d + i] + db_d * (0 - ez[j * JE_d + i]); else hy[j * JE_d + i] = hy[j * JE_d + i] + db_d * (ez[j * JE_d + (i + 1)] - ez[j * JE_d + i]); } } int main(int argc, char * argv[]) { int IE, JE, nsteps; int i, j, n, is; float pi = 3.141592653589793238462643; float * ez, * hx, * hy; float * ez_d, *hx_d, * hy_d; float dx, dt, epsz, mu, courant, cb, db, c, freq; int size, grid_x, grid_y; struct timespec Begin, Step0, Step1, Step2, Step3, End; FILE * fp; if (argc != 4) { printf("Invalid arguments... 
please type:\n"); printf(" %s IE JE steps\n", argv[0]); exit(0); } IE = atoi(argv[1]); JE = atoi(argv[2]); nsteps = atoi(argv[3]); printf("Running 2D FDTD algorithm with matrix of size %d x %d (%d steps)\n", IE, JE, nsteps); hipMemcpyToSymbol(pi_d, &pi, sizeof(float), 0, hipMemcpyHostToDevice); is = 10; hipMemcpyToSymbol(is_d, &is, sizeof(int), 0, hipMemcpyHostToDevice); epsz = 8.854e-12; mu = 4.0 * pi * 1.0e-7; c = 3.0e8; courant = 0.5; dx = 0.001; dt = (courant * dx) / (sqrt(2) * c); hipMemcpyToSymbol(dt_d, &dt, sizeof(float), 0, hipMemcpyHostToDevice); cb = dt / (epsz * dx); db = dt / (mu * dx); hipMemcpyToSymbol(cb_d, &cb, sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(db_d, &db, sizeof(float), 0, hipMemcpyHostToDevice); printf("Coefficients are: dt=%g cb=%g db=%g\n", dt, cb, db); size = IE * JE; grid_x = JE; grid_y = (IE + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK; dim3 grid(grid_x, grid_y, 1); ez = (float * ) calloc(size, sizeof(float)); hx = (float * ) calloc(size, sizeof(float)); hy = (float * ) calloc(size, sizeof(float)); hipMalloc( (void **) &ez_d, size * sizeof(float)); hipMalloc( (void **) &hx_d, size * sizeof(float)); hipMalloc( (void **) &hy_d, size * sizeof(float)); freq = 50e9; hipMemcpyToSymbol(freq_d, &freq, sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(JE_d, &JE, sizeof(float), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(IE_d, &IE, sizeof(float), 0, hipMemcpyHostToDevice); if (clock_gettime(CLOCK_REALTIME, &Begin) == -1) { perror("Error in gettime"); exit(1); } // Transfer initial matrices to gpu hipMemcpy( ez_d, ez, size * sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( hx_d, hx, size * sizeof(float), hipMemcpyHostToDevice ); hipMemcpy( hy_d, hy, size * sizeof(float), hipMemcpyHostToDevice ); for (n = 0; n < nsteps; n++) { // TIME if (clock_gettime(CLOCK_REALTIME, &Step0) == -1) { perror("Error in gettime"); exit(1); } //Calculate the Ez field hipLaunchKernelGGL(( ezCalc), dim3(grid), dim3(THREADS_PER_BLOCK), 0, 0, ez_d, hx_d, hy_d ); clock_gettime(CLOCK_REALTIME, &Step1); //Ez field generator (line) hipLaunchKernelGGL(( ezCalc2), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, ez_d , n ); clock_gettime(CLOCK_REALTIME, &Step2); //Calculate the H field hipLaunchKernelGGL(( hCalc), dim3(grid), dim3(THREADS_PER_BLOCK), 0, 0, ez_d, hx_d, hy_d ); if (clock_gettime(CLOCK_REALTIME, &Step3) == -1) { perror("Error in gettime"); exit(1); } } // Retrieve matrices from gpu hipMemcpy( ez, ez_d, size * sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( hx, hx_d, size * sizeof(float), hipMemcpyDeviceToHost ); hipMemcpy( hy, hy_d, size * sizeof(float), hipMemcpyDeviceToHost ); if (clock_gettime(CLOCK_REALTIME, &End) == -1) { perror("Error in gettime"); exit(1); } printf("\n\n====Total time: %f\n", GET_TIME(Begin, End)); // write output to file fp = fopen("output_gpu_v4.txt", "w"); fprintf(fp, "==================== Ez MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", ez[i]); } fprintf(fp, "==================== Hx MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", hx[i]); } fprintf(fp, "==================== Hy MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", hy[i]); } free(ez); free(hy); free(hx); hipFree( ez_d ); hipFree( hx_d ); hipFree( hy_d 
); return 0; }
5b0930c0362f0e6ebc8921cbf2591fc491e9df0f.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> //execution time macro #define GET_TIME(X, Y) (((Y).tv_sec - (X).tv_sec) + ((Y).tv_nsec - (X).tv_nsec) / 1000000000.0) // threads per block #define THREADS_PER_BLOCK 1024 // __constant__ __device__ int IE_d; __constant__ __device__ int JE_d; __constant__ __device__ float cb_d; __constant__ __device__ int is_d; __constant__ __device__ float pi_d; __constant__ __device__ float freq_d; __constant__ __device__ float dt_d; __constant__ __device__ float db_d; __global__ void ezCalc ( float *ez, float *hx, float *hy ) { int i = blockIdx.y * blockDim.x + threadIdx.x; int j = blockIdx.x; if (i < IE_d) { if (j == 0) { // at x=0 if (i == 0 || i == IE_d - 1) // at x=0,y=0 ez[j * IE_d + i] = 0.0; else ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1 + JE_d) * IE_d + i] - hx[j * IE_d + i]); } else { if (i == 0 || i == IE_d - 1) ez[j * IE_d + i] = 0.0; else ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1) * IE_d + i] - hx[j * IE_d + i]); } } } __global__ void ezCalc2 ( float *ez , int n ) { int j; for (j = threadIdx.x; j < JE_d; j += blockDim.x) { ez[j * IE_d + is_d] = cos(2 * pi_d * freq_d * n * dt_d); } } __global__ void hCalc ( float *ez, float *hx, float *hy ) { int i = blockIdx.y * blockDim.x + threadIdx.x; int j = blockIdx.x; if (i < IE_d) { if (j + 1 == JE_d) hx[j * IE_d + i] = hx[j * IE_d + i] + db_d * (ez[j * IE_d + i] - ez[i]); else hx[j * IE_d + i] = hx[j * IE_d + i] + db_d * (ez[j * IE_d + i] - ez[(j + 1) * JE_d + i]); if (i == IE_d - 1) hy[j * JE_d + i] = hy[j * JE_d + i] + db_d * (0 - ez[j * JE_d + i]); else hy[j * JE_d + i] = hy[j * JE_d + i] + db_d * (ez[j * JE_d + (i + 1)] - ez[j * JE_d + i]); } } int main(int argc, char * argv[]) { int IE, JE, nsteps; int i, j, n, is; float pi = 3.141592653589793238462643; float * ez, * hx, * hy; float * ez_d, *hx_d, * hy_d; float dx, dt, epsz, mu, courant, cb, db, c, freq; int size, grid_x, grid_y; struct timespec Begin, Step0, Step1, Step2, Step3, End; FILE * fp; if (argc != 4) { printf("Invalid arguments... 
please type:\n"); printf(" %s IE JE steps\n", argv[0]); exit(0); } IE = atoi(argv[1]); JE = atoi(argv[2]); nsteps = atoi(argv[3]); printf("Running 2D FDTD algorithm with matrix of size %d x %d (%d steps)\n", IE, JE, nsteps); cudaMemcpyToSymbol(pi_d, &pi, sizeof(float), 0, cudaMemcpyHostToDevice); is = 10; cudaMemcpyToSymbol(is_d, &is, sizeof(int), 0, cudaMemcpyHostToDevice); epsz = 8.854e-12; mu = 4.0 * pi * 1.0e-7; c = 3.0e8; courant = 0.5; dx = 0.001; dt = (courant * dx) / (sqrt(2) * c); cudaMemcpyToSymbol(dt_d, &dt, sizeof(float), 0, cudaMemcpyHostToDevice); cb = dt / (epsz * dx); db = dt / (mu * dx); cudaMemcpyToSymbol(cb_d, &cb, sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(db_d, &db, sizeof(float), 0, cudaMemcpyHostToDevice); printf("Coefficients are: dt=%g cb=%g db=%g\n", dt, cb, db); size = IE * JE; grid_x = JE; grid_y = (IE + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK; dim3 grid(grid_x, grid_y, 1); ez = (float * ) calloc(size, sizeof(float)); hx = (float * ) calloc(size, sizeof(float)); hy = (float * ) calloc(size, sizeof(float)); cudaMalloc( (void **) &ez_d, size * sizeof(float)); cudaMalloc( (void **) &hx_d, size * sizeof(float)); cudaMalloc( (void **) &hy_d, size * sizeof(float)); freq = 50e9; cudaMemcpyToSymbol(freq_d, &freq, sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(JE_d, &JE, sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(IE_d, &IE, sizeof(float), 0, cudaMemcpyHostToDevice); if (clock_gettime(CLOCK_REALTIME, &Begin) == -1) { perror("Error in gettime"); exit(1); } // Transfer initial matrices to gpu cudaMemcpy( ez_d, ez, size * sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( hx_d, hx, size * sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( hy_d, hy, size * sizeof(float), cudaMemcpyHostToDevice ); for (n = 0; n < nsteps; n++) { // TIME if (clock_gettime(CLOCK_REALTIME, &Step0) == -1) { perror("Error in gettime"); exit(1); } //Calculate the Ez field ezCalc<<<grid, THREADS_PER_BLOCK>>>( ez_d, hx_d, hy_d ); clock_gettime(CLOCK_REALTIME, &Step1); //Ez field generator (line) ezCalc2<<<1, THREADS_PER_BLOCK>>>( ez_d , n ); clock_gettime(CLOCK_REALTIME, &Step2); //Calculate the H field hCalc<<<grid, THREADS_PER_BLOCK>>>( ez_d, hx_d, hy_d ); if (clock_gettime(CLOCK_REALTIME, &Step3) == -1) { perror("Error in gettime"); exit(1); } } // Retrieve matrices from gpu cudaMemcpy( ez, ez_d, size * sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( hx, hx_d, size * sizeof(float), cudaMemcpyDeviceToHost ); cudaMemcpy( hy, hy_d, size * sizeof(float), cudaMemcpyDeviceToHost ); if (clock_gettime(CLOCK_REALTIME, &End) == -1) { perror("Error in gettime"); exit(1); } printf("\n\n====Total time: %f\n", GET_TIME(Begin, End)); // write output to file fp = fopen("output_gpu_v4.txt", "w"); fprintf(fp, "==================== Ez MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", ez[i]); } fprintf(fp, "==================== Hx MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", hx[i]); } fprintf(fp, "==================== Hy MATRIX ========================\n"); for (i = 0, j = 0; (i < IE * JE) && (i < 1000); i++, j++) { if (j == 8) { fprintf(fp, "\n"); j = 0; } fprintf(fp, "%8f ", hy[i]); } free(ez); free(hy); free(hx); cudaFree( ez_d ); cudaFree( hx_d ); cudaFree( hy_d ); return 0; }
4f45e43cf86bfe8e34a10698ccbfe072e070c89d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "hip/hip_runtime.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { hipError_t error = hipGetLastError (); if (error != hipSuccess) { printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error)); exit(-1); } } __global__ void sw4 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_k= (int)(blockDim.z); int k0 = (int)(blockIdx.z)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.z); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4; double r1, r2, r3; if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) { double mux1; double mux2; double mux3; double mux4; double muy1; double muy2; double muy3; double muy4; double muz1; double muz2; double muz3; double muz4; double _t_5_; double _t_7_; double _t_3_; double _t_9_; double _t_11_; double _t_10_; double _t_12_; double _t_13_; double _t_14_; double _t_0_; double _t_16_; double _t_15_; double _t_17_; double _t_18_; double _t_19_; double _t_4_; double _t_2_; double _t_6_; double _t_1_; double _t_8_; double r1; double _t_30_; double _t_31_; double _t_33_; double _t_29_; double _t_27_; double _t_26_; double _t_28_; double _t_32_; double _t_34_; double _t_20_; double _t_22_; double _t_21_; double _t_23_; double _t_24_; double _t_25_; double _t_36_; double _t_35_; double _t_52_; double _t_37_; double _t_54_; double _t_38_; double _t_39_; double r2; double _t_56_; double _t_58_; double _t_42_; double _t_41_; double _t_43_; double _t_44_; double _t_45_; double _t_40_; double _t_47_; double _t_46_; double _t_48_; double _t_49_; double _t_50_; double _t_53_; double _t_51_; double _t_55_; double _t_57_; double _t_59_; double r3; double _t_102_; double _t_100_; double _t_76_; double _t_74_; double _t_63_; double _t_61_; double _t_89_; double _t_87_; double _t_103_; double _t_77_; double _t_85_; double _t_111_; double _t_104_; double _t_80_; double _t_112_; double _t_83_; double _t_101_; double _t_78_; double _t_106_; double _t_86_; double _t_109_; double _t_75_; double _t_107_; double _t_81_; double _t_84_; double _t_105_; double _t_110_; double _t_108_; double _t_60_; double _t_79_; double _t_82_; double _t_64_; double _t_90_; double _t_72_; double _t_98_; double _t_65_; double _t_93_; double _t_73_; double _t_96_; double _t_62_; double _t_91_; double _t_67_; double _t_99_; double _t_88_; 
double _t_70_; double _t_68_; double _t_94_; double _t_92_; double _t_71_; double _t_97_; double _t_95_; double _t_66_; double _t_69_; double _t_129_; double _t_127_; double _t_155_; double _t_153_; double _t_116_; double _t_114_; double _t_142_; double _t_140_; double _t_117_; double _t_143_; double _t_125_; double _t_151_; double _t_118_; double _t_146_; double _t_126_; double _t_149_; double _t_115_; double _t_144_; double _t_120_; double _t_152_; double _t_141_; double _t_123_; double _t_121_; double _t_147_; double _t_145_; double _t_124_; double _t_150_; double _t_148_; double _t_113_; double _t_119_; double _t_122_; double _t_130_; double _t_156_; double _t_138_; double _t_164_; double _t_131_; double _t_159_; double _t_139_; double _t_128_; double _t_162_; double _t_157_; double _t_133_; double _t_165_; double _t_136_; double _t_154_; double _t_134_; double _t_160_; double _t_137_; double _t_158_; double _t_132_; double _t_163_; double _t_135_; double _t_161_; double _t_169_; double _t_182_; double _t_167_; double _t_180_; double _t_195_; double _t_208_; double _t_193_; double _t_206_; double _t_170_; double _t_183_; double _t_178_; double _t_191_; double _t_171_; double _t_186_; double _t_179_; double _t_168_; double _t_189_; double _t_184_; double _t_173_; double _t_192_; double _t_176_; double _t_181_; double _t_174_; double _t_187_; double _t_177_; double _t_185_; double _t_172_; double _t_190_; double _t_175_; double _t_166_; double _t_188_; double _t_196_; double _t_209_; double _t_204_; double _t_217_; double _t_197_; double _t_212_; double _t_205_; double _t_194_; double _t_215_; double _t_210_; double _t_199_; double _t_218_; double _t_207_; double _t_202_; double _t_200_; double _t_213_; double _t_211_; double _t_203_; double _t_198_; double _t_216_; double _t_201_; double _t_214_; double uacc_0kc0jc0ic0; double uacc_1kc0jc0ic0; double uacc_2kc0jc0ic0; mux1 = -3.0 / 4.0 * mu[k][j][i-2] * strx[i-2]; mux1 += mu[k][j][i-1] * strx[i-1]; mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i]; mux2 = mu[k][j][i-2] * strx[i-2]; mux2 += 3.0 * mu[k][j][i] * strx[i]; mux2 += 3.0 * mu[k][j][i-1] * strx[i-1]; mux2 += mu[k][j][i+1] * strx[i+1]; mux3 = mu[k][j][i-1] * strx[i-1]; mux3 += 3.0 * mu[k][j][i+1] * strx[i+1]; mux3 += 3.0 * mu[k][j][i] * strx[i]; mux3 += mu[k][j][i+2] * strx[i+2]; mux4 = mu[k][j][i+1] * strx[i+1]; mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i]; mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2]; muy1 = -3.0 / 4.0 * mu[k][j][i] * stry[j]; muy1 += mu[k][j-1][i] * stry[j-1]; muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2]; muy2 = mu[k][j-2][i] * stry[j-2]; muy2 += 3.0 * mu[k][j][i] * stry[j]; muy2 += 3.0 * mu[k][j-1][i] * stry[j-1]; muy2 += mu[k][j+1][i] * stry[j+1]; muy3 = mu[k][j-1][i] * stry[j-1]; muy3 += 3.0 * mu[k][j+1][i] * stry[j+1]; muy3 += 3.0 * mu[k][j][i] * stry[j]; muy3 += mu[k][j+2][i] * stry[j+2]; muy4 = mu[k][j+1][i] * stry[j+1]; muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j]; muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2]; muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k]; muz1 += mu[k-1][j][i] * strz[k-1]; muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2]; muz2 = mu[k-2][j][i] * strz[k-2]; muz2 += 3.0 * mu[k][j][i] * strz[k]; muz2 += 3.0 * mu[k-1][j][i] * strz[k-1]; muz2 += mu[k+1][j][i] * strz[k+1]; muz3 = mu[k-1][j][i] * strz[k-1]; muz3 += 3.0 * mu[k+1][j][i] * strz[k+1]; muz3 += 3.0 * mu[k][j][i] * strz[k]; muz4 = mu[k+1][j][i] * strz[k+1]; muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k]; muz3 += mu[k+2][j][i] * strz[k+2]; muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2]; _t_5_ = 
u_0[k][j][i-1]; _t_5_ -= u_0[k][j][i]; _t_7_ = -u_0[k][j][i]; _t_7_ += u_0[k][j][i+1]; _t_3_ = -u_0[k][j][i]; _t_3_ += u_0[k][j][i-2]; _t_9_ = -u_0[k][j][i]; _t_9_ += u_0[k][j][i+2]; _t_11_ = -u_0[k][j][i]; _t_11_ += u_0[k][j-2][i]; _t_10_ = muy1 * _t_11_; _t_12_ = -u_0[k][j][i]; _t_12_ += u_0[k][j-1][i]; _t_10_ += muy2 * _t_12_; _t_13_ = -u_0[k][j][i]; _t_13_ += u_0[k][j+1][i]; _t_10_ += muy3 * _t_13_; _t_14_ = -u_0[k][j][i]; _t_14_ += u_0[k][j+2][i]; _t_10_ += muy4 * _t_14_; _t_0_ = stry[j] * _t_10_; _t_16_ = -u_0[k][j][i]; _t_16_ += u_0[k-2][j][i]; _t_15_ = muz1 * _t_16_; _t_17_ = -u_0[k][j][i]; _t_17_ += u_0[k-1][j][i]; _t_15_ += muz2 * _t_17_; _t_18_ = -u_0[k][j][i]; _t_19_ = -u_0[k][j][i]; _t_18_ += u_0[k+1][j][i]; _t_15_ += muz3 * _t_18_; _t_19_ += u_0[k+2][j][i]; _t_15_ += muz4 * _t_19_; _t_0_ += strz[k] * _t_15_; _t_4_ = 2.0 * mux2; _t_2_ = 2.0 * mux1; _t_2_ -= 3.0 / 4.0 * la[k][j][i-2] * strx[i-2]; _t_4_ += la[k][j][i-2] * strx[i-2]; _t_2_ += la[k][j][i-1] * strx[i-1]; _t_4_ += 3.0 * la[k][j][i-1] * strx[i-1]; _t_6_ = la[k][j][i-1] * strx[i-1]; _t_6_ += 2.0 * mux3; _t_2_ -= 3.0 / 4.0 * la[k][j][i] * strx[i]; _t_4_ += 3.0 * la[k][j][i] * strx[i]; _t_6_ += 3.0 * la[k][j][i] * strx[i]; _t_1_ = _t_2_ * _t_3_; _t_8_ = -3.0 / 4.0 * la[k][j][i] * strx[i]; _t_8_ += 2.0 * mux4; _t_4_ += la[k][j][i+1] * strx[i+1]; _t_1_ += _t_4_ * _t_5_; _t_6_ += 3.0 * la[k][j][i+1] * strx[i+1]; _t_8_ += la[k][j][i+1] * strx[i+1]; _t_6_ += la[k][j][i+2] * strx[i+2]; _t_1_ += _t_6_ * _t_7_; _t_8_ -= 3.0 / 4.0 * la[k][j][i+2] * strx[i+2]; _t_1_ += _t_8_ * _t_9_; _t_0_ += strx[i] * _t_1_; r1 = 1.0 / 6.0 * _t_0_; _t_30_ = u_1[k][j-1][i]; _t_31_ = 3.0 * la[k][j][i] * stry[j]; _t_31_ += 2.0 * muy3; _t_33_ = -3.0 / 4.0 * la[k][j][i] * stry[j]; _t_33_ += 2.0 * muy4; _t_31_ += la[k][j+2][i] * stry[j+2]; _t_33_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2]; _t_29_ = 3.0 * la[k][j][i] * stry[j]; _t_29_ += 2.0 * muy2; _t_29_ += la[k][j+1][i] * stry[j+1]; _t_31_ += 3.0 * la[k][j+1][i] * stry[j+1]; _t_33_ += la[k][j+1][i] * stry[j+1]; _t_27_ = -3.0 / 4.0 * la[k][j][i] * stry[j]; _t_27_ += 2.0 * muy1; _t_27_ += la[k][j-1][i] * stry[j-1]; _t_29_ += 3.0 * la[k][j-1][i] * stry[j-1]; _t_31_ += la[k][j-1][i] * stry[j-1]; _t_27_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2]; _t_29_ += la[k][j-2][i] * stry[j-2]; _t_30_ -= u_1[k][j][i]; _t_26_ = _t_29_ * _t_30_; _t_28_ = -u_1[k][j][i]; _t_28_ += u_1[k][j-2][i]; _t_26_ += _t_27_ * _t_28_; _t_32_ = -u_1[k][j][i]; _t_32_ += u_1[k][j+1][i]; _t_26_ += _t_31_ * _t_32_; _t_34_ = -u_1[k][j][i]; _t_34_ += u_1[k][j+2][i]; _t_26_ += _t_33_ * _t_34_; _t_20_ = stry[j] * _t_26_; _t_22_ = -u_1[k][j][i]; _t_22_ += u_1[k][j][i-2]; _t_21_ = mux1 * _t_22_; _t_23_ = -u_1[k][j][i]; _t_23_ += u_1[k][j][i-1]; _t_21_ += mux2 * _t_23_; _t_24_ = -u_1[k][j][i]; _t_24_ += u_1[k][j][i+1]; _t_21_ += mux3 * _t_24_; _t_25_ = -u_1[k][j][i]; _t_25_ += u_1[k][j][i+2]; _t_21_ += mux4 * _t_25_; _t_20_ += strx[i] * _t_21_; _t_36_ = -u_1[k][j][i]; _t_36_ += u_1[k-2][j][i]; _t_35_ = muz1 * _t_36_; _t_52_ = -3.0 / 4.0 * la[k][j][i] * strz[k]; _t_52_ += 2.0 * muz1; _t_37_ = -u_1[k][j][i]; _t_37_ += u_1[k-1][j][i]; _t_35_ += muz2 * _t_37_; _t_54_ = 3.0 * la[k][j][i] * strz[k]; _t_54_ += 2.0 * muz2; _t_52_ -= 3.0 / 4.0 * la[k-2][j][i] * strz[k-2]; _t_54_ += la[k-2][j][i] * strz[k-2]; _t_38_ = -u_1[k][j][i]; _t_39_ = -u_1[k][j][i]; _t_38_ += u_1[k+1][j][i]; _t_35_ += muz3 * _t_38_; _t_39_ += u_1[k+2][j][i]; _t_35_ += muz4 * _t_39_; _t_20_ += strz[k] * _t_35_; r2 = 1.0 / 6.0 * _t_20_; _t_56_ = 3.0 * la[k][j][i] * strz[k]; _t_56_ += 
2.0 * muz3; _t_58_ = -3.0 / 4.0 * la[k][j][i] * strz[k]; _t_58_ += 2.0 * muz4; _t_52_ += la[k-1][j][i] * strz[k-1]; _t_54_ += 3.0 * la[k-1][j][i] * strz[k-1]; _t_56_ += la[k-1][j][i] * strz[k-1]; _t_54_ += la[k+1][j][i] * strz[k+1]; _t_56_ += 3.0 * la[k+1][j][i] * strz[k+1]; _t_58_ += la[k+1][j][i] * strz[k+1]; _t_56_ += la[k+2][j][i] * strz[k+2]; _t_58_ -= 3.0 / 4.0 * la[k+2][j][i] * strz[k+2]; _t_42_ = u_2[k][j][i-2]; _t_42_ -= u_2[k][j][i]; _t_41_ = mux1 * _t_42_; _t_43_ = -u_2[k][j][i]; _t_43_ += u_2[k][j][i-1]; _t_41_ += mux2 * _t_43_; _t_44_ = -u_2[k][j][i]; _t_44_ += u_2[k][j][i+1]; _t_41_ += mux3 * _t_44_; _t_45_ = -u_2[k][j][i]; _t_45_ += u_2[k][j][i+2]; _t_41_ += mux4 * _t_45_; _t_40_ = strx[i] * _t_41_; _t_47_ = -u_2[k][j][i]; _t_47_ += u_2[k][j-2][i]; _t_46_ = muy1 * _t_47_; _t_48_ = -u_2[k][j][i]; _t_48_ += u_2[k][j-1][i]; _t_46_ += muy2 * _t_48_; _t_49_ = -u_2[k][j][i]; _t_49_ += u_2[k][j+1][i]; _t_46_ += muy3 * _t_49_; _t_50_ = -u_2[k][j][i]; _t_50_ += u_2[k][j+2][i]; _t_46_ += muy4 * _t_50_; _t_40_ += stry[j] * _t_46_; _t_53_ = -u_2[k][j][i]; _t_53_ += u_2[k-2][j][i]; _t_51_ = _t_52_ * _t_53_; _t_55_ = -u_2[k][j][i]; _t_55_ += u_2[k-1][j][i]; _t_51_ += _t_54_ * _t_55_; _t_57_ = -u_2[k][j][i]; _t_59_ = -u_2[k][j][i]; _t_57_ += u_2[k+1][j][i]; _t_51_ += _t_56_ * _t_57_; _t_59_ += u_2[k+2][j][i]; _t_51_ += _t_58_ * _t_59_; _t_40_ += strz[k] * _t_51_; r3 = 1.0 / 6.0 * _t_40_; _t_102_ = stry[j] * strz[k]; _t_100_ = _t_102_ * 1.0 / 144.0; _t_76_ = stry[j] * strz[k]; _t_74_ = _t_76_ * 1.0 / 144.0; _t_63_ = strx[i] * strz[k]; _t_61_ = _t_63_ * 1.0 / 144.0; _t_89_ = strx[i] * strz[k]; _t_87_ = _t_89_ * 1.0 / 144.0; _t_103_ = u_1[k-2][j-2][i]; _t_77_ = u_1[k-2][j-2][i]; _t_103_ -= u_1[k-2][j+2][i]; _t_85_ = u_1[k-2][j+2][i]; _t_77_ -= u_1[k+2][j-2][i]; _t_111_ = u_1[k+2][j-2][i]; _t_85_ -= u_1[k+2][j+2][i]; _t_111_ -= u_1[k+2][j+2][i]; _t_104_ = -u_1[k-2][j-1][i]; _t_80_ = u_1[k-2][j-1][i]; _t_80_ -= u_1[k+2][j-1][i]; _t_112_ = -u_1[k+2][j-1][i]; _t_104_ += u_1[k-2][j+1][i]; _t_103_ += 8.0 * _t_104_; _t_83_ = u_1[k-2][j+1][i]; _t_83_ -= u_1[k+2][j+1][i]; _t_112_ += u_1[k+2][j+1][i]; _t_111_ += 8.0 * _t_112_; _t_101_ = la[k-2][j][i] * _t_103_; _t_101_ -= la[k+2][j][i] * _t_111_; _t_78_ = -u_1[k-1][j-2][i]; _t_106_ = u_1[k-1][j-2][i]; _t_106_ -= u_1[k-1][j+2][i]; _t_86_ = -u_1[k-1][j+2][i]; _t_78_ += u_1[k+1][j-2][i]; _t_77_ += 8.0 * _t_78_; _t_109_ = u_1[k+1][j-2][i]; _t_86_ += u_1[k+1][j+2][i]; _t_85_ += 8.0 * _t_86_; _t_109_ -= u_1[k+1][j+2][i]; _t_75_ = mu[k][j-2][i] * _t_77_; _t_75_ -= mu[k][j+2][i] * _t_85_; _t_107_ = -u_1[k-1][j-1][i]; _t_81_ = -u_1[k-1][j-1][i]; _t_107_ += u_1[k-1][j+1][i]; _t_106_ += 8.0 * _t_107_; _t_84_ = -u_1[k-1][j+1][i]; _t_105_ = la[k-1][j][i] * _t_106_; _t_101_ -= 8.0 * _t_105_; _t_81_ += u_1[k+1][j-1][i]; _t_80_ += 8.0 * _t_81_; _t_110_ = -u_1[k+1][j-1][i]; _t_84_ += u_1[k+1][j+1][i]; _t_83_ += 8.0 * _t_84_; _t_110_ += u_1[k+1][j+1][i]; _t_109_ += 8.0 * _t_110_; _t_108_ = la[k+1][j][i] * _t_109_; _t_101_ += 8.0 * _t_108_; _t_60_ = _t_100_ * _t_101_; _t_79_ = mu[k][j-1][i] * _t_80_; _t_75_ -= 8.0 * _t_79_; _t_82_ = mu[k][j+1][i] * _t_83_; _t_75_ += 8.0 * _t_82_; _t_60_ += _t_74_ * _t_75_; _t_64_ = u_0[k-2][j][i-2]; _t_90_ = u_0[k-2][j][i-2]; _t_90_ -= u_0[k-2][j][i+2]; _t_72_ = u_0[k-2][j][i+2]; _t_64_ -= u_0[k+2][j][i-2]; _t_98_ = u_0[k+2][j][i-2]; _t_72_ -= u_0[k+2][j][i+2]; _t_98_ -= u_0[k+2][j][i+2]; _t_65_ = -u_0[k-1][j][i-2]; _t_93_ = u_0[k-1][j][i-2]; _t_93_ -= u_0[k-1][j][i+2]; _t_73_ = -u_0[k-1][j][i+2]; _t_65_ += u_0[k+1][j][i-2]; _t_64_ += 
8.0 * _t_65_; _t_96_ = u_0[k+1][j][i-2]; _t_73_ += u_0[k+1][j][i+2]; _t_72_ += 8.0 * _t_73_; _t_96_ -= u_0[k+1][j][i+2]; _t_62_ = mu[k][j][i-2] * _t_64_; _t_62_ -= mu[k][j][i+2] * _t_72_; _t_91_ = -u_0[k-2][j][i-1]; _t_67_ = u_0[k-2][j][i-1]; _t_67_ -= u_0[k+2][j][i-1]; _t_99_ = -u_0[k+2][j][i-1]; _t_91_ += u_0[k-2][j][i+1]; _t_90_ += 8.0 * _t_91_; _t_88_ = la[k-2][j][i] * _t_90_; _t_70_ = u_0[k-2][j][i+1]; _t_70_ -= u_0[k+2][j][i+1]; _t_99_ += u_0[k+2][j][i+1]; _t_98_ += 8.0 * _t_99_; _t_88_ -= la[k+2][j][i] * _t_98_; _t_68_ = -u_0[k-1][j][i-1]; _t_94_ = -u_0[k-1][j][i-1]; _t_94_ += u_0[k-1][j][i+1]; _t_93_ += 8.0 * _t_94_; _t_92_ = la[k-1][j][i] * _t_93_; _t_88_ -= 8.0 * _t_92_; _t_71_ = -u_0[k-1][j][i+1]; _t_68_ += u_0[k+1][j][i-1]; _t_67_ += 8.0 * _t_68_; _t_97_ = -u_0[k+1][j][i-1]; _t_71_ += u_0[k+1][j][i+1]; _t_70_ += 8.0 * _t_71_; _t_97_ += u_0[k+1][j][i+1]; _t_96_ += 8.0 * _t_97_; _t_95_ = la[k+1][j][i] * _t_96_; _t_88_ += 8.0 * _t_95_; _t_60_ += _t_87_ * _t_88_; _t_66_ = mu[k][j][i-1] * _t_67_; _t_62_ -= 8.0 * _t_66_; _t_69_ = mu[k][j][i+1] * _t_70_; _t_62_ += 8.0 * _t_69_; _t_60_ += _t_61_ * _t_62_; r3 += _t_60_; _t_129_ = strx[i] * strz[k]; _t_127_ = _t_129_ * 1.0 / 144.0; _t_155_ = strx[i] * strz[k]; _t_153_ = _t_155_ * 1.0 / 144.0; _t_116_ = strx[i] * stry[j]; _t_114_ = _t_116_ * 1.0 / 144.0; _t_142_ = strx[i] * stry[j]; _t_140_ = _t_142_ * 1.0 / 144.0; _t_117_ = u_1[k][j-2][i-2]; _t_143_ = u_1[k][j-2][i-2]; _t_143_ -= u_1[k][j-2][i+2]; _t_125_ = u_1[k][j-2][i+2]; _t_117_ -= u_1[k][j+2][i-2]; _t_151_ = u_1[k][j+2][i-2]; _t_125_ -= u_1[k][j+2][i+2]; _t_151_ -= u_1[k][j+2][i+2]; _t_118_ = -u_1[k][j-1][i-2]; _t_146_ = u_1[k][j-1][i-2]; _t_146_ -= u_1[k][j-1][i+2]; _t_126_ = -u_1[k][j-1][i+2]; _t_118_ += u_1[k][j+1][i-2]; _t_117_ += 8.0 * _t_118_; _t_149_ = u_1[k][j+1][i-2]; _t_126_ += u_1[k][j+1][i+2]; _t_125_ += 8.0 * _t_126_; _t_149_ -= u_1[k][j+1][i+2]; _t_115_ = la[k][j][i-2] * _t_117_; _t_115_ -= la[k][j][i+2] * _t_125_; _t_144_ = -u_1[k][j-2][i-1]; _t_120_ = u_1[k][j-2][i-1]; _t_120_ -= u_1[k][j+2][i-1]; _t_152_ = -u_1[k][j+2][i-1]; _t_144_ += u_1[k][j-2][i+1]; _t_143_ += 8.0 * _t_144_; _t_141_ = mu[k][j-2][i] * _t_143_; _t_123_ = u_1[k][j-2][i+1]; _t_123_ -= u_1[k][j+2][i+1]; _t_152_ += u_1[k][j+2][i+1]; _t_151_ += 8.0 * _t_152_; _t_141_ -= mu[k][j+2][i] * _t_151_; _t_121_ = -u_1[k][j-1][i-1]; _t_147_ = -u_1[k][j-1][i-1]; _t_147_ += u_1[k][j-1][i+1]; _t_146_ += 8.0 * _t_147_; _t_145_ = mu[k][j-1][i] * _t_146_; _t_141_ -= 8.0 * _t_145_; _t_124_ = -u_1[k][j-1][i+1]; _t_121_ += u_1[k][j+1][i-1]; _t_120_ += 8.0 * _t_121_; _t_150_ = -u_1[k][j+1][i-1]; _t_124_ += u_1[k][j+1][i+1]; _t_123_ += 8.0 * _t_124_; _t_150_ += u_1[k][j+1][i+1]; _t_149_ += 8.0 * _t_150_; _t_148_ = mu[k][j+1][i] * _t_149_; _t_141_ += 8.0 * _t_148_; _t_113_ = _t_140_ * _t_141_; _t_119_ = la[k][j][i-1] * _t_120_; _t_115_ -= 8.0 * _t_119_; _t_122_ = la[k][j][i+1] * _t_123_; _t_115_ += 8.0 * _t_122_; _t_113_ += _t_114_ * _t_115_; _t_130_ = u_2[k-2][j][i-2]; _t_156_ = u_2[k-2][j][i-2]; _t_156_ -= u_2[k-2][j][i+2]; _t_138_ = u_2[k-2][j][i+2]; _t_130_ -= u_2[k+2][j][i-2]; _t_164_ = u_2[k+2][j][i-2]; _t_138_ -= u_2[k+2][j][i+2]; _t_164_ -= u_2[k+2][j][i+2]; _t_131_ = -u_2[k-1][j][i-2]; _t_159_ = u_2[k-1][j][i-2]; _t_159_ -= u_2[k-1][j][i+2]; _t_139_ = -u_2[k-1][j][i+2]; _t_131_ += u_2[k+1][j][i-2]; _t_130_ += 8.0 * _t_131_; _t_128_ = la[k][j][i-2] * _t_130_; _t_162_ = u_2[k+1][j][i-2]; _t_139_ += u_2[k+1][j][i+2]; _t_138_ += 8.0 * _t_139_; _t_128_ -= la[k][j][i+2] * _t_138_; _t_162_ -= u_2[k+1][j][i+2]; _t_157_ = 
-u_2[k-2][j][i-1]; _t_133_ = u_2[k-2][j][i-1]; _t_133_ -= u_2[k+2][j][i-1]; _t_165_ = -u_2[k+2][j][i-1]; _t_157_ += u_2[k-2][j][i+1]; _t_156_ += 8.0 * _t_157_; _t_136_ = u_2[k-2][j][i+1]; _t_136_ -= u_2[k+2][j][i+1]; _t_165_ += u_2[k+2][j][i+1]; _t_164_ += 8.0 * _t_165_; _t_154_ = mu[k-2][j][i] * _t_156_; _t_154_ -= mu[k+2][j][i] * _t_164_; _t_134_ = -u_2[k-1][j][i-1]; _t_160_ = -u_2[k-1][j][i-1]; _t_160_ += u_2[k-1][j][i+1]; _t_159_ += 8.0 * _t_160_; _t_137_ = -u_2[k-1][j][i+1]; _t_158_ = mu[k-1][j][i] * _t_159_; _t_154_ -= 8.0 * _t_158_; _t_134_ += u_2[k+1][j][i-1]; _t_133_ += 8.0 * _t_134_; _t_132_ = la[k][j][i-1] * _t_133_; _t_128_ -= 8.0 * _t_132_; _t_163_ = -u_2[k+1][j][i-1]; _t_137_ += u_2[k+1][j][i+1]; _t_136_ += 8.0 * _t_137_; _t_163_ += u_2[k+1][j][i+1]; _t_162_ += 8.0 * _t_163_; _t_135_ = la[k][j][i+1] * _t_136_; _t_128_ += 8.0 * _t_135_; _t_113_ += _t_127_ * _t_128_; _t_161_ = mu[k+1][j][i] * _t_162_; _t_154_ += 8.0 * _t_161_; _t_113_ += _t_153_ * _t_154_; r1 += _t_113_; _t_169_ = strx[i] * stry[j]; _t_182_ = strx[i] * stry[j]; _t_167_ = _t_169_ * 1.0 / 144.0; _t_180_ = _t_182_ * 1.0 / 144.0; _t_195_ = stry[j] * strz[k]; _t_208_ = stry[j] * strz[k]; _t_193_ = _t_195_ * 1.0 / 144.0; _t_206_ = _t_208_ * 1.0 / 144.0; _t_170_ = u_0[k][j-2][i-2]; _t_183_ = u_0[k][j-2][i-2]; _t_183_ -= u_0[k][j-2][i+2]; _t_178_ = u_0[k][j-2][i+2]; _t_170_ -= u_0[k][j+2][i-2]; _t_191_ = u_0[k][j+2][i-2]; _t_178_ -= u_0[k][j+2][i+2]; _t_191_ -= u_0[k][j+2][i+2]; _t_171_ = -u_0[k][j-1][i-2]; _t_186_ = u_0[k][j-1][i-2]; _t_186_ -= u_0[k][j-1][i+2]; _t_179_ = -u_0[k][j-1][i+2]; _t_171_ += u_0[k][j+1][i-2]; _t_170_ += 8.0 * _t_171_; _t_168_ = mu[k][j][i-2] * _t_170_; _t_189_ = u_0[k][j+1][i-2]; _t_179_ += u_0[k][j+1][i+2]; _t_178_ += 8.0 * _t_179_; _t_168_ -= mu[k][j][i+2] * _t_178_; _t_189_ -= u_0[k][j+1][i+2]; _t_184_ = -u_0[k][j-2][i-1]; _t_173_ = u_0[k][j-2][i-1]; _t_173_ -= u_0[k][j+2][i-1]; _t_192_ = -u_0[k][j+2][i-1]; _t_184_ += u_0[k][j-2][i+1]; _t_183_ += 8.0 * _t_184_; _t_176_ = u_0[k][j-2][i+1]; _t_176_ -= u_0[k][j+2][i+1]; _t_192_ += u_0[k][j+2][i+1]; _t_191_ += 8.0 * _t_192_; _t_181_ = la[k][j-2][i] * _t_183_; _t_181_ -= la[k][j+2][i] * _t_191_; _t_174_ = -u_0[k][j-1][i-1]; _t_187_ = -u_0[k][j-1][i-1]; _t_187_ += u_0[k][j-1][i+1]; _t_186_ += 8.0 * _t_187_; _t_177_ = -u_0[k][j-1][i+1]; _t_185_ = la[k][j-1][i] * _t_186_; _t_181_ -= 8.0 * _t_185_; _t_174_ += u_0[k][j+1][i-1]; _t_173_ += 8.0 * _t_174_; _t_172_ = mu[k][j][i-1] * _t_173_; _t_168_ -= 8.0 * _t_172_; _t_190_ = -u_0[k][j+1][i-1]; _t_177_ += u_0[k][j+1][i+1]; _t_176_ += 8.0 * _t_177_; _t_190_ += u_0[k][j+1][i+1]; _t_189_ += 8.0 * _t_190_; _t_175_ = mu[k][j][i+1] * _t_176_; _t_168_ += 8.0 * _t_175_; _t_166_ = _t_167_ * _t_168_; _t_188_ = la[k][j+1][i] * _t_189_; _t_181_ += 8.0 * _t_188_; _t_166_ += _t_180_ * _t_181_; _t_196_ = u_2[k-2][j-2][i]; _t_209_ = u_2[k-2][j-2][i]; _t_209_ -= u_2[k-2][j+2][i]; _t_204_ = u_2[k-2][j+2][i]; _t_196_ -= u_2[k+2][j-2][i]; _t_217_ = u_2[k+2][j-2][i]; _t_204_ -= u_2[k+2][j+2][i]; _t_217_ -= u_2[k+2][j+2][i]; _t_197_ = -u_2[k-1][j-2][i]; _t_212_ = u_2[k-1][j-2][i]; _t_212_ -= u_2[k-1][j+2][i]; _t_205_ = -u_2[k-1][j+2][i]; _t_197_ += u_2[k+1][j-2][i]; _t_196_ += 8.0 * _t_197_; _t_194_ = la[k][j-2][i] * _t_196_; _t_215_ = u_2[k+1][j-2][i]; _t_205_ += u_2[k+1][j+2][i]; _t_204_ += 8.0 * _t_205_; _t_194_ -= la[k][j+2][i] * _t_204_; _t_215_ -= u_2[k+1][j+2][i]; _t_210_ = -u_2[k-2][j-1][i]; _t_199_ = u_2[k-2][j-1][i]; _t_199_ -= u_2[k+2][j-1][i]; _t_218_ = -u_2[k+2][j-1][i]; _t_210_ += u_2[k-2][j+1][i]; _t_209_ += 
8.0 * _t_210_; _t_207_ = mu[k-2][j][i] * _t_209_; _t_202_ = u_2[k-2][j+1][i]; _t_202_ -= u_2[k+2][j+1][i]; _t_218_ += u_2[k+2][j+1][i]; _t_217_ += 8.0 * _t_218_; _t_207_ -= mu[k+2][j][i] * _t_217_; _t_200_ = -u_2[k-1][j-1][i]; _t_213_ = -u_2[k-1][j-1][i]; _t_213_ += u_2[k-1][j+1][i]; _t_212_ += 8.0 * _t_213_; _t_211_ = mu[k-1][j][i] * _t_212_; _t_207_ -= 8.0 * _t_211_; _t_203_ = -u_2[k-1][j+1][i]; _t_200_ += u_2[k+1][j-1][i]; _t_199_ += 8.0 * _t_200_; _t_198_ = la[k][j-1][i] * _t_199_; _t_194_ -= 8.0 * _t_198_; _t_216_ = -u_2[k+1][j-1][i]; _t_203_ += u_2[k+1][j+1][i]; _t_202_ += 8.0 * _t_203_; _t_216_ += u_2[k+1][j+1][i]; _t_215_ += 8.0 * _t_216_; _t_201_ = la[k][j+1][i] * _t_202_; _t_194_ += 8.0 * _t_201_; _t_166_ += _t_193_ * _t_194_; _t_214_ = mu[k+1][j][i] * _t_215_; _t_207_ += 8.0 * _t_214_; _t_166_ += _t_206_ * _t_207_; r2 += _t_166_; uacc_0kc0jc0ic0 = a1 * uacc_0[k][j][i]; uacc_0kc0jc0ic0 += cof * r1; uacc_0[k][j][i] = uacc_0kc0jc0ic0; uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i]; uacc_1kc0jc0ic0 += cof * r2; uacc_1[k][j][i] = uacc_1kc0jc0ic0; uacc_2kc0jc0ic0 = a1 * uacc_2[k][j][i]; uacc_2kc0jc0ic0 += cof * r3; uacc_2[k][j][i] = uacc_2kc0jc0ic0; } } extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) { double *uacc_0; hipMalloc (&uacc_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_0\n"); hipMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *uacc_1; hipMalloc (&uacc_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_1\n"); hipMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *uacc_2; hipMalloc (&uacc_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_2\n"); hipMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_0; hipMalloc (&u_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_0\n"); hipMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_1; hipMalloc (&u_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_1\n"); hipMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u_2; hipMalloc (&u_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_2\n"); hipMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *mu; hipMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *la; hipMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *strx; hipMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice); double *stry; hipMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice); double *strz; hipMalloc (&strz, sizeof(double)*N); check_error ("Failed to allocate device memory for strz\n"); hipMemcpy (strz, h_strz, sizeof(double)*N, hipMemcpyHostToDevice); dim3 blockconfig (16, 2, 2); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z)); hipLaunchKernelGGL(( sw4) , 
dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); hipMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, hipMemcpyDeviceToHost); hipMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost); hipMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, hipMemcpyDeviceToHost); hipFree (uacc_0); hipFree (uacc_1); hipFree (uacc_2); hipFree (u_0); hipFree (u_1); hipFree (u_2); hipFree (mu); hipFree (la); hipFree (strx); hipFree (stry); hipFree (strz); }
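// Minimal driver sketch (an assumption for illustration, not taken from the original SW4 source):
// one possible way to call host_code() above. The sw4 kernel casts its flat arrays to
// double (*)[304][304], so N is assumed to be 304 here; the uniform initial values are
// placeholders chosen only for this sketch.
int main ()
{
  const int N = 304;                        // must match the 304x304 casts inside sw4
  const size_t vol = (size_t)N * N * N;
  double *uacc_0 = new double[vol](), *uacc_1 = new double[vol](), *uacc_2 = new double[vol]();
  double *u_0 = new double[vol], *u_1 = new double[vol], *u_2 = new double[vol];
  double *mu = new double[vol], *la = new double[vol];
  double *strx = new double[N], *stry = new double[N], *strz = new double[N];
  for (size_t p = 0; p < vol; p++) { u_0[p] = u_1[p] = u_2[p] = 1.0; mu[p] = 1.0; la[p] = 1.0; }
  for (int p = 0; p < N; p++) { strx[p] = 1.0; stry[p] = 1.0; strz[p] = 1.0; }
  // One sweep: interior points (2 <= i,j,k <= N-3) of uacc_* are updated on the device.
  host_code (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  printf ("uacc_0 at the domain centre: %e\n", uacc_0[vol / 2 + (size_t)N * N / 2 + N / 2]);
  delete[] uacc_0; delete[] uacc_1; delete[] uacc_2;
  delete[] u_0;    delete[] u_1;    delete[] u_2;
  delete[] mu;     delete[] la;
  delete[] strx;   delete[] stry;   delete[] strz;
  return 0;
}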
4f45e43cf86bfe8e34a10698ccbfe072e070c89d.cu
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void sw4 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_k= (int)(blockDim.z); int k0 = (int)(blockIdx.z)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.z); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4; double r1, r2, r3; if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) { double mux1; double mux2; double mux3; double mux4; double muy1; double muy2; double muy3; double muy4; double muz1; double muz2; double muz3; double muz4; double _t_5_; double _t_7_; double _t_3_; double _t_9_; double _t_11_; double _t_10_; double _t_12_; double _t_13_; double _t_14_; double _t_0_; double _t_16_; double _t_15_; double _t_17_; double _t_18_; double _t_19_; double _t_4_; double _t_2_; double _t_6_; double _t_1_; double _t_8_; double r1; double _t_30_; double _t_31_; double _t_33_; double _t_29_; double _t_27_; double _t_26_; double _t_28_; double _t_32_; double _t_34_; double _t_20_; double _t_22_; double _t_21_; double _t_23_; double _t_24_; double _t_25_; double _t_36_; double _t_35_; double _t_52_; double _t_37_; double _t_54_; double _t_38_; double _t_39_; double r2; double _t_56_; double _t_58_; double _t_42_; double _t_41_; double _t_43_; double _t_44_; double _t_45_; double _t_40_; double _t_47_; double _t_46_; double _t_48_; double _t_49_; double _t_50_; double _t_53_; double _t_51_; double _t_55_; double _t_57_; double _t_59_; double r3; double _t_102_; double _t_100_; double _t_76_; double _t_74_; double _t_63_; double _t_61_; double _t_89_; double _t_87_; double _t_103_; double _t_77_; double _t_85_; double _t_111_; double _t_104_; double _t_80_; double _t_112_; double _t_83_; double _t_101_; double _t_78_; double _t_106_; double _t_86_; double _t_109_; double _t_75_; double _t_107_; double _t_81_; double _t_84_; double _t_105_; double _t_110_; double _t_108_; double _t_60_; double _t_79_; double _t_82_; double _t_64_; double _t_90_; double _t_72_; double _t_98_; double _t_65_; double _t_93_; double _t_73_; double _t_96_; double _t_62_; double _t_91_; double _t_67_; double _t_99_; double _t_88_; double _t_70_; double _t_68_; double _t_94_; double _t_92_; double 
_t_71_; double _t_97_; double _t_95_; double _t_66_; double _t_69_; double _t_129_; double _t_127_; double _t_155_; double _t_153_; double _t_116_; double _t_114_; double _t_142_; double _t_140_; double _t_117_; double _t_143_; double _t_125_; double _t_151_; double _t_118_; double _t_146_; double _t_126_; double _t_149_; double _t_115_; double _t_144_; double _t_120_; double _t_152_; double _t_141_; double _t_123_; double _t_121_; double _t_147_; double _t_145_; double _t_124_; double _t_150_; double _t_148_; double _t_113_; double _t_119_; double _t_122_; double _t_130_; double _t_156_; double _t_138_; double _t_164_; double _t_131_; double _t_159_; double _t_139_; double _t_128_; double _t_162_; double _t_157_; double _t_133_; double _t_165_; double _t_136_; double _t_154_; double _t_134_; double _t_160_; double _t_137_; double _t_158_; double _t_132_; double _t_163_; double _t_135_; double _t_161_; double _t_169_; double _t_182_; double _t_167_; double _t_180_; double _t_195_; double _t_208_; double _t_193_; double _t_206_; double _t_170_; double _t_183_; double _t_178_; double _t_191_; double _t_171_; double _t_186_; double _t_179_; double _t_168_; double _t_189_; double _t_184_; double _t_173_; double _t_192_; double _t_176_; double _t_181_; double _t_174_; double _t_187_; double _t_177_; double _t_185_; double _t_172_; double _t_190_; double _t_175_; double _t_166_; double _t_188_; double _t_196_; double _t_209_; double _t_204_; double _t_217_; double _t_197_; double _t_212_; double _t_205_; double _t_194_; double _t_215_; double _t_210_; double _t_199_; double _t_218_; double _t_207_; double _t_202_; double _t_200_; double _t_213_; double _t_211_; double _t_203_; double _t_198_; double _t_216_; double _t_201_; double _t_214_; double uacc_0kc0jc0ic0; double uacc_1kc0jc0ic0; double uacc_2kc0jc0ic0; mux1 = -3.0 / 4.0 * mu[k][j][i-2] * strx[i-2]; mux1 += mu[k][j][i-1] * strx[i-1]; mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i]; mux2 = mu[k][j][i-2] * strx[i-2]; mux2 += 3.0 * mu[k][j][i] * strx[i]; mux2 += 3.0 * mu[k][j][i-1] * strx[i-1]; mux2 += mu[k][j][i+1] * strx[i+1]; mux3 = mu[k][j][i-1] * strx[i-1]; mux3 += 3.0 * mu[k][j][i+1] * strx[i+1]; mux3 += 3.0 * mu[k][j][i] * strx[i]; mux3 += mu[k][j][i+2] * strx[i+2]; mux4 = mu[k][j][i+1] * strx[i+1]; mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i]; mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2]; muy1 = -3.0 / 4.0 * mu[k][j][i] * stry[j]; muy1 += mu[k][j-1][i] * stry[j-1]; muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2]; muy2 = mu[k][j-2][i] * stry[j-2]; muy2 += 3.0 * mu[k][j][i] * stry[j]; muy2 += 3.0 * mu[k][j-1][i] * stry[j-1]; muy2 += mu[k][j+1][i] * stry[j+1]; muy3 = mu[k][j-1][i] * stry[j-1]; muy3 += 3.0 * mu[k][j+1][i] * stry[j+1]; muy3 += 3.0 * mu[k][j][i] * stry[j]; muy3 += mu[k][j+2][i] * stry[j+2]; muy4 = mu[k][j+1][i] * stry[j+1]; muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j]; muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2]; muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k]; muz1 += mu[k-1][j][i] * strz[k-1]; muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2]; muz2 = mu[k-2][j][i] * strz[k-2]; muz2 += 3.0 * mu[k][j][i] * strz[k]; muz2 += 3.0 * mu[k-1][j][i] * strz[k-1]; muz2 += mu[k+1][j][i] * strz[k+1]; muz3 = mu[k-1][j][i] * strz[k-1]; muz3 += 3.0 * mu[k+1][j][i] * strz[k+1]; muz3 += 3.0 * mu[k][j][i] * strz[k]; muz4 = mu[k+1][j][i] * strz[k+1]; muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k]; muz3 += mu[k+2][j][i] * strz[k+2]; muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2]; _t_5_ = u_0[k][j][i-1]; _t_5_ -= u_0[k][j][i]; _t_7_ = -u_0[k][j][i]; _t_7_ += 
u_0[k][j][i+1]; _t_3_ = -u_0[k][j][i]; _t_3_ += u_0[k][j][i-2]; _t_9_ = -u_0[k][j][i]; _t_9_ += u_0[k][j][i+2]; _t_11_ = -u_0[k][j][i]; _t_11_ += u_0[k][j-2][i]; _t_10_ = muy1 * _t_11_; _t_12_ = -u_0[k][j][i]; _t_12_ += u_0[k][j-1][i]; _t_10_ += muy2 * _t_12_; _t_13_ = -u_0[k][j][i]; _t_13_ += u_0[k][j+1][i]; _t_10_ += muy3 * _t_13_; _t_14_ = -u_0[k][j][i]; _t_14_ += u_0[k][j+2][i]; _t_10_ += muy4 * _t_14_; _t_0_ = stry[j] * _t_10_; _t_16_ = -u_0[k][j][i]; _t_16_ += u_0[k-2][j][i]; _t_15_ = muz1 * _t_16_; _t_17_ = -u_0[k][j][i]; _t_17_ += u_0[k-1][j][i]; _t_15_ += muz2 * _t_17_; _t_18_ = -u_0[k][j][i]; _t_19_ = -u_0[k][j][i]; _t_18_ += u_0[k+1][j][i]; _t_15_ += muz3 * _t_18_; _t_19_ += u_0[k+2][j][i]; _t_15_ += muz4 * _t_19_; _t_0_ += strz[k] * _t_15_; _t_4_ = 2.0 * mux2; _t_2_ = 2.0 * mux1; _t_2_ -= 3.0 / 4.0 * la[k][j][i-2] * strx[i-2]; _t_4_ += la[k][j][i-2] * strx[i-2]; _t_2_ += la[k][j][i-1] * strx[i-1]; _t_4_ += 3.0 * la[k][j][i-1] * strx[i-1]; _t_6_ = la[k][j][i-1] * strx[i-1]; _t_6_ += 2.0 * mux3; _t_2_ -= 3.0 / 4.0 * la[k][j][i] * strx[i]; _t_4_ += 3.0 * la[k][j][i] * strx[i]; _t_6_ += 3.0 * la[k][j][i] * strx[i]; _t_1_ = _t_2_ * _t_3_; _t_8_ = -3.0 / 4.0 * la[k][j][i] * strx[i]; _t_8_ += 2.0 * mux4; _t_4_ += la[k][j][i+1] * strx[i+1]; _t_1_ += _t_4_ * _t_5_; _t_6_ += 3.0 * la[k][j][i+1] * strx[i+1]; _t_8_ += la[k][j][i+1] * strx[i+1]; _t_6_ += la[k][j][i+2] * strx[i+2]; _t_1_ += _t_6_ * _t_7_; _t_8_ -= 3.0 / 4.0 * la[k][j][i+2] * strx[i+2]; _t_1_ += _t_8_ * _t_9_; _t_0_ += strx[i] * _t_1_; r1 = 1.0 / 6.0 * _t_0_; _t_30_ = u_1[k][j-1][i]; _t_31_ = 3.0 * la[k][j][i] * stry[j]; _t_31_ += 2.0 * muy3; _t_33_ = -3.0 / 4.0 * la[k][j][i] * stry[j]; _t_33_ += 2.0 * muy4; _t_31_ += la[k][j+2][i] * stry[j+2]; _t_33_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2]; _t_29_ = 3.0 * la[k][j][i] * stry[j]; _t_29_ += 2.0 * muy2; _t_29_ += la[k][j+1][i] * stry[j+1]; _t_31_ += 3.0 * la[k][j+1][i] * stry[j+1]; _t_33_ += la[k][j+1][i] * stry[j+1]; _t_27_ = -3.0 / 4.0 * la[k][j][i] * stry[j]; _t_27_ += 2.0 * muy1; _t_27_ += la[k][j-1][i] * stry[j-1]; _t_29_ += 3.0 * la[k][j-1][i] * stry[j-1]; _t_31_ += la[k][j-1][i] * stry[j-1]; _t_27_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2]; _t_29_ += la[k][j-2][i] * stry[j-2]; _t_30_ -= u_1[k][j][i]; _t_26_ = _t_29_ * _t_30_; _t_28_ = -u_1[k][j][i]; _t_28_ += u_1[k][j-2][i]; _t_26_ += _t_27_ * _t_28_; _t_32_ = -u_1[k][j][i]; _t_32_ += u_1[k][j+1][i]; _t_26_ += _t_31_ * _t_32_; _t_34_ = -u_1[k][j][i]; _t_34_ += u_1[k][j+2][i]; _t_26_ += _t_33_ * _t_34_; _t_20_ = stry[j] * _t_26_; _t_22_ = -u_1[k][j][i]; _t_22_ += u_1[k][j][i-2]; _t_21_ = mux1 * _t_22_; _t_23_ = -u_1[k][j][i]; _t_23_ += u_1[k][j][i-1]; _t_21_ += mux2 * _t_23_; _t_24_ = -u_1[k][j][i]; _t_24_ += u_1[k][j][i+1]; _t_21_ += mux3 * _t_24_; _t_25_ = -u_1[k][j][i]; _t_25_ += u_1[k][j][i+2]; _t_21_ += mux4 * _t_25_; _t_20_ += strx[i] * _t_21_; _t_36_ = -u_1[k][j][i]; _t_36_ += u_1[k-2][j][i]; _t_35_ = muz1 * _t_36_; _t_52_ = -3.0 / 4.0 * la[k][j][i] * strz[k]; _t_52_ += 2.0 * muz1; _t_37_ = -u_1[k][j][i]; _t_37_ += u_1[k-1][j][i]; _t_35_ += muz2 * _t_37_; _t_54_ = 3.0 * la[k][j][i] * strz[k]; _t_54_ += 2.0 * muz2; _t_52_ -= 3.0 / 4.0 * la[k-2][j][i] * strz[k-2]; _t_54_ += la[k-2][j][i] * strz[k-2]; _t_38_ = -u_1[k][j][i]; _t_39_ = -u_1[k][j][i]; _t_38_ += u_1[k+1][j][i]; _t_35_ += muz3 * _t_38_; _t_39_ += u_1[k+2][j][i]; _t_35_ += muz4 * _t_39_; _t_20_ += strz[k] * _t_35_; r2 = 1.0 / 6.0 * _t_20_; _t_56_ = 3.0 * la[k][j][i] * strz[k]; _t_56_ += 2.0 * muz3; _t_58_ = -3.0 / 4.0 * la[k][j][i] * strz[k]; _t_58_ += 2.0 * 
muz4; _t_52_ += la[k-1][j][i] * strz[k-1]; _t_54_ += 3.0 * la[k-1][j][i] * strz[k-1]; _t_56_ += la[k-1][j][i] * strz[k-1]; _t_54_ += la[k+1][j][i] * strz[k+1]; _t_56_ += 3.0 * la[k+1][j][i] * strz[k+1]; _t_58_ += la[k+1][j][i] * strz[k+1]; _t_56_ += la[k+2][j][i] * strz[k+2]; _t_58_ -= 3.0 / 4.0 * la[k+2][j][i] * strz[k+2]; _t_42_ = u_2[k][j][i-2]; _t_42_ -= u_2[k][j][i]; _t_41_ = mux1 * _t_42_; _t_43_ = -u_2[k][j][i]; _t_43_ += u_2[k][j][i-1]; _t_41_ += mux2 * _t_43_; _t_44_ = -u_2[k][j][i]; _t_44_ += u_2[k][j][i+1]; _t_41_ += mux3 * _t_44_; _t_45_ = -u_2[k][j][i]; _t_45_ += u_2[k][j][i+2]; _t_41_ += mux4 * _t_45_; _t_40_ = strx[i] * _t_41_; _t_47_ = -u_2[k][j][i]; _t_47_ += u_2[k][j-2][i]; _t_46_ = muy1 * _t_47_; _t_48_ = -u_2[k][j][i]; _t_48_ += u_2[k][j-1][i]; _t_46_ += muy2 * _t_48_; _t_49_ = -u_2[k][j][i]; _t_49_ += u_2[k][j+1][i]; _t_46_ += muy3 * _t_49_; _t_50_ = -u_2[k][j][i]; _t_50_ += u_2[k][j+2][i]; _t_46_ += muy4 * _t_50_; _t_40_ += stry[j] * _t_46_; _t_53_ = -u_2[k][j][i]; _t_53_ += u_2[k-2][j][i]; _t_51_ = _t_52_ * _t_53_; _t_55_ = -u_2[k][j][i]; _t_55_ += u_2[k-1][j][i]; _t_51_ += _t_54_ * _t_55_; _t_57_ = -u_2[k][j][i]; _t_59_ = -u_2[k][j][i]; _t_57_ += u_2[k+1][j][i]; _t_51_ += _t_56_ * _t_57_; _t_59_ += u_2[k+2][j][i]; _t_51_ += _t_58_ * _t_59_; _t_40_ += strz[k] * _t_51_; r3 = 1.0 / 6.0 * _t_40_; _t_102_ = stry[j] * strz[k]; _t_100_ = _t_102_ * 1.0 / 144.0; _t_76_ = stry[j] * strz[k]; _t_74_ = _t_76_ * 1.0 / 144.0; _t_63_ = strx[i] * strz[k]; _t_61_ = _t_63_ * 1.0 / 144.0; _t_89_ = strx[i] * strz[k]; _t_87_ = _t_89_ * 1.0 / 144.0; _t_103_ = u_1[k-2][j-2][i]; _t_77_ = u_1[k-2][j-2][i]; _t_103_ -= u_1[k-2][j+2][i]; _t_85_ = u_1[k-2][j+2][i]; _t_77_ -= u_1[k+2][j-2][i]; _t_111_ = u_1[k+2][j-2][i]; _t_85_ -= u_1[k+2][j+2][i]; _t_111_ -= u_1[k+2][j+2][i]; _t_104_ = -u_1[k-2][j-1][i]; _t_80_ = u_1[k-2][j-1][i]; _t_80_ -= u_1[k+2][j-1][i]; _t_112_ = -u_1[k+2][j-1][i]; _t_104_ += u_1[k-2][j+1][i]; _t_103_ += 8.0 * _t_104_; _t_83_ = u_1[k-2][j+1][i]; _t_83_ -= u_1[k+2][j+1][i]; _t_112_ += u_1[k+2][j+1][i]; _t_111_ += 8.0 * _t_112_; _t_101_ = la[k-2][j][i] * _t_103_; _t_101_ -= la[k+2][j][i] * _t_111_; _t_78_ = -u_1[k-1][j-2][i]; _t_106_ = u_1[k-1][j-2][i]; _t_106_ -= u_1[k-1][j+2][i]; _t_86_ = -u_1[k-1][j+2][i]; _t_78_ += u_1[k+1][j-2][i]; _t_77_ += 8.0 * _t_78_; _t_109_ = u_1[k+1][j-2][i]; _t_86_ += u_1[k+1][j+2][i]; _t_85_ += 8.0 * _t_86_; _t_109_ -= u_1[k+1][j+2][i]; _t_75_ = mu[k][j-2][i] * _t_77_; _t_75_ -= mu[k][j+2][i] * _t_85_; _t_107_ = -u_1[k-1][j-1][i]; _t_81_ = -u_1[k-1][j-1][i]; _t_107_ += u_1[k-1][j+1][i]; _t_106_ += 8.0 * _t_107_; _t_84_ = -u_1[k-1][j+1][i]; _t_105_ = la[k-1][j][i] * _t_106_; _t_101_ -= 8.0 * _t_105_; _t_81_ += u_1[k+1][j-1][i]; _t_80_ += 8.0 * _t_81_; _t_110_ = -u_1[k+1][j-1][i]; _t_84_ += u_1[k+1][j+1][i]; _t_83_ += 8.0 * _t_84_; _t_110_ += u_1[k+1][j+1][i]; _t_109_ += 8.0 * _t_110_; _t_108_ = la[k+1][j][i] * _t_109_; _t_101_ += 8.0 * _t_108_; _t_60_ = _t_100_ * _t_101_; _t_79_ = mu[k][j-1][i] * _t_80_; _t_75_ -= 8.0 * _t_79_; _t_82_ = mu[k][j+1][i] * _t_83_; _t_75_ += 8.0 * _t_82_; _t_60_ += _t_74_ * _t_75_; _t_64_ = u_0[k-2][j][i-2]; _t_90_ = u_0[k-2][j][i-2]; _t_90_ -= u_0[k-2][j][i+2]; _t_72_ = u_0[k-2][j][i+2]; _t_64_ -= u_0[k+2][j][i-2]; _t_98_ = u_0[k+2][j][i-2]; _t_72_ -= u_0[k+2][j][i+2]; _t_98_ -= u_0[k+2][j][i+2]; _t_65_ = -u_0[k-1][j][i-2]; _t_93_ = u_0[k-1][j][i-2]; _t_93_ -= u_0[k-1][j][i+2]; _t_73_ = -u_0[k-1][j][i+2]; _t_65_ += u_0[k+1][j][i-2]; _t_64_ += 8.0 * _t_65_; _t_96_ = u_0[k+1][j][i-2]; _t_73_ += u_0[k+1][j][i+2]; _t_72_ 
+= 8.0 * _t_73_; _t_96_ -= u_0[k+1][j][i+2]; _t_62_ = mu[k][j][i-2] * _t_64_; _t_62_ -= mu[k][j][i+2] * _t_72_; _t_91_ = -u_0[k-2][j][i-1]; _t_67_ = u_0[k-2][j][i-1]; _t_67_ -= u_0[k+2][j][i-1]; _t_99_ = -u_0[k+2][j][i-1]; _t_91_ += u_0[k-2][j][i+1]; _t_90_ += 8.0 * _t_91_; _t_88_ = la[k-2][j][i] * _t_90_; _t_70_ = u_0[k-2][j][i+1]; _t_70_ -= u_0[k+2][j][i+1]; _t_99_ += u_0[k+2][j][i+1]; _t_98_ += 8.0 * _t_99_; _t_88_ -= la[k+2][j][i] * _t_98_; _t_68_ = -u_0[k-1][j][i-1]; _t_94_ = -u_0[k-1][j][i-1]; _t_94_ += u_0[k-1][j][i+1]; _t_93_ += 8.0 * _t_94_; _t_92_ = la[k-1][j][i] * _t_93_; _t_88_ -= 8.0 * _t_92_; _t_71_ = -u_0[k-1][j][i+1]; _t_68_ += u_0[k+1][j][i-1]; _t_67_ += 8.0 * _t_68_; _t_97_ = -u_0[k+1][j][i-1]; _t_71_ += u_0[k+1][j][i+1]; _t_70_ += 8.0 * _t_71_; _t_97_ += u_0[k+1][j][i+1]; _t_96_ += 8.0 * _t_97_; _t_95_ = la[k+1][j][i] * _t_96_; _t_88_ += 8.0 * _t_95_; _t_60_ += _t_87_ * _t_88_; _t_66_ = mu[k][j][i-1] * _t_67_; _t_62_ -= 8.0 * _t_66_; _t_69_ = mu[k][j][i+1] * _t_70_; _t_62_ += 8.0 * _t_69_; _t_60_ += _t_61_ * _t_62_; r3 += _t_60_; _t_129_ = strx[i] * strz[k]; _t_127_ = _t_129_ * 1.0 / 144.0; _t_155_ = strx[i] * strz[k]; _t_153_ = _t_155_ * 1.0 / 144.0; _t_116_ = strx[i] * stry[j]; _t_114_ = _t_116_ * 1.0 / 144.0; _t_142_ = strx[i] * stry[j]; _t_140_ = _t_142_ * 1.0 / 144.0; _t_117_ = u_1[k][j-2][i-2]; _t_143_ = u_1[k][j-2][i-2]; _t_143_ -= u_1[k][j-2][i+2]; _t_125_ = u_1[k][j-2][i+2]; _t_117_ -= u_1[k][j+2][i-2]; _t_151_ = u_1[k][j+2][i-2]; _t_125_ -= u_1[k][j+2][i+2]; _t_151_ -= u_1[k][j+2][i+2]; _t_118_ = -u_1[k][j-1][i-2]; _t_146_ = u_1[k][j-1][i-2]; _t_146_ -= u_1[k][j-1][i+2]; _t_126_ = -u_1[k][j-1][i+2]; _t_118_ += u_1[k][j+1][i-2]; _t_117_ += 8.0 * _t_118_; _t_149_ = u_1[k][j+1][i-2]; _t_126_ += u_1[k][j+1][i+2]; _t_125_ += 8.0 * _t_126_; _t_149_ -= u_1[k][j+1][i+2]; _t_115_ = la[k][j][i-2] * _t_117_; _t_115_ -= la[k][j][i+2] * _t_125_; _t_144_ = -u_1[k][j-2][i-1]; _t_120_ = u_1[k][j-2][i-1]; _t_120_ -= u_1[k][j+2][i-1]; _t_152_ = -u_1[k][j+2][i-1]; _t_144_ += u_1[k][j-2][i+1]; _t_143_ += 8.0 * _t_144_; _t_141_ = mu[k][j-2][i] * _t_143_; _t_123_ = u_1[k][j-2][i+1]; _t_123_ -= u_1[k][j+2][i+1]; _t_152_ += u_1[k][j+2][i+1]; _t_151_ += 8.0 * _t_152_; _t_141_ -= mu[k][j+2][i] * _t_151_; _t_121_ = -u_1[k][j-1][i-1]; _t_147_ = -u_1[k][j-1][i-1]; _t_147_ += u_1[k][j-1][i+1]; _t_146_ += 8.0 * _t_147_; _t_145_ = mu[k][j-1][i] * _t_146_; _t_141_ -= 8.0 * _t_145_; _t_124_ = -u_1[k][j-1][i+1]; _t_121_ += u_1[k][j+1][i-1]; _t_120_ += 8.0 * _t_121_; _t_150_ = -u_1[k][j+1][i-1]; _t_124_ += u_1[k][j+1][i+1]; _t_123_ += 8.0 * _t_124_; _t_150_ += u_1[k][j+1][i+1]; _t_149_ += 8.0 * _t_150_; _t_148_ = mu[k][j+1][i] * _t_149_; _t_141_ += 8.0 * _t_148_; _t_113_ = _t_140_ * _t_141_; _t_119_ = la[k][j][i-1] * _t_120_; _t_115_ -= 8.0 * _t_119_; _t_122_ = la[k][j][i+1] * _t_123_; _t_115_ += 8.0 * _t_122_; _t_113_ += _t_114_ * _t_115_; _t_130_ = u_2[k-2][j][i-2]; _t_156_ = u_2[k-2][j][i-2]; _t_156_ -= u_2[k-2][j][i+2]; _t_138_ = u_2[k-2][j][i+2]; _t_130_ -= u_2[k+2][j][i-2]; _t_164_ = u_2[k+2][j][i-2]; _t_138_ -= u_2[k+2][j][i+2]; _t_164_ -= u_2[k+2][j][i+2]; _t_131_ = -u_2[k-1][j][i-2]; _t_159_ = u_2[k-1][j][i-2]; _t_159_ -= u_2[k-1][j][i+2]; _t_139_ = -u_2[k-1][j][i+2]; _t_131_ += u_2[k+1][j][i-2]; _t_130_ += 8.0 * _t_131_; _t_128_ = la[k][j][i-2] * _t_130_; _t_162_ = u_2[k+1][j][i-2]; _t_139_ += u_2[k+1][j][i+2]; _t_138_ += 8.0 * _t_139_; _t_128_ -= la[k][j][i+2] * _t_138_; _t_162_ -= u_2[k+1][j][i+2]; _t_157_ = -u_2[k-2][j][i-1]; _t_133_ = u_2[k-2][j][i-1]; _t_133_ -= u_2[k+2][j][i-1]; 
_t_165_ = -u_2[k+2][j][i-1]; _t_157_ += u_2[k-2][j][i+1]; _t_156_ += 8.0 * _t_157_; _t_136_ = u_2[k-2][j][i+1]; _t_136_ -= u_2[k+2][j][i+1]; _t_165_ += u_2[k+2][j][i+1]; _t_164_ += 8.0 * _t_165_; _t_154_ = mu[k-2][j][i] * _t_156_; _t_154_ -= mu[k+2][j][i] * _t_164_; _t_134_ = -u_2[k-1][j][i-1]; _t_160_ = -u_2[k-1][j][i-1]; _t_160_ += u_2[k-1][j][i+1]; _t_159_ += 8.0 * _t_160_; _t_137_ = -u_2[k-1][j][i+1]; _t_158_ = mu[k-1][j][i] * _t_159_; _t_154_ -= 8.0 * _t_158_; _t_134_ += u_2[k+1][j][i-1]; _t_133_ += 8.0 * _t_134_; _t_132_ = la[k][j][i-1] * _t_133_; _t_128_ -= 8.0 * _t_132_; _t_163_ = -u_2[k+1][j][i-1]; _t_137_ += u_2[k+1][j][i+1]; _t_136_ += 8.0 * _t_137_; _t_163_ += u_2[k+1][j][i+1]; _t_162_ += 8.0 * _t_163_; _t_135_ = la[k][j][i+1] * _t_136_; _t_128_ += 8.0 * _t_135_; _t_113_ += _t_127_ * _t_128_; _t_161_ = mu[k+1][j][i] * _t_162_; _t_154_ += 8.0 * _t_161_; _t_113_ += _t_153_ * _t_154_; r1 += _t_113_; _t_169_ = strx[i] * stry[j]; _t_182_ = strx[i] * stry[j]; _t_167_ = _t_169_ * 1.0 / 144.0; _t_180_ = _t_182_ * 1.0 / 144.0; _t_195_ = stry[j] * strz[k]; _t_208_ = stry[j] * strz[k]; _t_193_ = _t_195_ * 1.0 / 144.0; _t_206_ = _t_208_ * 1.0 / 144.0; _t_170_ = u_0[k][j-2][i-2]; _t_183_ = u_0[k][j-2][i-2]; _t_183_ -= u_0[k][j-2][i+2]; _t_178_ = u_0[k][j-2][i+2]; _t_170_ -= u_0[k][j+2][i-2]; _t_191_ = u_0[k][j+2][i-2]; _t_178_ -= u_0[k][j+2][i+2]; _t_191_ -= u_0[k][j+2][i+2]; _t_171_ = -u_0[k][j-1][i-2]; _t_186_ = u_0[k][j-1][i-2]; _t_186_ -= u_0[k][j-1][i+2]; _t_179_ = -u_0[k][j-1][i+2]; _t_171_ += u_0[k][j+1][i-2]; _t_170_ += 8.0 * _t_171_; _t_168_ = mu[k][j][i-2] * _t_170_; _t_189_ = u_0[k][j+1][i-2]; _t_179_ += u_0[k][j+1][i+2]; _t_178_ += 8.0 * _t_179_; _t_168_ -= mu[k][j][i+2] * _t_178_; _t_189_ -= u_0[k][j+1][i+2]; _t_184_ = -u_0[k][j-2][i-1]; _t_173_ = u_0[k][j-2][i-1]; _t_173_ -= u_0[k][j+2][i-1]; _t_192_ = -u_0[k][j+2][i-1]; _t_184_ += u_0[k][j-2][i+1]; _t_183_ += 8.0 * _t_184_; _t_176_ = u_0[k][j-2][i+1]; _t_176_ -= u_0[k][j+2][i+1]; _t_192_ += u_0[k][j+2][i+1]; _t_191_ += 8.0 * _t_192_; _t_181_ = la[k][j-2][i] * _t_183_; _t_181_ -= la[k][j+2][i] * _t_191_; _t_174_ = -u_0[k][j-1][i-1]; _t_187_ = -u_0[k][j-1][i-1]; _t_187_ += u_0[k][j-1][i+1]; _t_186_ += 8.0 * _t_187_; _t_177_ = -u_0[k][j-1][i+1]; _t_185_ = la[k][j-1][i] * _t_186_; _t_181_ -= 8.0 * _t_185_; _t_174_ += u_0[k][j+1][i-1]; _t_173_ += 8.0 * _t_174_; _t_172_ = mu[k][j][i-1] * _t_173_; _t_168_ -= 8.0 * _t_172_; _t_190_ = -u_0[k][j+1][i-1]; _t_177_ += u_0[k][j+1][i+1]; _t_176_ += 8.0 * _t_177_; _t_190_ += u_0[k][j+1][i+1]; _t_189_ += 8.0 * _t_190_; _t_175_ = mu[k][j][i+1] * _t_176_; _t_168_ += 8.0 * _t_175_; _t_166_ = _t_167_ * _t_168_; _t_188_ = la[k][j+1][i] * _t_189_; _t_181_ += 8.0 * _t_188_; _t_166_ += _t_180_ * _t_181_; _t_196_ = u_2[k-2][j-2][i]; _t_209_ = u_2[k-2][j-2][i]; _t_209_ -= u_2[k-2][j+2][i]; _t_204_ = u_2[k-2][j+2][i]; _t_196_ -= u_2[k+2][j-2][i]; _t_217_ = u_2[k+2][j-2][i]; _t_204_ -= u_2[k+2][j+2][i]; _t_217_ -= u_2[k+2][j+2][i]; _t_197_ = -u_2[k-1][j-2][i]; _t_212_ = u_2[k-1][j-2][i]; _t_212_ -= u_2[k-1][j+2][i]; _t_205_ = -u_2[k-1][j+2][i]; _t_197_ += u_2[k+1][j-2][i]; _t_196_ += 8.0 * _t_197_; _t_194_ = la[k][j-2][i] * _t_196_; _t_215_ = u_2[k+1][j-2][i]; _t_205_ += u_2[k+1][j+2][i]; _t_204_ += 8.0 * _t_205_; _t_194_ -= la[k][j+2][i] * _t_204_; _t_215_ -= u_2[k+1][j+2][i]; _t_210_ = -u_2[k-2][j-1][i]; _t_199_ = u_2[k-2][j-1][i]; _t_199_ -= u_2[k+2][j-1][i]; _t_218_ = -u_2[k+2][j-1][i]; _t_210_ += u_2[k-2][j+1][i]; _t_209_ += 8.0 * _t_210_; _t_207_ = mu[k-2][j][i] * _t_209_; _t_202_ = 
u_2[k-2][j+1][i]; _t_202_ -= u_2[k+2][j+1][i]; _t_218_ += u_2[k+2][j+1][i]; _t_217_ += 8.0 * _t_218_; _t_207_ -= mu[k+2][j][i] * _t_217_; _t_200_ = -u_2[k-1][j-1][i]; _t_213_ = -u_2[k-1][j-1][i]; _t_213_ += u_2[k-1][j+1][i]; _t_212_ += 8.0 * _t_213_; _t_211_ = mu[k-1][j][i] * _t_212_; _t_207_ -= 8.0 * _t_211_; _t_203_ = -u_2[k-1][j+1][i]; _t_200_ += u_2[k+1][j-1][i]; _t_199_ += 8.0 * _t_200_; _t_198_ = la[k][j-1][i] * _t_199_; _t_194_ -= 8.0 * _t_198_; _t_216_ = -u_2[k+1][j-1][i]; _t_203_ += u_2[k+1][j+1][i]; _t_202_ += 8.0 * _t_203_; _t_216_ += u_2[k+1][j+1][i]; _t_215_ += 8.0 * _t_216_; _t_201_ = la[k][j+1][i] * _t_202_; _t_194_ += 8.0 * _t_201_; _t_166_ += _t_193_ * _t_194_; _t_214_ = mu[k+1][j][i] * _t_215_; _t_207_ += 8.0 * _t_214_; _t_166_ += _t_206_ * _t_207_; r2 += _t_166_; uacc_0kc0jc0ic0 = a1 * uacc_0[k][j][i]; uacc_0kc0jc0ic0 += cof * r1; uacc_0[k][j][i] = uacc_0kc0jc0ic0; uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i]; uacc_1kc0jc0ic0 += cof * r2; uacc_1[k][j][i] = uacc_1kc0jc0ic0; uacc_2kc0jc0ic0 = a1 * uacc_2[k][j][i]; uacc_2kc0jc0ic0 += cof * r3; uacc_2[k][j][i] = uacc_2kc0jc0ic0; } } extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) { double *uacc_0; cudaMalloc (&uacc_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_0\n"); cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *uacc_1; cudaMalloc (&uacc_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_1\n"); cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *uacc_2; cudaMalloc (&uacc_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_2\n"); cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_0; cudaMalloc (&u_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_0\n"); cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_1; cudaMalloc (&u_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_1\n"); cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_2; cudaMalloc (&u_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_2\n"); cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *mu; cudaMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *la; cudaMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *strx; cudaMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice); double *stry; cudaMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice); double *strz; cudaMalloc (&strz, sizeof(double)*N); check_error ("Failed to allocate device memory for strz\n"); cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice); dim3 blockconfig (16, 2, 2); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z)); sw4 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, 
uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaFree (uacc_0); cudaFree (uacc_1); cudaFree (uacc_2); cudaFree (u_0); cudaFree (u_1); cudaFree (u_2); cudaFree (mu); cudaFree (la); cudaFree (strx); cudaFree (stry); cudaFree (strz); }
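// A minimal sketch reusing this file's check_error() helper and the sw4 kernel above:
// host_code() only calls check_error() after the cudaMalloc calls, so a failed launch
// or a runtime fault inside the kernel would go unreported. The wrapper below shows one
// way the launch itself could be checked; the name launch_sw4_checked is illustrative
// and not part of the original code.
static void launch_sw4_checked (double *uacc_0, double *uacc_1, double *uacc_2,
                                double *u_0, double *u_1, double *u_2,
                                double *mu, double *la,
                                double *strx, double *stry, double *strz, int N)
{
  dim3 blockconfig (16, 2, 2);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z));
  sw4 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2,
                                     mu, la, strx, stry, strz, N);
  check_error ("sw4 kernel launch failed\n");     // reports configuration/launch errors
  cudaDeviceSynchronize ();                       // wait for the kernel to finish
  check_error ("sw4 kernel execution failed\n");  // reports errors raised during execution
}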
5f5aa68a7907a52a8f2a9292fdf839b86673baad.hip
// !!! This is a file automatically generated by hipify!!! ///sta programa calcula la versin paralelizada del algoritmo FFT_DIF_DIT_TD ///(03/08/2016) ///sta versin sirve para graficar en matlab los errores absolutos y relativos (RADIX-3) 3^1 - 3^6 #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hipfft.h> #include <cufftw.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_complex.h> #include <math.h> #include <math_constants.h> #include <iostream> #include <time.h> ////////////////////////////////////////////////////////////////////////// ///////////////////////DECLARACIN DE FUNCIONES/////////////////////////// ////////////////////////////////////////////////////////////////////////// void vector_entrada_xn(int N,int Li); void arreglo_W(int N); void asign_rap(int N,int Li,int Lo); void factor(int N); void product(int vector_1[50],int vector_2[50],int valor); void etapa_entrada(void); __global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y,int *flag_inputstage_1_d,int *flag_inputstage_2_d,int *flag_inputstage_3_d); void etapa_intermedia(void); void etapa_salida(void); __global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X,int *flag_outputstage_1_d,int *flag_outputstage_2_d,int *flag_outputstage_3_d); ////////////////////////////////////////////////////////////////////////// /////////////////////DECLARACIN DE VARIABLES GLOBALES//////////////////// ////////////////////////////////////////////////////////////////////////// cuFloatComplex *x_host; cuFloatComplex *W_host; //cuFloatComplex *y_host; //cuFloatComplex *z_host; cuFloatComplex *X_host; cuFloatComplex *x_device; cuFloatComplex *W_device; cuFloatComplex *y_device; cuFloatComplex *z_device; cuFloatComplex *X_device; hipfftComplex *in,*out; int *flag_inputstage_1,*flag_inputstage_2,*flag_inputstage_3,*flag_outputstage_1,*flag_outputstage_2,*flag_outputstage_3; int *flag_inputstage_1_d,*flag_inputstage_2_d,*flag_inputstage_3_d,*flag_outputstage_1_d,*flag_outputstage_2_d,*flag_outputstage_3_d; int Dip,Dop,P,N,Li,Lo; int vF[50]; //Almacena los factores de N int svF; //Almacena el numero de factores de N int Prod[50]; int a; #define inf 99999 ////////////////////////////////////////////////////////////////////////// //////////////////////////DATOS DE ENTRADA//////////////////////////////// ////////////////////////////////////////////////////////////////////////// /// N >>> Nmero de elementos del vector de entrada /// Li >>> Nmero de elementos de entrada diferentes de cero /// Lo >>> Nmero de elementos de salida requeridos /// loop >>> Nmero de iteraciones /// muestras >>> Nmero de muestras ////////////////////////////////////////////////////////////////////////// ///////////////////////////DATOS DE SALIDA//////////////////////////////// ////////////////////////////////////////////////////////////////////////// /// X >>> Vector de salida ////////////////////////////////////////////////////////////////////////// /////////////////// SE INGRESAN LOS DATOS DE ENTRADA ///////////////////// ////////////////////////////////////////////////////////////////////////// ///Ingrese el nmero de iteraciones requeridas int loop = 1; ///Ingrese el nmero de muestras requeridas const int muestras = 1; ////////////////////////////////////////////////////////////////////////// //////////////////////////FUNCION PRINCIPAL/////////////////////////////// 
////////////////////////////////////////////////////////////////////////// //Funcin principal int main() { int i,j,alea_real[729],alea_imag[729],i_N,l_res,j_res,k_res,incremento_j; //float suma; //float promedio[muestras]; ///Se crean los archivos binarios donde se guardarn los datos FILE *da; FILE *db; FILE *dc; //FILE *dd; FILE *fi_1; FILE *fi_2; FILE *fi_3; FILE *fo_1; FILE *fo_2; FILE *fo_3; da = fopen("Resultados_radix_3_real_CUDA.bin","a+b"); //Crea o sobre escribe archivo db = fopen("Resultados_radix_3_imag_CUDA.bin","a+b"); //Crea o sobre escribe archivo dc = fopen("Entrada_radix_3_CUDA.txt","w+t"); //Crea o sobre escribe archivo //dd = fopen("TIEMPOS_FFT_DIF_DIT_TD_SECUENCIAL_CUDA.bin","a+b"); //Crea o sobre escribe archivo fi_1 = fopen("Flag_inputstage_1_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fi_2 = fopen("Flag_inputstage_2_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fi_3 = fopen("Flag_inputstage_3_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fo_1 = fopen("Flag_outputstage_1_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fo_2 = fopen("Flag_outputstage_2_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fo_3 = fopen("Flag_outputstage_3_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo ///Generacin de vector de entrada aleatorio srand (time(NULL)); //Utilizo la hr del sistema como semilla for(i = 0;i < 729;i++) { alea_real[i]=rand()%11; //alea_real[i]=i+1; alea_imag[i]=rand()%11; //alea_imag[i]=0; fprintf(dc,"%d %d\n",alea_real[i],alea_imag[i]); } fclose(dc); //Pausa printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n"); getchar(); //Se reserva espacio para las flags flag_inputstage_1 = (int *)malloc(1*sizeof(int)); flag_inputstage_2 = (int *)malloc(1*sizeof(int)); flag_inputstage_3 = (int *)malloc(1*sizeof(int)); flag_outputstage_1 = (int *)malloc(1*sizeof(int)); flag_outputstage_2 = (int *)malloc(1*sizeof(int)); flag_outputstage_3 = (int *)malloc(1*sizeof(int)); hipMalloc((int**)&flag_inputstage_1_d,1*sizeof(int)); hipMalloc((int**)&flag_inputstage_2_d,1*sizeof(int)); hipMalloc((int**)&flag_inputstage_3_d,1*sizeof(int)); hipMalloc((int**)&flag_outputstage_1_d,1*sizeof(int)); hipMalloc((int**)&flag_outputstage_2_d,1*sizeof(int)); hipMalloc((int**)&flag_outputstage_3_d,1*sizeof(int)); //Inicializaciones incremento_j = 1; flag_inputstage_1[0] = 0; flag_inputstage_2[0] = 0; flag_inputstage_3[0] = 0; flag_outputstage_1[0] = 0; flag_outputstage_2[0] = 0; flag_outputstage_3[0] = 0; for(i_N = 1;i_N <= 6;i_N++) { N = (int )pow(3,i_N); printf("\n N = %d \n",N); //Se reserva memoria para xn_host en el host x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N); //Se reserva memoria para x_device y W_device hipMalloc((void**)&x_device,N*sizeof(cuFloatComplex)); hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex)); ///Generacin del vector x for(l_res=0;l_res < N;l_res++) { //x_host[l_res] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21)); x_host[l_res] = make_cuFloatComplex((float)alea_real[l_res],(float)alea_imag[l_res]); //printf(" %d-> (%f) + (%f)\n",l_res+1,cuCrealf(x_host[l_res]),cuCimagf(x_host[l_res])); } ///Se genera el arreglo W[N] arreglo_W(N); //Envo de los arreglos x y W hacia la memoria global del device hipMemcpy(x_device,x_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice); hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice); for(j_res=incremento_j;j_res<=N;j_res=j_res+incremento_j) { Li=j_res; for(k_res=incremento_j;k_res<=N;k_res=k_res+incremento_j) { Lo=k_res; 
//printf("\n Li = %d Lo = %d",Li,Lo); for(i=1;i<=muestras;i++) { //suma=0.0; for(j=0;j<loop;j++) { //Comandos necesarios para medir el tiempo //float elapsedTime_app; //hipEvent_t start_app, stop_app; //hipEventCreate(&start_app); //hipEventCreate(&stop_app); //--------------------------------------------------------------------------------------------- //Se empieza a medir el tiempo de ejecucion de la aplicacion //hipEventRecord(start_app,0); //Se generan en el host los valores del vector de entrada x[n] //vector_entrada_xn(N,Li); //Se generan en el host los valores del arreglo W[N] //arreglo_W(N); //Se generan en el host los factores Dip y Dop asign_rap(N,Li,Lo); //Clculo en el host del factor P P = N/(Dip*Dop); //printf("\n\n FACTOR P:\n\n"); //printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P); //Funcin auxiliar del host para ejecutar la etapa de entrada etapa_entrada(); //Funcin auxiliar del host para ejecutar la etapa intermedia etapa_intermedia(); //Funcin auxiliar del host para ejecutar la etapa de salida etapa_salida(); ///Se imprimen los resultados en los archivos binarios int m; float *parte_real; float *parte_imag; parte_real = (float*) malloc(Lo*sizeof(float)); parte_imag = (float*) malloc(Lo*sizeof(float)); for(m=0;m<=Lo-1;m++) { parte_real[m]=cuCrealf(X_host[m]); parte_imag[m]=cuCimagf(X_host[m]); //printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m])); //fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m])); } fwrite(parte_real,sizeof(float),Lo,da); fwrite(parte_imag,sizeof(float),Lo,db); ///Se leen los valores de las flags desde el device hipMemcpy(flag_inputstage_1,flag_inputstage_1_d,1*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(flag_inputstage_2,flag_inputstage_2_d,1*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(flag_inputstage_3,flag_inputstage_3_d,1*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(flag_outputstage_1,flag_outputstage_1_d,1*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(flag_outputstage_2,flag_outputstage_2_d,1*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(flag_outputstage_3,flag_outputstage_3_d,1*sizeof(int),hipMemcpyDeviceToHost); ///Se imprimen el valor de las flags en sus respectivos archivos binarios fwrite(flag_inputstage_1,1*sizeof(int),1,fi_1); fwrite(flag_inputstage_2,1*sizeof(int),1,fi_2); fwrite(flag_inputstage_3,1*sizeof(int),1,fi_3); fwrite(flag_outputstage_1,1*sizeof(int),1,fo_1); fwrite(flag_outputstage_2,1*sizeof(int),1,fo_2); fwrite(flag_outputstage_3,1*sizeof(int),1,fo_3); /* printf("\n flag_inputstage_1 = %d \n",flag_inputstage_1[0]); printf("\n flag_inputstage_2 = %d \n",flag_inputstage_2[0]); printf("\n flag_inputstage_3 = %d \n",flag_inputstage_3[0]); printf("\n flag_outputstage_1 = %d \n",flag_outputstage_1[0]); printf("\n flag_outputstage_2 = %d \n",flag_outputstage_2[0]); printf("\n flag_outputstage_3 = %d \n",flag_outputstage_3[0]); */ //Se liberan memorias del Host y Device //free(x_host); //free(W_host); //free(y_host); //free(z_host); free(X_host); free(parte_real); free(parte_imag); //hipFree(x_device); //hipFree(W_device); hipFree(y_device); hipFree(z_device); hipFree(X_device); //--------------------------------------------------------------------------------------------- //Comandos necesarios para medir el tiempo de la aplicacion (app) //hipEventRecord(stop_app,0); //hipEventSynchronize(stop_app); //hipEventElapsedTime(&elapsedTime_app,start_app,stop_app); //Suma de todos los tiempos //suma = suma + elapsedTime_app; //Se destruyen los eventos que miden el tiempo de la aplicacion //hipEventDestroy(start_app); 
//hipEventDestroy(stop_app); ///Se resetean las flags flag_inputstage_1[0] = 0; flag_inputstage_2[0] = 0; flag_inputstage_3[0] = 0; flag_outputstage_1[0] = 0; flag_outputstage_2[0] = 0; flag_outputstage_3[0] = 0; } //promedio[i-1] = suma/(float)loop; //printf(" \n\n%d - Tiempo promedio para N = %ld >>> %f mS\n",i,N,promedio[i-1]); } //fwrite(promedio,sizeof(float),muestras,dd); //fclose(dd); } } free(x_host); free(W_host); hipFree(x_device); hipFree(W_device); } fclose(da); fclose(db); fclose(fi_1); fclose(fi_2); fclose(fi_3); fclose(fo_1); fclose(fo_2); fclose(fo_3); free(flag_inputstage_1); free(flag_inputstage_2); free(flag_inputstage_3); free(flag_outputstage_1); free(flag_outputstage_2); free(flag_outputstage_3); hipFree(flag_inputstage_1_d); hipFree(flag_inputstage_2_d); hipFree(flag_inputstage_3_d); hipFree(flag_outputstage_1_d); hipFree(flag_outputstage_2_d); hipFree(flag_outputstage_3_d); } ////////////////////////////////////////////////////////////////////////// /////////////////////////FUNCIONES SECUNDARIAS//////////////////////////// ////////////////////////////////////////////////////////////////////////// //sta funcin genera el vector de entrada x[n] void vector_entrada_xn(int N,int Li) { //Declaracin de variables locales int k; //Se reserva memoria para xn_host en el host x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N); //Se dan valores a x[n] for(k=0;k<N;k++) { if(k < Li) { //x[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21)); x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0)); } else { x_host[k] = make_cuFloatComplex((float)(0.0),(float)(0.0)); } } /* //Se imprimen los valores de entrada x[n] printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n"); for(k=0;k<N;k++) { printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k])); } */ } //sta funcin genera el arreglo W void arreglo_W(int N) { //Declaracin de variables locales int n; //Se reserva memoria para W_host en el host W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N); //Se genera el arreglo W for(n = 1;n <= N;n++) { W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N)); } /* //Se imprimen los valores del arreglo W[N] printf("\n---ARREGLO W[N]---\n\n"); for(n = 0;n < N; n++) { printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n])); } */ } //sta funcin genera los factores Dip y Dop void asign_rap(int N,int Li,int Lo) { //Declaracin de variables locales float NLi,NLo,Diprapt,Doprapt; int Nh[50]; int k[50]; int G; int g,i,t,ta; int Dipt[50],Dopt[50]; float distrapt,distrap; int Pos,h,Poss; int nk[50]; int r; //Inicializaciones G = 0; svF = 0; //Factores Dip y Dop ideales NLi=(float)N/(float)Li; NLo=(float)N/(float)Lo; Diprapt=NLi; Doprapt=NLo; //Se encuentran los factores de "N" //vF almacena los factores de "N" //svF almacena el nmero de factores de "N" factor(N); /* Almacena en el vector Nh los factores que son diferentes de del vector vF En el vector k se almacena la cantidad de veces que se repite cada elemento almacenado en el vector Nh. */ Nh[0] = vF[0]; k[0]=1; for(g=1;g<=svF-1;g=g+1) { if(vF[g]!=vF[g-1]) { G=G+1; Nh[G]=vF[g]; k[G]=1; } else { k[G]=k[G]+1; } } /* Almacena en el vector Nh todas las posibles combinaciones que den como producto a N. t almacena el numero de elementos del vector Nh. 
*/ product(Nh,k,G); t = a; for(i=0;i<t;i=i+1) { Dipt[i]=Prod[i]; } distrapt=inf; for(g=1;g<=t;g=g+1) { if(Dipt[g-1]<=NLi) { Pos=g-1; for(h=0;h<=G;h=h+1) { Poss=floor(Pos/(k[h]+1)); nk[h]=k[h]+Poss*(k[h]+1)-Pos; Pos=Poss; } product(Nh,nk,G); ta=a; for(i=0;i<ta;i=i+1) { Dopt[i]=Prod[i]; } //////////////////////////////////////////// //int j; //for(j=0;j<ta;j++) //{ // printf(" %d ",Dopt[j]); //} //printf("\n\n ta=%d\n\n",ta); /////////////////////////////////////////// for(r=0;r<ta;r=r+1) { distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2)); if(distrap<distrapt) { distrapt=distrap; Dip=Dipt[g-1]; Dop=Dopt[r]; } } } } /* printf("\n\n FACTOR Dip :\n\n"); printf(" %d ",Dip); printf("\n\n FACTOR Dop:\n\n"); printf(" %d ",Dop); */ } //sta funcin encuentra los factores de "N" void factor(int N) { //Se empieza a verificar los factores desde 2 int i=2; long N_factor; N_factor = N; while(i<=N_factor) { while((N_factor%i)==0) { vF[svF]=i; N_factor=N_factor/i; // printf("Factores: %d ",vF[svF]); svF++; } i++; } } //sta funcin encuentra todas las posibles combinaciones de factores que den como resultado "N" void product(int vector_1[50],int vector_2[50],int valor) { int d,e,s,pNh,i; int cont=0; Prod[0]=1; a=1; for(d=0;d<=valor;d=d+1) { s=a; pNh=1; for(e=1;e<=vector_2[d];e=e+1) { pNh=pNh*vector_1[d]; for(i=(s*e+1);i<=(s*e+s);i=i+1) { Prod[i-1]=pNh*Prod[cont]; cont=cont+1; } a=a+s; cont=0; } } } //Funcin auxiliar del host para calcular la etapa de entrada en el device void etapa_entrada(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA DE ENTRADA////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaracin de variables locales int k1,n1,n2; //Asignacin de memoria en el device hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex)); //Asignacin de memoria en el host para "y" //y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop); //Dimensionamiento del grid para la funcin kernel "inputStage" //Dimensionamiento del Grid dim3 gridDim(1,1,1); //Dimensionamiento del block dim3 blockDim(1,1,1); if((P*Dop) < 32 && (Dip) < 32) { blockDim.x = (P*Dop); blockDim.y = (Dip); gridDim.x = 1; gridDim.y = 1; } else { blockDim.x = 32; blockDim.y = 32; gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x)); gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y)); } //Lanzamiento del kernel "inputStage_kernel" hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device,flag_inputstage_1_d,flag_inputstage_2_d,flag_inputstage_3_d); //Esperar que el kernel termine de ejecutarse totalmente hipDeviceSynchronize(); /* //Copia del arreglo "y" del device hacia el host hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost); //Se imprimen los valores de "y" printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n"); for(k1 = 0;k1 < Dip;k1++) { for(n1 = 0;n1 < Dop;n1++) { for(n2 = 0;n2 < P;n2++) { printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2])); } printf("\n"); } printf("\n\n"); } printf("\n"); */ } //funcin kernel que ejecuta la etapa de entrada en el device __global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y,int *flag_inputstage_1_d,int *flag_inputstage_2_d,int *flag_inputstage_3_d) { int n1,n2; cuFloatComplex t1; //Threads int n = blockDim.x *blockIdx.x + 
threadIdx.x; int k1 = blockDim.y *blockIdx.y + threadIdx.y; //Se resetean las flags flag_inputstage_1_d[0] = 0; flag_inputstage_2_d[0] = 0; flag_inputstage_3_d[0] = 0; //printf("\n n = %d k1 = %d",n,k1); if( (n < (P*Dop)) && (k1 < Dip)) { n2 = floorf(n/Dop); n1 = n - (Dop*n2); //Generacin de los elementos que dependen de x[0] if(n == 0) { y[(k1*Dop*P)+(0*P)+ 0] = x[0]; ///Flag flag_inputstage_1_d[0] = 1; } //Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's if((n >= 1) && (n <= (Li-1))) { t1 = x[n]; if(k1 == 0) { y[(0*Dop*P)+(n1*P)+ n2] = t1; } if(k1 >= 1) { y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1); } ///Flag flag_inputstage_2_d[0] = 1; } //Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1 if((n >= Li) && (n <= (P*Dop)-1)) { y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0); ///Flag flag_inputstage_3_d[0] = 1; } //printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2])); } } //Funcin auxiliar del host para calcular la etapa intermedia en el device void etapa_intermedia(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA INTERMEDIA////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaracin de variables locales int k1,k2,n1; int n[1] = {P}; int inembed[1] = {P}; int onembed[1] = {P}; //Asignacin de memoria en el device para "z" hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex)); //Asignacin de memoria en el host para "z" //z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop); //Asignacin de memoria en el device para "in" y "out" hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop); hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop); //Se copia el arreglo "y" al arreglo "in" hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice); //Se crea un plan hipfftHandle plan; hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop); //Ejecucin del plan hipfftExecC2C(plan,in,out,HIPFFT_FORWARD); //Esperar que el kernel termine de ejecutarse totalmente hipDeviceSynchronize(); //Se copian los datos del arreglo "out" al arreglo "z_device" hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice); //Se destruye el plan hipfftDestroy(plan); //Se liberan los arreglos "in" y "out" hipFree(in); hipFree(out); /* //Se copian los datos del arreglo "z_device" al arreglo "z_host" hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost); ///Se imprimen los valores de z(n1,k2,k1) printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n"); for(k1 = 0;k1 < Dip;k1++) { for(n1 = 0;n1 < Dop;n1++) { for(k2 = 0;k2 < P;k2++) { printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2])); } printf("\n"); } printf("\n\n"); } printf("\n"); */ } //Funcin auxiliar del host para calcular la etapa de salida en el device void etapa_salida(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA DE SALIDA/////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaracin de variables locales int m; //Asignacin de memoria en el device para "X" hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex)); //Asignacin de memoria en el host para "X" X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo); //Dimensionamiento del grid para la funcin kernel "outputStage" 
//Dimensionamiento del Grid dim3 gridDim(1,1,1); //Dimensionamiento del block dim3 blockDim(1,1,1); if((Lo) < 1024) { blockDim.x = Lo; gridDim.x = 1; } else { blockDim.x = 1024; gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x)); } //Lanzamiento del kernel "outputStage_kernel" hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device,flag_outputstage_1_d,flag_outputstage_2_d,flag_outputstage_3_d); //Esperar que el kernel termine de ejecutarse totalmente hipDeviceSynchronize(); //Copia del arreglo "X" del device hacia el host hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost); /* //Se imprimen los valores de "X_host" ///Imprimir X[k] printf("\n\n--- ARREGLO X[k] ---\n\n"); for(m=0;m<=Lo-1;m++) { printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m])); //fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i])); } */ } //funcin kernel que ejecuta la etapa de salida en el device __global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X,int *flag_outputstage_1_d,int *flag_outputstage_2_d,int *flag_outputstage_3_d) { //Declaracin de variables locales int n1,k_aux,k1,k2,a,b; cuFloatComplex t1,t2,t3,t4,t5,t4_aux,X_aux,X_aux_1; float t4_aux_1; //Threads int k = blockDim.x *blockIdx.x + threadIdx.x; //Se resetean las flags flag_outputstage_1_d[0] = 0; flag_outputstage_2_d[0] = 0; flag_outputstage_3_d[0] = 0; if(k < Lo) { for(n1 = 0; n1 <= (Dop-1); n1 = n1+1) { if(Lo <= Dip) { //Clculo de X(k) para 0<=k<=Lo-1. //printf("\n--- Caso (Lo <= Dip) ---\n"); //En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1 if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { X[k] = z[(k*Dop*P)+(0*P) + 0]; ///Flag flag_outputstage_1_d[0] = 1; } else { if(n1 == 1) { X[k] = z[(k*Dop*P)+(0*P) + 0]; } X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]); ///Flag flag_outputstage_1_d[0] = 1; } } else { if((k >= 0) && (k <= (Dip-1))) { //Clculo de X(k) para 0<=k<=Dip-1. 
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1 if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { X[k] = z[(k*Dop*P)+(0*P) + 0]; } else { if(n1 == 1) { X[k] = z[(k*Dop*P)+(0*P) + 0]; } X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]); } } else { if(Dop <= 4) { //Usando el mtodo directo //printf("\n--- Caso (Metodo directo) ---\n"); if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)]; ///Flag flag_outputstage_2_d[0] = 1; } else { if(n1 == 1) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)]; } a = floorf(k/(Dip*P)); X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1])); ///Flag flag_outputstage_2_d[0] = 1; } } else { //Usando el mtodo filtering 2BF //printf("\n--- Caso (Filtro 2BF) ---\n"); if((Dop-2) >= 1) { if(n1 == 0) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; b = floorf(k/(Dip*P)); t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0)); ///Flag flag_outputstage_3_d[0] = 1; } if((n1 >= 1) && (n1 <= (Dop-2))) { t2 = t1; t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4); t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0)); t4 = cuCsubf(t3,t2); } if(n1 == (Dop-1)) { t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4); X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1]))); } } else { if(Dop == 1) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; X[k] = t1; ///Flag flag_outputstage_3_d[0] = 1; } else { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; b = floorf(k/(Dip*P)); t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0)); t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4); X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1]))); ///Flag flag_outputstage_3_d[0] = 1; } } } } } } } }
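// A minimal host-side sketch (not part of the original program): a direct O(Lo*Li)
// reference DFT, X[k] = sum_{n=0}^{Li-1} x[n] * exp(-j*2*pi*n*k/N), evaluated over the
// Li inputs that the pruned transform treats as non-zero. A reference of this kind is
// what the absolute/relative errors mentioned in the file header would be measured
// against; the function name dft_referencia and its use of the global x_host are
// assumptions made only for this sketch.
void dft_referencia (int N, int Li, int Lo, cuFloatComplex *X_ref)
{
  for (int k = 0; k < Lo; k++)
  {
    X_ref[k] = make_cuFloatComplex (0.0f, 0.0f);
    for (int n = 0; n < Li; n++)
    {
      float ang = -2.0f * (float)CUDART_PI * (float)n * (float)k / (float)N;
      cuFloatComplex w = make_cuFloatComplex (cosf (ang), sinf (ang));
      X_ref[k] = cuCaddf (X_ref[k], cuCmulf (x_host[n], w));
    }
  }
}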
5f5aa68a7907a52a8f2a9292fdf839b86673baad.cu
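Aside: both halves of this record index the twiddle array with a -1 offset (W[(expr % N) - 1]), because the host routine arreglo_W in the source below fills the table with a one-position shift: W[n-1] = e^(-j*2*pi*n/N) for n = 1..N, so W[0] holds e^(-j*2*pi/N) rather than 1. A small host-side sketch of that layout, assuming only what arreglo_W itself shows; buildTwiddleTable and PI_D are names introduced here (the original uses CUDART_PI from math_constants.h).

#include <cuComplex.h>
#include <math.h>
#include <stdlib.h>

static const double PI_D = 3.14159265358979323846;

// Same one-position shift as arreglo_W: W[n-1] = cos(2*pi*n/N) - j*sin(2*pi*n/N).
static cuFloatComplex *buildTwiddleTable(int N)
{
    cuFloatComplex *W = (cuFloatComplex *)malloc(N * sizeof(cuFloatComplex));
    for (int n = 1; n <= N; n++) {
        W[n - 1] = make_cuFloatComplex((float)cos((2.0 * PI_D * n) / N),
                                       (float)(-sin((2.0 * PI_D * n) / N)));
    }
    return W;
}

With this layout an index expression whose modulo evaluates to 0 would address W[-1]; whether that case is reachable depends on the Dip/Dop decomposition, so this sketch only reproduces the table layout, not a bounds analysis.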
///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD ///(03/08/2016) ///Ésta versión sirve para graficar en matlab los errores absolutos y relativos (RADIX-3) 3^1 - 3^6 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cufft.h> #include <cufftw.h> #include <stdio.h> #include <stdlib.h> #include <cuComplex.h> #include <math.h> #include <math_constants.h> #include <iostream> #include <time.h> ////////////////////////////////////////////////////////////////////////// ///////////////////////DECLARACIÓN DE FUNCIONES/////////////////////////// ////////////////////////////////////////////////////////////////////////// void vector_entrada_xn(int N,int Li); void arreglo_W(int N); void asign_rap(int N,int Li,int Lo); void factor(int N); void product(int vector_1[50],int vector_2[50],int valor); void etapa_entrada(void); __global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y,int *flag_inputstage_1_d,int *flag_inputstage_2_d,int *flag_inputstage_3_d); void etapa_intermedia(void); void etapa_salida(void); __global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X,int *flag_outputstage_1_d,int *flag_outputstage_2_d,int *flag_outputstage_3_d); ////////////////////////////////////////////////////////////////////////// /////////////////////DECLARACIÓN DE VARIABLES GLOBALES//////////////////// ////////////////////////////////////////////////////////////////////////// cuFloatComplex *x_host; cuFloatComplex *W_host; //cuFloatComplex *y_host; //cuFloatComplex *z_host; cuFloatComplex *X_host; cuFloatComplex *x_device; cuFloatComplex *W_device; cuFloatComplex *y_device; cuFloatComplex *z_device; cuFloatComplex *X_device; cufftComplex *in,*out; int *flag_inputstage_1,*flag_inputstage_2,*flag_inputstage_3,*flag_outputstage_1,*flag_outputstage_2,*flag_outputstage_3; int *flag_inputstage_1_d,*flag_inputstage_2_d,*flag_inputstage_3_d,*flag_outputstage_1_d,*flag_outputstage_2_d,*flag_outputstage_3_d; int Dip,Dop,P,N,Li,Lo; int vF[50]; //Almacena los factores de N int svF; //Almacena el numero de factores de N int Prod[50]; int a; #define inf 99999 ////////////////////////////////////////////////////////////////////////// //////////////////////////DATOS DE ENTRADA//////////////////////////////// ////////////////////////////////////////////////////////////////////////// /// N >>> Número de elementos del vector de entrada /// Li >>> Número de elementos de entrada diferentes de cero /// Lo >>> Número de elementos de salida requeridos /// loop >>> Número de iteraciones /// muestras >>> Número de muestras ////////////////////////////////////////////////////////////////////////// ///////////////////////////DATOS DE SALIDA//////////////////////////////// ////////////////////////////////////////////////////////////////////////// /// X >>> Vector de salida ////////////////////////////////////////////////////////////////////////// /////////////////// SE INGRESAN LOS DATOS DE ENTRADA ///////////////////// ////////////////////////////////////////////////////////////////////////// ///Ingrese el número de iteraciones requeridas int loop = 1; ///Ingrese el número de muestras requeridas const int muestras = 1; ////////////////////////////////////////////////////////////////////////// //////////////////////////FUNCION PRINCIPAL/////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Función principal int 
main() { int i,j,alea_real[729],alea_imag[729],i_N,l_res,j_res,k_res,incremento_j; //float suma; //float promedio[muestras]; ///Se crean los archivos binarios donde se guardarán los datos FILE *da; FILE *db; FILE *dc; //FILE *dd; FILE *fi_1; FILE *fi_2; FILE *fi_3; FILE *fo_1; FILE *fo_2; FILE *fo_3; da = fopen("Resultados_radix_3_real_CUDA.bin","a+b"); //Crea o sobre escribe archivo db = fopen("Resultados_radix_3_imag_CUDA.bin","a+b"); //Crea o sobre escribe archivo dc = fopen("Entrada_radix_3_CUDA.txt","w+t"); //Crea o sobre escribe archivo //dd = fopen("TIEMPOS_FFT_DIF_DIT_TD_SECUENCIAL_CUDA.bin","a+b"); //Crea o sobre escribe archivo fi_1 = fopen("Flag_inputstage_1_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fi_2 = fopen("Flag_inputstage_2_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fi_3 = fopen("Flag_inputstage_3_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fo_1 = fopen("Flag_outputstage_1_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fo_2 = fopen("Flag_outputstage_2_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo fo_3 = fopen("Flag_outputstage_3_radix_3_CUDA.bin","a+b"); //Crea o sobre escribe archivo ///Generación de vector de entrada aleatorio srand (time(NULL)); //Utilizo la hr del sistema como semilla for(i = 0;i < 729;i++) { alea_real[i]=rand()%11; //alea_real[i]=i+1; alea_imag[i]=rand()%11; //alea_imag[i]=0; fprintf(dc,"%d %d\n",alea_real[i],alea_imag[i]); } fclose(dc); //Pausa printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n"); getchar(); //Se reserva espacio para las flags flag_inputstage_1 = (int *)malloc(1*sizeof(int)); flag_inputstage_2 = (int *)malloc(1*sizeof(int)); flag_inputstage_3 = (int *)malloc(1*sizeof(int)); flag_outputstage_1 = (int *)malloc(1*sizeof(int)); flag_outputstage_2 = (int *)malloc(1*sizeof(int)); flag_outputstage_3 = (int *)malloc(1*sizeof(int)); cudaMalloc((int**)&flag_inputstage_1_d,1*sizeof(int)); cudaMalloc((int**)&flag_inputstage_2_d,1*sizeof(int)); cudaMalloc((int**)&flag_inputstage_3_d,1*sizeof(int)); cudaMalloc((int**)&flag_outputstage_1_d,1*sizeof(int)); cudaMalloc((int**)&flag_outputstage_2_d,1*sizeof(int)); cudaMalloc((int**)&flag_outputstage_3_d,1*sizeof(int)); //Inicializaciones incremento_j = 1; flag_inputstage_1[0] = 0; flag_inputstage_2[0] = 0; flag_inputstage_3[0] = 0; flag_outputstage_1[0] = 0; flag_outputstage_2[0] = 0; flag_outputstage_3[0] = 0; for(i_N = 1;i_N <= 6;i_N++) { N = (int )pow(3,i_N); printf("\n N = %d \n",N); //Se reserva memoria para xn_host en el host x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N); //Se reserva memoria para x_device y W_device cudaMalloc((void**)&x_device,N*sizeof(cuFloatComplex)); cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex)); ///Generación del vector x for(l_res=0;l_res < N;l_res++) { //x_host[l_res] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21)); x_host[l_res] = make_cuFloatComplex((float)alea_real[l_res],(float)alea_imag[l_res]); //printf(" %d-> (%f) + (%f)\n",l_res+1,cuCrealf(x_host[l_res]),cuCimagf(x_host[l_res])); } ///Se genera el arreglo W[N] arreglo_W(N); //Envío de los arreglos x y W hacia la memoria global del device cudaMemcpy(x_device,x_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice); cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice); for(j_res=incremento_j;j_res<=N;j_res=j_res+incremento_j) { Li=j_res; for(k_res=incremento_j;k_res<=N;k_res=k_res+incremento_j) { Lo=k_res; //printf("\n Li = %d Lo = %d",Li,Lo); for(i=1;i<=muestras;i++) { //suma=0.0; 
for(j=0;j<loop;j++) { //Comandos necesarios para medir el tiempo //float elapsedTime_app; //cudaEvent_t start_app, stop_app; //cudaEventCreate(&start_app); //cudaEventCreate(&stop_app); //--------------------------------------------------------------------------------------------- //Se empieza a medir el tiempo de ejecucion de la aplicacion //cudaEventRecord(start_app,0); //Se generan en el host los valores del vector de entrada x[n] //vector_entrada_xn(N,Li); //Se generan en el host los valores del arreglo W[N] //arreglo_W(N); //Se generan en el host los factores Dip y Dop asign_rap(N,Li,Lo); //Cálculo en el host del factor P P = N/(Dip*Dop); //printf("\n\n FACTOR P:\n\n"); //printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P); //Función auxiliar del host para ejecutar la etapa de entrada etapa_entrada(); //Función auxiliar del host para ejecutar la etapa intermedia etapa_intermedia(); //Función auxiliar del host para ejecutar la etapa de salida etapa_salida(); ///Se imprimen los resultados en los archivos binarios int m; float *parte_real; float *parte_imag; parte_real = (float*) malloc(Lo*sizeof(float)); parte_imag = (float*) malloc(Lo*sizeof(float)); for(m=0;m<=Lo-1;m++) { parte_real[m]=cuCrealf(X_host[m]); parte_imag[m]=cuCimagf(X_host[m]); //printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m])); //fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m])); } fwrite(parte_real,sizeof(float),Lo,da); fwrite(parte_imag,sizeof(float),Lo,db); ///Se leen los valores de las flags desde el device cudaMemcpy(flag_inputstage_1,flag_inputstage_1_d,1*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(flag_inputstage_2,flag_inputstage_2_d,1*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(flag_inputstage_3,flag_inputstage_3_d,1*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(flag_outputstage_1,flag_outputstage_1_d,1*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(flag_outputstage_2,flag_outputstage_2_d,1*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(flag_outputstage_3,flag_outputstage_3_d,1*sizeof(int),cudaMemcpyDeviceToHost); ///Se imprimen el valor de las flags en sus respectivos archivos binarios fwrite(flag_inputstage_1,1*sizeof(int),1,fi_1); fwrite(flag_inputstage_2,1*sizeof(int),1,fi_2); fwrite(flag_inputstage_3,1*sizeof(int),1,fi_3); fwrite(flag_outputstage_1,1*sizeof(int),1,fo_1); fwrite(flag_outputstage_2,1*sizeof(int),1,fo_2); fwrite(flag_outputstage_3,1*sizeof(int),1,fo_3); /* printf("\n flag_inputstage_1 = %d \n",flag_inputstage_1[0]); printf("\n flag_inputstage_2 = %d \n",flag_inputstage_2[0]); printf("\n flag_inputstage_3 = %d \n",flag_inputstage_3[0]); printf("\n flag_outputstage_1 = %d \n",flag_outputstage_1[0]); printf("\n flag_outputstage_2 = %d \n",flag_outputstage_2[0]); printf("\n flag_outputstage_3 = %d \n",flag_outputstage_3[0]); */ //Se liberan memorias del Host y Device //free(x_host); //free(W_host); //free(y_host); //free(z_host); free(X_host); free(parte_real); free(parte_imag); //cudaFree(x_device); //cudaFree(W_device); cudaFree(y_device); cudaFree(z_device); cudaFree(X_device); //--------------------------------------------------------------------------------------------- //Comandos necesarios para medir el tiempo de la aplicacion (app) //cudaEventRecord(stop_app,0); //cudaEventSynchronize(stop_app); //cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app); //Suma de todos los tiempos //suma = suma + elapsedTime_app; //Se destruyen los eventos que miden el tiempo de la aplicacion //cudaEventDestroy(start_app); //cudaEventDestroy(stop_app); ///Se resetean las flags 
flag_inputstage_1[0] = 0; flag_inputstage_2[0] = 0; flag_inputstage_3[0] = 0; flag_outputstage_1[0] = 0; flag_outputstage_2[0] = 0; flag_outputstage_3[0] = 0; } //promedio[i-1] = suma/(float)loop; //printf(" \n\n%d - Tiempo promedio para N = %ld >>> %f mS\n",i,N,promedio[i-1]); } //fwrite(promedio,sizeof(float),muestras,dd); //fclose(dd); } } free(x_host); free(W_host); cudaFree(x_device); cudaFree(W_device); } fclose(da); fclose(db); fclose(fi_1); fclose(fi_2); fclose(fi_3); fclose(fo_1); fclose(fo_2); fclose(fo_3); free(flag_inputstage_1); free(flag_inputstage_2); free(flag_inputstage_3); free(flag_outputstage_1); free(flag_outputstage_2); free(flag_outputstage_3); cudaFree(flag_inputstage_1_d); cudaFree(flag_inputstage_2_d); cudaFree(flag_inputstage_3_d); cudaFree(flag_outputstage_1_d); cudaFree(flag_outputstage_2_d); cudaFree(flag_outputstage_3_d); } ////////////////////////////////////////////////////////////////////////// /////////////////////////FUNCIONES SECUNDARIAS//////////////////////////// ////////////////////////////////////////////////////////////////////////// //Ésta función genera el vector de entrada x[n] void vector_entrada_xn(int N,int Li) { //Declaración de variables locales int k; //Se reserva memoria para xn_host en el host x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N); //Se dan valores a x[n] for(k=0;k<N;k++) { if(k < Li) { //x[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21)); x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0)); } else { x_host[k] = make_cuFloatComplex((float)(0.0),(float)(0.0)); } } /* //Se imprimen los valores de entrada x[n] printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n"); for(k=0;k<N;k++) { printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k])); } */ } //Ésta función genera el arreglo W void arreglo_W(int N) { //Declaración de variables locales int n; //Se reserva memoria para W_host en el host W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N); //Se genera el arreglo W for(n = 1;n <= N;n++) { W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N)); } /* //Se imprimen los valores del arreglo W[N] printf("\n---ARREGLO W[N]---\n\n"); for(n = 0;n < N; n++) { printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n])); } */ } //Ésta función genera los factores Dip y Dop void asign_rap(int N,int Li,int Lo) { //Declaración de variables locales float NLi,NLo,Diprapt,Doprapt; int Nh[50]; int k[50]; int G; int g,i,t,ta; int Dipt[50],Dopt[50]; float distrapt,distrap; int Pos,h,Poss; int nk[50]; int r; //Inicializaciones G = 0; svF = 0; //Factores Dip y Dop ideales NLi=(float)N/(float)Li; NLo=(float)N/(float)Lo; Diprapt=NLi; Doprapt=NLo; //Se encuentran los factores de "N" //vF almacena los factores de "N" //svF almacena el número de factores de "N" factor(N); /* Almacena en el vector Nh los factores que son diferentes de del vector vF En el vector k se almacena la cantidad de veces que se repite cada elemento almacenado en el vector Nh. */ Nh[0] = vF[0]; k[0]=1; for(g=1;g<=svF-1;g=g+1) { if(vF[g]!=vF[g-1]) { G=G+1; Nh[G]=vF[g]; k[G]=1; } else { k[G]=k[G]+1; } } /* Almacena en el vector Nh todas las posibles combinaciones que den como producto a N. t almacena el numero de elementos del vector Nh. 
*/ product(Nh,k,G); t = a; for(i=0;i<t;i=i+1) { Dipt[i]=Prod[i]; } distrapt=inf; for(g=1;g<=t;g=g+1) { if(Dipt[g-1]<=NLi) { Pos=g-1; for(h=0;h<=G;h=h+1) { Poss=floor(Pos/(k[h]+1)); nk[h]=k[h]+Poss*(k[h]+1)-Pos; Pos=Poss; } product(Nh,nk,G); ta=a; for(i=0;i<ta;i=i+1) { Dopt[i]=Prod[i]; } //////////////////////////////////////////// //int j; //for(j=0;j<ta;j++) //{ // printf(" %d ",Dopt[j]); //} //printf("\n\n ta=%d\n\n",ta); /////////////////////////////////////////// for(r=0;r<ta;r=r+1) { distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2)); if(distrap<distrapt) { distrapt=distrap; Dip=Dipt[g-1]; Dop=Dopt[r]; } } } } /* printf("\n\n FACTOR Dip :\n\n"); printf(" %d ",Dip); printf("\n\n FACTOR Dop:\n\n"); printf(" %d ",Dop); */ } //Ésta función encuentra los factores de "N" void factor(int N) { //Se empieza a verificar los factores desde 2 int i=2; long N_factor; N_factor = N; while(i<=N_factor) { while((N_factor%i)==0) { vF[svF]=i; N_factor=N_factor/i; // printf("Factores: %d ",vF[svF]); svF++; } i++; } } //Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N" void product(int vector_1[50],int vector_2[50],int valor) { int d,e,s,pNh,i; int cont=0; Prod[0]=1; a=1; for(d=0;d<=valor;d=d+1) { s=a; pNh=1; for(e=1;e<=vector_2[d];e=e+1) { pNh=pNh*vector_1[d]; for(i=(s*e+1);i<=(s*e+s);i=i+1) { Prod[i-1]=pNh*Prod[cont]; cont=cont+1; } a=a+s; cont=0; } } } //Función auxiliar del host para calcular la etapa de entrada en el device void etapa_entrada(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA DE ENTRADA////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaración de variables locales int k1,n1,n2; //Asignación de memoria en el device cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex)); //Asignación de memoria en el host para "y" //y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop); //Dimensionamiento del grid para la función kernel "inputStage" //Dimensionamiento del Grid dim3 gridDim(1,1,1); //Dimensionamiento del block dim3 blockDim(1,1,1); if((P*Dop) < 32 && (Dip) < 32) { blockDim.x = (P*Dop); blockDim.y = (Dip); gridDim.x = 1; gridDim.y = 1; } else { blockDim.x = 32; blockDim.y = 32; gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x)); gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y)); } //Lanzamiento del kernel "inputStage_kernel" inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device,flag_inputstage_1_d,flag_inputstage_2_d,flag_inputstage_3_d); //Esperar que el kernel termine de ejecutarse totalmente cudaDeviceSynchronize(); /* //Copia del arreglo "y" del device hacia el host cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost); //Se imprimen los valores de "y" printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n"); for(k1 = 0;k1 < Dip;k1++) { for(n1 = 0;n1 < Dop;n1++) { for(n2 = 0;n2 < P;n2++) { printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2])); } printf("\n"); } printf("\n\n"); } printf("\n"); */ } //función kernel que ejecuta la etapa de entrada en el device __global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y,int *flag_inputstage_1_d,int *flag_inputstage_2_d,int *flag_inputstage_3_d) { int n1,n2; cuFloatComplex t1; //Threads int n = blockDim.x *blockIdx.x + threadIdx.x; int k1 = 
blockDim.y *blockIdx.y + threadIdx.y; //Se resetean las flags flag_inputstage_1_d[0] = 0; flag_inputstage_2_d[0] = 0; flag_inputstage_3_d[0] = 0; //printf("\n n = %d k1 = %d",n,k1); if( (n < (P*Dop)) && (k1 < Dip)) { n2 = floorf(n/Dop); n1 = n - (Dop*n2); //Generación de los elementos que dependen de x[0] if(n == 0) { y[(k1*Dop*P)+(0*P)+ 0] = x[0]; ///Flag flag_inputstage_1_d[0] = 1; } //Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's if((n >= 1) && (n <= (Li-1))) { t1 = x[n]; if(k1 == 0) { y[(0*Dop*P)+(n1*P)+ n2] = t1; } if(k1 >= 1) { y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1); } ///Flag flag_inputstage_2_d[0] = 1; } //Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1 if((n >= Li) && (n <= (P*Dop)-1)) { y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0); ///Flag flag_inputstage_3_d[0] = 1; } //printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2])); } } //Función auxiliar del host para calcular la etapa intermedia en el device void etapa_intermedia(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA INTERMEDIA////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaración de variables locales int k1,k2,n1; int n[1] = {P}; int inembed[1] = {P}; int onembed[1] = {P}; //Asignación de memoria en el device para "z" cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex)); //Asignación de memoria en el host para "z" //z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop); //Asignación de memoria en el device para "in" y "out" cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop); cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop); //Se copia el arreglo "y" al arreglo "in" cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice); //Se crea un plan cufftHandle plan; cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop); //Ejecución del plan cufftExecC2C(plan,in,out,CUFFT_FORWARD); //Esperar que el kernel termine de ejecutarse totalmente cudaDeviceSynchronize(); //Se copian los datos del arreglo "out" al arreglo "z_device" cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice); //Se destruye el plan cufftDestroy(plan); //Se liberan los arreglos "in" y "out" cudaFree(in); cudaFree(out); /* //Se copian los datos del arreglo "z_device" al arreglo "z_host" cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost); ///Se imprimen los valores de z(n1,k2,k1) printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n"); for(k1 = 0;k1 < Dip;k1++) { for(n1 = 0;n1 < Dop;n1++) { for(k2 = 0;k2 < P;k2++) { printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2])); } printf("\n"); } printf("\n\n"); } printf("\n"); */ } //Función auxiliar del host para calcular la etapa de salida en el device void etapa_salida(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA DE SALIDA/////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaración de variables locales int m; //Asignación de memoria en el device para "X" cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex)); //Asignación de memoria en el host para "X" X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo); //Dimensionamiento del grid para la función kernel "outputStage" 
//Dimensionamiento del Grid dim3 gridDim(1,1,1); //Dimensionamiento del block dim3 blockDim(1,1,1); if((Lo) < 1024) { blockDim.x = Lo; gridDim.x = 1; } else { blockDim.x = 1024; gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x)); } //Lanzamiento del kernel "outputStage_kernel" outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device,flag_outputstage_1_d,flag_outputstage_2_d,flag_outputstage_3_d); //Esperar que el kernel termine de ejecutarse totalmente cudaDeviceSynchronize(); //Copia del arreglo "X" del device hacia el host cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost); /* //Se imprimen los valores de "X_host" ///Imprimir X[k] printf("\n\n--- ARREGLO X[k] ---\n\n"); for(m=0;m<=Lo-1;m++) { printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m])); //fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i])); } */ } //función kernel que ejecuta la etapa de salida en el device __global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X,int *flag_outputstage_1_d,int *flag_outputstage_2_d,int *flag_outputstage_3_d) { //Declaración de variables locales int n1,k_aux,k1,k2,a,b; cuFloatComplex t1,t2,t3,t4,t5,t4_aux,X_aux,X_aux_1; float t4_aux_1; //Threads int k = blockDim.x *blockIdx.x + threadIdx.x; //Se resetean las flags flag_outputstage_1_d[0] = 0; flag_outputstage_2_d[0] = 0; flag_outputstage_3_d[0] = 0; if(k < Lo) { for(n1 = 0; n1 <= (Dop-1); n1 = n1+1) { if(Lo <= Dip) { //Cálculo de X(k) para 0<=k<=Lo-1. //printf("\n--- Caso (Lo <= Dip) ---\n"); //En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1 if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { X[k] = z[(k*Dop*P)+(0*P) + 0]; ///Flag flag_outputstage_1_d[0] = 1; } else { if(n1 == 1) { X[k] = z[(k*Dop*P)+(0*P) + 0]; } X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]); ///Flag flag_outputstage_1_d[0] = 1; } } else { if((k >= 0) && (k <= (Dip-1))) { //Cálculo de X(k) para 0<=k<=Dip-1. 
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1 if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { X[k] = z[(k*Dop*P)+(0*P) + 0]; } else { if(n1 == 1) { X[k] = z[(k*Dop*P)+(0*P) + 0]; } X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]); } } else { if(Dop <= 4) { //Usando el método directo //printf("\n--- Caso (Metodo directo) ---\n"); if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)]; ///Flag flag_outputstage_2_d[0] = 1; } else { if(n1 == 1) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)]; } a = floorf(k/(Dip*P)); X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1])); ///Flag flag_outputstage_2_d[0] = 1; } } else { //Usando el método filtering 2BF //printf("\n--- Caso (Filtro 2BF) ---\n"); if((Dop-2) >= 1) { if(n1 == 0) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; b = floorf(k/(Dip*P)); t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0)); ///Flag flag_outputstage_3_d[0] = 1; } if((n1 >= 1) && (n1 <= (Dop-2))) { t2 = t1; t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4); t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0)); t4 = cuCsubf(t3,t2); } if(n1 == (Dop-1)) { t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4); X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1]))); } } else { if(Dop == 1) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; X[k] = t1; ///Flag flag_outputstage_3_d[0] = 1; } else { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; b = floorf(k/(Dip*P)); t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0)); t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4); X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1]))); ///Flag flag_outputstage_3_d[0] = 1; } } } } } } } }
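Aside: the intermediate stage above (etapa_intermedia) hands all of the P-point DFTs to cuFFT as a single batched plan: Dip*Dop transforms of length P, unit stride, distance P between consecutive transforms. A self-contained sketch of that plan layout with arbitrary example sizes; only cuFFT calls already used in the source appear here.

#include <cufft.h>
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int P = 8, batch = 6;            // stand-ins for P and Dip*Dop
    int n[1]       = { P };
    int inembed[1] = { P }, onembed[1] = { P };

    cufftComplex *data;
    cudaMalloc(&data, sizeof(cufftComplex) * P * batch);
    cudaMemset(data, 0, sizeof(cufftComplex) * P * batch);

    cufftHandle plan;
    if (cufftPlanMany(&plan, 1, n, inembed, 1, P, onembed, 1, P,
                      CUFFT_C2C, batch) != CUFFT_SUCCESS) {
        fprintf(stderr, "cufftPlanMany failed\n");
        return 1;
    }
    cufftExecC2C(plan, data, data, CUFFT_FORWARD);   // in-place C2C is supported
    cudaDeviceSynchronize();

    cufftDestroy(plan);
    cudaFree(data);
    return 0;
}

Batching the short transforms into one plan is what lets the intermediate stage issue a single cufftExecC2C call instead of Dip*Dop separate ones.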
40f6261967ed73606f3e42ca41b3d22119ddf9fd.hip
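Aside: each record in this corpus pairs a hipified .hip source with its CUDA .cu original, and the most visible mechanical difference is the kernel-launch syntax; compare outputStage_kernel<<<gridDim,blockDim>>>(...) in the FFT record above with hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim), dim3(blockDim), 0, 0, ...) in its .hip twin. A toy CUDA example of the correspondence, with the hipified form shown only as a comment so the block stays pure CUDA:

#include <cuda_runtime.h>

__global__ void scale(float *v, float a, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) v[i] *= a;
}

int main()
{
    const int n = 1 << 20;
    float *d_v;
    cudaMalloc(&d_v, n * sizeof(float));

    dim3 block(256), grid((n + block.x - 1) / block.x);

    // CUDA form, as in the .cu half of each record:
    scale<<<grid, block>>>(d_v, 2.0f, n);

    // Hipified form, as emitted into the .hip half. The two extra arguments
    // are the dynamic shared-memory size and the stream, which the
    // triple-chevron launch above leaves at their defaults (0, 0):
    //   hipLaunchKernelGGL((scale), dim3(grid), dim3(block), 0, 0, d_v, 2.0f, n);

    cudaDeviceSynchronize();
    cudaFree(d_v);
    return 0;
}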
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This file modifies the ROI pooling layer of Fast R-CNN such that it implements the // the rectangular rings regions that were introduced on: // "Object detection via a multi-region & semantic segmentation-aware CNN model" // Those rectangular ring regions are defined in terms of an inner rectangle and // and an outer rectangle. During the region pooling operation, both the inner and // the outer rectangles are projected on the activation maps and the activations // that lay inside the inner rectangle are ignored during the adaptive max // pooling operation. // // AUTORIGHTS // -------------------------------------------------------- // Copyright (c) 2015 Spyros Gidaris // // "Object detection via a multi-region & semantic segmentation-aware CNN model" // Technical report: http://arxiv.org/abs/1505.01749 // --------------------------------------------------------- // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/region_pooling_layer.hpp" #include "caffe/util/math_functions.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void RegionPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const Dtype offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; // For each ROI R = [batch_index, x_outer_1, y_outer_1, x_outer_2, y_outer_2, x_inner_1, y_inner_1, x_inner_2, y_inner_2]: // where R_outer = [x_outer_1, y_outer_1, x_outer_2, y_outer_2] is the outer rectangle of the region and // R_inner = [x_inner_1, y_inner_1, x_inner_2, y_inner_2] is the inner rectangle of the region // max pooler over R by ignoring (setting to zero) the activations that lay inside the inner rectangle R_inner bottom_rois += n * 9; int roi_batch_ind = bottom_rois[0]; // outer rectangle of the region int roi_start_w = floor((bottom_rois[1] + 1 + offset) * spatial_scale + 0.5); int roi_start_h = floor((bottom_rois[2] + 1 + offset) * spatial_scale + 0.5); int roi_end_w = ceil( (bottom_rois[3] + 1 - offset) * spatial_scale - 0.5); int roi_end_h = ceil( (bottom_rois[4] + 1 - offset) * spatial_scale - 0.5); // inner rectangle of the region int roi_start_w_in = floor((bottom_rois[5] + 1 + offset) * spatial_scale + 0.5); int roi_start_h_in = floor((bottom_rois[6] + 1 + offset) * spatial_scale + 0.5); int roi_end_w_in = ceil( (bottom_rois[7] + 1 - offset) * spatial_scale - 0.5); int roi_end_h_in = ceil( (bottom_rois[8] + 1 - offset) * spatial_scale - 0.5); if (roi_start_w > roi_end_w) { roi_start_w = (roi_start_w + roi_end_w) / 2; roi_end_w = roi_start_w; } if (roi_start_h > roi_end_h) { roi_start_h = (roi_start_h + roi_end_h) / 2; roi_end_h = roi_start_h; } if (roi_start_w_in > roi_end_w_in) { roi_start_w_in = (roi_start_w_in + roi_end_w_in) / 2; roi_end_w_in = 
roi_start_w_in; } if (roi_start_h_in > roi_end_h_in) { roi_start_h_in = (roi_start_h_in + roi_end_h_in) / 2; roi_end_h_in = roi_start_h_in; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); const int hstart = min(height, max(0, static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)) + roi_start_h)); const int hend = min(height, max(0, static_cast<int>(ceil( static_cast<Dtype>(ph+1) * bin_size_h)) + roi_start_h)); const int wstart = min(width, max(0, static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)) + roi_start_w)); const int wend = min(width, max(0, static_cast<int>(ceil( static_cast<Dtype>(pw+1) * bin_size_w)) + roi_start_w)); Dtype maxval = 0; int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (!(w > roi_start_w_in && w < roi_end_w_in && h > roi_start_h_in && h < roi_end_h_in)) { // if it is not inside the inner rectangle of the region int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void RegionPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( RegionPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, offset_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void RegionPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const Dtype offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 9; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = floor((offset_bottom_rois[1] + 1 + offset) * spatial_scale + 0.5); int roi_start_h = floor((offset_bottom_rois[2] + 1 + offset) * spatial_scale + 0.5); int roi_end_w = ceil( (offset_bottom_rois[3] + 1 - offset) * spatial_scale - 0.5); int roi_end_h = ceil( (offset_bottom_rois[4] + 1 - offset) * spatial_scale - 0.5); int roi_start_w_in = floor((offset_bottom_rois[5] + 1 + offset) * spatial_scale + 0.5); int roi_start_h_in = floor((offset_bottom_rois[6] + 1 + offset) * spatial_scale + 0.5); int roi_end_w_in = ceil( 
(offset_bottom_rois[7] + 1 - offset) * spatial_scale - 0.5); int roi_end_h_in = ceil( (offset_bottom_rois[8] + 1 - offset) * spatial_scale - 0.5); if (roi_start_w > roi_end_w) { roi_start_w = (roi_start_w + roi_end_w) / 2; roi_end_w = roi_start_w; } if (roi_start_h > roi_end_h) { roi_start_h = (roi_start_h + roi_end_h) / 2; roi_end_h = roi_start_h; } if (roi_start_w_in > roi_end_w_in) { roi_start_w_in = (roi_start_w_in + roi_end_w_in) / 2; roi_end_w_in = roi_start_w_in; } if (roi_start_h_in > roi_end_h_in) { roi_start_h_in = (roi_start_h_in + roi_end_h_in) / 2; roi_end_h_in = roi_start_h_in; } // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h) && !(w > roi_start_w_in && w < roi_end_w_in && h > roi_start_h_in && h < roi_end_h_in); if (!in_roi) { continue; } int top_offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + top_offset; const int* offset_argmax_data = argmax_data + top_offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void RegionPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( RegionPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), spatial_scale_, offset_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(RegionPoolingLayer); } // namespace caffe
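Aside: to make the pooling rule in RegionPoolForward concrete, here is a hedged host-side reference for one (ph, pw) output bin of one region: both rectangles are projected onto the feature map with the kernel's floor/ceil rounding, and cells strictly inside the inner rectangle are skipped during the max. It deliberately omits the kernel's collapse of degenerate rectangles (start > end) and the argmax bookkeeping; ringPoolOneBin is a name introduced here, not part of the layer.

#include <algorithm>
#include <cmath>

float ringPoolOneBin(const float *feat, int height, int width,
                     const float roi[9],   // [batch, outer x1,y1,x2,y2, inner x1,y1,x2,y2]
                     float spatial_scale, float offset,
                     int pooled_h, int pooled_w, int ph, int pw)
{
    // Same projection as the kernel: shift by 1 + offset, scale, then round.
    auto lo = [&](float v) { return (int)std::floor((v + 1 + offset) * spatial_scale + 0.5f); };
    auto hi = [&](float v) { return (int)std::ceil ((v + 1 - offset) * spatial_scale - 0.5f); };

    int x1 = lo(roi[1]), y1 = lo(roi[2]), x2 = hi(roi[3]), y2 = hi(roi[4]);     // outer rectangle
    int ix1 = lo(roi[5]), iy1 = lo(roi[6]), ix2 = hi(roi[7]), iy2 = hi(roi[8]); // inner rectangle

    int roi_w = std::max(x2 - x1 + 1, 1), roi_h = std::max(y2 - y1 + 1, 1);     // force >= 1x1
    float bin_h = (float)roi_h / pooled_h, bin_w = (float)roi_w / pooled_w;

    int hstart = std::min(height, std::max(0, (int)std::floor(ph * bin_h) + y1));
    int hend   = std::min(height, std::max(0, (int)std::ceil((ph + 1) * bin_h) + y1));
    int wstart = std::min(width,  std::max(0, (int)std::floor(pw * bin_w) + x1));
    int wend   = std::min(width,  std::max(0, (int)std::ceil((pw + 1) * bin_w) + x1));

    float maxval = 0.0f;   // the kernel also starts from 0, not -FLT_MAX
    for (int h = hstart; h < hend; ++h)
        for (int w = wstart; w < wend; ++w)
            if (!(w > ix1 && w < ix2 && h > iy1 && h < iy2))   // ignore the ring's hole
                maxval = std::max(maxval, feat[h * width + w]);
    return maxval;
}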
40f6261967ed73606f3e42ca41b3d22119ddf9fd.cu
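Aside: the backward kernel above (RegionPoolBackward) inverts the forward binning: for each bottom cell (h, w) it computes the small window of pooled bins that could have selected it, then adds top_diff only where the stored argmax actually points back to that cell. A host-side sketch of just that index inversion, mirroring the kernel's arithmetic; feasibleBins is a name introduced here.

#include <algorithm>
#include <cmath>

void feasibleBins(int h, int w, int roi_start_h, int roi_start_w,
                  int roi_height, int roi_width, int pooled_h, int pooled_w,
                  int *phstart, int *phend, int *pwstart, int *pwend)
{
    float bin_h = (float)roi_height / pooled_h;
    float bin_w = (float)roi_width  / pooled_w;

    // A bottom cell can only have been pooled by bins whose [start, end) range
    // covers it, hence floor on the lower edge and ceil on the upper edge.
    *phstart = (int)std::floor((h - roi_start_h)     / bin_h);
    *phend   = (int)std::ceil ((h - roi_start_h + 1) / bin_h);
    *pwstart = (int)std::floor((w - roi_start_w)     / bin_w);
    *pwend   = (int)std::ceil ((w - roi_start_w + 1) / bin_w);

    // Clamp to the pooled grid, exactly as the kernel does.
    *phstart = std::min(std::max(*phstart, 0), pooled_h);
    *phend   = std::min(std::max(*phend,   0), pooled_h);
    *pwstart = std::min(std::max(*pwstart, 0), pooled_w);
    *pwend   = std::min(std::max(*pwend,   0), pooled_w);
}

Restricting the scan to this window is what keeps the backward pass from looping over every pooled bin of every ROI for every bottom element.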
// This file modifies the ROI pooling layer of Fast R-CNN such that it implements the // the rectangular rings regions that were introduced on: // "Object detection via a multi-region & semantic segmentation-aware CNN model" // Those rectangular ring regions are defined in terms of an inner rectangle and // and an outer rectangle. During the region pooling operation, both the inner and // the outer rectangles are projected on the activation maps and the activations // that lay inside the inner rectangle are ignored during the adaptive max // pooling operation. // // AUTORIGHTS // -------------------------------------------------------- // Copyright (c) 2015 Spyros Gidaris // // "Object detection via a multi-region & semantic segmentation-aware CNN model" // Technical report: http://arxiv.org/abs/1505.01749 // --------------------------------------------------------- // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/region_pooling_layer.hpp" #include "caffe/util/math_functions.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void RegionPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const Dtype offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; // For each ROI R = [batch_index, x_outer_1, y_outer_1, x_outer_2, y_outer_2, x_inner_1, y_inner_1, x_inner_2, y_inner_2]: // where R_outer = [x_outer_1, y_outer_1, x_outer_2, y_outer_2] is the outer rectangle of the region and // R_inner = [x_inner_1, y_inner_1, x_inner_2, y_inner_2] is the inner rectangle of the region // max pooler over R by ignoring (setting to zero) the activations that lay inside the inner rectangle R_inner bottom_rois += n * 9; int roi_batch_ind = bottom_rois[0]; // outer rectangle of the region int roi_start_w = floor((bottom_rois[1] + 1 + offset) * spatial_scale + 0.5); int roi_start_h = floor((bottom_rois[2] + 1 + offset) * spatial_scale + 0.5); int roi_end_w = ceil( (bottom_rois[3] + 1 - offset) * spatial_scale - 0.5); int roi_end_h = ceil( (bottom_rois[4] + 1 - offset) * spatial_scale - 0.5); // inner rectangle of the region int roi_start_w_in = floor((bottom_rois[5] + 1 + offset) * spatial_scale + 0.5); int roi_start_h_in = floor((bottom_rois[6] + 1 + offset) * spatial_scale + 0.5); int roi_end_w_in = ceil( (bottom_rois[7] + 1 - offset) * spatial_scale - 0.5); int roi_end_h_in = ceil( (bottom_rois[8] + 1 - offset) * spatial_scale - 0.5); if (roi_start_w > roi_end_w) { roi_start_w = (roi_start_w + roi_end_w) / 2; roi_end_w = roi_start_w; } if (roi_start_h > roi_end_h) { roi_start_h = (roi_start_h + roi_end_h) / 2; roi_end_h = roi_start_h; } if (roi_start_w_in > roi_end_w_in) { roi_start_w_in = (roi_start_w_in + roi_end_w_in) / 2; roi_end_w_in = roi_start_w_in; } if (roi_start_h_in > roi_end_h_in) { roi_start_h_in = (roi_start_h_in + 
roi_end_h_in) / 2; roi_end_h_in = roi_start_h_in; } // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); const int hstart = min(height, max(0, static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)) + roi_start_h)); const int hend = min(height, max(0, static_cast<int>(ceil( static_cast<Dtype>(ph+1) * bin_size_h)) + roi_start_h)); const int wstart = min(width, max(0, static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)) + roi_start_w)); const int wend = min(width, max(0, static_cast<int>(ceil( static_cast<Dtype>(pw+1) * bin_size_w)) + roi_start_w)); Dtype maxval = 0; int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (!(w > roi_start_w_in && w < roi_end_w_in && h > roi_start_h_in && h < roi_end_h_in)) { // if it is not inside the inner rectangle of the region int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void RegionPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) RegionPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, spatial_scale_, offset_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void RegionPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const Dtype offset, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 9; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = floor((offset_bottom_rois[1] + 1 + offset) * spatial_scale + 0.5); int roi_start_h = floor((offset_bottom_rois[2] + 1 + offset) * spatial_scale + 0.5); int roi_end_w = ceil( (offset_bottom_rois[3] + 1 - offset) * spatial_scale - 0.5); int roi_end_h = ceil( (offset_bottom_rois[4] + 1 - offset) * spatial_scale - 0.5); int roi_start_w_in = floor((offset_bottom_rois[5] + 1 + offset) * spatial_scale + 0.5); int roi_start_h_in = floor((offset_bottom_rois[6] + 1 + offset) * spatial_scale + 0.5); int roi_end_w_in = ceil( (offset_bottom_rois[7] + 1 - offset) * spatial_scale - 0.5); int roi_end_h_in = ceil( (offset_bottom_rois[8] + 1 - offset) * 
spatial_scale - 0.5); if (roi_start_w > roi_end_w) { roi_start_w = (roi_start_w + roi_end_w) / 2; roi_end_w = roi_start_w; } if (roi_start_h > roi_end_h) { roi_start_h = (roi_start_h + roi_end_h) / 2; roi_end_h = roi_start_h; } if (roi_start_w_in > roi_end_w_in) { roi_start_w_in = (roi_start_w_in + roi_end_w_in) / 2; roi_end_w_in = roi_start_w_in; } if (roi_start_h_in > roi_end_h_in) { roi_start_h_in = (roi_start_h_in + roi_end_h_in) / 2; roi_end_h_in = roi_start_h_in; } // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h) && !(w > roi_start_w_in && w < roi_end_w_in && h > roi_start_h_in && h < roi_end_h_in); if (!in_roi) { continue; } int top_offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + top_offset; const int* offset_argmax_data = argmax_data + top_offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void RegionPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) RegionPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), spatial_scale_, offset_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(RegionPoolingLayer); } // namespace caffe
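Aside: both pooling kernels in this record iterate with CUDA_KERNEL_LOOP over nthreads elements and are launched with CAFFE_GET_BLOCKS(count) blocks of CAFFE_CUDA_NUM_THREADS threads. Those macros live in Caffe's common CUDA headers, which are not part of this record; under the usual assumption that they expand to a grid-stride loop and a rounded-up block count, an equivalent standalone pattern looks like this (the 256-thread constant is illustrative, not Caffe's):

#include <cuda_runtime.h>

constexpr int kThreadsPerBlock = 256;

static inline int numBlocks(int n) { return (n + kThreadsPerBlock - 1) / kThreadsPerBlock; }

__global__ void axpy(int n, float a, const float *x, float *y)
{
    // Grid-stride loop: thread t handles t, t + gridDim*blockDim, ... so the
    // kernel is correct for any grid size, not just an exact fit.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        y[i] += a * x[i];
    }
}

// Launch: axpy<<<numBlocks(n), kThreadsPerBlock>>>(n, 2.0f, d_x, d_y);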
fcd444938893cb9506764a33e666da73d363ca72.hip
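Aside: the hipified graph source that follows expands its SSSP frontier by first scanning the per-vertex neighbour counts (Cuda_IntraBlockPrefixSum plus Cuda_BlockOffsetPrefixSum), so every worklist entry knows the offset at which to scatter its neighbours into the gathered worklist. As a much smaller illustration of the same idea, deliberately a simpler Hillis-Steele scan rather than the tree-based scheme used below, a single-block exclusive scan might look like this; blockExclusiveScan is a name introduced here.

#include <cuda_runtime.h>

// One block scans up to blockDim.x counts held in counts[] and writes the
// exclusive prefix sums (scatter offsets) to offsets[].
__global__ void blockExclusiveScan(const int *counts, int *offsets, int n)
{
    extern __shared__ int temp[];
    int tid = threadIdx.x;

    int v = (tid < n) ? counts[tid] : 0;
    temp[tid] = v;
    __syncthreads();

    // Hillis-Steele inclusive scan over shared memory.
    for (int stride = 1; stride < blockDim.x; stride <<= 1) {
        int add = (tid >= stride) ? temp[tid - stride] : 0;
        __syncthreads();
        temp[tid] += add;
        __syncthreads();
    }

    // Inclusive minus own value gives the exclusive scan.
    if (tid < n) offsets[tid] = temp[tid] - v;
}

// Launch for n <= 1024 frontier vertices:
//   blockExclusiveScan<<<1, 1024, 1024 * sizeof(int)>>>(d_counts, d_offsets, n);

A multi-block frontier then needs a second pass over the per-block totals, which is exactly the role Cuda_BlockOffsetPrefixSum and Cuda_AddBlockPrefix play in the file below.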
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "CudaGraph.h" #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/execution_policy.h> #define cprint(...) printMutex.lock(); printf(__VA_ARGS__); printMutex.unlock(); #define p(x) printMutex.lock(); cout << x << endl; printMutex.unlock(); #define CUDA_ERR_CHECK \ if( err != hipSuccess) { \ printf("CUDA error: %s ** at Line %d\n", hipGetErrorString(err), __LINE__); \ return EXIT_FAILURE; \ } #define CUDA_SET_DEVICE_ID \ hipSetDevice(0); #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; __device__ int *graph[3], d_numVertices, d_numEdges, *d_worklist, *d_gatherWorklist, d_worklistLength; __device__ int *d_distance, *d_parentInWorklist; __device__ int *d_prefixSum, *d_blockPrefixSum; __device__ int *d_prefixLevel; __device__ bool d_terminate; __global__ void CudaInitialize(int *vertexArray, int *edgeArray, int *weightArray, int *distance, int *worklist, int *worklist2, int *parentInWorklist, int *prefixSum, int *blockPrefixSum, int *prefixLevel, int numVertices, int numEdges) { d_numVertices = numVertices; d_numEdges = numEdges; graph[0] = vertexArray; graph[1] = edgeArray; graph[2] = weightArray; d_distance = distance; d_distance[1] = 0; d_worklist = worklist; d_gatherWorklist = worklist2; d_parentInWorklist = parentInWorklist; d_prefixSum = prefixSum; d_blockPrefixSum = blockPrefixSum; d_prefixLevel = prefixLevel; d_worklist[0] = 1; /*for (int i = 0; i < numVertices; i++) { d_worklist[i] = i; //d_distance[i] = 100; }*/ d_worklistLength = 1; print("WLLenght = %d, numVertices = %d\n", d_worklistLength, d_numVertices); d_prefixSum[numVertices + 1] = 0; d_prefixSum[numVertices + 2] = 0; } __global__ void CudaPrintGraph() { print("Vertex Array:\n"); for (int i = 0; i < d_numVertices + 2; i++) print(" %d", graph[0][i]); print("\n"); print("Edge Array:\n"); for (int i = 0; i < d_numEdges + 1; i++) print(" %d[%d]", graph[1][i], graph[2][i]); print("\n"); } // Prefix Sum calculation within a single block __global__ void Cuda_IntraBlockPrefixSum() { extern __shared__ int temp[]; __shared__ int blockPrefixSum; int vertex, maxLength, numNeighbours, tId = threadIdx.x; if ((d_worklistLength - blockIdx.x * 1024) < 1024) { maxLength = d_worklistLength - blockIdx.x * 1024 + 1; // To mark the boundary, maxLength is increased by 1 and the // numNeighbours of the last element is set to zero. temp[maxLength - 1] = 0; } else maxLength = 1024; if (blockIdx.x * blockDim.x + tId < d_worklistLength) { vertex = d_worklist[blockIdx.x * blockDim.x + tId]; numNeighbours = graph[0][vertex + 1] - graph[0][vertex]; temp[tId] = numNeighbours; } //print("This is a thread : %d. Max = %d\n", threadIdx.x, maxLength); __syncthreads(); int index = 2 * tId, add = 1; for (int depth = maxLength; depth > 0; depth = depth >> 1) { if (index + add < maxLength) { temp[index] += temp[index + add]; index = index << 1; add = add << 1; } __syncthreads(); } if (tId == 0) { d_prefixLevel[blockIdx.x] = add; blockPrefixSum = temp[0]; //print("Level = %d. 
MaxLength = %d\n", d_prefixLevel[blockIdx.x], maxLength); } /*if (tId < d_worklistLength) d_prefixSum[tId] = temp[tId];*/ __syncthreads(); int level; level = d_prefixLevel[blockIdx.x]; index = tId * level; for (int depth = maxLength; depth > 0; depth = depth >> 1) { if (index + level / 2 < maxLength) { temp[index] -= temp[index + level / 2]; d_prefixSum[blockIdx.x * blockDim.x + index + level / 2] = temp[index] + d_prefixSum[blockIdx.x * blockDim.x + index]; } index = index >> 1; level = level >> 1; __syncthreads(); } if (tId == 0) { d_prefixLevel[blockIdx.x] = blockPrefixSum; //print("Block %d. PrefixSum = %d, Array Value = %d\n", blockIdx.x, blockPrefixSum, d_prefixLevel[blockIdx.x]); } } // Prefix Sum on the whole block sum. __global__ void Cuda_BlockOffsetPrefixSum(int numBlocks) { extern __shared__ int temp[]; int tId = threadIdx.x; if (tId < numBlocks) temp[tId] = d_prefixLevel[tId]; int index = 2 * tId, add = 1; __shared__ int sharedVar; for (int depth = numBlocks; depth > 0; depth = depth >> 1) { if (index + add < numBlocks) { temp[index] += temp[index + add]; index = index << 1; add = add << 1; } __syncthreads(); } if (tId == 0) { sharedVar = add; d_worklistLength = temp[0]; print("New WorkList Length = %d\n", d_worklistLength); } __syncthreads(); int level; level = sharedVar; index = tId * level; for (int depth = numBlocks; depth > 0; depth = depth >> 1) { if (index + level / 2 < numBlocks) { temp[index] -= temp[index + level / 2]; d_blockPrefixSum[blockIdx.x * blockDim.x + index + level / 2] = temp[index] + d_blockPrefixSum[blockIdx.x * blockDim.x + index]; } index = index >> 1; level = level >> 1; __syncthreads(); } } __global__ void Cuda_AddBlockPrefix() { int tId = blockIdx.x * blockDim.x + threadIdx.x; if (tId < d_worklistLength) { d_prefixSum[tId] += d_blockPrefixSum[blockIdx.x]; } } int CudaGraphClass::verifyPrefixSum(int *calculatedPrefix) { int *verifiedPrefix, prefix = 0; verifiedPrefix = new int[(numVertices + 1)]; for (int vertex = 0; vertex < numVertices; vertex++) { verifiedPrefix[vertex] = prefix; int numNeighbours = row[0][vertex + 1] - row[0][vertex]; prefix += numNeighbours; } /*for (int vertex = 0; vertex <= numVertices; vertex++) { print("Prefix[%d] = %d\n", vertex, verifiedPrefix[vertex]); }*/ for (int vertex = 0; vertex < numVertices; vertex++) { if (verifiedPrefix[vertex] != calculatedPrefix[vertex]) { print("Verification failed at vertex %d.\n", vertex); print("Verified prefix = %d. 
Calculated prefix = %d\n", verifiedPrefix[vertex], calculatedPrefix[vertex]); return 1; } } delete[] verifiedPrefix; return 0; } int CudaGraphClass::verifyGatherWorklist(int *calculatedGatherWorklist, int newWorklistLength) { int vertex = 0, i = 0; while (i < newWorklistLength) { for (int j = row[0][vertex]; j < row[0][vertex + 1]; j++, i++) { if (row[1][j] != calculatedGatherWorklist[i]) { print("Verify Gather Worklist: Verification Failed at vertex %d\n", vertex); return 1; } } vertex++; } cout << "Gather Worklist Verified: " << i << "\n"; return 0; } inline int reallocDeviceMemory(int *d_pointer, int newMemorySize) { int *devicePointer; hipError_t err; err = hipMemcpyFromSymbol(&devicePointer, d_pointer, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipFree(devicePointer); CUDA_ERR_CHECK; err = hipMalloc((void **)&devicePointer, newMemorySize * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_pointer, &devicePointer, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; return 0; } int CudaGraphClass::PrefixSum(int worklistLength, int *newWorklistLength) { hipError_t err; if (maxWorklistLength < worklistLength) { cout << "PrefixSum Realloc\n"; int *devicePrefixSum, *newPrefixSum; maxWorklistLength = worklistLength; err = hipMemcpyFromSymbol(&devicePrefixSum, d_prefixSum, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipFree(devicePrefixSum); CUDA_ERR_CHECK; err = hipMalloc((void **)&newPrefixSum, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_prefixSum, &newPrefixSum, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; //reallocDeviceMemory(d_worklist, maxWorklistLength); /*int *deviceWorklist, *newWorklist; err = hipMemcpyFromSymbol(&deviceWorklist, d_worklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipFree(deviceWorklist); CUDA_ERR_CHECK; err = hipMalloc((void **)&newWorklist, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_worklist, &newWorklist, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK;*/ } int numBlocksPerGrid = (worklistLength + numThreadsPerBlock) / numThreadsPerBlock; if (maxNumBlocksPerGrid < numBlocksPerGrid) { maxNumBlocksPerGrid = numBlocksPerGrid; //reallocDeviceMemory(d_prefixLevel, maxNumBlocksPerGrid); int *devicePrefixLevel, *newPrefixLevel; err = hipMemcpyFromSymbol(&devicePrefixLevel, d_prefixLevel, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipFree(devicePrefixLevel); CUDA_ERR_CHECK; err = hipMalloc((void **)&newPrefixLevel, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_prefixLevel, &newPrefixLevel, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; //reallocDeviceMemory(d_blockPrefixSum, maxNumBlocksPerGrid); int *deviceBlockPrefixSum, *newBlockPrefixSum; err = hipMemcpyFromSymbol(&deviceBlockPrefixSum, d_blockPrefixSum, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipFree(deviceBlockPrefixSum); CUDA_ERR_CHECK; err = hipMalloc((void **)&newBlockPrefixSum, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_blockPrefixSum, &newBlockPrefixSum, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; } hipLaunchKernelGGL(( Cuda_IntraBlockPrefixSum), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), numThreadsPerBlock * sizeof(int), 0, ); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( Cuda_BlockOffsetPrefixSum), dim3((numBlocksPerGrid + numThreadsPerBlock) / 
numThreadsPerBlock), dim3(numThreadsPerBlock), numBlocksPerGrid * sizeof(int ), 0, numBlocksPerGrid); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( Cuda_AddBlockPrefix), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), 0, 0, ); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); int *devicePrefixSum, *hostPrefixSum; hostPrefixSum = new int[(worklistLength + 1)]; err = hipMemcpyFromSymbol(&devicePrefixSum, d_prefixSum, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpy(hostPrefixSum, devicePrefixSum, (worklistLength + 1) * sizeof(int), hipMemcpyDeviceToHost); CUDA_ERR_CHECK; out << "Prefix Sums\n"; //for (int i = 0; i <= worklistLength; i++) // out << "["<< i << "] = " << hostPrefixSum[i] << endl; //verifyPrefixSum(hostPrefixSum); err = hipMemcpyFromSymbol(&worklistLength, d_worklistLength, sizeof(int), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; *newWorklistLength = worklistLength; delete[] hostPrefixSum; return 0; } // TODO: Build an optimized fine grained gathering algorithm __global__ void populateNeighbours(int worklistLength) { int tId = blockIdx.x * blockDim.x + threadIdx.x; if (tId < worklistLength) { int vertex = d_worklist[tId]; printf("Thread %d: vertex = %d\n", tId, d_worklist[tId]); int edgeIndex = graph[0][vertex]; int index = d_prefixSum[tId], lastIndex = d_prefixSum[tId + 1]; //print("Thread: %d: vertex = %d, edgeIndex = %d, prefix = %d, lastIndex = %d\n", tId, vertex, edgeIndex, index, lastIndex); for (int i = 0; i < lastIndex - index; i++) { d_gatherWorklist[index + i] = graph[1][edgeIndex + i]; //d_parentInWorklist[index + i] = vertex; d_parentInWorklist[index + i] = d_distance[vertex] + graph[2][edgeIndex + i]; } } } int CudaGraphClass::gatherNeighbours(int worklistLength) { int numBlocksPerGrid = (worklistLength + 1 + numThreadsPerBlock - 1) / numThreadsPerBlock; cout << "Gather Neighbours: " << numThreadsPerBlock << ", " << numBlocksPerGrid << ", " << worklistLength << "\n"; hipLaunchKernelGGL(( populateNeighbours), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), 0, 0, worklistLength); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); return 0; } __global__ void removeDuplicatesInGather(int worklistLength) { int tId = blockIdx.x * blockDim.x + threadIdx.x; if (tId != 0) return; int prevVertex = d_gatherWorklist[0], index = 1; for (int i = 1; i < worklistLength; i++) { if (prevVertex != d_gatherWorklist[i]) { d_gatherWorklist[index] = d_gatherWorklist[i]; prevVertex = d_gatherWorklist[i]; index++; } } d_worklistLength = index; } __global__ void processEdges(int worklistLength) { int tId = blockIdx.x * blockDim.x + threadIdx.x; __shared__ bool terminate; terminate = true; if (tId >= worklistLength) return; if (tId == 0 || d_gatherWorklist[tId] != d_gatherWorklist[tId - 1]) { int vertex = d_gatherWorklist[tId]; int min = d_distance[vertex], i = 0; while((tId + i) < worklistLength && d_gatherWorklist[tId + i] == vertex) { if (min > d_parentInWorklist[tId + i]) { min = d_parentInWorklist[tId + i]; terminate = false; } i++; } d_distance[vertex] = min; } if (terminate == false) { //printf("ThreadId : %d . 
Terminate = %d, d_terminate = %d\n", threadIdx.x, terminate, d_terminate); d_terminate = false; } } int CudaGraphClass::processNeighbours(int newWorklistLength) { int *deviceGatherWorklist, *deviceParentInWorklist; hipError_t err; err = hipMemcpyFromSymbol(&deviceGatherWorklist, d_gatherWorklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpyFromSymbol(&deviceParentInWorklist, d_parentInWorklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; thrust::device_ptr<int> dev_gatherWorklist(deviceGatherWorklist); thrust::device_ptr<int> dev_parentInWorklist(deviceParentInWorklist); // wrap raw pointer with a device_ptr //thrust::device_ptr<int> dev_gatherWorklist = thrust::device_pointer_cast(deviceGatherWorklist); thrust::sort_by_key(dev_gatherWorklist, dev_gatherWorklist + newWorklistLength, dev_parentInWorklist); int numBlocksPerGrid = (newWorklistLength + 1 + numThreadsPerBlock - 1) / numThreadsPerBlock; hipLaunchKernelGGL(( processEdges), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), 0, 0, newWorklistLength); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( removeDuplicatesInGather), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), 0, 0, newWorklistLength); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); return 0; } int CudaGraphClass::callSSSP() { int terminate = false, *distance, *parentInWorklist, *gatherWorklist, worklistLength = 1; bool worklistReallocated; hipError_t err; distance = new int[(numVertices + 1)]; parentInWorklist = new int[(numEdges+ 1)]; gatherWorklist = new int[(numEdges + 1)]; //int numBlocksPerGrid = (numVertices + 1 + numThreadsPerBlock - 1) / numThreadsPerBlock; //cout << numThreadsPerBlock << ", " << numBlocksPerGrid << "\n"; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); while (terminate == false) { terminate = true; worklistReallocated = false; err = hipMemcpyToSymbol(d_terminate, &terminate, sizeof(bool), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; gpuErrchk(hipDeviceSynchronize()); int newWorklistLength; PrefixSum(worklistLength, &newWorklistLength); /*if (maxWorklistLength < newWorklistLength) { maxWorklistLength = newWorklistLength; worklistReallocated = true; //reallocDeviceMemory(d_worklist, maxWorklistLength); cout << "After Prefix Sum: Realloc\n"; int *deviceGatherWorklist, *newGatherWorklist; int *deviceParentInWorklist, *newParentInWorklist; err = hipMemcpyFromSymbol(&deviceGatherWorklist, d_gatherWorklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "Gather worklist = " << deviceGatherWorklist << "\n"; err = hipFree(deviceGatherWorklist); CUDA_ERR_CHECK; err = hipMalloc((void **)&newGatherWorklist, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_gatherWorklist, &newGatherWorklist, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; err = hipMemcpyFromSymbol(&deviceParentInWorklist, d_parentInWorklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipFree(deviceParentInWorklist); CUDA_ERR_CHECK; err = hipMalloc((void **)&newParentInWorklist, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_parentInWorklist, &newParentInWorklist, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; delete[] parentInWorklist; delete[] gatherWorklist; parentInWorklist = new int[maxWorklistLength]; gatherWorklist = new int[maxWorklistLength]; cout << "Size of gather and parent = " << maxWorklistLength << "\n"; }*/ cout 
<< "New WorkList in Host = " << newWorklistLength << "\n"; gatherNeighbours(worklistLength); int * tempdeviceGatherWorklist; err = hipMemcpyFromSymbol(&tempdeviceGatherWorklist, d_gatherWorklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpy(gatherWorklist, tempdeviceGatherWorklist, newWorklistLength * sizeof(int), hipMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "Before removing duplicates: \n"; for (int i = 0; i < newWorklistLength; i++) out << "["<< i << "] = " << gatherWorklist[i] << " -- " << parentInWorklist[i] << " -> " << distance[gatherWorklist[i]] << endl; processNeighbours(newWorklistLength); int *deviceWorklist, *deviceGatherWorklist, *deviceParentInWorklist, *deviceDistance; err = hipMemcpyFromSymbol(&deviceGatherWorklist, d_gatherWorklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpyFromSymbol(&deviceWorklist, d_worklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpyFromSymbol(&deviceParentInWorklist, d_parentInWorklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpyFromSymbol(&deviceDistance, d_distance, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpy(gatherWorklist, deviceGatherWorklist, newWorklistLength * sizeof(int), hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpy(parentInWorklist, deviceParentInWorklist, newWorklistLength * sizeof(int), hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipMemcpy(distance, deviceDistance, numVertices * sizeof(int), hipMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "New Worklist: \n"; //for (int i = 0; i < newWorklistLength; i++) // out << "["<< i << "] = " << gatherWorklist[i] << " -- " << parentInWorklist[i] << " -> " << distance[gatherWorklist[i]] << endl; //verifyGatherWorklist(gatherWorklist, newWorklistLength); err = hipMemcpyFromSymbol(&terminate, d_terminate, sizeof(bool), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "Terminate: " << terminate << "\n"; //worklistLength = newWorklistLength; err = hipMemcpyFromSymbol(&worklistLength, d_worklistLength, sizeof(int), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "After removing duplicated: " << worklistLength << "\n"; //cout << "New Worklist: \n"; for (int i = 0; i < worklistLength; i++) out << "["<< i << "] = " << gatherWorklist[i] << " -> " << distance[gatherWorklist[i]] << endl; /*if (worklistReallocated == true) { int *deviceWorklist, *newWorklist; err = hipMemcpyFromSymbol(&deviceWorklist, d_worklist, sizeof(int *), 0, hipMemcpyDeviceToHost); CUDA_ERR_CHECK; err = hipFree(deviceWorklist); CUDA_ERR_CHECK; err = hipMalloc((void **)&newWorklist, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_worklist, &newWorklist, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; }*/ // Swap worklist and gatherWorklist err = hipMemcpyToSymbol(d_worklist, &deviceGatherWorklist, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; err = hipMemcpyToSymbol(d_gatherWorklist, &deviceWorklist, sizeof(int *), 0, hipMemcpyHostToDevice); CUDA_ERR_CHECK; } /*hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << "Elapsed time = " << milliseconds << "\n";*/ hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); cout << "Elapsed time = " << elapsedTime << " milliseconds \n"; hipEventDestroy(start); hipEventDestroy(stop); cout << "Shortest Distances: \n"; for (int i = 0; i <= numVertices; 
i++) out << "["<< i << "] = " << distance[i] << endl; delete[] distance; delete[] parentInWorklist; delete[] gatherWorklist; return 0; } int CudaGraphClass::copyGraphToDevice() { CUDA_SET_DEVICE_ID; gpuErrchk(hipPeekAtLastError()); int *vertexArray, *edgeArray, *weightArray, *distance, *parent, *worklist, *worklist2; int *prefixSum, *blockPrefixSum, *prefixLevel; hipError_t err; err = hipMalloc((void **)&vertexArray, (numVertices + 2) * sizeof(int)); CUDA_ERR_CHECK; err = hipMalloc((void **)&edgeArray, (numEdges + 1) * sizeof(int)); CUDA_ERR_CHECK; err = hipMalloc((void **)&weightArray, (numEdges + 1) * sizeof(int)); CUDA_ERR_CHECK; err = hipMalloc((void **)&distance, (numVertices + 1) * sizeof(int)); CUDA_ERR_CHECK; maxWorklistLength = numVertices + 2; err = hipMalloc((void **)&worklist, numEdges/*maxWorklistLength*/ * sizeof(int)); CUDA_ERR_CHECK; err = hipMalloc((void **)&worklist2, numEdges/*maxWorklistLength*/ * sizeof(int)); CUDA_ERR_CHECK; err = hipMalloc((void **)&parent, numEdges/*maxWorklistLength*/ * sizeof(int)); CUDA_ERR_CHECK; err = hipMalloc((void **)&prefixSum, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; maxNumBlocksPerGrid = 1024; err = hipMalloc((void **)&blockPrefixSum, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = hipMalloc((void **)&prefixLevel, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = hipMemset(prefixSum, 0x0, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = hipMemset(blockPrefixSum, 0x0, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = hipMemcpy(vertexArray, row[0], (numVertices + 2) * sizeof(int), hipMemcpyHostToDevice); CUDA_ERR_CHECK; err = hipMemcpy(edgeArray, row[1], (numEdges + 1) * sizeof(int), hipMemcpyHostToDevice); CUDA_ERR_CHECK; err = hipMemcpy(weightArray, row[2], (numEdges + 1) * sizeof(int), hipMemcpyHostToDevice); CUDA_ERR_CHECK; err = hipMemset(distance, 0x7f, (numVertices + 1) * sizeof(int)); CUDA_ERR_CHECK; hipLaunchKernelGGL(( CudaInitialize), dim3(1), dim3(1), 0, 0, vertexArray, edgeArray, weightArray, distance, worklist, worklist2, parent, prefixSum, blockPrefixSum, prefixLevel, numVertices, numEdges); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); return 0; } void CudaGraphClass::populate(char *fileName) { inputFile.open(fileName); if (!inputFile.is_open()){ cout << "invalid file"; return; } int **AdjMatrix, i, j, k; AdjMatrix = new int* [numVertices + 1](); for (i = 0; i <= numVertices; i++) { AdjMatrix[i] = new int [numVertices + 1](); } i = numEdges; int lastj = 0, currentIndex = 0; inputFile >> j >> k; srand(time(NULL)); while(i > 0) { //scanf("%d %d", &j, &k); inputFile >> j >> k; AdjMatrix[j][k] = 1; while (lastj <= j || lastj == 0) { if (lastj == 0) { row[0][0] = currentIndex; row[0][1] = currentIndex; }else { row[0][lastj] = currentIndex; } lastj++; } // if (AdjMatrix[k][j] != 1) row[1][currentIndex] = k; row[2][currentIndex] = (rand() % 2) ? 
rand() % 10 + 1/* - 10 */: rand() % 10 + 1; currentIndex ++; i--; } //row[1][0] = 0; // Sentinel node just points to the end of the last node in the graph while (lastj <= numVertices + 1) { row[0][lastj] = currentIndex; lastj++; } //row[0][lastj+1] = currentIndex; /* for (i = 0; i <= numVertices + 1; i++) print("Vertex: %d = %d\n", i, row[0][i]); print("Second Array:\n"); for (i = 0; i <= numEdges; i++) print("Edges: Index: %d, Value = %d\n", i, row[1][i]); */ j = 1; for (i = 1; i <= numVertices; i++) { currentIndex = row[0][i]; while (currentIndex < row[0][i+1]) { // print("%d %d\n", i, row[1][currentIndex]); if (AdjMatrix[i][row[1][currentIndex]] != 1 /*&& AdjMatrix[row[1][currentIndex]][i] != 1*/) { outs("\n\nGraph does not match at [%d][%d]. CurrentIndex = %d\n\n", i, row[1][currentIndex], currentIndex); break; } j++; currentIndex ++; } } for (i = 0; i <= numVertices; i++) { delete[] AdjMatrix[i]; } delete[] AdjMatrix; } void CudaGraphClass::printGraph() { hipLaunchKernelGGL(( CudaPrintGraph), dim3(1), dim3(1), 0, 0, ); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); }
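/*
 * Illustrative sketch, not part of the original file: the PrefixSum() path above
 * (Cuda_IntraBlockPrefixSum + Cuda_BlockOffsetPrefixSum + Cuda_AddBlockPrefix)
 * builds d_prefixSum as an exclusive prefix sum of the per-vertex neighbour counts
 * of the current worklist, which populateNeighbours() then uses as write offsets
 * into d_gatherWorklist. The fragment below shows the same offsets computed with
 * thrust::exclusive_scan, purely as a reference for what those kernels produce.
 * The names countNeighboursKernel / computeGatherOffsets and the d_counts buffer
 * are hypothetical helpers introduced only for this illustration.
 */
#include <thrust/device_ptr.h>
#include <thrust/scan.h>

__global__ void countNeighboursKernel(const int *vertexArray, const int *worklist,
                                      int *counts, int worklistLength)
{
    int tId = blockIdx.x * blockDim.x + threadIdx.x;
    if (tId < worklistLength) {
        int v = worklist[tId];
        counts[tId] = vertexArray[v + 1] - vertexArray[v];   // CSR out-degree of v
    }
}

// d_offsets must hold worklistLength + 1 entries; on return, d_offsets[i] is the
// first slot in the gather worklist owned by worklist entry i, and the returned
// value is the total number of gathered neighbours.
static int computeGatherOffsets(const int *d_vertexArray, const int *d_worklist,
                                int *d_counts, int *d_offsets,
                                int worklistLength, int threadsPerBlock)
{
    if (worklistLength == 0) return 0;
    int blocks = (worklistLength + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(countNeighboursKernel, dim3(blocks), dim3(threadsPerBlock), 0, 0,
                       d_vertexArray, d_worklist, d_counts, worklistLength);
    thrust::device_ptr<int> counts(d_counts), offsets(d_offsets);
    thrust::exclusive_scan(counts, counts + worklistLength, offsets);  // offsets[0] = 0
    int lastOffset = 0, lastCount = 0;
    hipMemcpy(&lastOffset, d_offsets + worklistLength - 1, sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(&lastCount, d_counts + worklistLength - 1, sizeof(int), hipMemcpyDeviceToHost);
    int total = lastOffset + lastCount;
    hipMemcpy(d_offsets + worklistLength, &total, sizeof(int), hipMemcpyHostToDevice);
    return total;   // plays the role of the new d_worklistLength after PrefixSum()
}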
fcd444938893cb9506764a33e666da73d363ca72.cu
#include <iostream> #include "CudaGraph.h" #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/execution_policy.h> #define cprint(...) printMutex.lock(); printf(__VA_ARGS__); printMutex.unlock(); #define p(x) printMutex.lock(); cout << x << endl; printMutex.unlock(); #define CUDA_ERR_CHECK \ if( err != cudaSuccess) { \ printf("CUDA error: %s ** at Line %d\n", cudaGetErrorString(err), __LINE__); \ return EXIT_FAILURE; \ } #define CUDA_SET_DEVICE_ID \ cudaSetDevice(0); #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; __device__ int *graph[3], d_numVertices, d_numEdges, *d_worklist, *d_gatherWorklist, d_worklistLength; __device__ int *d_distance, *d_parentInWorklist; __device__ int *d_prefixSum, *d_blockPrefixSum; __device__ int *d_prefixLevel; __device__ bool d_terminate; __global__ void CudaInitialize(int *vertexArray, int *edgeArray, int *weightArray, int *distance, int *worklist, int *worklist2, int *parentInWorklist, int *prefixSum, int *blockPrefixSum, int *prefixLevel, int numVertices, int numEdges) { d_numVertices = numVertices; d_numEdges = numEdges; graph[0] = vertexArray; graph[1] = edgeArray; graph[2] = weightArray; d_distance = distance; d_distance[1] = 0; d_worklist = worklist; d_gatherWorklist = worklist2; d_parentInWorklist = parentInWorklist; d_prefixSum = prefixSum; d_blockPrefixSum = blockPrefixSum; d_prefixLevel = prefixLevel; d_worklist[0] = 1; /*for (int i = 0; i < numVertices; i++) { d_worklist[i] = i; //d_distance[i] = 100; }*/ d_worklistLength = 1; print("WLLenght = %d, numVertices = %d\n", d_worklistLength, d_numVertices); d_prefixSum[numVertices + 1] = 0; d_prefixSum[numVertices + 2] = 0; } __global__ void CudaPrintGraph() { print("Vertex Array:\n"); for (int i = 0; i < d_numVertices + 2; i++) print(" %d", graph[0][i]); print("\n"); print("Edge Array:\n"); for (int i = 0; i < d_numEdges + 1; i++) print(" %d[%d]", graph[1][i], graph[2][i]); print("\n"); } // Prefix Sum calculation within a single block __global__ void Cuda_IntraBlockPrefixSum() { extern __shared__ int temp[]; __shared__ int blockPrefixSum; int vertex, maxLength, numNeighbours, tId = threadIdx.x; if ((d_worklistLength - blockIdx.x * 1024) < 1024) { maxLength = d_worklistLength - blockIdx.x * 1024 + 1; // To mark the boundary, maxLength is increased by 1 and the // numNeighbours of the last element is set to zero. temp[maxLength - 1] = 0; } else maxLength = 1024; if (blockIdx.x * blockDim.x + tId < d_worklistLength) { vertex = d_worklist[blockIdx.x * blockDim.x + tId]; numNeighbours = graph[0][vertex + 1] - graph[0][vertex]; temp[tId] = numNeighbours; } //print("This is a thread : %d. Max = %d\n", threadIdx.x, maxLength); __syncthreads(); int index = 2 * tId, add = 1; for (int depth = maxLength; depth > 0; depth = depth >> 1) { if (index + add < maxLength) { temp[index] += temp[index + add]; index = index << 1; add = add << 1; } __syncthreads(); } if (tId == 0) { d_prefixLevel[blockIdx.x] = add; blockPrefixSum = temp[0]; //print("Level = %d. 
MaxLength = %d\n", d_prefixLevel[blockIdx.x], maxLength); } /*if (tId < d_worklistLength) d_prefixSum[tId] = temp[tId];*/ __syncthreads(); int level; level = d_prefixLevel[blockIdx.x]; index = tId * level; for (int depth = maxLength; depth > 0; depth = depth >> 1) { if (index + level / 2 < maxLength) { temp[index] -= temp[index + level / 2]; d_prefixSum[blockIdx.x * blockDim.x + index + level / 2] = temp[index] + d_prefixSum[blockIdx.x * blockDim.x + index]; } index = index >> 1; level = level >> 1; __syncthreads(); } if (tId == 0) { d_prefixLevel[blockIdx.x] = blockPrefixSum; //print("Block %d. PrefixSum = %d, Array Value = %d\n", blockIdx.x, blockPrefixSum, d_prefixLevel[blockIdx.x]); } } // Prefix Sum on the whole block sum. __global__ void Cuda_BlockOffsetPrefixSum(int numBlocks) { extern __shared__ int temp[]; int tId = threadIdx.x; if (tId < numBlocks) temp[tId] = d_prefixLevel[tId]; int index = 2 * tId, add = 1; __shared__ int sharedVar; for (int depth = numBlocks; depth > 0; depth = depth >> 1) { if (index + add < numBlocks) { temp[index] += temp[index + add]; index = index << 1; add = add << 1; } __syncthreads(); } if (tId == 0) { sharedVar = add; d_worklistLength = temp[0]; print("New WorkList Length = %d\n", d_worklistLength); } __syncthreads(); int level; level = sharedVar; index = tId * level; for (int depth = numBlocks; depth > 0; depth = depth >> 1) { if (index + level / 2 < numBlocks) { temp[index] -= temp[index + level / 2]; d_blockPrefixSum[blockIdx.x * blockDim.x + index + level / 2] = temp[index] + d_blockPrefixSum[blockIdx.x * blockDim.x + index]; } index = index >> 1; level = level >> 1; __syncthreads(); } } __global__ void Cuda_AddBlockPrefix() { int tId = blockIdx.x * blockDim.x + threadIdx.x; if (tId < d_worklistLength) { d_prefixSum[tId] += d_blockPrefixSum[blockIdx.x]; } } int CudaGraphClass::verifyPrefixSum(int *calculatedPrefix) { int *verifiedPrefix, prefix = 0; verifiedPrefix = new int[(numVertices + 1)]; for (int vertex = 0; vertex < numVertices; vertex++) { verifiedPrefix[vertex] = prefix; int numNeighbours = row[0][vertex + 1] - row[0][vertex]; prefix += numNeighbours; } /*for (int vertex = 0; vertex <= numVertices; vertex++) { print("Prefix[%d] = %d\n", vertex, verifiedPrefix[vertex]); }*/ for (int vertex = 0; vertex < numVertices; vertex++) { if (verifiedPrefix[vertex] != calculatedPrefix[vertex]) { print("Verification failed at vertex %d.\n", vertex); print("Verified prefix = %d. 
Calculated prefix = %d\n", verifiedPrefix[vertex], calculatedPrefix[vertex]); return 1; } } delete[] verifiedPrefix; return 0; } int CudaGraphClass::verifyGatherWorklist(int *calculatedGatherWorklist, int newWorklistLength) { int vertex = 0, i = 0; while (i < newWorklistLength) { for (int j = row[0][vertex]; j < row[0][vertex + 1]; j++, i++) { if (row[1][j] != calculatedGatherWorklist[i]) { print("Verify Gather Worklist: Verification Failed at vertex %d\n", vertex); return 1; } } vertex++; } cout << "Gather Worklist Verified: " << i << "\n"; return 0; } inline int reallocDeviceMemory(int *d_pointer, int newMemorySize) { int *devicePointer; cudaError_t err; err = cudaMemcpyFromSymbol(&devicePointer, d_pointer, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaFree(devicePointer); CUDA_ERR_CHECK; err = cudaMalloc((void **)&devicePointer, newMemorySize * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_pointer, &devicePointer, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; return 0; } int CudaGraphClass::PrefixSum(int worklistLength, int *newWorklistLength) { cudaError_t err; if (maxWorklistLength < worklistLength) { cout << "PrefixSum Realloc\n"; int *devicePrefixSum, *newPrefixSum; maxWorklistLength = worklistLength; err = cudaMemcpyFromSymbol(&devicePrefixSum, d_prefixSum, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaFree(devicePrefixSum); CUDA_ERR_CHECK; err = cudaMalloc((void **)&newPrefixSum, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_prefixSum, &newPrefixSum, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; //reallocDeviceMemory(d_worklist, maxWorklistLength); /*int *deviceWorklist, *newWorklist; err = cudaMemcpyFromSymbol(&deviceWorklist, d_worklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaFree(deviceWorklist); CUDA_ERR_CHECK; err = cudaMalloc((void **)&newWorklist, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_worklist, &newWorklist, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK;*/ } int numBlocksPerGrid = (worklistLength + numThreadsPerBlock) / numThreadsPerBlock; if (maxNumBlocksPerGrid < numBlocksPerGrid) { maxNumBlocksPerGrid = numBlocksPerGrid; //reallocDeviceMemory(d_prefixLevel, maxNumBlocksPerGrid); int *devicePrefixLevel, *newPrefixLevel; err = cudaMemcpyFromSymbol(&devicePrefixLevel, d_prefixLevel, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaFree(devicePrefixLevel); CUDA_ERR_CHECK; err = cudaMalloc((void **)&newPrefixLevel, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_prefixLevel, &newPrefixLevel, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; //reallocDeviceMemory(d_blockPrefixSum, maxNumBlocksPerGrid); int *deviceBlockPrefixSum, *newBlockPrefixSum; err = cudaMemcpyFromSymbol(&deviceBlockPrefixSum, d_blockPrefixSum, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaFree(deviceBlockPrefixSum); CUDA_ERR_CHECK; err = cudaMalloc((void **)&newBlockPrefixSum, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_blockPrefixSum, &newBlockPrefixSum, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; } Cuda_IntraBlockPrefixSum<<<numBlocksPerGrid, numThreadsPerBlock, numThreadsPerBlock * sizeof(int)>>>(); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); Cuda_BlockOffsetPrefixSum<<<(numBlocksPerGrid + numThreadsPerBlock) / numThreadsPerBlock, numThreadsPerBlock, 
numBlocksPerGrid * sizeof(int )>>>(numBlocksPerGrid); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); Cuda_AddBlockPrefix<<<numBlocksPerGrid, numThreadsPerBlock>>>(); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); int *devicePrefixSum, *hostPrefixSum; hostPrefixSum = new int[(worklistLength + 1)]; err = cudaMemcpyFromSymbol(&devicePrefixSum, d_prefixSum, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpy(hostPrefixSum, devicePrefixSum, (worklistLength + 1) * sizeof(int), cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; out << "Prefix Sums\n"; //for (int i = 0; i <= worklistLength; i++) // out << "["<< i << "] = " << hostPrefixSum[i] << endl; //verifyPrefixSum(hostPrefixSum); err = cudaMemcpyFromSymbol(&worklistLength, d_worklistLength, sizeof(int), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; *newWorklistLength = worklistLength; delete[] hostPrefixSum; return 0; } // TODO: Build an optimized fine grained gathering algorithm __global__ void populateNeighbours(int worklistLength) { int tId = blockIdx.x * blockDim.x + threadIdx.x; if (tId < worklistLength) { int vertex = d_worklist[tId]; printf("Thread %d: vertex = %d\n", tId, d_worklist[tId]); int edgeIndex = graph[0][vertex]; int index = d_prefixSum[tId], lastIndex = d_prefixSum[tId + 1]; //print("Thread: %d: vertex = %d, edgeIndex = %d, prefix = %d, lastIndex = %d\n", tId, vertex, edgeIndex, index, lastIndex); for (int i = 0; i < lastIndex - index; i++) { d_gatherWorklist[index + i] = graph[1][edgeIndex + i]; //d_parentInWorklist[index + i] = vertex; d_parentInWorklist[index + i] = d_distance[vertex] + graph[2][edgeIndex + i]; } } } int CudaGraphClass::gatherNeighbours(int worklistLength) { int numBlocksPerGrid = (worklistLength + 1 + numThreadsPerBlock - 1) / numThreadsPerBlock; cout << "Gather Neighbours: " << numThreadsPerBlock << ", " << numBlocksPerGrid << ", " << worklistLength << "\n"; populateNeighbours<<<numBlocksPerGrid, numThreadsPerBlock>>>(worklistLength); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); return 0; } __global__ void removeDuplicatesInGather(int worklistLength) { int tId = blockIdx.x * blockDim.x + threadIdx.x; if (tId != 0) return; int prevVertex = d_gatherWorklist[0], index = 1; for (int i = 1; i < worklistLength; i++) { if (prevVertex != d_gatherWorklist[i]) { d_gatherWorklist[index] = d_gatherWorklist[i]; prevVertex = d_gatherWorklist[i]; index++; } } d_worklistLength = index; } __global__ void processEdges(int worklistLength) { int tId = blockIdx.x * blockDim.x + threadIdx.x; __shared__ bool terminate; terminate = true; if (tId >= worklistLength) return; if (tId == 0 || d_gatherWorklist[tId] != d_gatherWorklist[tId - 1]) { int vertex = d_gatherWorklist[tId]; int min = d_distance[vertex], i = 0; while((tId + i) < worklistLength && d_gatherWorklist[tId + i] == vertex) { if (min > d_parentInWorklist[tId + i]) { min = d_parentInWorklist[tId + i]; terminate = false; } i++; } d_distance[vertex] = min; } if (terminate == false) { //printf("ThreadId : %d . 
Terminate = %d, d_terminate = %d\n", threadIdx.x, terminate, d_terminate); d_terminate = false; } } int CudaGraphClass::processNeighbours(int newWorklistLength) { int *deviceGatherWorklist, *deviceParentInWorklist; cudaError_t err; err = cudaMemcpyFromSymbol(&deviceGatherWorklist, d_gatherWorklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpyFromSymbol(&deviceParentInWorklist, d_parentInWorklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; thrust::device_ptr<int> dev_gatherWorklist(deviceGatherWorklist); thrust::device_ptr<int> dev_parentInWorklist(deviceParentInWorklist); // wrap raw pointer with a device_ptr //thrust::device_ptr<int> dev_gatherWorklist = thrust::device_pointer_cast(deviceGatherWorklist); thrust::sort_by_key(dev_gatherWorklist, dev_gatherWorklist + newWorklistLength, dev_parentInWorklist); int numBlocksPerGrid = (newWorklistLength + 1 + numThreadsPerBlock - 1) / numThreadsPerBlock; processEdges<<<numBlocksPerGrid, numThreadsPerBlock>>>(newWorklistLength); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); removeDuplicatesInGather<<<numBlocksPerGrid, numThreadsPerBlock>>>(newWorklistLength); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); return 0; } int CudaGraphClass::callSSSP() { int terminate = false, *distance, *parentInWorklist, *gatherWorklist, worklistLength = 1; bool worklistReallocated; cudaError_t err; distance = new int[(numVertices + 1)]; parentInWorklist = new int[(numEdges+ 1)]; gatherWorklist = new int[(numEdges + 1)]; //int numBlocksPerGrid = (numVertices + 1 + numThreadsPerBlock - 1) / numThreadsPerBlock; //cout << numThreadsPerBlock << ", " << numBlocksPerGrid << "\n"; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); while (terminate == false) { terminate = true; worklistReallocated = false; err = cudaMemcpyToSymbol(d_terminate, &terminate, sizeof(bool), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; gpuErrchk(cudaDeviceSynchronize()); int newWorklistLength; PrefixSum(worklistLength, &newWorklistLength); /*if (maxWorklistLength < newWorklistLength) { maxWorklistLength = newWorklistLength; worklistReallocated = true; //reallocDeviceMemory(d_worklist, maxWorklistLength); cout << "After Prefix Sum: Realloc\n"; int *deviceGatherWorklist, *newGatherWorklist; int *deviceParentInWorklist, *newParentInWorklist; err = cudaMemcpyFromSymbol(&deviceGatherWorklist, d_gatherWorklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "Gather worklist = " << deviceGatherWorklist << "\n"; err = cudaFree(deviceGatherWorklist); CUDA_ERR_CHECK; err = cudaMalloc((void **)&newGatherWorklist, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_gatherWorklist, &newGatherWorklist, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; err = cudaMemcpyFromSymbol(&deviceParentInWorklist, d_parentInWorklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaFree(deviceParentInWorklist); CUDA_ERR_CHECK; err = cudaMalloc((void **)&newParentInWorklist, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_parentInWorklist, &newParentInWorklist, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; delete[] parentInWorklist; delete[] gatherWorklist; parentInWorklist = new int[maxWorklistLength]; gatherWorklist = new int[maxWorklistLength]; cout << "Size of gather and parent = " << maxWorklistLength << "\n"; }*/ cout << "New WorkList in Host = " << 
newWorklistLength << "\n"; gatherNeighbours(worklistLength); int * tempdeviceGatherWorklist; err = cudaMemcpyFromSymbol(&tempdeviceGatherWorklist, d_gatherWorklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpy(gatherWorklist, tempdeviceGatherWorklist, newWorklistLength * sizeof(int), cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "Before removing duplicates: \n"; for (int i = 0; i < newWorklistLength; i++) out << "["<< i << "] = " << gatherWorklist[i] << " -- " << parentInWorklist[i] << " -> " << distance[gatherWorklist[i]] << endl; processNeighbours(newWorklistLength); int *deviceWorklist, *deviceGatherWorklist, *deviceParentInWorklist, *deviceDistance; err = cudaMemcpyFromSymbol(&deviceGatherWorklist, d_gatherWorklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpyFromSymbol(&deviceWorklist, d_worklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpyFromSymbol(&deviceParentInWorklist, d_parentInWorklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpyFromSymbol(&deviceDistance, d_distance, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpy(gatherWorklist, deviceGatherWorklist, newWorklistLength * sizeof(int), cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpy(parentInWorklist, deviceParentInWorklist, newWorklistLength * sizeof(int), cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaMemcpy(distance, deviceDistance, numVertices * sizeof(int), cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "New Worklist: \n"; //for (int i = 0; i < newWorklistLength; i++) // out << "["<< i << "] = " << gatherWorklist[i] << " -- " << parentInWorklist[i] << " -> " << distance[gatherWorklist[i]] << endl; //verifyGatherWorklist(gatherWorklist, newWorklistLength); err = cudaMemcpyFromSymbol(&terminate, d_terminate, sizeof(bool), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "Terminate: " << terminate << "\n"; //worklistLength = newWorklistLength; err = cudaMemcpyFromSymbol(&worklistLength, d_worklistLength, sizeof(int), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; cout << "After removing duplicated: " << worklistLength << "\n"; //cout << "New Worklist: \n"; for (int i = 0; i < worklistLength; i++) out << "["<< i << "] = " << gatherWorklist[i] << " -> " << distance[gatherWorklist[i]] << endl; /*if (worklistReallocated == true) { int *deviceWorklist, *newWorklist; err = cudaMemcpyFromSymbol(&deviceWorklist, d_worklist, sizeof(int *), 0, cudaMemcpyDeviceToHost); CUDA_ERR_CHECK; err = cudaFree(deviceWorklist); CUDA_ERR_CHECK; err = cudaMalloc((void **)&newWorklist, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_worklist, &newWorklist, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; }*/ // Swap worklist and gatherWorklist err = cudaMemcpyToSymbol(d_worklist, &deviceGatherWorklist, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; err = cudaMemcpyToSymbol(d_gatherWorklist, &deviceWorklist, sizeof(int *), 0, cudaMemcpyHostToDevice); CUDA_ERR_CHECK; } /*cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << "Elapsed time = " << milliseconds << "\n";*/ cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); cout << "Elapsed time = " << elapsedTime << " milliseconds \n"; cudaEventDestroy(start); cudaEventDestroy(stop); cout << "Shortest Distances: \n"; for (int i = 0; i <= 
numVertices; i++) out << "["<< i << "] = " << distance[i] << endl; delete[] distance; delete[] parentInWorklist; delete[] gatherWorklist; return 0; } int CudaGraphClass::copyGraphToDevice() { CUDA_SET_DEVICE_ID; gpuErrchk(cudaPeekAtLastError()); int *vertexArray, *edgeArray, *weightArray, *distance, *parent, *worklist, *worklist2; int *prefixSum, *blockPrefixSum, *prefixLevel; cudaError_t err; err = cudaMalloc((void **)&vertexArray, (numVertices + 2) * sizeof(int)); CUDA_ERR_CHECK; err = cudaMalloc((void **)&edgeArray, (numEdges + 1) * sizeof(int)); CUDA_ERR_CHECK; err = cudaMalloc((void **)&weightArray, (numEdges + 1) * sizeof(int)); CUDA_ERR_CHECK; err = cudaMalloc((void **)&distance, (numVertices + 1) * sizeof(int)); CUDA_ERR_CHECK; maxWorklistLength = numVertices + 2; err = cudaMalloc((void **)&worklist, numEdges/*maxWorklistLength*/ * sizeof(int)); CUDA_ERR_CHECK; err = cudaMalloc((void **)&worklist2, numEdges/*maxWorklistLength*/ * sizeof(int)); CUDA_ERR_CHECK; err = cudaMalloc((void **)&parent, numEdges/*maxWorklistLength*/ * sizeof(int)); CUDA_ERR_CHECK; err = cudaMalloc((void **)&prefixSum, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; maxNumBlocksPerGrid = 1024; err = cudaMalloc((void **)&blockPrefixSum, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = cudaMalloc((void **)&prefixLevel, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemset(prefixSum, 0x0, maxWorklistLength * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemset(blockPrefixSum, 0x0, maxNumBlocksPerGrid * sizeof(int)); CUDA_ERR_CHECK; err = cudaMemcpy(vertexArray, row[0], (numVertices + 2) * sizeof(int), cudaMemcpyHostToDevice); CUDA_ERR_CHECK; err = cudaMemcpy(edgeArray, row[1], (numEdges + 1) * sizeof(int), cudaMemcpyHostToDevice); CUDA_ERR_CHECK; err = cudaMemcpy(weightArray, row[2], (numEdges + 1) * sizeof(int), cudaMemcpyHostToDevice); CUDA_ERR_CHECK; err = cudaMemset(distance, 0x7f, (numVertices + 1) * sizeof(int)); CUDA_ERR_CHECK; CudaInitialize<<<1, 1>>>(vertexArray, edgeArray, weightArray, distance, worklist, worklist2, parent, prefixSum, blockPrefixSum, prefixLevel, numVertices, numEdges); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); return 0; } void CudaGraphClass::populate(char *fileName) { inputFile.open(fileName); if (!inputFile.is_open()){ cout << "invalid file"; return; } int **AdjMatrix, i, j, k; AdjMatrix = new int* [numVertices + 1](); for (i = 0; i <= numVertices; i++) { AdjMatrix[i] = new int [numVertices + 1](); } i = numEdges; int lastj = 0, currentIndex = 0; inputFile >> j >> k; srand(time(NULL)); while(i > 0) { //scanf("%d %d", &j, &k); inputFile >> j >> k; AdjMatrix[j][k] = 1; while (lastj <= j || lastj == 0) { if (lastj == 0) { row[0][0] = currentIndex; row[0][1] = currentIndex; }else { row[0][lastj] = currentIndex; } lastj++; } // if (AdjMatrix[k][j] != 1) row[1][currentIndex] = k; row[2][currentIndex] = (rand() % 2) ? 
rand() % 10 + 1/* - 10 */: rand() % 10 + 1; currentIndex ++; i--; } //row[1][0] = 0; // Sentinel node just points to the end of the last node in the graph while (lastj <= numVertices + 1) { row[0][lastj] = currentIndex; lastj++; } //row[0][lastj+1] = currentIndex; /* for (i = 0; i <= numVertices + 1; i++) print("Vertex: %d = %d\n", i, row[0][i]); print("Second Array:\n"); for (i = 0; i <= numEdges; i++) print("Edges: Index: %d, Value = %d\n", i, row[1][i]); */ j = 1; for (i = 1; i <= numVertices; i++) { currentIndex = row[0][i]; while (currentIndex < row[0][i+1]) { // print("%d %d\n", i, row[1][currentIndex]); if (AdjMatrix[i][row[1][currentIndex]] != 1 /*&& AdjMatrix[row[1][currentIndex]][i] != 1*/) { outs("\n\nGraph does not match at [%d][%d]. CurrentIndex = %d\n\n", i, row[1][currentIndex], currentIndex); break; } j++; currentIndex ++; } } for (i = 0; i <= numVertices; i++) { delete[] AdjMatrix[i]; } delete[] AdjMatrix; } void CudaGraphClass::printGraph() { CudaPrintGraph<<<1, 1>>>(); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); }
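/*
 * Reference sketch, not part of the original file: after thrust::sort_by_key has
 * grouped the gather worklist by destination vertex, processEdges() above takes a
 * segmented minimum of the tentative distances (d_parentInWorklist) and writes it
 * back to d_distance, clearing d_terminate when any distance improves. The kernel
 * below expresses the same relaxation with atomicMin and no prior sort; it is only
 * meant to clarify what the segmented minimum computes. relaxEdgesAtomic and
 * d_changed are hypothetical names used for this illustration.
 */
__global__ void relaxEdgesAtomic(const int *gatherWorklist, const int *tentativeDist,
                                 int *distance, bool *d_changed, int worklistLength)
{
    int tId = blockIdx.x * blockDim.x + threadIdx.x;
    if (tId >= worklistLength) return;
    int vertex = gatherWorklist[tId];
    int candidate = tentativeDist[tId];             // d_distance[parent] + edge weight
    int old = atomicMin(&distance[vertex], candidate);
    if (candidate < old)
        *d_changed = true;                          // same role as clearing d_terminate
}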
abf3a4b9e0173b04649efc1c6dcf22102a89e722.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::batch_norm(const Tensor& input, bool relu) { assert(input.numDim == 4); //Only support 4D BN for now BatchNorm *bn = new BatchNorm(*this, input, relu); layers.push_back(bn); return bn->outputs[0]; } /* locals[0] = scale locals[1] = bias */ BatchNorm::BatchNorm(FFModel& model, const Tensor& _input, bool _relu) : Op(model, "BatchNorm", _input), relu(_relu), profiling(model.config.profiling) { Context ctx = model.config.lg_ctx; HighLevelRuntime* runtime = model.config.lg_hlr; Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is); num_replica = part_rect.volume(); // Create output tensor int output_w = _input.adim[0]; int output_h = _input.adim[1]; int output_c = _input.adim[2]; int output_n = _input.adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1; int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1; FieldSpace fs = model.config.field_space; Rect<4> output_rect(Point<4>(0, 0, 0, 0), Point<4>(output_w-1, output_h-1, output_c-1, output_n-1)); IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect); LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs); int extent_w = (output_w + num_par_w - 1) / num_par_w; int extent_h = (output_h + num_par_h - 1) / num_par_h; int extent_c = output_c / num_par_c; int extent_n = output_n / num_par_n; assert(output_c % num_par_c == 0); assert(output_n % num_par_n == 0); Rect<4> ext(Point<4>(0, 0, 0, 0), Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1)); Transform<4, 4, coord_t> trans; for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) trans[i][j] = 0; trans[0][0] = extent_w; trans[1][1] = extent_h; trans[2][2] = extent_c; trans[3][3] = extent_n; IndexPartition output_ip = runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext); assert(runtime->is_index_partition_disjoint(ctx, output_ip)); assert(runtime->is_index_partition_complete(ctx, output_ip)); LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip); int bias_nc = num_replica * _input.adim[2]; /*input_channels*/ Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1); Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1); IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect); IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect); LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion bias_grad_lr = runtime->create_logical_region(ctx, 
bias_grad_is, fs); LogicalRegion scale_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs); IndexPartition bias_grad_ip = runtime->create_equal_partition(ctx, bias_grad_is, task_is); LogicalPartition bias_grad_lp = runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip); LogicalPartition scale_grad_lp = runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip); Parameter scale_tensor, bias_tensor; scale_tensor.region = scale_lr; scale_tensor.region_grad = scale_grad_lr; scale_tensor.part = LogicalPartition::NO_PART; scale_tensor.part_grad = scale_grad_lp; weights[0] = scale_tensor; bias_tensor.region = bias_lr; bias_tensor.region_grad = bias_grad_lr; bias_tensor.part = LogicalPartition::NO_PART; bias_tensor.part_grad = bias_grad_lp; weights[1] = bias_tensor; numWeights = 2; outputs[0] = _input; outputs[0].region = output_lr; outputs[0].part = output_lp; outputs[0].region_grad = output_grad_lr; outputs[0].part_grad = output_grad_lp; printf("Create bn layer: output(%d %d %d %d)\n", outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]); input_lps[0] = _input.part; } void BatchNorm::create_weights(FFModel& model) { // TODO assert(false); } void BatchNorm::create_output_and_partition(FFModel& model) { // TODO assert(false); } /* regions[0]: input regions[1]: output regions[2](I): scale regions[3](I): bias */ __host__ OpMeta* BatchNorm::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const BatchNorm* bm = (BatchNorm*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_scale( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); BatchNormMeta* m = new BatchNormMeta(handle); m->relu = bm->relu; m->mode = CUDNN_BATCHNORM_SPATIAL; #if CUDNN_VERSION >= 7000 m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; #endif checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor)); assert(acc_input.rect == acc_output.rect); int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1; int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1; int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1; int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, output_h, output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); //float *runningMean, *runningVar, *saveMean, *saveVar; checkCUDA(hipMalloc(&m->runningMean, sizeof(float) * output_c)); checkCUDA(hipMalloc(&m->runningVar, sizeof(float) * output_c)); checkCUDA(hipMalloc(&m->saveMean, 
sizeof(float) * output_c)); checkCUDA(hipMalloc(&m->saveVar, sizeof(float) * output_c)); if (m->relu) { checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } return m; } /* regions[0](O): scale, initilized to ones regions[1](O): bias, initilized to zeros */ __host__ void BatchNorm::init_para_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const BatchNorm* bm = (BatchNorm*) task->args; const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA); const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA); Rect<1> rect_scale, rect_bias; rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); // init kernel and bias #ifdef PARAMETER_ALL_ONES hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0, scale_ptr, rect_scale.volume()); hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0, bias_ptr, rect_bias.volume()); #else //hipStream_t stream; //checkCUDA(hipStreamCreate(&stream)); //hiprandGenerator_t genGPU; //hiprandCreateGenerator(&genGPU, HIPRAND_RNG_PSEUDO_DEFAULT); //hiprandSetStream(genGPU, stream); //hiprandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL); //hiprandGenerateUniform(genGPU, scale_ptr, rect_scale.volume()); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0, scale_ptr, rect_scale.volume(), 1.0f); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0, bias_ptr, rect_bias.volume(), 0.0f); //hiprandDestroyGenerator(genGPU); #endif } __host__ void BatchNorm::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; // First we initialize the scale and bias parameters { TaskLauncher para_launcher(BATCHNORM_INIT_PARA_TASK_ID, TaskArgument(NULL, 0)); para_launcher.add_region_requirement( RegionRequirement(weights[0].region, WRITE_DISCARD, EXCLUSIVE, weights[0].region)); para_launcher.add_field(0, FID_DATA); para_launcher.add_region_requirement( RegionRequirement(weights[1].region, WRITE_DISCARD, EXCLUSIVE, weights[1].region)); para_launcher.add_field(1, FID_DATA); runtime->execute_task(ctx, para_launcher); } Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { FFHandler handle = ff.handlers[idx++]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher init_launcher(BATCHNORM_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); init_launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); init_launcher.add_field(0, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, outputs[0].region)); init_launcher.add_field(1, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection 
id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); init_launcher.add_field(2, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); init_launcher.add_field(3, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, init_launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /* regions[0](I): input regions[1](O): ouptut regions[2](I): scale regions[3](I): bias */ __host__ void BatchNorm::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); float alpha = 1.0f, beta = 0.0f; const BatchNorm* bm = (BatchNorm*) task->args; const BatchNormMeta* m = *((BatchNormMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_scale( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); hipEvent_t t_start, t_end; if (bm->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif coord_t numChannels = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningMean, numChannels, 0.0f); hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningVar, numChannels, 0.0f); checkCUDNN(cudnnBatchNormalizationForwardTraining( m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, acc_input.ptr, m->outputTensor, acc_output.ptr, m->biasTensor, acc_scale.ptr, acc_bias.ptr, 1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); if (bm->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("BatchNorm forward time (BF) = %.2fms\n", elapsed); } } __host__ void BatchNorm::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); 
runtime->execute_index_space(ctx, launcher); } /* regions[0](I): input regions[1](I/O): input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): scale regions[5](I/O): scale_grad regions[6](I/O): bias_grad */ __host__ void BatchNorm::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 7); assert(task->regions.size() == 7); float alpha = 1.0f; //float beta = 0.0f; const BatchNorm* bm = (BatchNorm*) task->args; const BatchNormMeta* m = *((BatchNormMeta**) task->local_args); const AccessorRO<float, 4> acc_input(regions[0], FID_DATA); const AccessorRW<float, 4> acc_input_grad(regions[1], FID_DATA); const AccessorRO<float, 4> acc_output(regions[2], FID_DATA); const AccessorRW<float, 4> acc_output_grad(regions[3], FID_DATA); const AccessorRO<float, 1> acc_scale(regions[4], FID_DATA); const AccessorRW<float, 1> acc_scale_grad(regions[5], FID_DATA); const AccessorRW<float, 1> acc_bias_grad(regions[6], FID_DATA); Rect<4> rect_input, rect_input_grad, rect_output, rect_output_grad; Rect<1> rect_scale, rect_scale_grad, rect_bias_grad; rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_input_grad = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_output = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_output_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); rect_scale = runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space()); rect_scale_grad = runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space()); rect_bias_grad = runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space()); // make sure all regions are dense assert(acc_input.accessor.is_dense_arbitrary(rect_input)); assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad)); assert(acc_output.accessor.is_dense_arbitrary(rect_output)); assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad)); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_scale_grad.accessor.is_dense_arbitrary(rect_scale_grad)); assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad)); const float *input_ptr = acc_input.ptr(rect_input.lo); float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo); const float *output_ptr = acc_output.ptr(rect_output.lo); float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo); const float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *scale_grad_ptr = acc_scale_grad.ptr(rect_scale_grad.lo); float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo); hipEvent_t t_start, t_end; if (bm->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start); } hipStream_t stream; checkCUDA(hipStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); if (m->relu) { int n = rect_output.volume(); hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, n); } checkCUDNN(cudnnBatchNormalizationBackward( m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr, scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); if (bm->profiling) { hipEventRecord(t_end); checkCUDA(hipEventSynchronize(t_end)); float 
elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("BatchNorm backward time = %.2fms\n", elapsed); } #endif } __host__ void BatchNorm::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](I/O): input_grad (we only need grad tensors) launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(4, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(5, FID_DATA); // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(6, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); } bool BatchNorm::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { //TODO: implement measure_forward return false; }
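/*
 * Explanatory sketch, not part of the original file: with CUDNN_BATCHNORM_SPATIAL
 * (or the persistent variant selected above), the forward-training call normalizes
 * every channel c over the N, H and W dimensions:
 *   y = scale[c] * (x - mean[c]) / sqrt(var[c] + eps) + bias[c]
 * The kernel below spells that out for an NCHW float tensor once the per-channel
 * mean and variance are known. It is a readability aid, not a replacement for the
 * library call; the name batchNormApply is hypothetical.
 */
__global__ void batchNormApply(const float *x, float *y,
                               const float *scale, const float *bias,
                               const float *mean, const float *var,
                               int n, int c, int h, int w, float eps)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int total = n * c * h * w;
    if (idx >= total) return;
    int channel = (idx / (h * w)) % c;              // NCHW: idx = ((n*C + c)*H + h)*W + w
    float invStd = rsqrtf(var[channel] + eps);
    y[idx] = scale[channel] * (x[idx] - mean[channel]) * invStd + bias[channel];
}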
abf3a4b9e0173b04649efc1c6dcf22102a89e722.cu
/* Copyright 2017 Stanford, NVIDIA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::batch_norm(const Tensor& input, bool relu) { assert(input.numDim == 4); //Only support 4D BN for now BatchNorm *bn = new BatchNorm(*this, input, relu); layers.push_back(bn); return bn->outputs[0]; } /* locals[0] = scale locals[1] = bias */ BatchNorm::BatchNorm(FFModel& model, const Tensor& _input, bool _relu) : Op(model, "BatchNorm", _input), relu(_relu), profiling(model.config.profiling) { Context ctx = model.config.lg_ctx; HighLevelRuntime* runtime = model.config.lg_hlr; Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is); num_replica = part_rect.volume(); // Create output tensor int output_w = _input.adim[0]; int output_h = _input.adim[1]; int output_c = _input.adim[2]; int output_n = _input.adim[3]; int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1; int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1; int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1; FieldSpace fs = model.config.field_space; Rect<4> output_rect(Point<4>(0, 0, 0, 0), Point<4>(output_w-1, output_h-1, output_c-1, output_n-1)); IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect); LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs); int extent_w = (output_w + num_par_w - 1) / num_par_w; int extent_h = (output_h + num_par_h - 1) / num_par_h; int extent_c = output_c / num_par_c; int extent_n = output_n / num_par_n; assert(output_c % num_par_c == 0); assert(output_n % num_par_n == 0); Rect<4> ext(Point<4>(0, 0, 0, 0), Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1)); Transform<4, 4, coord_t> trans; for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) trans[i][j] = 0; trans[0][0] = extent_w; trans[1][1] = extent_h; trans[2][2] = extent_c; trans[3][3] = extent_n; IndexPartition output_ip = runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext); assert(runtime->is_index_partition_disjoint(ctx, output_ip)); assert(runtime->is_index_partition_complete(ctx, output_ip)); LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip); int bias_nc = num_replica * _input.adim[2]; /*input_channels*/ Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1); Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1); IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect); IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect); LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs); LogicalRegion bias_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs); LogicalRegion scale_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, 
fs); IndexPartition bias_grad_ip = runtime->create_equal_partition(ctx, bias_grad_is, task_is); LogicalPartition bias_grad_lp = runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip); LogicalPartition scale_grad_lp = runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip); Parameter scale_tensor, bias_tensor; scale_tensor.region = scale_lr; scale_tensor.region_grad = scale_grad_lr; scale_tensor.part = LogicalPartition::NO_PART; scale_tensor.part_grad = scale_grad_lp; weights[0] = scale_tensor; bias_tensor.region = bias_lr; bias_tensor.region_grad = bias_grad_lr; bias_tensor.part = LogicalPartition::NO_PART; bias_tensor.part_grad = bias_grad_lp; weights[1] = bias_tensor; numWeights = 2; outputs[0] = _input; outputs[0].region = output_lr; outputs[0].part = output_lp; outputs[0].region_grad = output_grad_lr; outputs[0].part_grad = output_grad_lp; printf("Create bn layer: output(%d %d %d %d)\n", outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]); input_lps[0] = _input.part; } void BatchNorm::create_weights(FFModel& model) { // TODO assert(false); } void BatchNorm::create_output_and_partition(FFModel& model) { // TODO assert(false); } /* regions[0]: input regions[1]: output regions[2](I): scale regions[3](I): bias */ __host__ OpMeta* BatchNorm::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); const BatchNorm* bm = (BatchNorm*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_scale( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); BatchNormMeta* m = new BatchNormMeta(handle); m->relu = bm->relu; m->mode = CUDNN_BATCHNORM_SPATIAL; #if CUDNN_VERSION >= 7000 m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; #endif checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor)); assert(acc_input.rect == acc_output.rect); int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1; int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1; int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1; int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1; int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1; checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, output_h, output_w)); checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, output_c, 1, 1)); //float *runningMean, *runningVar, *saveMean, *saveVar; checkCUDA(cudaMalloc(&m->runningMean, sizeof(float) * output_c)); checkCUDA(cudaMalloc(&m->runningVar, sizeof(float) * output_c)); checkCUDA(cudaMalloc(&m->saveMean, sizeof(float) * output_c)); checkCUDA(cudaMalloc(&m->saveVar, sizeof(float) * output_c)); if 
(m->relu) { checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); } return m; } /* regions[0](O): scale, initilized to ones regions[1](O): bias, initilized to zeros */ __host__ void BatchNorm::init_para_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 2); assert(task->regions.size() == 2); //const BatchNorm* bm = (BatchNorm*) task->args; const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA); const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA); Rect<1> rect_scale, rect_bias; rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_bias.accessor.is_dense_arbitrary(rect_bias)); float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *bias_ptr = acc_bias.ptr(rect_bias.lo); // init kernel and bias #ifdef PARAMETER_ALL_ONES ones_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>( scale_ptr, rect_scale.volume()); ones_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>( bias_ptr, rect_bias.volume()); #else //cudaStream_t stream; //checkCUDA(cudaStreamCreate(&stream)); //curandGenerator_t genGPU; //curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_DEFAULT); //curandSetStream(genGPU, stream); //curandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL); //curandGenerateUniform(genGPU, scale_ptr, rect_scale.volume()); assign_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>( scale_ptr, rect_scale.volume(), 1.0f); assign_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>( bias_ptr, rect_bias.volume(), 0.0f); //curandDestroyGenerator(genGPU); #endif } __host__ void BatchNorm::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; // First we initialize the scale and bias parameters { TaskLauncher para_launcher(BATCHNORM_INIT_PARA_TASK_ID, TaskArgument(NULL, 0)); para_launcher.add_region_requirement( RegionRequirement(weights[0].region, WRITE_DISCARD, EXCLUSIVE, weights[0].region)); para_launcher.add_field(0, FID_DATA); para_launcher.add_region_requirement( RegionRequirement(weights[1].region, WRITE_DISCARD, EXCLUSIVE, weights[1].region)); para_launcher.add_field(1, FID_DATA); runtime->execute_task(ctx, para_launcher); } Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { FFHandler handle = ff.handlers[idx++]; argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher init_launcher(BATCHNORM_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); init_launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); init_launcher.add_field(0, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, outputs[0].region)); init_launcher.add_field(1, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); init_launcher.add_field(2, FID_DATA); init_launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); 
init_launcher.add_field(3, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, init_launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /* regions[0](I): input regions[1](O): ouptut regions[2](I): scale regions[3](I): bias */ __host__ void BatchNorm::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == 4); assert(task->regions.size() == 4); float alpha = 1.0f, beta = 0.0f; const BatchNorm* bm = (BatchNorm*) task->args; const BatchNormMeta* m = *((BatchNormMeta**) task->local_args); TensorAccessorR<float, 4> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, 4> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_scale( regions[2], task->regions[2], FID_DATA, ctx, runtime); TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); cudaEvent_t t_start, t_end; if (bm->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } #ifndef DISABLE_LEGION_CUDA_HIJACK cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); #endif coord_t numChannels = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1; assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningMean, numChannels, 0.0f); assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningVar, numChannels, 0.0f); checkCUDNN(cudnnBatchNormalizationForwardTraining( m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, acc_input.ptr, m->outputTensor, acc_output.ptr, m->biasTensor, acc_scale.ptr, acc_bias.ptr, 1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); if (bm->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("BatchNorm forward time (BF) = %.2fms\n", elapsed); } } __host__ void BatchNorm::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_DISCARD, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[1].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); runtime->execute_index_space(ctx, launcher); } /* regions[0](I): input regions[1](I/O): input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): scale regions[5](I/O): scale_grad regions[6](I/O): bias_grad */ __host__ void BatchNorm::backward_task(const Task *task, const 
std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { #ifndef DISABLE_COMPUTATION assert(regions.size() == 7); assert(task->regions.size() == 7); float alpha = 1.0f; //float beta = 0.0f; const BatchNorm* bm = (BatchNorm*) task->args; const BatchNormMeta* m = *((BatchNormMeta**) task->local_args); const AccessorRO<float, 4> acc_input(regions[0], FID_DATA); const AccessorRW<float, 4> acc_input_grad(regions[1], FID_DATA); const AccessorRO<float, 4> acc_output(regions[2], FID_DATA); const AccessorRW<float, 4> acc_output_grad(regions[3], FID_DATA); const AccessorRO<float, 1> acc_scale(regions[4], FID_DATA); const AccessorRW<float, 1> acc_scale_grad(regions[5], FID_DATA); const AccessorRW<float, 1> acc_bias_grad(regions[6], FID_DATA); Rect<4> rect_input, rect_input_grad, rect_output, rect_output_grad; Rect<1> rect_scale, rect_scale_grad, rect_bias_grad; rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); rect_input_grad = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); rect_output = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space()); rect_output_grad = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space()); rect_scale = runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space()); rect_scale_grad = runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space()); rect_bias_grad = runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space()); // make sure all regions are dense assert(acc_input.accessor.is_dense_arbitrary(rect_input)); assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad)); assert(acc_output.accessor.is_dense_arbitrary(rect_output)); assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad)); assert(acc_scale.accessor.is_dense_arbitrary(rect_scale)); assert(acc_scale_grad.accessor.is_dense_arbitrary(rect_scale_grad)); assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad)); const float *input_ptr = acc_input.ptr(rect_input.lo); float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo); const float *output_ptr = acc_output.ptr(rect_output.lo); float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo); const float *scale_ptr = acc_scale.ptr(rect_scale.lo); float *scale_grad_ptr = acc_scale_grad.ptr(rect_scale_grad.lo); float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo); cudaEvent_t t_start, t_end; if (bm->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start); } cudaStream_t stream; checkCUDA(cudaStreamCreate(&stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); if (m->relu) { int n = rect_output.volume(); reluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(output_grad_ptr, output_ptr, n); } checkCUDNN(cudnnBatchNormalizationBackward( m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha, m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr, m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr, scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON, m->saveMean, m->saveVar)); if (bm->profiling) { cudaEventRecord(t_end); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("BatchNorm backward time = %.2fms\n", elapsed); } #endif } __host__ void BatchNorm::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = 
ff.config.lg_hlr; Rect<4> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<4> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(BatchNorm)), argmap); // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); // regions[1](I/O): input_grad (we only need grad tensors) launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(1, FID_DATA); // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].region, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(4, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(5, FID_DATA); // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(6, FID_DATA); FutureMap fm = runtime->execute_index_space(ctx, launcher); } bool BatchNorm::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { //TODO: implement measure_forward return false; }
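The profiling branches in BatchNorm::forward_task and BatchNorm::backward_task above time the cuDNN calls with CUDA events. The snippet below is a minimal, self-contained illustration of that event-timing pattern only; dummy_kernel, the sizes, and the buffer names are invented for this example and are not part of FlexFlow.

// Minimal sketch of the cudaEvent timing pattern used by the profiling branches above.
// The kernel and sizes here are hypothetical; only the event API usage mirrors the file.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float* d_x;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  cudaEvent_t t_start, t_end;
  cudaEventCreate(&t_start);
  cudaEventCreate(&t_end);
  cudaEventRecord(t_start);                        // record on the default stream
  dummy_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaEventRecord(t_end);
  cudaEventSynchronize(t_end);                     // wait until work before t_end is done
  float elapsed = 0.0f;
  cudaEventElapsedTime(&elapsed, t_start, t_end);  // milliseconds
  printf("kernel time = %.3f ms\n", elapsed);
  cudaEventDestroy(t_start);
  cudaEventDestroy(t_end);
  cudaFree(d_x);
  return 0;
}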
a452999c399cf698a91e71d835cc268c2ed0f0c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* The implementation of this file is based on skipLayerNorm plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "layer_norm.cuh" #include "skip_layer_norm_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { template <typename T, unsigned TPB> __global__ void SkipLayerNormKernelSmall( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, T* output) { const T reverse_ld = T(1) / T(ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 hipcub::KeyValuePair<T, T> thread_data(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = (bias == nullptr) ? input[idx] + skip[idx] : input[idx] + skip[idx] + bias[threadIdx.x]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val)); } LayerNormSmall<T, TPB>(val, thread_data, ld, idx, beta, gamma, output); } template <typename T, unsigned TPB> __global__ void SkipLayerNormKernel( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, T* output) { const T reverse_ld = T(1) / T(ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 hipcub::KeyValuePair<T, T> thread_data(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = (bias == nullptr) ? 
input[idx] + skip[idx] : input[idx] + skip[idx] + bias[i]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val)); output[idx] = val; } LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, output); } template <typename T> bool ComputeSkipLayerNorm( hipStream_t stream, const int ld, const int n, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, T* output) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int grid_size = n / ld; if (ld <= 32) { constexpr int block_size = 32; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, output); } else if (ld <= 128) { constexpr int block_size = 128; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, output); } else if (ld == 384) { constexpr int block_size = 384; hipLaunchKernelGGL(( SkipLayerNormKernelSmall<T, block_size>) , dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, output); } else { constexpr int block_size = 256; hipLaunchKernelGGL(( SkipLayerNormKernel<T, block_size>), dim3(grid_size), dim3(block_size), 0, stream, ld, input, skip, beta, gamma, bias, output); } return CUDA_CALL(hipPeekAtLastError()); } bool LaunchSkipLayerNormKernel( void* output, const void* input, const void* skip, const void* gamma, const void* beta, const void* bias, const int batch_size, const int hidden_size, const int element_count, const size_t element_size) { // use default stream const hipStream_t stream = nullptr; if (element_size == 2) { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const half*>(input), reinterpret_cast<const half*>(skip), reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(bias), reinterpret_cast<half*>(output)); } else { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const float*>(input), reinterpret_cast<const float*>(skip), reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma), reinterpret_cast<const float*>(bias), reinterpret_cast<float*>(output)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
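The .hip file above is the hipified counterpart of the .cu file that follows: every CUDA triple-chevron launch has been rewritten as hipLaunchKernelGGL. The sketch below shows that mapping in isolation, using a toy scale_kernel that is not part of either file.

// Sketch of how a CUDA triple-chevron launch maps onto hipLaunchKernelGGL.
// The kernel here is a toy; only the launch syntax mirrors the conversion above.
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n, hipStream_t stream) {
  const int block = 256;
  const int grid = (n + block - 1) / block;
  // CUDA form:  scale_kernel<<<grid, block, 0, stream>>>(d_x, a, n);
  // HIP form produced by hipify:
  hipLaunchKernelGGL(scale_kernel, dim3(grid), dim3(block), 0 /*shared mem*/, stream,
                     d_x, a, n);
}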
a452999c399cf698a91e71d835cc268c2ed0f0c4.cu
/* The implementation of this file is based on skipLayerNorm plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "layer_norm.cuh" #include "skip_layer_norm_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { template <typename T, unsigned TPB> __global__ void SkipLayerNormKernelSmall( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, T* output) { const T reverse_ld = T(1) / T(ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 cub::KeyValuePair<T, T> thread_data(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = (bias == nullptr) ? input[idx] + skip[idx] : input[idx] + skip[idx] + bias[threadIdx.x]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val)); } LayerNormSmall<T, TPB>(val, thread_data, ld, idx, beta, gamma, output); } template <typename T, unsigned TPB> __global__ void SkipLayerNormKernel( const int ld, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, T* output) { const T reverse_ld = T(1) / T(ld); const int offset = blockIdx.x * ld; KeyValuePairSum pair_sum; // reduce x and x^2 cub::KeyValuePair<T, T> thread_data(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = (bias == nullptr) ? 
input[idx] + skip[idx] : input[idx] + skip[idx] + bias[i]; const T rldval = reverse_ld * val; thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val)); output[idx] = val; } LayerNorm<T, TPB>(thread_data, ld, offset, beta, gamma, output); } template <typename T> bool ComputeSkipLayerNorm( cudaStream_t stream, const int ld, const int n, const T* input, const T* skip, const T* beta, const T* gamma, const T* bias, T* output) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int grid_size = n / ld; if (ld <= 32) { constexpr int block_size = 32; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, output); } else if (ld <= 128) { constexpr int block_size = 128; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, output); } else if (ld == 384) { constexpr int block_size = 384; SkipLayerNormKernelSmall<T, block_size> <<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, output); } else { constexpr int block_size = 256; SkipLayerNormKernel<T, block_size><<<grid_size, block_size, 0, stream>>>(ld, input, skip, beta, gamma, bias, output); } return CUDA_CALL(cudaPeekAtLastError()); } bool LaunchSkipLayerNormKernel( void* output, const void* input, const void* skip, const void* gamma, const void* beta, const void* bias, const int batch_size, const int hidden_size, const int element_count, const size_t element_size) { // use default stream const cudaStream_t stream = nullptr; if (element_size == 2) { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const half*>(input), reinterpret_cast<const half*>(skip), reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(bias), reinterpret_cast<half*>(output)); } else { return ComputeSkipLayerNorm( stream, hidden_size, element_count, reinterpret_cast<const float*>(input), reinterpret_cast<const float*>(skip), reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma), reinterpret_cast<const float*>(bias), reinterpret_cast<float*>(output)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
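Both SkipLayerNorm kernels above accumulate (x/ld, x*x/ld) pairs per thread and hand them to LayerNorm/LayerNormSmall (defined in layer_norm.cuh, not shown here) to finish the mean/variance reduction. The standalone sketch below illustrates the same sum / sum-of-squares block reduction with cub::BlockReduce; RowMeanRstd, the float-only simplification, and the epsilon value are choices made for this example, not the actual LayerNorm implementation.

// Standalone sketch of the (sum, sum-of-squares) block reduction that the
// SkipLayerNorm kernels feed into LayerNorm/LayerNormSmall. Simplified to
// float, one block per row; names and epsilon are invented for the example.
#include <cub/cub.cuh>

template <int TPB>
__global__ void RowMeanRstd(const float* x, int ld, float* mean, float* rstd) {
  using BlockReduce = cub::BlockReduce<float, TPB>;
  __shared__ typename BlockReduce::TempStorage tmp_sum;
  __shared__ typename BlockReduce::TempStorage tmp_sq;

  const float* row = x + blockIdx.x * ld;
  float sum = 0.f, sum_sq = 0.f;
  for (int i = threadIdx.x; i < ld; i += TPB) {
    float v = row[i];
    sum += v;
    sum_sq += v * v;
  }
  float total    = BlockReduce(tmp_sum).Sum(sum);    // result valid on thread 0 only
  float total_sq = BlockReduce(tmp_sq).Sum(sum_sq);
  if (threadIdx.x == 0) {
    float mu = total / ld;
    mean[blockIdx.x] = mu;
    rstd[blockIdx.x] = rsqrtf(total_sq / ld - mu * mu + 1e-12f);
  }
}
// Launch, e.g.: RowMeanRstd<256><<<num_rows, 256>>>(d_x, hidden_size, d_mean, d_rstd);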
f33e21ef893bd823b1b48360851e5d11c013637f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2018,2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/function/mean.hpp> #include <nbla/cuda/math.hpp> #include <nbla/cuda/utils/block_reduce.cuh> namespace nbla { template <typename T> using Accum = typename CudaTypeForceFloat<T>::type; template <typename T> __global__ void kernel_reduce_per_block(const int N, const T *x, T *buff, const Accum<T> scale = Accum<T>(1)) { Accum<T> thread_data = 0; NBLA_CUDA_KERNEL_LOOP(i, N) { thread_data += (Accum<T>)x[i]; } thread_data = blockReduceSum(thread_data); if (threadIdx.x == 0) { buff[blockIdx.x] = thread_data * scale; } } template <typename T> void MeanCuda<T>::forward_impl_reduce(const T *x_, T *y_, int outer_size, int reduction_size) { const Tc *x = reinterpret_cast<const Tc *>(x_); Tc *y = reinterpret_cast<Tc *>(y_); cuda_set_device(this->device_); const Accum<T> scale = Accum<T>(1) / reduction_size; if (reduction_size / outer_size < 2048) { const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( reduction_size, get_dtype<Tc>(), this->ctx_)); cuda_gemv<Tc>(this->device_, y, x, reduction_size, outer_size, true, ones, reduction_size, scale, 0); } else if (reduction_size > 1024) { const int threads = NBLA_CUDA_NUM_THREADS; const int blocks = min(NBLA_CUDA_GET_BLOCKS(reduction_size), 1024); NdArray arr_buff({blocks}); Tc *buff = arr_buff.cast(get_dtype<Tc>(), this->ctx_, true)->pointer<Tc>(); while (outer_size--) { hipLaunchKernelGGL(( kernel_reduce_per_block<Tc>) , dim3(blocks), dim3(threads), 0, 0, reduction_size, x, buff, scale); NBLA_CUDA_KERNEL_CHECK(); hipLaunchKernelGGL(( kernel_reduce_per_block<Tc>), dim3(1), dim3(1024), 0, 0, blocks, buff, y); NBLA_CUDA_KERNEL_CHECK(); x += reduction_size; y += 1; } } else { while (outer_size--) { hipLaunchKernelGGL(( kernel_reduce_per_block<Tc>), dim3(1), dim3(1024), 0, 0, reduction_size, x, y, scale); NBLA_CUDA_KERNEL_CHECK(); x += reduction_size; y += 1; } } } template <typename T, bool accum> __global__ void kernel_reduce_mean_backward(const int num, T *dx, const T *dy, T scale) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + scale * (*dy); } } template <typename T> void MeanCuda<T>::backward_impl_reduce(const T *dy_, T *dx_, int outer_size, int reduction_size, bool accum) { const Tc *dy = reinterpret_cast<const Tc *>(dy_); Tc *dx = reinterpret_cast<Tc *>(dx_); cuda_set_device(this->device_); if (outer_size == 1) { if (accum) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_mean_backward<Tc, true>), reduction_size, dx, dy, (T)(1. / reduction_size)); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_mean_backward<Tc, false>), reduction_size, dx, dy, (T)(1. 
/ reduction_size)); } return; } const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( reduction_size, get_dtype<Tc>(), this->ctx_)); cuda_gemm<Tc>(this->device_, dx, true, dy, outer_size, 1, false, ones, 1, reduction_size, false, 1. / reduction_size, accum ? 1 : 0); } } // namespace nbla
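MeanCuda::forward_impl_reduce above takes a fast path that computes per-row means as a matrix-vector product against a vector of ones (through nbla's cuda_gemv wrapper). The sketch below shows the same trick with raw cuBLAS; the function names and the row-major [outer_size][reduction_size] layout assumption are this example's, not NNabla's.

// Sketch of the "row sum as GEMV with a ones vector" trick used by the fast
// path above. Raw cuBLAS instead of nbla's cuda_gemv wrapper; the cuBLAS
// handle is assumed to use the default stream so the fill is ordered first.
#include <cuda_runtime.h>
#include <cublas_v2.h>

__global__ void fill_ones(float* v, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] = 1.0f;
}

// x is row-major [outer_size][reduction_size]; y receives the mean of each row.
void row_means(cublasHandle_t handle, const float* x, float* y,
               int outer_size, int reduction_size,
               float* ones /* device buffer, length reduction_size */) {
  fill_ones<<<(reduction_size + 255) / 256, 256>>>(ones, reduction_size);
  const float alpha = 1.0f / reduction_size;
  const float beta = 0.0f;
  // Interpreted column-major, x is a (reduction_size x outer_size) matrix, so
  // op(A) = A^T times the ones vector yields one scaled sum per original row.
  cublasSgemv(handle, CUBLAS_OP_T, reduction_size, outer_size,
              &alpha, x, reduction_size, ones, 1, &beta, y, 1);
}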
f33e21ef893bd823b1b48360851e5d11c013637f.cu
// Copyright 2018,2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/function/mean.hpp> #include <nbla/cuda/math.hpp> #include <nbla/cuda/utils/block_reduce.cuh> namespace nbla { template <typename T> using Accum = typename CudaTypeForceFloat<T>::type; template <typename T> __global__ void kernel_reduce_per_block(const int N, const T *x, T *buff, const Accum<T> scale = Accum<T>(1)) { Accum<T> thread_data = 0; NBLA_CUDA_KERNEL_LOOP(i, N) { thread_data += (Accum<T>)x[i]; } thread_data = blockReduceSum(thread_data); if (threadIdx.x == 0) { buff[blockIdx.x] = thread_data * scale; } } template <typename T> void MeanCuda<T>::forward_impl_reduce(const T *x_, T *y_, int outer_size, int reduction_size) { const Tc *x = reinterpret_cast<const Tc *>(x_); Tc *y = reinterpret_cast<Tc *>(y_); cuda_set_device(this->device_); const Accum<T> scale = Accum<T>(1) / reduction_size; if (reduction_size / outer_size < 2048) { const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( reduction_size, get_dtype<Tc>(), this->ctx_)); cuda_gemv<Tc>(this->device_, y, x, reduction_size, outer_size, true, ones, reduction_size, scale, 0); } else if (reduction_size > 1024) { const int threads = NBLA_CUDA_NUM_THREADS; const int blocks = min(NBLA_CUDA_GET_BLOCKS(reduction_size), 1024); NdArray arr_buff({blocks}); Tc *buff = arr_buff.cast(get_dtype<Tc>(), this->ctx_, true)->pointer<Tc>(); while (outer_size--) { kernel_reduce_per_block<Tc> <<<blocks, threads>>>(reduction_size, x, buff, scale); NBLA_CUDA_KERNEL_CHECK(); kernel_reduce_per_block<Tc><<<1, 1024>>>(blocks, buff, y); NBLA_CUDA_KERNEL_CHECK(); x += reduction_size; y += 1; } } else { while (outer_size--) { kernel_reduce_per_block<Tc><<<1, 1024>>>(reduction_size, x, y, scale); NBLA_CUDA_KERNEL_CHECK(); x += reduction_size; y += 1; } } } template <typename T, bool accum> __global__ void kernel_reduce_mean_backward(const int num, T *dx, const T *dy, T scale) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + scale * (*dy); } } template <typename T> void MeanCuda<T>::backward_impl_reduce(const T *dy_, T *dx_, int outer_size, int reduction_size, bool accum) { const Tc *dy = reinterpret_cast<const Tc *>(dy_); Tc *dx = reinterpret_cast<Tc *>(dx_); cuda_set_device(this->device_); if (outer_size == 1) { if (accum) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_mean_backward<Tc, true>), reduction_size, dx, dy, (T)(1. / reduction_size)); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_reduce_mean_backward<Tc, false>), reduction_size, dx, dy, (T)(1. / reduction_size)); } return; } const Tc *ones = static_cast<const Tc *>(SingletonManager::get<NNabla>()->ones( reduction_size, get_dtype<Tc>(), this->ctx_)); cuda_gemm<Tc>(this->device_, dx, true, dy, outer_size, 1, false, ones, 1, reduction_size, false, 1. / reduction_size, accum ? 1 : 0); } } // namespace nbla
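kernel_reduce_per_block in both versions of this file calls a blockReduceSum helper that lives in nbla/cuda/utils/block_reduce.cuh and is not shown here. The snippet below is a generic warp-shuffle implementation of that kind of helper, included only for illustration; it is a common pattern, not NNabla's actual code.

// Generic warp-shuffle block reduction of the kind kernel_reduce_per_block
// depends on (blockReduceSum). Common pattern, not the block_reduce.cuh code.
#include <cuda_runtime.h>

__inline__ __device__ float warpReduceSum(float val) {
  for (int offset = warpSize / 2; offset > 0; offset /= 2)
    val += __shfl_down_sync(0xffffffff, val, offset);
  return val;
}

__inline__ __device__ float blockReduceSum(float val) {
  static __shared__ float shared[32];   // one slot per warp
  int lane = threadIdx.x % warpSize;
  int wid = threadIdx.x / warpSize;
  val = warpReduceSum(val);             // reduce within each warp
  if (lane == 0) shared[wid] = val;     // publish per-warp partial sums
  __syncthreads();
  // read the partials back into the first warp and reduce once more
  val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0f;
  if (wid == 0) val = warpReduceSum(val);
  return val;                           // final sum is valid in thread 0
}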
fc8e66257c78118c7f9b2d6c652aed1fec8bc167.hip
// !!! This is a file automatically generated by hipify!!!
#include "figures.h"
#include "hip/hip_runtime_api.h"
#include "../cuda_numerics/float3.h"
#include "../cuda_numerics/float2.h"
#include "stdio.h"

#define EPSILON 10e-15f

FUNC_PREF Triangle make_triangle(float3 p0, float3 p1, float3 p2)
{
	Triangle T;
	T.p0 = p0;
	T.p1 = p1;
	T.p2 = p2;
	return T;
}

FUNC_PREF void Triangle::set(float3 p0, float3 p1, float3 p2)
{
	this->p0 = p0;
	this->p1 = p1;
	this->p2 = p2;
}

FUNC_PREF void Triangle::set(InnerPoint A, InnerPoint B, InnerPoint C)
{
	p0 = A.p; n0 = A.n; uv0 = A.uv;
	p1 = B.p; n1 = B.n; uv1 = B.uv;
	p2 = C.p; n2 = C.n; uv2 = C.uv;
}

/*
   p0
   | \
   p1-p2
*/
FUNC_PREF float3 Triangle::normal() const // p0, p1, p2 are listed clockwise, but the coordinate system is right-handed; z points away from the viewer
{
	return norma(cross(p1-p0, p2-p0));
}

FUNC_PREF Ray reflect(Ray ray, float3 normal)
{
	return make_ray(ray.org, ray.dir - 2 * dot(ray.dir, normal) * normal);
}

//-----------------------------------------------------------------

FUNC_PREF float3 Triangle::interpolatePoint(Baricentric coords) const
{
	return coords.alpha() * p0 + coords.beta() * p1 + coords.gamma() * p2;
}

FUNC_PREF float3 Triangle::interpolateNormal(Baricentric coords) const
{
	return coords.alpha() * n0 + coords.beta() * n1 + coords.gamma() * n2;
}

FUNC_PREF float2 Triangle::interpolateUV(Baricentric coords) const
{
	return coords.alpha() * uv0 + coords.beta() * uv1 + coords.gamma() * uv2;
}

FUNC_PREF InnerPoint Triangle::interpolate(Baricentric coords) const
{
	InnerPoint P;
	P.p  = interpolatePoint (coords);
	P.n  = interpolateNormal(coords);
	P.uv = interpolateUV (coords);
	return P;
}

FUNC_PREF InnerPoint Triangle::interpolate(float u, float v) const
{
	InnerPoint P;
	float alpha = 1.0f - u - v;
	P.p  = alpha * p0  + u * p1  + v * p2;
	P.n  = alpha * n0  + u * n1  + v * n2;
	P.uv = alpha * uv0 + u * uv1 + v * uv2;
	return P;
}

FUNC_PREF void Triangle::displace(float h0, float h1, float h2)
{
//	p0 = p0 + norma(n0)*h0;
//	p1 = p1 + norma(n1)*h1;
//	p2 = p2 + norma(n2)*h2;
	p0 = p0 + n0*h0;
	p1 = p1 + n1*h1;
	p2 = p2 + n2*h2;
}

FUNC_PREF void Triangle::setDefaultNormals()
{
	n0 = n1 = n2 = normal();
}

FUNC_PREF Triangle Triangle::getMicrotriangle(Baricentric uva, Baricentric uvb, Baricentric uvc) const
{
	Triangle T;
	T.p0 = interpolatePoint(uva);
	T.p1 = interpolatePoint(uvb);
	T.p2 = interpolatePoint(uvc);
	T.n0 = interpolateNormal(uva);
	T.n1 = interpolateNormal(uvb);
	T.n2 = interpolateNormal(uvc);
	T.uv0 = interpolateUV(uva);
	T.uv1 = interpolateUV(uvb);
	T.uv2 = interpolateUV(uvc);
	return T;
}

FUNC_PREF InnerPoint Triangle::getVertex0() const
{
	InnerPoint P;
	P.p = p0; P.n = n0; P.uv = uv0;
	return P;
}

FUNC_PREF InnerPoint Triangle::getVertex1() const
{
	InnerPoint P;
	P.p = p1; P.n = n1; P.uv = uv1;
	return P;
}

FUNC_PREF InnerPoint Triangle::getVertex2() const
{
	InnerPoint P;
	P.p = p2; P.n = n2; P.uv = uv2;
	return P;
}

FUNC_PREF void Triangle::print() const
{
	printf("--\nTriangle:"
		   "Points: <{%f, %f, %f}, {%f, %f, %f}, {%f, %f, %f}>\n"
		   "Normals: <{%f, %f, %f}, {%f, %f, %f}, {%f, %f, %f}>\n"
		   "Texture coords: <{%f, %f}, {%f, %f}, {%f, %f}>\n",
		   p0.x, p0.y, p0.z, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z,
		   n0.x, n0.y, n0.z, n1.x, n1.y, n1.z, n2.x, n2.y, n2.z,
		   uv0.x, uv0.y, uv1.x, uv1.y, uv2.x, uv2.y );
}
fc8e66257c78118c7f9b2d6c652aed1fec8bc167.cu
#include "figures.h" #include "cuda_runtime_api.h" #include "../cuda_numerics/float3.h" #include "../cuda_numerics/float2.h" #include "stdio.h" #define EPSILON 10e-15f FUNC_PREF Triangle make_triangle(float3 p0, float3 p1, float3 p2) { Triangle T; T.p0 = p0; T.p1 = p1; T.p2 = p2; return T; } FUNC_PREF void Triangle::set(float3 p0, float3 p1, float3 p2) { this->p0 = p0; this->p1 = p1; this->p2 = p2; } FUNC_PREF void Triangle::set(InnerPoint A, InnerPoint B, InnerPoint C) { p0 = A.p; n0 = A.n; uv0 = A.uv; p1 = B.p; n1 = B.n; uv1 = B.uv; p2 = C.p; n2 = C.n; uv2 = C.uv; } /* p0 | \ p1-p2 */ FUNC_PREF float3 Triangle::normal() const //p0, p1, p2 обходятся по часовой стрелке, но ск правостороння, z смотрит от нас { return norma(cross(p1-p0, p2-p0)); } FUNC_PREF Ray reflect(Ray ray, float3 normal) { return make_ray(ray.org, ray.dir - 2 * dot(ray.dir, normal) * normal); } //----------------------------------------------------------------- FUNC_PREF float3 Triangle::interpolatePoint(Baricentric coords) const { return coords.alpha() * p0 + coords.beta() * p1 + coords.gamma() * p2; } FUNC_PREF float3 Triangle::interpolateNormal(Baricentric coords) const { return coords.alpha() * n0 + coords.beta() * n1 + coords.gamma() * n2; } FUNC_PREF float2 Triangle::interpolateUV(Baricentric coords) const { return coords.alpha() * uv0 + coords.beta() * uv1 + coords.gamma() * uv2; } FUNC_PREF InnerPoint Triangle::interpolate(Baricentric coords) const { InnerPoint P; P.p = interpolatePoint (coords); P.n = interpolateNormal(coords); P.uv = interpolateUV (coords); return P; } FUNC_PREF InnerPoint Triangle::interpolate(float u, float v) const { InnerPoint P; float alpha = 1.0f - u - v; P.p = alpha * p0 + u * p1 + v * p2; P.n = alpha * n0 + u * n1 + v * n2; P.uv = alpha * uv0 + u * uv1 + v * uv2; return P; } FUNC_PREF void Triangle::displace(float h0, float h1, float h2) { // p0 = p0 + norma(n0)*h0; // p1 = p1 + norma(n1)*h1; // p2 = p2 + norma(n2)*h2; p0 = p0 + n0*h0; p1 = p1 + n1*h1; p2 = p2 + n2*h2; } FUNC_PREF void Triangle::setDefaultNormals() { n0 = n1 = n2 = normal(); } FUNC_PREF Triangle Triangle::getMicrotriangle(Baricentric uva, Baricentric uvb, Baricentric uvc) const { Triangle T; T.p0 = interpolatePoint(uva); T.p1 = interpolatePoint(uvb); T.p2 = interpolatePoint(uvc); T.n0 = interpolateNormal(uva); T.n1 = interpolateNormal(uvb); T.n2 = interpolateNormal(uvc); T.uv0 = interpolateUV(uva); T.uv1 = interpolateUV(uvb); T.uv2 = interpolateUV(uvc); return T; } FUNC_PREF InnerPoint Triangle::getVertex0() const { InnerPoint P; P.p = p0; P.n = n0; P.uv = uv0; return P; } FUNC_PREF InnerPoint Triangle::getVertex1() const { InnerPoint P; P.p = p1; P.n = n1; P.uv = uv1; return P; } FUNC_PREF InnerPoint Triangle::getVertex2() const { InnerPoint P; P.p = p2; P.n = n2; P.uv = uv2; return P; } FUNC_PREF void Triangle::print() const { printf("--\nTriangle:" "Points: <{%f, %f, %f}, {%f, %f, %f}, {%f, %f, %f}>\n" "Normals: <{%f, %f, %f}, {%f, %f, %f}, {%f, %f, %f}>\n" "Texture coords: <{%f, %f}, {%f, %f}, {%f, %f}>\n", p0.x, p0.y, p0.z, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, n0.x, n0.y, n0.z, n1.x, n1.y, n1.z, n2.x, n2.y, n2.z, uv0.x, uv0.y, uv1.x, uv1.y, uv2.x, uv2.y ); }
47edbae5187c7c0e619479808c731c01ab609fb4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>

/*
Matrix multiplication on the GPU.
Matrix a is a[M][N], matrix b is b[N][S], matrix result is result[M][S].
*/
__global__ void matmulKernel(const int *a, const int *b, int *result, const int M, const int N, const int S){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; // index of the current thread (also the index into result)
    if (tid < M * S) {
        int row = tid / S;    // row of result computed by this thread
        int column = tid % S; // column of result computed by this thread
        result[tid] = 0;
        for(int i = 0; i < N; i++)
            result[tid] += a[row*N + i] * b[i*S + column]; // walk along row `row` of a and column `column` of b, multiplying corresponding elements
    }
}

/*
Using shared memory (__shared__) for the GPU matrix multiplication greatly speeds it up.
*/
template<int BLOCK_SIZE>
__global__ void matmulSharedKernel(const int *a, const int *b, int *result, const int M, const int N, const int S){
    int block_x = blockIdx.x;
    int block_y = blockIdx.y;
    int thread_x = threadIdx.x;
    int thread_y = threadIdx.y;
    if ((block_y*blockDim.y+thread_y)*gridDim.x*blockDim.x + block_x*blockDim.x+thread_x < M*S){
        const int begin_a = block_y * blockDim.y * N; // start position in matrix a for the current thread's computation
        const int end_a = begin_a + N-1;              // end position in matrix a for the current thread's computation
        const int step_a = blockDim.x;
        const int begin_b = block_x * blockDim.x;     // start position in matrix b for the current thread's computation
        const int step_b = blockDim.y * S;            // stride through matrix b for the current thread's computation
        int tempResult = 0;
        for(int i = begin_a, j = begin_b; i < end_a; i += step_a, j += step_b){
            __shared__ int SubMat_A[BLOCK_SIZE][BLOCK_SIZE];
            __shared__ int SubMat_B[BLOCK_SIZE][BLOCK_SIZE];
            SubMat_A[thread_y][thread_x] = a[i + thread_y*N + thread_x];
            SubMat_B[thread_y][thread_x] = b[j + thread_y*S + thread_x];
            __syncthreads();
            for(int k = 0; k<BLOCK_SIZE; k++)
                tempResult += SubMat_A[thread_y][k] * SubMat_B[k][thread_x];
            __syncthreads();
        }
        int begin_result = block_y * blockDim.y * S + begin_b;
        result[begin_result + thread_y*S + thread_x] = tempResult;
    }
}

/*
The parallel multiplication works on one tile of the matrices at a time. Using shared memory, each tile is staged once so that the whole thread block can reuse the data while computing the partial products for the current tile.
In the code, the __shared__ in `__shared__ int SubMat_A` declares SubMat_A as a variable stored in shared memory; the array data is then loaded into SubMat_A in shared memory.
__syncthreads() synchronizes the threads of the block, ensuring the operations above on the __shared__ data have finished before the operations below use it.
The two for loops compute the product of the matrix sub-blocks assigned to the current thread block.
*/

// Using texture memory
/* gpuMatMultWithTextureKernel: matrix multiplication on the GPU using texture memory
 * result: the result matrix, laid out as result[M][S];
 * M: number of rows of matrix A and of result
 * N: number of columns of matrix A and number of rows of matrix B
 * S: number of columns of matrix B and of result
 */
// File-scope texture references read by the kernel below (see the notes at the end of this file).
texture<int> texA;
texture<int> texB;
__global__ void gpuMatMultWithTextureKernel(int * result, const int M, const int N, const int S)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    if (offset < M * S)
    {
        int a = 0, b = 0;
        int temp_result = 0;
        for (int i = 0; i < N; i++)
        {
            a = tex1Dfetch(texA, y * N + i);
            b = tex1Dfetch(texB, i * S + x);
            temp_result += a * b;
        }
        result[offset] = temp_result;
    }
}

/*
The algorithm with texture memory is roughly the same as with ordinary memory; the only difference is that the kernel fetches the data from the texture with tex1Dfetch.
The main thing to get right is the texture setup itself.
First, the input data must be declared as texture-type references.
Note that the texture type must match the type of the input data, and the texture references must be declared as global variables at file scope.
// These variables live on the GPU
texture<int> texA;
// a two-dimensional texture reference, with an extra parameter 2 giving the number of dimensions
texture<float, 2> texB;
After GPU memory has been allocated for the two buffers, they must be bound to the texture references with cudaBindTexture. This tells the CUDA runtime two things:
we want to use the given buffer as a texture, and
we want to use the texture reference as the "name" of that texture.
hipBindTexture(NULL, texA, dev_a, desc, M * N * sizeof(int));
hipBindTexture(NULL, texB, dev_b, desc, N * S * sizeof(int));
When binding a texture, the CUDA runtime requires a cudaChannelFormatDesc, obtained here by calling cudaCreateChannelDesc<int>().
Finally, the binding is released with cudaUnbindTexture().
*/
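matmulSharedKernel above walks the input matrices tile by tile through shared memory. As an independent reference for that technique, the kernel below is a minimal, compilable tiled multiply written for this note; it assumes M, N and S are multiples of TILE, is not taken from the file, and would build the same way under hipify with only the runtime header swapped.

// Minimal, compilable version of the shared-memory tiling idea used by
// matmulSharedKernel. Square tiles; sizes assumed to be multiples of TILE.
#include <cuda_runtime.h>

#define TILE 16

__global__ void tiled_matmul(const int* a, const int* b, int* result,
                             int M, int N, int S) {
  __shared__ int As[TILE][TILE];
  __shared__ int Bs[TILE][TILE];

  int row = blockIdx.y * TILE + threadIdx.y;   // row of result
  int col = blockIdx.x * TILE + threadIdx.x;   // column of result
  int acc = 0;

  for (int t = 0; t < N / TILE; ++t) {
    As[threadIdx.y][threadIdx.x] = a[row * N + t * TILE + threadIdx.x];
    Bs[threadIdx.y][threadIdx.x] = b[(t * TILE + threadIdx.y) * S + col];
    __syncthreads();                           // tile fully loaded
    for (int k = 0; k < TILE; ++k)
      acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
    __syncthreads();                           // everyone done reading the tile
  }
  result[row * S + col] = acc;
}
// Launch with dim3 block(TILE, TILE) and dim3 grid(S / TILE, M / TILE).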
47edbae5187c7c0e619479808c731c01ab609fb4.cu
#include<stdio.h>

/*
Matrix multiplication on the GPU.
Matrix a is a[M][N], matrix b is b[N][S], matrix result is result[M][S].
*/
__global__ void matmulKernel(const int *a, const int *b, int *result, const int M, const int N, const int S){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; // index of the current thread (also the index into result)
    if (tid < M * S) {
        int row = tid / S;    // row of result computed by this thread
        int column = tid % S; // column of result computed by this thread
        result[tid] = 0;
        for(int i = 0; i < N; i++)
            result[tid] += a[row*N + i] * b[i*S + column]; // walk along row `row` of a and column `column` of b, multiplying corresponding elements
    }
}

/*
Using shared memory (__shared__) for the GPU matrix multiplication greatly speeds it up.
*/
template<int BLOCK_SIZE>
__global__ void matmulSharedKernel(const int *a, const int *b, int *result, const int M, const int N, const int S){
    int block_x = blockIdx.x;
    int block_y = blockIdx.y;
    int thread_x = threadIdx.x;
    int thread_y = threadIdx.y;
    if ((block_y*blockDim.y+thread_y)*gridDim.x*blockDim.x + block_x*blockDim.x+thread_x < M*S){
        const int begin_a = block_y * blockDim.y * N; // start position in matrix a for the current thread's computation
        const int end_a = begin_a + N-1;              // end position in matrix a for the current thread's computation
        const int step_a = blockDim.x;
        const int begin_b = block_x * blockDim.x;     // start position in matrix b for the current thread's computation
        const int step_b = blockDim.y * S;            // stride through matrix b for the current thread's computation
        int tempResult = 0;
        for(int i = begin_a, j = begin_b; i < end_a; i += step_a, j += step_b){
            __shared__ int SubMat_A[BLOCK_SIZE][BLOCK_SIZE];
            __shared__ int SubMat_B[BLOCK_SIZE][BLOCK_SIZE];
            SubMat_A[thread_y][thread_x] = a[i + thread_y*N + thread_x];
            SubMat_B[thread_y][thread_x] = b[j + thread_y*S + thread_x];
            __syncthreads();
            for(int k = 0; k<BLOCK_SIZE; k++)
                tempResult += SubMat_A[thread_y][k] * SubMat_B[k][thread_x];
            __syncthreads();
        }
        int begin_result = block_y * blockDim.y * S + begin_b;
        result[begin_result + thread_y*S + thread_x] = tempResult;
    }
}

/*
The parallel multiplication works on one tile of the matrices at a time. Using shared memory, each tile is staged once so that the whole thread block can reuse the data while computing the partial products for the current tile.
In the code, the __shared__ in `__shared__ int SubMat_A` declares SubMat_A as a variable stored in shared memory; the array data is then loaded into SubMat_A in shared memory.
__syncthreads() synchronizes the threads of the block, ensuring the operations above on the __shared__ data have finished before the operations below use it.
The two for loops compute the product of the matrix sub-blocks assigned to the current thread block.
*/

// Using texture memory
/* gpuMatMultWithTextureKernel: matrix multiplication on the GPU using texture memory
 * result: the result matrix, laid out as result[M][S];
 * M: number of rows of matrix A and of result
 * N: number of columns of matrix A and number of rows of matrix B
 * S: number of columns of matrix B and of result
 */
// File-scope texture references read by the kernel below (see the notes at the end of this file).
texture<int> texA;
texture<int> texB;
__global__ void gpuMatMultWithTextureKernel(int * result, const int M, const int N, const int S)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    if (offset < M * S)
    {
        int a = 0, b = 0;
        int temp_result = 0;
        for (int i = 0; i < N; i++)
        {
            a = tex1Dfetch(texA, y * N + i);
            b = tex1Dfetch(texB, i * S + x);
            temp_result += a * b;
        }
        result[offset] = temp_result;
    }
}

/*
The algorithm with texture memory is roughly the same as with ordinary memory; the only difference is that the kernel fetches the data from the texture with tex1Dfetch.
The main thing to get right is the texture setup itself.
First, the input data must be declared as texture-type references.
Note that the texture type must match the type of the input data, and the texture references must be declared as global variables at file scope.
// These variables live on the GPU
texture<int> texA;
// a two-dimensional texture reference, with an extra parameter 2 giving the number of dimensions
texture<float, 2> texB;
After GPU memory has been allocated for the two buffers, they must be bound to the texture references with cudaBindTexture. This tells the CUDA runtime two things:
we want to use the given buffer as a texture, and
we want to use the texture reference as the "name" of that texture.
cudaBindTexture(NULL, texA, dev_a, desc, M * N * sizeof(int));
cudaBindTexture(NULL, texB, dev_b, desc, N * S * sizeof(int));
When binding a texture, the CUDA runtime requires a cudaChannelFormatDesc, obtained here by calling cudaCreateChannelDesc<int>().
Finally, the binding is released with cudaUnbindTexture().
*/
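The closing comment above describes the host-side texture setup that gpuMatMultWithTextureKernel relies on: declare file-scope texture references, bind the device buffers with cudaBindTexture, and unbind afterwards. The small program below exercises exactly that legacy texture-reference API on a single int buffer; the kernel and sizes are invented for the example, and since the texture-reference API was removed in CUDA 12 it only builds with older toolkits.

// Sketch of the legacy texture-reference workflow described above:
// declare, bind, fetch with tex1Dfetch inside a kernel, unbind.
#include <cstdio>
#include <cuda_runtime.h>

texture<int> texX;   // file-scope texture reference

__global__ void add_one_from_texture(int* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = tex1Dfetch(texX, i) + 1;   // read element i through the texture
}

int main() {
  const int n = 1024;
  int *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(int));
  cudaMalloc(&d_out, n * sizeof(int));
  cudaMemset(d_in, 0, n * sizeof(int));

  cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
  cudaBindTexture(NULL, texX, d_in, desc, n * sizeof(int));   // bind buffer to texture

  add_one_from_texture<<<(n + 255) / 256, 256>>>(d_out, n);
  cudaDeviceSynchronize();

  cudaUnbindTexture(texX);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}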
e3bcbe2b4637a6a6427a939e8a02a59b9381188b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/AccumulateType.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { /* This code computes the sum of the weights in two-steps: 1) Each GPU warp sums `NROWS_PER_THREAD` number of row given by `indeces` 2) Each partial-sum from 1) are summed and scatter into `grad_weight` Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the kernel execution. If it is high, the size of the thread blocks will be too small to achieve good occupancy. Similarly, a very low value will make the size of the thread blocks in the final sum in step 2) too small. */ constexpr int NROWS_PER_THREAD = 10; // Fast ceil division (no overflow checking) __host__ __device__ __forceinline__ int64_t ceil_div(int64_t x, int64_t y) { return (x + y - 1) / y; } template <typename index_t> __global__ void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets, int64_t num_of_segments, int64_t numel) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { const int64_t idx_start = segment_offsets[id]; const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; const int64_t size = idx_end - idx_start; ret[id] = ceil_div(size, NROWS_PER_THREAD); } } template <typename index_t> __global__ void krn_partial_segment_offset( index_t *ret, const index_t *partials_per_segment, const index_t *partials_per_segment_offset, const index_t *segment_offsets, int64_t num_of_segments) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { index_t idx = partials_per_segment_offset[id]; const index_t num_partials = partials_per_segment[id]; const index_t segment_offset = segment_offsets[id]; for (int64_t i=0; i<num_partials; ++i) { ret[idx++] = segment_offset + i * NROWS_PER_THREAD; } } } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight_bags( index_t *indices, scalar_t *gradOutput, index_t *offset2bag, index_t *count, ptrdiff_t numel, int64_t stride, int mode_mean, const index_t *bag_size, scalar_t* per_sample_weights, int64_t per_sample_weights_stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const int origRow = indices[idx]; const int seq_number = offset2bag[origRow]; const int gradOutputRow = seq_number * stride; acc_type<scalar_t, true> scale = count ? 
1.0 / count[idx] : 1.0; if (per_sample_weights) { scale *= per_sample_weights[origRow * per_sample_weights_stride]; } acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature]; if (mode_mean) { gradient /= bag_size[seq_number]; } weight += gradient * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight( index_t *indices, scalar_t *gradOutput, index_t *count, ptrdiff_t numel, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { using accscalar_t = acc_type<scalar_t, true>; const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; accscalar_t weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const index_t target_row = indices[idx]; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; weight += gradOutput[target_row * stride + startFeature] * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } // This kernel assumes that all input tensors are contiguous. template <typename scalar_t, typename index_t> __global__ void sum_and_scatter( index_t *input, scalar_t *gradWeight, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, const acc_type<scalar_t, true> *grad_weight_per_segment, const index_t *segment_sizes_offsets, int64_t num_of_partial_segments, const int64_t padding_idx, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_sizes_offsets[id]; const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { weight += grad_weight_per_segment[idx*stride + startFeature]; } int64_t target_row = input[segment_offsets[id]]; if (target_row != padding_idx) { gradWeight[target_row * stride + startFeature] = weight; } } } // anon namespace Tensor embedding_backward_cuda_kernel( const Tensor &grad, const Tensor &orig_indices, const Tensor &sorted_indices, const Tensor &count, int64_t num_weights, int padding_idx, bool mode_mean, const Tensor &offset2bag, const Tensor &bag_size, const Tensor &per_sample_weights) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); const ptrdiff_t numel = sorted_indices.numel(); auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options()); const int64_t stride = grad_weight.stride(0); // Compute the number of segments and their start position so that we do not have to // spawn a warp per index. In this context, a segment is a number of rows that should // be summarized. 
// Unit: index in `sorted_indices` and `orig_indices` AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () { auto segment_offsets = at::empty({numel}, orig_indices.options()); int64_t num_of_segments; { auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>()); auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>()); auto ends = thrust::unique_by_key_copy( policy, sorted_indices_dev, sorted_indices_dev + numel, thrust::make_counting_iterator(0), dummy_dev, thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>())); num_of_segments = thrust::get<0>(ends) - dummy_dev; } // We split the segments up into sizes of `NROWS_PER_THREAD` // Compute the number partial-segments per segment (some partial-segments // may not be the full `NROWS_PER_THREAD` number of rows) auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options()); { hipLaunchKernelGGL(( krn_partials_per_segment), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream, partials_per_segment.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } // In order to compute `partial_segment_offset`, which is the start index // of each partial-segment in `sorted_indices`, we need to compute the // start position of each _segment_ in `partial_segment_offset`. // Unit: index in `partial_segment_offset` auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options()); thrust::exclusive_scan( policy, thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()), thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()+num_of_segments), thrust::device_ptr<index_t>(partials_per_segment_offset.data_ptr<index_t>())); // The total number of partial-segments is the sum of `partials_per_segment_offset` const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() + partials_per_segment_offset[num_of_segments-1].item<index_t>(); // Now we can compute the start position of each partial-segment // Unit: index in `sorted_indices` and `orig_indices` auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options()); { hipLaunchKernelGGL(( krn_partial_segment_offset), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream, partial_segment_offset.data_ptr<index_t>(), partials_per_segment.data_ptr<index_t>(), partials_per_segment_offset.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments); C10_HIP_KERNEL_LAUNCH_CHECK(); } const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE; const int block = ::min(stride_warped, MAX_BLOCK_SIZE); const int grid = ceil_div(num_of_partial_segments*stride_warped, block); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] { // For numerical stability, the dtype of `grad_weight_per_segment` // should match `acc_type` using partial_weight_t = acc_type<scalar_t, true>; TensorOptions op; if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) { op = grad.options().dtype(at::kFloat); } else { op = grad.options(); } auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op); // Compute the sum of each partial-segment and handle bags if (offset2bag.defined()) { hipLaunchKernelGGL(( compute_grad_weight_bags<scalar_t>), 
dim3(grid), dim3(block), 0, stream, orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, mode_mean, bag_size.data_ptr<index_t>(), per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.stride(0) : 0, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( compute_grad_weight<scalar_t>), dim3(grid), dim3(block), 0, stream, orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); C10_HIP_KERNEL_LAUNCH_CHECK(); } // Finally, we sum all the partial-sums and scatter them // into `grad_weight`. const int grid2 = ceil_div(num_of_segments*stride_warped, block); hipLaunchKernelGGL(( sum_and_scatter<scalar_t>), dim3(grid2), dim3(block), 0, stream, sorted_indices.data_ptr<index_t>(), grad_weight.data_ptr<scalar_t>(), stride, segment_offsets.data_ptr<index_t>(), num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), partials_per_segment_offset.data_ptr<index_t>(), num_of_partial_segments, padding_idx, stride_warped); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } }}
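embedding_backward_cuda_kernel above splits each segment of repeated indices into partial segments of at most NROWS_PER_THREAD rows (krn_partials_per_segment) and turns the counts into offsets with thrust::exclusive_scan. The host-only toy below reproduces that bookkeeping on made-up segment sizes, just to show the arithmetic; it is not part of the PyTorch code.

// Toy illustration of the partial-segment bookkeeping above: each segment of
// `size` rows is split into ceil_div(size, NROWS_PER_THREAD) partial segments,
// and an exclusive scan turns those counts into start offsets.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>

constexpr long long NROWS_PER_THREAD = 10;

static long long ceil_div(long long x, long long y) { return (x + y - 1) / y; }

int main() {
  thrust::host_vector<long long> segment_sizes;   // rows per unique index (made up)
  segment_sizes.push_back(3);
  segment_sizes.push_back(25);
  segment_sizes.push_back(10);

  thrust::host_vector<long long> partials(segment_sizes.size());
  for (size_t i = 0; i < segment_sizes.size(); ++i)
    partials[i] = ceil_div(segment_sizes[i], NROWS_PER_THREAD);   // 1, 3, 1

  thrust::device_vector<long long> d_partials = partials;
  thrust::device_vector<long long> d_offsets(partials.size());
  thrust::exclusive_scan(d_partials.begin(), d_partials.end(), d_offsets.begin());

  thrust::host_vector<long long> offsets = d_offsets;             // 0, 1, 4
  for (size_t i = 0; i < offsets.size(); ++i)
    printf("segment %zu: %lld partial segments starting at offset %lld\n",
           i, (long long)partials[i], (long long)offsets[i]);
  return 0;
}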
e3bcbe2b4637a6a6427a939e8a02a59b9381188b.cu
#include <ATen/ATen.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/AccumulateType.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { /* This code computes the sum of the weights in two-steps: 1) Each GPU warp sums `NROWS_PER_THREAD` number of row given by `indeces` 2) Each partial-sum from 1) are summed and scatter into `grad_weight` Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the kernel execution. If it is high, the size of the thread blocks will be too small to achieve good occupancy. Similarly, a very low value will make the size of the thread blocks in the final sum in step 2) too small. */ constexpr int NROWS_PER_THREAD = 10; // Fast ceil division (no overflow checking) __host__ __device__ __forceinline__ int64_t ceil_div(int64_t x, int64_t y) { return (x + y - 1) / y; } template <typename index_t> __global__ void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets, int64_t num_of_segments, int64_t numel) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { const int64_t idx_start = segment_offsets[id]; const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; const int64_t size = idx_end - idx_start; ret[id] = ceil_div(size, NROWS_PER_THREAD); } } template <typename index_t> __global__ void krn_partial_segment_offset( index_t *ret, const index_t *partials_per_segment, const index_t *partials_per_segment_offset, const index_t *segment_offsets, int64_t num_of_segments) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { index_t idx = partials_per_segment_offset[id]; const index_t num_partials = partials_per_segment[id]; const index_t segment_offset = segment_offsets[id]; for (int64_t i=0; i<num_partials; ++i) { ret[idx++] = segment_offset + i * NROWS_PER_THREAD; } } } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight_bags( index_t *indices, scalar_t *gradOutput, index_t *offset2bag, index_t *count, ptrdiff_t numel, int64_t stride, int mode_mean, const index_t *bag_size, scalar_t* per_sample_weights, int64_t per_sample_weights_stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const int origRow = indices[idx]; const int seq_number = offset2bag[origRow]; const int gradOutputRow = seq_number * stride; acc_type<scalar_t, true> scale = count ? 
1.0 / count[idx] : 1.0; if (per_sample_weights) { scale *= per_sample_weights[origRow * per_sample_weights_stride]; } acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature]; if (mode_mean) { gradient /= bag_size[seq_number]; } weight += gradient * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight( index_t *indices, scalar_t *gradOutput, index_t *count, ptrdiff_t numel, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { using accscalar_t = acc_type<scalar_t, true>; const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; accscalar_t weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const index_t target_row = indices[idx]; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; weight += gradOutput[target_row * stride + startFeature] * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } // This kernel assumes that all input tensors are contiguous. template <typename scalar_t, typename index_t> __global__ void sum_and_scatter( index_t *input, scalar_t *gradWeight, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, const acc_type<scalar_t, true> *grad_weight_per_segment, const index_t *segment_sizes_offsets, int64_t num_of_partial_segments, const int64_t padding_idx, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_sizes_offsets[id]; const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { weight += grad_weight_per_segment[idx*stride + startFeature]; } int64_t target_row = input[segment_offsets[id]]; if (target_row != padding_idx) { gradWeight[target_row * stride + startFeature] = weight; } } } // anon namespace Tensor embedding_backward_cuda_kernel( const Tensor &grad, const Tensor &orig_indices, const Tensor &sorted_indices, const Tensor &count, int64_t num_weights, int padding_idx, bool mode_mean, const Tensor &offset2bag, const Tensor &bag_size, const Tensor &per_sample_weights) { auto stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); const ptrdiff_t numel = sorted_indices.numel(); auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options()); const int64_t stride = grad_weight.stride(0); // Compute the number of segments and their start position so that we do not have to // spawn a warp per index. In this context, a segment is a number of rows that should // be summarized. 
// Unit: index in `sorted_indices` and `orig_indices` AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () { auto segment_offsets = at::empty({numel}, orig_indices.options()); int64_t num_of_segments; { auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>()); auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>()); auto ends = thrust::unique_by_key_copy( policy, sorted_indices_dev, sorted_indices_dev + numel, thrust::make_counting_iterator(0), dummy_dev, thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>())); num_of_segments = thrust::get<0>(ends) - dummy_dev; } // We split the segments up into sizes of `NROWS_PER_THREAD` // Compute the number partial-segments per segment (some partial-segments // may not be the full `NROWS_PER_THREAD` number of rows) auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options()); { krn_partials_per_segment<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> ( partials_per_segment.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } // In order to compute `partial_segment_offset`, which is the start index // of each partial-segment in `sorted_indices`, we need to compute the // start position of each _segment_ in `partial_segment_offset`. // Unit: index in `partial_segment_offset` auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options()); thrust::exclusive_scan( policy, thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()), thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()+num_of_segments), thrust::device_ptr<index_t>(partials_per_segment_offset.data_ptr<index_t>())); // The total number of partial-segments is the sum of `partials_per_segment_offset` const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() + partials_per_segment_offset[num_of_segments-1].item<index_t>(); // Now we can compute the start position of each partial-segment // Unit: index in `sorted_indices` and `orig_indices` auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options()); { krn_partial_segment_offset<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> ( partial_segment_offset.data_ptr<index_t>(), partials_per_segment.data_ptr<index_t>(), partials_per_segment_offset.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments); C10_CUDA_KERNEL_LAUNCH_CHECK(); } const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE; const int block = std::min(stride_warped, MAX_BLOCK_SIZE); const int grid = ceil_div(num_of_partial_segments*stride_warped, block); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] { // For numerical stability, the dtype of `grad_weight_per_segment` // should match `acc_type` using partial_weight_t = acc_type<scalar_t, true>; TensorOptions op; if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) { op = grad.options().dtype(at::kFloat); } else { op = grad.options(); } auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op); // Compute the sum of each partial-segment and handle bags if (offset2bag.defined()) { compute_grad_weight_bags<scalar_t><<<grid, block, 0, stream>>>( orig_indices.data_ptr<index_t>(), 
grad.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, mode_mean, bag_size.data_ptr<index_t>(), per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.stride(0) : 0, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { compute_grad_weight<scalar_t><<<grid, block, 0, stream>>>( orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); C10_CUDA_KERNEL_LAUNCH_CHECK(); } // Finally, we sum all the partial-sums and scatter them // into `grad_weight`. const int grid2 = ceil_div(num_of_segments*stride_warped, block); sum_and_scatter<scalar_t><<<grid2, block, 0, stream>>>( sorted_indices.data_ptr<index_t>(), grad_weight.data_ptr<scalar_t>(), stride, segment_offsets.data_ptr<index_t>(), num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), partials_per_segment_offset.data_ptr<index_t>(), num_of_partial_segments, padding_idx, stride_warped); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } }}
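// A small host-side illustration (not from the original file) of the partial-segment
// bookkeeping performed by krn_partials_per_segment and krn_partial_segment_offset above,
// assuming NROWS_PER_THREAD == 10 and a hypothetical 3-segment input.
#include <cstdint>
#include <cstdio>
#include <vector>

static int64_t ceil_div_host(int64_t x, int64_t y) { return (x + y - 1) / y; }

int main() {
  const int64_t NROWS_PER_THREAD = 10;
  const int64_t numel = 40;                              // total number of sorted indices
  std::vector<int64_t> segment_offsets = {0, 25, 31};    // start of each run of equal indices

  std::vector<int64_t> partials, partial_starts;
  for (size_t id = 0; id < segment_offsets.size(); ++id) {
    const int64_t begin = segment_offsets[id];
    const int64_t end   = (id + 1 == segment_offsets.size()) ? numel : segment_offsets[id + 1];
    partials.push_back(ceil_div_host(end - begin, NROWS_PER_THREAD));
    for (int64_t i = 0; i < partials.back(); ++i)
      partial_starts.push_back(begin + i * NROWS_PER_THREAD);
  }
  // partials       == {3, 1, 1}      (one warp handles up to NROWS_PER_THREAD rows)
  // partial_starts == {0, 10, 20, 25, 31}
  for (int64_t p : partial_starts) std::printf("%lld ", (long long)p);
  std::printf("\n");
  return 0;
}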
b32225c6423ba2350562b306e118a3c4293ff48a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "elementwise_1D_1D_mul_mutate.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in1 = NULL; hipMalloc(&in1, XSIZE*YSIZE); float *in2 = NULL; hipMalloc(&in2, XSIZE*YSIZE); float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( elementwise_1D_1D_mul_mutate), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,in2,out,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( elementwise_1D_1D_mul_mutate), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,in2,out,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( elementwise_1D_1D_mul_mutate), dim3(gridBlock),dim3(threadBlock), 0, 0, in1,in2,out,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
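// Sketch of the timing loop above with an explicit device synchronization before the clock
// is read; without it the measured interval mostly reflects kernel-enqueue cost, since
// launches are asynchronous. time_1000_launches is a hypothetical helper name; the kernel
// itself is the one pulled in from "elementwise_1D_1D_mul_mutate.cu".
#include <hip/hip_runtime.h>
#include <chrono>

__global__ void elementwise_1D_1D_mul_mutate(float*, float*, float*, int);  // defined in the included .cu

float time_1000_launches(dim3 gridBlock, dim3 threadBlock,
                         float* in1, float* in2, float* out, int size) {
  using namespace std::chrono;
  auto start = steady_clock::now();
  for (int loop_counter = 0; loop_counter < 1000; ++loop_counter) {
    hipLaunchKernelGGL(elementwise_1D_1D_mul_mutate, gridBlock, threadBlock, 0, 0,
                       in1, in2, out, size);
  }
  hipDeviceSynchronize();  // wait for the queued kernels before stopping the clock
  auto end = steady_clock::now();
  return duration_cast<duration<float, microseconds::period>>(end - start).count();
}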
b32225c6423ba2350562b306e118a3c4293ff48a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "elementwise_1D_1D_mul_mutate.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in1 = NULL; cudaMalloc(&in1, XSIZE*YSIZE); float *in2 = NULL; cudaMalloc(&in2, XSIZE*YSIZE); float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); elementwise_1D_1D_mul_mutate<<<gridBlock,threadBlock>>>(in1,in2,out,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { elementwise_1D_1D_mul_mutate<<<gridBlock,threadBlock>>>(in1,in2,out,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { elementwise_1D_1D_mul_mutate<<<gridBlock,threadBlock>>>(in1,in2,out,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
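// The harness above #includes "elementwise_1D_1D_mul_mutate.cu", whose source is not part
// of this pair. The kernel below is only a plausible guess at its definition (flat index
// over the 2D launch, element-wise multiply); the real implementation may differ. Note
// also that the harness allocates XSIZE*YSIZE bytes per buffer, whereas `size` floats
// would need size * sizeof(float) bytes.
__global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int size) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int i = y * gridDim.x * blockDim.x + x;   // flatten the 2D launch to a 1D element index
  if (i < size) out[i] = in1[i] * in2[i];
}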
6e0a8a5894c31c057d520e1b73ef5bc515037a87.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <hipcub/hipcub.hpp> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic shape plugin requires TRT version greater than 6.0. #if IS_TRT_VERSION_GE(6000) template <typename T> int EmbEltwiseLayernormPluginDynamic<T>::initialize() { int nb_emb = embs_.size(); std::vector<void *> ptr_vector(nb_emb); std::vector<std::vector<half>> emb_fp16(nb_emb); if (sizeof(T) == sizeof(float)) { // FP32 for (int i = 0; i < nb_emb; ++i) { ptr_vector[i] = embs_[i]; } } else { // FP16 for (int i = 0; i < nb_emb; ++i) { auto emb_size = emb_sizes_[i]; auto &tmp = emb_fp16[i]; tmp.resize(emb_size); for (int j = 0; j < emb_size; ++j) { tmp[j] = static_cast<half>(embs_[i][j]); } ptr_vector[i] = tmp.data(); } } embs_gpu_.resize(embs_.size()); for (int i = 0; i < embs_.size(); i++) { hipMalloc(&embs_gpu_[i], sizeof(T) * emb_sizes_[i]); hipMemcpy(embs_gpu_[i], ptr_vector[i], emb_sizes_[i] * sizeof(T), hipMemcpyHostToDevice); } hipMalloc(&bias_gpu_, sizeof(float) * bias_size_); hipMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float), hipMemcpyHostToDevice); hipMalloc(&scale_gpu_, sizeof(float) * scale_size_); hipMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float), hipMemcpyHostToDevice); return 0; } template <typename T> size_t EmbEltwiseLayernormPluginDynamic<T>::getSerializationSize() const { return 0; } template <typename T> void EmbEltwiseLayernormPluginDynamic<T>::serialize(void *buffer) const {} template <typename T> nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic<T>::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) { // NOLINT PADDLE_ENFORCE_EQ(output_index, 0, platform::errors::InvalidArgument( "There is only one output of the EmbEltwiseLayernorm, " "so the index should be zero," "but it's (%d)", output_index)); PADDLE_ENFORCE_EQ( nb_inputs, 3, platform::errors::InvalidArgument( "The Input of the EmbEltwiseLayernorm should be 3, but we found " "it has (%d) inputs", nb_inputs)); nvinfer1::DimsExprs ret; ret.nbDims = 5; ret.d[0] = inputs[0].d[0]; ret.d[1] = inputs[0].d[1]; ret.d[2] = expr_builder.constant(hidden_size_); ret.d[3] = expr_builder.constant(1); ret.d[4] = expr_builder.constant(1); return ret; } template <typename T> bool EmbEltwiseLayernormPluginDynamic<T>::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, 
platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &desc = in_out[pos]; if (desc.format != nvinfer1::TensorFormat::kLINEAR) { return false; } if (pos == 0) { return desc.type == nvinfer1::DataType::kINT32; } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos == 1 || pos == 2) { return desc.type == nvinfer1::DataType::kINT32 && desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1]; } if (pos == 3) { if (sizeof(T) == sizeof(float)) { return desc.type == nvinfer1::DataType::kFLOAT; } else { return desc.type == nvinfer1::DataType::kHALF; } } } template <typename T> nvinfer1::DataType EmbEltwiseLayernormPluginDynamic<T>::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); return nvinfer1::DataType::kFLOAT; } template <typename T> int EmbEltwiseLayernormPluginDynamic<T>::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) { auto id_dims = input_desc[0].dims; int batch = id_dims.d[0]; int seq_len = id_dims.d[1]; int input_num = embs_.size(); framework::Tensor in_ptr_tensor, emb_ptr_tensor; int device_id; hipGetDevice(&device_id); in_ptr_tensor.Resize({input_num}); emb_ptr_tensor.Resize({input_num}); int64_t *in_ptr_gpu_d = in_ptr_tensor.mutable_data<int64_t>(platform::CUDAPlace(device_id)); int64_t *emb_ptr_gpu_d = emb_ptr_tensor.mutable_data<int64_t>(platform::CUDAPlace(device_id)); std::vector<int64_t> in_ptr, emb_ptr; for (int i = 0; i < input_num; i++) { in_ptr.push_back(reinterpret_cast<uintptr_t>(inputs[i])); emb_ptr.push_back(reinterpret_cast<uintptr_t>(embs_gpu_[i])); } hipMemcpyAsync(in_ptr_gpu_d, in_ptr.data(), sizeof(int64_t) * input_num, hipMemcpyHostToDevice, stream); hipMemcpyAsync(emb_ptr_gpu_d, emb_ptr.data(), sizeof(int64_t) * input_num, hipMemcpyHostToDevice, stream); auto out_type = output_desc[0].type; const unsigned tpb = 256; const dim3 grid(seq_len, batch, 1); const dim3 block(tpb, 1, 1); if (sizeof(T) == sizeof(float)) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kFLOAT, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp32 input.")); } else if (sizeof(T) == sizeof(int16_t)) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kHALF, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp16 input.")); } else { PADDLE_THROW(platform::errors::Fatal( "Unsupport data type, the out type of EmbEltwiseLayernorm should be " "float or half.")); } T *output_d = static_cast<T *>(outputs[0]); operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden_size_, in_ptr_gpu_d, scale_gpu_, bias_gpu_, emb_ptr_gpu_d, output_d, eps_, input_num, stream); return hipGetLastError() != hipSuccess; } template class EmbEltwiseLayernormPluginDynamic<float>; #ifdef SUPPORTS_CUDA_FP16 template class EmbEltwiseLayernormPluginDynamic<half>; #endif // SUPPORTS_CUDA_FP16 #endif } // namespace 
plugin } // namespace tensorrt } // namespace inference } // namespace paddle
6e0a8a5894c31c057d520e1b73ef5bc515037a87.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <cub/cub.cuh> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic shape plugin requires TRT version greater than 6.0. #if IS_TRT_VERSION_GE(6000) template <typename T> int EmbEltwiseLayernormPluginDynamic<T>::initialize() { int nb_emb = embs_.size(); std::vector<void *> ptr_vector(nb_emb); std::vector<std::vector<half>> emb_fp16(nb_emb); if (sizeof(T) == sizeof(float)) { // FP32 for (int i = 0; i < nb_emb; ++i) { ptr_vector[i] = embs_[i]; } } else { // FP16 for (int i = 0; i < nb_emb; ++i) { auto emb_size = emb_sizes_[i]; auto &tmp = emb_fp16[i]; tmp.resize(emb_size); for (int j = 0; j < emb_size; ++j) { tmp[j] = static_cast<half>(embs_[i][j]); } ptr_vector[i] = tmp.data(); } } embs_gpu_.resize(embs_.size()); for (int i = 0; i < embs_.size(); i++) { cudaMalloc(&embs_gpu_[i], sizeof(T) * emb_sizes_[i]); cudaMemcpy(embs_gpu_[i], ptr_vector[i], emb_sizes_[i] * sizeof(T), cudaMemcpyHostToDevice); } cudaMalloc(&bias_gpu_, sizeof(float) * bias_size_); cudaMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc(&scale_gpu_, sizeof(float) * scale_size_); cudaMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float), cudaMemcpyHostToDevice); return 0; } template <typename T> size_t EmbEltwiseLayernormPluginDynamic<T>::getSerializationSize() const { return 0; } template <typename T> void EmbEltwiseLayernormPluginDynamic<T>::serialize(void *buffer) const {} template <typename T> nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic<T>::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) { // NOLINT PADDLE_ENFORCE_EQ(output_index, 0, platform::errors::InvalidArgument( "There is only one output of the EmbEltwiseLayernorm, " "so the index should be zero," "but it's (%d)", output_index)); PADDLE_ENFORCE_EQ( nb_inputs, 3, platform::errors::InvalidArgument( "The Input of the EmbEltwiseLayernorm should be 3, but we found " "it has (%d) inputs", nb_inputs)); nvinfer1::DimsExprs ret; ret.nbDims = 5; ret.d[0] = inputs[0].d[0]; ret.d[1] = inputs[0].d[1]; ret.d[2] = expr_builder.constant(hidden_size_); ret.d[3] = expr_builder.constant(1); ret.d[4] = expr_builder.constant(1); return ret; } template <typename T> bool EmbEltwiseLayernormPluginDynamic<T>::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not 
be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &desc = in_out[pos]; if (desc.format != nvinfer1::TensorFormat::kLINEAR) { return false; } if (pos == 0) { return desc.type == nvinfer1::DataType::kINT32; } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos == 1 || pos == 2) { return desc.type == nvinfer1::DataType::kINT32 && desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1]; } if (pos == 3) { if (sizeof(T) == sizeof(float)) { return desc.type == nvinfer1::DataType::kFLOAT; } else { return desc.type == nvinfer1::DataType::kHALF; } } } template <typename T> nvinfer1::DataType EmbEltwiseLayernormPluginDynamic<T>::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); return nvinfer1::DataType::kFLOAT; } template <typename T> int EmbEltwiseLayernormPluginDynamic<T>::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) { auto id_dims = input_desc[0].dims; int batch = id_dims.d[0]; int seq_len = id_dims.d[1]; int input_num = embs_.size(); framework::Tensor in_ptr_tensor, emb_ptr_tensor; int device_id; cudaGetDevice(&device_id); in_ptr_tensor.Resize({input_num}); emb_ptr_tensor.Resize({input_num}); int64_t *in_ptr_gpu_d = in_ptr_tensor.mutable_data<int64_t>(platform::CUDAPlace(device_id)); int64_t *emb_ptr_gpu_d = emb_ptr_tensor.mutable_data<int64_t>(platform::CUDAPlace(device_id)); std::vector<int64_t> in_ptr, emb_ptr; for (int i = 0; i < input_num; i++) { in_ptr.push_back(reinterpret_cast<uintptr_t>(inputs[i])); emb_ptr.push_back(reinterpret_cast<uintptr_t>(embs_gpu_[i])); } cudaMemcpyAsync(in_ptr_gpu_d, in_ptr.data(), sizeof(int64_t) * input_num, cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(emb_ptr_gpu_d, emb_ptr.data(), sizeof(int64_t) * input_num, cudaMemcpyHostToDevice, stream); auto out_type = output_desc[0].type; const unsigned tpb = 256; const dim3 grid(seq_len, batch, 1); const dim3 block(tpb, 1, 1); if (sizeof(T) == sizeof(float)) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kFLOAT, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp32 input.")); } else if (sizeof(T) == sizeof(int16_t)) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kHALF, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp16 input.")); } else { PADDLE_THROW(platform::errors::Fatal( "Unsupport data type, the out type of EmbEltwiseLayernorm should be " "float or half.")); } T *output_d = static_cast<T *>(outputs[0]); operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden_size_, in_ptr_gpu_d, scale_gpu_, bias_gpu_, emb_ptr_gpu_d, output_d, eps_, input_num, stream); return cudaGetLastError() != cudaSuccess; } template class EmbEltwiseLayernormPluginDynamic<float>; #ifdef SUPPORTS_CUDA_FP16 template class EmbEltwiseLayernormPluginDynamic<half>; #endif // SUPPORTS_CUDA_FP16 #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // 
namespace paddle
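// Minimal standalone sketch (hypothetical names) of the pointer-table upload pattern used
// in enqueue() above: device addresses are packed into a host vector of int64_t and copied
// to a device buffer so one fused kernel can walk a variable number of embedding tables.
#include <cstdint>
#include <vector>
#include <cuda_runtime.h>

void upload_pointer_table(const std::vector<void*>& device_ptrs, int64_t* d_table,
                          cudaStream_t stream) {
  std::vector<int64_t> host_table;
  host_table.reserve(device_ptrs.size());
  for (void* p : device_ptrs)
    host_table.push_back(static_cast<int64_t>(reinterpret_cast<uintptr_t>(p)));
  // With pageable host memory, cudaMemcpyAsync stages the data before returning, so the
  // local vector may safely go out of scope (the plugin above relies on the same behavior).
  cudaMemcpyAsync(d_table, host_table.data(), sizeof(int64_t) * host_table.size(),
                  cudaMemcpyHostToDevice, stream);
}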
af16661a5da24c4f7830bcd94d24fe1e14c8e1ca.hip
// !!! This is a file automatically generated by hipify!!! /* ----------------------------------------------------------------- * Programmer(s): Slaven Peles, and Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the NVECTOR CUDA module * implementation. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <sundials/sundials_math.h> #include <sundials/sundials_types.h> #include <nvector/nvector_serial.h> #include <nvector/nvector_cuda.h> #include "custom_memory_helper.h" #include "test_nvector.h" /* Private custom allocator functions */ static void* sunalloc(size_t); static void sunfree(void* ptr); /* CUDA vector variants */ enum mem_type { UNMANAGED, MANAGED, SUNMEMORY, MANAGED_ALLOC }; enum pol_type { DEFAULT_POL, DEFAULT_POL_W_STREAM, GRID_STRIDE }; /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { int fails = 0; /* counter for test failures */ int retval; /* function return value */ sunindextype length; /* vector length */ N_Vector U, V, X, Y, Z; /* test vectors */ int print_timing; /* turn timing on/off */ int threadsPerBlock; /* cuda block size */ hipStream_t stream; /* cuda stream */ int memtype, policy; /* check input and set vector length */ if (argc < 4){ printf("ERROR: THREE (3) Inputs required: vector length, CUDA threads per block (0 for default), print timing \n"); return(-1); } length = (sunindextype) atol(argv[1]); if (length <= 0) { printf("ERROR: length of vector must be a positive integer\n"); return(-1); } threadsPerBlock = (int) atoi(argv[2]); if (threadsPerBlock < 0 || threadsPerBlock % 32) { printf("ERROR: CUDA threads per block must be 0 to use the default or a multiple of 32\n"); return(-1); } print_timing = atoi(argv[3]); SetTiming(print_timing, 0); /* test with all policy variants */ for (policy=DEFAULT_POL; policy<=GRID_STRIDE; ++policy) { int actualThreadsPerBlock = threadsPerBlock ? 
threadsPerBlock : 256; SUNCudaExecPolicy* stream_exec_policy = NULL; SUNCudaExecPolicy* reduce_exec_policy = NULL; hipStreamCreate(&stream); if (policy == DEFAULT_POL_W_STREAM) { stream_exec_policy = new SUNCudaThreadDirectExecPolicy(actualThreadsPerBlock, stream); reduce_exec_policy = new SUNCudaBlockReduceExecPolicy(actualThreadsPerBlock, 0, stream); } else if (policy == GRID_STRIDE) { stream_exec_policy = new SUNCudaGridStrideExecPolicy(actualThreadsPerBlock, 1); reduce_exec_policy = new SUNCudaBlockReduceExecPolicy(actualThreadsPerBlock, 1); } /* test with all memory variants */ for (memtype=UNMANAGED; memtype<=MANAGED_ALLOC; ++memtype) { SUNMemoryHelper mem_helper = NULL; printf("=====> Beginning setup\n\n"); if (memtype==UNMANAGED) { printf("Testing CUDA N_Vector, policy %d\n", policy); } else if (memtype==MANAGED) { printf("Testing CUDA N_Vector with managed memory, policy %d\n", policy); } else if (memtype==MANAGED_ALLOC) { printf("Testing CUDA N_Vector with user allocator, policy %d\n", policy); } else if (memtype==SUNMEMORY) { printf("Testing CUDA N_Vector with SUNMemoryHelper, policy %d\n", policy); mem_helper = MyMemoryHelper(); } printf("Vector length: %ld \n", (long int) length); /* Create new vectors */ if (memtype == UNMANAGED) X = N_VNew_Cuda(length); else if (memtype == MANAGED) X = N_VNewManaged_Cuda(length); else if (memtype == MANAGED_ALLOC) X = N_VMakeWithManagedAllocator_Cuda(length, sunalloc, sunfree); else if (memtype == SUNMEMORY) X = N_VNewWithMemHelp_Cuda(length, SUNFALSE, mem_helper); if (X == NULL) { delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } if (stream_exec_policy != NULL && reduce_exec_policy != NULL) { if (N_VSetKernelExecPolicy_Cuda(X, stream_exec_policy, reduce_exec_policy)) { N_VDestroy(X); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to set kernel execution policy \n\n"); return(1); } printf("Using non-default kernel execution policy\n"); printf("Threads per block: %d\n\n", actualThreadsPerBlock); } /* Fill vector with uniform random data in [-1,1] */ realtype* xdata = N_VGetHostArrayPointer_Cuda(X); for (sunindextype j=0; j<length; j++) xdata[j] = ((realtype) rand() / (realtype) RAND_MAX)*2-1; N_VCopyToDevice_Cuda(X); /* Clone additional vectors for testing */ Y = N_VClone(X); if (Y == NULL) { N_VDestroy(X); printf("FAIL: Unable to create a new vector \n\n"); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); return(1); } Z = N_VClone(X); if (Z == NULL) { N_VDestroy(X); N_VDestroy(Y); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* Fill vectors with uniform random data in [-1,1] */ realtype* ydata = N_VGetHostArrayPointer_Cuda(Y); realtype* zdata = N_VGetHostArrayPointer_Cuda(Z); for (sunindextype j=0; j<length; j++) { ydata[j] = ((realtype) rand() / (realtype) RAND_MAX)*2-1; zdata[j] = ((realtype) rand() / (realtype) RAND_MAX)*2-1; } N_VCopyToDevice_Cuda(Y); N_VCopyToDevice_Cuda(Z); printf("=====> Setup complete\n"); printf("=====> Beginning tests\n\n"); /* Standard vector operation tests */ printf("\nTesting standard vector operations:\n\n"); /* Check vector ID */ fails += Test_N_VGetVectorID(X, SUNDIALS_NVEC_CUDA, 0); /* Check vector length */ fails += 
Test_N_VGetLength(X, 0); /* Check vector communicator */ fails += Test_N_VGetCommunicator(X, NULL, 0); /* Test clone functions */ fails += Test_N_VCloneEmpty(X, 0); fails += Test_N_VClone(X, length, 0); fails += Test_N_VCloneEmptyVectorArray(5, X, 0); fails += Test_N_VCloneVectorArray(5, X, length, 0); /* Test vector math kernels */ fails += Test_N_VConst(X, length, 0); fails += Test_N_VLinearSum(X, Y, Z, length, 0); fails += Test_N_VProd(X, Y, Z, length, 0); fails += Test_N_VDiv(X, Y, Z, length, 0); fails += Test_N_VScale(X, Z, length, 0); fails += Test_N_VAbs(X, Z, length, 0); fails += Test_N_VInv(X, Z, length, 0); fails += Test_N_VAddConst(X, Z, length, 0); fails += Test_N_VDotProd(X, Y, length, 0); fails += Test_N_VMaxNorm(X, length, 0); fails += Test_N_VWrmsNorm(X, Y, length, 0); fails += Test_N_VWrmsNormMask(X, Y, Z, length, 0); fails += Test_N_VMin(X, length, 0); fails += Test_N_VWL2Norm(X, Y, length, 0); fails += Test_N_VL1Norm(X, length, 0); if (length >= 3) fails += Test_N_VCompare(X, Z, length, 0); fails += Test_N_VInvTest(X, Z, length, 0); if (length >= 7) fails += Test_N_VConstrMask(X, Y, Z, length, 0); fails += Test_N_VMinQuotient(X, Y, length, 0); /* Fused and vector array operations tests (disabled) */ printf("\nTesting fused and vector array operations (disabled):\n\n"); /* create vector and disable all fused and vector array operations */ U = N_VClone(X); if (U == NULL) { N_VDestroy(X); N_VDestroy(Y); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } retval = N_VEnableFusedOps_Cuda(U, SUNFALSE); if (retval != 0) { N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* fused operations */ fails += Test_N_VLinearCombination(U, length, 0); fails += Test_N_VScaleAddMulti(U, length, 0); fails += Test_N_VDotProdMulti(U, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(U, length, 0); fails += Test_N_VScaleVectorArray(U, length, 0); fails += Test_N_VConstVectorArray(U, length, 0); fails += Test_N_VWrmsNormVectorArray(U, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(U, length, 0); fails += Test_N_VScaleAddMultiVectorArray(U, length, 0); fails += Test_N_VLinearCombinationVectorArray(U, length, 0); /* Fused and vector array operations tests (enabled) */ printf("\nTesting fused and vector array operations (enabled):\n\n"); /* create vector and enable all fused and vector array operations */ V = N_VClone(X); retval = N_VEnableFusedOps_Cuda(V, SUNTRUE); if (V == NULL) { N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } if (retval != 0) { N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); N_VDestroy(V); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* fused operations */ fails += Test_N_VLinearCombination(V, length, 0); fails += Test_N_VScaleAddMulti(V, length, 0); fails += Test_N_VDotProdMulti(V, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(V, length, 0); fails += Test_N_VScaleVectorArray(V, length, 0); 
fails += Test_N_VConstVectorArray(V, length, 0); fails += Test_N_VWrmsNormVectorArray(V, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(V, length, 0); fails += Test_N_VScaleAddMultiVectorArray(V, length, 0); fails += Test_N_VLinearCombinationVectorArray(V, length, 0); /* local reduction operations */ printf("\nTesting local reduction operations:\n\n"); fails += Test_N_VDotProdLocal(X, Y, length, 0); fails += Test_N_VMaxNormLocal(X, length, 0); fails += Test_N_VMinLocal(X, length, 0); fails += Test_N_VL1NormLocal(X, length, 0); fails += Test_N_VWSqrSumLocal(X, Y, length, 0); fails += Test_N_VWSqrSumMaskLocal(X, Y, Z, length, 0); fails += Test_N_VInvTestLocal(X, Z, length, 0); if (length >= 7) fails += Test_N_VConstrMaskLocal(X, Y, Z, length, 0); fails += Test_N_VMinQuotientLocal(X, Y, length, 0); /* XBraid interface operations */ printf("\nTesting XBraid interface operations:\n\n"); fails += Test_N_VBufSize(X, length, 0); fails += Test_N_VBufPack(X, length, 0); fails += Test_N_VBufUnpack(X, length, 0); printf("\n=====> Beginning teardown\n"); /* Free vectors */ N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); N_VDestroy(V); if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); /* Synchronize */ hipDeviceSynchronize(); printf("=====> Teardown complete\n\n"); } /* Print result */ if (fails) { printf("\n\nFAIL: NVector module failed %i tests \n\n", fails); } else { printf("\n\nSUCCESS: NVector module passed all tests \n\n"); } hipStreamDestroy(stream); delete stream_exec_policy; delete reduce_exec_policy; } hipDeviceSynchronize(); hipDeviceReset(); return(fails); } /* ---------------------------------------------------------------------- * Implementation specific utility functions for vector tests * --------------------------------------------------------------------*/ int check_ans(realtype ans, N_Vector X, sunindextype length) { int failure = 0; sunindextype i; realtype *Xdata; N_VCopyFromDevice_Cuda(X); Xdata = N_VGetHostArrayPointer_Cuda(X); /* check vector data */ for (i = 0; i < length; i++) { if (failure += FNEQ(Xdata[i], ans)) { printf("check_ans fail: Xdata[%ld] = %f, expected Xdata[%ld] = %f\n", (long int)i, Xdata[i], (long int)i, ans); } } return (failure > ZERO) ? (1) : (0); } booleantype has_data(N_Vector X) { /* check if vector data is non-null */ if ((N_VGetHostArrayPointer_Cuda(X) == NULL) && (N_VGetDeviceArrayPointer_Cuda(X) == NULL)) return SUNFALSE; return SUNTRUE; } void set_element(N_Vector X, sunindextype i, realtype val) { /* set i-th element of data array */ set_element_range(X, i, i, val); } void set_element_range(N_Vector X, sunindextype is, sunindextype ie, realtype val) { sunindextype i; realtype* xd; /* set elements [is,ie] of the data array */ N_VCopyFromDevice_Cuda(X); xd = N_VGetHostArrayPointer_Cuda(X); for(i = is; i <= ie; i++) xd[i] = val; N_VCopyToDevice_Cuda(X); } realtype get_element(N_Vector X, sunindextype i) { /* get i-th element of data array */ N_VCopyFromDevice_Cuda(X); return (N_VGetHostArrayPointer_Cuda(X))[i]; } double max_time(N_Vector X, double time) { /* not running in parallel, just return input time */ return(time); } void sync_device(N_Vector x) { /* sync with GPU */ hipDeviceSynchronize(); return; } void* sunalloc(size_t mem_size) { void* ptr; hipError_t err; err = hipMallocManaged(&ptr, mem_size); if (err != hipSuccess) { printf("Error in sunalloc\n"); ptr = NULL; } return ptr; } void sunfree(void* ptr) { hipFree(ptr); }
af16661a5da24c4f7830bcd94d24fe1e14c8e1ca.cu
/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles, and Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the NVECTOR CUDA module * implementation. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <sundials/sundials_math.h> #include <sundials/sundials_types.h> #include <nvector/nvector_serial.h> #include <nvector/nvector_cuda.h> #include "custom_memory_helper.h" #include "test_nvector.h" /* Private custom allocator functions */ static void* sunalloc(size_t); static void sunfree(void* ptr); /* CUDA vector variants */ enum mem_type { UNMANAGED, MANAGED, SUNMEMORY, MANAGED_ALLOC }; enum pol_type { DEFAULT_POL, DEFAULT_POL_W_STREAM, GRID_STRIDE }; /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { int fails = 0; /* counter for test failures */ int retval; /* function return value */ sunindextype length; /* vector length */ N_Vector U, V, X, Y, Z; /* test vectors */ int print_timing; /* turn timing on/off */ int threadsPerBlock; /* cuda block size */ cudaStream_t stream; /* cuda stream */ int memtype, policy; /* check input and set vector length */ if (argc < 4){ printf("ERROR: THREE (3) Inputs required: vector length, CUDA threads per block (0 for default), print timing \n"); return(-1); } length = (sunindextype) atol(argv[1]); if (length <= 0) { printf("ERROR: length of vector must be a positive integer\n"); return(-1); } threadsPerBlock = (int) atoi(argv[2]); if (threadsPerBlock < 0 || threadsPerBlock % 32) { printf("ERROR: CUDA threads per block must be 0 to use the default or a multiple of 32\n"); return(-1); } print_timing = atoi(argv[3]); SetTiming(print_timing, 0); /* test with all policy variants */ for (policy=DEFAULT_POL; policy<=GRID_STRIDE; ++policy) { int actualThreadsPerBlock = threadsPerBlock ? 
threadsPerBlock : 256; SUNCudaExecPolicy* stream_exec_policy = NULL; SUNCudaExecPolicy* reduce_exec_policy = NULL; cudaStreamCreate(&stream); if (policy == DEFAULT_POL_W_STREAM) { stream_exec_policy = new SUNCudaThreadDirectExecPolicy(actualThreadsPerBlock, stream); reduce_exec_policy = new SUNCudaBlockReduceExecPolicy(actualThreadsPerBlock, 0, stream); } else if (policy == GRID_STRIDE) { stream_exec_policy = new SUNCudaGridStrideExecPolicy(actualThreadsPerBlock, 1); reduce_exec_policy = new SUNCudaBlockReduceExecPolicy(actualThreadsPerBlock, 1); } /* test with all memory variants */ for (memtype=UNMANAGED; memtype<=MANAGED_ALLOC; ++memtype) { SUNMemoryHelper mem_helper = NULL; printf("=====> Beginning setup\n\n"); if (memtype==UNMANAGED) { printf("Testing CUDA N_Vector, policy %d\n", policy); } else if (memtype==MANAGED) { printf("Testing CUDA N_Vector with managed memory, policy %d\n", policy); } else if (memtype==MANAGED_ALLOC) { printf("Testing CUDA N_Vector with user allocator, policy %d\n", policy); } else if (memtype==SUNMEMORY) { printf("Testing CUDA N_Vector with SUNMemoryHelper, policy %d\n", policy); mem_helper = MyMemoryHelper(); } printf("Vector length: %ld \n", (long int) length); /* Create new vectors */ if (memtype == UNMANAGED) X = N_VNew_Cuda(length); else if (memtype == MANAGED) X = N_VNewManaged_Cuda(length); else if (memtype == MANAGED_ALLOC) X = N_VMakeWithManagedAllocator_Cuda(length, sunalloc, sunfree); else if (memtype == SUNMEMORY) X = N_VNewWithMemHelp_Cuda(length, SUNFALSE, mem_helper); if (X == NULL) { delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } if (stream_exec_policy != NULL && reduce_exec_policy != NULL) { if (N_VSetKernelExecPolicy_Cuda(X, stream_exec_policy, reduce_exec_policy)) { N_VDestroy(X); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to set kernel execution policy \n\n"); return(1); } printf("Using non-default kernel execution policy\n"); printf("Threads per block: %d\n\n", actualThreadsPerBlock); } /* Fill vector with uniform random data in [-1,1] */ realtype* xdata = N_VGetHostArrayPointer_Cuda(X); for (sunindextype j=0; j<length; j++) xdata[j] = ((realtype) rand() / (realtype) RAND_MAX)*2-1; N_VCopyToDevice_Cuda(X); /* Clone additional vectors for testing */ Y = N_VClone(X); if (Y == NULL) { N_VDestroy(X); printf("FAIL: Unable to create a new vector \n\n"); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); return(1); } Z = N_VClone(X); if (Z == NULL) { N_VDestroy(X); N_VDestroy(Y); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* Fill vectors with uniform random data in [-1,1] */ realtype* ydata = N_VGetHostArrayPointer_Cuda(Y); realtype* zdata = N_VGetHostArrayPointer_Cuda(Z); for (sunindextype j=0; j<length; j++) { ydata[j] = ((realtype) rand() / (realtype) RAND_MAX)*2-1; zdata[j] = ((realtype) rand() / (realtype) RAND_MAX)*2-1; } N_VCopyToDevice_Cuda(Y); N_VCopyToDevice_Cuda(Z); printf("=====> Setup complete\n"); printf("=====> Beginning tests\n\n"); /* Standard vector operation tests */ printf("\nTesting standard vector operations:\n\n"); /* Check vector ID */ fails += Test_N_VGetVectorID(X, SUNDIALS_NVEC_CUDA, 0); /* Check vector length */ fails += 
Test_N_VGetLength(X, 0); /* Check vector communicator */ fails += Test_N_VGetCommunicator(X, NULL, 0); /* Test clone functions */ fails += Test_N_VCloneEmpty(X, 0); fails += Test_N_VClone(X, length, 0); fails += Test_N_VCloneEmptyVectorArray(5, X, 0); fails += Test_N_VCloneVectorArray(5, X, length, 0); /* Test vector math kernels */ fails += Test_N_VConst(X, length, 0); fails += Test_N_VLinearSum(X, Y, Z, length, 0); fails += Test_N_VProd(X, Y, Z, length, 0); fails += Test_N_VDiv(X, Y, Z, length, 0); fails += Test_N_VScale(X, Z, length, 0); fails += Test_N_VAbs(X, Z, length, 0); fails += Test_N_VInv(X, Z, length, 0); fails += Test_N_VAddConst(X, Z, length, 0); fails += Test_N_VDotProd(X, Y, length, 0); fails += Test_N_VMaxNorm(X, length, 0); fails += Test_N_VWrmsNorm(X, Y, length, 0); fails += Test_N_VWrmsNormMask(X, Y, Z, length, 0); fails += Test_N_VMin(X, length, 0); fails += Test_N_VWL2Norm(X, Y, length, 0); fails += Test_N_VL1Norm(X, length, 0); if (length >= 3) fails += Test_N_VCompare(X, Z, length, 0); fails += Test_N_VInvTest(X, Z, length, 0); if (length >= 7) fails += Test_N_VConstrMask(X, Y, Z, length, 0); fails += Test_N_VMinQuotient(X, Y, length, 0); /* Fused and vector array operations tests (disabled) */ printf("\nTesting fused and vector array operations (disabled):\n\n"); /* create vector and disable all fused and vector array operations */ U = N_VClone(X); if (U == NULL) { N_VDestroy(X); N_VDestroy(Y); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } retval = N_VEnableFusedOps_Cuda(U, SUNFALSE); if (retval != 0) { N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* fused operations */ fails += Test_N_VLinearCombination(U, length, 0); fails += Test_N_VScaleAddMulti(U, length, 0); fails += Test_N_VDotProdMulti(U, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(U, length, 0); fails += Test_N_VScaleVectorArray(U, length, 0); fails += Test_N_VConstVectorArray(U, length, 0); fails += Test_N_VWrmsNormVectorArray(U, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(U, length, 0); fails += Test_N_VScaleAddMultiVectorArray(U, length, 0); fails += Test_N_VLinearCombinationVectorArray(U, length, 0); /* Fused and vector array operations tests (enabled) */ printf("\nTesting fused and vector array operations (enabled):\n\n"); /* create vector and enable all fused and vector array operations */ V = N_VClone(X); retval = N_VEnableFusedOps_Cuda(V, SUNTRUE); if (V == NULL) { N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } if (retval != 0) { N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); N_VDestroy(V); delete stream_exec_policy; delete reduce_exec_policy; if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* fused operations */ fails += Test_N_VLinearCombination(V, length, 0); fails += Test_N_VScaleAddMulti(V, length, 0); fails += Test_N_VDotProdMulti(V, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(V, length, 0); fails += Test_N_VScaleVectorArray(V, length, 0); 
fails += Test_N_VConstVectorArray(V, length, 0); fails += Test_N_VWrmsNormVectorArray(V, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(V, length, 0); fails += Test_N_VScaleAddMultiVectorArray(V, length, 0); fails += Test_N_VLinearCombinationVectorArray(V, length, 0); /* local reduction operations */ printf("\nTesting local reduction operations:\n\n"); fails += Test_N_VDotProdLocal(X, Y, length, 0); fails += Test_N_VMaxNormLocal(X, length, 0); fails += Test_N_VMinLocal(X, length, 0); fails += Test_N_VL1NormLocal(X, length, 0); fails += Test_N_VWSqrSumLocal(X, Y, length, 0); fails += Test_N_VWSqrSumMaskLocal(X, Y, Z, length, 0); fails += Test_N_VInvTestLocal(X, Z, length, 0); if (length >= 7) fails += Test_N_VConstrMaskLocal(X, Y, Z, length, 0); fails += Test_N_VMinQuotientLocal(X, Y, length, 0); /* XBraid interface operations */ printf("\nTesting XBraid interface operations:\n\n"); fails += Test_N_VBufSize(X, length, 0); fails += Test_N_VBufPack(X, length, 0); fails += Test_N_VBufUnpack(X, length, 0); printf("\n=====> Beginning teardown\n"); /* Free vectors */ N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); N_VDestroy(V); if (mem_helper) SUNMemoryHelper_Destroy(mem_helper); /* Synchronize */ cudaDeviceSynchronize(); printf("=====> Teardown complete\n\n"); } /* Print result */ if (fails) { printf("\n\nFAIL: NVector module failed %i tests \n\n", fails); } else { printf("\n\nSUCCESS: NVector module passed all tests \n\n"); } cudaStreamDestroy(stream); delete stream_exec_policy; delete reduce_exec_policy; } cudaDeviceSynchronize(); cudaDeviceReset(); return(fails); } /* ---------------------------------------------------------------------- * Implementation specific utility functions for vector tests * --------------------------------------------------------------------*/ int check_ans(realtype ans, N_Vector X, sunindextype length) { int failure = 0; sunindextype i; realtype *Xdata; N_VCopyFromDevice_Cuda(X); Xdata = N_VGetHostArrayPointer_Cuda(X); /* check vector data */ for (i = 0; i < length; i++) { if (failure += FNEQ(Xdata[i], ans)) { printf("check_ans fail: Xdata[%ld] = %f, expected Xdata[%ld] = %f\n", (long int)i, Xdata[i], (long int)i, ans); } } return (failure > ZERO) ? (1) : (0); } booleantype has_data(N_Vector X) { /* check if vector data is non-null */ if ((N_VGetHostArrayPointer_Cuda(X) == NULL) && (N_VGetDeviceArrayPointer_Cuda(X) == NULL)) return SUNFALSE; return SUNTRUE; } void set_element(N_Vector X, sunindextype i, realtype val) { /* set i-th element of data array */ set_element_range(X, i, i, val); } void set_element_range(N_Vector X, sunindextype is, sunindextype ie, realtype val) { sunindextype i; realtype* xd; /* set elements [is,ie] of the data array */ N_VCopyFromDevice_Cuda(X); xd = N_VGetHostArrayPointer_Cuda(X); for(i = is; i <= ie; i++) xd[i] = val; N_VCopyToDevice_Cuda(X); } realtype get_element(N_Vector X, sunindextype i) { /* get i-th element of data array */ N_VCopyFromDevice_Cuda(X); return (N_VGetHostArrayPointer_Cuda(X))[i]; } double max_time(N_Vector X, double time) { /* not running in parallel, just return input time */ return(time); } void sync_device(N_Vector x) { /* sync with GPU */ cudaDeviceSynchronize(); return; } void* sunalloc(size_t mem_size) { void* ptr; cudaError_t err; err = cudaMallocManaged(&ptr, mem_size); if (err != cudaSuccess) { printf("Error in sunalloc\n"); ptr = NULL; } return ptr; } void sunfree(void* ptr) { cudaFree(ptr); }
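/* Invocation summary for the test driver above (the .hip and .cu variants parse the same
   arguments): argv[1] = vector length (> 0), argv[2] = threads per block (0 selects the
   default of 256, otherwise a multiple of 32), argv[3] = print-timing flag. For example,
   assuming the binary is named test_nvector_cuda:

       ./test_nvector_cuda 100000 128 0

   runs every memory variant (unmanaged, managed, SUNMemoryHelper, user allocator) under
   each execution-policy variant (default, default with stream, grid-stride). */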
7be6145ca0d6a8eaf1b6045ccae539edf790c6f2.hip
// !!! This is a file automatically generated by hipify!!! //THE COPY SHOULD WORK IN LINUX! #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include "hip/hip_runtime_api.h" #include "hip/driver_types.h" #include <thrust\reduce.h> #include <thrust\execution_policy.h> #include <thrust\device_vector.h> #include <thrust\host_vector.h> #include <thrust\for_each.h> #include <thrust\transform.h> #include <thrust\iterator\zip_iterator.h> #include <thrust\copy.h> #include <stdio.h> #include <cmath> #include <time.h> #include <stdlib.h> #include <fstream> #include <iostream> #include <omp.h> using namespace std; // Now the plate must be square. #Divisions should be some multiple of 32, preferably some 2^x. #define LENS 1. #define TH_COND 16. #define DZ .01 #define DIVISIONS 512. #define TOLERANCE 1.e-5 #define REAL float struct absdiff { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { // diffmat = (redi-redf)+blacki-blackf) thrust::get<4>(t) = fabsf(thrust::get<0>(t) - thrust::get<1>(t)) + fabsf(thrust::get<2>(t) - thrust::get<3>(t)); } }; REAL *cornerSource(REAL BC1, REAL BC2, REAL coff) { REAL *sc; sc = new REAL[2]; if (BC1>0) { if (BC2>0) { sc[0] = 2.0f * coff * (BC1 + BC2); sc[1] = 4.0f * coff; } else { sc[0] = 2.0f * coff * BC1; sc[1] = 2.0f * coff; } } else if (BC2>0) { sc[0] = 2.0f * coff * BC2; sc[1] = 2.0f * coff; } else { sc[0] = 0.0f; sc[1] = 0.0f; } return sc; } void coefficientFills(REAL *a_ext, REAL *a_int, REAL *temps, REAL a_base, const int turn) { const int grdx = int(DIVISIONS)/2; const int ar_len = int(grdx*DIVISIONS); //Well at least we get to call this with omp. omp_set_num_threads( 8 ); #pragma omp parallel for default(none),private(sp,k),shared(grdx,a_int,a_ext,temps,a_base,turn,ar_len) for (int k = 0; k < ar_len; k++) { if (k < grdx) { // If bottom left (SouthWest) corner and red. if (k == 0 && turn == 0) { REAL *sp; sp = new REAL[2]; sp = cornerSource(temps[2],temps[3], a_base); a_ext[k] = sp[0]; a_int[k] = 2.0f * a_base + sp[1]; free(sp); } // If bottom right (SouthEast) corner and black. else if (k == (grdx-1) && turn == 1) { REAL *sp; sp = new REAL[2]; sp = cornerSource(temps[2],temps[1], a_base); a_ext[k] = sp[0]; a_int[k] = 2.0f * a_base + sp[1]; free(sp); } // Bottom row no corner. else { // Check South Boundary Condition. If it's constant temperature: if (temps[2] > 0) { a_ext[k] = 2.0f * a_base * temps[2]; a_int[k] = 5.0f * a_base; } else { a_int[k] = 3.0f * a_base; } } } // If top row else if (k >= (ar_len-grdx)) { // If top right (NorthEast) corner and red. if ((k == (ar_len-1)) && turn == 0) { REAL *sp; sp = new REAL[2]; sp = cornerSource(temps[0],temps[1], a_base); a_ext[k] = sp[0]; a_int[k] = 2.0f * a_base + sp[1]; free(sp); } // If top left (NorthWest) corner and black. else if ((k == (ar_len-grdx)) && turn == 1) { REAL *sp; sp = new REAL[2]; sp = cornerSource(temps[0],temps[3], a_base); a_ext[k] = sp[0]; a_int[k] = 2.0f * a_base + sp[1]; free(sp); } // Top row no corner. The top row is the compliment of the bottom row so the operation for seq is reversed. else { // Check North Boundary Condition. If it's constant temperature: if (temps[0]>0) { a_ext[k] = 2.0f * a_base * temps[0]; a_int[k] = 5.0f * a_base; } else { a_int[k]= 3.0f * a_base; } } } // Check side walls. This is West when the matrix starts the row, that's when seq is -1. 
else if (((k % grdx)== 0) && (((k/grdx + turn) & 1) == 0)) { if (temps[3]>0) { a_ext[k] = 2.0f * a_base * temps[3]; a_int[k] = 5.0f * a_base; } else { a_int[k]= 3.0f * a_base; } } // This is East when the matrix ends the row. else if (((k % (grdx)) == (grdx-1)) && (((k/grdx + turn) & 1))) { if (temps[1]>0) { a_ext[k] = 2.0f * a_base * temps[1]; a_int[k] = 5.0f * a_base; } else { a_int[k]= 3.0f * a_base; } //cout << "East: Turn: " << turn << " Interior: " << a_int[k] << " Exterior: " << a_ext[k] << " Modulo: " << k % grdx << " Row: " << k/grdx << endl; } // Every cell not on an edge or corner. else { a_int[k] = 4.0f * a_base; } } } __global__ void differencingOperation(REAL *active_half, REAL *passive_half, REAL *a_e, REAL *a_i, REAL *ac, const int turn) { const int grd = int(DIVISIONS)/2; int ind_x = blockIdx.x * blockDim.x + threadIdx.x; int ind_y = blockIdx.y * blockDim.y + threadIdx.y; int id = ind_x + ind_y * grd; // Negative seq means active half starts first. Positive seq means passive half starts first. // If it's one it's odd if it's 0 it's even. int seq = ((turn + ind_y) & 1) ? 1:-1; if (id<(grd*int(DIVISIONS))) { // If bottom row. if (ind_y == 0) { // If bottom left (SouthWest) corner and red. if (ind_x == 0 && turn == 0) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id+grd])+a_e[id])/a_i[id]; } // If bottom right (SouthEast) corner and black. else if (ind_x == (grd-1) && turn == 1) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id+grd])+a_e[id])/a_i[id]; } // Bottom row no corner. else { active_half[id] = (ac[0]*(passive_half[id]+passive_half[id+grd]+passive_half[id+seq])+a_e[id])/a_i[id]; } } // If top row else if (ind_y == (int(DIVISIONS)-1)) { // If top right (NorthEast) corner and red. if (ind_x == (grd-1) && turn == 0) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id-grd])+a_e[id])/a_i[id]; } // If top left (NorthWest) corner and black. else if (ind_x == 0 && turn == 1) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id-grd])+a_e[id])/a_i[id]; } // Top row no corner. The top row is the compliment of the bottom row so the operation for seq is reversed. else { active_half[id] = (ac[0]*(passive_half[id]+passive_half[id-grd]+passive_half[id+seq])+a_e[id])/a_i[id]; } } // Check side walls. This is West when the matrix starts the row, that's when seq is -1. else if (ind_x == 0 && seq == -1) { active_half[id] = (ac[0]*(passive_half[id]+ passive_half[id+grd] + passive_half[id-grd])+a_e[id])/a_i[id]; } // This is East when the matrix ends the row. else if (ind_x == (grd-1) && seq == 1) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id+grd] + passive_half[id-grd])+a_e[id])/a_i[id]; } // Every cell not on an edge or corner. else { active_half[id] = (ac[0]/a_i[id]) * (passive_half[id]+passive_half[id+grd]+passive_half[id-grd]+passive_half[id+seq]); } } } int main() { // Get device properties and set threads to be max thread size. // We need the threads to fit the matrix correctly so reject the program if they don't. hipDeviceProp_t prop; hipGetDeviceProperties( &prop, 0 ); int mt = prop.maxThreadsPerBlock; int thread = int(sqrtf(float(mt))); if (int(DIVISIONS)%(2*thread) != 0) { printf("Error: DIVISIONS must be a multiple of %.i. That's twice the thread dimension.\n",(2*thread)); return 0; } const int rw = int(DIVISIONS)/2; const int sz = rw*int(DIVISIONS); cout << "Begin!!! 
\n\n"; thrust::host_vector<REAL> red(sz); thrust::host_vector<REAL> black(sz); REAL ds = (REAL)LENS/((REAL)(DIVISIONS-1)); REAL A = (REAL)DZ * ds; const int y_gr = (int)DIVISIONS/thread; const int x_gr = y_gr/2; REAL dm2; // Get initial conditions // cout << "Provide Boundary conditions for each edge of the slab.\nEnter Constant Temperature in KELVIN\nor a negative number for an insulated boundary:\nNorth: \n"; //cin >> temp_c[0]; // cout << "East: \n"; // cin >> temp_c[1]; // cout << "South: \n"; // cin >> temp_c[2]; // cout << "West: \n"; // cin >> temp_c[3]; // // Get Guess for slab temperature // cout << "Provide a guess Temperature for the slab in Kelvin:\n"; // cin >> temp_c[4]; REAL *ared_caste, *ablack_caste, *ared_casti, *ablack_casti, *a_b, *ahost_red_e, *ahost_black_e, *ahost_red_i, *ahost_black_i; ahost_red_e = (REAL *) malloc(sz*sizeof(REAL)); ahost_black_e = (REAL *) malloc(sz*sizeof(REAL)); ahost_red_i = (REAL *) malloc(sz*sizeof(REAL)); ahost_black_i = (REAL *) malloc(sz*sizeof(REAL)); REAL ab[2] = {(REAL)TH_COND * A / ds, 0}; REAL temp_c[4]; // For debugging: temp_c[0] = 2.; temp_c[1] = 1.; temp_c[2] = 1.; temp_c[3] = 1.; REAL guess = .5; // I know that this can get confusing, but in the coefficients (variables starting with a), the e stands for external and the i stands for internal // If it helps any, they're always built in the same order, externals first red before black. // Set up host vectors and fill them with coefficients. for (int k = 0; k<sz; k++) { ahost_red_e[k] = 0.f; ahost_black_e[k] = 0.f; ahost_red_i[k] = 0.f; ahost_black_i[k] = 0.f; } coefficientFills(ahost_red_e, ahost_red_i, temp_c, ab[0], 0); coefficientFills(ahost_black_e, ahost_black_i, temp_c, ab[0], 1); // Copy the Initial arrays to the GPU. thrust::device_vector<REAL> d_red_i(sz,guess); thrust::device_vector<REAL> d_red_f(sz,guess); thrust::device_vector<REAL> d_black_i(sz,guess); thrust::device_vector<REAL> d_black_f(sz,guess); // Copy coefficient vectors to device. hipMalloc((void **) &ared_caste, sizeof(REAL)*sz); hipMalloc((void **) &ablack_caste, sizeof(REAL)*sz); hipMalloc((void **) &ared_casti, sizeof(REAL)*sz); hipMalloc((void **) &ablack_casti, sizeof(REAL)*sz); hipMalloc((void **) &a_b, sizeof(REAL)*2); // Fill the difference matrix to be reduced as well. thrust::device_vector<REAL> diff_mat(sz); //Now make all the raw pointers so you can pass them to the kernel. 
REAL *red_cast = thrust::raw_pointer_cast(&d_red_f[0]); REAL *black_cast = thrust::raw_pointer_cast(&d_black_f[0]); REAL *red_casti = thrust::raw_pointer_cast(&d_red_i[0]); REAL *black_casti = thrust::raw_pointer_cast(&d_black_i[0]); //The coefficients are vanilla CUDA/C++ hipMemcpy(ared_caste, ahost_red_e, sizeof(REAL)*sz, hipMemcpyHostToDevice); hipMemcpy(ablack_caste, ahost_black_e, sizeof(REAL)*sz, hipMemcpyHostToDevice); hipMemcpy(ared_casti, ahost_red_i, sizeof(REAL)*sz, hipMemcpyHostToDevice); hipMemcpy(ablack_casti, ahost_black_i, sizeof(REAL)*sz, hipMemcpyHostToDevice); hipMemcpy(a_b, ab, sizeof(REAL)*2, hipMemcpyHostToDevice); dim3 grids(x_gr,y_gr); dim3 threads(thread,thread); bool stops = true; int iter = 0; double wall0 = clock(); while (stops) { hipLaunchKernelGGL(( differencingOperation) , dim3(grids), dim3(threads) , 0, 0, red_cast, black_cast, ared_caste, ared_casti, a_b, 0); hipDeviceSynchronize(); hipLaunchKernelGGL(( differencingOperation) , dim3(grids), dim3(threads) , 0, 0, black_cast, red_cast, ablack_caste, ablack_casti, a_b, 1); hipDeviceSynchronize(); thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(d_red_i.begin(), d_red_f.begin(), d_black_i.begin(), d_black_f.begin(), diff_mat.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_red_i.end(), d_red_f.end(), d_black_i.end(), d_black_f.end(), diff_mat.end())), absdiff()); hipDeviceSynchronize(); dm2 = thrust::reduce(diff_mat.begin(),diff_mat.end()); iter++; if (((dm2 /REAL(sz*2)) < TOLERANCE) || (iter>1e7)) { stops = false; } //d_red_i = d_red_f; hipMemcpy(red_casti, red_cast, sz * sizeof(REAL), hipMemcpyDeviceToDevice); //thrust::copy(d_red_f.begin(), d_red_f.end(), d_red_i.begin()); //d_black_i = d_black_f; hipMemcpy(black_casti, black_cast, sz * sizeof(REAL), hipMemcpyDeviceToDevice); //thrust::copy(d_black_f.begin(), d_black_f.end(), d_black_i.begin()); hipDeviceSynchronize(); if (iter%200 == 0) { cout << "Iteration: " << iter << " dm:" << dm2/REAL(sz*2) << endl; cout << "First red: " << d_red_f[0] << " Last Black:" << d_black_f[sz-1] << endl; cout << "Random red: " << d_red_i[8201] << " Random Black:" << d_black_i[105] << endl; } } double wall1 = clock(); double timed = (wall1-wall0)/CLOCKS_PER_SEC; printf("Outside the loop\n"); printf("It converged after %d iterations: \n",iter); cout << "That took: " << timed << " seconds" << endl; thrust::copy(d_red_f.begin(), d_red_f.end(), red.begin()); thrust::copy(d_black_f.begin(), d_black_f.end(), black.begin()); // Write it out! ofstream filewrite; filewrite.open("C:\\Users\\Philadelphia\\Documents\\1_SweptTimeResearch\\GaussSeidel\\GaussSeidelCUDA\\GS_outputCUDA.dat", ios::trunc); filewrite << DIVISIONS << "\n" << ds; for (int k = 0; k < sz; k++) { filewrite << "\n" << red[k] << "\n" << black[k]; } filewrite.close(); hipFree(ared_caste); hipFree(ared_casti); hipFree(ablack_caste); hipFree(ablack_casti); hipFree(a_b); free(ared_caste); free(ared_casti); free(ablack_caste); free(ablack_casti); free(a_b); //hipDeviceReset(); return 0; }
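Editor's note on the convergence test in the solver above: it zips the previous and current red/black vectors, writes |Δred| + |Δblack| into diff_mat through the absdiff functor, then reduces that to a single residual. Below is a minimal self-contained sketch of the same Thrust pattern with small hypothetical vectors standing in for the solver state. Note that this copy of the solver keeps Windows-style backslash Thrust include paths, which will not resolve on Linux despite the "SHOULD WORK IN LINUX" comment; the sketch uses forward slashes.

#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <cmath>
#include <cstdio>

// Same idea as the absdiff functor above: out = |a0 - a1| + |b0 - b1|.
struct absdiff4 {
  template <typename Tuple>
  __host__ __device__ void operator()(Tuple t) {
    thrust::get<4>(t) = fabsf(thrust::get<0>(t) - thrust::get<1>(t)) +
                        fabsf(thrust::get<2>(t) - thrust::get<3>(t));
  }
};

int main() {
  const int n = 8;  // hypothetical size, stands in for rw*DIVISIONS
  thrust::device_vector<float> red_old(n, 1.0f), red_new(n, 1.25f);
  thrust::device_vector<float> blk_old(n, 2.0f), blk_new(n, 2.5f);
  thrust::device_vector<float> diff(n);
  thrust::for_each(
      thrust::make_zip_iterator(thrust::make_tuple(red_old.begin(), red_new.begin(),
                                                   blk_old.begin(), blk_new.begin(),
                                                   diff.begin())),
      thrust::make_zip_iterator(thrust::make_tuple(red_old.end(), red_new.end(),
                                                   blk_old.end(), blk_new.end(),
                                                   diff.end())),
      absdiff4());
  float dm = thrust::reduce(diff.begin(), diff.end());  // 8 * (0.25 + 0.5) = 6
  std::printf("total change: %f\n", dm);
  return 0;
}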
7be6145ca0d6a8eaf1b6045ccae539edf790c6f2.cu
//THE COPY SHOULD WORK IN LINUX! #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include "cuda_runtime_api.h" #include "driver_types.h" #include <thrust\reduce.h> #include <thrust\execution_policy.h> #include <thrust\device_vector.h> #include <thrust\host_vector.h> #include <thrust\for_each.h> #include <thrust\transform.h> #include <thrust\iterator\zip_iterator.h> #include <thrust\copy.h> #include <stdio.h> #include <cmath> #include <time.h> #include <stdlib.h> #include <fstream> #include <iostream> #include <omp.h> using namespace std; // Now the plate must be square. #Divisions should be some multiple of 32, preferably some 2^x. #define LENS 1. #define TH_COND 16. #define DZ .01 #define DIVISIONS 512. #define TOLERANCE 1.e-5 #define REAL float struct absdiff { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { // diffmat = (redi-redf)+blacki-blackf) thrust::get<4>(t) = fabsf(thrust::get<0>(t) - thrust::get<1>(t)) + fabsf(thrust::get<2>(t) - thrust::get<3>(t)); } }; REAL *cornerSource(REAL BC1, REAL BC2, REAL coff) { REAL *sc; sc = new REAL[2]; if (BC1>0) { if (BC2>0) { sc[0] = 2.0f * coff * (BC1 + BC2); sc[1] = 4.0f * coff; } else { sc[0] = 2.0f * coff * BC1; sc[1] = 2.0f * coff; } } else if (BC2>0) { sc[0] = 2.0f * coff * BC2; sc[1] = 2.0f * coff; } else { sc[0] = 0.0f; sc[1] = 0.0f; } return sc; } void coefficientFills(REAL *a_ext, REAL *a_int, REAL *temps, REAL a_base, const int turn) { const int grdx = int(DIVISIONS)/2; const int ar_len = int(grdx*DIVISIONS); //Well at least we get to call this with omp. omp_set_num_threads( 8 ); #pragma omp parallel for default(none),private(sp,k),shared(grdx,a_int,a_ext,temps,a_base,turn,ar_len) for (int k = 0; k < ar_len; k++) { if (k < grdx) { // If bottom left (SouthWest) corner and red. if (k == 0 && turn == 0) { REAL *sp; sp = new REAL[2]; sp = cornerSource(temps[2],temps[3], a_base); a_ext[k] = sp[0]; a_int[k] = 2.0f * a_base + sp[1]; free(sp); } // If bottom right (SouthEast) corner and black. else if (k == (grdx-1) && turn == 1) { REAL *sp; sp = new REAL[2]; sp = cornerSource(temps[2],temps[1], a_base); a_ext[k] = sp[0]; a_int[k] = 2.0f * a_base + sp[1]; free(sp); } // Bottom row no corner. else { // Check South Boundary Condition. If it's constant temperature: if (temps[2] > 0) { a_ext[k] = 2.0f * a_base * temps[2]; a_int[k] = 5.0f * a_base; } else { a_int[k] = 3.0f * a_base; } } } // If top row else if (k >= (ar_len-grdx)) { // If top right (NorthEast) corner and red. if ((k == (ar_len-1)) && turn == 0) { REAL *sp; sp = new REAL[2]; sp = cornerSource(temps[0],temps[1], a_base); a_ext[k] = sp[0]; a_int[k] = 2.0f * a_base + sp[1]; free(sp); } // If top left (NorthWest) corner and black. else if ((k == (ar_len-grdx)) && turn == 1) { REAL *sp; sp = new REAL[2]; sp = cornerSource(temps[0],temps[3], a_base); a_ext[k] = sp[0]; a_int[k] = 2.0f * a_base + sp[1]; free(sp); } // Top row no corner. The top row is the compliment of the bottom row so the operation for seq is reversed. else { // Check North Boundary Condition. If it's constant temperature: if (temps[0]>0) { a_ext[k] = 2.0f * a_base * temps[0]; a_int[k] = 5.0f * a_base; } else { a_int[k]= 3.0f * a_base; } } } // Check side walls. This is West when the matrix starts the row, that's when seq is -1. 
else if (((k % grdx)== 0) && (((k/grdx + turn) & 1) == 0)) { if (temps[3]>0) { a_ext[k] = 2.0f * a_base * temps[3]; a_int[k] = 5.0f * a_base; } else { a_int[k]= 3.0f * a_base; } } // This is East when the matrix ends the row. else if (((k % (grdx)) == (grdx-1)) && (((k/grdx + turn) & 1))) { if (temps[1]>0) { a_ext[k] = 2.0f * a_base * temps[1]; a_int[k] = 5.0f * a_base; } else { a_int[k]= 3.0f * a_base; } //cout << "East: Turn: " << turn << " Interior: " << a_int[k] << " Exterior: " << a_ext[k] << " Modulo: " << k % grdx << " Row: " << k/grdx << endl; } // Every cell not on an edge or corner. else { a_int[k] = 4.0f * a_base; } } } __global__ void differencingOperation(REAL *active_half, REAL *passive_half, REAL *a_e, REAL *a_i, REAL *ac, const int turn) { const int grd = int(DIVISIONS)/2; int ind_x = blockIdx.x * blockDim.x + threadIdx.x; int ind_y = blockIdx.y * blockDim.y + threadIdx.y; int id = ind_x + ind_y * grd; // Negative seq means active half starts first. Positive seq means passive half starts first. // If it's one it's odd if it's 0 it's even. int seq = ((turn + ind_y) & 1) ? 1:-1; if (id<(grd*int(DIVISIONS))) { // If bottom row. if (ind_y == 0) { // If bottom left (SouthWest) corner and red. if (ind_x == 0 && turn == 0) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id+grd])+a_e[id])/a_i[id]; } // If bottom right (SouthEast) corner and black. else if (ind_x == (grd-1) && turn == 1) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id+grd])+a_e[id])/a_i[id]; } // Bottom row no corner. else { active_half[id] = (ac[0]*(passive_half[id]+passive_half[id+grd]+passive_half[id+seq])+a_e[id])/a_i[id]; } } // If top row else if (ind_y == (int(DIVISIONS)-1)) { // If top right (NorthEast) corner and red. if (ind_x == (grd-1) && turn == 0) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id-grd])+a_e[id])/a_i[id]; } // If top left (NorthWest) corner and black. else if (ind_x == 0 && turn == 1) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id-grd])+a_e[id])/a_i[id]; } // Top row no corner. The top row is the compliment of the bottom row so the operation for seq is reversed. else { active_half[id] = (ac[0]*(passive_half[id]+passive_half[id-grd]+passive_half[id+seq])+a_e[id])/a_i[id]; } } // Check side walls. This is West when the matrix starts the row, that's when seq is -1. else if (ind_x == 0 && seq == -1) { active_half[id] = (ac[0]*(passive_half[id]+ passive_half[id+grd] + passive_half[id-grd])+a_e[id])/a_i[id]; } // This is East when the matrix ends the row. else if (ind_x == (grd-1) && seq == 1) { active_half[id] = (ac[0]*(passive_half[id] + passive_half[id+grd] + passive_half[id-grd])+a_e[id])/a_i[id]; } // Every cell not on an edge or corner. else { active_half[id] = (ac[0]/a_i[id]) * (passive_half[id]+passive_half[id+grd]+passive_half[id-grd]+passive_half[id+seq]); } } } int main() { // Get device properties and set threads to be max thread size. // We need the threads to fit the matrix correctly so reject the program if they don't. cudaDeviceProp prop; cudaGetDeviceProperties( &prop, 0 ); int mt = prop.maxThreadsPerBlock; int thread = int(sqrtf(float(mt))); if (int(DIVISIONS)%(2*thread) != 0) { printf("Error: DIVISIONS must be a multiple of %.i. That's twice the thread dimension.\n",(2*thread)); return 0; } const int rw = int(DIVISIONS)/2; const int sz = rw*int(DIVISIONS); cout << "Begin!!! 
\n\n"; thrust::host_vector<REAL> red(sz); thrust::host_vector<REAL> black(sz); REAL ds = (REAL)LENS/((REAL)(DIVISIONS-1)); REAL A = (REAL)DZ * ds; const int y_gr = (int)DIVISIONS/thread; const int x_gr = y_gr/2; REAL dm2; // Get initial conditions // cout << "Provide Boundary conditions for each edge of the slab.\nEnter Constant Temperature in KELVIN\nor a negative number for an insulated boundary:\nNorth: \n"; //cin >> temp_c[0]; // cout << "East: \n"; // cin >> temp_c[1]; // cout << "South: \n"; // cin >> temp_c[2]; // cout << "West: \n"; // cin >> temp_c[3]; // // Get Guess for slab temperature // cout << "Provide a guess Temperature for the slab in Kelvin:\n"; // cin >> temp_c[4]; REAL *ared_caste, *ablack_caste, *ared_casti, *ablack_casti, *a_b, *ahost_red_e, *ahost_black_e, *ahost_red_i, *ahost_black_i; ahost_red_e = (REAL *) malloc(sz*sizeof(REAL)); ahost_black_e = (REAL *) malloc(sz*sizeof(REAL)); ahost_red_i = (REAL *) malloc(sz*sizeof(REAL)); ahost_black_i = (REAL *) malloc(sz*sizeof(REAL)); REAL ab[2] = {(REAL)TH_COND * A / ds, 0}; REAL temp_c[4]; // For debugging: temp_c[0] = 2.; temp_c[1] = 1.; temp_c[2] = 1.; temp_c[3] = 1.; REAL guess = .5; // I know that this can get confusing, but in the coefficients (variables starting with a), the e stands for external and the i stands for internal // If it helps any, they're always built in the same order, externals first red before black. // Set up host vectors and fill them with coefficients. for (int k = 0; k<sz; k++) { ahost_red_e[k] = 0.f; ahost_black_e[k] = 0.f; ahost_red_i[k] = 0.f; ahost_black_i[k] = 0.f; } coefficientFills(ahost_red_e, ahost_red_i, temp_c, ab[0], 0); coefficientFills(ahost_black_e, ahost_black_i, temp_c, ab[0], 1); // Copy the Initial arrays to the GPU. thrust::device_vector<REAL> d_red_i(sz,guess); thrust::device_vector<REAL> d_red_f(sz,guess); thrust::device_vector<REAL> d_black_i(sz,guess); thrust::device_vector<REAL> d_black_f(sz,guess); // Copy coefficient vectors to device. cudaMalloc((void **) &ared_caste, sizeof(REAL)*sz); cudaMalloc((void **) &ablack_caste, sizeof(REAL)*sz); cudaMalloc((void **) &ared_casti, sizeof(REAL)*sz); cudaMalloc((void **) &ablack_casti, sizeof(REAL)*sz); cudaMalloc((void **) &a_b, sizeof(REAL)*2); // Fill the difference matrix to be reduced as well. thrust::device_vector<REAL> diff_mat(sz); //Now make all the raw pointers so you can pass them to the kernel. 
REAL *red_cast = thrust::raw_pointer_cast(&d_red_f[0]); REAL *black_cast = thrust::raw_pointer_cast(&d_black_f[0]); REAL *red_casti = thrust::raw_pointer_cast(&d_red_i[0]); REAL *black_casti = thrust::raw_pointer_cast(&d_black_i[0]); //The coefficients are vanilla CUDA/C++ cudaMemcpy(ared_caste, ahost_red_e, sizeof(REAL)*sz, cudaMemcpyHostToDevice); cudaMemcpy(ablack_caste, ahost_black_e, sizeof(REAL)*sz, cudaMemcpyHostToDevice); cudaMemcpy(ared_casti, ahost_red_i, sizeof(REAL)*sz, cudaMemcpyHostToDevice); cudaMemcpy(ablack_casti, ahost_black_i, sizeof(REAL)*sz, cudaMemcpyHostToDevice); cudaMemcpy(a_b, ab, sizeof(REAL)*2, cudaMemcpyHostToDevice); dim3 grids(x_gr,y_gr); dim3 threads(thread,thread); bool stops = true; int iter = 0; double wall0 = clock(); while (stops) { differencingOperation <<< grids, threads >>> (red_cast, black_cast, ared_caste, ared_casti, a_b, 0); cudaDeviceSynchronize(); differencingOperation <<< grids, threads >>> (black_cast, red_cast, ablack_caste, ablack_casti, a_b, 1); cudaDeviceSynchronize(); thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(d_red_i.begin(), d_red_f.begin(), d_black_i.begin(), d_black_f.begin(), diff_mat.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_red_i.end(), d_red_f.end(), d_black_i.end(), d_black_f.end(), diff_mat.end())), absdiff()); cudaDeviceSynchronize(); dm2 = thrust::reduce(diff_mat.begin(),diff_mat.end()); iter++; if (((dm2 /REAL(sz*2)) < TOLERANCE) || (iter>1e7)) { stops = false; } //d_red_i = d_red_f; cudaMemcpy(red_casti, red_cast, sz * sizeof(REAL), cudaMemcpyDeviceToDevice); //thrust::copy(d_red_f.begin(), d_red_f.end(), d_red_i.begin()); //d_black_i = d_black_f; cudaMemcpy(black_casti, black_cast, sz * sizeof(REAL), cudaMemcpyDeviceToDevice); //thrust::copy(d_black_f.begin(), d_black_f.end(), d_black_i.begin()); cudaDeviceSynchronize(); if (iter%200 == 0) { cout << "Iteration: " << iter << " dm:" << dm2/REAL(sz*2) << endl; cout << "First red: " << d_red_f[0] << " Last Black:" << d_black_f[sz-1] << endl; cout << "Random red: " << d_red_i[8201] << " Random Black:" << d_black_i[105] << endl; } } double wall1 = clock(); double timed = (wall1-wall0)/CLOCKS_PER_SEC; printf("Outside the loop\n"); printf("It converged after %d iterations: \n",iter); cout << "That took: " << timed << " seconds" << endl; thrust::copy(d_red_f.begin(), d_red_f.end(), red.begin()); thrust::copy(d_black_f.begin(), d_black_f.end(), black.begin()); // Write it out! ofstream filewrite; filewrite.open("C:\\Users\\Philadelphia\\Documents\\1_SweptTimeResearch\\GaussSeidel\\GaussSeidelCUDA\\GS_outputCUDA.dat", ios::trunc); filewrite << DIVISIONS << "\n" << ds; for (int k = 0; k < sz; k++) { filewrite << "\n" << red[k] << "\n" << black[k]; } filewrite.close(); cudaFree(ared_caste); cudaFree(ared_casti); cudaFree(ablack_caste); cudaFree(ablack_casti); cudaFree(a_b); free(ared_caste); free(ared_casti); free(ablack_caste); free(ablack_casti); free(a_b); //cudaDeviceReset(); return 0; }
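Comparing the two copies of this solver, the systematic changes hipify made are the header set, the runtime prefix (cudaMalloc/cudaMemcpy/cudaDeviceProp become hipMalloc/hipMemcpy/hipDeviceProp_t), and the launch syntax, which goes from triple chevrons to hipLaunchKernelGGL with explicit dim3, shared-memory and stream arguments. Both copies also free() the buffer that cornerSource allocates with new[] and call free() on the device coefficient pointers after cudaFree/hipFree, so the cleanup path is undefined behavior in either version; the sketch below only illustrates the launch-syntax mapping, using a hypothetical kernel rather than differencingOperation.

__global__ void scale(float *v, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= a;  // identical body on both platforms
}

// CUDA form, as in the .cu file:
//   scale<<<grids, threads>>>(d_v, 2.f, n);
// HIP form emitted by hipify, as in the .hip file:
//   hipLaunchKernelGGL((scale), dim3(grids), dim3(threads),
//                      0 /*shared mem*/, 0 /*stream*/, d_v, 2.f, n);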
3cc5e53c45fe85a3318a1ed49ce94f83748eca65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ComputerGraphics Tuebingen, 2018 #if GOOGLE_CUDA #define EIGEN_USE_GPU #include "tensorflow/core/util/cuda_kernel_helper.h" #include "matrix_add_op.h" namespace { using CudaLaunchConfig = ::tensorflow::CudaLaunchConfig; template<typename T> __global__ void forward(CudaLaunchConfig cfg, T* top, const int N, const T* matrixA, const T* matrixB, const T bias) { // for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { CUDA_1D_KERNEL_LOOP(i, cfg.virtual_thread_count) { top[i] = matrixA[i] + matrixB[i] + (T) bias; } } template<typename T> __global__ void backward(CudaLaunchConfig cfg, const T* top_diff, const int N, T* grad_matrixA, T* grad_matrixB) { // for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { CUDA_1D_KERNEL_LOOP(i, cfg.virtual_thread_count) { grad_matrixA[i] = top_diff[i]; grad_matrixB[i] = top_diff[i]; } } } // anonymous namespace namespace tensorflow { namespace functor { template <typename Dtype> struct MatrixAddFunctor<GPUDevice, Dtype> { static void launch(::tensorflow::OpKernelContext* ctx, const Tensor& mA_, const Tensor& mB_, Tensor *mC_, Dtype bias) { const int N = mA_.NumElements(); ::tensorflow::CudaLaunchConfig cfg = ::tensorflow::GetCudaLaunchConfig(N, ctx->eigen_device<GPUDevice>()); hipLaunchKernelGGL(( forward<Dtype>) , dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx->eigen_gpu_device().stream() , cfg, mC_->flat<Dtype>().data(), mA_.NumElements(), mA_.flat<Dtype>().data(), mB_.flat<Dtype>().data(), bias); if (!ctx->eigen_gpu_device().ok()) { ctx->SetStatus(tensorflow::errors::Internal("Failed launching MatrixAdd on GPU")); } } }; template struct MatrixAddFunctor<GPUDevice, int>; template struct MatrixAddFunctor<GPUDevice, float>; template struct MatrixAddFunctor<GPUDevice, double>; template <typename Dtype> struct MatrixAddGrad<GPUDevice, Dtype> { static void launch(::tensorflow::OpKernelContext* ctx, const Tensor& topdiff_, Tensor *grad_mA_, Tensor *grad_mB_) { const int N = topdiff_.NumElements(); ::tensorflow::CudaLaunchConfig cfg = ::tensorflow::GetCudaLaunchConfig(N, ctx->eigen_device<GPUDevice>()); // // optional reset gradients before running a kernel // hipMemset(grad_mA_->flat<Dtype>().data(), 0, N * sizeof(Dtype)); // hipMemset(grad_mB_->flat<Dtype>().data(), 0, N * sizeof(Dtype)); // backward<Dtype> // <<< cfg.block_count, cfg.thread_per_block, 0, ctx->eigen_gpu_device().stream() >>> ( // cfg, // topdiff_.flat<Dtype>().data(), // topdiff_.NumElements(), // grad_mA_->flat<Dtype>().data(), // grad_mB_->flat<Dtype>().data()); // faster alternative to custom kernel (above) hipMemcpy(grad_mA_->flat<Dtype>().data(), topdiff_.flat<Dtype>().data(), N * sizeof(Dtype), hipMemcpyDeviceToDevice); hipMemcpy(grad_mB_->flat<Dtype>().data(), topdiff_.flat<Dtype>().data(), N * sizeof(Dtype), hipMemcpyDeviceToDevice); if (!ctx->eigen_gpu_device().ok()) { ctx->SetStatus(tensorflow::errors::Internal("Failed launching MatrixAddGrad on GPU")); } } }; template struct MatrixAddGrad<GPUDevice, float>; template struct MatrixAddGrad<GPUDevice, double>; } // namespace functor } // namespace tensorflow #endif // GOOGLE_CUDA
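In this op's backward pass, the files skip the custom kernel and, as their comment says, copy top_diff into both gradient tensors with a plain device-to-device memcpy, while the forward kernel is launched on the Eigen GPU device's stream. A hedged sketch of the stream-ordered variant of that copy follows, written with CUDA names for consistency with the .cu half of the pair; it assumes the same Dtype, N, ctx, topdiff_, grad_mA_ and grad_mB_ as MatrixAddGrad::launch, and reuses the ctx->eigen_gpu_device().stream() handle the launch already uses.

// Sketch only: keeps both gradient copies on the same stream as the forward kernel.
cudaStream_t s = ctx->eigen_gpu_device().stream();
cudaMemcpyAsync(grad_mA_->flat<Dtype>().data(), topdiff_.flat<Dtype>().data(),
                N * sizeof(Dtype), cudaMemcpyDeviceToDevice, s);
cudaMemcpyAsync(grad_mB_->flat<Dtype>().data(), topdiff_.flat<Dtype>().data(),
                N * sizeof(Dtype), cudaMemcpyDeviceToDevice, s);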
3cc5e53c45fe85a3318a1ed49ce94f83748eca65.cu
// ComputerGraphics Tuebingen, 2018 #if GOOGLE_CUDA #define EIGEN_USE_GPU #include "tensorflow/core/util/cuda_kernel_helper.h" #include "matrix_add_op.h" namespace { using CudaLaunchConfig = ::tensorflow::CudaLaunchConfig; template<typename T> __global__ void forward(CudaLaunchConfig cfg, T* top, const int N, const T* matrixA, const T* matrixB, const T bias) { // for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { CUDA_1D_KERNEL_LOOP(i, cfg.virtual_thread_count) { top[i] = matrixA[i] + matrixB[i] + (T) bias; } } template<typename T> __global__ void backward(CudaLaunchConfig cfg, const T* top_diff, const int N, T* grad_matrixA, T* grad_matrixB) { // for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { CUDA_1D_KERNEL_LOOP(i, cfg.virtual_thread_count) { grad_matrixA[i] = top_diff[i]; grad_matrixB[i] = top_diff[i]; } } } // anonymous namespace namespace tensorflow { namespace functor { template <typename Dtype> struct MatrixAddFunctor<GPUDevice, Dtype> { static void launch(::tensorflow::OpKernelContext* ctx, const Tensor& mA_, const Tensor& mB_, Tensor *mC_, Dtype bias) { const int N = mA_.NumElements(); ::tensorflow::CudaLaunchConfig cfg = ::tensorflow::GetCudaLaunchConfig(N, ctx->eigen_device<GPUDevice>()); forward<Dtype> <<< cfg.block_count, cfg.thread_per_block, 0, ctx->eigen_gpu_device().stream() >>> ( cfg, mC_->flat<Dtype>().data(), mA_.NumElements(), mA_.flat<Dtype>().data(), mB_.flat<Dtype>().data(), bias); if (!ctx->eigen_gpu_device().ok()) { ctx->SetStatus(tensorflow::errors::Internal("Failed launching MatrixAdd on GPU")); } } }; template struct MatrixAddFunctor<GPUDevice, int>; template struct MatrixAddFunctor<GPUDevice, float>; template struct MatrixAddFunctor<GPUDevice, double>; template <typename Dtype> struct MatrixAddGrad<GPUDevice, Dtype> { static void launch(::tensorflow::OpKernelContext* ctx, const Tensor& topdiff_, Tensor *grad_mA_, Tensor *grad_mB_) { const int N = topdiff_.NumElements(); ::tensorflow::CudaLaunchConfig cfg = ::tensorflow::GetCudaLaunchConfig(N, ctx->eigen_device<GPUDevice>()); // // optional reset gradients before running a kernel // cudaMemset(grad_mA_->flat<Dtype>().data(), 0, N * sizeof(Dtype)); // cudaMemset(grad_mB_->flat<Dtype>().data(), 0, N * sizeof(Dtype)); // backward<Dtype> // <<< cfg.block_count, cfg.thread_per_block, 0, ctx->eigen_gpu_device().stream() >>> ( // cfg, // topdiff_.flat<Dtype>().data(), // topdiff_.NumElements(), // grad_mA_->flat<Dtype>().data(), // grad_mB_->flat<Dtype>().data()); // faster alternative to custom kernel (above) cudaMemcpy(grad_mA_->flat<Dtype>().data(), topdiff_.flat<Dtype>().data(), N * sizeof(Dtype), cudaMemcpyDeviceToDevice); cudaMemcpy(grad_mB_->flat<Dtype>().data(), topdiff_.flat<Dtype>().data(), N * sizeof(Dtype), cudaMemcpyDeviceToDevice); if (!ctx->eigen_gpu_device().ok()) { ctx->SetStatus(tensorflow::errors::Internal("Failed launching MatrixAddGrad on GPU")); } } }; template struct MatrixAddGrad<GPUDevice, float>; template struct MatrixAddGrad<GPUDevice, double>; } // namespace functor } // namespace tensorflow #endif // GOOGLE_CUDA
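Both kernels in this pair iterate with CUDA_1D_KERNEL_LOOP over cfg.virtual_thread_count, which expands to a grid-stride loop; the commented-out for-loop in the sources shows exactly that expansion. The same pattern without the TensorFlow helpers, as a standalone sketch (kernel name and launch geometry are hypothetical):

__global__ void add_bias(float *top, const float *a, const float *b,
                         float bias, int n) {
  // Grid-stride loop: each thread handles every (blockDim.x * gridDim.x)-th element,
  // so a fixed launch geometry covers any n.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    top[i] = a[i] + b[i] + bias;
  }
}
// Example launch:
//   add_bias<<<256, 256>>>(d_top, d_a, d_b, 1.f, n);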
03ad8f44696d2b4df1fc137b33bbb48e2b400c71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> #include "bboxUtils.h" #include "hipcub/hipcub.hpp" #include "cub_helper.h" #include "kernel.h" template <typename T_SCORE, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void prepareSortData(const int num, const int num_classes, const int num_preds_per_class, const int background_label_id, const float confidence_threshold, T_SCORE *conf_scores_gpu, T_SCORE *temp_scores, int *temp_idx, int *d_offsets) { // Prepare scores data for sort const int cur_idx = blockIdx.x * nthds_per_cta + threadIdx.x; const int numPredsPerBatch = num_classes * num_preds_per_class; if (cur_idx < numPredsPerBatch) { const int class_idx = cur_idx / num_preds_per_class; for (int i = 0; i < num; i++) { const int targetIdx = i * numPredsPerBatch + cur_idx; const T_SCORE score = conf_scores_gpu[targetIdx]; // "Clear" background labeled score and index // Because we do not care about background if (class_idx == background_label_id) { // Set scores to 0 // Set label = -1 temp_scores[targetIdx] = 0.0f; temp_idx[targetIdx] = -1; conf_scores_gpu[targetIdx] = 0.0f; } // "Clear" scores lower than threshold else { if (score > confidence_threshold) { temp_scores[targetIdx] = score; temp_idx[targetIdx] = cur_idx + i * numPredsPerBatch; } else { // Set scores to 0 // Set label = -1 temp_scores[targetIdx] = 0.0f; temp_idx[targetIdx] = -1; conf_scores_gpu[targetIdx] = 0.0f; // TODO: HERE writing memory too many times } } if ((cur_idx % num_preds_per_class) == 0) { const int offset_ct = i * num_classes + cur_idx / num_preds_per_class; d_offsets[offset_ct] = offset_ct * num_preds_per_class; // set the last element in d_offset if (blockIdx.x == 0 && threadIdx.x == 0) d_offsets[num * num_classes] = num * numPredsPerBatch; } } } } template <typename T_SCORE> pluginStatus_t sortScoresPerClass_gpu(hipStream_t stream, const int num, const int num_classes, const int num_preds_per_class, const int background_label_id, const float confidence_threshold, void *conf_scores_gpu, void *index_array_gpu, void *workspace) { const int num_segments = num * num_classes; void *temp_scores = workspace; const int arrayLen = num * num_classes * num_preds_per_class; void *temp_idx = nextWorkspacePtr((int8_t *)temp_scores, arrayLen * sizeof(T_SCORE)); void *d_offsets = nextWorkspacePtr((int8_t *)temp_idx, arrayLen * sizeof(int)); size_t cubOffsetSize = (num_segments + 1) * sizeof(int); void *cubWorkspace = nextWorkspacePtr((int8_t *)d_offsets, cubOffsetSize); const int BS = 512; const int GS = (num_classes * num_preds_per_class + BS - 1) / BS; hipLaunchKernelGGL(( prepareSortData<T_SCORE, BS>), dim3(GS), dim3(BS), 0, stream, num, num_classes, num_preds_per_class, background_label_id, confidence_threshold, (T_SCORE *)conf_scores_gpu, (T_SCORE *)temp_scores, (int *)temp_idx, (int *)d_offsets); size_t temp_storage_bytes = 
cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_segments); hipcub::DeviceSegmentedRadixSort::SortPairsDescending( cubWorkspace, temp_storage_bytes, (const T_SCORE *)(temp_scores), (T_SCORE *)(conf_scores_gpu), (const int *)(temp_idx), (int *)(index_array_gpu), arrayLen, num_segments, (const int *)d_offsets, (const int *)d_offsets + 1, 0, sizeof(T_SCORE) * 8, stream); CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // sortScoresPerClass LAUNCH CONFIG typedef pluginStatus_t (*sspcFunc)(hipStream_t, const int, const int, const int, const int, const float, void *, void *, void *); struct sspcLaunchConfig { DataType t_score; sspcFunc function; sspcLaunchConfig(DataType t_score) : t_score(t_score) {} sspcLaunchConfig(DataType t_score, sspcFunc function) : t_score(t_score), function(function) {} bool operator==(const sspcLaunchConfig &other) { return t_score == other.t_score; } }; static std::vector<sspcLaunchConfig> sspcFuncVec; bool sspcInit() { sspcFuncVec.push_back( sspcLaunchConfig(DataType::kFLOAT, sortScoresPerClass_gpu<float>)); return true; } static bool initialized = sspcInit(); pluginStatus_t sortScoresPerClass( hipStream_t stream, const int num, const int num_classes, const int num_preds_per_class, const int background_label_id, const float confidence_threshold, const DataType DT_SCORE, void *conf_scores_gpu, void *index_array_gpu, void *workspace) { sspcLaunchConfig lc = sspcLaunchConfig(DT_SCORE); for (unsigned i = 0; i < sspcFuncVec.size(); ++i) { if (lc == sspcFuncVec[i]) { DEBUG_PRINTF("sortScoresPerClass kernel %d\n", i); return sspcFuncVec[i].function( stream, num, num_classes, num_preds_per_class, background_label_id, confidence_threshold, conf_scores_gpu, index_array_gpu, workspace); } } return STATUS_BAD_PARAM; } size_t sortScoresPerClassWorkspaceSize(const int num, const int num_classes, const int num_preds_per_class, const DataType DT_CONF) { size_t wss[4]; const int arrayLen = num * num_classes * num_preds_per_class; wss[0] = arrayLen * dataTypeSize(DT_CONF); // temp scores wss[1] = arrayLen * sizeof(int); // temp indices wss[2] = (num * num_classes + 1) * sizeof(int); // offsets if (DT_CONF == DataType::kFLOAT) { wss[3] = cubSortPairsWorkspaceSize<float, int>( arrayLen, num * num_classes); // cub workspace } else { printf("SCORE type not supported\n"); return (size_t)-1; } return calculateTotalWorkspaceSize(wss, 4); }
03ad8f44696d2b4df1fc137b33bbb48e2b400c71.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> #include "bboxUtils.h" #include "cub/cub.cuh" #include "cub_helper.h" #include "kernel.h" template <typename T_SCORE, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void prepareSortData(const int num, const int num_classes, const int num_preds_per_class, const int background_label_id, const float confidence_threshold, T_SCORE *conf_scores_gpu, T_SCORE *temp_scores, int *temp_idx, int *d_offsets) { // Prepare scores data for sort const int cur_idx = blockIdx.x * nthds_per_cta + threadIdx.x; const int numPredsPerBatch = num_classes * num_preds_per_class; if (cur_idx < numPredsPerBatch) { const int class_idx = cur_idx / num_preds_per_class; for (int i = 0; i < num; i++) { const int targetIdx = i * numPredsPerBatch + cur_idx; const T_SCORE score = conf_scores_gpu[targetIdx]; // "Clear" background labeled score and index // Because we do not care about background if (class_idx == background_label_id) { // Set scores to 0 // Set label = -1 temp_scores[targetIdx] = 0.0f; temp_idx[targetIdx] = -1; conf_scores_gpu[targetIdx] = 0.0f; } // "Clear" scores lower than threshold else { if (score > confidence_threshold) { temp_scores[targetIdx] = score; temp_idx[targetIdx] = cur_idx + i * numPredsPerBatch; } else { // Set scores to 0 // Set label = -1 temp_scores[targetIdx] = 0.0f; temp_idx[targetIdx] = -1; conf_scores_gpu[targetIdx] = 0.0f; // TODO: HERE writing memory too many times } } if ((cur_idx % num_preds_per_class) == 0) { const int offset_ct = i * num_classes + cur_idx / num_preds_per_class; d_offsets[offset_ct] = offset_ct * num_preds_per_class; // set the last element in d_offset if (blockIdx.x == 0 && threadIdx.x == 0) d_offsets[num * num_classes] = num * numPredsPerBatch; } } } } template <typename T_SCORE> pluginStatus_t sortScoresPerClass_gpu(cudaStream_t stream, const int num, const int num_classes, const int num_preds_per_class, const int background_label_id, const float confidence_threshold, void *conf_scores_gpu, void *index_array_gpu, void *workspace) { const int num_segments = num * num_classes; void *temp_scores = workspace; const int arrayLen = num * num_classes * num_preds_per_class; void *temp_idx = nextWorkspacePtr((int8_t *)temp_scores, arrayLen * sizeof(T_SCORE)); void *d_offsets = nextWorkspacePtr((int8_t *)temp_idx, arrayLen * sizeof(int)); size_t cubOffsetSize = (num_segments + 1) * sizeof(int); void *cubWorkspace = nextWorkspacePtr((int8_t *)d_offsets, cubOffsetSize); const int BS = 512; const int GS = (num_classes * num_preds_per_class + BS - 1) / BS; prepareSortData<T_SCORE, BS><<<GS, BS, 0, stream>>>( num, num_classes, num_preds_per_class, background_label_id, confidence_threshold, (T_SCORE *)conf_scores_gpu, (T_SCORE *)temp_scores, (int *)temp_idx, (int *)d_offsets); size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_segments); cub::DeviceSegmentedRadixSort::SortPairsDescending( cubWorkspace, 
temp_storage_bytes, (const T_SCORE *)(temp_scores), (T_SCORE *)(conf_scores_gpu), (const int *)(temp_idx), (int *)(index_array_gpu), arrayLen, num_segments, (const int *)d_offsets, (const int *)d_offsets + 1, 0, sizeof(T_SCORE) * 8, stream); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // sortScoresPerClass LAUNCH CONFIG typedef pluginStatus_t (*sspcFunc)(cudaStream_t, const int, const int, const int, const int, const float, void *, void *, void *); struct sspcLaunchConfig { DataType t_score; sspcFunc function; sspcLaunchConfig(DataType t_score) : t_score(t_score) {} sspcLaunchConfig(DataType t_score, sspcFunc function) : t_score(t_score), function(function) {} bool operator==(const sspcLaunchConfig &other) { return t_score == other.t_score; } }; static std::vector<sspcLaunchConfig> sspcFuncVec; bool sspcInit() { sspcFuncVec.push_back( sspcLaunchConfig(DataType::kFLOAT, sortScoresPerClass_gpu<float>)); return true; } static bool initialized = sspcInit(); pluginStatus_t sortScoresPerClass( cudaStream_t stream, const int num, const int num_classes, const int num_preds_per_class, const int background_label_id, const float confidence_threshold, const DataType DT_SCORE, void *conf_scores_gpu, void *index_array_gpu, void *workspace) { sspcLaunchConfig lc = sspcLaunchConfig(DT_SCORE); for (unsigned i = 0; i < sspcFuncVec.size(); ++i) { if (lc == sspcFuncVec[i]) { DEBUG_PRINTF("sortScoresPerClass kernel %d\n", i); return sspcFuncVec[i].function( stream, num, num_classes, num_preds_per_class, background_label_id, confidence_threshold, conf_scores_gpu, index_array_gpu, workspace); } } return STATUS_BAD_PARAM; } size_t sortScoresPerClassWorkspaceSize(const int num, const int num_classes, const int num_preds_per_class, const DataType DT_CONF) { size_t wss[4]; const int arrayLen = num * num_classes * num_preds_per_class; wss[0] = arrayLen * dataTypeSize(DT_CONF); // temp scores wss[1] = arrayLen * sizeof(int); // temp indices wss[2] = (num * num_classes + 1) * sizeof(int); // offsets if (DT_CONF == DataType::kFLOAT) { wss[3] = cubSortPairsWorkspaceSize<float, int>( arrayLen, num * num_classes); // cub workspace } else { printf("SCORE type not supported\n"); return (size_t)-1; } return calculateTotalWorkspaceSize(wss, 4); }
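The plugin above sizes the CUB scratch buffer ahead of time with its cubSortPairsWorkspaceSize helper so everything can be carved out of one preallocated workspace. The stock CUB usage is a two-phase call instead: invoke the sort once with a null temp pointer to obtain the required size, allocate, then invoke it again. A minimal sketch with hypothetical device arrays, using the same offsets convention as the kernel above (num_segments + 1 entries, the last one marking the end of the final segment):

#include <cub/cub.cuh>

void sort_segments_desc(const float *d_keys_in, float *d_keys_out,
                        const int *d_vals_in, int *d_vals_out,
                        int num_items, int num_segments,
                        const int *d_offsets, cudaStream_t stream) {
  void  *d_temp = nullptr;
  size_t temp_bytes = 0;
  // First call only computes temp_bytes; no sorting happens.
  cub::DeviceSegmentedRadixSort::SortPairsDescending(
      d_temp, temp_bytes, d_keys_in, d_keys_out, d_vals_in, d_vals_out,
      num_items, num_segments, d_offsets, d_offsets + 1,
      0, sizeof(float) * 8, stream);
  cudaMalloc(&d_temp, temp_bytes);
  // Second call performs the per-segment descending sort.
  cub::DeviceSegmentedRadixSort::SortPairsDescending(
      d_temp, temp_bytes, d_keys_in, d_keys_out, d_vals_in, d_vals_out,
      num_items, num_segments, d_offsets, d_offsets + 1,
      0, sizeof(float) * 8, stream);
  cudaFree(d_temp);
}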
202167defd308a12beefd08320e29934dec200a7.hip
// !!! This is a file automatically generated by hipify!!! #include"svd.h" #include<cusolverDn.h> void svd(int m,int n,hipComplex* t,hipComplex* U,hipComplex* V,float* S){ hipsolverDnHandle_t handle; hipsolverGesvdjInfo_t params=NULL; int* info=NULL; int echo=0; int lda=m; int ldu=m; int ldv=n; int lwork=0; hipComplex* work=NULL; float* s; hipComplex* u; hipComplex* v; cusolverStatus_t status=CUSOLVER_STATUS_SUCCESS; status=hipsolverDnCreate(&handle); assert(status==CUSOLVER_STATUS_SUCCESS); status=hipsolverDnCreateGesvdjInfo(&params); assert(status==CUSOLVER_STATUS_SUCCESS); hipError_t stat1=hipSuccess; hipError_t stat2=hipSuccess; hipError_t stat3=hipSuccess; hipError_t stat4=hipSuccess; stat1=hipMalloc((void**)&info,sizeof(int)); stat2=hipMalloc((void**)&u,sizeof(hipComplex)*m*m); stat3=hipMalloc((void**)&v,sizeof(hipComplex)*n*n); stat4=hipMalloc((void**)&s,sizeof(float)*((m<n)?m:n)); if( stat1!=hipSuccess|| stat2!=hipSuccess|| stat3!=hipSuccess|| stat4!=hipSuccess){ printf("cuda malloc error\n"); exit(-1); } if(hipsolverDnCgesvdj_bufferSize( handle, HIPSOLVER_EIG_MODE_VECTOR, echo, m, n, t, m, s, u, ldu, v, ldv, &lwork, params)!=CUSOLVER_STATUS_SUCCESS){ printf("hipsolverDnCgesvdj_bufferSize failed\n"); exit(-1); } if(hipDeviceSynchronize()!=hipSuccess){ printf("synchronize failed"); exit(-1); } stat1=hipMalloc((void**)&work,sizeof(hipComplex)*lwork); assert(stat1==hipSuccess); if(hipsolverDnCgesvdj( handle, HIPSOLVER_EIG_MODE_VECTOR, echo, m, n, t, lda, s, u, ldu, v, ldv, work, lwork, info, params)!=CUSOLVER_STATUS_SUCCESS){ printf("hipsolverDnCgesvdj err\n"); return; } if(hipDeviceSynchronize()!=hipSuccess){ printf("cuda synchronize err\n"); return; } stat1=hipMemcpy(U,u,sizeof(hipComplex)*ldu*m,hipMemcpyDeviceToHost); assert(stat1==hipSuccess); stat1=hipMemcpy(V,v,sizeof(hipComplex)*ldv*n,hipMemcpyDeviceToHost); assert(stat1==hipSuccess); stat1=hipMemcpy(S,s,sizeof(float)*((m<n)?m:n),hipMemcpyDeviceToHost); assert(stat1==hipSuccess); status=hipsolverDnDestroy(handle); assert(status==CUSOLVER_STATUS_SUCCESS); status=hipsolverDnDestroyGesvdjInfo(params); assert(status==CUSOLVER_STATUS_SUCCESS); stat1=hipFree(u); assert(stat1==hipSuccess); stat1=hipFree(v); assert(stat1==hipSuccess); stat1=hipFree(s); assert(stat1==hipSuccess); }
202167defd308a12beefd08320e29934dec200a7.cu
#include"svd.h" #include<cusolverDn.h> void svd(int m,int n,cuComplex* t,cuComplex* U,cuComplex* V,float* S){ cusolverDnHandle_t handle; gesvdjInfo_t params=NULL; int* info=NULL; int echo=0; int lda=m; int ldu=m; int ldv=n; int lwork=0; cuComplex* work=NULL; float* s; cuComplex* u; cuComplex* v; cusolverStatus_t status=CUSOLVER_STATUS_SUCCESS; status=cusolverDnCreate(&handle); assert(status==CUSOLVER_STATUS_SUCCESS); status=cusolverDnCreateGesvdjInfo(&params); assert(status==CUSOLVER_STATUS_SUCCESS); cudaError_t stat1=cudaSuccess; cudaError_t stat2=cudaSuccess; cudaError_t stat3=cudaSuccess; cudaError_t stat4=cudaSuccess; stat1=cudaMalloc((void**)&info,sizeof(int)); stat2=cudaMalloc((void**)&u,sizeof(cuComplex)*m*m); stat3=cudaMalloc((void**)&v,sizeof(cuComplex)*n*n); stat4=cudaMalloc((void**)&s,sizeof(float)*((m<n)?m:n)); if( stat1!=cudaSuccess|| stat2!=cudaSuccess|| stat3!=cudaSuccess|| stat4!=cudaSuccess){ printf("cuda malloc error\n"); exit(-1); } if(cusolverDnCgesvdj_bufferSize( handle, CUSOLVER_EIG_MODE_VECTOR, echo, m, n, t, m, s, u, ldu, v, ldv, &lwork, params)!=CUSOLVER_STATUS_SUCCESS){ printf("cusolverDnCgesvdj_bufferSize failed\n"); exit(-1); } if(cudaDeviceSynchronize()!=cudaSuccess){ printf("synchronize failed"); exit(-1); } stat1=cudaMalloc((void**)&work,sizeof(cuComplex)*lwork); assert(stat1==cudaSuccess); if(cusolverDnCgesvdj( handle, CUSOLVER_EIG_MODE_VECTOR, echo, m, n, t, lda, s, u, ldu, v, ldv, work, lwork, info, params)!=CUSOLVER_STATUS_SUCCESS){ printf("cusolverDnCgesvdj err\n"); return; } if(cudaDeviceSynchronize()!=cudaSuccess){ printf("cuda synchronize err\n"); return; } stat1=cudaMemcpy(U,u,sizeof(cuComplex)*ldu*m,cudaMemcpyDeviceToHost); assert(stat1==cudaSuccess); stat1=cudaMemcpy(V,v,sizeof(cuComplex)*ldv*n,cudaMemcpyDeviceToHost); assert(stat1==cudaSuccess); stat1=cudaMemcpy(S,s,sizeof(float)*((m<n)?m:n),cudaMemcpyDeviceToHost); assert(stat1==cudaSuccess); status=cusolverDnDestroy(handle); assert(status==CUSOLVER_STATUS_SUCCESS); status=cusolverDnDestroyGesvdjInfo(params); assert(status==CUSOLVER_STATUS_SUCCESS); stat1=cudaFree(u); assert(stat1==cudaSuccess); stat1=cudaFree(v); assert(stat1==cudaSuccess); stat1=cudaFree(s); assert(stat1==cudaSuccess); }
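Two details stand out when comparing this pair: the hipified copy still includes <cusolverDn.h> and keeps the cusolverStatus_t / CUSOLVER_STATUS_SUCCESS names even though the calls themselves were rewritten to hipsolver, and in both copies the devInfo and work buffers are allocated but never checked or freed. A hedged sketch of the usual post-gesvdj check follows, in CUDA naming; info and work are the device pointers allocated in the function above, and the fragment would sit right after the gesvdj call and synchronize.

// Sketch only: read back and interpret devInfo, then release the remaining buffers.
int h_info = 0;
cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost);
if (h_info < 0)
  printf("gesvdj: parameter %d is invalid\n", -h_info);
else if (h_info > 0)
  printf("gesvdj: Jacobi sweeps did not converge (info = %d)\n", h_info);
cudaFree(info);
cudaFree(work);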
3a1ff8d941d4d047627b0446cd15df788e2ac8a3.hip
// !!! This is a file automatically generated by hipify!!! /* The MIT License Copyright (c) 2011 by Attractive Chaos <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include "read_data.h" static void check(int a, int b, const char *s) { if (a != b) printf("Error: %s %d %d\n", s, a, b); } typedef struct { int h, e; } eh_t; __global__ void kernel_extend2( const unsigned char* query, const unsigned char* target, const char* mat, eh_t* eh, char* qp, int* qle_acc, int* tle_acc, int* gtle_acc, int* gscore_acc, int* max_off_acc, int* score_acc, const int qlen, const int tlen, const int m, const int o_del, const int e_del, const int o_ins, const int e_ins, int w, const int end_bonus, const int zdrop, const int h0) { int oe_del = o_del + e_del; int oe_ins = o_ins + e_ins; int i, j, k; int beg, end; int max, max_i, max_j, max_ins, max_del, max_ie; int gscore; int max_off; // generate the query profile for (k = i = 0; k < m; ++k) { const char *p = mat + k * m; for (j = 0; j < qlen; ++j) qp[i++] = p[query[j]]; } // fill the first row eh[0].h = h0; eh[1].h = h0 > oe_ins? h0 - oe_ins : 0; for (j = 2; j <= qlen && eh[j-1].h > e_ins; ++j) eh[j].h = eh[j-1].h - e_ins; // adjust $w if it is too large k = m * m; for (i = 0, max = 0; i < k; ++i) // get the max score max = max > mat[i]? max : mat[i]; max_ins = (int)((float)(qlen * max + end_bonus - o_ins) / e_ins + 1.f); max_ins = max_ins > 1? max_ins : 1; w = w < max_ins? w : max_ins; max_del = (int)((float)(qlen * max + end_bonus - o_del) / e_del + 1.f); max_del = max_del > 1? max_del : 1; w = w < max_del? w : max_del; // TODO: is this necessary? 
// DP loop max = h0, max_i = max_j = -1; max_ie = -1, gscore = -1; max_off = 0; beg = 0, end = qlen; for (i = 0; i < tlen; ++i) { int t, f = 0, h1, m = 0, mj = -1; char *q = qp + target[i] * qlen; // apply the band and the constraint (if provided) if (beg < i - w) beg = i - w; if (end > i + w + 1) end = i + w + 1; if (end > qlen) end = qlen; // compute the first column if (beg == 0) { h1 = h0 - (o_del + e_del * (i + 1)); if (h1 < 0) h1 = 0; } else h1 = 0; for (j = beg; j < end; ++j) { // At the beginning of the loop: eh[j] = { H(i-1,j-1), E(i,j) }, f = F(i,j) and h1 = H(i,j-1) // Similar to SSE2-SW, cells are computed in the following order: // H(i,j) = max{H(i-1,j-1)+S(i,j), E(i,j), F(i,j)} // E(i+1,j) = max{H(i,j)-gapo, E(i,j)} - gape // F(i,j+1) = max{H(i,j)-gapo, F(i,j)} - gape eh_t *p = eh+j; int h, M = p->h, e = p->e; // get H(i-1,j-1) and E(i-1,j) p->h = h1; // set H(i,j-1) for the next row M = M? M + q[j] : 0;// separating H and M to disallow a cigar like "100M3I3D20M" h = M > e? M : e; // e and f are guaranteed to be non-negative, so h>=0 even if M<0 h = h > f? h : f; h1 = h; // save H(i,j) to h1 for the next column mj = m > h? mj : j; // record the position where max score is achieved m = m > h? m : h; // m is stored at eh[mj+1] t = M - oe_del; t = t > 0? t : 0; e -= e_del; e = e > t? e : t; // computed E(i+1,j) p->e = e; // save E(i+1,j) for the next row t = M - oe_ins; t = t > 0? t : 0; f -= e_ins; f = f > t? f : t; // computed F(i,j+1) } eh[end].h = h1; eh[end].e = 0; if (j == qlen) { max_ie = gscore > h1? max_ie : i; gscore = gscore > h1? gscore : h1; } if (m == 0) break; if (m > max) { max = m, max_i = i, max_j = mj; max_off = max_off > abs(mj - i)? max_off : abs(mj - i); } else if (zdrop > 0) { if (i - max_i > mj - max_j) { if (max - m - ((i - max_i) - (mj - max_j)) * e_del > zdrop) break; } else { if (max - m - ((mj - max_j) - (i - max_i)) * e_ins > zdrop) break; } } // update beg and end for the next round for (j = beg; j < end && eh[j].h == 0 && eh[j].e == 0; ++j); beg = j; for (j = end; j >= beg && eh[j].h == 0 && eh[j].e == 0; --j); end = j + 2 < qlen? 
j + 2 : qlen; //beg = 0; end = qlen; // uncomment this line for debugging } *qle_acc = max_j + 1; *tle_acc = max_i + 1; *gtle_acc = max_ie + 1; *gscore_acc = gscore; *max_off_acc = max_off; *score_acc = max; } float extend2(struct extend2_dat *d) { eh_t *eh = NULL; /* score array*/ char *qp = NULL; /* query profile*/ posix_memalign((void**)&eh, 64, (d->qlen+1) * 8); posix_memalign((void**)&qp, 64, d->qlen * d->m); memset(eh, 0, (d->qlen+1) * 8); int qle, tle, gtle, gscore, max_off, score; const int qlen = d->qlen; const int tlen = d->tlen; const int m = d->m; const int o_del = d->o_del; const int e_del = d->e_del; const int o_ins = d->o_ins; const int e_ins = d->e_ins; const int w = d->w; const int end_bonus = d->end_bonus; const int zdrop = d->zdrop; const int h0 = d->h0; auto start = std::chrono::steady_clock::now(); unsigned char *d_query; hipMalloc((void**)&d_query, qlen); hipMemcpyAsync(d_query, d->query, qlen, hipMemcpyHostToDevice, 0); unsigned char *d_target; hipMalloc((void**)&d_target, tlen); hipMemcpyAsync(d_target, d->target, tlen, hipMemcpyHostToDevice, 0); char *d_mat; hipMalloc((void**)&d_mat, m*m); hipMemcpyAsync(d_mat, d->mat, m*m, hipMemcpyHostToDevice, 0); eh_t *d_eh; hipMalloc((void**)&d_eh, (qlen+1)*sizeof(eh_t)); hipMemcpyAsync(d_eh, eh, (qlen+1)*sizeof(eh_t), hipMemcpyHostToDevice, 0); char *d_qp; hipMalloc((void**)&d_qp, qlen*m); hipMemcpyAsync(d_qp, qp, qlen*m, hipMemcpyHostToDevice, 0); int *d_qle; hipMalloc((void**)&d_qle, 4); int *d_tle; hipMalloc((void**)&d_tle, 4); int *d_gtle; hipMalloc((void**)&d_gtle, 4); int *d_gscore; hipMalloc((void**)&d_gscore, 4); int *d_max_off; hipMalloc((void**)&d_max_off, 4); int *d_score; hipMalloc((void**)&d_score, 4); hipLaunchKernelGGL(( kernel_extend2), dim3(1),dim3(1), 0, 0, d_query, d_target, d_mat, d_eh, d_qp, d_qle, d_tle, d_gtle, d_gscore, d_max_off, d_score, qlen, tlen, m, o_del, e_del, o_ins, e_ins, w, end_bonus, zdrop, h0); hipMemcpy(&qle, d_qle, 4, hipMemcpyDeviceToHost); hipMemcpy(&tle, d_tle, 4, hipMemcpyDeviceToHost); hipMemcpy(&gtle, d_gtle, 4, hipMemcpyDeviceToHost); hipMemcpy(&max_off, d_max_off, 4, hipMemcpyDeviceToHost); hipMemcpy(&gscore, d_gscore, 4, hipMemcpyDeviceToHost); hipMemcpy(&score, d_score, 4, hipMemcpyDeviceToHost); hipFree(d_query); hipFree(d_target); hipFree(d_mat); hipFree(d_eh); hipFree(d_qp); hipFree(d_qle); hipFree(d_tle); hipFree(d_gtle); hipFree(d_gscore); hipFree(d_max_off); hipFree(d_score); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); check(d->qle, qle, "qle"); check(d->tle, tle, "tle"); check(d->gtle, gtle, "gtle"); check(d->gscore, gscore, "gscore"); check(d->max_off, max_off, "max_off"); check(d->score, score, "score"); free(eh); free(qp); #ifdef VERBOSE printf("device: qle=%d, tle=%d, gtle=%d, gscore=%d, max_off=%d, score=%d\n", qle, tle, gtle, gscore, max_off, score); #endif return time; } int main(int argc, char *argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } int repeat = atoi(argv[1]); struct extend2_dat d; // Instead of iterating over a directory, list the file names (17 in total) const char* files[] = { #include "filelist.txt" }; float time = 0.f; for (int f = 0; f < repeat; f++) { read_data(files[f%17], &d); time += extend2(&d); } printf("Average offload time %f (us)\n", (time * 1e-3f) / repeat); return 0; }
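Inside the banded loop above, each eh[j] packs H(i-1,j-1) and E(i,j), f carries F(i,j), and h1 carries H(i,j-1). Restating the inner update as a small host-side helper may make the affine-gap recurrence easier to follow; this is a re-expression of the code above (the max/mj bookkeeping is omitted), not part of either file.

typedef struct { int h, e; } eh_t;

// One cell of the recurrence: returns H(i,j), updates p->e to E(i+1,j) and *f to F(i,j+1).
static inline int cell_update(eh_t *p, int *f, int h1, int score,
                              int oe_del, int e_del, int oe_ins, int e_ins) {
  int M = p->h, e = p->e;            // H(i-1,j-1) and E(i,j)
  p->h = h1;                         // store H(i,j-1) for the next row
  M = M ? M + score : 0;             // disallow extensions out of a zero cell
  int h = M > e ? M : e;
  h = h > *f ? h : *f;               // H(i,j); E and F stay non-negative, so h >= 0
  int t = M - oe_del; t = t > 0 ? t : 0;
  e -= e_del; p->e = e > t ? e : t;  // E(i+1,j)
  t = M - oe_ins; t = t > 0 ? t : 0;
  *f -= e_ins; *f = *f > t ? *f : t; // F(i,j+1)
  return h;                          // becomes h1 for the next column
}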
3a1ff8d941d4d047627b0446cd15df788e2ac8a3.cu
/* The MIT License Copyright (c) 2011 by Attractive Chaos <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <math.h> #include <chrono> #include <cuda.h> #include "read_data.h" static void check(int a, int b, const char *s) { if (a != b) printf("Error: %s %d %d\n", s, a, b); } typedef struct { int h, e; } eh_t; __global__ void kernel_extend2( const unsigned char* query, const unsigned char* target, const char* mat, eh_t* eh, char* qp, int* qle_acc, int* tle_acc, int* gtle_acc, int* gscore_acc, int* max_off_acc, int* score_acc, const int qlen, const int tlen, const int m, const int o_del, const int e_del, const int o_ins, const int e_ins, int w, const int end_bonus, const int zdrop, const int h0) { int oe_del = o_del + e_del; int oe_ins = o_ins + e_ins; int i, j, k; int beg, end; int max, max_i, max_j, max_ins, max_del, max_ie; int gscore; int max_off; // generate the query profile for (k = i = 0; k < m; ++k) { const char *p = mat + k * m; for (j = 0; j < qlen; ++j) qp[i++] = p[query[j]]; } // fill the first row eh[0].h = h0; eh[1].h = h0 > oe_ins? h0 - oe_ins : 0; for (j = 2; j <= qlen && eh[j-1].h > e_ins; ++j) eh[j].h = eh[j-1].h - e_ins; // adjust $w if it is too large k = m * m; for (i = 0, max = 0; i < k; ++i) // get the max score max = max > mat[i]? max : mat[i]; max_ins = (int)((float)(qlen * max + end_bonus - o_ins) / e_ins + 1.f); max_ins = max_ins > 1? max_ins : 1; w = w < max_ins? w : max_ins; max_del = (int)((float)(qlen * max + end_bonus - o_del) / e_del + 1.f); max_del = max_del > 1? max_del : 1; w = w < max_del? w : max_del; // TODO: is this necessary? 
// DP loop max = h0, max_i = max_j = -1; max_ie = -1, gscore = -1; max_off = 0; beg = 0, end = qlen; for (i = 0; i < tlen; ++i) { int t, f = 0, h1, m = 0, mj = -1; char *q = qp + target[i] * qlen; // apply the band and the constraint (if provided) if (beg < i - w) beg = i - w; if (end > i + w + 1) end = i + w + 1; if (end > qlen) end = qlen; // compute the first column if (beg == 0) { h1 = h0 - (o_del + e_del * (i + 1)); if (h1 < 0) h1 = 0; } else h1 = 0; for (j = beg; j < end; ++j) { // At the beginning of the loop: eh[j] = { H(i-1,j-1), E(i,j) }, f = F(i,j) and h1 = H(i,j-1) // Similar to SSE2-SW, cells are computed in the following order: // H(i,j) = max{H(i-1,j-1)+S(i,j), E(i,j), F(i,j)} // E(i+1,j) = max{H(i,j)-gapo, E(i,j)} - gape // F(i,j+1) = max{H(i,j)-gapo, F(i,j)} - gape eh_t *p = eh+j; int h, M = p->h, e = p->e; // get H(i-1,j-1) and E(i-1,j) p->h = h1; // set H(i,j-1) for the next row M = M? M + q[j] : 0;// separating H and M to disallow a cigar like "100M3I3D20M" h = M > e? M : e; // e and f are guaranteed to be non-negative, so h>=0 even if M<0 h = h > f? h : f; h1 = h; // save H(i,j) to h1 for the next column mj = m > h? mj : j; // record the position where max score is achieved m = m > h? m : h; // m is stored at eh[mj+1] t = M - oe_del; t = t > 0? t : 0; e -= e_del; e = e > t? e : t; // computed E(i+1,j) p->e = e; // save E(i+1,j) for the next row t = M - oe_ins; t = t > 0? t : 0; f -= e_ins; f = f > t? f : t; // computed F(i,j+1) } eh[end].h = h1; eh[end].e = 0; if (j == qlen) { max_ie = gscore > h1? max_ie : i; gscore = gscore > h1? gscore : h1; } if (m == 0) break; if (m > max) { max = m, max_i = i, max_j = mj; max_off = max_off > abs(mj - i)? max_off : abs(mj - i); } else if (zdrop > 0) { if (i - max_i > mj - max_j) { if (max - m - ((i - max_i) - (mj - max_j)) * e_del > zdrop) break; } else { if (max - m - ((mj - max_j) - (i - max_i)) * e_ins > zdrop) break; } } // update beg and end for the next round for (j = beg; j < end && eh[j].h == 0 && eh[j].e == 0; ++j); beg = j; for (j = end; j >= beg && eh[j].h == 0 && eh[j].e == 0; --j); end = j + 2 < qlen? 
j + 2 : qlen; //beg = 0; end = qlen; // uncomment this line for debugging } *qle_acc = max_j + 1; *tle_acc = max_i + 1; *gtle_acc = max_ie + 1; *gscore_acc = gscore; *max_off_acc = max_off; *score_acc = max; } float extend2(struct extend2_dat *d) { eh_t *eh = NULL; /* score array*/ char *qp = NULL; /* query profile*/ posix_memalign((void**)&eh, 64, (d->qlen+1) * 8); posix_memalign((void**)&qp, 64, d->qlen * d->m); memset(eh, 0, (d->qlen+1) * 8); int qle, tle, gtle, gscore, max_off, score; const int qlen = d->qlen; const int tlen = d->tlen; const int m = d->m; const int o_del = d->o_del; const int e_del = d->e_del; const int o_ins = d->o_ins; const int e_ins = d->e_ins; const int w = d->w; const int end_bonus = d->end_bonus; const int zdrop = d->zdrop; const int h0 = d->h0; auto start = std::chrono::steady_clock::now(); unsigned char *d_query; cudaMalloc((void**)&d_query, qlen); cudaMemcpyAsync(d_query, d->query, qlen, cudaMemcpyHostToDevice, 0); unsigned char *d_target; cudaMalloc((void**)&d_target, tlen); cudaMemcpyAsync(d_target, d->target, tlen, cudaMemcpyHostToDevice, 0); char *d_mat; cudaMalloc((void**)&d_mat, m*m); cudaMemcpyAsync(d_mat, d->mat, m*m, cudaMemcpyHostToDevice, 0); eh_t *d_eh; cudaMalloc((void**)&d_eh, (qlen+1)*sizeof(eh_t)); cudaMemcpyAsync(d_eh, eh, (qlen+1)*sizeof(eh_t), cudaMemcpyHostToDevice, 0); char *d_qp; cudaMalloc((void**)&d_qp, qlen*m); cudaMemcpyAsync(d_qp, qp, qlen*m, cudaMemcpyHostToDevice, 0); int *d_qle; cudaMalloc((void**)&d_qle, 4); int *d_tle; cudaMalloc((void**)&d_tle, 4); int *d_gtle; cudaMalloc((void**)&d_gtle, 4); int *d_gscore; cudaMalloc((void**)&d_gscore, 4); int *d_max_off; cudaMalloc((void**)&d_max_off, 4); int *d_score; cudaMalloc((void**)&d_score, 4); kernel_extend2<<<1,1>>>( d_query, d_target, d_mat, d_eh, d_qp, d_qle, d_tle, d_gtle, d_gscore, d_max_off, d_score, qlen, tlen, m, o_del, e_del, o_ins, e_ins, w, end_bonus, zdrop, h0); cudaMemcpy(&qle, d_qle, 4, cudaMemcpyDeviceToHost); cudaMemcpy(&tle, d_tle, 4, cudaMemcpyDeviceToHost); cudaMemcpy(&gtle, d_gtle, 4, cudaMemcpyDeviceToHost); cudaMemcpy(&max_off, d_max_off, 4, cudaMemcpyDeviceToHost); cudaMemcpy(&gscore, d_gscore, 4, cudaMemcpyDeviceToHost); cudaMemcpy(&score, d_score, 4, cudaMemcpyDeviceToHost); cudaFree(d_query); cudaFree(d_target); cudaFree(d_mat); cudaFree(d_eh); cudaFree(d_qp); cudaFree(d_qle); cudaFree(d_tle); cudaFree(d_gtle); cudaFree(d_gscore); cudaFree(d_max_off); cudaFree(d_score); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); check(d->qle, qle, "qle"); check(d->tle, tle, "tle"); check(d->gtle, gtle, "gtle"); check(d->gscore, gscore, "gscore"); check(d->max_off, max_off, "max_off"); check(d->score, score, "score"); free(eh); free(qp); #ifdef VERBOSE printf("device: qle=%d, tle=%d, gtle=%d, gscore=%d, max_off=%d, score=%d\n", qle, tle, gtle, gscore, max_off, score); #endif return time; } int main(int argc, char *argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } int repeat = atoi(argv[1]); struct extend2_dat d; // Instead of iterating over a directory, list the file names (17 in total) const char* files[] = { #include "filelist.txt" }; float time = 0.f; for (int f = 0; f < repeat; f++) { read_data(files[f%17], &d); time += extend2(&d); } printf("Average offload time %f (us)\n", (time * 1e-3f) / repeat); return 0; }
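The float returned by extend2 in this pair is wall-clock time around the whole offload (allocations, copies, the single-thread kernel launch, copies back, frees), measured with std::chrono. If only the kernel itself were of interest, CUDA events are the usual device-side alternative; a hedged sketch follows, with the argument list of the launch abbreviated in a comment.

cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0, 0);
// same launch as above:
// kernel_extend2<<<1, 1>>>(d_query, d_target, d_mat, d_eh, d_qp, /* ... */, h0);
cudaEventRecord(t1, 0);
cudaEventSynchronize(t1);
float ms = 0.f;
cudaEventElapsedTime(&ms, t0, t1);   // device time between the two events, in milliseconds
cudaEventDestroy(t0);
cudaEventDestroy(t1);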
cf1a3fe23fb2c1a6ea92d795c8ecfad4789d5b82.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>

#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>

namespace {

__global__ void set_identity_dev(
    std::uint32_t size, std::uint32_t skip, half *py) {
  const std::uint32_t i = IDX;
  if (i < size) py[i] = ::__float2half(!(i % skip));
}

}  // namespace

namespace primitiv {
namespace devices {

void CUDA16::identity_impl(Tensor &y) {
  const std::uint32_t size = y.shape().size();
  const std::uint32_t skip = y.shape()[0] + 1;
  const std::uint32_t num_blocks = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::set_identity_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, size, skip, MDATA(half, y));
}

}  // namespace devices
}  // namespace primitiv
cf1a3fe23fb2c1a6ea92d795c8ecfad4789d5b82.cu
#include <primitiv/config.h>

#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>

namespace {

__global__ void set_identity_dev(
    std::uint32_t size, std::uint32_t skip, half *py) {
  const std::uint32_t i = IDX;
  if (i < size) py[i] = ::__float2half(!(i % skip));
}

}  // namespace

namespace primitiv {
namespace devices {

void CUDA16::identity_impl(Tensor &y) {
  const std::uint32_t size = y.shape().size();
  const std::uint32_t skip = y.shape()[0] + 1;
  const std::uint32_t num_blocks = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::set_identity_dev<<<num_blocks, dim1_x_>>>(size, skip, MDATA(half, y));
}

}  // namespace devices
}  // namespace primitiv
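The pair above shows the main rewrite hipify performs on launches: the CUDA triple-chevron becomes hipLaunchKernelGGL with explicit dim3 grid/block, shared-memory and stream arguments. Below is a minimal float stand-in for the half-precision identity kernel, with the corresponding HIP launch shown as a comment; set_identity and the n = 4 size are illustrative, not taken from the library.

#include <cuda_runtime.h>

// Writes 1.0f on the main diagonal of a row-major n x n matrix, 0 elsewhere,
// mirroring the set_identity_dev kernel above in plain float.
__global__ void set_identity(unsigned int size, unsigned int skip, float *p)
{
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size) p[i] = (i % skip == 0) ? 1.0f : 0.0f;
}

int main()
{
  const unsigned int n = 4, size = n * n, skip = n + 1;
  float *d;
  cudaMalloc(&d, size * sizeof(float));

  // CUDA launch syntax; the paired .hip file above shows the form this becomes:
  //   hipLaunchKernelGGL(set_identity, dim3(1), dim3(size), 0, 0, size, skip, d);
  set_identity<<<1, size>>>(size, skip, d);

  cudaDeviceSynchronize();
  cudaFree(d);
  return 0;
}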
817c4f03a2f4cc1c4e512ffa54407547808edf4a.hip
// !!! This is a file automatically generated by hipify!!! #include <array> #include "gtest/gtest.h" #include "executor/cuda_executor.h" #include "soa/soa.h" using ikra::soa::IndexType; using ikra::soa::SoaLayout; using ikra::executor::cuda::construct; const static int kTestSize = 12; class Vertex : public SoaLayout<Vertex, 1000> { public: IKRA_INITIALIZE_CLASS __host__ __device__ Vertex(int f0, int f1) : field0(f0), field1(f1) {} int_ field0; int_ field1; __device__ void add_fields(int increment) { field0 = field0 + field1 + increment + this->id(); } }; IKRA_DEVICE_STORAGE(Vertex); // Cannot run "cuda_execute" inside gtest case. void run_test_construct_and_execute() { Vertex::initialize_storage(); EXPECT_EQ(Vertex::size(), 0UL); Vertex* first = construct<Vertex>(kTestSize, 5, 6); gpuErrchk(hipPeekAtLastError()); cuda_execute(&Vertex::add_fields, first, 12, 10); // Check result. for (int i = 0; i < kTestSize; ++i) { int actual = Vertex::get(i)->field0; int expected = 10 + 5 + 6 + i; EXPECT_EQ(actual, expected); } // Copy size to host memory and compare. EXPECT_EQ(Vertex::size(), static_cast<IndexType>(kTestSize)); // Make sure that we had no CUDA failures. gpuErrchk(hipPeekAtLastError()); } void run_test_host_side_assignment() { Vertex::initialize_storage(); EXPECT_EQ(Vertex::size(), 0UL); Vertex* first = construct<Vertex>(kTestSize, 5, 6); cuda_execute(&Vertex::add_fields, first, kTestSize, 10); for (int i = 0; i < kTestSize; ++i) { Vertex::get(i)->field0 = Vertex::get(i)->field0*Vertex::get(i)->field0; } // Check result. for (int i = 0; i < kTestSize; ++i) { int actual = Vertex::get(i)->field0; int expected = (10 + 5 + 6 + i)*(10 + 5 + 6 + i); EXPECT_EQ(actual, expected); } // Copy size to host memory and compare. EXPECT_EQ(Vertex::size(), static_cast<IndexType>(kTestSize)); // Make sure that we had no CUDA failures. gpuErrchk(hipPeekAtLastError()); } void run_test_host_side_new() { Vertex::initialize_storage(); EXPECT_EQ(Vertex::size(), 0UL); std::array<Vertex*, kTestSize> vertices; for (int i = 0; i < kTestSize; ++i) { vertices[i] = new Vertex(i + 1, i * i); EXPECT_EQ(vertices[i]->id(), static_cast<IndexType>(i)); } cuda_execute(&Vertex::add_fields, vertices[0], kTestSize, 10); // Check result. for (int i = 0; i < kTestSize; ++i) { int actual = Vertex::get(i)->field0; int expected = 10 + i + (i + 1) + (i*i); EXPECT_EQ(actual, expected); actual = vertices[i]->field1; expected = i*i; EXPECT_EQ(actual, expected); } // Copy size to host memory and compare. EXPECT_EQ(Vertex::size(), static_cast<IndexType>(kTestSize)); // Make sure that we had no CUDA failures. gpuErrchk(hipPeekAtLastError()); } void run_test_placement_new() { Vertex::initialize_storage(); EXPECT_EQ(Vertex::size(), 0UL); Vertex* v1 = new(Vertex::get_uninitialized(4)) Vertex(10, 20); EXPECT_EQ(Vertex::size(), 0UL); EXPECT_EQ(v1->field0, 10); EXPECT_EQ(v1->field1, 20); EXPECT_EQ(v1->id(), 4); // Make sure that we had no CUDA failures. gpuErrchk(hipPeekAtLastError()); } TEST(MinimumCudaTest, ConstructAndExecute) { run_test_construct_and_execute(); } TEST(MinimumCudaTest, HostSideAssignment) { run_test_host_side_assignment(); } TEST(MinimumCudaTest, HostSideNew) { run_test_host_side_new(); } TEST(MinimumCudaTest, PlacementNew) { run_test_placement_new(); }
817c4f03a2f4cc1c4e512ffa54407547808edf4a.cu
#include <array> #include "gtest/gtest.h" #include "executor/cuda_executor.h" #include "soa/soa.h" using ikra::soa::IndexType; using ikra::soa::SoaLayout; using ikra::executor::cuda::construct; const static int kTestSize = 12; class Vertex : public SoaLayout<Vertex, 1000> { public: IKRA_INITIALIZE_CLASS __host__ __device__ Vertex(int f0, int f1) : field0(f0), field1(f1) {} int_ field0; int_ field1; __device__ void add_fields(int increment) { field0 = field0 + field1 + increment + this->id(); } }; IKRA_DEVICE_STORAGE(Vertex); // Cannot run "cuda_execute" inside gtest case. void run_test_construct_and_execute() { Vertex::initialize_storage(); EXPECT_EQ(Vertex::size(), 0UL); Vertex* first = construct<Vertex>(kTestSize, 5, 6); gpuErrchk(cudaPeekAtLastError()); cuda_execute(&Vertex::add_fields, first, 12, 10); // Check result. for (int i = 0; i < kTestSize; ++i) { int actual = Vertex::get(i)->field0; int expected = 10 + 5 + 6 + i; EXPECT_EQ(actual, expected); } // Copy size to host memory and compare. EXPECT_EQ(Vertex::size(), static_cast<IndexType>(kTestSize)); // Make sure that we had no CUDA failures. gpuErrchk(cudaPeekAtLastError()); } void run_test_host_side_assignment() { Vertex::initialize_storage(); EXPECT_EQ(Vertex::size(), 0UL); Vertex* first = construct<Vertex>(kTestSize, 5, 6); cuda_execute(&Vertex::add_fields, first, kTestSize, 10); for (int i = 0; i < kTestSize; ++i) { Vertex::get(i)->field0 = Vertex::get(i)->field0*Vertex::get(i)->field0; } // Check result. for (int i = 0; i < kTestSize; ++i) { int actual = Vertex::get(i)->field0; int expected = (10 + 5 + 6 + i)*(10 + 5 + 6 + i); EXPECT_EQ(actual, expected); } // Copy size to host memory and compare. EXPECT_EQ(Vertex::size(), static_cast<IndexType>(kTestSize)); // Make sure that we had no CUDA failures. gpuErrchk(cudaPeekAtLastError()); } void run_test_host_side_new() { Vertex::initialize_storage(); EXPECT_EQ(Vertex::size(), 0UL); std::array<Vertex*, kTestSize> vertices; for (int i = 0; i < kTestSize; ++i) { vertices[i] = new Vertex(i + 1, i * i); EXPECT_EQ(vertices[i]->id(), static_cast<IndexType>(i)); } cuda_execute(&Vertex::add_fields, vertices[0], kTestSize, 10); // Check result. for (int i = 0; i < kTestSize; ++i) { int actual = Vertex::get(i)->field0; int expected = 10 + i + (i + 1) + (i*i); EXPECT_EQ(actual, expected); actual = vertices[i]->field1; expected = i*i; EXPECT_EQ(actual, expected); } // Copy size to host memory and compare. EXPECT_EQ(Vertex::size(), static_cast<IndexType>(kTestSize)); // Make sure that we had no CUDA failures. gpuErrchk(cudaPeekAtLastError()); } void run_test_placement_new() { Vertex::initialize_storage(); EXPECT_EQ(Vertex::size(), 0UL); Vertex* v1 = new(Vertex::get_uninitialized(4)) Vertex(10, 20); EXPECT_EQ(Vertex::size(), 0UL); EXPECT_EQ(v1->field0, 10); EXPECT_EQ(v1->field1, 20); EXPECT_EQ(v1->id(), 4); // Make sure that we had no CUDA failures. gpuErrchk(cudaPeekAtLastError()); } TEST(MinimumCudaTest, ConstructAndExecute) { run_test_construct_and_execute(); } TEST(MinimumCudaTest, HostSideAssignment) { run_test_host_side_assignment(); } TEST(MinimumCudaTest, HostSideNew) { run_test_host_side_new(); } TEST(MinimumCudaTest, PlacementNew) { run_test_placement_new(); }
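The tests above rely on a gpuErrchk(...) helper from the included executor headers. A common definition of that idiom is sketched below for reference; the project's actual macro may differ, so treat this as an illustrative equivalent rather than the library's API.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Classic error-check macro: print the CUDA error string with file/line
// context and abort on failure.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
  if (code != cudaSuccess) {
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

int main()
{
  gpuErrchk(cudaPeekAtLastError());   // no prior error expected
  return 0;
}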
0b69fc68eb293bfd3e85bda427af4b8673cd39e0.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::GemmSplitKParallel< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, hipStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
0b69fc68eb293bfd3e85bda427af4b8673cd39e0.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::GemmSplitKParallel< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
6ccac7a4abe557e592507f6d22486c24de2b6f71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zgecsrmv.cu, normal z -> d, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // CSR-SpMV kernel __global__ void dgecsrmv_kernel( int num_rows, int num_cols, double alpha, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * dx, double beta, double * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; dy[ row ] = dot *alpha + beta * dy[ row ]; } } // shifted CSR-SpMV kernel __global__ void dgecsrmv_kernel_shift( int num_rows, int num_cols, double alpha, double lambda, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * dx, double beta, int offset, int blocksize, magma_index_t * addrows, double * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR (val, row, col). Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, double alpha, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( dgecsrmv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU. It is a shifted version of the CSR-SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha double scalar multiplier @param[in] lambda double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDouble_ptr output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgecsrmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, double alpha, double lambda, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magma_int_t offset, magma_int_t blocksize, magma_index_t * addrows, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( dgecsrmv_kernel_shift), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, lambda, dval, drowptr, dcolind, dx, beta, offset, blocksize, addrows, dy); return MAGMA_SUCCESS; }
6ccac7a4abe557e592507f6d22486c24de2b6f71.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zgecsrmv.cu, normal z -> d, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // CSR-SpMV kernel __global__ void dgecsrmv_kernel( int num_rows, int num_cols, double alpha, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * dx, double beta, double * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; dy[ row ] = dot *alpha + beta * dy[ row ]; } } // shifted CSR-SpMV kernel __global__ void dgecsrmv_kernel_shift( int num_rows, int num_cols, double alpha, double lambda, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * dx, double beta, int offset, int blocksize, magma_index_t * addrows, double * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * dx[ dcolind[j] ]; if( row<blocksize ) dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ]; else dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ]; } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR (val, row, col). Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, double alpha, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; dgecsrmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU. It is a shifted version of the CSR-SpMV. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] alpha double scalar multiplier @param[in] lambda double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[in] offset magma_int_t in case not the main diagonal is scaled @param[in] blocksize magma_int_t in case of processing multiple vectors @param[in] addrows magmaIndex_ptr in case the matrixpowerskernel is used @param[out] dy magmaDouble_ptr output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgecsrmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, double alpha, double lambda, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magma_int_t offset, magma_int_t blocksize, magma_index_t * addrows, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; dgecsrmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, lambda, dval, drowptr, dcolind, dx, beta, offset, blocksize, addrows, dy); return MAGMA_SUCCESS; }
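Stripped of the MAGMA types and queue plumbing, the kernel above is the classic one-thread-per-row CSR SpMV. A minimal, compilable sketch of the same scheme follows; csr_spmv and csr_spmv_launch are illustrative names, not MAGMA entry points.

#include <cuda_runtime.h>

// Plain CSR y = alpha*A*x + beta*y, one thread per row -- the same scheme as
// dgecsrmv_kernel above, but with int indices and no MAGMA types.
__global__ void csr_spmv(int num_rows, double alpha, const double *val,
                         const int *rowptr, const int *colind,
                         const double *x, double beta, double *y)
{
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row < num_rows) {
    double dot = 0.0;
    for (int j = rowptr[row]; j < rowptr[row + 1]; ++j)
      dot += val[j] * x[colind[j]];
    y[row] = alpha * dot + beta * y[row];
  }
}

// Launch with 256-thread blocks, one row per thread, as the wrapper above does.
void csr_spmv_launch(int num_rows, double alpha, const double *val,
                     const int *rowptr, const int *colind,
                     const double *x, double beta, double *y)
{
  const int threads = 256;
  const int blocks = (num_rows + threads - 1) / threads;
  csr_spmv<<<blocks, threads>>>(num_rows, alpha, val, rowptr, colind, x, beta, y);
}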
385fbc53c2011d846f4f2cd83791cb15d5837835.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void bitonic_sort_step(int *dev_values, int j, int k)
{
  unsigned int i, ixj; /* Sorting partners: i and ixj */
  i = threadIdx.x + blockDim.x * blockIdx.x;
  ixj = i^j;

  /* The threads with the lowest ids sort the array. */
  if ((ixj)>i) {
    if ((i&k)==0) {
      /* Sort ascending */
      if (dev_values[i]>dev_values[ixj]) {
        // swap
        int temp = dev_values[i];
        dev_values[i] = dev_values[ixj];
        dev_values[ixj] = temp;
      }
    }
    if ((i&k)!=0) {
      /* Sort descending */
      if (dev_values[i]<dev_values[ixj]) {
        // swap
        int temp = dev_values[i];
        dev_values[i] = dev_values[ixj];
        dev_values[ixj] = temp;
      }
    }
  }
}
385fbc53c2011d846f4f2cd83791cb15d5837835.cu
#include "includes.h" __global__ void bitonic_sort_step(int *dev_values, int j, int k) { unsigned int i, ixj; /* Sorting partners: i and ixj */ i = threadIdx.x + blockDim.x * blockIdx.x; ixj = i^j; /* The threads with the lowest ids sort the array. */ if ((ixj)>i) { if ((i&k)==0) { /* Sort ascending */ if (dev_values[i]>dev_values[ixj]) { // swap int temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } if ((i&k)!=0) { /* Sort descending */ if (dev_values[i]<dev_values[ixj]) { // swap int temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } } }
8abcda995afe5faacac89abed46a9f8e8bea5ec3.hip
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=1024 --gridDim=1 --no-inline

#include <hip/hip_runtime.h>
#include <stdio.h>

#define N 2 //1024

__global__ void definitions (int* A, unsigned int* B)
{
  atomicSub(A,10);
  atomicSub(B,5);
}
8abcda995afe5faacac89abed46a9f8e8bea5ec3.cu
//pass
//--blockDim=1024 --gridDim=1 --no-inline

#include <cuda.h>
#include <stdio.h>

#define N 2 //1024

__global__ void definitions (int* A, unsigned int* B)
{
  atomicSub(A,10);
  atomicSub(B,5);
}
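The file above is written as a verification test (the //pass and --blockDim/--gridDim lines are tool directives), so it ships no host code. Below is a hypothetical driver that launches it with the stated 1024x1 configuration and prints the results; note that the unsigned counter wraps around.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void definitions(int *A, unsigned int *B); // kernel above, same translation unit

int main()
{
  int *A;  unsigned int *B;
  cudaMallocManaged(&A, sizeof(int));
  cudaMallocManaged(&B, sizeof(unsigned int));
  *A = 0; *B = 0;

  definitions<<<1, 1024>>>(A, B);        // matches --blockDim=1024 --gridDim=1
  cudaDeviceSynchronize();

  printf("A = %d, B = %u\n", *A, *B);    // expect -10240; B is 0 - 5*1024, wrapped modulo 2^32
  cudaFree(A);
  cudaFree(B);
  return 0;
}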
e21bdf6051e5f104e3b1ad5c1ecb3b9ad4baee6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //======================================================================= // Copyright (c) 2017 Baptiste Wicht // Distributed under the terms of the MIT License. // (See accompanying file LICENSE or copy at // http://opensource.org/licenses/MIT) //======================================================================= #include "egblas/floor.hpp" #include "complex.hpp" template <typename T> __global__ void floor_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) { auto index = threadIdx.x + blockIdx.x * blockDim.x; auto stride = blockDim.x * gridDim.x; for (; index < n; index += stride) { y[incy * index] = alpha * floor(x[incx * index]); } } template <typename T> __global__ void floor_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) { auto index = threadIdx.x + blockIdx.x * blockDim.x; auto stride = blockDim.x * gridDim.x; for (; index < n; index += stride) { y[incy * index] = floor(x[incx * index]); } } template <typename T> __global__ void floor_kernel0(size_t n, T* y, size_t incy) { auto index = threadIdx.x + blockIdx.x * blockDim.x; auto stride = blockDim.x * gridDim.x; for (; index < n; index += stride) { y[incy * index] = zero<T>(); } } template <typename T> void floor_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) { int blockSize; int minGridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, floor_kernel<T>, 0, 0); int gridSize = ((n / incy) + blockSize - 1) / blockSize; hipLaunchKernelGGL(( floor_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, alpha, x, incx, y, incy); #ifdef EGBLAS_SYNCHRONIZE hipDeviceSynchronize(); #endif } template <typename T> void floor_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) { int blockSize; int minGridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, floor_kernel1<T>, 0, 0); int gridSize = ((n / incy) + blockSize - 1) / blockSize; hipLaunchKernelGGL(( floor_kernel1<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, x, incx, y, incy); #ifdef EGBLAS_SYNCHRONIZE hipDeviceSynchronize(); #endif } template <typename T> void floor_kernel0_run(size_t n, T* y, size_t incy) { int blockSize; int minGridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, floor_kernel0<T>, 0, 0); int gridSize = ((n / incy) + blockSize - 1) / blockSize; hipLaunchKernelGGL(( floor_kernel0<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, y, incy); #ifdef EGBLAS_SYNCHRONIZE hipDeviceSynchronize(); #endif } void egblas_sfloor(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) { if (alpha == 1.0f) { floor_kernel1_run(n, x, incx, y, incy); } else if (alpha == 0.0f) { floor_kernel0_run(n, y, incy); } else { floor_kernel_run(n, alpha, x, incx, y, incy); } } void egblas_dfloor(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) { if (alpha == 1.0) { floor_kernel1_run(n, x, incx, y, incy); } else if (alpha == 0.0) { floor_kernel0_run(n, y, incy); } else { floor_kernel_run(n, alpha, x, incx, y, incy); } } void egblas_cfloor(size_t n, hipComplex alpha, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) { if (alpha.x == 1.0f && alpha.y == 0.0f) { floor_kernel1_run(n, x, incx, y, incy); } else if (alpha.x == 0.0f && alpha.y == 0.0f) { floor_kernel0_run(n, y, incy); } else { floor_kernel_run(n, alpha, x, incx, y, incy); } } void egblas_zfloor(size_t n, hipDoubleComplex alpha, const hipDoubleComplex* x, size_t incx, 
hipDoubleComplex* y, size_t incy) { if (alpha.x == 1.0 && alpha.y == 0.0) { floor_kernel1_run(n, x, incx, y, incy); } else if (alpha.x == 0.0 && alpha.y == 0.0) { floor_kernel0_run(n, y, incy); } else { floor_kernel_run(n, alpha, x, incx, y, incy); } }
e21bdf6051e5f104e3b1ad5c1ecb3b9ad4baee6e.cu
//======================================================================= // Copyright (c) 2017 Baptiste Wicht // Distributed under the terms of the MIT License. // (See accompanying file LICENSE or copy at // http://opensource.org/licenses/MIT) //======================================================================= #include "egblas/floor.hpp" #include "complex.hpp" template <typename T> __global__ void floor_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) { auto index = threadIdx.x + blockIdx.x * blockDim.x; auto stride = blockDim.x * gridDim.x; for (; index < n; index += stride) { y[incy * index] = alpha * floor(x[incx * index]); } } template <typename T> __global__ void floor_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) { auto index = threadIdx.x + blockIdx.x * blockDim.x; auto stride = blockDim.x * gridDim.x; for (; index < n; index += stride) { y[incy * index] = floor(x[incx * index]); } } template <typename T> __global__ void floor_kernel0(size_t n, T* y, size_t incy) { auto index = threadIdx.x + blockIdx.x * blockDim.x; auto stride = blockDim.x * gridDim.x; for (; index < n; index += stride) { y[incy * index] = zero<T>(); } } template <typename T> void floor_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) { int blockSize; int minGridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, floor_kernel<T>, 0, 0); int gridSize = ((n / incy) + blockSize - 1) / blockSize; floor_kernel<T><<<gridSize, blockSize>>>(n, alpha, x, incx, y, incy); #ifdef EGBLAS_SYNCHRONIZE cudaDeviceSynchronize(); #endif } template <typename T> void floor_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) { int blockSize; int minGridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, floor_kernel1<T>, 0, 0); int gridSize = ((n / incy) + blockSize - 1) / blockSize; floor_kernel1<T><<<gridSize, blockSize>>>(n, x, incx, y, incy); #ifdef EGBLAS_SYNCHRONIZE cudaDeviceSynchronize(); #endif } template <typename T> void floor_kernel0_run(size_t n, T* y, size_t incy) { int blockSize; int minGridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, floor_kernel0<T>, 0, 0); int gridSize = ((n / incy) + blockSize - 1) / blockSize; floor_kernel0<T><<<gridSize, blockSize>>>(n, y, incy); #ifdef EGBLAS_SYNCHRONIZE cudaDeviceSynchronize(); #endif } void egblas_sfloor(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) { if (alpha == 1.0f) { floor_kernel1_run(n, x, incx, y, incy); } else if (alpha == 0.0f) { floor_kernel0_run(n, y, incy); } else { floor_kernel_run(n, alpha, x, incx, y, incy); } } void egblas_dfloor(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) { if (alpha == 1.0) { floor_kernel1_run(n, x, incx, y, incy); } else if (alpha == 0.0) { floor_kernel0_run(n, y, incy); } else { floor_kernel_run(n, alpha, x, incx, y, incy); } } void egblas_cfloor(size_t n, cuComplex alpha, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) { if (alpha.x == 1.0f && alpha.y == 0.0f) { floor_kernel1_run(n, x, incx, y, incy); } else if (alpha.x == 0.0f && alpha.y == 0.0f) { floor_kernel0_run(n, y, incy); } else { floor_kernel_run(n, alpha, x, incx, y, incy); } } void egblas_zfloor(size_t n, cuDoubleComplex alpha, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) { if (alpha.x == 1.0 && alpha.y == 0.0) { floor_kernel1_run(n, x, incx, y, incy); } else if (alpha.x == 0.0 && alpha.y == 0.0) { floor_kernel0_run(n, y, incy); } else { 
floor_kernel_run(n, alpha, x, incx, y, incy); } }
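The *_kernel_run helpers above size their launches with cudaOccupancyMaxPotentialBlockSize instead of a hard-coded block size. A minimal sketch of the same pattern on a simple strided kernel; scale_kernel and scale_run are illustrative names, not egblas entry points.

#include <cuda_runtime.h>

// Grid-stride elementwise kernel in the same shape as floor_kernel1 above.
__global__ void scale_kernel(size_t n, const float *x, float *y)
{
  for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x)
    y[i] = 2.0f * x[i];
}

// Pick the block size with the occupancy API, as the *_kernel_run helpers above do.
void scale_run(size_t n, const float *x, float *y)
{
  int blockSize = 0, minGridSize = 0;
  cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scale_kernel, 0, 0);
  int gridSize = (int)((n + blockSize - 1) / blockSize);
  scale_kernel<<<gridSize, blockSize>>>(n, x, y);
}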
7a02d88dcff2cb158a555db3ee7c5cbbc40e2169.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void reduceUnrollWarp8(int *g_idata, int *g_odata, unsigned int n) { // set the thread id. unsigned int tid = threadIdx.x; unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x * 8; // convert global data pointer to the local pointer of this block. int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 data blocks. if (idx + blockDim.x * 7 < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + blockDim.x * 2]; int a4 = g_idata[idx + blockDim.x * 3]; int b1 = g_idata[idx + blockDim.x * 4]; int b2 = g_idata[idx + blockDim.x * 5]; int b3 = g_idata[idx + blockDim.x * 6]; int b4 = g_idata[idx + blockDim.x * 7]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x/2; stride>32; stride>>=1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock. __syncthreads(); } // unrolling warp if (tid < 32) { volatile int *vmem = idata; vmem[tid] += vmem[tid + 32]; vmem[tid] += vmem[tid + 16]; vmem[tid] += vmem[tid + 8]; vmem[tid] += vmem[tid + 4]; vmem[tid] += vmem[tid + 2]; vmem[tid] += vmem[tid + 1]; } // write result for this block to global mem. if (tid == 0) { g_odata[blockIdx.x] = idata[0]; } }
7a02d88dcff2cb158a555db3ee7c5cbbc40e2169.cu
#include "includes.h" __global__ void reduceUnrollWarp8(int *g_idata, int *g_odata, unsigned int n) { // set the thread id. unsigned int tid = threadIdx.x; unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x * 8; // convert global data pointer to the local pointer of this block. int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 data blocks. if (idx + blockDim.x * 7 < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + blockDim.x * 2]; int a4 = g_idata[idx + blockDim.x * 3]; int b1 = g_idata[idx + blockDim.x * 4]; int b2 = g_idata[idx + blockDim.x * 5]; int b3 = g_idata[idx + blockDim.x * 6]; int b4 = g_idata[idx + blockDim.x * 7]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x/2; stride>32; stride>>=1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock. __syncthreads(); } // unrolling warp if (tid < 32) { volatile int *vmem = idata; vmem[tid] += vmem[tid + 32]; vmem[tid] += vmem[tid + 16]; vmem[tid] += vmem[tid + 8]; vmem[tid] += vmem[tid + 4]; vmem[tid] += vmem[tid + 2]; vmem[tid] += vmem[tid + 1]; } // write result for this block to global mem. if (tid == 0) { g_odata[blockIdx.x] = idata[0]; } }
4003e328abaaf00f43f54bc2b278b7641680b186.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <fstream> #include <iostream> #define N 10 using namespace std; __global__ void addKernel(int *a, int *b, int *c) { int i = blockIdx.x; //int i = threadIdx.x; c[i] = a[i] + b[i]; } void add(int *a, int *b, int *c) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipMalloc((void**)&dev_a, N * sizeof(int)); hipMalloc((void**)&dev_b, N * sizeof(int)); hipMalloc((void**)&dev_c, N * sizeof(int)); hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice); addKernel << <N, 1 >> > (dev_a, dev_b, dev_c); //addKernel << <1, N >> > (dev_a, dev_b, dev_c); hipDeviceSynchronize(); hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost); } int main() { ofstream out; out.open("C:\\tasks_cuda\\task_1.txt", ios::out); int a[N], b[N], c[N]; for (int i = 0; i < N; i++) { a[i] = -i; b[i] = i * i; } add(a, b, c); if (out.is_open()) { for (int i = 0; i < N; i++) { out << a[i] << ' ' << b[i] << ' ' << c[i] <<'\n'; } } else cout << "error"; return 0; }
4003e328abaaf00f43f54bc2b278b7641680b186.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <fstream> #include <iostream> #define N 10 using namespace std; __global__ void addKernel(int *a, int *b, int *c) { int i = blockIdx.x; //int i = threadIdx.x; c[i] = a[i] + b[i]; } void add(int *a, int *b, int *c) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaMalloc((void**)&dev_a, N * sizeof(int)); cudaMalloc((void**)&dev_b, N * sizeof(int)); cudaMalloc((void**)&dev_c, N * sizeof(int)); cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice); addKernel << <N, 1 >> > (dev_a, dev_b, dev_c); //addKernel << <1, N >> > (dev_a, dev_b, dev_c); cudaDeviceSynchronize(); cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost); } int main() { ofstream out; out.open("C:\\tasks_cuda\\task_1.txt", ios::out); int a[N], b[N], c[N]; for (int i = 0; i < N; i++) { a[i] = -i; b[i] = i * i; } add(a, b, c); if (out.is_open()) { for (int i = 0; i < N; i++) { out << a[i] << ' ' << b[i] << ' ' << c[i] <<'\n'; } } else cout << "error"; return 0; }
tstshadow.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> static const int NUM_BLOCKS = 64; static const int NUM_THREADS = 1024; static const int NUM_LOOPS = 5; static const int TOTAL = NUM_BLOCKS * NUM_THREADS * NUM_LOOPS; __device__ unsigned int counter = 0; __global__ void tst_shadow(unsigned int* numbers) { for(int i = 0; i < NUM_LOOPS; ++ i) { unsigned int val = atomicAdd((unsigned int*)&counter, 1); if(val >= TOTAL) val = TOTAL; atomicAdd(&numbers[val], 1); } } int main(int argc, char* argv[]) { // Copy input data to device. unsigned int* numbers; checkCudaErrors(hipMalloc(&numbers, (1 + TOTAL) * sizeof(unsigned int))); checkCudaErrors(hipMemset(numbers, 0 ,(1 + TOTAL) * sizeof(unsigned int))); // Launch the kernel. hipLaunchKernelGGL(( tst_shadow), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, numbers); // Copy output data to host. checkCudaErrors(hipDeviceSynchronize()); unsigned int* lnumbers = (unsigned int*)malloc((1 + TOTAL) * sizeof(unsigned int)); checkCudaErrors(hipMemcpy(lnumbers, numbers, (1 + TOTAL) * sizeof(unsigned int), hipMemcpyDeviceToHost)); for (int i = 0; i < TOTAL; ++i) { if(lnumbers[i] != 1) { fprintf(stderr, "Error at %i\n", i); } } if(lnumbers[TOTAL] != 0) { fprintf(stderr, "Overflows: %i\n", lnumbers[TOTAL]); } hipDeviceReset(); return 0; }
tstshadow.cu
#include <iostream> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> static const int NUM_BLOCKS = 64; static const int NUM_THREADS = 1024; static const int NUM_LOOPS = 5; static const int TOTAL = NUM_BLOCKS * NUM_THREADS * NUM_LOOPS; __device__ unsigned int counter = 0; __global__ void tst_shadow(unsigned int* numbers) { for(int i = 0; i < NUM_LOOPS; ++ i) { unsigned int val = atomicAdd((unsigned int*)&counter, 1); if(val >= TOTAL) val = TOTAL; atomicAdd(&numbers[val], 1); } } int main(int argc, char* argv[]) { // Copy input data to device. unsigned int* numbers; checkCudaErrors(cudaMalloc(&numbers, (1 + TOTAL) * sizeof(unsigned int))); checkCudaErrors(cudaMemset(numbers, 0 ,(1 + TOTAL) * sizeof(unsigned int))); // Launch the kernel. tst_shadow<<<NUM_BLOCKS, NUM_THREADS>>>(numbers); // Copy output data to host. checkCudaErrors(cudaDeviceSynchronize()); unsigned int* lnumbers = (unsigned int*)malloc((1 + TOTAL) * sizeof(unsigned int)); checkCudaErrors(cudaMemcpy(lnumbers, numbers, (1 + TOTAL) * sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (int i = 0; i < TOTAL; ++i) { if(lnumbers[i] != 1) { fprintf(stderr, "Error at %i\n", i); } } if(lnumbers[TOTAL] != 0) { fprintf(stderr, "Overflows: %i\n", lnumbers[TOTAL]); } cudaDeviceReset(); return 0; }
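The test above keeps its tally in a __device__ global and checks it indirectly through the numbers array. If the final value of counter itself were needed on the host, the symbol-copy API is the usual route; the snippet below is a stand-alone illustration with its own counter, not an addition to the test.

#include <cstdio>
#include <cuda_runtime.h>

__device__ unsigned int counter;   // stands in for the __device__ counter above

int main()
{
  unsigned int zero = 0, host_counter = 0;
  cudaMemcpyToSymbol(counter, &zero, sizeof(unsigned int));     // reset on the device
  // ... launch kernels that atomicAdd(&counter, 1) here ...
  cudaMemcpyFromSymbol(&host_counter, counter, sizeof(unsigned int));
  printf("counter = %u\n", host_counter);
  return 0;
}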
780eb75a58cca9d8c788c05f10c66166388e4be8.hip
// !!! This is a file automatically generated by hipify!!! #include "Operators.cuh" #include "Reduce_hip.cuh" #include "Scan.cuh" #include <ctime> #include <cstdlib> #include <iostream> using namespace std; #define TYPE int int main() { srand(time(0)); size_t n = 1 << 24; TYPE* arr = new TYPE[n]; for (size_t i = 0; i < n; ++i) { arr[i] = static_cast<TYPE>(20.0 * rand() / RAND_MAX - 10.0); } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // hipEventRecord(start); // hipEventSynchronize(start); // TYPE reduce_cpu = ReduceCPU::reduce(arr, n, Operators::add<TYPE>()); // hipEventRecord(stop); // hipEventSynchronize(stop); // float cpu_time = 0.0f; // hipEventElapsedTime(&cpu_time, start, stop); // cout << "Reduce CPU: " << reduce_cpu << endl; // cout << "Execution time: " << cpu_time << " ms" << endl; // hipEventRecord(start); // hipEventSynchronize(start); // TYPE reduce_gpu = ReduceGPU::reduce(arr, n, Operators::add<TYPE>()); // hipEventRecord(stop); // hipEventSynchronize(stop); // float gpu_time = 0.0f; // hipEventElapsedTime(&gpu_time, start, stop); // cout << "Reduce GPU: " << reduce_gpu << endl; // cout << "Execution time: " << gpu_time << " ms" << endl; hipEventRecord(start); hipEventSynchronize(start); TYPE* exc_scan_cpu = ScanCPU::exclusive_scan(arr, n, Operators::add<TYPE>()); hipEventRecord(stop); hipEventSynchronize(stop); float cpu_time = 0.0f; hipEventElapsedTime(&cpu_time, start, stop); cout << "Exclusive Scan CPU: "; for (size_t i = 0; i < n; i += n >> 3) cout << exc_scan_cpu[i] << " "; cout << endl; cout << "Execution time: " << cpu_time << " ms" << endl; hipEventRecord(start); hipEventSynchronize(start); TYPE* exc_scan_gpu = ScanGPUEfficient::exclusive_scan(arr, n, Operators::add<TYPE>()); hipEventRecord(stop); hipEventSynchronize(stop); float gpu_time = 0.0f; hipEventElapsedTime(&gpu_time, start, stop); cout << "Exclusive Scan GPU: "; for (size_t i = 0; i < n; i += n >> 3) cout << exc_scan_gpu[i] << " "; cout << endl; cout << "Execution time: " << gpu_time << " ms" << endl; delete [] exc_scan_cpu; delete [] exc_scan_gpu; hipEventDestroy(start); hipEventDestroy(stop); delete [] arr; }
780eb75a58cca9d8c788c05f10c66166388e4be8.cu
#include "Operators.cuh" #include "Reduce.cuh" #include "Scan.cuh" #include <ctime> #include <cstdlib> #include <iostream> using namespace std; #define TYPE int int main() { srand(time(0)); size_t n = 1 << 24; TYPE* arr = new TYPE[n]; for (size_t i = 0; i < n; ++i) { arr[i] = static_cast<TYPE>(20.0 * rand() / RAND_MAX - 10.0); } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // cudaEventRecord(start); // cudaEventSynchronize(start); // TYPE reduce_cpu = ReduceCPU::reduce(arr, n, Operators::add<TYPE>()); // cudaEventRecord(stop); // cudaEventSynchronize(stop); // float cpu_time = 0.0f; // cudaEventElapsedTime(&cpu_time, start, stop); // cout << "Reduce CPU: " << reduce_cpu << endl; // cout << "Execution time: " << cpu_time << " ms" << endl; // cudaEventRecord(start); // cudaEventSynchronize(start); // TYPE reduce_gpu = ReduceGPU::reduce(arr, n, Operators::add<TYPE>()); // cudaEventRecord(stop); // cudaEventSynchronize(stop); // float gpu_time = 0.0f; // cudaEventElapsedTime(&gpu_time, start, stop); // cout << "Reduce GPU: " << reduce_gpu << endl; // cout << "Execution time: " << gpu_time << " ms" << endl; cudaEventRecord(start); cudaEventSynchronize(start); TYPE* exc_scan_cpu = ScanCPU::exclusive_scan(arr, n, Operators::add<TYPE>()); cudaEventRecord(stop); cudaEventSynchronize(stop); float cpu_time = 0.0f; cudaEventElapsedTime(&cpu_time, start, stop); cout << "Exclusive Scan CPU: "; for (size_t i = 0; i < n; i += n >> 3) cout << exc_scan_cpu[i] << " "; cout << endl; cout << "Execution time: " << cpu_time << " ms" << endl; cudaEventRecord(start); cudaEventSynchronize(start); TYPE* exc_scan_gpu = ScanGPUEfficient::exclusive_scan(arr, n, Operators::add<TYPE>()); cudaEventRecord(stop); cudaEventSynchronize(stop); float gpu_time = 0.0f; cudaEventElapsedTime(&gpu_time, start, stop); cout << "Exclusive Scan GPU: "; for (size_t i = 0; i < n; i += n >> 3) cout << exc_scan_gpu[i] << " "; cout << endl; cout << "Execution time: " << gpu_time << " ms" << endl; delete [] exc_scan_cpu; delete [] exc_scan_gpu; cudaEventDestroy(start); cudaEventDestroy(stop); delete [] arr; }
30e549f431af344633abc1f31f02ade4493a1ae9.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void shift_cuda_forward_kernel( const scalar_t* __restrict__ input, const int32_t* __restrict__ shift, scalar_t* __restrict__ output, const int32_t B, const int32_t C, const int32_t W, const int32_t H) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; const int32_t size = B*C*W*H; const int32_t CWH = C*W*H; const int32_t WH = W*H; const int32_t b = idx / CWH; const int32_t c = (idx - b*CWH) / WH; const int32_t w = (idx - b*CWH - c*WH) / W; const int32_t h = idx - b*CWH - c*WH - w*H; const int32_t target_w = w + shift[2*c]; const int32_t target_h = h + shift[2*c + 1]; const int32_t target_idx = b*CWH + c*WH + target_w*W + target_h; if (idx < size && target_w >= 0 && target_w < W && target_h >= 0 && target_h < H) { output[target_idx] = input[idx]; } } template <typename scalar_t> __global__ void shift_cuda_backward_kernel( const scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_output, const int32_t* __restrict__ shift, const int32_t B, const int32_t C, const int32_t W, const int32_t H) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; const int32_t size = B*C*W*H; const int32_t CWH = C*W*H; const int32_t WH = W*H; const int32_t b = idx / CWH; const int32_t c = (idx - b*CWH) / WH; const int32_t w = (idx - b*CWH - c*WH) / W; const int32_t h = idx - b*CWH - c*WH - w*H; const int32_t target_w = w - shift[2*c]; const int32_t target_h = h - shift[2*c + 1]; const int32_t target_idx = b*CWH + c*WH + target_w*W + target_h; if (idx < size && target_w >= 0 && target_w < W && target_h >= 0 && target_h < H) { grad_output[target_idx] = grad_input[idx]; } } } // namespace at::Tensor shift_cuda_forward( const at::Tensor input, const at::Tensor shift) { const auto B = input.size(0); const auto C = input.size(1); const auto W = input.size(2); const auto H = input.size(3); const auto size = B*C*W*H; const int threads = 1024; const int blocks = (size + threads - 1) / threads; auto output = at::zeros_like(input); AT_DISPATCH_FLOATING_TYPES(input.type(), "shift_forward_cuda", ([&] { hipLaunchKernelGGL(( shift_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, input.data<scalar_t>(), shift.data<int32_t>(), output.data<scalar_t>(), B, C, W, H); })); return output; } at::Tensor shift_cuda_backward( const at::Tensor grad_input, const at::Tensor shift) { const auto B = grad_input.size(0); const auto C = grad_input.size(1); const auto W = grad_input.size(2); const auto H = grad_input.size(3); const auto size = B*C*W*H; const int threads = 1024; const int blocks = (size + threads - 1) / threads; auto grad_output = at::zeros_like(grad_input); AT_DISPATCH_FLOATING_TYPES(grad_input.type(), "shift_backward_cuda", ([&] { hipLaunchKernelGGL(( shift_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, grad_input.data<scalar_t>(), grad_output.data<scalar_t>(), shift.data<int32_t>(), B, C, W, H); })); return grad_output; }
30e549f431af344633abc1f31f02ade4493a1ae9.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void shift_cuda_forward_kernel( const scalar_t* __restrict__ input, const int32_t* __restrict__ shift, scalar_t* __restrict__ output, const int32_t B, const int32_t C, const int32_t W, const int32_t H) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; const int32_t size = B*C*W*H; const int32_t CWH = C*W*H; const int32_t WH = W*H; const int32_t b = idx / CWH; const int32_t c = (idx - b*CWH) / WH; const int32_t w = (idx - b*CWH - c*WH) / W; const int32_t h = idx - b*CWH - c*WH - w*H; const int32_t target_w = w + shift[2*c]; const int32_t target_h = h + shift[2*c + 1]; const int32_t target_idx = b*CWH + c*WH + target_w*W + target_h; if (idx < size && target_w >= 0 && target_w < W && target_h >= 0 && target_h < H) { output[target_idx] = input[idx]; } } template <typename scalar_t> __global__ void shift_cuda_backward_kernel( const scalar_t* __restrict__ grad_input, scalar_t* __restrict__ grad_output, const int32_t* __restrict__ shift, const int32_t B, const int32_t C, const int32_t W, const int32_t H) { const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; const int32_t size = B*C*W*H; const int32_t CWH = C*W*H; const int32_t WH = W*H; const int32_t b = idx / CWH; const int32_t c = (idx - b*CWH) / WH; const int32_t w = (idx - b*CWH - c*WH) / W; const int32_t h = idx - b*CWH - c*WH - w*H; const int32_t target_w = w - shift[2*c]; const int32_t target_h = h - shift[2*c + 1]; const int32_t target_idx = b*CWH + c*WH + target_w*W + target_h; if (idx < size && target_w >= 0 && target_w < W && target_h >= 0 && target_h < H) { grad_output[target_idx] = grad_input[idx]; } } } // namespace at::Tensor shift_cuda_forward( const at::Tensor input, const at::Tensor shift) { const auto B = input.size(0); const auto C = input.size(1); const auto W = input.size(2); const auto H = input.size(3); const auto size = B*C*W*H; const int threads = 1024; const int blocks = (size + threads - 1) / threads; auto output = at::zeros_like(input); AT_DISPATCH_FLOATING_TYPES(input.type(), "shift_forward_cuda", ([&] { shift_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( input.data<scalar_t>(), shift.data<int32_t>(), output.data<scalar_t>(), B, C, W, H); })); return output; } at::Tensor shift_cuda_backward( const at::Tensor grad_input, const at::Tensor shift) { const auto B = grad_input.size(0); const auto C = grad_input.size(1); const auto W = grad_input.size(2); const auto H = grad_input.size(3); const auto size = B*C*W*H; const int threads = 1024; const int blocks = (size + threads - 1) / threads; auto grad_output = at::zeros_like(grad_input); AT_DISPATCH_FLOATING_TYPES(grad_input.type(), "shift_backward_cuda", ([&] { shift_cuda_backward_kernel<scalar_t><<<blocks, threads>>>( grad_input.data<scalar_t>(), grad_output.data<scalar_t>(), shift.data<int32_t>(), B, C, W, H); })); return grad_output; }
3f12ca8f86b18fffe6637a88c3d80fda654e6793.hip
// !!! This is a file automatically generated by hipify!!! /** * Element-wise Vector Addition: C[i] = A[i] + B[i]. * * This sample is a very basic sample that implements element by element * vector addition. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include "device_launch_parameters.h" /** * CUDA Kernel Device code * * Computes the element-wise vector multiplication of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorMultiply( float *A, float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size float EPS = 0.0001; int numElements = 50000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = float(i); h_B[i] = 1/(i+EPS); } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorMultiply << <blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_C, numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. 
printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs((h_A[i] + h_B[i]) - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
3f12ca8f86b18fffe6637a88c3d80fda654e6793.cu
/** * Element-wise Vector Addition: C[i] = A[i] + B[i]. * * This sample is a very basic sample that implements element by element * vector addition. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> #include "device_launch_parameters.h" /** * CUDA Kernel Device code * * Computes the element-wise vector multiplication of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorMultiply( float *A, float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size float EPS = 0.0001; int numElements = 50000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = float(i); h_B[i] = 1/(i+EPS); } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorMultiply << <blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_C, numElements); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. 
printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs((h_A[i] + h_B[i]) - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
a60e378539dc30b68e7bfdd54f8073a5f08c5871.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void uplo_scale_shift (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, const REAL scalea, const REAL shifta, const REAL scaleb, const REAL shiftb, REAL* c, const int offset_c, const int ld_c) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { c[offset_c + gid_0 + gid_1 * ld_c] = scalea * a[offset_a + gid_0 + gid_1 * ld_a] + shifta; } }
a60e378539dc30b68e7bfdd54f8073a5f08c5871.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void uplo_scale_shift (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, const REAL scalea, const REAL shifta, const REAL scaleb, const REAL shiftb, REAL* c, const int offset_c, const int ld_c) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { c[offset_c + gid_0 + gid_1 * ld_c] = scalea * a[offset_a + gid_0 + gid_1 * ld_a] + shifta; } }
18a1dae87ce6c7b5264555fdb12014e81322b623.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "box2d2r-256-5-128_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 49 #define BENCH_RAD 2 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 5 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), 
dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * 
__side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.03125f * A[t%2][i-2][j-2] + 0.03126f * A[t%2][i-2][j-1] + 0.03127f * A[t%2][i-2][j] + 0.03128f * A[t%2][i-2][j+1] + 0.03129f * A[t%2][i-2][j+2] + 0.03130f * A[t%2][i-1][j-2] + 0.03131f * A[t%2][i-1][j-1] + 0.03132f * A[t%2][i-1][j] + 0.03133f * A[t%2][i-1][j+1] + 0.03134f * A[t%2][i-1][j+2] + 0.03135f * A[t%2][i][j-2] + 0.03136f * A[t%2][i][j-1] + 0.24712f * A[t%2][i][j] + 0.03138f * A[t%2][i][j+1] + 0.03139f * A[t%2][i][j+2] + 0.03140f * A[t%2][i+1][j-2] + 0.03141f * A[t%2][i+1][j-1] + 0.03142f * A[t%2][i+1][j] + 0.03143f * A[t%2][i+1][j+1] + 0.03144f * A[t%2][i+1][j+2] + 0.03145f * A[t%2][i+2][j-2] + 0.03146f * A[t%2][i+2][j-1] + 0.03147f * A[t%2][i+2][j] + 0.03148f * A[t%2][i+2][j+1] + 0.03149f * A[t%2][i+2][j+2]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
18a1dae87ce6c7b5264555fdb12014e81322b623.cu
#include <assert.h> #include <stdio.h> #include "box2d2r-256-5-128_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 49 #define BENCH_RAD 2 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 5 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 
= (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] 
Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE 
__side1Len = 128; const AN5D_TYPE __side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.03125f * A[t%2][i-2][j-2] + 0.03126f * A[t%2][i-2][j-1] + 0.03127f * A[t%2][i-2][j] + 0.03128f * A[t%2][i-2][j+1] + 0.03129f * A[t%2][i-2][j+2] + 0.03130f * A[t%2][i-1][j-2] + 0.03131f * A[t%2][i-1][j-1] + 0.03132f * A[t%2][i-1][j] + 0.03133f * A[t%2][i-1][j+1] + 0.03134f * A[t%2][i-1][j+2] + 0.03135f * A[t%2][i][j-2] + 0.03136f * A[t%2][i][j-1] + 0.24712f * A[t%2][i][j] + 0.03138f * A[t%2][i][j+1] + 0.03139f * A[t%2][i][j+2] + 0.03140f * A[t%2][i+1][j-2] + 0.03141f * A[t%2][i+1][j-1] + 0.03142f * A[t%2][i+1][j] + 0.03143f * A[t%2][i+1][j+1] + 0.03144f * A[t%2][i+1][j+2] + 0.03145f * A[t%2][i+2][j-2] + 0.03146f * A[t%2][i+2][j-1] + 0.03147f * A[t%2][i+2][j] + 0.03148f * A[t%2][i+2][j+1] + 0.03149f * A[t%2][i+2][j+2]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
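The generated driver above fixes its block size from the temporal tile depth: each tile of __side2Len columns is widened by __halo * __side0Len cells on each side, so for __side0Len = 5, __halo = 2 and __side2Len = 236 the launch uses 236 + 2*10 = 256 threads per block, matching the "256-5-128" in the kernel header name. A standalone sketch of that arithmetic, with the constants copied from the first launch configuration above:

    #include <stdio.h>
    // Illustrative only: re-computes the overlapped tile sizes used by the generated driver.
    int main(void) {
      const unsigned halo = 2, side0Len = 5, side1Len = 128, side2Len = 236;
      const unsigned olLen1 = halo * side0Len;            // overlap along dim 1 per tile
      const unsigned olLen2 = halo * side0Len;            // overlap along dim 2 per tile
      const unsigned side1LenOl = side1Len + 2 * olLen1;  // rows touched per tile (148)
      const unsigned side2LenOl = side2Len + 2 * olLen2;  // columns per tile -> 256 threads
      printf("blockSize = %u threads, rows per tile = %u\n", 1 * side2LenOl, side1LenOl);
      return 0;
    }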
d8bec0f60f965e9cfe703c95557ce3761f28c170.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Includes for IntelliSense
#define _SIZE_T_DEFINED

#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>

extern "C"
{
    __constant__ int D_INPUT_UNITS;
    __constant__ int D_HIDDEN_UNITS;
    __constant__ int D_OUTPUT_UNITS;

    //kernel code
    __global__ void OutputDeltaKernel(float *outputDeltas, float *target, float *outputActivations, float *outputActivationDerivatives)
    {
        int unitId = blockDim.x * blockIdx.y * gridDim.x // rows preceding current row in grid
                   + blockDim.x * blockIdx.x             // blocks preceding current block
                   + threadIdx.x;

        if (unitId < D_OUTPUT_UNITS)
        {
            outputDeltas[unitId] = (target[unitId] - outputActivations[unitId]) * outputActivationDerivatives[unitId];
        }
    }
}
d8bec0f60f965e9cfe703c95557ce3761f28c170.cu
//Includes for IntelliSense
#define _SIZE_T_DEFINED

#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>

extern "C"
{
    __constant__ int D_INPUT_UNITS;
    __constant__ int D_HIDDEN_UNITS;
    __constant__ int D_OUTPUT_UNITS;

    //kernel code
    __global__ void OutputDeltaKernel(float *outputDeltas, float *target, float *outputActivations, float *outputActivationDerivatives)
    {
        int unitId = blockDim.x * blockIdx.y * gridDim.x // rows preceding current row in grid
                   + blockDim.x * blockIdx.x             // blocks preceding current block
                   + threadIdx.x;

        if (unitId < D_OUTPUT_UNITS)
        {
            outputDeltas[unitId] = (target[unitId] - outputActivations[unitId]) * outputActivationDerivatives[unitId];
        }
    }
}
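OutputDeltaKernel reads the layer width from __constant__ memory, so the host has to populate D_OUTPUT_UNITS before launching. A minimal, hypothetical host-side sketch; the device buffers and the layer size are assumptions, not part of the file pair above:

    // Hypothetical setup: d_outputDeltas, d_target, d_outputActivations and
    // d_outputActivationDerivatives are assumed to be allocated device buffers.
    int outputUnits = 10;  // assumed output-layer size
    cudaMemcpyToSymbol(D_OUTPUT_UNITS, &outputUnits, sizeof(int));
    int threads = 128;
    dim3 grid((outputUnits + threads - 1) / threads, 1);  // kernel also supports 2D grids via blockIdx.y
    OutputDeltaKernel<<<grid, threads>>>(d_outputDeltas, d_target,
                                         d_outputActivations, d_outputActivationDerivatives);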
ac47c14b7c2e5d2d4cd99c4c92712cfbae1a91c1.hip
// !!! This is a file automatically generated by hipify!!! #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <../src/mat/impls/aij/mpi/mpicusparse/mpicusparsematimpl.h> #include <thrust/advance.h> #include <petscsf.h> struct VecCUDAEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<0>(t); } }; static PetscErrorCode MatSetValuesCOO_MPIAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusp = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscInt n = cusp->coo_nd + cusp->coo_no; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; if (cusp->coo_p && v) { thrust::device_ptr<const PetscScalar> d_v; THRUSTARRAY *w = NULL; if (isCudaMem(v)) { d_v = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); w->assign(v,v+n); ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); d_v = w->data(); } auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->coo_p->begin()), cusp->coo_pw->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->coo_p->end()), cusp->coo_pw->end())); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); thrust::for_each(zibit,zieit,VecCUDAEquals()); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); delete w; ierr = MatSetValuesCOO_SeqAIJCUSPARSE(a->A,cusp->coo_pw->data().get(),imode);CHKERRQ(ierr); ierr = MatSetValuesCOO_SeqAIJCUSPARSE(a->B,cusp->coo_pw->data().get()+cusp->coo_nd,imode);CHKERRQ(ierr); } else { ierr = MatSetValuesCOO_SeqAIJCUSPARSE(a->A,v,imode);CHKERRQ(ierr); ierr = MatSetValuesCOO_SeqAIJCUSPARSE(a->B,v ? 
v+cusp->coo_nd : NULL,imode);CHKERRQ(ierr); } ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr); A->num_ass++; A->assembled = PETSC_TRUE; A->ass_nonzerostate = A->nonzerostate; A->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } template <typename Tuple> struct IsNotOffDiagT { PetscInt _cstart,_cend; IsNotOffDiagT(PetscInt cstart, PetscInt cend) : _cstart(cstart), _cend(cend) {} __host__ __device__ inline bool operator()(Tuple t) { return !(thrust::get<1>(t) < _cstart || thrust::get<1>(t) >= _cend); } }; struct IsOffDiag { PetscInt _cstart,_cend; IsOffDiag(PetscInt cstart, PetscInt cend) : _cstart(cstart), _cend(cend) {} __host__ __device__ inline bool operator() (const PetscInt &c) { return c < _cstart || c >= _cend; } }; struct GlobToLoc { PetscInt _start; GlobToLoc(PetscInt start) : _start(start) {} __host__ __device__ inline PetscInt operator() (const PetscInt &c) { return c - _start; } }; static PetscErrorCode MatSetPreallocationCOO_MPIAIJCUSPARSE(Mat B, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[]) { Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data; Mat_MPIAIJCUSPARSE *cusp = (Mat_MPIAIJCUSPARSE*)b->spptr; PetscErrorCode ierr; PetscInt *jj; size_t noff = 0; THRUSTINTARRAY d_i(n); THRUSTINTARRAY d_j(n); ISLocalToGlobalMapping l2g; hipError_t cerr; PetscFunctionBegin; ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); if (b->A) { ierr = MatCUSPARSEClearHandle(b->A);CHKERRQ(ierr); } if (b->B) { ierr = MatCUSPARSEClearHandle(b->B);CHKERRQ(ierr); } ierr = PetscFree(b->garray);CHKERRQ(ierr); ierr = VecDestroy(&b->lvec);CHKERRQ(ierr); ierr = MatDestroy(&b->A);CHKERRQ(ierr); ierr = MatDestroy(&b->B);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr); d_i.assign(coo_i,coo_i+n); d_j.assign(coo_j,coo_j+n); delete cusp->coo_p; delete cusp->coo_pw; cusp->coo_p = NULL; cusp->coo_pw = NULL; ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto firstoffd = thrust::find_if(thrust::device,d_j.begin(),d_j.end(),IsOffDiag(B->cmap->rstart,B->cmap->rend)); auto firstdiag = thrust::find_if_not(thrust::device,firstoffd,d_j.end(),IsOffDiag(B->cmap->rstart,B->cmap->rend)); if (firstoffd != d_j.end() && firstdiag != d_j.end()) { cusp->coo_p = new THRUSTINTARRAY(n); cusp->coo_pw = new THRUSTARRAY(n); thrust::sequence(thrust::device,cusp->coo_p->begin(),cusp->coo_p->end(),0); auto fzipp = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin(),cusp->coo_p->begin())); auto ezipp = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end(),cusp->coo_p->end())); auto mzipp = thrust::partition(thrust::device,fzipp,ezipp,IsNotOffDiagT<thrust::tuple<PetscInt,PetscInt,PetscInt> >(B->cmap->rstart,B->cmap->rend)); firstoffd = mzipp.get_iterator_tuple().get<1>(); } cusp->coo_nd = thrust::distance(d_j.begin(),firstoffd); cusp->coo_no = thrust::distance(firstoffd,d_j.end()); /* from global to local */ thrust::transform(thrust::device,d_i.begin(),d_i.end(),d_i.begin(),GlobToLoc(B->rmap->rstart)); thrust::transform(thrust::device,d_j.begin(),firstoffd,d_j.begin(),GlobToLoc(B->cmap->rstart)); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); /* copy offdiag column indices to map on the CPU */ ierr = PetscMalloc1(cusp->coo_no,&jj);CHKERRQ(ierr); cerr = hipMemcpy(jj,d_j.data().get()+cusp->coo_nd,cusp->coo_no*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); auto o_j = d_j.begin(); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); thrust::advance(o_j,cusp->coo_nd); 
thrust::sort(thrust::device,o_j,d_j.end()); auto wit = thrust::unique(thrust::device,o_j,d_j.end()); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); noff = thrust::distance(o_j,wit); ierr = PetscMalloc1(noff+1,&b->garray);CHKERRQ(ierr); cerr = hipMemcpy(b->garray,d_j.data().get()+cusp->coo_nd,noff*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); ierr = PetscLogGpuToCpu((noff+cusp->coo_no)*sizeof(PetscInt));CHKERRQ(ierr); ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,1,noff,b->garray,PETSC_COPY_VALUES,&l2g);CHKERRQ(ierr); ierr = ISLocalToGlobalMappingSetType(l2g,ISLOCALTOGLOBALMAPPINGHASH);CHKERRQ(ierr); ierr = ISGlobalToLocalMappingApply(l2g,IS_GTOLM_DROP,cusp->coo_no,jj,&n,jj);CHKERRQ(ierr); if (n != cusp->coo_no) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Unexpected is size %D != %D coo size",n,cusp->coo_no); ierr = ISLocalToGlobalMappingDestroy(&l2g);CHKERRQ(ierr); ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); ierr = MatSetType(b->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr); ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); ierr = MatSetSizes(b->B,B->rmap->n,noff,B->rmap->n,noff);CHKERRQ(ierr); ierr = MatSetType(b->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr); /* GPU memory, cusparse specific call handles it internally */ ierr = MatSetPreallocationCOO_SeqAIJCUSPARSE(b->A,cusp->coo_nd,d_i.data().get(),d_j.data().get());CHKERRQ(ierr); ierr = MatSetPreallocationCOO_SeqAIJCUSPARSE(b->B,cusp->coo_no,d_i.data().get()+cusp->coo_nd,jj);CHKERRQ(ierr); ierr = PetscFree(jj);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusp->diagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusp->offdiagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->A,cusp->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->B,cusp->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->A,cusp->stream);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->B,cusp->stream);CHKERRQ(ierr); ierr = MatSetUpMultiply_MPIAIJ(B);CHKERRQ(ierr); B->preallocated = PETSC_TRUE; B->nonzerostate++; ierr = MatBindToCPU(b->A,B->boundtocpu);CHKERRQ(ierr); ierr = MatBindToCPU(b->B,B->boundtocpu);CHKERRQ(ierr); B->offloadmask = PETSC_OFFLOAD_CPU; B->assembled = PETSC_FALSE; B->was_assembled = PETSC_FALSE; PetscFunctionReturn(0); } static PetscErrorCode MatMPIAIJGetLocalMatMerge_MPIAIJCUSPARSE(Mat A,MatReuse scall,IS *glob,Mat *A_loc) { Mat Ad,Ao; const PetscInt *cmap; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&cmap);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMergeMats(Ad,Ao,scall,A_loc);CHKERRQ(ierr); if (glob) { PetscInt cst, i, dn, on, *gidx; ierr = MatGetLocalSize(Ad,NULL,&dn);CHKERRQ(ierr); ierr = MatGetLocalSize(Ao,NULL,&on);CHKERRQ(ierr); ierr = MatGetOwnershipRangeColumn(A,&cst,NULL);CHKERRQ(ierr); ierr = PetscMalloc1(dn+on,&gidx);CHKERRQ(ierr); for (i=0; i<dn; i++) gidx[i] = cst + i; for (i=0; i<on; i++) gidx[i+dn] = cmap[i]; ierr = ISCreateGeneral(PetscObjectComm((PetscObject)Ad),dn+on,gidx,PETSC_OWN_POINTER,glob);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJCUSPARSE(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[]) { Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data; Mat_MPIAIJCUSPARSE *cusparseStruct = 
(Mat_MPIAIJCUSPARSE*)b->spptr; PetscErrorCode ierr; PetscInt i; PetscFunctionBegin; ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); if (PetscDefined(USE_DEBUG) && d_nnz) { for (i=0; i<B->rmap->n; i++) { if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]); } } if (PetscDefined(USE_DEBUG) && o_nnz) { for (i=0; i<B->rmap->n; i++) { if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]); } } #if defined(PETSC_USE_CTABLE) ierr = PetscTableDestroy(&b->colmap);CHKERRQ(ierr); #else ierr = PetscFree(b->colmap);CHKERRQ(ierr); #endif ierr = PetscFree(b->garray);CHKERRQ(ierr); ierr = VecDestroy(&b->lvec);CHKERRQ(ierr); ierr = VecScatterDestroy(&b->Mvctx);CHKERRQ(ierr); /* Because the B will have been resized we simply destroy it and create a new one each time */ ierr = MatDestroy(&b->B);CHKERRQ(ierr); if (!b->A) { ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr); } if (!b->B) { PetscMPIInt size; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);CHKERRQ(ierr); ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); ierr = MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr); } ierr = MatSetType(b->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatSetType(b->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatBindToCPU(b->A,B->boundtocpu);CHKERRQ(ierr); ierr = MatBindToCPU(b->B,B->boundtocpu);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusparseStruct->diagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusparseStruct->offdiagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->A,cusparseStruct->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->B,cusparseStruct->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->A,cusparseStruct->stream);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->B,cusparseStruct->stream);CHKERRQ(ierr); B->preallocated = PETSC_TRUE; PetscFunctionReturn(0); } /*@ MatAIJCUSPARSESetGenerateTranspose - Sets the flag to explicitly generate the transpose matrix before calling MatMultTranspose Not collective Input Parameters: + A - Matrix of type SEQAIJCUSPARSE or MPIAIJCUSPARSE - gen - the boolean flag Level: intermediate .seealso: MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE @*/ PetscErrorCode MatAIJCUSPARSESetGenerateTranspose(Mat A, PetscBool gen) { PetscErrorCode ierr; PetscBool ismpiaij; PetscFunctionBegin; PetscValidHeaderSpecific(A,MAT_CLASSID,1); MatCheckPreallocated(A,1); ierr = PetscObjectBaseTypeCompare((PetscObject)A,MATMPIAIJ,&ismpiaij);CHKERRQ(ierr); if (ismpiaij) { Mat A_d,A_o; ierr = MatMPIAIJGetSeqAIJ(A,&A_d,&A_o,NULL);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A_d,gen);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A_o,gen);CHKERRQ(ierr); } else { ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A,gen);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatMult_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; 
PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatZeroEntries_MPIAIJCUSPARSE(Mat A) { Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { Mat_MPIAIJCUSPARSE *spptr = (Mat_MPIAIJCUSPARSE*)l->spptr; PetscSplitCSRDataStructure *d_mat = spptr->deviceMat; if (d_mat) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)l->A->data; Mat_SeqAIJ *b = (Mat_SeqAIJ*)l->B->data; PetscInt n = A->rmap->n, nnza = a->i[n], nnzb = b->i[n]; hipError_t err; PetscScalar *vals; ierr = PetscInfo(A,"Zero device matrix diag and offfdiag\n");CHKERRQ(ierr); err = hipMemcpy( &vals, &d_mat->diag.a, sizeof(PetscScalar*), hipMemcpyDeviceToHost);CHKERRCUDA(err); err = hipMemset( vals, 0, (nnza)*sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy( &vals, &d_mat->offdiag.a, sizeof(PetscScalar*), hipMemcpyDeviceToHost);CHKERRCUDA(err); err = hipMemset( vals, 0, (nnzb)*sizeof(PetscScalar));CHKERRCUDA(err); } } ierr = MatZeroEntries(l->A);CHKERRQ(ierr); ierr = MatZeroEntries(l->B);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->rmap->n,nt); ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr); ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr); ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatCUSPARSESetFormat_MPIAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT_DIAG: cusparseStruct->diagGPUMatFormat = format; break; case MAT_CUSPARSE_MULT_OFFDIAG: cusparseStruct->offdiagGPUMatFormat = format; break; case MAT_CUSPARSE_ALL: cusparseStruct->diagGPUMatFormat = format; cusparseStruct->offdiagGPUMatFormat = format; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. 
Only MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_DIAG, and MAT_CUSPARSE_MULT_ALL are currently supported.",op); } PetscFunctionReturn(0); } PetscErrorCode MatSetFromOptions_MPIAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) { MatCUSPARSEStorageFormat format; PetscErrorCode ierr; PetscBool flg; Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscFunctionBegin; ierr = PetscOptionsHead(PetscOptionsObject,"MPIAIJCUSPARSE options");CHKERRQ(ierr); if (A->factortype==MAT_FACTOR_NONE) { ierr = PetscOptionsEnum("-mat_cusparse_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_DIAG,format);CHKERRQ(ierr); } ierr = PetscOptionsEnum("-mat_cusparse_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->offdiagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_OFFDIAG,format);CHKERRQ(ierr); } ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of the diagonal and off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr); } } ierr = PetscOptionsTail();CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatAssemblyEnd_MPIAIJCUSPARSE(Mat A,MatAssemblyType mode) { PetscErrorCode ierr; Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)mpiaij->spptr; PetscSplitCSRDataStructure *d_mat = cusparseStruct->deviceMat; PetscFunctionBegin; ierr = MatAssemblyEnd_MPIAIJ(A,mode);CHKERRQ(ierr); if (!A->was_assembled && mode == MAT_FINAL_ASSEMBLY) { ierr = VecSetType(mpiaij->lvec,VECSEQCUDA);CHKERRQ(ierr); } if (d_mat) { A->offloadmask = PETSC_OFFLOAD_GPU; // if we assembled on the device } PetscFunctionReturn(0); } PetscErrorCode MatDestroy_MPIAIJCUSPARSE(Mat A) { PetscErrorCode ierr; Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)aij->spptr; hipError_t err; hipsparseStatus_t stat; PetscFunctionBegin; if (!cusparseStruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr"); if (cusparseStruct->deviceMat) { Mat_SeqAIJ *jaca = (Mat_SeqAIJ*)aij->A->data; Mat_SeqAIJ *jacb = (Mat_SeqAIJ*)aij->B->data; PetscSplitCSRDataStructure *d_mat = cusparseStruct->deviceMat, h_mat; ierr = PetscInfo(A,"Have device matrix\n");CHKERRQ(ierr); err = hipMemcpy( &h_mat, d_mat, sizeof(PetscSplitCSRDataStructure), hipMemcpyDeviceToHost);CHKERRCUDA(err); if (jaca->compressedrow.use) { err = hipFree(h_mat.diag.i);CHKERRCUDA(err); } if (jacb->compressedrow.use) { err = hipFree(h_mat.offdiag.i);CHKERRCUDA(err); } err = hipFree(h_mat.colmap);CHKERRCUDA(err); err = hipFree(d_mat);CHKERRCUDA(err); } try { if (aij->A) { ierr = MatCUSPARSEClearHandle(aij->A);CHKERRQ(ierr); } if (aij->B) { ierr = MatCUSPARSEClearHandle(aij->B);CHKERRQ(ierr); } stat = hipsparseDestroy(cusparseStruct->handle);CHKERRCUSPARSE(stat); if (cusparseStruct->stream) { err = 
hipStreamDestroy(cusparseStruct->stream);CHKERRCUDA(err); } delete cusparseStruct->coo_p; delete cusparseStruct->coo_pw; delete cusparseStruct; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSPARSE error: %s", ex); } ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJGetLocalMatMerge_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr); ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat B, MatType mtype, MatReuse reuse, Mat* newmat) { PetscErrorCode ierr; Mat_MPIAIJ *a; Mat_MPIAIJCUSPARSE *cusparseStruct; hipsparseStatus_t stat; Mat A; PetscFunctionBegin; if (reuse == MAT_INITIAL_MATRIX) { ierr = MatDuplicate(B,MAT_COPY_VALUES,newmat);CHKERRQ(ierr); } else if (reuse == MAT_REUSE_MATRIX) { ierr = MatCopy(B,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } A = *newmat; A->boundtocpu = PETSC_FALSE; ierr = PetscFree(A->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&A->defaultvectype);CHKERRQ(ierr); a = (Mat_MPIAIJ*)A->data; if (a->A) { ierr = MatSetType(a->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); } if (a->B) { ierr = MatSetType(a->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); } if (a->lvec) { ierr = VecSetType(a->lvec,VECSEQCUDA);CHKERRQ(ierr); } if (reuse != MAT_REUSE_MATRIX && !a->spptr) { a->spptr = new Mat_MPIAIJCUSPARSE; cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; cusparseStruct->diagGPUMatFormat = MAT_CUSPARSE_CSR; cusparseStruct->offdiagGPUMatFormat = MAT_CUSPARSE_CSR; cusparseStruct->coo_p = NULL; cusparseStruct->coo_pw = NULL; cusparseStruct->stream = 0; stat = hipsparseCreate(&(cusparseStruct->handle));CHKERRCUSPARSE(stat); cusparseStruct->deviceMat = NULL; } A->ops->assemblyend = MatAssemblyEnd_MPIAIJCUSPARSE; A->ops->mult = MatMult_MPIAIJCUSPARSE; A->ops->multadd = MatMultAdd_MPIAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_MPIAIJCUSPARSE; A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSPARSE; A->ops->destroy = MatDestroy_MPIAIJCUSPARSE; A->ops->zeroentries = MatZeroEntries_MPIAIJCUSPARSE; A->ops->productsetfromoptions = MatProductSetFromOptions_MPIAIJBACKEND; ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJGetLocalMatMerge_C",MatMPIAIJGetLocalMatMerge_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_MPIAIJCUSPARSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSPARSE(Mat A) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr); ierr = MatConvert_MPIAIJ_MPIAIJCUSPARSE(A,MATMPIAIJCUSPARSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); PetscFunctionReturn(0); } 
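/* Illustrative sketch only (not part of PETSc): typical creation of an MPIAIJCUSPARSE matrix
   through the generic MatCreate()/MatSetType() path used by the converter above; the sizes and
   preallocation counts are placeholders. Kept inside #if 0 so it does not affect compilation. */
#if 0
static PetscErrorCode ExampleCreateMPIAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n, Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm, A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A, m, n, PETSC_DETERMINE, PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(*A, MATMPIAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(*A, 5, NULL, 2, NULL);CHKERRQ(ierr); /* placeholder nnz estimates */
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif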
/*@ MatCreateAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format (the default parallel PETSc format). This matrix will ultimately pushed down to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter nz (or the array nnz). By setting these parameters accurately, performance during matrix assembly can be increased by more than a factor of 50. Collective Input Parameters: + comm - MPI communicator, set to PETSC_COMM_SELF . m - number of rows . n - number of columns . nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or NULL Output Parameter: . A - the matrix It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(), MatXXXXSetPreallocation() paradigm instead of this routine directly. [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation] Notes: If nnz is given then nz is ignored The AIJ format (also called the Yale sparse matrix format or compressed row storage), is fully compatible with standard Fortran 77 storage. That is, the stored row and column indices can begin at either one (as in Fortran) or zero. See the users' manual for details. Specify the preallocated storage with either nz or nnz (not both). Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory allocation. For large problems you MUST preallocate memory or you will get TERRIBLE performance, see the users' manual chapter on matrices. By default, this format uses inodes (identical nodes) when possible, to improve numerical efficiency of matrix-vector products and solves. We search for consecutive rows with the same nonzero structure, thereby reusing matrix information to achieve increased efficiency. Level: intermediate .seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATMPIAIJCUSPARSE, MATAIJCUSPARSE @*/ PetscErrorCode MatCreateAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr); ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr); if (size > 1) { ierr = MatSetType(*A,MATMPIAIJCUSPARSE);CHKERRQ(ierr); ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr); } else { ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*MC MATAIJCUSPARSE - MATMPIAIJCUSPARSE = "aijcusparse" = "mpiaijcusparse" - A matrix type to be used for sparse matrices. A matrix type type whose data resides on Nvidia GPUs. These matrices can be in either CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. This matrix type is identical to MATSEQAIJCUSPARSE when constructed with a single process communicator, and MATMPIAIJCUSPARSE otherwise. As a result, for single process communicators, MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported for communicators controlling multiple processes. It is recommended that you call both of the above preallocation routines for simplicity. 
Options Database Keys: + -mat_type mpiaijcusparse - sets the matrix type to "mpiaijcusparse" during a call to MatSetFromOptions() . -mat_cusparse_storage_format csr - sets the storage format of diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). . -mat_cusparse_mult_diag_storage_format csr - sets the storage format of diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). - -mat_cusparse_mult_offdiag_storage_format csr - sets the storage format of off-diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). Level: beginner .seealso: MatCreateAIJCUSPARSE(), MATSEQAIJCUSPARSE, MatCreateSeqAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M M*/ // get GPU pointer to stripped down Mat. For both Seq and MPI Mat. PetscErrorCode MatCUSPARSEGetDeviceMatWrite(Mat A, PetscSplitCSRDataStructure **B) { #if defined(PETSC_USE_CTABLE) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Device metadata does not support ctable (--with-ctable=0)"); #else PetscSplitCSRDataStructure **p_d_mat; PetscMPIInt size,rank; MPI_Comm comm; PetscErrorCode ierr; int *ai,*bi,*aj,*bj; PetscScalar *aa,*ba; PetscFunctionBegin; ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr); ierr = MPI_Comm_rank(comm,&rank);CHKERRMPI(ierr); if (A->factortype == MAT_FACTOR_NONE) { CsrMatrix *matrixA,*matrixB=NULL; if (size == 1) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; p_d_mat = &cusparsestruct->deviceMat; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; if (cusparsestruct->format==MAT_CUSPARSE_CSR) { matrixA = (CsrMatrix*)matstruct->mat; bi = bj = NULL; ba = NULL; } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Device Mat needs MAT_CUSPARSE_CSR"); } else { Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *spptr = (Mat_MPIAIJCUSPARSE*)aij->spptr; p_d_mat = &spptr->deviceMat; Mat_SeqAIJCUSPARSE *cusparsestructA = (Mat_SeqAIJCUSPARSE*)aij->A->spptr; Mat_SeqAIJCUSPARSE *cusparsestructB = (Mat_SeqAIJCUSPARSE*)aij->B->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstructA = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestructA->mat; Mat_SeqAIJCUSPARSEMultStruct *matstructB = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestructB->mat; if (cusparsestructA->format==MAT_CUSPARSE_CSR) { if (cusparsestructB->format!=MAT_CUSPARSE_CSR) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Device Mat B needs MAT_CUSPARSE_CSR"); matrixA = (CsrMatrix*)matstructA->mat; matrixB = (CsrMatrix*)matstructB->mat; bi = thrust::raw_pointer_cast(matrixB->row_offsets->data()); bj = thrust::raw_pointer_cast(matrixB->column_indices->data()); ba = thrust::raw_pointer_cast(matrixB->values->data()); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Device Mat A needs MAT_CUSPARSE_CSR"); } ai = thrust::raw_pointer_cast(matrixA->row_offsets->data()); aj = thrust::raw_pointer_cast(matrixA->column_indices->data()); aa = thrust::raw_pointer_cast(matrixA->values->data()); } else { *B = NULL; PetscFunctionReturn(0); } // act like MatSetValues because not called on host if (A->assembled) { if (A->was_assembled) { ierr = PetscInfo(A,"Assemble more than once already\n");CHKERRQ(ierr); } A->was_assembled = PETSC_TRUE; // this is done (lazy) in MatAssemble but we are not calling it anymore - done in AIJ AssemblyEnd, need here? 
} else { SETERRQ(comm,PETSC_ERR_SUP,"Need assemble matrix"); } if (!*p_d_mat) { hipError_t err; PetscSplitCSRDataStructure *d_mat, h_mat; Mat_SeqAIJ *jaca; PetscInt n = A->rmap->n, nnz; // create and copy ierr = PetscInfo(A,"Create device matrix\n");CHKERRQ(ierr); err = hipMalloc((void **)&d_mat, sizeof(PetscSplitCSRDataStructure));CHKERRCUDA(err); err = hipMemset( d_mat, 0, sizeof(PetscSplitCSRDataStructure));CHKERRCUDA(err); *B = *p_d_mat = d_mat; // return it, set it in Mat, and set it up if (size == 1) { jaca = (Mat_SeqAIJ*)A->data; h_mat.rstart = 0; h_mat.rend = A->rmap->n; h_mat.cstart = 0; h_mat.cend = A->cmap->n; h_mat.offdiag.i = h_mat.offdiag.j = NULL; h_mat.offdiag.a = NULL; } else { Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; Mat_SeqAIJ *jacb; jaca = (Mat_SeqAIJ*)aij->A->data; jacb = (Mat_SeqAIJ*)aij->B->data; if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray"); if (aij->B->rmap->n != aij->A->rmap->n) SETERRQ(comm,PETSC_ERR_SUP,"Only support aij->B->rmap->n == aij->A->rmap->n"); // create colmap - this is ussually done (lazy) in MatSetValues aij->donotstash = PETSC_TRUE; aij->A->nooffprocentries = aij->B->nooffprocentries = A->nooffprocentries = PETSC_TRUE; jaca->nonew = jacb->nonew = PETSC_TRUE; // no more dissassembly ierr = PetscCalloc1(A->cmap->N+1,&aij->colmap);CHKERRQ(ierr); aij->colmap[A->cmap->N] = -9; ierr = PetscLogObjectMemory((PetscObject)A,(A->cmap->N+1)*sizeof(PetscInt));CHKERRQ(ierr); { PetscInt ii; for (ii=0; ii<aij->B->cmap->n; ii++) aij->colmap[aij->garray[ii]] = ii+1; } if (aij->colmap[A->cmap->N] != -9) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"aij->colmap[A->cmap->N] != -9"); // allocate B copy data h_mat.rstart = A->rmap->rstart; h_mat.rend = A->rmap->rend; h_mat.cstart = A->cmap->rstart; h_mat.cend = A->cmap->rend; nnz = jacb->i[n]; if (jacb->compressedrow.use) { err = hipMalloc((void **)&h_mat.offdiag.i, (n+1)*sizeof(int));CHKERRCUDA(err); // kernel input err = hipMemcpy( h_mat.offdiag.i, jacb->i, (n+1)*sizeof(int), hipMemcpyHostToDevice);CHKERRCUDA(err); } else h_mat.offdiag.i = bi; h_mat.offdiag.j = bj; h_mat.offdiag.a = ba; err = hipMalloc((void **)&h_mat.colmap, (A->cmap->N+1)*sizeof(PetscInt));CHKERRCUDA(err); // kernel output err = hipMemcpy( h_mat.colmap, aij->colmap, (A->cmap->N+1)*sizeof(PetscInt), hipMemcpyHostToDevice);CHKERRCUDA(err); h_mat.offdiag.ignorezeroentries = jacb->ignorezeroentries; h_mat.offdiag.n = n; } // allocate A copy data nnz = jaca->i[n]; h_mat.diag.n = n; h_mat.diag.ignorezeroentries = jaca->ignorezeroentries; ierr = MPI_Comm_rank(comm,&h_mat.rank);CHKERRMPI(ierr); if (jaca->compressedrow.use) { err = hipMalloc((void **)&h_mat.diag.i, (n+1)*sizeof(int));CHKERRCUDA(err); // kernel input err = hipMemcpy( h_mat.diag.i, jaca->i, (n+1)*sizeof(int), hipMemcpyHostToDevice);CHKERRCUDA(err); } else { h_mat.diag.i = ai; } h_mat.diag.j = aj; h_mat.diag.a = aa; // copy pointers and metdata to device err = hipMemcpy( d_mat, &h_mat, sizeof(PetscSplitCSRDataStructure), hipMemcpyHostToDevice);CHKERRCUDA(err); ierr = PetscInfo2(A,"Create device Mat n=%D nnz=%D\n",h_mat.diag.n, nnz);CHKERRQ(ierr); } else { *B = *p_d_mat; } A->assembled = PETSC_FALSE; // ready to write with matsetvalues - this done (lazy) in normal MatSetValues PetscFunctionReturn(0); #endif }
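// ---------------------------------------------------------------------------
// The MatCUSPARSEGetDeviceMatWrite() routine above fills a host-side
// PetscSplitCSRDataStructure whose members are *device* pointers and then
// ships the whole struct to the GPU with a single memcpy, so kernels can take
// one pointer instead of many raw arrays.  Below is a minimal, self-contained
// sketch of that pattern only; DeviceCSR, UploadCSR() and the omission of
// error checking are illustrative assumptions, not PETSc's actual API.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

struct DeviceCSR {
  int     n;   // number of rows
  int    *i;   // row offsets   (device memory, n+1 entries)
  int    *j;   // column indices (device memory, nnz entries)
  double *a;   // values         (device memory, nnz entries)
};

// Allocate the arrays on the device, fill a host mirror of the struct with the
// resulting device pointers, then copy the mirror itself to the GPU.  The
// returned pointer can be passed directly as a kernel argument.
static DeviceCSR *UploadCSR(int n, int nnz,
                            const int *hi, const int *hj, const double *ha) {
  DeviceCSR h_mat;                     // host mirror holding device pointers
  h_mat.n = n;
  cudaMalloc((void **)&h_mat.i, (n + 1) * sizeof(int));
  cudaMalloc((void **)&h_mat.j, nnz * sizeof(int));
  cudaMalloc((void **)&h_mat.a, nnz * sizeof(double));
  cudaMemcpy(h_mat.i, hi, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(h_mat.j, hj, nnz * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(h_mat.a, ha, nnz * sizeof(double), cudaMemcpyHostToDevice);

  DeviceCSR *d_mat = NULL;             // device-resident copy of the struct
  cudaMalloc((void **)&d_mat, sizeof(DeviceCSR));
  cudaMemcpy(d_mat, &h_mat, sizeof(DeviceCSR), cudaMemcpyHostToDevice);
  return d_mat;
}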
ac47c14b7c2e5d2d4cd99c4c92712cfbae1a91c1.cu
#define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <../src/mat/impls/aij/mpi/mpicusparse/mpicusparsematimpl.h> #include <thrust/advance.h> #include <petscsf.h> struct VecCUDAEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<0>(t); } }; static PetscErrorCode MatSetValuesCOO_MPIAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusp = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscInt n = cusp->coo_nd + cusp->coo_no; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; if (cusp->coo_p && v) { thrust::device_ptr<const PetscScalar> d_v; THRUSTARRAY *w = NULL; if (isCudaMem(v)) { d_v = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); w->assign(v,v+n); ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); d_v = w->data(); } auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->coo_p->begin()), cusp->coo_pw->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->coo_p->end()), cusp->coo_pw->end())); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); thrust::for_each(zibit,zieit,VecCUDAEquals()); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); delete w; ierr = MatSetValuesCOO_SeqAIJCUSPARSE(a->A,cusp->coo_pw->data().get(),imode);CHKERRQ(ierr); ierr = MatSetValuesCOO_SeqAIJCUSPARSE(a->B,cusp->coo_pw->data().get()+cusp->coo_nd,imode);CHKERRQ(ierr); } else { ierr = MatSetValuesCOO_SeqAIJCUSPARSE(a->A,v,imode);CHKERRQ(ierr); ierr = MatSetValuesCOO_SeqAIJCUSPARSE(a->B,v ? 
v+cusp->coo_nd : NULL,imode);CHKERRQ(ierr); } ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr); A->num_ass++; A->assembled = PETSC_TRUE; A->ass_nonzerostate = A->nonzerostate; A->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(0); } template <typename Tuple> struct IsNotOffDiagT { PetscInt _cstart,_cend; IsNotOffDiagT(PetscInt cstart, PetscInt cend) : _cstart(cstart), _cend(cend) {} __host__ __device__ inline bool operator()(Tuple t) { return !(thrust::get<1>(t) < _cstart || thrust::get<1>(t) >= _cend); } }; struct IsOffDiag { PetscInt _cstart,_cend; IsOffDiag(PetscInt cstart, PetscInt cend) : _cstart(cstart), _cend(cend) {} __host__ __device__ inline bool operator() (const PetscInt &c) { return c < _cstart || c >= _cend; } }; struct GlobToLoc { PetscInt _start; GlobToLoc(PetscInt start) : _start(start) {} __host__ __device__ inline PetscInt operator() (const PetscInt &c) { return c - _start; } }; static PetscErrorCode MatSetPreallocationCOO_MPIAIJCUSPARSE(Mat B, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[]) { Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data; Mat_MPIAIJCUSPARSE *cusp = (Mat_MPIAIJCUSPARSE*)b->spptr; PetscErrorCode ierr; PetscInt *jj; size_t noff = 0; THRUSTINTARRAY d_i(n); THRUSTINTARRAY d_j(n); ISLocalToGlobalMapping l2g; cudaError_t cerr; PetscFunctionBegin; ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); if (b->A) { ierr = MatCUSPARSEClearHandle(b->A);CHKERRQ(ierr); } if (b->B) { ierr = MatCUSPARSEClearHandle(b->B);CHKERRQ(ierr); } ierr = PetscFree(b->garray);CHKERRQ(ierr); ierr = VecDestroy(&b->lvec);CHKERRQ(ierr); ierr = MatDestroy(&b->A);CHKERRQ(ierr); ierr = MatDestroy(&b->B);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr); d_i.assign(coo_i,coo_i+n); d_j.assign(coo_j,coo_j+n); delete cusp->coo_p; delete cusp->coo_pw; cusp->coo_p = NULL; cusp->coo_pw = NULL; ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); auto firstoffd = thrust::find_if(thrust::device,d_j.begin(),d_j.end(),IsOffDiag(B->cmap->rstart,B->cmap->rend)); auto firstdiag = thrust::find_if_not(thrust::device,firstoffd,d_j.end(),IsOffDiag(B->cmap->rstart,B->cmap->rend)); if (firstoffd != d_j.end() && firstdiag != d_j.end()) { cusp->coo_p = new THRUSTINTARRAY(n); cusp->coo_pw = new THRUSTARRAY(n); thrust::sequence(thrust::device,cusp->coo_p->begin(),cusp->coo_p->end(),0); auto fzipp = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin(),cusp->coo_p->begin())); auto ezipp = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end(),cusp->coo_p->end())); auto mzipp = thrust::partition(thrust::device,fzipp,ezipp,IsNotOffDiagT<thrust::tuple<PetscInt,PetscInt,PetscInt> >(B->cmap->rstart,B->cmap->rend)); firstoffd = mzipp.get_iterator_tuple().get<1>(); } cusp->coo_nd = thrust::distance(d_j.begin(),firstoffd); cusp->coo_no = thrust::distance(firstoffd,d_j.end()); /* from global to local */ thrust::transform(thrust::device,d_i.begin(),d_i.end(),d_i.begin(),GlobToLoc(B->rmap->rstart)); thrust::transform(thrust::device,d_j.begin(),firstoffd,d_j.begin(),GlobToLoc(B->cmap->rstart)); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); /* copy offdiag column indices to map on the CPU */ ierr = PetscMalloc1(cusp->coo_no,&jj);CHKERRQ(ierr); cerr = cudaMemcpy(jj,d_j.data().get()+cusp->coo_nd,cusp->coo_no*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); auto o_j = d_j.begin(); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); thrust::advance(o_j,cusp->coo_nd); 
thrust::sort(thrust::device,o_j,d_j.end()); auto wit = thrust::unique(thrust::device,o_j,d_j.end()); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); noff = thrust::distance(o_j,wit); ierr = PetscMalloc1(noff+1,&b->garray);CHKERRQ(ierr); cerr = cudaMemcpy(b->garray,d_j.data().get()+cusp->coo_nd,noff*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); ierr = PetscLogGpuToCpu((noff+cusp->coo_no)*sizeof(PetscInt));CHKERRQ(ierr); ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,1,noff,b->garray,PETSC_COPY_VALUES,&l2g);CHKERRQ(ierr); ierr = ISLocalToGlobalMappingSetType(l2g,ISLOCALTOGLOBALMAPPINGHASH);CHKERRQ(ierr); ierr = ISGlobalToLocalMappingApply(l2g,IS_GTOLM_DROP,cusp->coo_no,jj,&n,jj);CHKERRQ(ierr); if (n != cusp->coo_no) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Unexpected is size %D != %D coo size",n,cusp->coo_no); ierr = ISLocalToGlobalMappingDestroy(&l2g);CHKERRQ(ierr); ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); ierr = MatSetType(b->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr); ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); ierr = MatSetSizes(b->B,B->rmap->n,noff,B->rmap->n,noff);CHKERRQ(ierr); ierr = MatSetType(b->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr); /* GPU memory, cusparse specific call handles it internally */ ierr = MatSetPreallocationCOO_SeqAIJCUSPARSE(b->A,cusp->coo_nd,d_i.data().get(),d_j.data().get());CHKERRQ(ierr); ierr = MatSetPreallocationCOO_SeqAIJCUSPARSE(b->B,cusp->coo_no,d_i.data().get()+cusp->coo_nd,jj);CHKERRQ(ierr); ierr = PetscFree(jj);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusp->diagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusp->offdiagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->A,cusp->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->B,cusp->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->A,cusp->stream);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->B,cusp->stream);CHKERRQ(ierr); ierr = MatSetUpMultiply_MPIAIJ(B);CHKERRQ(ierr); B->preallocated = PETSC_TRUE; B->nonzerostate++; ierr = MatBindToCPU(b->A,B->boundtocpu);CHKERRQ(ierr); ierr = MatBindToCPU(b->B,B->boundtocpu);CHKERRQ(ierr); B->offloadmask = PETSC_OFFLOAD_CPU; B->assembled = PETSC_FALSE; B->was_assembled = PETSC_FALSE; PetscFunctionReturn(0); } static PetscErrorCode MatMPIAIJGetLocalMatMerge_MPIAIJCUSPARSE(Mat A,MatReuse scall,IS *glob,Mat *A_loc) { Mat Ad,Ao; const PetscInt *cmap; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMPIAIJGetSeqAIJ(A,&Ad,&Ao,&cmap);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEMergeMats(Ad,Ao,scall,A_loc);CHKERRQ(ierr); if (glob) { PetscInt cst, i, dn, on, *gidx; ierr = MatGetLocalSize(Ad,NULL,&dn);CHKERRQ(ierr); ierr = MatGetLocalSize(Ao,NULL,&on);CHKERRQ(ierr); ierr = MatGetOwnershipRangeColumn(A,&cst,NULL);CHKERRQ(ierr); ierr = PetscMalloc1(dn+on,&gidx);CHKERRQ(ierr); for (i=0; i<dn; i++) gidx[i] = cst + i; for (i=0; i<on; i++) gidx[i+dn] = cmap[i]; ierr = ISCreateGeneral(PetscObjectComm((PetscObject)Ad),dn+on,gidx,PETSC_OWN_POINTER,glob);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJCUSPARSE(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[]) { Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data; Mat_MPIAIJCUSPARSE *cusparseStruct = 
(Mat_MPIAIJCUSPARSE*)b->spptr; PetscErrorCode ierr; PetscInt i; PetscFunctionBegin; ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); if (PetscDefined(USE_DEBUG) && d_nnz) { for (i=0; i<B->rmap->n; i++) { if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]); } } if (PetscDefined(USE_DEBUG) && o_nnz) { for (i=0; i<B->rmap->n; i++) { if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]); } } #if defined(PETSC_USE_CTABLE) ierr = PetscTableDestroy(&b->colmap);CHKERRQ(ierr); #else ierr = PetscFree(b->colmap);CHKERRQ(ierr); #endif ierr = PetscFree(b->garray);CHKERRQ(ierr); ierr = VecDestroy(&b->lvec);CHKERRQ(ierr); ierr = VecScatterDestroy(&b->Mvctx);CHKERRQ(ierr); /* Because the B will have been resized we simply destroy it and create a new one each time */ ierr = MatDestroy(&b->B);CHKERRQ(ierr); if (!b->A) { ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr); } if (!b->B) { PetscMPIInt size; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);CHKERRQ(ierr); ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); ierr = MatSetSizes(b->B,B->rmap->n,size > 1 ? B->cmap->N : 0,B->rmap->n,size > 1 ? B->cmap->N : 0);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr); } ierr = MatSetType(b->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatSetType(b->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatBindToCPU(b->A,B->boundtocpu);CHKERRQ(ierr); ierr = MatBindToCPU(b->B,B->boundtocpu);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusparseStruct->diagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusparseStruct->offdiagGPUMatFormat);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->A,cusparseStruct->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetHandle(b->B,cusparseStruct->handle);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->A,cusparseStruct->stream);CHKERRQ(ierr); ierr = MatCUSPARSESetStream(b->B,cusparseStruct->stream);CHKERRQ(ierr); B->preallocated = PETSC_TRUE; PetscFunctionReturn(0); } /*@ MatAIJCUSPARSESetGenerateTranspose - Sets the flag to explicitly generate the transpose matrix before calling MatMultTranspose Not collective Input Parameters: + A - Matrix of type SEQAIJCUSPARSE or MPIAIJCUSPARSE - gen - the boolean flag Level: intermediate .seealso: MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE @*/ PetscErrorCode MatAIJCUSPARSESetGenerateTranspose(Mat A, PetscBool gen) { PetscErrorCode ierr; PetscBool ismpiaij; PetscFunctionBegin; PetscValidHeaderSpecific(A,MAT_CLASSID,1); MatCheckPreallocated(A,1); ierr = PetscObjectBaseTypeCompare((PetscObject)A,MATMPIAIJ,&ismpiaij);CHKERRQ(ierr); if (ismpiaij) { Mat A_d,A_o; ierr = MatMPIAIJGetSeqAIJ(A,&A_d,&A_o,NULL);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A_d,gen);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A_o,gen);CHKERRQ(ierr); } else { ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A,gen);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatMult_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; 
PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatZeroEntries_MPIAIJCUSPARSE(Mat A) { Mat_MPIAIJ *l = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { Mat_MPIAIJCUSPARSE *spptr = (Mat_MPIAIJCUSPARSE*)l->spptr; PetscSplitCSRDataStructure *d_mat = spptr->deviceMat; if (d_mat) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)l->A->data; Mat_SeqAIJ *b = (Mat_SeqAIJ*)l->B->data; PetscInt n = A->rmap->n, nnza = a->i[n], nnzb = b->i[n]; cudaError_t err; PetscScalar *vals; ierr = PetscInfo(A,"Zero device matrix diag and offfdiag\n");CHKERRQ(ierr); err = cudaMemcpy( &vals, &d_mat->diag.a, sizeof(PetscScalar*), cudaMemcpyDeviceToHost);CHKERRCUDA(err); err = cudaMemset( vals, 0, (nnza)*sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMemcpy( &vals, &d_mat->offdiag.a, sizeof(PetscScalar*), cudaMemcpyDeviceToHost);CHKERRCUDA(err); err = cudaMemset( vals, 0, (nnzb)*sizeof(PetscScalar));CHKERRCUDA(err); } } ierr = MatZeroEntries(l->A);CHKERRQ(ierr); ierr = MatZeroEntries(l->B);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; PetscErrorCode ierr; PetscInt nt; PetscFunctionBegin; ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); if (nt != A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->rmap->n,nt); ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr); ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr); ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatCUSPARSESetFormat_MPIAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT_DIAG: cusparseStruct->diagGPUMatFormat = format; break; case MAT_CUSPARSE_MULT_OFFDIAG: cusparseStruct->offdiagGPUMatFormat = format; break; case MAT_CUSPARSE_ALL: cusparseStruct->diagGPUMatFormat = format; cusparseStruct->offdiagGPUMatFormat = format; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. 
Only MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_DIAG, and MAT_CUSPARSE_MULT_ALL are currently supported.",op); } PetscFunctionReturn(0); } PetscErrorCode MatSetFromOptions_MPIAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) { MatCUSPARSEStorageFormat format; PetscErrorCode ierr; PetscBool flg; Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; PetscFunctionBegin; ierr = PetscOptionsHead(PetscOptionsObject,"MPIAIJCUSPARSE options");CHKERRQ(ierr); if (A->factortype==MAT_FACTOR_NONE) { ierr = PetscOptionsEnum("-mat_cusparse_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_DIAG,format);CHKERRQ(ierr); } ierr = PetscOptionsEnum("-mat_cusparse_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->offdiagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_OFFDIAG,format);CHKERRQ(ierr); } ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of the diagonal and off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr); } } ierr = PetscOptionsTail();CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatAssemblyEnd_MPIAIJCUSPARSE(Mat A,MatAssemblyType mode) { PetscErrorCode ierr; Mat_MPIAIJ *mpiaij = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)mpiaij->spptr; PetscSplitCSRDataStructure *d_mat = cusparseStruct->deviceMat; PetscFunctionBegin; ierr = MatAssemblyEnd_MPIAIJ(A,mode);CHKERRQ(ierr); if (!A->was_assembled && mode == MAT_FINAL_ASSEMBLY) { ierr = VecSetType(mpiaij->lvec,VECSEQCUDA);CHKERRQ(ierr); } if (d_mat) { A->offloadmask = PETSC_OFFLOAD_GPU; // if we assembled on the device } PetscFunctionReturn(0); } PetscErrorCode MatDestroy_MPIAIJCUSPARSE(Mat A) { PetscErrorCode ierr; Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)aij->spptr; cudaError_t err; cusparseStatus_t stat; PetscFunctionBegin; if (!cusparseStruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr"); if (cusparseStruct->deviceMat) { Mat_SeqAIJ *jaca = (Mat_SeqAIJ*)aij->A->data; Mat_SeqAIJ *jacb = (Mat_SeqAIJ*)aij->B->data; PetscSplitCSRDataStructure *d_mat = cusparseStruct->deviceMat, h_mat; ierr = PetscInfo(A,"Have device matrix\n");CHKERRQ(ierr); err = cudaMemcpy( &h_mat, d_mat, sizeof(PetscSplitCSRDataStructure), cudaMemcpyDeviceToHost);CHKERRCUDA(err); if (jaca->compressedrow.use) { err = cudaFree(h_mat.diag.i);CHKERRCUDA(err); } if (jacb->compressedrow.use) { err = cudaFree(h_mat.offdiag.i);CHKERRCUDA(err); } err = cudaFree(h_mat.colmap);CHKERRCUDA(err); err = cudaFree(d_mat);CHKERRCUDA(err); } try { if (aij->A) { ierr = MatCUSPARSEClearHandle(aij->A);CHKERRQ(ierr); } if (aij->B) { ierr = MatCUSPARSEClearHandle(aij->B);CHKERRQ(ierr); } stat = cusparseDestroy(cusparseStruct->handle);CHKERRCUSPARSE(stat); if (cusparseStruct->stream) { err = 
cudaStreamDestroy(cusparseStruct->stream);CHKERRCUDA(err); } delete cusparseStruct->coo_p; delete cusparseStruct->coo_pw; delete cusparseStruct; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSPARSE error: %s", ex); } ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJGetLocalMatMerge_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr); ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat B, MatType mtype, MatReuse reuse, Mat* newmat) { PetscErrorCode ierr; Mat_MPIAIJ *a; Mat_MPIAIJCUSPARSE *cusparseStruct; cusparseStatus_t stat; Mat A; PetscFunctionBegin; if (reuse == MAT_INITIAL_MATRIX) { ierr = MatDuplicate(B,MAT_COPY_VALUES,newmat);CHKERRQ(ierr); } else if (reuse == MAT_REUSE_MATRIX) { ierr = MatCopy(B,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } A = *newmat; A->boundtocpu = PETSC_FALSE; ierr = PetscFree(A->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&A->defaultvectype);CHKERRQ(ierr); a = (Mat_MPIAIJ*)A->data; if (a->A) { ierr = MatSetType(a->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); } if (a->B) { ierr = MatSetType(a->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); } if (a->lvec) { ierr = VecSetType(a->lvec,VECSEQCUDA);CHKERRQ(ierr); } if (reuse != MAT_REUSE_MATRIX && !a->spptr) { a->spptr = new Mat_MPIAIJCUSPARSE; cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr; cusparseStruct->diagGPUMatFormat = MAT_CUSPARSE_CSR; cusparseStruct->offdiagGPUMatFormat = MAT_CUSPARSE_CSR; cusparseStruct->coo_p = NULL; cusparseStruct->coo_pw = NULL; cusparseStruct->stream = 0; stat = cusparseCreate(&(cusparseStruct->handle));CHKERRCUSPARSE(stat); cusparseStruct->deviceMat = NULL; } A->ops->assemblyend = MatAssemblyEnd_MPIAIJCUSPARSE; A->ops->mult = MatMult_MPIAIJCUSPARSE; A->ops->multadd = MatMultAdd_MPIAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_MPIAIJCUSPARSE; A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSPARSE; A->ops->destroy = MatDestroy_MPIAIJCUSPARSE; A->ops->zeroentries = MatZeroEntries_MPIAIJCUSPARSE; A->ops->productsetfromoptions = MatProductSetFromOptions_MPIAIJBACKEND; ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJGetLocalMatMerge_C",MatMPIAIJGetLocalMatMerge_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_MPIAIJCUSPARSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_MPIAIJCUSPARSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSPARSE(Mat A) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr); ierr = MatConvert_MPIAIJ_MPIAIJCUSPARSE(A,MATMPIAIJCUSPARSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); PetscFunctionReturn(0); } 
/*@ MatCreateAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format (the default parallel PETSc format). This matrix will ultimately pushed down to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter nz (or the array nnz). By setting these parameters accurately, performance during matrix assembly can be increased by more than a factor of 50. Collective Input Parameters: + comm - MPI communicator, set to PETSC_COMM_SELF . m - number of rows . n - number of columns . nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or NULL Output Parameter: . A - the matrix It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(), MatXXXXSetPreallocation() paradigm instead of this routine directly. [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation] Notes: If nnz is given then nz is ignored The AIJ format (also called the Yale sparse matrix format or compressed row storage), is fully compatible with standard Fortran 77 storage. That is, the stored row and column indices can begin at either one (as in Fortran) or zero. See the users' manual for details. Specify the preallocated storage with either nz or nnz (not both). Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory allocation. For large problems you MUST preallocate memory or you will get TERRIBLE performance, see the users' manual chapter on matrices. By default, this format uses inodes (identical nodes) when possible, to improve numerical efficiency of matrix-vector products and solves. We search for consecutive rows with the same nonzero structure, thereby reusing matrix information to achieve increased efficiency. Level: intermediate .seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATMPIAIJCUSPARSE, MATAIJCUSPARSE @*/ PetscErrorCode MatCreateAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr); ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr); if (size > 1) { ierr = MatSetType(*A,MATMPIAIJCUSPARSE);CHKERRQ(ierr); ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr); } else { ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*MC MATAIJCUSPARSE - MATMPIAIJCUSPARSE = "aijcusparse" = "mpiaijcusparse" - A matrix type to be used for sparse matrices. A matrix type type whose data resides on Nvidia GPUs. These matrices can be in either CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. This matrix type is identical to MATSEQAIJCUSPARSE when constructed with a single process communicator, and MATMPIAIJCUSPARSE otherwise. As a result, for single process communicators, MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported for communicators controlling multiple processes. It is recommended that you call both of the above preallocation routines for simplicity. 
Options Database Keys: + -mat_type mpiaijcusparse - sets the matrix type to "mpiaijcusparse" during a call to MatSetFromOptions() . -mat_cusparse_storage_format csr - sets the storage format of diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). . -mat_cusparse_mult_diag_storage_format csr - sets the storage format of diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). - -mat_cusparse_mult_offdiag_storage_format csr - sets the storage format of off-diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). Level: beginner .seealso: MatCreateAIJCUSPARSE(), MATSEQAIJCUSPARSE, MatCreateSeqAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M M*/ // get GPU pointer to stripped down Mat. For both Seq and MPI Mat. PetscErrorCode MatCUSPARSEGetDeviceMatWrite(Mat A, PetscSplitCSRDataStructure **B) { #if defined(PETSC_USE_CTABLE) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Device metadata does not support ctable (--with-ctable=0)"); #else PetscSplitCSRDataStructure **p_d_mat; PetscMPIInt size,rank; MPI_Comm comm; PetscErrorCode ierr; int *ai,*bi,*aj,*bj; PetscScalar *aa,*ba; PetscFunctionBegin; ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr); ierr = MPI_Comm_rank(comm,&rank);CHKERRMPI(ierr); if (A->factortype == MAT_FACTOR_NONE) { CsrMatrix *matrixA,*matrixB=NULL; if (size == 1) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; p_d_mat = &cusparsestruct->deviceMat; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; if (cusparsestruct->format==MAT_CUSPARSE_CSR) { matrixA = (CsrMatrix*)matstruct->mat; bi = bj = NULL; ba = NULL; } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Device Mat needs MAT_CUSPARSE_CSR"); } else { Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; Mat_MPIAIJCUSPARSE *spptr = (Mat_MPIAIJCUSPARSE*)aij->spptr; p_d_mat = &spptr->deviceMat; Mat_SeqAIJCUSPARSE *cusparsestructA = (Mat_SeqAIJCUSPARSE*)aij->A->spptr; Mat_SeqAIJCUSPARSE *cusparsestructB = (Mat_SeqAIJCUSPARSE*)aij->B->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstructA = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestructA->mat; Mat_SeqAIJCUSPARSEMultStruct *matstructB = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestructB->mat; if (cusparsestructA->format==MAT_CUSPARSE_CSR) { if (cusparsestructB->format!=MAT_CUSPARSE_CSR) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Device Mat B needs MAT_CUSPARSE_CSR"); matrixA = (CsrMatrix*)matstructA->mat; matrixB = (CsrMatrix*)matstructB->mat; bi = thrust::raw_pointer_cast(matrixB->row_offsets->data()); bj = thrust::raw_pointer_cast(matrixB->column_indices->data()); ba = thrust::raw_pointer_cast(matrixB->values->data()); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Device Mat A needs MAT_CUSPARSE_CSR"); } ai = thrust::raw_pointer_cast(matrixA->row_offsets->data()); aj = thrust::raw_pointer_cast(matrixA->column_indices->data()); aa = thrust::raw_pointer_cast(matrixA->values->data()); } else { *B = NULL; PetscFunctionReturn(0); } // act like MatSetValues because not called on host if (A->assembled) { if (A->was_assembled) { ierr = PetscInfo(A,"Assemble more than once already\n");CHKERRQ(ierr); } A->was_assembled = PETSC_TRUE; // this is done (lazy) in MatAssemble but we are not calling it anymore - done in AIJ AssemblyEnd, need here? 
} else { SETERRQ(comm,PETSC_ERR_SUP,"Need assemble matrix"); } if (!*p_d_mat) { cudaError_t err; PetscSplitCSRDataStructure *d_mat, h_mat; Mat_SeqAIJ *jaca; PetscInt n = A->rmap->n, nnz; // create and copy ierr = PetscInfo(A,"Create device matrix\n");CHKERRQ(ierr); err = cudaMalloc((void **)&d_mat, sizeof(PetscSplitCSRDataStructure));CHKERRCUDA(err); err = cudaMemset( d_mat, 0, sizeof(PetscSplitCSRDataStructure));CHKERRCUDA(err); *B = *p_d_mat = d_mat; // return it, set it in Mat, and set it up if (size == 1) { jaca = (Mat_SeqAIJ*)A->data; h_mat.rstart = 0; h_mat.rend = A->rmap->n; h_mat.cstart = 0; h_mat.cend = A->cmap->n; h_mat.offdiag.i = h_mat.offdiag.j = NULL; h_mat.offdiag.a = NULL; } else { Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; Mat_SeqAIJ *jacb; jaca = (Mat_SeqAIJ*)aij->A->data; jacb = (Mat_SeqAIJ*)aij->B->data; if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray"); if (aij->B->rmap->n != aij->A->rmap->n) SETERRQ(comm,PETSC_ERR_SUP,"Only support aij->B->rmap->n == aij->A->rmap->n"); // create colmap - this is ussually done (lazy) in MatSetValues aij->donotstash = PETSC_TRUE; aij->A->nooffprocentries = aij->B->nooffprocentries = A->nooffprocentries = PETSC_TRUE; jaca->nonew = jacb->nonew = PETSC_TRUE; // no more dissassembly ierr = PetscCalloc1(A->cmap->N+1,&aij->colmap);CHKERRQ(ierr); aij->colmap[A->cmap->N] = -9; ierr = PetscLogObjectMemory((PetscObject)A,(A->cmap->N+1)*sizeof(PetscInt));CHKERRQ(ierr); { PetscInt ii; for (ii=0; ii<aij->B->cmap->n; ii++) aij->colmap[aij->garray[ii]] = ii+1; } if (aij->colmap[A->cmap->N] != -9) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"aij->colmap[A->cmap->N] != -9"); // allocate B copy data h_mat.rstart = A->rmap->rstart; h_mat.rend = A->rmap->rend; h_mat.cstart = A->cmap->rstart; h_mat.cend = A->cmap->rend; nnz = jacb->i[n]; if (jacb->compressedrow.use) { err = cudaMalloc((void **)&h_mat.offdiag.i, (n+1)*sizeof(int));CHKERRCUDA(err); // kernel input err = cudaMemcpy( h_mat.offdiag.i, jacb->i, (n+1)*sizeof(int), cudaMemcpyHostToDevice);CHKERRCUDA(err); } else h_mat.offdiag.i = bi; h_mat.offdiag.j = bj; h_mat.offdiag.a = ba; err = cudaMalloc((void **)&h_mat.colmap, (A->cmap->N+1)*sizeof(PetscInt));CHKERRCUDA(err); // kernel output err = cudaMemcpy( h_mat.colmap, aij->colmap, (A->cmap->N+1)*sizeof(PetscInt), cudaMemcpyHostToDevice);CHKERRCUDA(err); h_mat.offdiag.ignorezeroentries = jacb->ignorezeroentries; h_mat.offdiag.n = n; } // allocate A copy data nnz = jaca->i[n]; h_mat.diag.n = n; h_mat.diag.ignorezeroentries = jaca->ignorezeroentries; ierr = MPI_Comm_rank(comm,&h_mat.rank);CHKERRMPI(ierr); if (jaca->compressedrow.use) { err = cudaMalloc((void **)&h_mat.diag.i, (n+1)*sizeof(int));CHKERRCUDA(err); // kernel input err = cudaMemcpy( h_mat.diag.i, jaca->i, (n+1)*sizeof(int), cudaMemcpyHostToDevice);CHKERRCUDA(err); } else { h_mat.diag.i = ai; } h_mat.diag.j = aj; h_mat.diag.a = aa; // copy pointers and metdata to device err = cudaMemcpy( d_mat, &h_mat, sizeof(PetscSplitCSRDataStructure), cudaMemcpyHostToDevice);CHKERRCUDA(err); ierr = PetscInfo2(A,"Create device Mat n=%D nnz=%D\n",h_mat.diag.n, nnz);CHKERRQ(ierr); } else { *B = *p_d_mat; } A->assembled = PETSC_FALSE; // ready to write with matsetvalues - this done (lazy) in normal MatSetValues PetscFunctionReturn(0); #endif }
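// ---------------------------------------------------------------------------
// MatSetPreallocationCOO_MPIAIJCUSPARSE() in the file above separates the COO
// triplets into the locally owned (diagonal) block and the off-process
// (off-diagonal) block by partitioning a zip iterator on the column index.
// The stripped-down sketch below shows just that split; unlike the PETSc code
// it reorders the values directly instead of recording a permutation for later
// MatSetValuesCOO() calls, and SplitCOO()/IsDiagCol are illustrative names.
// ---------------------------------------------------------------------------
#include <thrust/device_vector.h>
#include <thrust/partition.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

// An entry belongs to the diagonal block when its global column index lies in
// this rank's owned column range [cstart, cend).
struct IsDiagCol {
  int cstart, cend;
  IsDiagCol(int s, int e) : cstart(s), cend(e) {}
  __host__ __device__ bool operator()(const thrust::tuple<int, int, double> &t) const {
    const int col = thrust::get<1>(t);
    return col >= cstart && col < cend;
  }
};

// Reorder (row, col, val) triplets so diagonal-block entries come first;
// returns how many entries ended up in the diagonal block.  Like
// thrust::partition in the code above, order within each part is unspecified.
static int SplitCOO(thrust::device_vector<int> &row,
                    thrust::device_vector<int> &col,
                    thrust::device_vector<double> &val,
                    int cstart, int cend) {
  auto first = thrust::make_zip_iterator(thrust::make_tuple(row.begin(), col.begin(), val.begin()));
  auto last  = thrust::make_zip_iterator(thrust::make_tuple(row.end(),   col.end(),   val.end()));
  auto mid   = thrust::partition(first, last, IsDiagCol(cstart, cend));
  return static_cast<int>(mid - first);
}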
92559402147a1ebb23b702891ece7d4cb19eeb58.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: ~GPUCoordinateUpdater() { // NOLINT if (learner_param_->gpu_id >= 0) { dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); } } // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector)); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); fromJson(config.at("linear_train_param"), &tparam_); fromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = toJson(tparam_); out["coordinate_param"] = toJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (learner_param_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); if (IsEmpty()) { return; } dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } ba_.Allocate(learner_param_->gpu_id, &data_, row_ptr_.back(), &gpair_, num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.subspan(row_ptr_[fidx]).data(), col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param_)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (learner_param_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights 
selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->learner_model_param_->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param_->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->learner_model_param_->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param_->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual if (learner_param_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param_->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param_->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (learner_param_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param_->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(temp_, perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_; dh::LaunchN(learner_param_->gpu_id, num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. 
GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(hipSetDevice(learner_param_->gpu_id)); common::Span<xgboost::Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = gpair_; auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = gpair_; common::Span<Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(learner_param_->gpu_id, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(hipMemcpyAsync( gpair_.data(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), hipMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; dh::BulkAllocator ba_; std::vector<size_t> row_ptr_; common::Span<xgboost::Entry> data_; common::Span<GradientPair> gpair_; dh::CubMemory temp_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
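// ---------------------------------------------------------------------------
// GetGradient() above reduces one feature's column to the pair
// (sum_i g_i*x_i, sum_i h_i*x_i^2) by feeding a transform iterator into a sum
// reduction.  An equivalent standalone reduction with thrust::transform_reduce
// is sketched below; Entry, GradPair and the functor names are stand-ins for
// the xgboost types, not the library's own definitions.
// ---------------------------------------------------------------------------
#include <thrust/transform_reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>

struct Entry    { int index; float fvalue; };   // stand-in for xgboost::Entry
struct GradPair { double grad, hess; };         // stand-in for GradientPair

// Maps entry k of the column to its contribution (g*x, h*x^2), where the
// gradient pair is looked up by the entry's row index and output group.
struct ColumnTerm {
  const Entry    *col;
  const GradPair *gpair;
  int num_group, group_idx;
  __host__ __device__ GradPair operator()(size_t k) const {
    const Entry    e = col[k];
    const GradPair g = gpair[(size_t)e.index * num_group + group_idx];
    return GradPair{g.grad * e.fvalue, g.hess * e.fvalue * e.fvalue};
  }
};

struct AddPairs {
  __host__ __device__ GradPair operator()(const GradPair &a, const GradPair &b) const {
    return GradPair{a.grad + b.grad, a.hess + b.hess};
  }
};

// d_col and d_gpair are device pointers; col_size is the column's entry count.
// Must be compiled as CUDA (.cu) because the functors run on the device.
static GradPair FeatureGradient(const Entry *d_col, size_t col_size,
                                const GradPair *d_gpair, int num_group, int group_idx) {
  ColumnTerm term{d_col, d_gpair, num_group, group_idx};
  return thrust::transform_reduce(thrust::device,
                                  thrust::counting_iterator<size_t>(0),
                                  thrust::counting_iterator<size_t>(col_size),
                                  term, GradPair{0.0, 0.0}, AddPairs());
}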
92559402147a1ebb23b702891ece7d4cb19eeb58.cu
/*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: ~GPUCoordinateUpdater() { // NOLINT if (learner_param_->gpu_id >= 0) { dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); } } // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector)); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); fromJson(config.at("linear_train_param"), &tparam_); fromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = toJson(tparam_); out["coordinate_param"] = toJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (learner_param_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); if (IsEmpty()) { return; } dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } ba_.Allocate(learner_param_->gpu_id, &data_, row_ptr_.back(), &gpair_, num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.subspan(row_ptr_[fidx]).data(), col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param_)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (learner_param_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), 
p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->learner_model_param_->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param_->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->learner_model_param_->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param_->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual if (learner_param_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param_->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (learner_param_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param_->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (learner_param_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param_->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(temp_, perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_; dh::LaunchN(learner_param_->gpu_id, num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id)); common::Span<xgboost::Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = gpair_; auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = gpair_; common::Span<Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(learner_param_->gpu_id, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(cudaMemcpyAsync( gpair_.data(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), cudaMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; dh::BulkAllocator ba_; std::vector<size_t> row_ptr_; common::Span<xgboost::Entry> data_; common::Span<GradientPair> gpair_; dh::CubMemory temp_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
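// The per-feature step in UpdateFeature() above is an elastic-net coordinate
// descent update: given the summed gradient/Hessian over a feature column, the
// current weight w and the denormalized L1/L2 penalties, the delta is a Newton
// step with L2 shrinkage and L1 soft-thresholding, clamped so the weight never
// crosses zero from the wrong side. A minimal host-side sketch, assuming the
// usual XGBoost-style formulation; coordinate_delta / coordinate_delta_bias are
// illustrative stand-ins, not the library's CoordinateDelta helpers.
#include <algorithm>
#include <cstdio>

inline double coordinate_delta_bias(double sum_grad, double sum_hess) {
  // Plain Newton step on the bias term.
  return sum_hess > 1e-5 ? -sum_grad / sum_hess : 0.0;
}

inline double coordinate_delta(double sum_grad, double sum_hess, double w,
                               double reg_alpha, double reg_lambda) {
  if (sum_hess < 1e-5) return 0.0;
  const double grad_l2 = sum_grad + reg_lambda * w;  // L2 term folded into gradient
  const double hess_l2 = sum_hess + reg_lambda;
  if (w - grad_l2 / hess_l2 >= 0.0) {
    // Positive branch of the L1 soft-threshold, clamped at -w.
    return std::max(-(grad_l2 + reg_alpha) / hess_l2, -w);
  }
  return std::min(-(grad_l2 - reg_alpha) / hess_l2, -w);
}

int main() {
  // One illustrative step with made-up gradient statistics.
  double w = 0.0;
  const double learning_rate = 0.5;
  w += learning_rate * coordinate_delta(/*sum_grad=*/-4.0, /*sum_hess=*/10.0, w,
                                        /*reg_alpha=*/1.0, /*reg_lambda=*/1.0);
  std::printf("updated weight: %f, bias delta: %f\n", w,
              coordinate_delta_bias(-4.0, 10.0));
  return 0;
}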
21748a37623d71d647d5d9f7cf9f8de4e1d054d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ************************************************************************** // // PARALUTION www.paralution.com // // Copyright (C) 2015 PARALUTION Labs UG (haftungsbeschrnkt) & Co. KG // Am Hasensprung 6, 76571 Gaggenau // Handelsregister: Amtsgericht Mannheim, HRA 706051 // Vertreten durch: // PARALUTION Labs Verwaltungs UG (haftungsbeschrnkt) // Am Hasensprung 6, 76571 Gaggenau // Handelsregister: Amtsgericht Mannheim, HRB 721277 // Geschftsfhrer: Dimitar Lukarski, Nico Trost // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // // ************************************************************************** // PARALUTION version 1.1.0 #include "../../utils/def.hpp" #include "gpu_matrix_csr.hpp" #include "gpu_matrix_coo.hpp" #include "gpu_matrix_ell.hpp" #include "gpu_matrix_hyb.hpp" #include "gpu_vector.hpp" #include "../host/host_matrix_hyb.hpp" #include "../base_matrix.hpp" #include "../base_vector.hpp" #include "../backend_manager.hpp" #include "../../utils/log.hpp" #include "../../utils/allocate_free.hpp" #include "gpu_utils.hpp" #include "cuda_kernels_general.hpp" #include "cuda_kernels_hyb.hpp" #include "cuda_kernels_vector.hpp" #include "gpu_allocate_free.hpp" #include "../matrix_formats_ind.hpp" #include <algorithm> #include <hip/hip_runtime.h> //#include <hipsparse.h> #include "hipsparse.h" namespace paralution { template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::GPUAcceleratorMatrixHYB() { // no default constructors LOG_INFO("no default constructor"); FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::GPUAcceleratorMatrixHYB(const Paralution_Backend_Descriptor local_backend) { LOG_DEBUG(this, "GPUAcceleratorMatrixHYB::GPUAcceleratorMatrixHYB()", "constructor with local_backend"); this->mat_.ELL.val = NULL; this->mat_.ELL.col = NULL; this->mat_.ELL.max_row = 0; this->mat_.COO.row = NULL; this->mat_.COO.col = NULL; this->mat_.COO.val = NULL; this->ell_nnz_ = 0; this->coo_nnz_ = 0; this->set_backend(local_backend); CHECK_CUDA_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::~GPUAcceleratorMatrixHYB() { LOG_DEBUG(this, "GPUAcceleratorMatrixHYB::~GPUAcceleratorMatrixHYB()", "destructor"); this->Clear(); } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::info(void) const { LOG_INFO("GPUAcceleratorMatrixHYB<ValueType>"); } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::AllocateHYB(const int ell_nnz, const int coo_nnz, const int ell_max_row, const int nrow, const int ncol) { assert( ell_nnz >= 0); assert( coo_nnz >= 0); assert( ell_max_row >= 0); assert( ncol >= 0); assert( nrow >= 0); if (this->get_nnz() > 0) this->Clear(); if (ell_nnz + coo_nnz > 0) { // ELL assert(ell_nnz == ell_max_row*nrow); allocate_gpu(ell_nnz, &this->mat_.ELL.val); 
allocate_gpu(ell_nnz, &this->mat_.ELL.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, ell_nnz, this->mat_.ELL.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, ell_nnz, this->mat_.ELL.col); this->mat_.ELL.max_row = ell_max_row; this->ell_nnz_ = ell_nnz; // COO allocate_gpu(coo_nnz, &this->mat_.COO.row); allocate_gpu(coo_nnz, &this->mat_.COO.col); allocate_gpu(coo_nnz, &this->mat_.COO.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.row); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.val); this->coo_nnz_ = coo_nnz; this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = ell_nnz + coo_nnz; } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::Clear() { if (this->get_nnz() > 0) { free_gpu(&this->mat_.COO.row); free_gpu(&this->mat_.COO.col); free_gpu(&this->mat_.COO.val); free_gpu(&this->mat_.ELL.val); free_gpu(&this->mat_.ELL.col); this->ell_nnz_ = 0; this->coo_nnz_ = 0; this->mat_.ELL.max_row = 0; this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) { const HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixHYB<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateHYB(cast_mat->get_ell_nnz(), cast_mat->get_coo_nnz(), cast_mat->get_ell_max_row(), cast_mat->get_nrow(), cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const { HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixHYB<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), 
this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateHYB(gpu_cast_mat->get_ell_nnz(), gpu_cast_mat->get_coo_nnz(), gpu_cast_mat->get_ell_max_row(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst gpu_cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst gpu_cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst gpu_cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst gpu_cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst gpu_cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHost(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = 
dynamic_cast<GPUAcceleratorMatrixHYB<ValueType>*> (dst)) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(gpu_cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(gpu_cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if (host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) { this->CopyToHost(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) { const HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixHYB<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateHYB(cast_mat->get_ell_nnz(), cast_mat->get_coo_nnz(), cast_mat->get_ell_max_row(), cast_mat->get_nrow(), cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpyAsync(this->mat_.ELL.col, // dst cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.ELL.val, // dst cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpyAsync(this->mat_.COO.row, // dst cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.COO.col, // dst cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.COO.val, // dst cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const { HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format 
assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixHYB<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpyAsync(cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpyAsync(cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateHYB(gpu_cast_mat->get_ell_nnz(), gpu_cast_mat->get_coo_nnz(), gpu_cast_mat->get_ell_max_row(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst gpu_cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst gpu_cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst gpu_cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst gpu_cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst gpu_cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHostAsync(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } 
} } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixHYB<ValueType>*> (dst)) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(gpu_cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(gpu_cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if (host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) { this->CopyToHostAsync(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> bool GPUAcceleratorMatrixHYB<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) { this->Clear(); // empty matrix is empty matrix if (mat.get_nnz() == 0) return true; const GPUAcceleratorMatrixHYB<ValueType> *cast_mat_hyb; if ((cast_mat_hyb = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&mat)) != NULL) { this->CopyFrom(*cast_mat_hyb); return true; } const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr; if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) { this->Clear(); int nrow = cast_mat_csr->get_nrow(); int ncol = cast_mat_csr->get_ncol(); int max_row = cast_mat_csr->get_nnz() / nrow; // get nnz per row for COO part int *nnz_coo = NULL; dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); allocate_gpu<int>(nrow, &nnz_coo); hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_nnz_coo<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, max_row, cast_mat_csr->mat_.row_offset, nnz_coo); CHECK_CUDA_ERROR(__FILE__, __LINE__); // get nnz for COO part by summing up nnz per row array int *d_buffer = NULL; int *h_buffer = NULL; int GROUP_SIZE; int LOCAL_SIZE; int FinalReduceSize; allocate_gpu<int>(this->local_backend_.GPU_warp * 4, &d_buffer); dim3 GridSize2(this->local_backend_.GPU_warp * 4); GROUP_SIZE = ( size_t( ( size_t( nrow / ( this->local_backend_.GPU_warp * 4 ) ) + 1 ) / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size; LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size; 
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_reduce<int, int, 256>), dim3(GridSize2), dim3(BlockSize), 0, 0, nrow, nnz_coo, d_buffer, GROUP_SIZE, LOCAL_SIZE); CHECK_CUDA_ERROR(__FILE__, __LINE__); FinalReduceSize = this->local_backend_.GPU_warp * 4; allocate_host(FinalReduceSize, &h_buffer); hipMemcpy(h_buffer, // dst d_buffer, // src FinalReduceSize*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu<int>(&d_buffer); int num_nnz_coo = 0; for (int i=0; i<FinalReduceSize; ++i) num_nnz_coo += h_buffer[i]; free_host(&h_buffer); // allocate ELL and COO matrices int num_nnz_ell = max_row * nrow; if (num_nnz_ell <= 0 || num_nnz_coo <= 0) { free_gpu<int>(&nnz_coo); return false; } this->AllocateHYB(num_nnz_ell, num_nnz_coo, max_row, nrow, ncol); hipMemset(this->mat_.ELL.col, -1, num_nnz_ell*sizeof(int)); CHECK_CUDA_ERROR(__FILE__, __LINE__); // copy up to num_cols_per_row values of row i into the ELL int *nnz_ell = NULL; allocate_gpu<int>(nrow, &nnz_ell); hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_fill_ell<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, max_row, cast_mat_csr->mat_.row_offset, cast_mat_csr->mat_.col, cast_mat_csr->mat_.val, this->mat_.ELL.col, this->mat_.ELL.val, nnz_ell); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO currently performing partial sum on host allocate_host(nrow, &h_buffer); hipMemcpy(h_buffer, // dst nnz_ell, // src nrow*sizeof(int), // size hipMemcpyDeviceToHost); for (int i=1; i<nrow; ++i) h_buffer[i] += h_buffer[i-1]; hipMemcpy(nnz_ell, // dst h_buffer, // src nrow*sizeof(int), // size hipMemcpyHostToDevice); free_host(&h_buffer); // end TODO // copy any remaining values in row i into the COO hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_fill_coo<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, cast_mat_csr->mat_.row_offset, cast_mat_csr->mat_.col, cast_mat_csr->mat_.val, nnz_coo, nnz_ell, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu<int>(&nnz_ell); free_gpu<int>(&nnz_coo); this->nrow_ = cast_mat_csr->get_nrow(); this->ncol_ = cast_mat_csr->get_ncol(); this->nnz_ = num_nnz_ell + num_nnz_coo; this->mat_.ELL.max_row = max_row; this->ell_nnz_ = num_nnz_ell; this->coo_nnz_ = num_nnz_coo; return true; } return false; } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. 
get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ; GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); // ELL if (this->get_ell_nnz() > 0) { int nrow = this->get_nrow(); int ncol = this->get_ncol(); int max_row = this->get_ell_max_row(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_spmv<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, ncol, max_row, this->mat_.ELL.col, this->mat_.ELL.val, cast_in->vec_, cast_out->vec_ ); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // do not support super small matrices assert(this->get_coo_nnz() > this->local_backend_.GPU_warp); // ---------------------------------------------------------- // Modified and adapted from CUSP 0.3.1, // http://code.google.com/p/cusp-library/ // NVIDIA, APACHE LICENSE 2.0 // ---------------------------------------------------------- // see __spmv_coo_flat(...) // ---------------------------------------------------------- // CHANGELOG // - adapted interface // ---------------------------------------------------------- const unsigned int BLOCK_SIZE = this->local_backend_.GPU_block_size; // const unsigned int MAX_BLOCKS = this->local_backend_.GPU_max_blocks; const unsigned int MAX_BLOCKS = 32; // cusp::detail::device::arch::max_active_blocks(spmv_coo_flat_kernel<IndexType, ValueType, BLOCK_SIZE, UseCache>, BLOCK_SIZE, (size_t) 0); const unsigned int WARPS_PER_BLOCK = BLOCK_SIZE / this->local_backend_.GPU_warp; const unsigned int num_units = this->get_coo_nnz() / this->local_backend_.GPU_warp; const unsigned int num_warps = ::min(num_units, WARPS_PER_BLOCK * MAX_BLOCKS); const unsigned int num_blocks = (num_warps + (WARPS_PER_BLOCK-1)) / WARPS_PER_BLOCK; // (N + (granularity - 1)) / granularity const unsigned int num_iters = (num_units + (num_warps-1)) / num_warps; const unsigned int interval_size = this->local_backend_.GPU_warp * num_iters; const int tail = num_units * this->local_backend_.GPU_warp; // do the last few nonzeros separately (fewer than this->local_backend_.GPU_warp elements) const unsigned int active_warps = (interval_size == 0) ? 
0 : ((tail + (interval_size-1))/interval_size); int *temp_rows = NULL; ValueType *temp_vals = NULL; allocate_gpu(active_warps, &temp_rows); allocate_gpu(active_warps, &temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 // WARP_SIZE == 32 hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_flat<int, ValueType, 256, 32>), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, tail, interval_size, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val, ValueType(1.0), cast_in->vec_, cast_out->vec_, temp_rows, temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_reduce_update<int, ValueType, 256>), dim3(1), dim3(BLOCK_SIZE), 0, 0, active_warps, temp_rows, temp_vals, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_serial<int, ValueType>), dim3(1), dim3(1), 0, 0, this->get_coo_nnz() - tail, this->mat_.COO.row + tail, this->mat_.COO.col + tail, this->mat_.COO.val + tail, ValueType(1.0), cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu(&temp_rows); free_gpu(&temp_vals); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar, BaseVector<ValueType> *out) const { if (this->get_nnz() > 0) { assert(in. get_size() >= 0); assert(out->get_size() >= 0); assert(in. get_size() == this->get_ncol()); assert(out->get_size() == this->get_nrow()); const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in) ; GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out) ; assert(cast_in != NULL); assert(cast_out!= NULL); // ELL if (this->get_ell_nnz() > 0) { int nrow = this->get_nrow(); int ncol = this->get_ncol(); int max_row = this->get_ell_max_row(); dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_add_spmv<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, ncol, max_row, this->mat_.ELL.col, this->mat_.ELL.val, scalar, cast_in->vec_, cast_out->vec_ ); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // do not support super small matrices assert(this->get_coo_nnz() > this->local_backend_.GPU_warp); // ---------------------------------------------------------- // Modified and adapted from CUSP 0.3.1, // http://code.google.com/p/cusp-library/ // NVIDIA, APACHE LICENSE 2.0 // ---------------------------------------------------------- // see __spmv_coo_flat(...) 
// ---------------------------------------------------------- // CHANGELOG // - adapted interface // ---------------------------------------------------------- const unsigned int BLOCK_SIZE = this->local_backend_.GPU_block_size; // const unsigned int MAX_BLOCKS = this->local_backend_.GPU_max_blocks; const unsigned int MAX_BLOCKS = 32; // cusp::detail::device::arch::max_active_blocks(spmv_coo_flat_kernel<IndexType, ValueType, BLOCK_SIZE, UseCache>, BLOCK_SIZE, (size_t) 0); const unsigned int WARPS_PER_BLOCK = BLOCK_SIZE / this->local_backend_.GPU_warp; const unsigned int num_units = this->get_coo_nnz() / this->local_backend_.GPU_warp; const unsigned int num_warps = ::min(num_units, WARPS_PER_BLOCK * MAX_BLOCKS); const unsigned int num_blocks = (num_warps + (WARPS_PER_BLOCK-1)) / WARPS_PER_BLOCK; // (N + (granularity - 1)) / granularity const unsigned int num_iters = (num_units + (num_warps-1)) / num_warps; const unsigned int interval_size = this->local_backend_.GPU_warp * num_iters; const int tail = num_units * this->local_backend_.GPU_warp; // do the last few nonzeros separately (fewer than this->local_backend_.GPU_warp elements) const unsigned int active_warps = (interval_size == 0) ? 0 : ((tail + (interval_size-1))/interval_size); int *temp_rows = NULL; ValueType *temp_vals = NULL; allocate_gpu(active_warps, &temp_rows); allocate_gpu(active_warps, &temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 // WARP_SIZE == 32 hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_flat<int, ValueType, 256, 32>), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, tail, interval_size, this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val, scalar, cast_in->vec_, cast_out->vec_, temp_rows, temp_vals); CHECK_CUDA_ERROR(__FILE__, __LINE__); // TODO // BLOCK_SIZE == 256 hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_reduce_update<int, ValueType, 256>), dim3(1), dim3(BLOCK_SIZE), 0, 0, active_warps, temp_rows, temp_vals, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_serial<int, ValueType>), dim3(1), dim3(1), 0, 0, this->get_coo_nnz() - tail, this->mat_.COO.row + tail, this->mat_.COO.col + tail, this->mat_.COO.val + tail, scalar, cast_in->vec_, cast_out->vec_); CHECK_CUDA_ERROR(__FILE__, __LINE__); free_gpu(&temp_rows); free_gpu(&temp_vals); } } } template class GPUAcceleratorMatrixHYB<double>; template class GPUAcceleratorMatrixHYB<float>; }
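// Apply()/ApplyAdd() above split the HYB matrix into an ELL block (a fixed
// max_row slots per row, padded with column index -1, as set by the hipMemset
// in ConvertFrom) plus a COO tail. A minimal sketch of the ELL half only,
// assuming column-major ELL storage (entry (row, slot) stored at slot*nrow + row);
// the kernel and buffer names are illustrative, not the library's kernel_ell_spmv.
#include <cstdio>
#include <vector>
#include <hip/hip_runtime.h>

__global__ void ell_spmv_sketch(int nrow, int max_row,
                                const int *ell_col, const double *ell_val,
                                const double *x, double *y) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= nrow) return;
  double sum = 0.0;
  for (int slot = 0; slot < max_row; ++slot) {
    int idx = slot * nrow + row;                 // column-major ELL index (assumption)
    int col = ell_col[idx];
    if (col >= 0) sum += ell_val[idx] * x[col];  // col == -1 marks padding
  }
  y[row] = sum;
}

int main() {
  // 2x2 identity stored as ELL with max_row = 1.
  const int nrow = 2, max_row = 1;
  std::vector<int> h_col = {0, 1};
  std::vector<double> h_val = {1.0, 1.0}, h_x = {3.0, 4.0}, h_y(nrow, 0.0);
  int *d_col; double *d_val, *d_x, *d_y;
  hipMalloc((void**)&d_col, h_col.size() * sizeof(int));
  hipMalloc((void**)&d_val, h_val.size() * sizeof(double));
  hipMalloc((void**)&d_x, h_x.size() * sizeof(double));
  hipMalloc((void**)&d_y, h_y.size() * sizeof(double));
  hipMemcpy(d_col, h_col.data(), h_col.size() * sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(d_val, h_val.data(), h_val.size() * sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(d_x, h_x.data(), h_x.size() * sizeof(double), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(ell_spmv_sketch, dim3(1), dim3(64), 0, 0,
                     nrow, max_row, d_col, d_val, d_x, d_y);
  hipMemcpy(h_y.data(), d_y, nrow * sizeof(double), hipMemcpyDeviceToHost);
  std::printf("y = [%f, %f]\n", h_y[0], h_y[1]);
  hipFree(d_col); hipFree(d_val); hipFree(d_x); hipFree(d_y);
  return 0;
}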
21748a37623d71d647d5d9f7cf9f8de4e1d054d8.cu
#include "hip/hip_runtime.h" // ************************************************************************** // // PARALUTION www.paralution.com // // Copyright (C) 2015 PARALUTION Labs UG (haftungsbeschränkt) & Co. KG // Am Hasensprung 6, 76571 Gaggenau // Handelsregister: Amtsgericht Mannheim, HRA 706051 // Vertreten durch: // PARALUTION Labs Verwaltungs UG (haftungsbeschränkt) // Am Hasensprung 6, 76571 Gaggenau // Handelsregister: Amtsgericht Mannheim, HRB 721277 // Geschäftsführer: Dimitar Lukarski, Nico Trost // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // // ************************************************************************** // PARALUTION version 1.1.0 #include "../../utils/def.hpp" #include "gpu_matrix_csr.hpp" #include "gpu_matrix_coo.hpp" #include "gpu_matrix_ell.hpp" #include "gpu_matrix_hyb.hpp" #include "gpu_vector.hpp" #include "../host/host_matrix_hyb.hpp" #include "../base_matrix.hpp" #include "../base_vector.hpp" #include "../backend_manager.hpp" #include "../../utils/log.hpp" #include "../../utils/allocate_free.hpp" #include "gpu_utils.hpp" #include "cuda_kernels_general.hpp" #include "cuda_kernels_hyb.hpp" #include "cuda_kernels_vector.hpp" #include "gpu_allocate_free.hpp" #include "../matrix_formats_ind.hpp" #include <algorithm> #include <hip/hip_runtime.h> //#include <hipsparse.h> #include "hipsparse.h" namespace paralution { template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::GPUAcceleratorMatrixHYB() { // no default constructors LOG_INFO("no default constructor"); FATAL_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::GPUAcceleratorMatrixHYB(const Paralution_Backend_Descriptor local_backend) { LOG_DEBUG(this, "GPUAcceleratorMatrixHYB::GPUAcceleratorMatrixHYB()", "constructor with local_backend"); this->mat_.ELL.val = NULL; this->mat_.ELL.col = NULL; this->mat_.ELL.max_row = 0; this->mat_.COO.row = NULL; this->mat_.COO.col = NULL; this->mat_.COO.val = NULL; this->ell_nnz_ = 0; this->coo_nnz_ = 0; this->set_backend(local_backend); CHECK_CUDA_ERROR(__FILE__, __LINE__); } template <typename ValueType> GPUAcceleratorMatrixHYB<ValueType>::~GPUAcceleratorMatrixHYB() { LOG_DEBUG(this, "GPUAcceleratorMatrixHYB::~GPUAcceleratorMatrixHYB()", "destructor"); this->Clear(); } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::info(void) const { LOG_INFO("GPUAcceleratorMatrixHYB<ValueType>"); } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::AllocateHYB(const int ell_nnz, const int coo_nnz, const int ell_max_row, const int nrow, const int ncol) { assert( ell_nnz >= 0); assert( coo_nnz >= 0); assert( ell_max_row >= 0); assert( ncol >= 0); assert( nrow >= 0); if (this->get_nnz() > 0) this->Clear(); if (ell_nnz + coo_nnz > 0) { // ELL assert(ell_nnz == ell_max_row*nrow); allocate_gpu(ell_nnz, &this->mat_.ELL.val); allocate_gpu(ell_nnz, &this->mat_.ELL.col); 
set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, ell_nnz, this->mat_.ELL.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, ell_nnz, this->mat_.ELL.col); this->mat_.ELL.max_row = ell_max_row; this->ell_nnz_ = ell_nnz; // COO allocate_gpu(coo_nnz, &this->mat_.COO.row); allocate_gpu(coo_nnz, &this->mat_.COO.col); allocate_gpu(coo_nnz, &this->mat_.COO.val); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.row); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.col); set_to_zero_gpu(this->local_backend_.GPU_block_size, this->local_backend_.GPU_max_threads, coo_nnz, this->mat_.COO.val); this->coo_nnz_ = coo_nnz; this->nrow_ = nrow; this->ncol_ = ncol; this->nnz_ = ell_nnz + coo_nnz; } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::Clear() { if (this->get_nnz() > 0) { free_gpu(&this->mat_.COO.row); free_gpu(&this->mat_.COO.col); free_gpu(&this->mat_.COO.val); free_gpu(&this->mat_.ELL.val); free_gpu(&this->mat_.ELL.col); this->ell_nnz_ = 0; this->coo_nnz_ = 0; this->mat_.ELL.max_row = 0; this->nrow_ = 0; this->ncol_ = 0; this->nnz_ = 0; } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) { const HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixHYB<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateHYB(cast_mat->get_ell_nnz(), cast_mat->get_coo_nnz(), cast_mat->get_ell_max_row(), cast_mat->get_nrow(), cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const { HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixHYB<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); 
assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateHYB(gpu_cast_mat->get_ell_nnz(), gpu_cast_mat->get_coo_nnz(), gpu_cast_mat->get_ell_max_row(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst gpu_cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst gpu_cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst gpu_cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst gpu_cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst gpu_cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHost(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = 
dynamic_cast<GPUAcceleratorMatrixHYB<ValueType>*> (dst)) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(gpu_cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(gpu_cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if (host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) { this->CopyToHost(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) { const HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // CPU to GPU copy if ((cast_mat = dynamic_cast<const HostMatrixHYB<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateHYB(cast_mat->get_ell_nnz(), cast_mat->get_coo_nnz(), cast_mat->get_ell_max_row(), cast_mat->get_nrow(), cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpyAsync(this->mat_.ELL.col, // dst cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.ELL.val, // dst cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpyAsync(this->mat_.COO.row, // dst cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.COO.col, // dst cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(this->mat_.COO.val, // dst cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyHostToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const { HostMatrixHYB<ValueType> *cast_mat; // copy only in the same format 
assert(this->get_mat_format() == dst->get_mat_format()); // GPU to CPU copy if ((cast_mat = dynamic_cast<HostMatrixHYB<ValueType>*> (dst)) != NULL) { cast_mat->set_backend(this->local_backend_); if (dst->get_nnz() == 0) cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpyAsync(cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpyAsync(cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpyAsync(cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) { const GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; const HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == src.get_mat_format()); // GPU to GPU copy if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&src)) != NULL) { if (this->get_nnz() == 0) this->AllocateHYB(gpu_cast_mat->get_ell_nnz(), gpu_cast_mat->get_coo_nnz(), gpu_cast_mat->get_ell_max_row(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol()); assert(this->get_nnz() == src.get_nnz()); assert(this->get_nrow() == src.get_nrow()); assert(this->get_ncol() == src.get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(this->mat_.ELL.col, // dst gpu_cast_mat->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.ELL.val, // dst gpu_cast_mat->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(this->mat_.COO.row, // dst gpu_cast_mat->mat_.COO.row, // src (this->get_coo_nnz())*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.col, // dst gpu_cast_mat->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(this->mat_.COO.val, // dst gpu_cast_mat->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //CPU to GPU if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) { this->CopyFromHostAsync(*host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); src.info(); FATAL_ERROR(__FILE__, __LINE__); } 
} } template <typename ValueType> void GPUAcceleratorMatrixHYB<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const { GPUAcceleratorMatrixHYB<ValueType> *gpu_cast_mat; HostMatrix<ValueType> *host_cast_mat; // copy only in the same format assert(this->get_mat_format() == dst->get_mat_format()); // GPU to GPU copy if (gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixHYB<ValueType>*> (dst)) { gpu_cast_mat->set_backend(this->local_backend_); if (this->get_nnz() == 0) gpu_cast_mat->AllocateHYB(this->get_ell_nnz(), this->get_coo_nnz(), this->get_ell_max_row(), this->get_nrow(), this->get_ncol()); assert(this->get_nnz() == dst->get_nnz()); assert(this->get_nrow() == dst->get_nrow()); assert(this->get_ncol() == dst->get_ncol()); if (this->get_ell_nnz() > 0) { // ELL hipMemcpy(gpu_cast_mat->mat_.ELL.col, // dst this->mat_.ELL.col, // src this->get_ell_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.ELL.val, // dst this->mat_.ELL.val, // src this->get_ell_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } if (this->get_coo_nnz() > 0) { // COO hipMemcpy(gpu_cast_mat->mat_.COO.row, // dst this->mat_.COO.row, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.col, // dst this->mat_.COO.col, // src this->get_coo_nnz()*sizeof(int), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); hipMemcpy(gpu_cast_mat->mat_.COO.val, // dst this->mat_.COO.val, // src this->get_coo_nnz()*sizeof(ValueType), // size hipMemcpyDeviceToDevice); CHECK_CUDA_ERROR(__FILE__, __LINE__); } } else { //GPU to CPU if (host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) { this->CopyToHostAsync(host_cast_mat); } else { LOG_INFO("Error unsupported GPU matrix type"); this->info(); dst->info(); FATAL_ERROR(__FILE__, __LINE__); } } } template <typename ValueType> bool GPUAcceleratorMatrixHYB<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) { this->Clear(); // empty matrix is empty matrix if (mat.get_nnz() == 0) return true; const GPUAcceleratorMatrixHYB<ValueType> *cast_mat_hyb; if ((cast_mat_hyb = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&mat)) != NULL) { this->CopyFrom(*cast_mat_hyb); return true; } const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr; if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) { this->Clear(); int nrow = cast_mat_csr->get_nrow(); int ncol = cast_mat_csr->get_ncol(); int max_row = cast_mat_csr->get_nnz() / nrow; // get nnz per row for COO part int *nnz_coo = NULL; dim3 BlockSize(this->local_backend_.GPU_block_size); dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1); allocate_gpu<int>(nrow, &nnz_coo); hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_nnz_coo<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, max_row, cast_mat_csr->mat_.row_offset, nnz_coo); CHECK_CUDA_ERROR(__FILE__, __LINE__); // get nnz for COO part by summing up nnz per row array int *d_buffer = NULL; int *h_buffer = NULL; int GROUP_SIZE; int LOCAL_SIZE; int FinalReduceSize; allocate_gpu<int>(this->local_backend_.GPU_warp * 4, &d_buffer); dim3 GridSize2(this->local_backend_.GPU_warp * 4); GROUP_SIZE = ( size_t( ( size_t( nrow / ( this->local_backend_.GPU_warp * 4 ) ) + 1 ) / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size; LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size; 
    hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_reduce<int, int, 256>),
                       dim3(GridSize2), dim3(BlockSize), 0, 0,
                       nrow, nnz_coo, d_buffer, GROUP_SIZE, LOCAL_SIZE);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

    FinalReduceSize = this->local_backend_.GPU_warp * 4;
    allocate_host(FinalReduceSize, &h_buffer);

    hipMemcpy(h_buffer,                    // dst
              d_buffer,                    // src
              FinalReduceSize*sizeof(int), // size
              hipMemcpyDeviceToHost);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

    free_gpu<int>(&d_buffer);

    int num_nnz_coo = 0;
    for (int i=0; i<FinalReduceSize; ++i)
      num_nnz_coo += h_buffer[i];

    free_host(&h_buffer);

    // allocate ELL and COO matrices
    int num_nnz_ell = max_row * nrow;

    if (num_nnz_ell <= 0 || num_nnz_coo <= 0) {
      free_gpu<int>(&nnz_coo);
      return false;
    }

    this->AllocateHYB(num_nnz_ell, num_nnz_coo, max_row, nrow, ncol);

    hipMemset(this->mat_.ELL.col, -1, num_nnz_ell*sizeof(int));
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

    // copy up to num_cols_per_row values of row i into the ELL
    int *nnz_ell = NULL;
    allocate_gpu<int>(nrow, &nnz_ell);

    hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_fill_ell<ValueType, int>),
                       dim3(GridSize), dim3(BlockSize), 0, 0,
                       nrow, max_row,
                       cast_mat_csr->mat_.row_offset, cast_mat_csr->mat_.col, cast_mat_csr->mat_.val,
                       this->mat_.ELL.col, this->mat_.ELL.val, nnz_ell);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

    // TODO currently performing partial sum on host
    allocate_host(nrow, &h_buffer);
    hipMemcpy(h_buffer,         // dst
              nnz_ell,          // src
              nrow*sizeof(int), // size
              hipMemcpyDeviceToHost);

    for (int i=1; i<nrow; ++i)
      h_buffer[i] += h_buffer[i-1];

    hipMemcpy(nnz_ell,          // dst
              h_buffer,         // src
              nrow*sizeof(int), // size
              hipMemcpyHostToDevice);

    free_host(&h_buffer);
    // end TODO

    // copy any remaining values in row i into the COO
    hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_fill_coo<ValueType, int>),
                       dim3(GridSize), dim3(BlockSize), 0, 0,
                       nrow,
                       cast_mat_csr->mat_.row_offset, cast_mat_csr->mat_.col, cast_mat_csr->mat_.val,
                       nnz_coo, nnz_ell,
                       this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);

    free_gpu<int>(&nnz_ell);
    free_gpu<int>(&nnz_coo);

    this->nrow_ = cast_mat_csr->get_nrow();
    this->ncol_ = cast_mat_csr->get_ncol();
    this->nnz_  = num_nnz_ell + num_nnz_coo;

    this->mat_.ELL.max_row = max_row;
    this->ell_nnz_ = num_nnz_ell;
    this->coo_nnz_ = num_nnz_coo;

    return true;

  }

  return false;

}

template <typename ValueType>
void GPUAcceleratorMatrixHYB<ValueType>::Apply(const BaseVector<ValueType> &in,
                                               BaseVector<ValueType> *out) const {

  if (this->get_nnz() > 0) {

    assert(in.get_size()  >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size()  == this->get_ncol());
    assert(out->get_size() == this->get_nrow());

    const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*>(&in);
    GPUAcceleratorVector<ValueType> *cast_out      = dynamic_cast<      GPUAcceleratorVector<ValueType>*>(out);

    assert(cast_in  != NULL);
    assert(cast_out != NULL);

    // ELL
    if (this->get_ell_nnz() > 0) {

      int nrow = this->get_nrow();
      int ncol = this->get_ncol();
      int max_row = this->get_ell_max_row();

      dim3 BlockSize(this->local_backend_.GPU_block_size);
      dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);

      hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_spmv<ValueType, int>),
                         dim3(GridSize), dim3(BlockSize), 0, 0,
                         nrow, ncol, max_row,
                         this->mat_.ELL.col, this->mat_.ELL.val,
                         cast_in->vec_, cast_out->vec_);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

    }

    if (this->get_coo_nnz() > 0) {

      // do not support super small matrices
      assert(this->get_coo_nnz() > this->local_backend_.GPU_warp);

      // ----------------------------------------------------------
      // Modified and adapted from CUSP 0.3.1,
      // http://code.google.com/p/cusp-library/
      // NVIDIA, APACHE LICENSE 2.0
      // ----------------------------------------------------------
      // see __spmv_coo_flat(...)
      // ----------------------------------------------------------
      // CHANGELOG
      // - adapted interface
      // ----------------------------------------------------------

      const unsigned int BLOCK_SIZE = this->local_backend_.GPU_block_size;
      // const unsigned int MAX_BLOCKS = this->local_backend_.GPU_max_blocks;
      const unsigned int MAX_BLOCKS = 32; // cusp::detail::device::arch::max_active_blocks(spmv_coo_flat_kernel<IndexType, ValueType, BLOCK_SIZE, UseCache>, BLOCK_SIZE, (size_t) 0);
      const unsigned int WARPS_PER_BLOCK = BLOCK_SIZE / this->local_backend_.GPU_warp;

      const unsigned int num_units  = this->get_coo_nnz() / this->local_backend_.GPU_warp;
      const unsigned int num_warps  = std::min(num_units, WARPS_PER_BLOCK * MAX_BLOCKS);
      const unsigned int num_blocks = (num_warps + (WARPS_PER_BLOCK-1)) / WARPS_PER_BLOCK; // (N + (granularity - 1)) / granularity
      const unsigned int num_iters  = (num_units + (num_warps-1)) / num_warps;

      const unsigned int interval_size = this->local_backend_.GPU_warp * num_iters;

      const int tail = num_units * this->local_backend_.GPU_warp; // do the last few nonzeros separately (fewer than this->local_backend_.GPU_warp elements)

      const unsigned int active_warps = (interval_size == 0) ? 0 : ((tail + (interval_size-1))/interval_size);

      int *temp_rows = NULL;
      ValueType *temp_vals = NULL;

      allocate_gpu(active_warps, &temp_rows);
      allocate_gpu(active_warps, &temp_vals);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

      // TODO
      // BLOCK_SIZE == 256
      // WARP_SIZE == 32
      hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_flat<int, ValueType, 256, 32>),
                         dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0,
                         tail, interval_size,
                         this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val,
                         ValueType(1.0),
                         cast_in->vec_, cast_out->vec_,
                         temp_rows, temp_vals);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

      // TODO
      // BLOCK_SIZE == 256
      hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_reduce_update<int, ValueType, 256>),
                         dim3(1), dim3(BLOCK_SIZE), 0, 0,
                         active_warps, temp_rows, temp_vals, cast_out->vec_);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

      hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_serial<int, ValueType>),
                         dim3(1), dim3(1), 0, 0,
                         this->get_coo_nnz() - tail,
                         this->mat_.COO.row + tail, this->mat_.COO.col + tail, this->mat_.COO.val + tail,
                         ValueType(1.0),
                         cast_in->vec_, cast_out->vec_);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

      free_gpu(&temp_rows);
      free_gpu(&temp_vals);

    }

  }

}

template <typename ValueType>
void GPUAcceleratorMatrixHYB<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar,
                                                  BaseVector<ValueType> *out) const {

  if (this->get_nnz() > 0) {

    assert(in.get_size()  >= 0);
    assert(out->get_size() >= 0);
    assert(in.get_size()  == this->get_ncol());
    assert(out->get_size() == this->get_nrow());

    const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*>(&in);
    GPUAcceleratorVector<ValueType> *cast_out      = dynamic_cast<      GPUAcceleratorVector<ValueType>*>(out);

    assert(cast_in  != NULL);
    assert(cast_out != NULL);

    // ELL
    if (this->get_ell_nnz() > 0) {

      int nrow = this->get_nrow();
      int ncol = this->get_ncol();
      int max_row = this->get_ell_max_row();

      dim3 BlockSize(this->local_backend_.GPU_block_size);
      dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);

      hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_ell_add_spmv<ValueType, int>),
                         dim3(GridSize), dim3(BlockSize), 0, 0,
                         nrow, ncol, max_row,
                         this->mat_.ELL.col, this->mat_.ELL.val,
                         scalar,
                         cast_in->vec_, cast_out->vec_);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

    }

    if (this->get_coo_nnz() > 0) {

      // do not support super small matrices
      assert(this->get_coo_nnz() > this->local_backend_.GPU_warp);

      // ----------------------------------------------------------
      // Modified and adapted from CUSP 0.3.1,
      // http://code.google.com/p/cusp-library/
      // NVIDIA, APACHE LICENSE 2.0
      // ----------------------------------------------------------
      // see __spmv_coo_flat(...)
      // ----------------------------------------------------------
      // CHANGELOG
      // - adapted interface
      // ----------------------------------------------------------

      const unsigned int BLOCK_SIZE = this->local_backend_.GPU_block_size;
      // const unsigned int MAX_BLOCKS = this->local_backend_.GPU_max_blocks;
      const unsigned int MAX_BLOCKS = 32; // cusp::detail::device::arch::max_active_blocks(spmv_coo_flat_kernel<IndexType, ValueType, BLOCK_SIZE, UseCache>, BLOCK_SIZE, (size_t) 0);
      const unsigned int WARPS_PER_BLOCK = BLOCK_SIZE / this->local_backend_.GPU_warp;

      const unsigned int num_units  = this->get_coo_nnz() / this->local_backend_.GPU_warp;
      const unsigned int num_warps  = std::min(num_units, WARPS_PER_BLOCK * MAX_BLOCKS);
      const unsigned int num_blocks = (num_warps + (WARPS_PER_BLOCK-1)) / WARPS_PER_BLOCK; // (N + (granularity - 1)) / granularity
      const unsigned int num_iters  = (num_units + (num_warps-1)) / num_warps;

      const unsigned int interval_size = this->local_backend_.GPU_warp * num_iters;

      const int tail = num_units * this->local_backend_.GPU_warp; // do the last few nonzeros separately (fewer than this->local_backend_.GPU_warp elements)

      const unsigned int active_warps = (interval_size == 0) ? 0 : ((tail + (interval_size-1))/interval_size);

      int *temp_rows = NULL;
      ValueType *temp_vals = NULL;

      allocate_gpu(active_warps, &temp_rows);
      allocate_gpu(active_warps, &temp_vals);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

      // TODO
      // BLOCK_SIZE == 256
      // WARP_SIZE == 32
      hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_flat<int, ValueType, 256, 32>),
                         dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0,
                         tail, interval_size,
                         this->mat_.COO.row, this->mat_.COO.col, this->mat_.COO.val,
                         scalar,
                         cast_in->vec_, cast_out->vec_,
                         temp_rows, temp_vals);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

      // TODO
      // BLOCK_SIZE == 256
      hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_reduce_update<int, ValueType, 256>),
                         dim3(1), dim3(BLOCK_SIZE), 0, 0,
                         active_warps, temp_rows, temp_vals, cast_out->vec_);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

      hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_spmv_coo_serial<int, ValueType>),
                         dim3(1), dim3(1), 0, 0,
                         this->get_coo_nnz() - tail,
                         this->mat_.COO.row + tail, this->mat_.COO.col + tail, this->mat_.COO.val + tail,
                         scalar,
                         cast_in->vec_, cast_out->vec_);
      CHECK_CUDA_ERROR(__FILE__, __LINE__);

      free_gpu(&temp_rows);
      free_gpu(&temp_vals);

    }

  }

}

template class GPUAcceleratorMatrixHYB<double>;
template class GPUAcceleratorMatrixHYB<float>;

}
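// Editor's note: a minimal, host-only sketch of the CUSP-style interval arithmetic that
// Apply()/ApplyAdd() above use to size the flat COO SpMV launch. It only restates the
// formulas already present in the code so the work partitioning can be inspected in
// isolation; the input values (nnz_coo, BLOCK_SIZE, WARP_SIZE, MAX_BLOCKS) are hypothetical.
#include <algorithm>
#include <cstdio>

int main() {
    const unsigned int nnz_coo    = 100000; // assumed COO non-zero count
    const unsigned int BLOCK_SIZE = 256;    // plays the role of local_backend_.GPU_block_size
    const unsigned int WARP_SIZE  = 32;     // plays the role of local_backend_.GPU_warp
    const unsigned int MAX_BLOCKS = 32;     // hard-coded cap, as in the code above

    const unsigned int warps_per_block = BLOCK_SIZE / WARP_SIZE;
    const unsigned int num_units  = nnz_coo / WARP_SIZE;                       // full warps of work
    const unsigned int num_warps  = std::min(num_units, warps_per_block * MAX_BLOCKS);
    const unsigned int num_blocks = (num_warps + (warps_per_block - 1)) / warps_per_block;
    const unsigned int num_iters  = (num_units + (num_warps - 1)) / num_warps; // rounds per warp
    const unsigned int interval_size = WARP_SIZE * num_iters;                  // non-zeros per warp interval
    const unsigned int tail = num_units * WARP_SIZE;                           // handled by the flat kernel
    const unsigned int active_warps =
        (interval_size == 0) ? 0 : (tail + (interval_size - 1)) / interval_size;

    // the leftover nnz_coo - tail elements correspond to the kernel_spmv_coo_serial launch above
    std::printf("blocks=%u warps=%u interval=%u tail=%u serial_leftover=%u active_warps=%u\n",
                num_blocks, num_warps, interval_size, tail, nnz_coo - tail, active_warps);
    return 0;
}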
8c28d2748dd29e23a7a082bc8f3f14f33ffdd3c1.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2017 NVIDIA Corporation. All rights reserved * * Sample app to demonstrate use of CUPTI library to obtain metric values * using callbacks for CUDA runtime APIs * */ #include <stdio.h> #include <hip/hip_runtime.h> #include <cupti.h> #define METRIC_NAME "ipc" #define DRIVER_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \ __FILE__, __LINE__, #apiFuncCall, _status); \ exit(-1); \ } \ } while (0) #define RUNTIME_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status));\ exit(-1); \ } \ } while (0) #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0) #define ALIGN_SIZE (8) #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer)) // User data for event collection callback typedef struct MetricData_st { // the device where metric is being collected hipDevice_t device; // the set of event groups to collect for a pass CUpti_EventGroupSet *eventGroups; // the current number of events collected in eventIdArray and // eventValueArray uint32_t eventIdx; // the number of entries in eventIdArray and eventValueArray uint32_t numEvents; // array of event ids CUpti_EventID *eventIdArray; // array of event values uint64_t *eventValueArray; } MetricData_t; static uint64_t kernelDuration; // Device code __global__ void VecAdd(const int* A, const int* B, int* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } static void initVec(int *vec, int n) { for (int i=0; i< n; i++) vec[i] = i; } void CUPTIAPI getMetricValueCallback(void *userdata, CUpti_CallbackDomain domain, CUpti_CallbackId cbid, const CUpti_CallbackData *cbInfo) { MetricData_t *metricData = (MetricData_t*)userdata; unsigned int i, j, k; // This callback is enabled only for launch so we shouldn't see // anything else. 
if ((cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) && (cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000)) { printf("%s:%d: unexpected cbid %d\n", __FILE__, __LINE__, cbid); exit(-1); } // on entry, enable all the event groups being collected this pass, // for metrics we collect for all instances of the event if (cbInfo->callbackSite == CUPTI_API_ENTER) { hipDeviceSynchronize(); CUPTI_CALL(cuptiSetEventCollectionMode(cbInfo->context, CUPTI_EVENT_COLLECTION_MODE_KERNEL)); for (i = 0; i < metricData->eventGroups->numEventGroups; i++) { uint32_t all = 1; CUPTI_CALL(cuptiEventGroupSetAttribute(metricData->eventGroups->eventGroups[i], CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES, sizeof(all), &all)); CUPTI_CALL(cuptiEventGroupEnable(metricData->eventGroups->eventGroups[i])); } } // on exit, read and record event values if (cbInfo->callbackSite == CUPTI_API_EXIT) { hipDeviceSynchronize(); // for each group, read the event values from the group and record // in metricData for (i = 0; i < metricData->eventGroups->numEventGroups; i++) { CUpti_EventGroup group = metricData->eventGroups->eventGroups[i]; CUpti_EventDomainID groupDomain; uint32_t numEvents, numInstances, numTotalInstances; CUpti_EventID *eventIds; size_t groupDomainSize = sizeof(groupDomain); size_t numEventsSize = sizeof(numEvents); size_t numInstancesSize = sizeof(numInstances); size_t numTotalInstancesSize = sizeof(numTotalInstances); uint64_t *values, normalized, sum; size_t valuesSize, eventIdsSize; CUPTI_CALL(cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID, &groupDomainSize, &groupDomain)); CUPTI_CALL(cuptiDeviceGetEventDomainAttribute(metricData->device, groupDomain, CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT, &numTotalInstancesSize, &numTotalInstances)); CUPTI_CALL(cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT, &numInstancesSize, &numInstances)); CUPTI_CALL(cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS, &numEventsSize, &numEvents)); eventIdsSize = numEvents * sizeof(CUpti_EventID); eventIds = (CUpti_EventID *)malloc(eventIdsSize); CUPTI_CALL(cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_EVENTS, &eventIdsSize, eventIds)); valuesSize = sizeof(uint64_t) * numInstances; values = (uint64_t *)malloc(valuesSize); for (j = 0; j < numEvents; j++) { CUPTI_CALL(cuptiEventGroupReadEvent(group, CUPTI_EVENT_READ_FLAG_NONE, eventIds[j], &valuesSize, values)); if (metricData->eventIdx >= metricData->numEvents) { fprintf(stderr, "error: too many events collected, metric expects only %d\n", (int)metricData->numEvents); exit(-1); } // sum collect event values from all instances sum = 0; for (k = 0; k < numInstances; k++) sum += values[k]; // normalize the event value to represent the total number of // domain instances on the device normalized = (sum * numTotalInstances) / numInstances; metricData->eventIdArray[metricData->eventIdx] = eventIds[j]; metricData->eventValueArray[metricData->eventIdx] = normalized; metricData->eventIdx++; // print collected value { char eventName[128]; size_t eventNameSize = sizeof(eventName) - 1; CUPTI_CALL(cuptiEventGetAttribute(eventIds[j], CUPTI_EVENT_ATTR_NAME, &eventNameSize, eventName)); eventName[127] = '\0'; printf("\t%s = %llu (", eventName, (unsigned long long)sum); if (numInstances > 1) { for (k = 0; k < numInstances; k++) { if (k != 0) printf(", "); printf("%llu", (unsigned long long)values[k]); } } printf(")\n"); printf("\t%s (normalized) (%llu * %u) / %u = %llu\n", eventName, (unsigned long 
long)sum, numTotalInstances, numInstances, (unsigned long long)normalized); } } free(values); } for (i = 0; i < metricData->eventGroups->numEventGroups; i++) CUPTI_CALL(cuptiEventGroupDisable(metricData->eventGroups->eventGroups[i])); } } static void cleanUp(int *h_A, int *h_B, int *h_C, int *d_A, int *d_B, int *d_C) { if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } static void runPass() { int N = 50000; size_t size = N * sizeof(int); int threadsPerBlock = 0; int blocksPerGrid = 0; int *h_A, *h_B, *h_C; int *d_A, *d_B, *d_C; int i, sum; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); h_B = (int*)malloc(size); h_C = (int*)malloc(size); // Initialize input vectors initVec(h_A, N); initVec(h_B, N); memset(h_C, 0, size); // Allocate vectors in device memory hipMalloc((void**)&d_A, size); hipMalloc((void**)&d_B, size); hipMalloc((void**)&d_C, size); // Copy vectors from host memory to device memory hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); // Invoke kernel threadsPerBlock = 256; blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; printf("Launching kernel: blocks %d, thread/block %d\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N); // Copy result from device memory to host memory // h_C contains the result in host memory hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); // Verify result for (i = 0; i < N; ++i) { sum = h_A[i] + h_B[i]; if (h_C[i] != sum) { fprintf(stderr, "error: result verification failed\n"); exit(-1); } } cleanUp(h_A, h_B, h_C, d_A, d_B, d_C); } static void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords) { uint8_t *rawBuffer; *size = 16 * 1024; rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE); *buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE); *maxNumRecords = 0; if (*buffer == NULL) { printf("Error: out of memory\n"); exit(-1); } } static void CUPTIAPI bufferCompleted(hipCtx_t ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize) { CUpti_Activity *record = NULL; CUpti_ActivityKernel4 *kernel; //since we launched only 1 kernel, we should have only 1 kernel record CUPTI_CALL(cuptiActivityGetNextRecord(buffer, validSize, &record)); kernel = (CUpti_ActivityKernel4 *)record; if (kernel->kind != CUPTI_ACTIVITY_KIND_KERNEL) { fprintf(stderr, "Error: expected kernel activity record, got %d\n", (int)kernel->kind); exit(-1); } kernelDuration = kernel->end - kernel->start; free(buffer); } int main(int argc, char *argv[]) { CUpti_SubscriberHandle subscriber; hipCtx_t context = 0; hipDevice_t device = 0; int deviceNum; int deviceCount; char deviceName[32]; const char *metricName; CUpti_MetricID metricId; CUpti_EventGroupSets *passData; MetricData_t metricData; unsigned int pass; CUpti_MetricValue metricValue; printf("Usage: %s [device_num] [metric_name]\n", argv[0]); // make sure activity is enabled before any CUDA API CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL)); DRIVER_API_CALL(hipInit(0)); DRIVER_API_CALL(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { printf("There is no device supporting CUDA.\n"); return -2; } if (argc > 1) deviceNum = atoi(argv[1]); else deviceNum = 0; printf("CUDA Device Number: %d\n", deviceNum); DRIVER_API_CALL(hipDeviceGet(&device, deviceNum)); DRIVER_API_CALL(hipDeviceGetName(deviceName, 32, device)); 
printf("CUDA Device Name: %s\n", deviceName); DRIVER_API_CALL(hipCtxCreate(&context, 0, device)); // Get the name of the metric to collect if (argc > 2) metricName = argv[2]; else { metricName = METRIC_NAME; } // need to collect duration of kernel execution without any event // collection enabled (some metrics need kernel duration as part of // calculation). The only accurate way to do this is by using the // activity API. { CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted)); runPass(); hipDeviceSynchronize(); CUPTI_CALL(cuptiActivityFlushAll(0)); } // setup launch callback for event collection CUPTI_CALL(cuptiSubscribe(&subscriber, (CUpti_CallbackFunc)getMetricValueCallback, &metricData)); CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API, CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020)); CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API, CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000)); // allocate space to hold all the events needed for the metric CUPTI_CALL(cuptiMetricGetIdFromName(device, metricName, &metricId)); CUPTI_CALL(cuptiMetricGetNumEvents(metricId, &metricData.numEvents)); metricData.device = device; metricData.eventIdArray = (CUpti_EventID *)malloc(metricData.numEvents * sizeof(CUpti_EventID)); metricData.eventValueArray = (uint64_t *)malloc(metricData.numEvents * sizeof(uint64_t)); metricData.eventIdx = 0; // get the number of passes required to collect all the events // needed for the metric and the event groups for each pass CUPTI_CALL(cuptiMetricCreateEventGroupSets(context, sizeof(metricId), &metricId, &passData)); for (pass = 0; pass < passData->numSets; pass++) { printf("Pass %u\n", pass); metricData.eventGroups = passData->sets + pass; runPass(); } if (metricData.eventIdx != metricData.numEvents) { fprintf(stderr, "error: expected %u metric events, got %u\n", metricData.numEvents, metricData.eventIdx); exit(-1); } // use all the collected events to calculate the metric value CUPTI_CALL(cuptiMetricGetValue(device, metricId, metricData.numEvents * sizeof(CUpti_EventID), metricData.eventIdArray, metricData.numEvents * sizeof(uint64_t), metricData.eventValueArray, kernelDuration, &metricValue)); // print metric value, we format based on the value kind { CUpti_MetricValueKind valueKind; size_t valueKindSize = sizeof(valueKind); CUPTI_CALL(cuptiMetricGetAttribute(metricId, CUPTI_METRIC_ATTR_VALUE_KIND, &valueKindSize, &valueKind)); switch (valueKind) { case CUPTI_METRIC_VALUE_KIND_DOUBLE: printf("Metric %s = %f\n", metricName, metricValue.metricValueDouble); break; case CUPTI_METRIC_VALUE_KIND_UINT64: printf("Metric %s = %llu\n", metricName, (unsigned long long)metricValue.metricValueUint64); break; case CUPTI_METRIC_VALUE_KIND_INT64: printf("Metric %s = %lld\n", metricName, (long long)metricValue.metricValueInt64); break; case CUPTI_METRIC_VALUE_KIND_PERCENT: printf("Metric %s = %f%%\n", metricName, metricValue.metricValuePercent); break; case CUPTI_METRIC_VALUE_KIND_THROUGHPUT: printf("Metric %s = %llu bytes/sec\n", metricName, (unsigned long long)metricValue.metricValueThroughput); break; case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL: printf("Metric %s = utilization level %u\n", metricName, (unsigned int)metricValue.metricValueUtilizationLevel); break; default: fprintf(stderr, "error: unknown value kind\n"); exit(-1); } } CUPTI_CALL(cuptiUnsubscribe(subscriber)); return 0; }
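// Editor's note: a small host-only sketch (with assumed values, not real CUPTI output) of the
// event normalization performed in getMetricValueCallback() in both the .hip and .cu versions
// above: per-instance counts are summed and then scaled from the number of profiled instances
// up to the total number of domain instances on the device.
#include <cstdint>
#include <cstdio>

static uint64_t normalize_event(const uint64_t *values, uint32_t numInstances,
                                uint32_t numTotalInstances) {
    uint64_t sum = 0;
    for (uint32_t k = 0; k < numInstances; ++k)       // sum values from the profiled instances
        sum += values[k];
    return (sum * numTotalInstances) / numInstances;  // same formula as in the callback
}

int main() {
    const uint64_t values[] = {120, 98, 134, 101};    // hypothetical per-instance event counts
    const uint32_t numInstances = 4;                  // instances actually read
    const uint32_t numTotalInstances = 16;            // instances present on the device
    std::printf("normalized = %llu\n",
                (unsigned long long)normalize_event(values, numInstances, numTotalInstances));
    return 0;
}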
8c28d2748dd29e23a7a082bc8f3f14f33ffdd3c1.cu
/* * Copyright 2011-2017 NVIDIA Corporation. All rights reserved * * Sample app to demonstrate use of CUPTI library to obtain metric values * using callbacks for CUDA runtime APIs * */ #include <stdio.h> #include <cuda.h> #include <cupti.h> #define METRIC_NAME "ipc" #define DRIVER_API_CALL(apiFuncCall) \ do { \ CUresult _status = apiFuncCall; \ if (_status != CUDA_SUCCESS) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \ __FILE__, __LINE__, #apiFuncCall, _status); \ exit(-1); \ } \ } while (0) #define RUNTIME_API_CALL(apiFuncCall) \ do { \ cudaError_t _status = apiFuncCall; \ if (_status != cudaSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\ exit(-1); \ } \ } while (0) #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0) #define ALIGN_SIZE (8) #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer)) // User data for event collection callback typedef struct MetricData_st { // the device where metric is being collected CUdevice device; // the set of event groups to collect for a pass CUpti_EventGroupSet *eventGroups; // the current number of events collected in eventIdArray and // eventValueArray uint32_t eventIdx; // the number of entries in eventIdArray and eventValueArray uint32_t numEvents; // array of event ids CUpti_EventID *eventIdArray; // array of event values uint64_t *eventValueArray; } MetricData_t; static uint64_t kernelDuration; // Device code __global__ void VecAdd(const int* A, const int* B, int* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } static void initVec(int *vec, int n) { for (int i=0; i< n; i++) vec[i] = i; } void CUPTIAPI getMetricValueCallback(void *userdata, CUpti_CallbackDomain domain, CUpti_CallbackId cbid, const CUpti_CallbackData *cbInfo) { MetricData_t *metricData = (MetricData_t*)userdata; unsigned int i, j, k; // This callback is enabled only for launch so we shouldn't see // anything else. 
if ((cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) && (cbid != CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000)) { printf("%s:%d: unexpected cbid %d\n", __FILE__, __LINE__, cbid); exit(-1); } // on entry, enable all the event groups being collected this pass, // for metrics we collect for all instances of the event if (cbInfo->callbackSite == CUPTI_API_ENTER) { cudaDeviceSynchronize(); CUPTI_CALL(cuptiSetEventCollectionMode(cbInfo->context, CUPTI_EVENT_COLLECTION_MODE_KERNEL)); for (i = 0; i < metricData->eventGroups->numEventGroups; i++) { uint32_t all = 1; CUPTI_CALL(cuptiEventGroupSetAttribute(metricData->eventGroups->eventGroups[i], CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES, sizeof(all), &all)); CUPTI_CALL(cuptiEventGroupEnable(metricData->eventGroups->eventGroups[i])); } } // on exit, read and record event values if (cbInfo->callbackSite == CUPTI_API_EXIT) { cudaDeviceSynchronize(); // for each group, read the event values from the group and record // in metricData for (i = 0; i < metricData->eventGroups->numEventGroups; i++) { CUpti_EventGroup group = metricData->eventGroups->eventGroups[i]; CUpti_EventDomainID groupDomain; uint32_t numEvents, numInstances, numTotalInstances; CUpti_EventID *eventIds; size_t groupDomainSize = sizeof(groupDomain); size_t numEventsSize = sizeof(numEvents); size_t numInstancesSize = sizeof(numInstances); size_t numTotalInstancesSize = sizeof(numTotalInstances); uint64_t *values, normalized, sum; size_t valuesSize, eventIdsSize; CUPTI_CALL(cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID, &groupDomainSize, &groupDomain)); CUPTI_CALL(cuptiDeviceGetEventDomainAttribute(metricData->device, groupDomain, CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT, &numTotalInstancesSize, &numTotalInstances)); CUPTI_CALL(cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_INSTANCE_COUNT, &numInstancesSize, &numInstances)); CUPTI_CALL(cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS, &numEventsSize, &numEvents)); eventIdsSize = numEvents * sizeof(CUpti_EventID); eventIds = (CUpti_EventID *)malloc(eventIdsSize); CUPTI_CALL(cuptiEventGroupGetAttribute(group, CUPTI_EVENT_GROUP_ATTR_EVENTS, &eventIdsSize, eventIds)); valuesSize = sizeof(uint64_t) * numInstances; values = (uint64_t *)malloc(valuesSize); for (j = 0; j < numEvents; j++) { CUPTI_CALL(cuptiEventGroupReadEvent(group, CUPTI_EVENT_READ_FLAG_NONE, eventIds[j], &valuesSize, values)); if (metricData->eventIdx >= metricData->numEvents) { fprintf(stderr, "error: too many events collected, metric expects only %d\n", (int)metricData->numEvents); exit(-1); } // sum collect event values from all instances sum = 0; for (k = 0; k < numInstances; k++) sum += values[k]; // normalize the event value to represent the total number of // domain instances on the device normalized = (sum * numTotalInstances) / numInstances; metricData->eventIdArray[metricData->eventIdx] = eventIds[j]; metricData->eventValueArray[metricData->eventIdx] = normalized; metricData->eventIdx++; // print collected value { char eventName[128]; size_t eventNameSize = sizeof(eventName) - 1; CUPTI_CALL(cuptiEventGetAttribute(eventIds[j], CUPTI_EVENT_ATTR_NAME, &eventNameSize, eventName)); eventName[127] = '\0'; printf("\t%s = %llu (", eventName, (unsigned long long)sum); if (numInstances > 1) { for (k = 0; k < numInstances; k++) { if (k != 0) printf(", "); printf("%llu", (unsigned long long)values[k]); } } printf(")\n"); printf("\t%s (normalized) (%llu * %u) / %u = %llu\n", eventName, (unsigned long 
long)sum, numTotalInstances, numInstances, (unsigned long long)normalized); } } free(values); } for (i = 0; i < metricData->eventGroups->numEventGroups; i++) CUPTI_CALL(cuptiEventGroupDisable(metricData->eventGroups->eventGroups[i])); } } static void cleanUp(int *h_A, int *h_B, int *h_C, int *d_A, int *d_B, int *d_C) { if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } static void runPass() { int N = 50000; size_t size = N * sizeof(int); int threadsPerBlock = 0; int blocksPerGrid = 0; int *h_A, *h_B, *h_C; int *d_A, *d_B, *d_C; int i, sum; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); h_B = (int*)malloc(size); h_C = (int*)malloc(size); // Initialize input vectors initVec(h_A, N); initVec(h_B, N); memset(h_C, 0, size); // Allocate vectors in device memory cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_B, size); cudaMalloc((void**)&d_C, size); // Copy vectors from host memory to device memory cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); // Invoke kernel threadsPerBlock = 256; blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; printf("Launching kernel: blocks %d, thread/block %d\n", blocksPerGrid, threadsPerBlock); VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); // Copy result from device memory to host memory // h_C contains the result in host memory cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); // Verify result for (i = 0; i < N; ++i) { sum = h_A[i] + h_B[i]; if (h_C[i] != sum) { fprintf(stderr, "error: result verification failed\n"); exit(-1); } } cleanUp(h_A, h_B, h_C, d_A, d_B, d_C); } static void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords) { uint8_t *rawBuffer; *size = 16 * 1024; rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE); *buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE); *maxNumRecords = 0; if (*buffer == NULL) { printf("Error: out of memory\n"); exit(-1); } } static void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize) { CUpti_Activity *record = NULL; CUpti_ActivityKernel4 *kernel; //since we launched only 1 kernel, we should have only 1 kernel record CUPTI_CALL(cuptiActivityGetNextRecord(buffer, validSize, &record)); kernel = (CUpti_ActivityKernel4 *)record; if (kernel->kind != CUPTI_ACTIVITY_KIND_KERNEL) { fprintf(stderr, "Error: expected kernel activity record, got %d\n", (int)kernel->kind); exit(-1); } kernelDuration = kernel->end - kernel->start; free(buffer); } int main(int argc, char *argv[]) { CUpti_SubscriberHandle subscriber; CUcontext context = 0; CUdevice device = 0; int deviceNum; int deviceCount; char deviceName[32]; const char *metricName; CUpti_MetricID metricId; CUpti_EventGroupSets *passData; MetricData_t metricData; unsigned int pass; CUpti_MetricValue metricValue; printf("Usage: %s [device_num] [metric_name]\n", argv[0]); // make sure activity is enabled before any CUDA API CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL)); DRIVER_API_CALL(cuInit(0)); DRIVER_API_CALL(cuDeviceGetCount(&deviceCount)); if (deviceCount == 0) { printf("There is no device supporting CUDA.\n"); return -2; } if (argc > 1) deviceNum = atoi(argv[1]); else deviceNum = 0; printf("CUDA Device Number: %d\n", deviceNum); DRIVER_API_CALL(cuDeviceGet(&device, deviceNum)); DRIVER_API_CALL(cuDeviceGetName(deviceName, 32, device)); printf("CUDA Device Name: %s\n", 
deviceName); DRIVER_API_CALL(cuCtxCreate(&context, 0, device)); // Get the name of the metric to collect if (argc > 2) metricName = argv[2]; else { metricName = METRIC_NAME; } // need to collect duration of kernel execution without any event // collection enabled (some metrics need kernel duration as part of // calculation). The only accurate way to do this is by using the // activity API. { CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted)); runPass(); cudaDeviceSynchronize(); CUPTI_CALL(cuptiActivityFlushAll(0)); } // setup launch callback for event collection CUPTI_CALL(cuptiSubscribe(&subscriber, (CUpti_CallbackFunc)getMetricValueCallback, &metricData)); CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API, CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020)); CUPTI_CALL(cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API, CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000)); // allocate space to hold all the events needed for the metric CUPTI_CALL(cuptiMetricGetIdFromName(device, metricName, &metricId)); CUPTI_CALL(cuptiMetricGetNumEvents(metricId, &metricData.numEvents)); metricData.device = device; metricData.eventIdArray = (CUpti_EventID *)malloc(metricData.numEvents * sizeof(CUpti_EventID)); metricData.eventValueArray = (uint64_t *)malloc(metricData.numEvents * sizeof(uint64_t)); metricData.eventIdx = 0; // get the number of passes required to collect all the events // needed for the metric and the event groups for each pass CUPTI_CALL(cuptiMetricCreateEventGroupSets(context, sizeof(metricId), &metricId, &passData)); for (pass = 0; pass < passData->numSets; pass++) { printf("Pass %u\n", pass); metricData.eventGroups = passData->sets + pass; runPass(); } if (metricData.eventIdx != metricData.numEvents) { fprintf(stderr, "error: expected %u metric events, got %u\n", metricData.numEvents, metricData.eventIdx); exit(-1); } // use all the collected events to calculate the metric value CUPTI_CALL(cuptiMetricGetValue(device, metricId, metricData.numEvents * sizeof(CUpti_EventID), metricData.eventIdArray, metricData.numEvents * sizeof(uint64_t), metricData.eventValueArray, kernelDuration, &metricValue)); // print metric value, we format based on the value kind { CUpti_MetricValueKind valueKind; size_t valueKindSize = sizeof(valueKind); CUPTI_CALL(cuptiMetricGetAttribute(metricId, CUPTI_METRIC_ATTR_VALUE_KIND, &valueKindSize, &valueKind)); switch (valueKind) { case CUPTI_METRIC_VALUE_KIND_DOUBLE: printf("Metric %s = %f\n", metricName, metricValue.metricValueDouble); break; case CUPTI_METRIC_VALUE_KIND_UINT64: printf("Metric %s = %llu\n", metricName, (unsigned long long)metricValue.metricValueUint64); break; case CUPTI_METRIC_VALUE_KIND_INT64: printf("Metric %s = %lld\n", metricName, (long long)metricValue.metricValueInt64); break; case CUPTI_METRIC_VALUE_KIND_PERCENT: printf("Metric %s = %f%%\n", metricName, metricValue.metricValuePercent); break; case CUPTI_METRIC_VALUE_KIND_THROUGHPUT: printf("Metric %s = %llu bytes/sec\n", metricName, (unsigned long long)metricValue.metricValueThroughput); break; case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL: printf("Metric %s = utilization level %u\n", metricName, (unsigned int)metricValue.metricValueUtilizationLevel); break; default: fprintf(stderr, "error: unknown value kind\n"); exit(-1); } } CUPTI_CALL(cuptiUnsubscribe(subscriber)); return 0; }
9135f8d2c4b9867b32a5aa3bbc13629b1a6f3cea.hip
// !!! This is a file automatically generated by hipify!!! // // Author: Marko Atanasievski // // Copyright (C) 2020 TANCOM SOFTWARE SOLUTIONS Ltd. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // // Parts of this file are originally copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "cuda_util.h" #include <iostream> #include "mat.h" #include "convolutiondepthwise_cuda.h" static __device__ inline signed char float2int8(float v) { int int32 = static_cast<int>(round(v)); if (int32 > 127) return 127; if (int32 < -127) return -127; return (signed char)int32; } __global__ void gpu_convolutiondepthwise_cuda_forward(const float* a_input, const ncnn::CudaMatInfo a_info, const float* weight_data, const ncnn::CudaMatInfo weight_info, const float* bias_data, const float* activation_params, float* output, const ncnn::CudaMatInfo output_info, const ncnn::ConvolutionDepthWise_cuda::ConvolutionDepthWise_info product_info, const int* gpu_space_offset) { const int output_column = blockIdx.x * blockDim.x + threadIdx.x; const int output_row = blockIdx.y * blockDim.y + threadIdx.y; const int current_group = blockIdx.z * blockDim.z + threadIdx.z; const int input_channel = current_group; extern __shared__ float buffer[]; float* shared_kptr = buffer; const int k_index = threadIdx.x; if (k_index < product_info.maxk) { const float* kptr = (const float*)weight_data + product_info.maxk * current_group; shared_kptr[k_index] = kptr[k_index]; } __syncthreads(); if (output_column >= output_info.w || output_row >= output_info.h || current_group >= a_info.c) { return; } float sum = 0.f; if (product_info.bias_term) { sum += bias_data[current_group]; } const float* sptr = a_input + input_channel * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w; for (int k = 0; k < product_info.maxk; k++) { const float val = sptr [gpu_space_offset[k]]; const float w = shared_kptr[k]; sum += val * w; } if (product_info.activation_type == 1) { sum = max(sum, 0.f); } else if (product_info.activation_type == 2) { float slope = activation_params[0]; sum = sum > 0.f ? 
sum : sum * slope; } else if (product_info.activation_type == 3) { float min = activation_params[0]; float max = activation_params[1]; if (sum < min) sum = min; if (sum > max) sum = max; } else if (product_info.activation_type == 4) { sum = static_cast<float>(1.f / (1.f + exp(-sum))); } else if (product_info.activation_type == 5) { const float MISH_THRESHOLD = 20; float x = sum, y; if (x > MISH_THRESHOLD) y = x; else if (x < -MISH_THRESHOLD) y = expf(x); else y = logf(expf(x) + 1); sum = static_cast<float>(x * tanh(y)); } const int output_index = current_group * output_info.cstep + output_row * output_info.w + output_column; output[output_index] = sum; } __global__ void gpu_convolutiondepthwise_cuda_forward_group(const float* a_input, const ncnn::CudaMatInfo a_info, const float* weight_data, const ncnn::CudaMatInfo weight_info, const float* bias_data, const float* activation_params, float* output, const ncnn::CudaMatInfo output_info, const ncnn::ConvolutionDepthWise_cuda::ConvolutionDepthWise_info product_info, const int* gpu_space_offset, const int channels_g, const int num_output_g) { const int output_column = blockIdx.x * blockDim.x + threadIdx.x; const int output_row = blockIdx.y * blockDim.y + threadIdx.y; const int total_output = blockIdx.z * blockDim.z + threadIdx.z; const int current_num_output_g = total_output % num_output_g; const int current_group = total_output / num_output_g; extern __shared__ float buffer[]; float* shared_kptr = buffer; const int k_index = threadIdx.x; if (k_index < product_info.maxk) { const float* weight_data_ptr = (const float*)weight_data + product_info.maxk * channels_g * num_output_g * current_group; const float* kptr = (const float*)weight_data_ptr + product_info.maxk * channels_g * current_num_output_g; for (int input_channel = 0; input_channel < channels_g; input_channel++) { shared_kptr[input_channel * product_info.maxk + k_index] = kptr[input_channel * product_info.maxk + k_index]; } } __syncthreads(); if (output_column >= output_info.w || output_row >= output_info.h || current_num_output_g >= num_output_g || current_group >= product_info.group) { return; } float sum = 0.f; if (product_info.bias_term) { sum += bias_data[num_output_g * current_group + current_num_output_g]; } for (int input_channel = 0; input_channel < channels_g; input_channel++) { const float* sptr = a_input + (channels_g * current_group + input_channel) * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w; for (int k = 0; k < product_info.maxk; k++) { const float val = sptr [gpu_space_offset[k]]; const float w = shared_kptr[input_channel * product_info.maxk + k]; sum += val * w; } } if (product_info.activation_type == 1) { sum = max(sum, 0.f); } else if (product_info.activation_type == 2) { float slope = activation_params[0]; sum = sum > 0.f ? 
sum : sum * slope; } else if (product_info.activation_type == 3) { float min = activation_params[0]; float max = activation_params[1]; if (sum < min) sum = min; if (sum > max) sum = max; } else if (product_info.activation_type == 4) { sum = static_cast<float>(1.f / (1.f + exp(-sum))); } else if (product_info.activation_type == 5) { const float MISH_THRESHOLD = 20; float x = sum, y; if (x > MISH_THRESHOLD) y = x; else if (x < -MISH_THRESHOLD) y = expf(x); else y = logf(expf(x) + 1); sum = static_cast<float>(x * tanh(y)); } const int output_index = (current_group*num_output_g+current_num_output_g) * output_info.cstep + output_row * output_info.w + output_column; output[output_index] = sum; } __global__ void gpu_convolutiondepthwise_cuda_forward_int8(const signed char* a_input, const ncnn::CudaMatInfo a_info, const signed char* weight_data, const ncnn::CudaMatInfo weight_info, const float* bias_data, const float* activation_params, signed char* output, const ncnn::CudaMatInfo output_info, const ncnn::ConvolutionDepthWise_cuda::ConvolutionDepthWise_info product_info, const int* gpu_space_offset, const float *gpu_weight_data_int8_scales, const float *gpu_bottom_blob_int8_scales) { const int output_column = blockIdx.x * blockDim.x + threadIdx.x; const int output_row = blockIdx.y * blockDim.y + threadIdx.y; const int current_group = blockIdx.z * blockDim.z + threadIdx.z; const int input_channel = current_group; extern __shared__ float buffer[]; float* shared_kptr = buffer; const int k_index = threadIdx.x; if (k_index < product_info.maxk) { const signed char* kptr = (const signed char*)weight_data + product_info.maxk * current_group; shared_kptr[k_index] = kptr[k_index]; } __syncthreads(); if (output_column >= output_info.w || output_row >= output_info.h || current_group >= a_info.c) { return; } int sum = 0; const signed char* sptr = a_input + input_channel * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w; for (int k = 0; k < product_info.maxk; k++) { const signed char val = sptr [gpu_space_offset[k]]; const signed char w = shared_kptr[k]; sum += val * w; } const int output_index = current_group * output_info.cstep + output_row * output_info.w + output_column; if (product_info.use_int8_requantize) { // requantize and relu float scale_in; if (gpu_weight_data_int8_scales[current_group] == 0) scale_in = 0; else scale_in = 1.f / (gpu_bottom_blob_int8_scales[current_group] * gpu_weight_data_int8_scales[current_group]); float sumfp32 = sum * scale_in; if (product_info.bias_term) sumfp32 += bias_data[current_group]; float scale_out = *product_info.gpu_top_blob_int8_scale; //FIXME load param signed char sums8 = float2int8(sumfp32 * scale_out); if (product_info.activation_type == 1) { sums8 = max(sums8, (signed char)0); } output[output_index] = sums8; } else { // dequantize and relu float scale_in; if (gpu_weight_data_int8_scales[current_group] == 0) scale_in = 0; else scale_in = 1.f / (gpu_bottom_blob_int8_scales[current_group] * gpu_weight_data_int8_scales[current_group]); float sumfp32 = sum * scale_in; if (product_info.bias_term) sumfp32 += bias_data[current_group]; if (product_info.activation_type == 1) { sumfp32 = max(sumfp32, 0.f); } ((float*)output)[output_index] = sumfp32; } } __global__ void gpu_convolutiondepthwise_cuda_forward_group_int8(const signed char* a_input, const ncnn::CudaMatInfo a_info, const signed char* weight_data, const ncnn::CudaMatInfo weight_info, const float* bias_data, const float* activation_params, signed char* output, 
const ncnn::CudaMatInfo output_info, const ncnn::ConvolutionDepthWise_cuda::ConvolutionDepthWise_info product_info, const int* gpu_space_offset, const int channels_g, const int num_output_g, const float *gpu_weight_data_int8_scales, const float *gpu_bottom_blob_int8_scales) { const int output_column = blockIdx.x * blockDim.x + threadIdx.x; const int output_row = blockIdx.y * blockDim.y + threadIdx.y; const int total_output = blockIdx.z * blockDim.z + threadIdx.z; const int current_num_output_g = total_output % num_output_g; const int current_group = total_output / num_output_g; extern __shared__ signed char buffer8[]; signed char* shared_kptr = buffer8; const int k_index = threadIdx.x; if (k_index < product_info.maxk) { const signed char* weight_data_ptr = (const signed char*)weight_data + product_info.maxk * channels_g * num_output_g * current_group; const signed char* kptr = (const signed char*)weight_data_ptr + product_info.maxk * channels_g * current_num_output_g; for (int input_channel = 0; input_channel < channels_g; input_channel++) { shared_kptr[input_channel * product_info.maxk + k_index] = kptr[input_channel * product_info.maxk + k_index]; } } __syncthreads(); if (output_column >= output_info.w || output_row >= output_info.h || current_num_output_g >= num_output_g || current_group >= product_info.group) { return; } int sum = 0; for (int input_channel = 0; input_channel < channels_g; input_channel++) { const signed char* sptr = a_input + (channels_g * current_group + input_channel) * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w; for (int k = 0; k < product_info.maxk; k++) { const signed char val = sptr [gpu_space_offset[k]]; const signed char w = shared_kptr[input_channel * product_info.maxk + k]; sum += val * w; } } const int output_index = (current_group * num_output_g + current_num_output_g) * output_info.cstep + output_row * output_info.w + output_column; if (product_info.use_int8_requantize) { // requantize and relu float scale_in; if (gpu_weight_data_int8_scales[current_group] == 0) scale_in = 0; else scale_in = 1.f / (gpu_bottom_blob_int8_scales[current_group] * gpu_weight_data_int8_scales[current_group]); float sumfp32 = sum * scale_in; if (product_info.bias_term) sumfp32 += bias_data[current_group * num_output_g + current_num_output_g]; float scale_out = *product_info.gpu_top_blob_int8_scale; //FIXME load param signed char sums8 = float2int8(sumfp32 * scale_out); if (product_info.activation_type == 1) { sums8 = max(sums8, (signed char)0); } output[output_index] = sums8; } else { // dequantize and relu float scale_in; if (gpu_weight_data_int8_scales[current_group] == 0) scale_in = 0; else scale_in = 1.f / (gpu_bottom_blob_int8_scales[current_group] * gpu_weight_data_int8_scales[current_group]); float sumfp32 = sum * scale_in; if (product_info.bias_term) sumfp32 += bias_data[current_group * num_output_g + current_num_output_g]; if (product_info.activation_type == 1) { sumfp32 = max(sumfp32, 0.f); } ((float*)output)[output_index] = sumfp32; } } namespace ncnn { int convolutiondepthwise_cuda_forward(const CudaMat& bottom_blob, CudaMat& top_blob, const ConvolutionDepthWise_cuda::ConvolutionDepthWise_info& info) { const ncnn::CudaMatInfo bottom_blob_info{bottom_blob}; const ncnn::CudaMatInfo top_blob_info{top_blob}; const ncnn::CudaMatInfo weight_info{*info.gpu_weight_data}; if (bottom_blob_info.c == info.group && info.group == info.num_output) { const int number_of_threads = top_blob.w > info.maxk ? 
top_blob.w : info.maxk; int thread_per_block_x = ((number_of_threads - 1) / 32 + 1) * 32; if (thread_per_block_x > 64) thread_per_block_x = 64; int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8; if (thread_per_block_y > 8) thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = info.group; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); hipLaunchKernelGGL(( gpu_convolutiondepthwise_cuda_forward), dim3(grid_size), dim3(block_size), info.group * info.maxk * sizeof(float), 0, static_cast<const float*>(bottom_blob.get_craw_data()), bottom_blob_info, static_cast<const float*>(info.gpu_weight_data->get_craw_data()), weight_info, static_cast<const float*>(info.gpu_bias_data->get_craw_data()), static_cast<const float*>(info.gpu_activation_params->get_craw_data()), static_cast<float*>(top_blob.get_raw_data()), top_blob_info, info, info.gpu_space_ofs.get()); } else { const int channels_g = bottom_blob_info.c / info.group; const int num_output_g = info.num_output / info.group; const int number_of_threads = top_blob.w > info.maxk ? top_blob.w : info.maxk; int thread_per_block_x = ((number_of_threads - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8; if (thread_per_block_y > 8) thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = num_output_g * info.group; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); hipLaunchKernelGGL(( gpu_convolutiondepthwise_cuda_forward_group), dim3(grid_size), dim3(block_size), num_output_g * channels_g * info.maxk * sizeof(float), 0, static_cast<const float*>(bottom_blob.get_craw_data()), bottom_blob_info, static_cast<const float*>(info.gpu_weight_data->get_craw_data()), weight_info, static_cast<const float*>(info.gpu_bias_data->get_craw_data()), static_cast<const float*>(info.gpu_activation_params->get_craw_data()), static_cast<float*>(top_blob.get_raw_data()), top_blob_info, info, info.gpu_space_ofs.get(), channels_g, num_output_g); } return 0; } int convolutiondepthwise_cuda_forward_int8(const CudaMat& bottom_blob, CudaMat& top_blob, const ConvolutionDepthWise_cuda::ConvolutionDepthWise_info& info) { const ncnn::CudaMatInfo bottom_blob_info{bottom_blob}; const ncnn::CudaMatInfo top_blob_info{top_blob}; const ncnn::CudaMatInfo weight_info{*info.gpu_weight_data}; if (bottom_blob_info.c == info.group && info.group == info.num_output) { const int number_of_threads = top_blob.w > info.maxk ? 
top_blob.w : info.maxk; int thread_per_block_x = ((number_of_threads - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8; if (thread_per_block_y > 8) thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = info.group; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); hipLaunchKernelGGL(( gpu_convolutiondepthwise_cuda_forward_int8), dim3(grid_size), dim3(block_size), info.group * info.maxk * sizeof(signed char), 0, static_cast<const signed char*>(bottom_blob.get_craw_data()), bottom_blob_info, static_cast<const signed char*>(info.gpu_weight_data->get_craw_data()), weight_info, static_cast<const float*>(info.gpu_bias_data->get_craw_data()), static_cast<const float*>(info.gpu_activation_params->get_craw_data()), static_cast<signed char*>(top_blob.get_raw_data()), top_blob_info, info, info.gpu_space_ofs.get(), static_cast<const float*>(info.gpu_weight_data_int8_scales->get_craw_data()), static_cast<const float*>(info.gpu_bottom_blob_int8_scales->get_craw_data())); } else { const int channels_g = bottom_blob_info.c / info.group; const int num_output_g = info.num_output / info.group; const int number_of_threads = top_blob.w > info.maxk ? top_blob.w : info.maxk; int thread_per_block_x = ((number_of_threads - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8; if (thread_per_block_y > 8) thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = num_output_g * info.group; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); hipLaunchKernelGGL(( gpu_convolutiondepthwise_cuda_forward_group_int8), dim3(grid_size), dim3(block_size), num_output_g * channels_g * info.maxk * sizeof(signed char), 0, static_cast<const signed char*>(bottom_blob.get_craw_data()), bottom_blob_info, static_cast<const signed char*>(info.gpu_weight_data->get_craw_data()), weight_info, static_cast<const float*>(info.gpu_bias_data->get_craw_data()), static_cast<const float*>(info.gpu_activation_params->get_craw_data()), static_cast<signed char*>(top_blob.get_raw_data()), top_blob_info, info, info.gpu_space_ofs.get(), channels_g, num_output_g, static_cast<const float*>(info.gpu_weight_data_int8_scales->get_craw_data()), static_cast<const float*>(info.gpu_bottom_blob_int8_scales->get_craw_data())); } return 0; } }
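// Editor's note: a host-only sketch of the launch-shape heuristic used above by
// convolutiondepthwise_cuda_forward() for the depthwise (group == channels) path: the x block
// dimension covers max(output width, maxk) rounded up to a multiple of 32 and capped at 64,
// the y dimension is capped at 8, and the dynamic shared-memory request passed at launch is
// group * maxk * sizeof(float). The sizes below are hypothetical, not taken from a real model.
#include <cstdio>

int main() {
    const int out_w = 56, out_h = 56;  // assumed output width/height
    const int maxk  = 9;               // assumed 3x3 depthwise kernel taps
    const int group = 64;              // depthwise: one input channel per group

    const int number_of_threads = out_w > maxk ? out_w : maxk;
    int tpb_x = ((number_of_threads - 1) / 32 + 1) * 32;
    if (tpb_x > 64) tpb_x = 64;
    int tpb_y = ((out_h - 1) / 8 + 1) * 8;
    if (tpb_y > 8) tpb_y = 8;
    const int tpb_z = 1;

    const int grid_x = (out_w - 1) / tpb_x + 1;
    const int grid_y = (out_h - 1) / tpb_y + 1;
    const int grid_z = (group - 1) / tpb_z + 1;                       // one z slice per group/channel
    const size_t shared_bytes = (size_t)group * maxk * sizeof(float); // dynamic shared memory at launch

    std::printf("block=(%d,%d,%d) grid=(%d,%d,%d) shared=%zu bytes\n",
                tpb_x, tpb_y, tpb_z, grid_x, grid_y, grid_z, shared_bytes);
    return 0;
}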
9135f8d2c4b9867b32a5aa3bbc13629b1a6f3cea.cu
// // Author: Marko Atanasievski // // Copyright (C) 2020 TANCOM SOFTWARE SOLUTIONS Ltd. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // // Parts of this file are originally copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include "cuda_util.h" #include <iostream> #include "mat.h" #include "convolutiondepthwise_cuda.h" static __device__ inline signed char float2int8(float v) { int int32 = static_cast<int>(round(v)); if (int32 > 127) return 127; if (int32 < -127) return -127; return (signed char)int32; } __global__ void gpu_convolutiondepthwise_cuda_forward(const float* a_input, const ncnn::CudaMatInfo a_info, const float* weight_data, const ncnn::CudaMatInfo weight_info, const float* bias_data, const float* activation_params, float* output, const ncnn::CudaMatInfo output_info, const ncnn::ConvolutionDepthWise_cuda::ConvolutionDepthWise_info product_info, const int* gpu_space_offset) { const int output_column = blockIdx.x * blockDim.x + threadIdx.x; const int output_row = blockIdx.y * blockDim.y + threadIdx.y; const int current_group = blockIdx.z * blockDim.z + threadIdx.z; const int input_channel = current_group; extern __shared__ float buffer[]; float* shared_kptr = buffer; const int k_index = threadIdx.x; if (k_index < product_info.maxk) { const float* kptr = (const float*)weight_data + product_info.maxk * current_group; shared_kptr[k_index] = kptr[k_index]; } __syncthreads(); if (output_column >= output_info.w || output_row >= output_info.h || current_group >= a_info.c) { return; } float sum = 0.f; if (product_info.bias_term) { sum += bias_data[current_group]; } const float* sptr = a_input + input_channel * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w; for (int k = 0; k < product_info.maxk; k++) { const float val = sptr [gpu_space_offset[k]]; const float w = shared_kptr[k]; sum += val * w; } if (product_info.activation_type == 1) { sum = max(sum, 0.f); } else if (product_info.activation_type == 2) { float slope = activation_params[0]; sum = sum > 0.f ? 
sum : sum * slope; } else if (product_info.activation_type == 3) { float min = activation_params[0]; float max = activation_params[1]; if (sum < min) sum = min; if (sum > max) sum = max; } else if (product_info.activation_type == 4) { sum = static_cast<float>(1.f / (1.f + exp(-sum))); } else if (product_info.activation_type == 5) { const float MISH_THRESHOLD = 20; float x = sum, y; if (x > MISH_THRESHOLD) y = x; else if (x < -MISH_THRESHOLD) y = expf(x); else y = logf(expf(x) + 1); sum = static_cast<float>(x * tanh(y)); } const int output_index = current_group * output_info.cstep + output_row * output_info.w + output_column; output[output_index] = sum; } __global__ void gpu_convolutiondepthwise_cuda_forward_group(const float* a_input, const ncnn::CudaMatInfo a_info, const float* weight_data, const ncnn::CudaMatInfo weight_info, const float* bias_data, const float* activation_params, float* output, const ncnn::CudaMatInfo output_info, const ncnn::ConvolutionDepthWise_cuda::ConvolutionDepthWise_info product_info, const int* gpu_space_offset, const int channels_g, const int num_output_g) { const int output_column = blockIdx.x * blockDim.x + threadIdx.x; const int output_row = blockIdx.y * blockDim.y + threadIdx.y; const int total_output = blockIdx.z * blockDim.z + threadIdx.z; const int current_num_output_g = total_output % num_output_g; const int current_group = total_output / num_output_g; extern __shared__ float buffer[]; float* shared_kptr = buffer; const int k_index = threadIdx.x; if (k_index < product_info.maxk) { const float* weight_data_ptr = (const float*)weight_data + product_info.maxk * channels_g * num_output_g * current_group; const float* kptr = (const float*)weight_data_ptr + product_info.maxk * channels_g * current_num_output_g; for (int input_channel = 0; input_channel < channels_g; input_channel++) { shared_kptr[input_channel * product_info.maxk + k_index] = kptr[input_channel * product_info.maxk + k_index]; } } __syncthreads(); if (output_column >= output_info.w || output_row >= output_info.h || current_num_output_g >= num_output_g || current_group >= product_info.group) { return; } float sum = 0.f; if (product_info.bias_term) { sum += bias_data[num_output_g * current_group + current_num_output_g]; } for (int input_channel = 0; input_channel < channels_g; input_channel++) { const float* sptr = a_input + (channels_g * current_group + input_channel) * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w; for (int k = 0; k < product_info.maxk; k++) { const float val = sptr [gpu_space_offset[k]]; const float w = shared_kptr[input_channel * product_info.maxk + k]; sum += val * w; } } if (product_info.activation_type == 1) { sum = max(sum, 0.f); } else if (product_info.activation_type == 2) { float slope = activation_params[0]; sum = sum > 0.f ? 
sum : sum * slope; } else if (product_info.activation_type == 3) { float min = activation_params[0]; float max = activation_params[1]; if (sum < min) sum = min; if (sum > max) sum = max; } else if (product_info.activation_type == 4) { sum = static_cast<float>(1.f / (1.f + exp(-sum))); } else if (product_info.activation_type == 5) { const float MISH_THRESHOLD = 20; float x = sum, y; if (x > MISH_THRESHOLD) y = x; else if (x < -MISH_THRESHOLD) y = expf(x); else y = logf(expf(x) + 1); sum = static_cast<float>(x * tanh(y)); } const int output_index = (current_group*num_output_g+current_num_output_g) * output_info.cstep + output_row * output_info.w + output_column; output[output_index] = sum; } __global__ void gpu_convolutiondepthwise_cuda_forward_int8(const signed char* a_input, const ncnn::CudaMatInfo a_info, const signed char* weight_data, const ncnn::CudaMatInfo weight_info, const float* bias_data, const float* activation_params, signed char* output, const ncnn::CudaMatInfo output_info, const ncnn::ConvolutionDepthWise_cuda::ConvolutionDepthWise_info product_info, const int* gpu_space_offset, const float *gpu_weight_data_int8_scales, const float *gpu_bottom_blob_int8_scales) { const int output_column = blockIdx.x * blockDim.x + threadIdx.x; const int output_row = blockIdx.y * blockDim.y + threadIdx.y; const int current_group = blockIdx.z * blockDim.z + threadIdx.z; const int input_channel = current_group; extern __shared__ float buffer[]; float* shared_kptr = buffer; const int k_index = threadIdx.x; if (k_index < product_info.maxk) { const signed char* kptr = (const signed char*)weight_data + product_info.maxk * current_group; shared_kptr[k_index] = kptr[k_index]; } __syncthreads(); if (output_column >= output_info.w || output_row >= output_info.h || current_group >= a_info.c) { return; } int sum = 0; const signed char* sptr = a_input + input_channel * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w; for (int k = 0; k < product_info.maxk; k++) { const signed char val = sptr [gpu_space_offset[k]]; const signed char w = shared_kptr[k]; sum += val * w; } const int output_index = current_group * output_info.cstep + output_row * output_info.w + output_column; if (product_info.use_int8_requantize) { // requantize and relu float scale_in; if (gpu_weight_data_int8_scales[current_group] == 0) scale_in = 0; else scale_in = 1.f / (gpu_bottom_blob_int8_scales[current_group] * gpu_weight_data_int8_scales[current_group]); float sumfp32 = sum * scale_in; if (product_info.bias_term) sumfp32 += bias_data[current_group]; float scale_out = *product_info.gpu_top_blob_int8_scale; //FIXME load param signed char sums8 = float2int8(sumfp32 * scale_out); if (product_info.activation_type == 1) { sums8 = max(sums8, (signed char)0); } output[output_index] = sums8; } else { // dequantize and relu float scale_in; if (gpu_weight_data_int8_scales[current_group] == 0) scale_in = 0; else scale_in = 1.f / (gpu_bottom_blob_int8_scales[current_group] * gpu_weight_data_int8_scales[current_group]); float sumfp32 = sum * scale_in; if (product_info.bias_term) sumfp32 += bias_data[current_group]; if (product_info.activation_type == 1) { sumfp32 = max(sumfp32, 0.f); } ((float*)output)[output_index] = sumfp32; } } __global__ void gpu_convolutiondepthwise_cuda_forward_group_int8(const signed char* a_input, const ncnn::CudaMatInfo a_info, const signed char* weight_data, const ncnn::CudaMatInfo weight_info, const float* bias_data, const float* activation_params, signed char* output, 
const ncnn::CudaMatInfo output_info, const ncnn::ConvolutionDepthWise_cuda::ConvolutionDepthWise_info product_info, const int* gpu_space_offset, const int channels_g, const int num_output_g, const float *gpu_weight_data_int8_scales, const float *gpu_bottom_blob_int8_scales) { const int output_column = blockIdx.x * blockDim.x + threadIdx.x; const int output_row = blockIdx.y * blockDim.y + threadIdx.y; const int total_output = blockIdx.z * blockDim.z + threadIdx.z; const int current_num_output_g = total_output % num_output_g; const int current_group = total_output / num_output_g; extern __shared__ signed char buffer8[]; signed char* shared_kptr = buffer8; const int k_index = threadIdx.x; if (k_index < product_info.maxk) { const signed char* weight_data_ptr = (const signed char*)weight_data + product_info.maxk * channels_g * num_output_g * current_group; const signed char* kptr = (const signed char*)weight_data_ptr + product_info.maxk * channels_g * current_num_output_g; for (int input_channel = 0; input_channel < channels_g; input_channel++) { shared_kptr[input_channel * product_info.maxk + k_index] = kptr[input_channel * product_info.maxk + k_index]; } } __syncthreads(); if (output_column >= output_info.w || output_row >= output_info.h || current_num_output_g >= num_output_g || current_group >= product_info.group) { return; } int sum = 0; for (int input_channel = 0; input_channel < channels_g; input_channel++) { const signed char* sptr = a_input + (channels_g * current_group + input_channel) * a_info.cstep + output_row * product_info.stride_h * a_info.w + output_column * product_info.stride_w; for (int k = 0; k < product_info.maxk; k++) { const signed char val = sptr [gpu_space_offset[k]]; const signed char w = shared_kptr[input_channel * product_info.maxk + k]; sum += val * w; } } const int output_index = (current_group * num_output_g + current_num_output_g) * output_info.cstep + output_row * output_info.w + output_column; if (product_info.use_int8_requantize) { // requantize and relu float scale_in; if (gpu_weight_data_int8_scales[current_group] == 0) scale_in = 0; else scale_in = 1.f / (gpu_bottom_blob_int8_scales[current_group] * gpu_weight_data_int8_scales[current_group]); float sumfp32 = sum * scale_in; if (product_info.bias_term) sumfp32 += bias_data[current_group * num_output_g + current_num_output_g]; float scale_out = *product_info.gpu_top_blob_int8_scale; //FIXME load param signed char sums8 = float2int8(sumfp32 * scale_out); if (product_info.activation_type == 1) { sums8 = max(sums8, (signed char)0); } output[output_index] = sums8; } else { // dequantize and relu float scale_in; if (gpu_weight_data_int8_scales[current_group] == 0) scale_in = 0; else scale_in = 1.f / (gpu_bottom_blob_int8_scales[current_group] * gpu_weight_data_int8_scales[current_group]); float sumfp32 = sum * scale_in; if (product_info.bias_term) sumfp32 += bias_data[current_group * num_output_g + current_num_output_g]; if (product_info.activation_type == 1) { sumfp32 = max(sumfp32, 0.f); } ((float*)output)[output_index] = sumfp32; } } namespace ncnn { int convolutiondepthwise_cuda_forward(const CudaMat& bottom_blob, CudaMat& top_blob, const ConvolutionDepthWise_cuda::ConvolutionDepthWise_info& info) { const ncnn::CudaMatInfo bottom_blob_info{bottom_blob}; const ncnn::CudaMatInfo top_blob_info{top_blob}; const ncnn::CudaMatInfo weight_info{*info.gpu_weight_data}; if (bottom_blob_info.c == info.group && info.group == info.num_output) { const int number_of_threads = top_blob.w > info.maxk ? 
top_blob.w : info.maxk; int thread_per_block_x = ((number_of_threads - 1) / 32 + 1) * 32; if (thread_per_block_x > 64) thread_per_block_x = 64; int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8; if (thread_per_block_y > 8) thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = info.group; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); gpu_convolutiondepthwise_cuda_forward<<<grid_size, block_size, info.group * info.maxk * sizeof(float)>>>(static_cast<const float*>(bottom_blob.get_craw_data()), bottom_blob_info, static_cast<const float*>(info.gpu_weight_data->get_craw_data()), weight_info, static_cast<const float*>(info.gpu_bias_data->get_craw_data()), static_cast<const float*>(info.gpu_activation_params->get_craw_data()), static_cast<float*>(top_blob.get_raw_data()), top_blob_info, info, info.gpu_space_ofs.get()); } else { const int channels_g = bottom_blob_info.c / info.group; const int num_output_g = info.num_output / info.group; const int number_of_threads = top_blob.w > info.maxk ? top_blob.w : info.maxk; int thread_per_block_x = ((number_of_threads - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8; if (thread_per_block_y > 8) thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = num_output_g * info.group; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); gpu_convolutiondepthwise_cuda_forward_group<<<grid_size, block_size, num_output_g * channels_g * info.maxk * sizeof(float)>>>(static_cast<const float*>(bottom_blob.get_craw_data()), bottom_blob_info, static_cast<const float*>(info.gpu_weight_data->get_craw_data()), weight_info, static_cast<const float*>(info.gpu_bias_data->get_craw_data()), static_cast<const float*>(info.gpu_activation_params->get_craw_data()), static_cast<float*>(top_blob.get_raw_data()), top_blob_info, info, info.gpu_space_ofs.get(), channels_g, num_output_g); } return 0; } int convolutiondepthwise_cuda_forward_int8(const CudaMat& bottom_blob, CudaMat& top_blob, const ConvolutionDepthWise_cuda::ConvolutionDepthWise_info& info) { const ncnn::CudaMatInfo bottom_blob_info{bottom_blob}; const ncnn::CudaMatInfo top_blob_info{top_blob}; const ncnn::CudaMatInfo weight_info{*info.gpu_weight_data}; if (bottom_blob_info.c == info.group && info.group == info.num_output) { const int number_of_threads = top_blob.w > info.maxk ? 
top_blob.w : info.maxk; int thread_per_block_x = ((number_of_threads - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8; if (thread_per_block_y > 8) thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = info.group; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); gpu_convolutiondepthwise_cuda_forward_int8<<<grid_size, block_size, info.group * info.maxk * sizeof(signed char)>>>(static_cast<const signed char*>(bottom_blob.get_craw_data()), bottom_blob_info, static_cast<const signed char*>(info.gpu_weight_data->get_craw_data()), weight_info, static_cast<const float*>(info.gpu_bias_data->get_craw_data()), static_cast<const float*>(info.gpu_activation_params->get_craw_data()), static_cast<signed char*>(top_blob.get_raw_data()), top_blob_info, info, info.gpu_space_ofs.get(), static_cast<const float*>(info.gpu_weight_data_int8_scales->get_craw_data()), static_cast<const float*>(info.gpu_bottom_blob_int8_scales->get_craw_data())); } else { const int channels_g = bottom_blob_info.c / info.group; const int num_output_g = info.num_output / info.group; const int number_of_threads = top_blob.w > info.maxk ? top_blob.w : info.maxk; int thread_per_block_x = ((number_of_threads - 1) / 64 + 1) * 64; if (thread_per_block_x > 128) thread_per_block_x = 128; int thread_per_block_y = ((top_blob.h - 1) / 8 + 1) * 8; if (thread_per_block_y > 8) thread_per_block_y = 8; const int thread_per_block_z = 1; const int total_number_of_channels = num_output_g * info.group; const int total_number_of_columns = top_blob.w; const int total_number_of_rows = top_blob.h; const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z); const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1, (total_number_of_rows - 1) / thread_per_block_y + 1, (total_number_of_channels - 1) / thread_per_block_z + 1); gpu_convolutiondepthwise_cuda_forward_group_int8<<<grid_size, block_size, num_output_g * channels_g * info.maxk * sizeof(signed char)>>>(static_cast<const signed char*>(bottom_blob.get_craw_data()), bottom_blob_info, static_cast<const signed char*>(info.gpu_weight_data->get_craw_data()), weight_info, static_cast<const float*>(info.gpu_bias_data->get_craw_data()), static_cast<const float*>(info.gpu_activation_params->get_craw_data()), static_cast<signed char*>(top_blob.get_raw_data()), top_blob_info, info, info.gpu_space_ofs.get(), channels_g, num_output_g, static_cast<const float*>(info.gpu_weight_data_int8_scales->get_craw_data()), static_cast<const float*>(info.gpu_bottom_blob_int8_scales->get_craw_data())); } return 0; } }
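The int8 kernels above share one piece of arithmetic: the int32 accumulator is dequantized with scale_in = 1 / (gpu_bottom_blob_int8_scales[g] * gpu_weight_data_int8_scales[g]), the bias is added in float, and, when use_int8_requantize is set, the result is rescaled by the top-blob scale and converted back to int8. The standalone host-side sketch below reproduces that arithmetic only; float2int8_sketch is a stand-in for ncnn's float2int8 (assumed here to round to nearest and saturate to [-127, 127], which may differ from ncnn's exact definition), and every scale, bias, and accumulator value is made up for illustration.

#include <cmath>
#include <cstdio>

// Stand-in for ncnn's float2int8 (assumption): round to nearest, saturate to [-127, 127].
static signed char float2int8_sketch(float v)
{
    long q = std::lround(v);
    if (q > 127) q = 127;
    if (q < -127) q = -127;
    return static_cast<signed char>(q);
}

int main()
{
    // Hypothetical per-group scales and an int32 accumulator; none come from a real model.
    const float bottom_scale = 32.f;  // gpu_bottom_blob_int8_scales[current_group]
    const float weight_scale = 64.f;  // gpu_weight_data_int8_scales[current_group]
    const float top_scale    = 16.f;  // *product_info.gpu_top_blob_int8_scale
    const float bias         = 0.25f; // bias_data[...]
    const int   sum          = 1234;  // accumulated val * w over the maxk window

    // Dequantize, guarding against a zero weight scale exactly as the kernels do.
    const float scale_in = (weight_scale == 0.f) ? 0.f : 1.f / (bottom_scale * weight_scale);
    const float sumfp32  = sum * scale_in + bias;

    // Requantize path (use_int8_requantize): scale back up and saturate to int8.
    const signed char sums8 = float2int8_sketch(sumfp32 * top_scale);
    std::printf("dequantized: %.6f  requantized: %d\n", sumfp32, sums8);
    return 0;
}

The zero check on the weight scale mirrors the guard in the kernels, which would otherwise divide by zero for an all-zero weight group.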
0bae48d3a05a9ccb90ba498b3ad644614b645a20.hip
// !!! This is a file automatically generated by hipify!!! /* -*- c-basic-offset:2; tab-width:2; indent-tabs-mode:nil -*- * * @(#)matrix_free_gpu.cu * @author Karl Ljungkvist <[email protected]> * */ #include <deal.II/fe/fe.h> #include <deal.II/fe/fe_values.h> #include <deal.II/matrix_free/shape_info.h> #include <deal.II/grid/filtered_iterator.h> #include <deal.II/base/graph_coloring.h> #include "coloring.h" #ifdef MATRIX_FREE_HANGING_NODES #include "hanging_nodes.cuh" #endif #include "cuda_utils.cuh" //============================================================================= // MatrixFreeGpu is an object living on the CPU, but with most of its member // data residing on the gpu. Here, we keep all the data related to a matrix-free // evaluation. //============================================================================= // helper object for (re)initialization of main class template <int dim, typename Number> class ReinitHelper { private: MatrixFreeGpu<dim,Number> *data; // host arrays std::vector<unsigned int> loc2glob_host; std::vector<Point<dim,Number> > quad_points_host; std::vector<Number> JxW_host; std::vector<Number> inv_jac_host; #ifdef MATRIX_FREE_HANGING_NODES std::vector<unsigned int> constraint_mask_host; #endif // local buffers std::vector<types::global_dof_index> local_dof_indices; FEValues<dim> fe_values; // get the translation from default dof numbering to a lexicographic one const std::vector<unsigned int> &lexicographic_inv; std::vector<unsigned int> lexicographic_dof_indices; const unsigned int fe_degree; const unsigned int dofs_per_cell; const unsigned int qpts_per_cell; // TODO: fix update flags const UpdateFlags &update_flags; // For setting up hanging node constraints #ifdef MATRIX_FREE_HANGING_NODES bool hanging_node_constraints_possible; HangingNodes<dim> hanging_nodes; #endif // for padding const unsigned int rowlength; public: ReinitHelper(MatrixFreeGpu<dim,Number> *data, const Mapping<dim> &mapping, const FiniteElement<dim> &fe, const Quadrature<1> &quad, const internal::MatrixFreeFunctions::ShapeInfo<Number> &shape_info, const DoFHandler<dim> &dof_handler, const UpdateFlags &update_flags) : data(data), fe_degree(data->fe_degree), dofs_per_cell(data->dofs_per_cell), qpts_per_cell(data->qpts_per_cell), fe_values (mapping, fe, Quadrature<dim>(quad), update_inverse_jacobians | update_quadrature_points | update_values | update_gradients | update_JxW_values), lexicographic_inv(shape_info.lexicographic_numbering), #ifdef MATRIX_FREE_HANGING_NODES hanging_node_constraints_possible(data->level_mg_handler == numbers::invalid_unsigned_int), hanging_nodes(fe_degree,dof_handler,lexicographic_inv), #endif update_flags(update_flags), rowlength(data->get_rowlength()) { local_dof_indices.resize(data->dofs_per_cell); lexicographic_dof_indices.resize(dofs_per_cell); } template <typename Iterator> void init(const Iterator &begin, const Iterator &end, const dealii::ConstraintMatrix &constraints); template <typename Iterator> void init_with_coloring(const Iterator &begin, const Iterator &end, const dealii::ConstraintMatrix &constraints); void setup_color_arrays(const unsigned int num_colors); void setup_cell_arrays(const unsigned int c); /** * Loop over all cells from begin to end and set up data structures */ template <typename Iterator> void cell_loop(const Iterator& begin, const Iterator& end); /** * Version used with coloring. 
In this case we want to loop over the resulting * std::vector from the coloring algorithm */ template <typename CellFilter> void cell_loop(const typename std::vector<CellFilter>::iterator & begin, const typename std::vector<CellFilter>::iterator & end); /** * Called internally from cell_loop to fill in data for one cell */ template <typename T> void get_cell_data(const T& cell,const unsigned int cellid); void alloc_and_copy_arrays(const unsigned int c); }; template <int dim, typename Number> template <typename Iterator> void ReinitHelper<dim,Number>::init(const Iterator &begin, const Iterator &end, const dealii::ConstraintMatrix &constraints) { data->num_colors = 1; setup_color_arrays(1); data->n_cells[0] = data->n_cells_tot; setup_cell_arrays(0); cell_loop(begin,end); // now allocate and copy stuff to the device alloc_and_copy_arrays(0); } template <int dim, typename Number> template <typename Iterator> void ReinitHelper<dim,Number>::init_with_coloring(const Iterator &begin, const Iterator &end, const dealii::ConstraintMatrix &constraints) { // create graph coloring typedef FilteredIterator<Iterator> CellFilter; std::vector<std::vector<CellFilter > > graph = GraphColoringWrapper<dim,Iterator>::make_graph_coloring(begin, end, constraints); data->num_colors = graph.size(); setup_color_arrays(data->num_colors); for(int c = 0; c < data->num_colors; ++c) { data->n_cells[c] = graph[c].size(); setup_cell_arrays(c); cell_loop<CellFilter>(graph[c].begin(), graph[c].end()); // now allocate and copy stuff to the device alloc_and_copy_arrays(c); } } template <int dim, typename Number> void ReinitHelper<dim,Number>::setup_color_arrays(const unsigned int num_colors) { data->n_cells.resize(num_colors); data->grid_dim.resize(num_colors); data->block_dim.resize(num_colors); data->loc2glob.resize(num_colors); #ifdef MATRIX_FREE_HANGING_NODES data->constraint_mask.resize(num_colors); #endif data->rowstart.resize(num_colors); if(update_flags & update_quadrature_points) data->quadrature_points.resize(num_colors); if(update_flags & update_JxW_values) data->JxW.resize(num_colors); if(update_flags & update_gradients) data->inv_jac.resize(num_colors); } template <int dim, typename Number> void ReinitHelper<dim,Number>::setup_cell_arrays(const unsigned int c) { const unsigned int n_cells = data->n_cells[c]; const unsigned int cells_per_block = data->cells_per_block; // setup kernel parameters const unsigned int apply_num_blocks = ceil(n_cells / float(cells_per_block)); const unsigned int apply_x_num_blocks = round(sqrt(apply_num_blocks)); // get closest to even square. 
const unsigned int apply_y_num_blocks = ceil(double(apply_num_blocks)/apply_x_num_blocks); data->grid_dim[c] = dim3(apply_x_num_blocks,apply_y_num_blocks); const unsigned int n_dofs_1d = fe_degree+1; if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_in_elem) { if(dim==1) data->block_dim[c] = dim3(n_dofs_1d*cells_per_block); else if(dim==2) data->block_dim[c] = dim3(n_dofs_1d*cells_per_block,n_dofs_1d); else if(dim==3) data->block_dim[c] = dim3(n_dofs_1d*cells_per_block,n_dofs_1d,n_dofs_1d); } else { data->block_dim[c] = dim3(cells_per_block); } loc2glob_host.resize(n_cells*rowlength); if(update_flags & update_quadrature_points) quad_points_host.resize(n_cells*rowlength); if(update_flags & update_JxW_values) JxW_host.resize(n_cells*rowlength); if(update_flags & update_gradients) { #ifdef MATRIX_FREE_UNIFORM_MESH // for uniform meshes, it is enough to store one number per element inv_jac_host.resize(n_cells); #else inv_jac_host.resize(n_cells*rowlength*dim*dim); #endif } #ifdef MATRIX_FREE_HANGING_NODES constraint_mask_host.resize(n_cells); #endif } template <int dim, typename Number> template <typename Iterator> void ReinitHelper<dim,Number>::cell_loop(const Iterator& begin, const Iterator& end) { Iterator cell=begin; unsigned int cellid=0; for (; cell!=end; ++cell,++cellid) get_cell_data(cell,cellid); } template <int dim, typename Number> template <typename CellFilter> void ReinitHelper<dim,Number>::cell_loop(const typename std::vector<CellFilter>::iterator & begin, const typename std::vector<CellFilter>::iterator & end) { typename std::vector<CellFilter>::iterator cell=begin; unsigned int cellid=0; for (; cell!=end; ++cell,++cellid) get_cell_data(*cell,cellid); // dereference iterator to get underlying cell_iterator } template <int dim, typename Number> template <typename T> void ReinitHelper<dim,Number>::get_cell_data(const T& cell, const unsigned int cellid) { cell->get_active_or_mg_dof_indices(local_dof_indices); for(int i = 0; i < dofs_per_cell; ++i) lexicographic_dof_indices[i] = local_dof_indices[lexicographic_inv[i]]; // setup hanging nodes #ifdef MATRIX_FREE_HANGING_NODES if(hanging_node_constraints_possible) hanging_nodes.setup_constraints (constraint_mask_host[cellid], lexicographic_dof_indices, cell,cellid); #endif memcpy(&loc2glob_host[cellid*rowlength],&lexicographic_dof_indices[0],dofs_per_cell*sizeof(unsigned int)); fe_values.reinit(cell); // quadrature points if(update_flags & update_quadrature_points) { const std::vector<dealii::Point<dim> > & qpts = fe_values.get_quadrature_points(); std::vector<dealii::Point<dim,Number> > qpts_conv(qpts.size()); for(int i=0; i < qpts_conv.size(); ++i) { qpts_conv[i]=dealii::Point<dim,Number> (qpts[i]); } memcpy(&quad_points_host[cellid*rowlength],qpts_conv.data(),qpts_per_cell*sizeof(Point<dim,Number>)); } if(update_flags & update_JxW_values) { const std::vector<double > & jxws_double = fe_values.get_JxW_values(); const unsigned int n = jxws_double.size(); std::vector<Number > jxws(n); for(int i=0; i<n; ++i) jxws[i] = Number(jxws_double[i]); memcpy(&JxW_host[cellid*rowlength],jxws.data(),qpts_per_cell*sizeof(Number)); } if(update_flags & update_gradients) { const std::vector<DerivativeForm<1,dim,dim> >& jacs = fe_values.get_inverse_jacobians(); std::vector<DerivativeForm<1,dim,dim,Number> > jacs_conv(jacs.size()); for(int i=0; i < jacs_conv.size(); ++i) { for(int d1=0; d1<dim; ++d1) for(int d2=0; d2<dim; ++d2) jacs_conv[i][d1][d2] = jacs[i][d1][d2]; } #ifdef MATRIX_FREE_UNIFORM_MESH // for uniform meshes, it is 
enough to store one number per element inv_jac_host[cellid] = jacs_conv[0][0][0]; #else memcpy(&inv_jac_host[cellid*rowlength*dim*dim],jacs_conv.data(),qpts_per_cell*sizeof(DerivativeForm<1,dim,dim,Number>)); #endif } } template <typename T> void transpose(T *dst, const T *src, const unsigned int N, const unsigned int M) { // src is N X M // dst is M X N for(int i = 0; i < N; ++i) for(int j = 0; j < M; ++j) dst[j*N+i] = src[i*M+j]; } // TODO: if a unified gpuarray / point would exist, only need one template argument template <typename T> void transpose_inplace(std::vector<T> &a_host, const unsigned int n, const unsigned int m) { // convert to structure-of-array std::vector<T> old(a_host.size()); old.swap(a_host); transpose(&a_host[0],&old[0],n,m); } template <typename T1, typename T2> void alloc_and_copy(T1 **a_dev, std::vector<T2> &a_host, const unsigned int n) { CUDA_CHECK_SUCCESS(hipMalloc(a_dev,n*sizeof(T1))); CUDA_CHECK_SUCCESS(hipMemcpy(*a_dev, &a_host[0], n*sizeof(T1), hipMemcpyHostToDevice)); } template <int dim, typename Number> void ReinitHelper<dim,Number>::alloc_and_copy_arrays(const unsigned int c) { const unsigned n_cells = data->n_cells[c]; // local-to-global mapping if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_over_elems) { transpose_inplace(loc2glob_host,n_cells, rowlength); } alloc_and_copy(&data->loc2glob[c], loc2glob_host, n_cells*rowlength); // quadrature points if(update_flags & update_quadrature_points) { if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_over_elems) { transpose_inplace(quad_points_host,n_cells, rowlength); } alloc_and_copy(&data->quadrature_points[c], quad_points_host, n_cells*rowlength); } // jacobian determinants/quadrature weights if(update_flags & update_JxW_values) { if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_over_elems) { transpose_inplace(JxW_host,n_cells, rowlength); } alloc_and_copy(&data->JxW[c], JxW_host, n_cells*rowlength); } // inverse jacobians if(update_flags & update_gradients) { #ifdef MATRIX_FREE_UNIFORM_MESH // for uniform meshes, it is enough to store one number per element alloc_and_copy(&data->inv_jac[c], inv_jac_host, n_cells); #else // now this has index order: cellid*qpts_per_cell*dim*dim + q*dim*dim + i // this is not good at all? // convert so that all J_11 elements are together, all J_12 elements together, etc. // i.e. 
this index order: i*qpts_per_cell*n_cells + cellid*qpts_per_cell + q // this is good for a dof-level parallelization transpose_inplace(inv_jac_host,rowlength*n_cells,dim*dim); // transpose second time means we get the following index order: // q*n_cells*dim*dim + i*n_cells + cellid // which is good for an element-level parallelization if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_over_elems) { transpose_inplace(inv_jac_host,n_cells*dim*dim, rowlength); } alloc_and_copy(&data->inv_jac[c], inv_jac_host, n_cells*dim*dim*rowlength); #endif } #ifdef MATRIX_FREE_HANGING_NODES alloc_and_copy(&data->constraint_mask[c],constraint_mask_host,n_cells); #endif } //============================================================================= // Initialization function //============================================================================= template <int dim, typename Number> void MatrixFreeGpu<dim,Number>:: reinit(const Mapping<dim> &mapping, const DoFHandler<dim> &dof_handler, const ConstraintMatrix &constraints, const Quadrature<1> &quad, const AdditionalData additional_data) { if(typeid(Number) == typeid(double)) { hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); } use_coloring = additional_data.use_coloring; const UpdateFlags &update_flags = additional_data.mapping_update_flags; if(additional_data.parallelization_scheme != scheme_par_over_elems && additional_data.parallelization_scheme != scheme_par_in_elem) { fprintf(stderr,"Invalid parallelization scheme!\n"); exit(1); } this->parallelization_scheme = additional_data.parallelization_scheme; free(); // todo, only free if we actually need arrays of different length const FiniteElement<dim> &fe = dof_handler.get_fe(); fe_degree = fe.degree; const unsigned int n_dofs_1d = fe_degree+1; const unsigned int n_q_points_1d = quad.size(); // set row length to the closest power of two larger than or equal to the number of threads rowlength = 1 << static_cast<unsigned int>(ceil(dim*log2(fe_degree+1.0))); Assert(n_dofs_1d == n_q_points_1d,ExcMessage("n_q_points_1d must be equal to fe_degree+1.")); level_mg_handler = additional_data.level_mg_handler; if(level_mg_handler != numbers::invalid_unsigned_int) { n_dofs = dof_handler.n_dofs(level_mg_handler); n_cells_tot = dof_handler.get_triangulation().n_cells(level_mg_handler); } else { n_dofs = dof_handler.n_dofs(); n_cells_tot = dof_handler.get_triangulation().n_active_cells(); } dofs_per_cell = fe.dofs_per_cell; qpts_per_cell = ipowf(n_q_points_1d,dim); // shape info, a single copy const internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info(quad,fe); unsigned int size_shape_values = n_dofs_1d*n_q_points_1d*sizeof(Number); // test if shape_info.shape_values_number.size() == (fe_degree+1)*num_quad_1d ConstantMemoryWrapper<Number>::copy_to_shape_values(&shape_info.shape_values_number[0], size_shape_values); if(update_flags & update_gradients) { ConstantMemoryWrapper<Number>::copy_to_shape_gradient(&shape_info.shape_gradient_number[0], size_shape_values); } // Setup number of cells per CUDA thread block cells_per_block = cells_per_block_shmem(dim,fe_degree); //--------------------------------------------------------------------------- // cell-specific stuff (indices, JxW, inverse jacobian, quadrature points, etc) //--------------------------------------------------------------------------- ReinitHelper<dim,Number> helper(this,mapping,fe,quad,shape_info, dof_handler,update_flags); if(use_coloring) { if(level_mg_handler != numbers::invalid_unsigned_int) { 
helper.init_with_coloring(dof_handler.begin_mg(level_mg_handler), dof_handler.end_mg(level_mg_handler), constraints); } else { const typename DoFHandler<dim>::active_cell_iterator begin = dof_handler.begin_active(), end = dof_handler.end(); // explicitly make end() an active iterator helper.init_with_coloring(begin,end, constraints); } } else { // no coloring if(level_mg_handler != numbers::invalid_unsigned_int) helper.init(dof_handler.begin_mg(level_mg_handler), dof_handler.end_mg(level_mg_handler), constraints); else { const typename DoFHandler<dim>::active_cell_iterator begin = dof_handler.begin_active(), end = dof_handler.end(); // explicitly make end() an active iterator helper.init(begin,end, constraints); } } // setup row starts rowstart[0] = 0; for(int c = 0; c < num_colors-1; ++c) { rowstart[c+1] = rowstart[c] + n_cells[c] * get_rowlength(); } } template <int dim, typename Number> void MatrixFreeGpu<dim,Number>::free() { for(int c = 0; c < quadrature_points.size(); ++c) { if(quadrature_points[c] != NULL) CUDA_CHECK_SUCCESS(hipFree(quadrature_points[c])); } for(int c = 0; c < loc2glob.size(); ++c) { if(loc2glob[c] != NULL) CUDA_CHECK_SUCCESS(hipFree(loc2glob[c])); } for(int c = 0; c < inv_jac.size(); ++c) { if(inv_jac[c] != NULL) CUDA_CHECK_SUCCESS(hipFree(inv_jac[c])); } for(int c = 0; c < JxW.size(); ++c) { if(JxW[c] != NULL) CUDA_CHECK_SUCCESS(hipFree(JxW[c])); } #ifdef MATRIX_FREE_HANGING_NODES for(int c = 0; c < constraint_mask.size(); ++c) { if(constraint_mask[c] != NULL) CUDA_CHECK_SUCCESS(hipFree(constraint_mask[c])); } #endif quadrature_points.clear(); loc2glob.clear(); inv_jac.clear(); JxW.clear(); #ifdef MATRIX_FREE_HANGING_NODES constraint_mask.clear(); #endif }
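The comments in alloc_and_copy_arrays above explain why the inverse Jacobians are transposed (twice) before upload: the per-cell array-of-structures order cellid*rowlength*dim*dim + q*dim*dim + i is rearranged so that entries of the same component become contiguous across cells. The sketch below exercises the same index mapping as the transpose() helper on a tiny made-up array, independent of deal.II, to show where an entry moves; the sizes 3 and 4 are arbitrary.

#include <cstdio>
#include <vector>

// Same index mapping as the transpose() helper above: src is N x M, dst is M x N.
template <typename T>
void transpose_sketch(T* dst, const T* src, unsigned int N, unsigned int M)
{
    for (unsigned int i = 0; i < N; ++i)
        for (unsigned int j = 0; j < M; ++j)
            dst[j * N + i] = src[i * M + j];
}

int main()
{
    // Made-up sizes: 3 cells, rowlength of 4 padded entries per cell.
    const unsigned int n_cells = 3, rowlength = 4;
    std::vector<int> aos(n_cells * rowlength), soa(n_cells * rowlength);

    // Array-of-structures order: entry q of cell c sits at c*rowlength + q.
    for (unsigned int c = 0; c < n_cells; ++c)
        for (unsigned int q = 0; q < rowlength; ++q)
            aos[c * rowlength + q] = 100 * static_cast<int>(c) + static_cast<int>(q);

    // After transposing the n_cells x rowlength block, entry q of cell c sits at
    // q*n_cells + c, so threads that each own one cell read neighbouring addresses
    // whenever they all touch the same entry q.
    transpose_sketch(soa.data(), aos.data(), n_cells, rowlength);
    std::printf("cell 2, entry 1: aos=%d soa=%d\n",
                aos[2 * rowlength + 1], soa[1 * n_cells + 2]);
    return 0;
}

This contiguity across cells is the usual coalescing argument behind the scheme_par_over_elems layout.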
0bae48d3a05a9ccb90ba498b3ad644614b645a20.cu
/* -*- c-basic-offset:2; tab-width:2; indent-tabs-mode:nil -*- * * @(#)matrix_free_gpu.cu * @author Karl Ljungkvist <[email protected]> * */ #include <deal.II/fe/fe.h> #include <deal.II/fe/fe_values.h> #include <deal.II/matrix_free/shape_info.h> #include <deal.II/grid/filtered_iterator.h> #include <deal.II/base/graph_coloring.h> #include "coloring.h" #ifdef MATRIX_FREE_HANGING_NODES #include "hanging_nodes.cuh" #endif #include "cuda_utils.cuh" //============================================================================= // MatrixFreeGpu is an object living on the CPU, but with most of its member // data residing on the gpu. Here, we keep all the data related to a matrix-free // evaluation. //============================================================================= // helper object for (re)initialization of main class template <int dim, typename Number> class ReinitHelper { private: MatrixFreeGpu<dim,Number> *data; // host arrays std::vector<unsigned int> loc2glob_host; std::vector<Point<dim,Number> > quad_points_host; std::vector<Number> JxW_host; std::vector<Number> inv_jac_host; #ifdef MATRIX_FREE_HANGING_NODES std::vector<unsigned int> constraint_mask_host; #endif // local buffers std::vector<types::global_dof_index> local_dof_indices; FEValues<dim> fe_values; // get the translation from default dof numbering to a lexicographic one const std::vector<unsigned int> &lexicographic_inv; std::vector<unsigned int> lexicographic_dof_indices; const unsigned int fe_degree; const unsigned int dofs_per_cell; const unsigned int qpts_per_cell; // TODO: fix update flags const UpdateFlags &update_flags; // For setting up hanging node constraints #ifdef MATRIX_FREE_HANGING_NODES bool hanging_node_constraints_possible; HangingNodes<dim> hanging_nodes; #endif // for padding const unsigned int rowlength; public: ReinitHelper(MatrixFreeGpu<dim,Number> *data, const Mapping<dim> &mapping, const FiniteElement<dim> &fe, const Quadrature<1> &quad, const internal::MatrixFreeFunctions::ShapeInfo<Number> &shape_info, const DoFHandler<dim> &dof_handler, const UpdateFlags &update_flags) : data(data), fe_degree(data->fe_degree), dofs_per_cell(data->dofs_per_cell), qpts_per_cell(data->qpts_per_cell), fe_values (mapping, fe, Quadrature<dim>(quad), update_inverse_jacobians | update_quadrature_points | update_values | update_gradients | update_JxW_values), lexicographic_inv(shape_info.lexicographic_numbering), #ifdef MATRIX_FREE_HANGING_NODES hanging_node_constraints_possible(data->level_mg_handler == numbers::invalid_unsigned_int), hanging_nodes(fe_degree,dof_handler,lexicographic_inv), #endif update_flags(update_flags), rowlength(data->get_rowlength()) { local_dof_indices.resize(data->dofs_per_cell); lexicographic_dof_indices.resize(dofs_per_cell); } template <typename Iterator> void init(const Iterator &begin, const Iterator &end, const dealii::ConstraintMatrix &constraints); template <typename Iterator> void init_with_coloring(const Iterator &begin, const Iterator &end, const dealii::ConstraintMatrix &constraints); void setup_color_arrays(const unsigned int num_colors); void setup_cell_arrays(const unsigned int c); /** * Loop over all cells from begin to end and set up data structures */ template <typename Iterator> void cell_loop(const Iterator& begin, const Iterator& end); /** * Version used with coloring. 
In this case we want to loop over the resulting * std::vector from the coloring algorithm */ template <typename CellFilter> void cell_loop(const typename std::vector<CellFilter>::iterator & begin, const typename std::vector<CellFilter>::iterator & end); /** * Called internally from cell_loop to fill in data for one cell */ template <typename T> void get_cell_data(const T& cell,const unsigned int cellid); void alloc_and_copy_arrays(const unsigned int c); }; template <int dim, typename Number> template <typename Iterator> void ReinitHelper<dim,Number>::init(const Iterator &begin, const Iterator &end, const dealii::ConstraintMatrix &constraints) { data->num_colors = 1; setup_color_arrays(1); data->n_cells[0] = data->n_cells_tot; setup_cell_arrays(0); cell_loop(begin,end); // now allocate and copy stuff to the device alloc_and_copy_arrays(0); } template <int dim, typename Number> template <typename Iterator> void ReinitHelper<dim,Number>::init_with_coloring(const Iterator &begin, const Iterator &end, const dealii::ConstraintMatrix &constraints) { // create graph coloring typedef FilteredIterator<Iterator> CellFilter; std::vector<std::vector<CellFilter > > graph = GraphColoringWrapper<dim,Iterator>::make_graph_coloring(begin, end, constraints); data->num_colors = graph.size(); setup_color_arrays(data->num_colors); for(int c = 0; c < data->num_colors; ++c) { data->n_cells[c] = graph[c].size(); setup_cell_arrays(c); cell_loop<CellFilter>(graph[c].begin(), graph[c].end()); // now allocate and copy stuff to the device alloc_and_copy_arrays(c); } } template <int dim, typename Number> void ReinitHelper<dim,Number>::setup_color_arrays(const unsigned int num_colors) { data->n_cells.resize(num_colors); data->grid_dim.resize(num_colors); data->block_dim.resize(num_colors); data->loc2glob.resize(num_colors); #ifdef MATRIX_FREE_HANGING_NODES data->constraint_mask.resize(num_colors); #endif data->rowstart.resize(num_colors); if(update_flags & update_quadrature_points) data->quadrature_points.resize(num_colors); if(update_flags & update_JxW_values) data->JxW.resize(num_colors); if(update_flags & update_gradients) data->inv_jac.resize(num_colors); } template <int dim, typename Number> void ReinitHelper<dim,Number>::setup_cell_arrays(const unsigned int c) { const unsigned int n_cells = data->n_cells[c]; const unsigned int cells_per_block = data->cells_per_block; // setup kernel parameters const unsigned int apply_num_blocks = ceil(n_cells / float(cells_per_block)); const unsigned int apply_x_num_blocks = round(sqrt(apply_num_blocks)); // get closest to even square. 
const unsigned int apply_y_num_blocks = ceil(double(apply_num_blocks)/apply_x_num_blocks); data->grid_dim[c] = dim3(apply_x_num_blocks,apply_y_num_blocks); const unsigned int n_dofs_1d = fe_degree+1; if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_in_elem) { if(dim==1) data->block_dim[c] = dim3(n_dofs_1d*cells_per_block); else if(dim==2) data->block_dim[c] = dim3(n_dofs_1d*cells_per_block,n_dofs_1d); else if(dim==3) data->block_dim[c] = dim3(n_dofs_1d*cells_per_block,n_dofs_1d,n_dofs_1d); } else { data->block_dim[c] = dim3(cells_per_block); } loc2glob_host.resize(n_cells*rowlength); if(update_flags & update_quadrature_points) quad_points_host.resize(n_cells*rowlength); if(update_flags & update_JxW_values) JxW_host.resize(n_cells*rowlength); if(update_flags & update_gradients) { #ifdef MATRIX_FREE_UNIFORM_MESH // for uniform meshes, it is enough to store one number per element inv_jac_host.resize(n_cells); #else inv_jac_host.resize(n_cells*rowlength*dim*dim); #endif } #ifdef MATRIX_FREE_HANGING_NODES constraint_mask_host.resize(n_cells); #endif } template <int dim, typename Number> template <typename Iterator> void ReinitHelper<dim,Number>::cell_loop(const Iterator& begin, const Iterator& end) { Iterator cell=begin; unsigned int cellid=0; for (; cell!=end; ++cell,++cellid) get_cell_data(cell,cellid); } template <int dim, typename Number> template <typename CellFilter> void ReinitHelper<dim,Number>::cell_loop(const typename std::vector<CellFilter>::iterator & begin, const typename std::vector<CellFilter>::iterator & end) { typename std::vector<CellFilter>::iterator cell=begin; unsigned int cellid=0; for (; cell!=end; ++cell,++cellid) get_cell_data(*cell,cellid); // dereference iterator to get underlying cell_iterator } template <int dim, typename Number> template <typename T> void ReinitHelper<dim,Number>::get_cell_data(const T& cell, const unsigned int cellid) { cell->get_active_or_mg_dof_indices(local_dof_indices); for(int i = 0; i < dofs_per_cell; ++i) lexicographic_dof_indices[i] = local_dof_indices[lexicographic_inv[i]]; // setup hanging nodes #ifdef MATRIX_FREE_HANGING_NODES if(hanging_node_constraints_possible) hanging_nodes.setup_constraints (constraint_mask_host[cellid], lexicographic_dof_indices, cell,cellid); #endif memcpy(&loc2glob_host[cellid*rowlength],&lexicographic_dof_indices[0],dofs_per_cell*sizeof(unsigned int)); fe_values.reinit(cell); // quadrature points if(update_flags & update_quadrature_points) { const std::vector<dealii::Point<dim> > & qpts = fe_values.get_quadrature_points(); std::vector<dealii::Point<dim,Number> > qpts_conv(qpts.size()); for(int i=0; i < qpts_conv.size(); ++i) { qpts_conv[i]=dealii::Point<dim,Number> (qpts[i]); } memcpy(&quad_points_host[cellid*rowlength],qpts_conv.data(),qpts_per_cell*sizeof(Point<dim,Number>)); } if(update_flags & update_JxW_values) { const std::vector<double > & jxws_double = fe_values.get_JxW_values(); const unsigned int n = jxws_double.size(); std::vector<Number > jxws(n); for(int i=0; i<n; ++i) jxws[i] = Number(jxws_double[i]); memcpy(&JxW_host[cellid*rowlength],jxws.data(),qpts_per_cell*sizeof(Number)); } if(update_flags & update_gradients) { const std::vector<DerivativeForm<1,dim,dim> >& jacs = fe_values.get_inverse_jacobians(); std::vector<DerivativeForm<1,dim,dim,Number> > jacs_conv(jacs.size()); for(int i=0; i < jacs_conv.size(); ++i) { for(int d1=0; d1<dim; ++d1) for(int d2=0; d2<dim; ++d2) jacs_conv[i][d1][d2] = jacs[i][d1][d2]; } #ifdef MATRIX_FREE_UNIFORM_MESH // for uniform meshes, it is 
enough to store one number per element inv_jac_host[cellid] = jacs_conv[0][0][0]; #else memcpy(&inv_jac_host[cellid*rowlength*dim*dim],jacs_conv.data(),qpts_per_cell*sizeof(DerivativeForm<1,dim,dim,Number>)); #endif } } template <typename T> void transpose(T *dst, const T *src, const unsigned int N, const unsigned int M) { // src is N X M // dst is M X N for(int i = 0; i < N; ++i) for(int j = 0; j < M; ++j) dst[j*N+i] = src[i*M+j]; } // TODO: if a unified gpuarray / point would exist, only need one template argument template <typename T> void transpose_inplace(std::vector<T> &a_host, const unsigned int n, const unsigned int m) { // convert to structure-of-array std::vector<T> old(a_host.size()); old.swap(a_host); transpose(&a_host[0],&old[0],n,m); } template <typename T1, typename T2> void alloc_and_copy(T1 **a_dev, std::vector<T2> &a_host, const unsigned int n) { CUDA_CHECK_SUCCESS(cudaMalloc(a_dev,n*sizeof(T1))); CUDA_CHECK_SUCCESS(cudaMemcpy(*a_dev, &a_host[0], n*sizeof(T1), cudaMemcpyHostToDevice)); } template <int dim, typename Number> void ReinitHelper<dim,Number>::alloc_and_copy_arrays(const unsigned int c) { const unsigned n_cells = data->n_cells[c]; // local-to-global mapping if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_over_elems) { transpose_inplace(loc2glob_host,n_cells, rowlength); } alloc_and_copy(&data->loc2glob[c], loc2glob_host, n_cells*rowlength); // quadrature points if(update_flags & update_quadrature_points) { if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_over_elems) { transpose_inplace(quad_points_host,n_cells, rowlength); } alloc_and_copy(&data->quadrature_points[c], quad_points_host, n_cells*rowlength); } // jacobian determinants/quadrature weights if(update_flags & update_JxW_values) { if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_over_elems) { transpose_inplace(JxW_host,n_cells, rowlength); } alloc_and_copy(&data->JxW[c], JxW_host, n_cells*rowlength); } // inverse jacobians if(update_flags & update_gradients) { #ifdef MATRIX_FREE_UNIFORM_MESH // for uniform meshes, it is enough to store one number per element alloc_and_copy(&data->inv_jac[c], inv_jac_host, n_cells); #else // now this has index order: cellid*qpts_per_cell*dim*dim + q*dim*dim + i // this is not good at all? // convert so that all J_11 elements are together, all J_12 elements together, etc. // i.e. 
this index order: i*qpts_per_cell*n_cells + cellid*qpts_per_cell + q // this is good for a dof-level parallelization transpose_inplace(inv_jac_host,rowlength*n_cells,dim*dim); // transpose second time means we get the following index order: // q*n_cells*dim*dim + i*n_cells + cellid // which is good for an element-level parallelization if(data->parallelization_scheme == MatrixFreeGpu<dim,Number>::scheme_par_over_elems) { transpose_inplace(inv_jac_host,n_cells*dim*dim, rowlength); } alloc_and_copy(&data->inv_jac[c], inv_jac_host, n_cells*dim*dim*rowlength); #endif } #ifdef MATRIX_FREE_HANGING_NODES alloc_and_copy(&data->constraint_mask[c],constraint_mask_host,n_cells); #endif } //============================================================================= // Initialization function //============================================================================= template <int dim, typename Number> void MatrixFreeGpu<dim,Number>:: reinit(const Mapping<dim> &mapping, const DoFHandler<dim> &dof_handler, const ConstraintMatrix &constraints, const Quadrature<1> &quad, const AdditionalData additional_data) { if(typeid(Number) == typeid(double)) { cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); } use_coloring = additional_data.use_coloring; const UpdateFlags &update_flags = additional_data.mapping_update_flags; if(additional_data.parallelization_scheme != scheme_par_over_elems && additional_data.parallelization_scheme != scheme_par_in_elem) { fprintf(stderr,"Invalid parallelization scheme!\n"); exit(1); } this->parallelization_scheme = additional_data.parallelization_scheme; free(); // todo, only free if we actually need arrays of different length const FiniteElement<dim> &fe = dof_handler.get_fe(); fe_degree = fe.degree; const unsigned int n_dofs_1d = fe_degree+1; const unsigned int n_q_points_1d = quad.size(); // set row length to the closest power of two larger than or equal to the number of threads rowlength = 1 << static_cast<unsigned int>(ceil(dim*log2(fe_degree+1.0))); Assert(n_dofs_1d == n_q_points_1d,ExcMessage("n_q_points_1d must be equal to fe_degree+1.")); level_mg_handler = additional_data.level_mg_handler; if(level_mg_handler != numbers::invalid_unsigned_int) { n_dofs = dof_handler.n_dofs(level_mg_handler); n_cells_tot = dof_handler.get_triangulation().n_cells(level_mg_handler); } else { n_dofs = dof_handler.n_dofs(); n_cells_tot = dof_handler.get_triangulation().n_active_cells(); } dofs_per_cell = fe.dofs_per_cell; qpts_per_cell = ipowf(n_q_points_1d,dim); // shape info, a single copy const internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info(quad,fe); unsigned int size_shape_values = n_dofs_1d*n_q_points_1d*sizeof(Number); // test if shape_info.shape_values_number.size() == (fe_degree+1)*num_quad_1d ConstantMemoryWrapper<Number>::copy_to_shape_values(&shape_info.shape_values_number[0], size_shape_values); if(update_flags & update_gradients) { ConstantMemoryWrapper<Number>::copy_to_shape_gradient(&shape_info.shape_gradient_number[0], size_shape_values); } // Setup number of cells per CUDA thread block cells_per_block = cells_per_block_shmem(dim,fe_degree); //--------------------------------------------------------------------------- // cell-specific stuff (indices, JxW, inverse jacobian, quadrature points, etc) //--------------------------------------------------------------------------- ReinitHelper<dim,Number> helper(this,mapping,fe,quad,shape_info, dof_handler,update_flags); if(use_coloring) { if(level_mg_handler != numbers::invalid_unsigned_int) { 
helper.init_with_coloring(dof_handler.begin_mg(level_mg_handler), dof_handler.end_mg(level_mg_handler), constraints); } else { const typename DoFHandler<dim>::active_cell_iterator begin = dof_handler.begin_active(), end = dof_handler.end(); // explicitly make end() an active iterator helper.init_with_coloring(begin,end, constraints); } } else { // no coloring if(level_mg_handler != numbers::invalid_unsigned_int) helper.init(dof_handler.begin_mg(level_mg_handler), dof_handler.end_mg(level_mg_handler), constraints); else { const typename DoFHandler<dim>::active_cell_iterator begin = dof_handler.begin_active(), end = dof_handler.end(); // explicitly make end() an active iterator helper.init(begin,end, constraints); } } // setup row starts rowstart[0] = 0; for(int c = 0; c < num_colors-1; ++c) { rowstart[c+1] = rowstart[c] + n_cells[c] * get_rowlength(); } } template <int dim, typename Number> void MatrixFreeGpu<dim,Number>::free() { for(int c = 0; c < quadrature_points.size(); ++c) { if(quadrature_points[c] != NULL) CUDA_CHECK_SUCCESS(cudaFree(quadrature_points[c])); } for(int c = 0; c < loc2glob.size(); ++c) { if(loc2glob[c] != NULL) CUDA_CHECK_SUCCESS(cudaFree(loc2glob[c])); } for(int c = 0; c < inv_jac.size(); ++c) { if(inv_jac[c] != NULL) CUDA_CHECK_SUCCESS(cudaFree(inv_jac[c])); } for(int c = 0; c < JxW.size(); ++c) { if(JxW[c] != NULL) CUDA_CHECK_SUCCESS(cudaFree(JxW[c])); } #ifdef MATRIX_FREE_HANGING_NODES for(int c = 0; c < constraint_mask.size(); ++c) { if(constraint_mask[c] != NULL) CUDA_CHECK_SUCCESS(cudaFree(constraint_mask[c])); } #endif quadrature_points.clear(); loc2glob.clear(); inv_jac.clear(); JxW.clear(); #ifdef MATRIX_FREE_HANGING_NODES constraint_mask.clear(); #endif }
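Both the HIP and CUDA variants of reinit() pad every cell to rowlength, the smallest power of two no smaller than (fe_degree+1)^dim, and place the colors back to back via the rowstart running sums computed at the end of the function. The host-only sketch below reproduces just those two computations; the degree, dimension, and per-color cell counts are assumed example values, not taken from a real discretization.

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // Assumed example parameters.
    const unsigned int dim = 3, fe_degree = 2;

    // Same expression as in reinit(): smallest power of two >= (fe_degree+1)^dim.
    const unsigned int rowlength =
        1u << static_cast<unsigned int>(std::ceil(dim * std::log2(fe_degree + 1.0)));

    unsigned int dofs_per_cell = 1;
    for (unsigned int d = 0; d < dim; ++d)
        dofs_per_cell *= fe_degree + 1;
    std::printf("(p+1)^dim = %u, padded rowlength = %u\n", dofs_per_cell, rowlength);

    // rowstart[c] is an exclusive running sum of n_cells[c] * rowlength,
    // matching the loop at the end of reinit().
    const std::vector<unsigned int> n_cells = {5, 3, 7}; // hypothetical per-color cell counts
    std::vector<unsigned int> rowstart(n_cells.size(), 0);
    for (std::size_t c = 0; c + 1 < n_cells.size(); ++c)
        rowstart[c + 1] = rowstart[c] + n_cells[c] * rowlength;

    for (std::size_t c = 0; c < rowstart.size(); ++c)
        std::printf("color %zu starts at row %u\n", c, rowstart[c]);
    return 0;
}

Padding to a power of two leaves a few unused entries per cell but keeps the per-cell offsets simple and uniform on the device.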
644f64645d7256ad9ae054597f79fad938d1b009.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 4 //Radix Sorting #include <cassert> #include <cstring> #include <cstdio> #include <stdint.h> #include <algorithm> #include "reference_calc.h" #include "utils.h" //#include "quicksort.h" #define MAX_THREADS 1024 /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ #if 1 /* histo[0](118983,101497), histo[1](110205,110275), histo[2](110021,110459), histo[3](109913,110567), histo[4](110267,110213), histo[5](110493,109987), histo[6](110067,110413), histo[7](109837,110643), histo[8](110064,110416), histo[9](110043,110437), histo[10](111037,109443), histo[11](110788,109692), histo[12](111193,109287), histo[13](111433,109047), histo[14](111952,108528), histo[15](112241,108239), histo[16](111609,108871), histo[17](100344,120136), histo[18](100286,120194), histo[19](101878,118602), histo[20](104156,116324), histo[21](105779,114701), histo[22](109431,111049), histo[23](103261,117219), histo[24](102942,117538), histo[25](117587,102893), histo[26](9,220471), histo[27](1,220479), histo[28](1,220479), histo[29](1,220479), histo[30](220480,0), histo[31](220480,0) */ // generate a histogram for each bit order of the numSamps inputs __global__ void gen_histo(unsigned int* d_out, const unsigned int* const d_in, const int numSamps) { /* d_in is numSamps each with 32bits (so numSamps ) d_out is 2 outputs for each bit (so 32 x 2 ) numBits must equal gridDim.y */ const int2 my2DPos = make_int2( (blockDim.x * blockIdx.x) + threadIdx.x, (blockDim.y * blockIdx.y) + threadIdx.y ); // my memory location const int my1DPos = my2DPos.x; //const int numBits = gridDim.y; if( my1DPos >= numSamps ) return; // 2 possible out bins for each of the bit orders // aka base, and max power const int2 numBins = make_int2( 2, blockIdx.y); // xbin is either 0 or 1 for all blocks const int xbin = ((d_in[my1DPos] & (0x1 << numBins.y)) == 0) ? 
0 : 1; // ybin is blockIdx.y for all blocks //const int ybin = numBins.y; const int bin1DPos = xbin + (numBins.y * numBins.x); atomicAdd( &(d_out[bin1DPos]), 1); } #endif __global__ void gen_pred(unsigned int* d_zeroOut, unsigned int* d_oneOut, const unsigned int* const d_in, const unsigned int numSamps, const unsigned int shift) { // d_zeroOut is the predicate output for zeros // d_oneOut is the predicate output for the ones // d_in is the inputs // numSamps is the number of samples in the input, // we will have numSamps output to each of the // output arrays // shift is the bit specifier (which bit location of // the input to check if is a 1 or a 0 // my memory location const int myPos = (blockDim.x * blockIdx.x) + threadIdx.x; if( myPos >= numSamps ) return; if( (d_in[myPos] & (0x1<<shift)) == 0 ) { d_zeroOut[myPos] = 1; d_oneOut[myPos] = 0; } else { d_zeroOut[myPos] = 0; d_oneOut[myPos] = 1; } } #define LOAD_INPUTS 1 #define REDUCE_STEPS 1 #define SAVE_SUM 1 #define DOWNSWEEP_STEPS 1 #define SAVE_OUTPUTS 1 __global__ void exclusive_scan(unsigned int* d_out, unsigned int* d_sums, const unsigned int* const d_in, const int numSamps) { // blelloch scan (reduce and downsweep) extern __shared__ unsigned int sdata[]; const int tid = threadIdx.x; // block is sized for half the data (1 thread does 2 samps) so *2 const int numBlkSamps = blockDim.x*2; const int myPos = threadIdx.x + (blockIdx.x * blockDim.x); int offset = 1; // zero out the shared memory //(this is required but only for the threads that return early) // yet go ahead and do for all sdata[2*tid] = 0; sdata[2*tid+1] = 0; __syncthreads(); if( (2*myPos+1) >= numSamps ) { //printf("returning early BlockIdx = (%d, %d, %d), BlockDim = (%d, %d, %d)\n", // blockIdx.x, blockIdx.y, blockIdx.z, // blockDim.x, blockDim.y, blockDim.z); return; } #if LOAD_INPUTS // copy data into shared mem sdata[2*tid] = d_in[2*myPos]; sdata[2*tid+1] = d_in[2*myPos+1]; __syncthreads(); #if 0 //__CUDA_ARCH__ >= 200 // if( tid == 0 ) // { // printf("BlockIdx = (%d, %d, %d), BlockDim = (%d, %d, %d)\n", // blockIdx.x, blockIdx.y, blockIdx.z, // blockDim.x, blockDim.y, blockDim.z); //} if( sdata[2*tid] > numSamps || sdata[2*tid+1] > numSamps ) { printf("Input data(%u, %u) is larger than numSamps(%u)\n", d_in[2*myPos], d_in[2*myPos+1], numSamps); return; } #endif #endif #if REDUCE_STEPS // reductions steps for(unsigned int s=(numBlkSamps>>1); s > 0; s >>= 1) { if(tid < s) { int a = offset*(2*tid+1)-1; int b = offset*(2*tid+2)-1; sdata[b] += sdata[a]; } __syncthreads(); offset <<= 1; } #endif #if SAVE_SUM // reduction step done, clear the last element after saving it to sums if( tid == 0 ) { if( d_sums ) { // save out the sum before clearing (it is inclusive) d_sums[blockIdx.x] = sdata[numBlkSamps-1]; // we don't have to do anything special about the last block // (which may not have a complete block's worth of samples) // because we don't care about the last blocks's sum // with an exclusive scan } #if 0 //__CUDA_ARCH__ >= 200 //printf("d_sum[%d] = %u\n", blockIdx.x, d_sums[blockIdx.x]); if( sdata[numBlkSamps-1] > numSamps ) { printf("Final block data(%u) is larger than numSamps(%u)\n", sdata[numBlkSamps-1], numSamps); return; } #endif sdata[numBlkSamps-1] = 0; } #endif #if DOWNSWEEP_STEPS // downsweep steps for(unsigned int s=1; s < numBlkSamps; s *= 2) { offset >>= 1; if( tid < s ) { int a = offset*(2*tid+1)-1; int b = offset*(2*tid+2)-1; unsigned int tmp = sdata[a]; sdata[a] = sdata[b]; sdata[b] += tmp; } __syncthreads(); } #endif #if SAVE_OUTPUTS // copy to 
output d_out[2*myPos] = sdata[2*tid]; d_out[2*myPos+1] = sdata[2*tid+1]; #if 0 //__CUDA_ARCH__ >= 200 if( sdata[2*tid] > numSamps || sdata[2*tid+1] > numSamps ) { printf("output data(%u, %u) is larger than numSamps(%u)\n", d_out[2*tid], d_out[2*tid+1], numSamps); return; } #endif #endif } #define SLOW_GEN_SCAN 1 __global__ void exclusive_scan_gen( unsigned int* d_out, const unsigned int* const d_unused, const unsigned int* const d_in, const unsigned int numSamps) { //extern __shared__ unsigned int shisto[]; // my memory location //const int myPos = (blockDim.x * blockIdx.x) + threadIdx.x; const int tid = threadIdx.x; if( tid >= numSamps ) { return; } #if SLOW_GEN_SCAN if( tid == 0 ) { d_out[0] = 0; for(int i=1; i < numSamps; ++i) { d_out[i] = d_in[i-1] + d_out[i-1]; } } #else d_out[tid] = d_in[tid]; #endif } __global__ void sum_2input( unsigned int* d_out, const unsigned int* const d_in1, const unsigned int* const d_sums, const unsigned int numSamps) { extern __shared__ unsigned int sdata[]; const int tid = threadIdx.x; const int myPos = threadIdx.x + (blockIdx.x * blockDim.x); if( myPos >= numSamps ) { return; } if( tid == 0 ) { sdata[0] = d_sums[blockIdx.x]; } __syncthreads(); //d_out[myPos] = d_in1[myPos] + d_sums[blockIdx.x]; d_out[myPos] = d_in1[myPos] + sdata[0]; } #define LOAD_SH_SORT 1 #define LOAD_IN_SORT 1 #define LOAD_PD_SORT 1 #define SAVE_SORT 1 __global__ void sort( unsigned int* d_outV, unsigned int* d_outP, const unsigned int* const d_inV, const unsigned int* const d_inP, const unsigned int* const d_histo, const unsigned int* const d_1Pred, const unsigned int* const d_1Scan, const unsigned int* const d_0Scan, const unsigned int numSamps) { extern __shared__ unsigned int shisto[]; // my memory location const int myPos = (blockDim.x * blockIdx.x) + threadIdx.x; const int tid = threadIdx.x; if( myPos >= numSamps ) { return; } #if LOAD_SH_SORT // read in histo data if( tid == 0 ) { shisto[0] = 0; shisto[1] = d_histo[0]; } __syncthreads(); #endif #if LOAD_IN_SORT // this thread's inputs const unsigned int inV = d_inV[myPos]; const unsigned int inP = d_inP[myPos]; #else const unsigned int inV = 0; const unsigned int inP = 0; #endif #if LOAD_PD_SORT // this thread's predicate ( if ==0 is a 0 val, if ==1 is a 1 val) const unsigned int pred = d_1Pred[myPos]; #else const unsigned int pred = 0; #endif // this thread's relative location in 0's output or 1's output const unsigned int relIdx = pred ? d_1Scan[myPos]: d_0Scan[myPos]; // this thread's starting location const unsigned int startIdx = pred ? shisto[1] : shisto[0]; // this thread's output location const unsigned int outIdx = startIdx + relIdx; #if SAVE_SORT // write output to new location if( outIdx < numSamps ) { d_outV[outIdx] = inV; d_outP[outIdx] = inP; } #if __CUDA_ARCH__ >= 200 else { printf("block(%d,%d,%d):thread(%d,%d,%d): OutIdx(%u) is too large(%u)! 
pred(%u) relIdx(%u) startIdx(%u) histo(%u, %u)\n", blockIdx.x,blockIdx.y,blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z, outIdx, numSamps, pred, relIdx, startIdx, shisto[0], shisto[1]); } #endif #endif } __global__ void swap( unsigned int* const d_outVals, unsigned int* const d_outPos, unsigned int* const d_inVals, unsigned int* const d_inPos, const unsigned int numSamps ) { // my memory location const int myPos = (blockDim.x * blockIdx.x) + threadIdx.x; //const int tid = threadIdx.x; if( myPos >= numSamps ) { return; } unsigned int inV = d_inVals[myPos]; unsigned int inP = d_inPos[myPos]; d_inVals[myPos] = d_outVals[myPos]; d_inPos[myPos] = d_outPos[myPos]; d_outVals[myPos] = inV; d_outPos[myPos] = inP; } // gen_histo works, yay!!! #define HISTO_REF 0 #if HISTO_REF #include <fstream> #endif // preds seem to be ok #define PRED_VALIDATE 0 // working looks like #define SCAN_VALIDATE 0 // sums scan are ok #define SUMS_VALIDATE 0 #define SORT_VALIDATE 0 #if SORT_VALIDATE #include <sstream> #include <fstream> #endif // do not use with other validate #define CPU_VALIDATE 0 #if CPU_VALIDATE #include <fstream> #endif void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) //TODO //PUT YOUR SORT HERE { const unsigned int numPoss = 2; // number of possible outcomes const unsigned int numBits = 32; // number of bits #if CPU_VALIDATE unsigned int* h_inputVals = NULL; unsigned int* h_inputPos = NULL; h_inputVals = new unsigned int[numElems]; h_inputPos = new unsigned int[numElems]; checkCudaErrors(hipMemcpy(h_inputVals, d_inputVals, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_inputPos, d_inputPos, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); #endif // numElems must be a power of 2 assert( (numElems%2) == 0 ); #if HISTO_REF unsigned int histo[numBits*numPoss]; unsigned int dev_histo[numBits*numPoss]; std::cout << "NumElems: " << numElems << ", sizeof(histo): " << sizeof(histo) << std::endl; memset( histo, 0, sizeof(histo) ); unsigned int* h_inputVals = new unsigned int[numElems]; checkCudaErrors(hipMemcpy(h_inputVals, d_inputVals, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); std::ofstream fout("inputVals.txt"); for(int i=0; i < numElems; ++i) { if( fout.good() ) fout << std::hex << h_inputVals[i] << std::endl; else std::cout << "Cannot write to inputVals.txt!" << std::endl; } fout.close(); for(int i=0; i < numElems; ++i) { for(int b=0; b < numBits; ++b) { if( (h_inputVals[i] & (1<<b)) == 0 ) ++(histo[b*numPoss]); else ++(histo[b*numPoss+1]); } } //std::cout << "histo calculated" << std::endl; //for(int i=0; i < numBits; ++i) //{ // std::cout << "histo[" << i << "](" << histo[i*numPoss] << "," << histo[i*numPoss+1] << "), "; //} //std::cout << std::endl; #endif /* uint2* posVals = new uint2[numElems]; for(int i=0; i < numElems; ++i) { posVals[i].x = d_inputVals[i]; posVals[i].y = d_inputPos[i]; } */ const int numThreads = (MAX_THREADS < numElems) ? 
MAX_THREADS : numElems; const dim3 blockSize_histo( numThreads, 1, 1); const dim3 gridSize_histo( (numElems + blockSize_histo.x-1) / blockSize_histo.x, numBits, 1); unsigned int* d_histo = NULL; checkCudaErrors(hipMalloc(&d_histo, sizeof(unsigned int) * numPoss * numBits )); checkCudaErrors(hipMemset(d_histo, 0, sizeof(unsigned int) * numPoss*numBits)); hipLaunchKernelGGL(( gen_histo), dim3(gridSize_histo), dim3(blockSize_histo), 0, 0, d_histo, d_inputVals, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // we can execlusively scan the d_histo to put so the // follow-on functions can look at the same location // (e.g. 0's read d_histo[blockIdx][0] (always 0) and 1's read d_histo[blockIdx][1]), // but since only 2 values just as east to have 0's always write to 0 // and have the 1's read d_histo[blockIdx][0]; #if HISTO_REF checkCudaErrors(hipMemcpy(dev_histo, d_histo, sizeof(unsigned int) * numPoss * numBits, hipMemcpyDeviceToHost)); for(int i=0; i < numBits; ++i) { if( histo[i*numPoss] != dev_histo[i*numPoss] ) std::cout << "histo[" << i << "](" << histo[i*numPoss] << "," << dev_histo[i*numPoss] << "), "; if( histo[i*numPoss+1] != dev_histo[i*numPoss+1] ) std::cout << "histo[" << i << "](" << histo[i*numPoss+1] << "," << dev_histo[i*numPoss+1] << ")\n"; } std::cout << std::endl; #endif const dim3 blockSize_pred( numThreads, 1, 1); const dim3 gridSize_pred( (numElems + blockSize_pred.x-1) / blockSize_pred.x, 1, 1); unsigned int* d_zeroPred = NULL; unsigned int* d_onePred = NULL; checkCudaErrors(hipMalloc(&d_zeroPred, sizeof(unsigned int) * numElems )); checkCudaErrors(hipMalloc(&d_onePred, sizeof(unsigned int) * numElems )); // each thread in the scan does two elements so numThreads/2 with same gridSize // for half the number of threads as elements const dim3 blockSize_scan( (numThreads/2), 1, 1 ); const dim3 gridSize_scan( (numElems + blockSize_pred.x-1) / blockSize_pred.x, 1, 1); // numThreads is a power of 2, ok //std::cout << "numThreads = " << numThreads << std::endl; unsigned int* d_zeroScan = NULL; unsigned int* d_oneScan = NULL; unsigned int* d_zeroSums = NULL; unsigned int* d_oneSums = NULL; unsigned int* d_1Sums = NULL; unsigned int* d_0Sums = NULL; // if there are an odd number of blocks set pad to 1 to we can specify an even number of blocks unsigned int pad = 0; if( gridSize_scan.x % 2 ) { ++pad; } assert( (gridSize_scan.x + pad) % 2 == 0 ); checkCudaErrors(hipMalloc(&d_zeroScan, sizeof(unsigned int) * numElems )); checkCudaErrors(hipMalloc(&d_oneScan, sizeof(unsigned int) * numElems )); checkCudaErrors(hipMalloc(&d_zeroSums, sizeof(unsigned int) * (gridSize_scan.x + pad))); checkCudaErrors(hipMalloc(&d_oneSums, sizeof(unsigned int) * (gridSize_scan.x + pad))); checkCudaErrors(hipMalloc(&d_0Sums, sizeof(unsigned int) * (gridSize_scan.x + pad))); checkCudaErrors(hipMalloc(&d_1Sums, sizeof(unsigned int) * (gridSize_scan.x + pad))); //std::cout << "Scan gridSize_scan.x = " << gridSize_scan.x << ", pad = " << pad //<< ", blockSize_scan.x = " << blockSize_scan.x //<< std::endl; // we are using the bufferes to ping-pong back and forth // so input and outputs change each loop iterator // (setup backwards as the loop will swap before using) unsigned int *d_outV = d_inputVals; unsigned int *d_outP = d_inputPos; unsigned int *d_inV = d_outputVals; unsigned int *d_inP = d_outputPos; for(unsigned int bit=0; bit < numBits; ++bit) { //swap input and output pointers (we do it before so when // we leave the loop the pointers will point to their last // action (input or 
output) if( d_outV == d_outputVals ) { d_outV = d_inputVals; d_outP = d_inputPos; d_inV = d_outputVals; d_inP = d_outputPos; } else { d_outV = d_outputVals; d_outP = d_outputPos; d_inV = d_inputVals; d_inP = d_inputPos; } // get predicates for if is a 1 or is a 0 hipLaunchKernelGGL(( gen_pred), dim3(gridSize_pred), dim3(blockSize_pred), 0, 0, d_zeroPred, d_onePred, d_inV, numElems, bit); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); #if PRED_VALIDATE unsigned int* h_one = NULL; unsigned int* h_zero = NULL; unsigned int* h_inputVals = new unsigned int[numElems]; h_one = new unsigned int[numElems]; h_zero = new unsigned int[numElems]; checkCudaErrors(hipMemcpy(h_inputVals, d_inV, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_one, d_onePred, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_zero, d_zeroPred, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); std::cout << "Checking predicates for bit" << bit << std::endl; std::stringstream psstrm; psstrm << "preds" << bit << ".txt"; std::ofstream pfout(psstrm.str().c_str()); pfout << "Samp: Pred0 Pred1 inputVals" << std::endl; for(int i=0; i < numElems; ++i) { pfout << std::dec << " " << i << " " << h_zero[i] << " " << h_one[i] << " 0x" << std::hex << h_inputVals[i] << std::endl; // std::cout << std::dec << "bit" << bit << ",samp" << i // << ": h_zero = " << h_zero[i] << ", h_one = " << h_one[i] // << ", inputVal = " << std::hex << h_inputVals[i] // << std::endl; if( (h_inputVals[i] & (0x1<<bit)) == 0 ) { if( !h_zero[i] ) std::cout << "sample" << i << ", bit" << bit << ": is 0 but pred0 = " << h_zero[i] << std::endl; } else { if( !h_one[i] ) std::cout << "sample" << i << ", bit" << bit << ": is 1 but pred1 = " << h_one[i] << std::endl; } if(h_one[i] != 0 && h_one[i] != 1) { std::cout << "OnePred[" << i << "] (" << h_one[i] << ") is not a 0 or 1!" << std::endl; } if(h_zero[i] != 0 && h_zero[i] != 1) { std::cout << "ZeroPred[" << i << "] (" << h_zero[i] << ") is not a 0 or 1!" 
<< std::endl; } if( (h_zero[i]) != !(h_one[i]) ) std::cout << "invalid: h_zero[" << i << "](" << h_zero << "), h_one[" << i << "](" << h_one[i] << ")" << std::endl; } delete [] h_inputVals; h_inputVals = NULL; delete [] h_one; h_one = NULL; delete [] h_zero; h_zero = NULL; #endif #if 1 //scan // exclusive scan the output of the predicate arrays // to get the relative offsets hipLaunchKernelGGL(( exclusive_scan), dim3(gridSize_scan), dim3(blockSize_scan), (blockSize_scan.x*2*sizeof(unsigned int)), 0, d_zeroScan, d_0Sums, //outputs d_zeroPred, //inputs numElems); // params hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( exclusive_scan), dim3(gridSize_scan), dim3(blockSize_scan), blockSize_scan.x*2*sizeof(unsigned int), 0, d_oneScan, d_1Sums, d_onePred, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); #if SCAN_VALIDATE unsigned int* h_scan1 = NULL; unsigned int* h_scan0 = NULL; unsigned int* h_sums1 = NULL; unsigned int* h_sums0 = NULL; unsigned int* h_pred0 = NULL; unsigned int* h_pred1 = NULL; h_pred0 = new unsigned int[numElems]; h_pred1 = new unsigned int[numElems]; h_scan1 = new unsigned int[numElems]; h_scan0 = new unsigned int[numElems]; h_sums1 = new unsigned int[(gridSize_scan.x + pad)]; h_sums0 = new unsigned int[(gridSize_scan.x + pad)]; checkCudaErrors(hipMemcpy(h_pred1, d_onePred, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_pred0, d_zeroPred, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_scan1, d_oneScan, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_scan0, d_zeroScan, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_sums0, d_0Sums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_sums1, d_1Sums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), hipMemcpyDeviceToHost)); // for(int i=0; i < (gridSize_scan.x + pad); ++i) // { // std::cout << "bit" << bit << " sums" << i << " (" << h_sums0[i] << ", " << h_sums1[i] << ")" << std::endl; // } //std::cout << "Checking scans for bit" << bit << std::endl; for(int i=0; i < gridSize_scan.x; ++i) { for(int j=1; j < blockSize_scan.x; ++j) { const unsigned int idx = i*blockSize_scan.x + j; if( idx < numElems ) { if( h_scan1[idx] > numElems) { std::cout << "idx = " << idx << ", Scan1[" << i << "][" << j << "]( " << h_scan1[idx] << ") is more than numElems(" << numElems << ")" << std::endl; } if( h_scan0[idx] > numElems) { std::cout << "idx = " << idx << ", Scan0[" << i << "][" << j << "]( " << h_scan0[idx] << ") is more than numElems(" << numElems << ")" << std::endl; return; } } } } #endif //scan validate #endif // we don't support recursive scans (if number of blocks of // the first scan is more than will fit into a block then // we will need to do the scan->sum step multiple times // and we don't do that yet). 
assert( gridSize_scan.x <= MAX_THREADS ); #if 1 //SUM_SCANS // now scan the sums (pad is needed in case was an odd number of blocks // as scan only supports multiple of 2 inputs since it does 2 per thread) // /2 then + 1 as the numThreads launched can be odd const dim3 blockSize_sums( (gridSize_scan.x/2 + pad), 1, 1); const dim3 gridSize_sums(1, 1, 1); //std::cout << "gridSize_sums = " << gridSize_sums.x // << ", blockSize_sums = " << blockSize_sums.x // << ", sharedSize = " << 2*blockSize_sums.x*2 << " | " << 2*blockSize_sums.x*2*sizeof(unsigned int) // << "b, numSamps = " << (gridSize_scan.x + pad) // << std::endl; #if SUMS_VALIDATE unsigned int* h_sin1 = NULL; unsigned int* h_sin0 = NULL; unsigned int* h_sout1 = NULL; unsigned int* h_sout0 = NULL; h_sin1 = new unsigned int[(gridSize_scan.x + pad)]; h_sin0 = new unsigned int[(gridSize_scan.x + pad)]; h_sout1 = new unsigned int[(gridSize_scan.x + pad)]; h_sout0 = new unsigned int[(gridSize_scan.x + pad)]; checkCudaErrors(hipMemcpy(h_sin0, d_0Sums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_sin1, d_1Sums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), hipMemcpyDeviceToHost)); #endif // numSamps: +pad because numSamps must be multiple of 2 hipLaunchKernelGGL(( exclusive_scan_gen), dim3(gridSize_sums), dim3(blockSize_sums), 2*blockSize_sums.x*2*sizeof(unsigned int), 0, d_oneSums, NULL, d_1Sums, (gridSize_scan.x+pad) ); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( exclusive_scan_gen), dim3(gridSize_sums), dim3(blockSize_sums), 2*blockSize_sums.x*2*sizeof(unsigned int), 0, d_zeroSums, NULL, d_0Sums, (gridSize_scan.x+pad) ); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); #if SUMS_VALIDATE checkCudaErrors(hipMemcpy(h_sout0, d_zeroSums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_sout1, d_oneSums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), hipMemcpyDeviceToHost)); bool mismatch = false; unsigned int* h_sref0 = new unsigned int[(gridSize_scan.x+pad)]; unsigned int* h_sref1 = new unsigned int[(gridSize_scan.x+pad)]; h_sref0[0] = 0; h_sref1[0] = 0; for(int i=1; i < (gridSize_scan.x+pad); ++i ) { h_sref0[i] = h_sref0[i-1] + h_sin0[i-1]; h_sref1[i] = h_sref1[i-1] + h_sin1[i-1]; // std::cout << "h_sin(" << h_sin0[i-1] << "," << h_sin1[i-1] // << ") h_sref(" << h_sref0[i-1] << "," << h_sref1[i-1] // << ") h_sout(" << h_sout0[i-1] << "," << h_sout1[i-1] << ")" // << std::endl; } #if 1 for(int i=1; i < (gridSize_scan.x + pad); ++i) { if( h_sref0[i] != h_sout0[i] ) { mismatch = true; std::cout << "sample" << i << ": h_sref0 = " << h_sref0[i] << ", h_sout0 = " << h_sout0[i] << std::endl; } if( h_sref1[i] != h_sout1[i] ) { mismatch = true; std::cout << "sample" << i << ": h_sref1 = " << h_sref1[i] << ", h_sout1 = " << h_sout1[i] << std::endl; } if( (h_sin0[i-1] + h_sout0[i-1]) != h_sout0[i] ) { mismatch = true; std::cout << "bit" << bit << ", sumsScan0 incorrect: in[" << i << "] + out[" << i-1 << "]" << " != out[" << i << "] ( " << h_sin0[i] << " + " << h_sout0[i-1] << " != " << h_sout0[i] << std::endl; } if( (h_sin1[i-1] + h_sout1[i-1]) != h_sout1[i] ) { mismatch = true; std::cout << "bit" << bit << ", sumsScan1 incorrect: in[" << i << "] + out[" << i-1 << "]" << " != out[" << i << "] ( " << h_sin1[i] << " + " << h_sout1[i-1] << " != " << h_sout1[i] << std::endl; } } #endif delete [] h_sin0; h_sin0 = NULL; delete [] h_sin1; h_sin1 = NULL; delete [] h_sout0; h_sout0 = NULL; delete [] 
h_sout1; h_sout1 = NULL; if( mismatch ) return; #endif //sums validate #endif //scans #if 1 //sums // then add the scaned sums to each previously scanned block hipLaunchKernelGGL(( sum_2input), dim3(gridSize_pred), dim3(blockSize_pred), sizeof(unsigned int) , 0, d_zeroScan, d_zeroScan, d_zeroSums, numElems ); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( sum_2input), dim3(gridSize_pred), dim3(blockSize_pred), sizeof(unsigned int) , 0, d_oneScan, d_oneScan, d_oneSums, numElems ); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); #if SCAN_VALIDATE unsigned int* h_ref0 = new unsigned int[numElems]; unsigned int* h_ref1 = new unsigned int[numElems]; checkCudaErrors(hipMemcpy(h_scan1, d_oneScan, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_scan0, d_zeroScan, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); // host scan h_ref0[0] = 0; h_ref1[0] = 0; for(int i=1; i < numElems; ++i) { h_ref0[i] = h_pred0[i-1] + h_ref0[i-1]; h_ref1[i] = h_pred1[i-1] + h_ref1[i-1]; //std::cout << "sample" << i << ": h_ref0 = " << h_ref0[i] << ", h_ref1 = " << h_ref1[i] << std::endl; } //std::cout << "Checking final scan for bit" << bit << std::endl; bool mismatch = false; for(int i=0; i < numElems; ++i) { if( h_ref0[i] != h_scan0[i] ) { std::cout << "sample" << i << ": host ref0 sum(" << h_ref0[i] << ") != device (" << h_scan0[i] << ")" << std::endl; mismatch = true; } if( h_ref1[i] != h_scan1[i] ) { std::cout << "sample" << i << ": host ref1 sum(" << h_ref1[i] << ") != device (" << h_scan1[i] << ")" << std::endl; mismatch = true; } } delete [] h_ref0; delete [] h_ref1; delete [] h_pred0; h_pred0 = NULL; delete [] h_pred1; h_pred1 = NULL; delete [] h_scan1; h_scan1 = NULL; delete [] h_scan0; h_scan0 = NULL; delete [] h_sums0; h_sums0 = NULL; delete [] h_sums1; h_sums1 = NULL; if( mismatch ) return; #endif //scan validate #endif #if 1 //sort // d_(zero|one)Scan now contains the reletive addresses for this bit's values // d_histo contains the offsets to know where to start writing // combine histo output, scan sum, and predicate to copy vals and pos // to new locations // only 1 predicate array is needed as there are only 2 outcomes (0 or 1) // so onePred is 0 for the 0's and 1 for the 1's const dim3 blockSize_sort = blockSize_pred; const dim3 gridSize_sort = gridSize_pred; // each thread sorts 1 '1' and 1 '0' // see about having a 1 thread sort just 1 value //const unsigned int histoSize = sizeof(unsigned int) * numPoss*numBits; //std::cout << "Sorting for bit" << bit << std::endl; const unsigned int sdataSize = numPoss * sizeof(unsigned int); // histo has only 1 value of interest per bit so just pass that address hipLaunchKernelGGL(( sort), dim3(gridSize_sort), dim3(blockSize_sort), sdataSize, 0, d_outV, d_outP, d_inV, d_inP, &d_histo[bit*numPoss], d_onePred, d_oneScan, d_zeroScan, numElems); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); #if SORT_VALIDATE unsigned int* h_inV = new unsigned int[numElems]; unsigned int* h_outV = new unsigned int[numElems]; unsigned int* h_pred1 = new unsigned int[numElems]; unsigned int* h_1scan = new unsigned int[numElems]; unsigned int* h_0scan = new unsigned int[numElems]; unsigned int* h_histo = new unsigned int[numPoss*numBits]; checkCudaErrors(hipMemcpy(h_inV, d_inV, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_outV, d_outV, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_pred1, d_onePred, 
(numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_0scan, d_zeroScan, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_1scan, d_oneScan, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_histo, d_histo, (numPoss*numBits*sizeof(unsigned int)), hipMemcpyDeviceToHost)); std::stringstream sstrm; sstrm << "sorts" << bit << ".txt"; std::ofstream fout(sstrm.str().c_str()); fout << "samp: inputVal outputVal 1pred 1Scan 0Scan histo" << std::endl; for(int i=0; i < numElems; ++i) { fout << " " << i << std::hex << " 0x" << h_inV[i] << " 0x" << h_outV[i] << std::dec << " " << h_pred1[i] << " " << h_1scan[i] << " " << h_0scan[i] << " " << h_histo[bit*numPoss] << " " << h_histo[bit*numPoss + 1] << std::endl; } fout.close(); delete [] h_1scan; delete [] h_0scan; delete [] h_pred1; delete [] h_inV; delete [] h_outV; #endif #endif // sort } // see if the outputs are where we want them if( d_outV != d_outputVals ) { std::cout << "Swapping inputs and outputs." << std::endl; // need to swap hipLaunchKernelGGL(( swap), dim3(gridSize_pred), dim3(blockSize_pred) , 0, 0, d_outputVals, d_outputPos, d_inputVals, d_inputPos, numElems ); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } #if CPU_VALIDATE unsigned int* h_outputVals = NULL; unsigned int* h_outputPos = NULL; unsigned int* h_refVals = NULL; unsigned int* h_refPos = NULL; h_outputVals = new unsigned int[numElems]; h_outputPos = new unsigned int[numElems]; h_refVals = new unsigned int[numElems]; h_refPos = new unsigned int[numElems]; checkCudaErrors(hipMemcpy(h_outputVals, d_outputVals, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_outputPos, d_outputPos, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); //checkCudaErrors(hipMemcpy(h_refVals, d_inputVals, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); //checkCudaErrors(hipMemcpy(h_refPos, d_inputPos, (numElems*sizeof(unsigned int)), hipMemcpyDeviceToHost)); reference_calculation(h_inputVals, h_inputPos, h_refVals, h_refPos, numElems); std::ofstream fout("inouts.txt"); quicksort::valpos_t* h_srtVP = new quicksort::valpos_t[numElems]; quicksort::valsort( h_srtVP, h_inputVals, h_inputPos, numElems); std::sort( h_inputVals, h_inputVals + numElems ); for(unsigned int i =0; i < numElems; ++i) { //if( fout.good() ) fout << i << " " << h_inputVals[i] << ":" << h_inputPos[i] << " " << h_outputVals[i] << ":" << h_outputPos[i] if( fout.good() ) fout << i << " " << h_srtVP[i].val << ":" << h_srtVP[i].pos << " " << h_outputVals[i] << ":" << h_outputPos[i] << " " << h_refVals[i] << ":" << h_refPos[i] // << " " << h_refVals[i] << std::endl; if( (h_refVals[i] != h_outputVals[i]) || (h_refPos[i] != h_outputPos[i])) { std::cout << "sample" << i << " mismatch: " << h_refVals[i] << ":" << h_refPos[i] << " and " << h_outputVals[i] << ":" << h_outputPos[i] << std::endl; } if( h_inputVals[i] != h_outputVals[i] ) { std::cout << "sorted inputVals[" << i << "]( " << h_inputVals[i] << ") != h_outputVals[" << i << "]( " << h_outputVals[i] << ")" << std::endl; } /* if( h_inputPos[i] != h_outputPos[i] ) { std::cout << "sorted inputPos[" << i << "]( " << h_inputPos[i] << ") != h_outputPos[" << i << "]( " << h_outputPos[i] << ")" << std::endl; } */ } fout.close(); delete [] h_refVals; h_refVals = NULL; delete [] h_refPos; h_refPos = NULL; delete [] h_srtVP; delete [] h_outputVals; delete [] h_outputPos; delete [] h_inputVals; delete [] h_inputPos; h_outputVals= 
NULL; h_outputPos = NULL; h_inputVals = NULL; h_inputPos = NULL; #endif checkCudaErrors(hipFree(d_histo)); checkCudaErrors(hipFree(d_zeroPred)); checkCudaErrors(hipFree(d_onePred)); checkCudaErrors(hipFree(d_zeroScan)); checkCudaErrors(hipFree(d_oneScan)); checkCudaErrors(hipFree(d_zeroSums)); checkCudaErrors(hipFree(d_oneSums)); checkCudaErrors(hipFree(d_0Sums)); checkCudaErrors(hipFree(d_1Sums)); //delete [] posVals; }
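The per-bit pass that the gen_pred, exclusive_scan, and sort kernels in the file above perform on the GPU can be summarized by a small host-side reference. The following sketch is illustrative only and is not part of the original file pair; the helper name radix_pass_host is hypothetical, and it uses plain C++ to show the same predicate → exclusive scan → scatter flow (0s first, then 1s) with the ping-pong of value/position buffers.

// Minimal host-side sketch of one LSB radix pass (illustrative only).
// The helper name radix_pass_host is hypothetical and not in the original code.
#include <vector>
#include <cstdio>

static void radix_pass_host(std::vector<unsigned int>& vals,
                            std::vector<unsigned int>& pos,
                            unsigned int bit)
{
    const size_t n = vals.size();
    std::vector<unsigned int> pred1(n), scan0(n), scan1(n);
    unsigned int zeros = 0;

    // 1) predicate: is the selected bit a 1? (also counts the 0s, i.e. the histogram)
    for (size_t i = 0; i < n; ++i) {
        pred1[i] = (vals[i] >> bit) & 1u;
        if (!pred1[i]) ++zeros;
    }

    // 2) exclusive scans of the 0-predicate and the 1-predicate (relative offsets)
    unsigned int c0 = 0, c1 = 0;
    for (size_t i = 0; i < n; ++i) {
        scan0[i] = c0; scan1[i] = c1;
        c0 += 1u - pred1[i];
        c1 += pred1[i];
    }

    // 3) scatter: 0s start at offset 0, 1s start right after all the 0s
    std::vector<unsigned int> outV(n), outP(n);
    for (size_t i = 0; i < n; ++i) {
        unsigned int dst = pred1[i] ? zeros + scan1[i] : scan0[i];
        outV[dst] = vals[i];
        outP[dst] = pos[i];
    }
    vals.swap(outV);   // "ping-pong" of the buffers between passes
    pos.swap(outP);
}

int main() {
    std::vector<unsigned int> v = {7, 2, 5, 0, 3};
    std::vector<unsigned int> p = {0, 1, 2, 3, 4};
    for (unsigned int bit = 0; bit < 3; ++bit)   // 3 bits cover values < 8
        radix_pass_host(v, p, bit);
    for (size_t i = 0; i < v.size(); ++i)
        std::printf("%u:%u ", v[i], p[i]);       // expected: 0:3 2:1 3:4 5:2 7:0
    std::printf("\n");
    return 0;
}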
644f64645d7256ad9ae054597f79fad938d1b009.cu
//Udacity HW 4 //Radix Sorting #include <cassert> #include <cstring> #include <cstdio> #include <stdint.h> #include <algorithm> #include "reference_calc.h" #include "utils.h" //#include "quicksort.h" #define MAX_THREADS 1024 /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ #if 1 /* histo[0](118983,101497), histo[1](110205,110275), histo[2](110021,110459), histo[3](109913,110567), histo[4](110267,110213), histo[5](110493,109987), histo[6](110067,110413), histo[7](109837,110643), histo[8](110064,110416), histo[9](110043,110437), histo[10](111037,109443), histo[11](110788,109692), histo[12](111193,109287), histo[13](111433,109047), histo[14](111952,108528), histo[15](112241,108239), histo[16](111609,108871), histo[17](100344,120136), histo[18](100286,120194), histo[19](101878,118602), histo[20](104156,116324), histo[21](105779,114701), histo[22](109431,111049), histo[23](103261,117219), histo[24](102942,117538), histo[25](117587,102893), histo[26](9,220471), histo[27](1,220479), histo[28](1,220479), histo[29](1,220479), histo[30](220480,0), histo[31](220480,0) */ // generate a histogram for each bit order of the numSamps inputs __global__ void gen_histo(unsigned int* d_out, const unsigned int* const d_in, const int numSamps) { /* d_in is numSamps each with 32bits (so numSamps ) d_out is 2 outputs for each bit (so 32 x 2 ) numBits must equal gridDim.y */ const int2 my2DPos = make_int2( (blockDim.x * blockIdx.x) + threadIdx.x, (blockDim.y * blockIdx.y) + threadIdx.y ); // my memory location const int my1DPos = my2DPos.x; //const int numBits = gridDim.y; if( my1DPos >= numSamps ) return; // 2 possible out bins for each of the bit orders // aka base, and max power const int2 numBins = make_int2( 2, blockIdx.y); // xbin is either 0 or 1 for all blocks const int xbin = ((d_in[my1DPos] & (0x1 << numBins.y)) == 0) ? 
0 : 1; // ybin is blockIdx.y for all blocks //const int ybin = numBins.y; const int bin1DPos = xbin + (numBins.y * numBins.x); atomicAdd( &(d_out[bin1DPos]), 1); } #endif __global__ void gen_pred(unsigned int* d_zeroOut, unsigned int* d_oneOut, const unsigned int* const d_in, const unsigned int numSamps, const unsigned int shift) { // d_zeroOut is the predicate output for zeros // d_oneOut is the predicate output for the ones // d_in is the inputs // numSamps is the number of samples in the input, // we will have numSamps output to each of the // output arrays // shift is the bit specifier (which bit location of // the input to check if is a 1 or a 0 // my memory location const int myPos = (blockDim.x * blockIdx.x) + threadIdx.x; if( myPos >= numSamps ) return; if( (d_in[myPos] & (0x1<<shift)) == 0 ) { d_zeroOut[myPos] = 1; d_oneOut[myPos] = 0; } else { d_zeroOut[myPos] = 0; d_oneOut[myPos] = 1; } } #define LOAD_INPUTS 1 #define REDUCE_STEPS 1 #define SAVE_SUM 1 #define DOWNSWEEP_STEPS 1 #define SAVE_OUTPUTS 1 __global__ void exclusive_scan(unsigned int* d_out, unsigned int* d_sums, const unsigned int* const d_in, const int numSamps) { // blelloch scan (reduce and downsweep) extern __shared__ unsigned int sdata[]; const int tid = threadIdx.x; // block is sized for half the data (1 thread does 2 samps) so *2 const int numBlkSamps = blockDim.x*2; const int myPos = threadIdx.x + (blockIdx.x * blockDim.x); int offset = 1; // zero out the shared memory //(this is required but only for the threads that return early) // yet go ahead and do for all sdata[2*tid] = 0; sdata[2*tid+1] = 0; __syncthreads(); if( (2*myPos+1) >= numSamps ) { //printf("returning early BlockIdx = (%d, %d, %d), BlockDim = (%d, %d, %d)\n", // blockIdx.x, blockIdx.y, blockIdx.z, // blockDim.x, blockDim.y, blockDim.z); return; } #if LOAD_INPUTS // copy data into shared mem sdata[2*tid] = d_in[2*myPos]; sdata[2*tid+1] = d_in[2*myPos+1]; __syncthreads(); #if 0 //__CUDA_ARCH__ >= 200 // if( tid == 0 ) // { // printf("BlockIdx = (%d, %d, %d), BlockDim = (%d, %d, %d)\n", // blockIdx.x, blockIdx.y, blockIdx.z, // blockDim.x, blockDim.y, blockDim.z); //} if( sdata[2*tid] > numSamps || sdata[2*tid+1] > numSamps ) { printf("Input data(%u, %u) is larger than numSamps(%u)\n", d_in[2*myPos], d_in[2*myPos+1], numSamps); return; } #endif #endif #if REDUCE_STEPS // reductions steps for(unsigned int s=(numBlkSamps>>1); s > 0; s >>= 1) { if(tid < s) { int a = offset*(2*tid+1)-1; int b = offset*(2*tid+2)-1; sdata[b] += sdata[a]; } __syncthreads(); offset <<= 1; } #endif #if SAVE_SUM // reduction step done, clear the last element after saving it to sums if( tid == 0 ) { if( d_sums ) { // save out the sum before clearing (it is inclusive) d_sums[blockIdx.x] = sdata[numBlkSamps-1]; // we don't have to do anything special about the last block // (which may not have a complete block's worth of samples) // because we don't care about the last blocks's sum // with an exclusive scan } #if 0 //__CUDA_ARCH__ >= 200 //printf("d_sum[%d] = %u\n", blockIdx.x, d_sums[blockIdx.x]); if( sdata[numBlkSamps-1] > numSamps ) { printf("Final block data(%u) is larger than numSamps(%u)\n", sdata[numBlkSamps-1], numSamps); return; } #endif sdata[numBlkSamps-1] = 0; } #endif #if DOWNSWEEP_STEPS // downsweep steps for(unsigned int s=1; s < numBlkSamps; s *= 2) { offset >>= 1; if( tid < s ) { int a = offset*(2*tid+1)-1; int b = offset*(2*tid+2)-1; unsigned int tmp = sdata[a]; sdata[a] = sdata[b]; sdata[b] += tmp; } __syncthreads(); } #endif #if SAVE_OUTPUTS // copy to 
output d_out[2*myPos] = sdata[2*tid]; d_out[2*myPos+1] = sdata[2*tid+1]; #if 0 //__CUDA_ARCH__ >= 200 if( sdata[2*tid] > numSamps || sdata[2*tid+1] > numSamps ) { printf("output data(%u, %u) is larger than numSamps(%u)\n", d_out[2*tid], d_out[2*tid+1], numSamps); return; } #endif #endif } #define SLOW_GEN_SCAN 1 __global__ void exclusive_scan_gen( unsigned int* d_out, const unsigned int* const d_unused, const unsigned int* const d_in, const unsigned int numSamps) { //extern __shared__ unsigned int shisto[]; // my memory location //const int myPos = (blockDim.x * blockIdx.x) + threadIdx.x; const int tid = threadIdx.x; if( tid >= numSamps ) { return; } #if SLOW_GEN_SCAN if( tid == 0 ) { d_out[0] = 0; for(int i=1; i < numSamps; ++i) { d_out[i] = d_in[i-1] + d_out[i-1]; } } #else d_out[tid] = d_in[tid]; #endif } __global__ void sum_2input( unsigned int* d_out, const unsigned int* const d_in1, const unsigned int* const d_sums, const unsigned int numSamps) { extern __shared__ unsigned int sdata[]; const int tid = threadIdx.x; const int myPos = threadIdx.x + (blockIdx.x * blockDim.x); if( myPos >= numSamps ) { return; } if( tid == 0 ) { sdata[0] = d_sums[blockIdx.x]; } __syncthreads(); //d_out[myPos] = d_in1[myPos] + d_sums[blockIdx.x]; d_out[myPos] = d_in1[myPos] + sdata[0]; } #define LOAD_SH_SORT 1 #define LOAD_IN_SORT 1 #define LOAD_PD_SORT 1 #define SAVE_SORT 1 __global__ void sort( unsigned int* d_outV, unsigned int* d_outP, const unsigned int* const d_inV, const unsigned int* const d_inP, const unsigned int* const d_histo, const unsigned int* const d_1Pred, const unsigned int* const d_1Scan, const unsigned int* const d_0Scan, const unsigned int numSamps) { extern __shared__ unsigned int shisto[]; // my memory location const int myPos = (blockDim.x * blockIdx.x) + threadIdx.x; const int tid = threadIdx.x; if( myPos >= numSamps ) { return; } #if LOAD_SH_SORT // read in histo data if( tid == 0 ) { shisto[0] = 0; shisto[1] = d_histo[0]; } __syncthreads(); #endif #if LOAD_IN_SORT // this thread's inputs const unsigned int inV = d_inV[myPos]; const unsigned int inP = d_inP[myPos]; #else const unsigned int inV = 0; const unsigned int inP = 0; #endif #if LOAD_PD_SORT // this thread's predicate ( if ==0 is a 0 val, if ==1 is a 1 val) const unsigned int pred = d_1Pred[myPos]; #else const unsigned int pred = 0; #endif // this thread's relative location in 0's output or 1's output const unsigned int relIdx = pred ? d_1Scan[myPos]: d_0Scan[myPos]; // this thread's starting location const unsigned int startIdx = pred ? shisto[1] : shisto[0]; // this thread's output location const unsigned int outIdx = startIdx + relIdx; #if SAVE_SORT // write output to new location if( outIdx < numSamps ) { d_outV[outIdx] = inV; d_outP[outIdx] = inP; } #if __CUDA_ARCH__ >= 200 else { printf("block(%d,%d,%d):thread(%d,%d,%d): OutIdx(%u) is too large(%u)! 
pred(%u) relIdx(%u) startIdx(%u) histo(%u, %u)\n", blockIdx.x,blockIdx.y,blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z, outIdx, numSamps, pred, relIdx, startIdx, shisto[0], shisto[1]); } #endif #endif } __global__ void swap( unsigned int* const d_outVals, unsigned int* const d_outPos, unsigned int* const d_inVals, unsigned int* const d_inPos, const unsigned int numSamps ) { // my memory location const int myPos = (blockDim.x * blockIdx.x) + threadIdx.x; //const int tid = threadIdx.x; if( myPos >= numSamps ) { return; } unsigned int inV = d_inVals[myPos]; unsigned int inP = d_inPos[myPos]; d_inVals[myPos] = d_outVals[myPos]; d_inPos[myPos] = d_outPos[myPos]; d_outVals[myPos] = inV; d_outPos[myPos] = inP; } // gen_histo works, yay!!! #define HISTO_REF 0 #if HISTO_REF #include <fstream> #endif // preds seem to be ok #define PRED_VALIDATE 0 // working looks like #define SCAN_VALIDATE 0 // sums scan are ok #define SUMS_VALIDATE 0 #define SORT_VALIDATE 0 #if SORT_VALIDATE #include <sstream> #include <fstream> #endif // do not use with other validate #define CPU_VALIDATE 0 #if CPU_VALIDATE #include <fstream> #endif void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) //TODO //PUT YOUR SORT HERE { const unsigned int numPoss = 2; // number of possible outcomes const unsigned int numBits = 32; // number of bits #if CPU_VALIDATE unsigned int* h_inputVals = NULL; unsigned int* h_inputPos = NULL; h_inputVals = new unsigned int[numElems]; h_inputPos = new unsigned int[numElems]; checkCudaErrors(cudaMemcpy(h_inputVals, d_inputVals, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_inputPos, d_inputPos, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); #endif // numElems must be a power of 2 assert( (numElems%2) == 0 ); #if HISTO_REF unsigned int histo[numBits*numPoss]; unsigned int dev_histo[numBits*numPoss]; std::cout << "NumElems: " << numElems << ", sizeof(histo): " << sizeof(histo) << std::endl; memset( histo, 0, sizeof(histo) ); unsigned int* h_inputVals = new unsigned int[numElems]; checkCudaErrors(cudaMemcpy(h_inputVals, d_inputVals, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); std::ofstream fout("inputVals.txt"); for(int i=0; i < numElems; ++i) { if( fout.good() ) fout << std::hex << h_inputVals[i] << std::endl; else std::cout << "Cannot write to inputVals.txt!" << std::endl; } fout.close(); for(int i=0; i < numElems; ++i) { for(int b=0; b < numBits; ++b) { if( (h_inputVals[i] & (1<<b)) == 0 ) ++(histo[b*numPoss]); else ++(histo[b*numPoss+1]); } } //std::cout << "histo calculated" << std::endl; //for(int i=0; i < numBits; ++i) //{ // std::cout << "histo[" << i << "](" << histo[i*numPoss] << "," << histo[i*numPoss+1] << "), "; //} //std::cout << std::endl; #endif /* uint2* posVals = new uint2[numElems]; for(int i=0; i < numElems; ++i) { posVals[i].x = d_inputVals[i]; posVals[i].y = d_inputPos[i]; } */ const int numThreads = (MAX_THREADS < numElems) ? 
MAX_THREADS : numElems; const dim3 blockSize_histo( numThreads, 1, 1); const dim3 gridSize_histo( (numElems + blockSize_histo.x-1) / blockSize_histo.x, numBits, 1); unsigned int* d_histo = NULL; checkCudaErrors(cudaMalloc(&d_histo, sizeof(unsigned int) * numPoss * numBits )); checkCudaErrors(cudaMemset(d_histo, 0, sizeof(unsigned int) * numPoss*numBits)); gen_histo<<<gridSize_histo, blockSize_histo>>>( d_histo, d_inputVals, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // we can execlusively scan the d_histo to put so the // follow-on functions can look at the same location // (e.g. 0's read d_histo[blockIdx][0] (always 0) and 1's read d_histo[blockIdx][1]), // but since only 2 values just as east to have 0's always write to 0 // and have the 1's read d_histo[blockIdx][0]; #if HISTO_REF checkCudaErrors(cudaMemcpy(dev_histo, d_histo, sizeof(unsigned int) * numPoss * numBits, cudaMemcpyDeviceToHost)); for(int i=0; i < numBits; ++i) { if( histo[i*numPoss] != dev_histo[i*numPoss] ) std::cout << "histo[" << i << "](" << histo[i*numPoss] << "," << dev_histo[i*numPoss] << "), "; if( histo[i*numPoss+1] != dev_histo[i*numPoss+1] ) std::cout << "histo[" << i << "](" << histo[i*numPoss+1] << "," << dev_histo[i*numPoss+1] << ")\n"; } std::cout << std::endl; #endif const dim3 blockSize_pred( numThreads, 1, 1); const dim3 gridSize_pred( (numElems + blockSize_pred.x-1) / blockSize_pred.x, 1, 1); unsigned int* d_zeroPred = NULL; unsigned int* d_onePred = NULL; checkCudaErrors(cudaMalloc(&d_zeroPred, sizeof(unsigned int) * numElems )); checkCudaErrors(cudaMalloc(&d_onePred, sizeof(unsigned int) * numElems )); // each thread in the scan does two elements so numThreads/2 with same gridSize // for half the number of threads as elements const dim3 blockSize_scan( (numThreads/2), 1, 1 ); const dim3 gridSize_scan( (numElems + blockSize_pred.x-1) / blockSize_pred.x, 1, 1); // numThreads is a power of 2, ok //std::cout << "numThreads = " << numThreads << std::endl; unsigned int* d_zeroScan = NULL; unsigned int* d_oneScan = NULL; unsigned int* d_zeroSums = NULL; unsigned int* d_oneSums = NULL; unsigned int* d_1Sums = NULL; unsigned int* d_0Sums = NULL; // if there are an odd number of blocks set pad to 1 to we can specify an even number of blocks unsigned int pad = 0; if( gridSize_scan.x % 2 ) { ++pad; } assert( (gridSize_scan.x + pad) % 2 == 0 ); checkCudaErrors(cudaMalloc(&d_zeroScan, sizeof(unsigned int) * numElems )); checkCudaErrors(cudaMalloc(&d_oneScan, sizeof(unsigned int) * numElems )); checkCudaErrors(cudaMalloc(&d_zeroSums, sizeof(unsigned int) * (gridSize_scan.x + pad))); checkCudaErrors(cudaMalloc(&d_oneSums, sizeof(unsigned int) * (gridSize_scan.x + pad))); checkCudaErrors(cudaMalloc(&d_0Sums, sizeof(unsigned int) * (gridSize_scan.x + pad))); checkCudaErrors(cudaMalloc(&d_1Sums, sizeof(unsigned int) * (gridSize_scan.x + pad))); //std::cout << "Scan gridSize_scan.x = " << gridSize_scan.x << ", pad = " << pad //<< ", blockSize_scan.x = " << blockSize_scan.x //<< std::endl; // we are using the bufferes to ping-pong back and forth // so input and outputs change each loop iterator // (setup backwards as the loop will swap before using) unsigned int *d_outV = d_inputVals; unsigned int *d_outP = d_inputPos; unsigned int *d_inV = d_outputVals; unsigned int *d_inP = d_outputPos; for(unsigned int bit=0; bit < numBits; ++bit) { //swap input and output pointers (we do it before so when // we leave the loop the pointers will point to their last // action (input or output) if( d_outV == 
d_outputVals ) { d_outV = d_inputVals; d_outP = d_inputPos; d_inV = d_outputVals; d_inP = d_outputPos; } else { d_outV = d_outputVals; d_outP = d_outputPos; d_inV = d_inputVals; d_inP = d_inputPos; } // get predicates for if is a 1 or is a 0 gen_pred<<<gridSize_pred, blockSize_pred>>>( d_zeroPred, d_onePred, d_inV, numElems, bit); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); #if PRED_VALIDATE unsigned int* h_one = NULL; unsigned int* h_zero = NULL; unsigned int* h_inputVals = new unsigned int[numElems]; h_one = new unsigned int[numElems]; h_zero = new unsigned int[numElems]; checkCudaErrors(cudaMemcpy(h_inputVals, d_inV, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_one, d_onePred, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_zero, d_zeroPred, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); std::cout << "Checking predicates for bit" << bit << std::endl; std::stringstream psstrm; psstrm << "preds" << bit << ".txt"; std::ofstream pfout(psstrm.str().c_str()); pfout << "Samp: Pred0 Pred1 inputVals" << std::endl; for(int i=0; i < numElems; ++i) { pfout << std::dec << " " << i << " " << h_zero[i] << " " << h_one[i] << " 0x" << std::hex << h_inputVals[i] << std::endl; // std::cout << std::dec << "bit" << bit << ",samp" << i // << ": h_zero = " << h_zero[i] << ", h_one = " << h_one[i] // << ", inputVal = " << std::hex << h_inputVals[i] // << std::endl; if( (h_inputVals[i] & (0x1<<bit)) == 0 ) { if( !h_zero[i] ) std::cout << "sample" << i << ", bit" << bit << ": is 0 but pred0 = " << h_zero[i] << std::endl; } else { if( !h_one[i] ) std::cout << "sample" << i << ", bit" << bit << ": is 1 but pred1 = " << h_one[i] << std::endl; } if(h_one[i] != 0 && h_one[i] != 1) { std::cout << "OnePred[" << i << "] (" << h_one[i] << ") is not a 0 or 1!" << std::endl; } if(h_zero[i] != 0 && h_zero[i] != 1) { std::cout << "ZeroPred[" << i << "] (" << h_zero[i] << ") is not a 0 or 1!" 
<< std::endl; } if( (h_zero[i]) != !(h_one[i]) ) std::cout << "invalid: h_zero[" << i << "](" << h_zero << "), h_one[" << i << "](" << h_one[i] << ")" << std::endl; } delete [] h_inputVals; h_inputVals = NULL; delete [] h_one; h_one = NULL; delete [] h_zero; h_zero = NULL; #endif #if 1 //scan // exclusive scan the output of the predicate arrays // to get the relative offsets exclusive_scan<<<gridSize_scan, blockSize_scan, (blockSize_scan.x*2*sizeof(unsigned int))>>> (d_zeroScan, d_0Sums, //outputs d_zeroPred, //inputs numElems); // params cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); exclusive_scan<<<gridSize_scan, blockSize_scan, blockSize_scan.x*2*sizeof(unsigned int)>>> (d_oneScan, d_1Sums, d_onePred, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); #if SCAN_VALIDATE unsigned int* h_scan1 = NULL; unsigned int* h_scan0 = NULL; unsigned int* h_sums1 = NULL; unsigned int* h_sums0 = NULL; unsigned int* h_pred0 = NULL; unsigned int* h_pred1 = NULL; h_pred0 = new unsigned int[numElems]; h_pred1 = new unsigned int[numElems]; h_scan1 = new unsigned int[numElems]; h_scan0 = new unsigned int[numElems]; h_sums1 = new unsigned int[(gridSize_scan.x + pad)]; h_sums0 = new unsigned int[(gridSize_scan.x + pad)]; checkCudaErrors(cudaMemcpy(h_pred1, d_onePred, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_pred0, d_zeroPred, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_scan1, d_oneScan, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_scan0, d_zeroScan, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_sums0, d_0Sums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_sums1, d_1Sums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); // for(int i=0; i < (gridSize_scan.x + pad); ++i) // { // std::cout << "bit" << bit << " sums" << i << " (" << h_sums0[i] << ", " << h_sums1[i] << ")" << std::endl; // } //std::cout << "Checking scans for bit" << bit << std::endl; for(int i=0; i < gridSize_scan.x; ++i) { for(int j=1; j < blockSize_scan.x; ++j) { const unsigned int idx = i*blockSize_scan.x + j; if( idx < numElems ) { if( h_scan1[idx] > numElems) { std::cout << "idx = " << idx << ", Scan1[" << i << "][" << j << "]( " << h_scan1[idx] << ") is more than numElems(" << numElems << ")" << std::endl; } if( h_scan0[idx] > numElems) { std::cout << "idx = " << idx << ", Scan0[" << i << "][" << j << "]( " << h_scan0[idx] << ") is more than numElems(" << numElems << ")" << std::endl; return; } } } } #endif //scan validate #endif // we don't support recursive scans (if number of blocks of // the first scan is more than will fit into a block then // we will need to do the scan->sum step multiple times // and we don't do that yet). 
assert( gridSize_scan.x <= MAX_THREADS ); #if 1 //SUM_SCANS // now scan the sums (pad is needed in case was an odd number of blocks // as scan only supports multiple of 2 inputs since it does 2 per thread) // /2 then + 1 as the numThreads launched can be odd const dim3 blockSize_sums( (gridSize_scan.x/2 + pad), 1, 1); const dim3 gridSize_sums(1, 1, 1); //std::cout << "gridSize_sums = " << gridSize_sums.x // << ", blockSize_sums = " << blockSize_sums.x // << ", sharedSize = " << 2*blockSize_sums.x*2 << " | " << 2*blockSize_sums.x*2*sizeof(unsigned int) // << "b, numSamps = " << (gridSize_scan.x + pad) // << std::endl; #if SUMS_VALIDATE unsigned int* h_sin1 = NULL; unsigned int* h_sin0 = NULL; unsigned int* h_sout1 = NULL; unsigned int* h_sout0 = NULL; h_sin1 = new unsigned int[(gridSize_scan.x + pad)]; h_sin0 = new unsigned int[(gridSize_scan.x + pad)]; h_sout1 = new unsigned int[(gridSize_scan.x + pad)]; h_sout0 = new unsigned int[(gridSize_scan.x + pad)]; checkCudaErrors(cudaMemcpy(h_sin0, d_0Sums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_sin1, d_1Sums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); #endif // numSamps: +pad because numSamps must be multiple of 2 exclusive_scan_gen<<< gridSize_sums, blockSize_sums, 2*blockSize_sums.x*2*sizeof(unsigned int)>>> ( d_oneSums, NULL, d_1Sums, (gridSize_scan.x+pad) ); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); exclusive_scan_gen<<< gridSize_sums, blockSize_sums, 2*blockSize_sums.x*2*sizeof(unsigned int)>>> ( d_zeroSums, NULL, d_0Sums, (gridSize_scan.x+pad) ); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); #if SUMS_VALIDATE checkCudaErrors(cudaMemcpy(h_sout0, d_zeroSums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_sout1, d_oneSums, ((gridSize_scan.x + pad)*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); bool mismatch = false; unsigned int* h_sref0 = new unsigned int[(gridSize_scan.x+pad)]; unsigned int* h_sref1 = new unsigned int[(gridSize_scan.x+pad)]; h_sref0[0] = 0; h_sref1[0] = 0; for(int i=1; i < (gridSize_scan.x+pad); ++i ) { h_sref0[i] = h_sref0[i-1] + h_sin0[i-1]; h_sref1[i] = h_sref1[i-1] + h_sin1[i-1]; // std::cout << "h_sin(" << h_sin0[i-1] << "," << h_sin1[i-1] // << ") h_sref(" << h_sref0[i-1] << "," << h_sref1[i-1] // << ") h_sout(" << h_sout0[i-1] << "," << h_sout1[i-1] << ")" // << std::endl; } #if 1 for(int i=1; i < (gridSize_scan.x + pad); ++i) { if( h_sref0[i] != h_sout0[i] ) { mismatch = true; std::cout << "sample" << i << ": h_sref0 = " << h_sref0[i] << ", h_sout0 = " << h_sout0[i] << std::endl; } if( h_sref1[i] != h_sout1[i] ) { mismatch = true; std::cout << "sample" << i << ": h_sref1 = " << h_sref1[i] << ", h_sout1 = " << h_sout1[i] << std::endl; } if( (h_sin0[i-1] + h_sout0[i-1]) != h_sout0[i] ) { mismatch = true; std::cout << "bit" << bit << ", sumsScan0 incorrect: in[" << i << "] + out[" << i-1 << "]" << " != out[" << i << "] ( " << h_sin0[i] << " + " << h_sout0[i-1] << " != " << h_sout0[i] << std::endl; } if( (h_sin1[i-1] + h_sout1[i-1]) != h_sout1[i] ) { mismatch = true; std::cout << "bit" << bit << ", sumsScan1 incorrect: in[" << i << "] + out[" << i-1 << "]" << " != out[" << i << "] ( " << h_sin1[i] << " + " << h_sout1[i-1] << " != " << h_sout1[i] << std::endl; } } #endif delete [] h_sin0; h_sin0 = NULL; delete [] h_sin1; h_sin1 = NULL; delete [] h_sout0; h_sout0 = NULL; delete [] h_sout1; h_sout1 = NULL; if( mismatch ) return; #endif 
//sums validate #endif //scans #if 1 //sums // then add the scaned sums to each previously scanned block sum_2input<<< gridSize_pred, blockSize_pred, sizeof(unsigned int) >>> ( d_zeroScan, d_zeroScan, d_zeroSums, numElems ); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); sum_2input<<< gridSize_pred, blockSize_pred, sizeof(unsigned int) >>> ( d_oneScan, d_oneScan, d_oneSums, numElems ); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); #if SCAN_VALIDATE unsigned int* h_ref0 = new unsigned int[numElems]; unsigned int* h_ref1 = new unsigned int[numElems]; checkCudaErrors(cudaMemcpy(h_scan1, d_oneScan, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_scan0, d_zeroScan, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); // host scan h_ref0[0] = 0; h_ref1[0] = 0; for(int i=1; i < numElems; ++i) { h_ref0[i] = h_pred0[i-1] + h_ref0[i-1]; h_ref1[i] = h_pred1[i-1] + h_ref1[i-1]; //std::cout << "sample" << i << ": h_ref0 = " << h_ref0[i] << ", h_ref1 = " << h_ref1[i] << std::endl; } //std::cout << "Checking final scan for bit" << bit << std::endl; bool mismatch = false; for(int i=0; i < numElems; ++i) { if( h_ref0[i] != h_scan0[i] ) { std::cout << "sample" << i << ": host ref0 sum(" << h_ref0[i] << ") != device (" << h_scan0[i] << ")" << std::endl; mismatch = true; } if( h_ref1[i] != h_scan1[i] ) { std::cout << "sample" << i << ": host ref1 sum(" << h_ref1[i] << ") != device (" << h_scan1[i] << ")" << std::endl; mismatch = true; } } delete [] h_ref0; delete [] h_ref1; delete [] h_pred0; h_pred0 = NULL; delete [] h_pred1; h_pred1 = NULL; delete [] h_scan1; h_scan1 = NULL; delete [] h_scan0; h_scan0 = NULL; delete [] h_sums0; h_sums0 = NULL; delete [] h_sums1; h_sums1 = NULL; if( mismatch ) return; #endif //scan validate #endif #if 1 //sort // d_(zero|one)Scan now contains the reletive addresses for this bit's values // d_histo contains the offsets to know where to start writing // combine histo output, scan sum, and predicate to copy vals and pos // to new locations // only 1 predicate array is needed as there are only 2 outcomes (0 or 1) // so onePred is 0 for the 0's and 1 for the 1's const dim3 blockSize_sort = blockSize_pred; const dim3 gridSize_sort = gridSize_pred; // each thread sorts 1 '1' and 1 '0' // see about having a 1 thread sort just 1 value //const unsigned int histoSize = sizeof(unsigned int) * numPoss*numBits; //std::cout << "Sorting for bit" << bit << std::endl; const unsigned int sdataSize = numPoss * sizeof(unsigned int); // histo has only 1 value of interest per bit so just pass that address sort<<< gridSize_sort, blockSize_sort, sdataSize>>> ( d_outV, d_outP, d_inV, d_inP, &d_histo[bit*numPoss], d_onePred, d_oneScan, d_zeroScan, numElems); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); #if SORT_VALIDATE unsigned int* h_inV = new unsigned int[numElems]; unsigned int* h_outV = new unsigned int[numElems]; unsigned int* h_pred1 = new unsigned int[numElems]; unsigned int* h_1scan = new unsigned int[numElems]; unsigned int* h_0scan = new unsigned int[numElems]; unsigned int* h_histo = new unsigned int[numPoss*numBits]; checkCudaErrors(cudaMemcpy(h_inV, d_inV, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_outV, d_outV, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_pred1, d_onePred, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_0scan, d_zeroScan, (numElems*sizeof(unsigned 
int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_1scan, d_oneScan, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_histo, d_histo, (numPoss*numBits*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); std::stringstream sstrm; sstrm << "sorts" << bit << ".txt"; std::ofstream fout(sstrm.str().c_str()); fout << "samp: inputVal outputVal 1pred 1Scan 0Scan histo" << std::endl; for(int i=0; i < numElems; ++i) { fout << " " << i << std::hex << " 0x" << h_inV[i] << " 0x" << h_outV[i] << std::dec << " " << h_pred1[i] << " " << h_1scan[i] << " " << h_0scan[i] << " " << h_histo[bit*numPoss] << " " << h_histo[bit*numPoss + 1] << std::endl; } fout.close(); delete [] h_1scan; delete [] h_0scan; delete [] h_pred1; delete [] h_inV; delete [] h_outV; #endif #endif // sort } // see if the outputs are where we want them if( d_outV != d_outputVals ) { std::cout << "Swapping inputs and outputs." << std::endl; // need to swap swap<<< gridSize_pred, blockSize_pred >>> ( d_outputVals, d_outputPos, d_inputVals, d_inputPos, numElems ); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } #if CPU_VALIDATE unsigned int* h_outputVals = NULL; unsigned int* h_outputPos = NULL; unsigned int* h_refVals = NULL; unsigned int* h_refPos = NULL; h_outputVals = new unsigned int[numElems]; h_outputPos = new unsigned int[numElems]; h_refVals = new unsigned int[numElems]; h_refPos = new unsigned int[numElems]; checkCudaErrors(cudaMemcpy(h_outputVals, d_outputVals, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_outputPos, d_outputPos, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); //checkCudaErrors(cudaMemcpy(h_refVals, d_inputVals, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); //checkCudaErrors(cudaMemcpy(h_refPos, d_inputPos, (numElems*sizeof(unsigned int)), cudaMemcpyDeviceToHost)); reference_calculation(h_inputVals, h_inputPos, h_refVals, h_refPos, numElems); std::ofstream fout("inouts.txt"); quicksort::valpos_t* h_srtVP = new quicksort::valpos_t[numElems]; quicksort::valsort( h_srtVP, h_inputVals, h_inputPos, numElems); std::sort( h_inputVals, h_inputVals + numElems ); for(unsigned int i =0; i < numElems; ++i) { //if( fout.good() ) fout << i << " " << h_inputVals[i] << ":" << h_inputPos[i] << " " << h_outputVals[i] << ":" << h_outputPos[i] if( fout.good() ) fout << i << " " << h_srtVP[i].val << ":" << h_srtVP[i].pos << " " << h_outputVals[i] << ":" << h_outputPos[i] << " " << h_refVals[i] << ":" << h_refPos[i] // << " " << h_refVals[i] << std::endl; if( (h_refVals[i] != h_outputVals[i]) || (h_refPos[i] != h_outputPos[i])) { std::cout << "sample" << i << " mismatch: " << h_refVals[i] << ":" << h_refPos[i] << " and " << h_outputVals[i] << ":" << h_outputPos[i] << std::endl; } if( h_inputVals[i] != h_outputVals[i] ) { std::cout << "sorted inputVals[" << i << "]( " << h_inputVals[i] << ") != h_outputVals[" << i << "]( " << h_outputVals[i] << ")" << std::endl; } /* if( h_inputPos[i] != h_outputPos[i] ) { std::cout << "sorted inputPos[" << i << "]( " << h_inputPos[i] << ") != h_outputPos[" << i << "]( " << h_outputPos[i] << ")" << std::endl; } */ } fout.close(); delete [] h_refVals; h_refVals = NULL; delete [] h_refPos; h_refPos = NULL; delete [] h_srtVP; delete [] h_outputVals; delete [] h_outputPos; delete [] h_inputVals; delete [] h_inputPos; h_outputVals= NULL; h_outputPos = NULL; h_inputVals = NULL; h_inputPos = NULL; #endif checkCudaErrors(cudaFree(d_histo)); checkCudaErrors(cudaFree(d_zeroPred)); 
checkCudaErrors(cudaFree(d_onePred)); checkCudaErrors(cudaFree(d_zeroScan)); checkCudaErrors(cudaFree(d_oneScan)); checkCudaErrors(cudaFree(d_zeroSums)); checkCudaErrors(cudaFree(d_oneSums)); checkCudaErrors(cudaFree(d_0Sums)); checkCudaErrors(cudaFree(d_1Sums)); //delete [] posVals; }
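The exclusive_scan kernel in the file above implements a Blelloch-style work-efficient scan within each block — an upsweep (reduce) phase, clearing of the last element (saved to d_sums), and a downsweep phase — with the per-block sums scanned separately and added back by sum_2input. The sketch below is a host-side illustration of those two phases on a small power-of-two array; it is not part of the original file, and the function name blelloch_exclusive_scan is hypothetical.

// Illustrative host-side Blelloch exclusive scan (upsweep, clear last, downsweep).
// Not part of the original file; the function name is hypothetical.
#include <cstdio>

static void blelloch_exclusive_scan(unsigned int* data, unsigned int n) // n: power of two
{
    // upsweep (reduce): build partial sums in place
    for (unsigned int offset = 1; offset < n; offset <<= 1)
        for (unsigned int i = 0; i + 2 * offset <= n; i += 2 * offset)
            data[i + 2 * offset - 1] += data[i + offset - 1];

    data[n - 1] = 0; // the cleared total is what the kernel writes to d_sums

    // downsweep: propagate exclusive prefixes back down the tree
    for (unsigned int offset = n >> 1; offset >= 1; offset >>= 1)
        for (unsigned int i = 0; i + 2 * offset <= n; i += 2 * offset) {
            unsigned int t = data[i + offset - 1];
            data[i + offset - 1] = data[i + 2 * offset - 1];
            data[i + 2 * offset - 1] += t;
        }
}

int main() {
    unsigned int a[8] = {1, 0, 1, 1, 0, 1, 0, 1};    // e.g. a predicate array
    blelloch_exclusive_scan(a, 8);
    for (unsigned int v : a) std::printf("%u ", v);  // expected: 0 1 1 2 3 3 4 4
    std::printf("\n");
    return 0;
}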
686b1fd46448155aae933e4b09087d33b84aa6d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // RUN: %run_test hipify "%s" "%t" %cuda_args /* Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include<cuda.h> #include<cuda_runtime.h> #include<iostream> #include<unistd.h> #include<stdio.h> #include<malloc.h> #define LEN 1024 #define SIZE LEN * sizeof(float) #define ITER 1024*1024 // CHECK: if(status != hipSuccess) { #define check(msg, status){ \ if(status != hipSuccess) { \ printf("%s failed. \n", #msg); \ } \ } __global__ void Inc1(float *Ad, float *Bd){ // CHECK: int tx = hipThreadIdx_x + hipBlockIdx_x * hipBlockDim_x; int tx = threadIdx.x + blockIdx.x * blockDim.x; if(tx < 1 ){ for(int i=0;i<ITER;i++){ Ad[tx] = Ad[tx] + 1.0f; for(int j=0;j<256;j++){ Bd[tx] = Ad[tx]; } } } } __global__ void Inc2(float *Ad, float *Bd){ // CHECK: int tx = hipThreadIdx_x + hipBlockIdx_x * hipBlockDim_x; int tx = threadIdx.x + blockIdx.x * blockDim.x; if(tx < 1024){ for(int i=0;i<ITER;i++){ Ad[tx] = Ad[tx] + 1.0f; for(int j=0;j<256;j++){ Bd[tx] = Ad[tx]; } } } } int main(){ float *A, *Ad, *Bd; A = new float[LEN]; for(int i=0;i<LEN;i++){ A[i] = 0.0f; } // CHECK: hipError_t status; hipError_t status; // CHECK: status = hipHostRegister(A, SIZE, hipHostRegisterMapped); status = hipHostRegister(A, SIZE, hipHostRegisterMapped); check("Registering A",status); // CHECK: hipHostGetDevicePointer(&Ad, A, 0); hipHostGetDevicePointer(&Ad, A, 0); // CHECK: hipMalloc((void**) &Bd, SIZE); hipMalloc((void**) &Bd, SIZE); dim3 dimGrid(LEN/512,1,1); dim3 dimBlock(512,1,1); // CHECK: hipLaunchKernelGGL(Inc1, dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd); hipLaunchKernelGGL(( Inc1), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd); sleep(3); A[0] = -(ITER*1.0f); std::cout<<"Same cache line before completion: \t"<< A[0]<<std::endl; // CHECK: hipDeviceSynchronize(); hipDeviceSynchronize(); std::cout<<"Same cache line after completion: \t"<< A[0]<<std::endl; for(int i=0;i<LEN;i++){ A[i] = 0.0f; } // CHECK: hipLaunchKernelGGL(Inc2, dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd); hipLaunchKernelGGL(( Inc2), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd); sleep(3); A[0] = -(ITER*1.0f); std::cout<<"Diff cache line before completion: \t"<<A[0]<<std::endl; // CHECK: hipDeviceSynchronize(); hipDeviceSynchronize(); std::cout<<"Diff cache line after completion: \t"<<A[0]<<std::endl; }
686b1fd46448155aae933e4b09087d33b84aa6d0.cu
// RUN: %run_test hipify "%s" "%t" %cuda_args /* Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include<cuda.h> #include<cuda_runtime.h> #include<iostream> #include<unistd.h> #include<stdio.h> #include<malloc.h> #define LEN 1024 #define SIZE LEN * sizeof(float) #define ITER 1024*1024 // CHECK: if(status != hipSuccess) { #define check(msg, status){ \ if(status != cudaSuccess) { \ printf("%s failed. \n", #msg); \ } \ } __global__ void Inc1(float *Ad, float *Bd){ // CHECK: int tx = hipThreadIdx_x + hipBlockIdx_x * hipBlockDim_x; int tx = threadIdx.x + blockIdx.x * blockDim.x; if(tx < 1 ){ for(int i=0;i<ITER;i++){ Ad[tx] = Ad[tx] + 1.0f; for(int j=0;j<256;j++){ Bd[tx] = Ad[tx]; } } } } __global__ void Inc2(float *Ad, float *Bd){ // CHECK: int tx = hipThreadIdx_x + hipBlockIdx_x * hipBlockDim_x; int tx = threadIdx.x + blockIdx.x * blockDim.x; if(tx < 1024){ for(int i=0;i<ITER;i++){ Ad[tx] = Ad[tx] + 1.0f; for(int j=0;j<256;j++){ Bd[tx] = Ad[tx]; } } } } int main(){ float *A, *Ad, *Bd; A = new float[LEN]; for(int i=0;i<LEN;i++){ A[i] = 0.0f; } // CHECK: hipError_t status; cudaError_t status; // CHECK: status = hipHostRegister(A, SIZE, hipHostRegisterMapped); status = cudaHostRegister(A, SIZE, cudaHostRegisterMapped); check("Registering A",status); // CHECK: hipHostGetDevicePointer(&Ad, A, 0); cudaHostGetDevicePointer(&Ad, A, 0); // CHECK: hipMalloc((void**) &Bd, SIZE); cudaMalloc((void**) &Bd, SIZE); dim3 dimGrid(LEN/512,1,1); dim3 dimBlock(512,1,1); // CHECK: hipLaunchKernelGGL(Inc1, dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd); Inc1<<<dimGrid, dimBlock>>>(Ad, Bd); sleep(3); A[0] = -(ITER*1.0f); std::cout<<"Same cache line before completion: \t"<< A[0]<<std::endl; // CHECK: hipDeviceSynchronize(); cudaDeviceSynchronize(); std::cout<<"Same cache line after completion: \t"<< A[0]<<std::endl; for(int i=0;i<LEN;i++){ A[i] = 0.0f; } // CHECK: hipLaunchKernelGGL(Inc2, dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd); Inc2<<<dimGrid, dimBlock>>>(Ad, Bd); sleep(3); A[0] = -(ITER*1.0f); std::cout<<"Diff cache line before completion: \t"<<A[0]<<std::endl; // CHECK: hipDeviceSynchronize(); cudaDeviceSynchronize(); std::cout<<"Diff cache line after completion: \t"<<A[0]<<std::endl; }
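Both versions of this test pair exercise the same pattern: host memory registered as mapped ("zero-copy") with cudaHostRegister/hipHostRegister, a device-visible alias obtained with cudaHostGetDevicePointer/hipHostGetDevicePointer, a kernel launch, and a host read that is only guaranteed to reflect the kernel's writes after cudaDeviceSynchronize/hipDeviceSynchronize. The sketch below is a minimal CUDA-side illustration of that pattern, separate from the hipify CHECK annotations; the kernel name touch is hypothetical.

// Minimal sketch of the mapped host-memory pattern used by the test above.
// Assumption: the kernel name "touch" is hypothetical, not from the original file.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void touch(float* p) { p[0] = 42.0f; }

int main() {
    float* h = new float[1024];
    h[0] = 0.0f;

    // Register existing host memory so the device can access it directly.
    cudaHostRegister(h, 1024 * sizeof(float), cudaHostRegisterMapped);

    float* d = nullptr;
    cudaHostGetDevicePointer(&d, h, 0);   // device-visible alias of h

    touch<<<1, 1>>>(d);
    // Reading h[0] here would race with the kernel; the test above prints the value
    // both before and after synchronization to demonstrate exactly that.
    cudaDeviceSynchronize();
    std::printf("h[0] after sync: %f\n", h[0]);   // expected: 42.000000

    cudaHostUnregister(h);
    delete[] h;
    return 0;
}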
61ca23b138eceded260715a85517badc4df0ecba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cmath> #include <cassert> #include <cstdlib> #include <omp.h> #ifdef WITH_CUDA5 # include <helper_cuda.h> # define CUDA_SAFE_CALL checkCudaErrors #else # include <cutil.h> #endif #include "cuda_pointer.h" #define NTHREAD 64 // 64 or 128 // #define NJBLOCK 14 // for GTX 470 #define NJBLOCK 28 // for GTX660Ti #define NIBLOCK 32 // 16 or 32 #define NIMAX (NTHREAD * NIBLOCK) // 2048 #define NXREDUCE 32 // must be >NJBLOCK #define NYREDUCE 8 #define NNB_PER_BLOCK 256 // NNB per block, must be power of 2 #define NB_BUF_SIZE (1<<20) // #define NNB_MAX 384 // total NNB at reduced #define MAX_CPU 16 #define MAX_GPU 4 // for clearity, for myself #define __out #define PROFILE #define NAN_CHECK(val) assert((val) == (val)); typedef unsigned short uint16; struct Jparticle{ float3 pos; float mass; float3 vel; float pad; Jparticle() {} Jparticle(double mj, double xj[3], double vj[3]){ pos.x = xj[0]; pos.y = xj[1]; pos.z = xj[2]; mass = mj; vel.x = vj[0]; vel.y = vj[1]; vel.z = vj[2]; NAN_CHECK(xj[0]); NAN_CHECK(xj[1]); NAN_CHECK(xj[2]); NAN_CHECK(mj); NAN_CHECK(vj[0]); NAN_CHECK(vj[1]); NAN_CHECK(vj[2]); } __device__ Jparticle(const float4 *buf){ float4 tmp1 = buf[0]; float4 tmp2 = buf[1]; pos.x = tmp1.x; pos.y = tmp1.y; pos.z = tmp1.z; mass = tmp1.w; vel.x = tmp2.x; vel.y = tmp2.y; vel.z = tmp2.z; } }; struct Iparticle{ float3 pos; float h2; float3 vel; float dtr; Iparticle() {} Iparticle(double h2i, double dtri, double xi[3], double vi[3]){ pos.x = xi[0]; pos.y = xi[1]; pos.z = xi[2]; h2 = h2i; vel.x = vi[0]; vel.y = vi[1]; vel.z = vi[2]; dtr = dtri; NAN_CHECK(xi[0]); NAN_CHECK(xi[1]); NAN_CHECK(xi[2]); NAN_CHECK(h2i); NAN_CHECK(vi[0]); NAN_CHECK(vi[1]); NAN_CHECK(vi[2]); } }; struct Force{ float3 acc; float pot; float3 jrk; int nnb; // 8 words __device__ void clear(){ acc.x = acc.y = acc.z = 0.f; jrk.x = jrk.y = jrk.z = 0.f; pot = 0.f; nnb = 0; } __device__ void operator+=(const Force &rhs){ acc.x += rhs.acc.x; acc.y += rhs.acc.y; acc.z += rhs.acc.z; pot += rhs.pot; jrk.x += rhs.jrk.x; jrk.y += rhs.jrk.y; jrk.z += rhs.jrk.z; if(nnb>=0 && rhs.nnb>=0){ nnb += rhs.nnb; }else{ nnb = -1; } } #if __CUDA_ARCH__ >= 300 __device__ void reduce_with(const int mask){ acc.x += __shfl_xor(acc.x, mask); acc.y += __shfl_xor(acc.y, mask); acc.z += __shfl_xor(acc.z, mask); pot += __shfl_xor(pot , mask); jrk.x += __shfl_xor(jrk.x, mask); jrk.y += __shfl_xor(jrk.y, mask); jrk.z += __shfl_xor(jrk.z, mask); int ntmp = __shfl_xor(nnb, mask); if(nnb>=0 && ntmp>=0){ nnb += ntmp; }else{ nnb = -1; } } #endif }; __device__ void dev_gravity( const int jidx, const Iparticle &ip, const Jparticle &jp, __out Force &fo, __out uint16 nblist[]){ float dx = jp.pos.x - ip.pos.x; float dy = jp.pos.y - ip.pos.y; float dz = jp.pos.z - ip.pos.z; float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float r2 = dx*dx + dy*dy + dz*dz; #if 1 float dxp = dx + ip.dtr * dvx; float dyp = dy + ip.dtr * dvy; float dzp = dz + ip.dtr * dvz; float r2p = dxp*dxp + dyp*dyp + dzp*dzp; #else float r2p = r2; #endif float rv = dx*dvx + dy*dvy + dz*dvz; float rinv1 = rsqrtf(r2); if(min(r2, r2p) < ip.h2){ // fo.neib[fo.nnb++ % NBMAX] = j; nblist[fo.nnb & (NNB_PER_BLOCK-1)] = (uint16)jidx; fo.nnb++; rinv1 = 0.f; } float rinv2 = rinv1 * rinv1; float mrinv1 = jp.mass * rinv1; float mrinv3 = mrinv1 * rinv2; rv *= -3.f * rinv2; #ifdef POTENTIAL fo.pot += mrinv1; #endif fo.acc.x += mrinv3 * dx; fo.acc.y += 
mrinv3 * dy; fo.acc.z += mrinv3 * dz; // fo.acc.z += 1.0; fo.jrk.x += mrinv3 * (dvx + rv * dx); fo.jrk.y += mrinv3 * (dvy + rv * dy); fo.jrk.z += mrinv3 * (dvz + rv * dz); } __global__ void gravity_kernel( const int nbody, const Iparticle ipbuf[], const Jparticle jpbuf[], __out Force fobuf[][NJBLOCK], __out uint16 nbbuf[][NJBLOCK][NNB_PER_BLOCK]){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + blockDim.x * ibid; int jstart = (nbody * (jbid )) / NJBLOCK; int jend = (nbody * (jbid+1)) / NJBLOCK; Iparticle ip = ipbuf[iaddr]; Force fo; fo.clear(); uint16 *nblist = nbbuf[iaddr][jbid]; #if __CUDA_ARCH__ >= 300 // just some trial for(int j=jstart; j<jend; j+=32){ __shared__ Jparticle jpshare[32]; __syncthreads(); float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[tid] = src[tid]; __syncthreads(); if(jend-j < 32){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ const Jparticle jp = jpshare[jj]; // const Jparticle jp( (float4 *)jpshare + 2*jj); dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 8 for(int jj=0; jj<32; jj++){ const Jparticle jp = jpshare[jj]; // const Jparticle jp( (float4 *)jpshare + 2*jj); dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } #else for(int j=jstart; j<jend; j+=NTHREAD){ __shared__ Jparticle jpshare[NTHREAD]; __syncthreads(); float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[ tid] = src[ tid]; dst[NTHREAD+tid] = src[NTHREAD+tid]; __syncthreads(); if(jend-j < NTHREAD){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 8 for(int jj=0; jj<NTHREAD; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } #endif if(fo.nnb > NNB_PER_BLOCK) fo.nnb = -1; fobuf[iaddr][jbid] = fo; } #if __CUDA_ARCH__ >= 300 __device__ void warp_reduce_int(int inp, int *out){ inp += __shfl_xor(inp, 1); inp += __shfl_xor(inp, 2); inp += __shfl_xor(inp, 4); inp += __shfl_xor(inp, 8); # if NXREDUCE==32 inp += __shfl_xor(inp, 16); # endif *out = inp; } __device__ void warp_reduce_float8(float4 inp1, float4 inp2, float *out){ const int tid = threadIdx.x; float4 tmp4L = (4&tid) ? inp2 : inp1; float4 tmp4R = (4&tid) ? inp1 : inp2; tmp4L.x += __shfl_xor(tmp4R.x, 4); tmp4L.y += __shfl_xor(tmp4R.y, 4); tmp4L.z += __shfl_xor(tmp4R.z, 4); tmp4L.w += __shfl_xor(tmp4R.w, 4); float4 tmp4; tmp4.x = (2&tid) ? tmp4L.z : tmp4L.x; tmp4.y = (2&tid) ? tmp4L.w : tmp4L.y; tmp4.z = (2&tid) ? tmp4L.x : tmp4L.z; tmp4.w = (2&tid) ? tmp4L.y : tmp4L.w; tmp4.x += __shfl_xor(tmp4.z, 2); tmp4.y += __shfl_xor(tmp4.w, 2); float2 tmp2; tmp2.x = (1&tid) ? tmp4.y : tmp4.x; tmp2.y = (1&tid) ? 
tmp4.x : tmp4.y; tmp2.x += __shfl_xor(tmp2.y, 1); tmp2.x += __shfl_xor(tmp2.x, 8); # if NXREDUCE==32 tmp2.x += __shfl_xor(tmp2.x, 16); # endif if(tid < 8){ out[tid] = tmp2.x; } } #endif __global__ void force_reduce_kernel( const int ni, const Force fpart[][NJBLOCK], __out Force ftot []){ const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; #if __CUDA_ARCH__ >= 300 Force f; if(xid < NJBLOCK){ f = fpart[iaddr][xid]; }else{ f.clear(); } # if 0 # pragma unroll for(int mask=1; mask<NXREDUCE; mask*=2){ f.reduce_with(mask); } if(iaddr < ni && xid == 0){ ftot[iaddr] = f; } # else if(iaddr < ni){ const float4 tmp1 = make_float4(f.acc.x, f.acc.y, f.acc.z, f.pot); const float4 tmp2 = make_float4(f.jrk.x, f.jrk.y, f.jrk.z, 0.0f); const int itmp = f.nnb; float *dst = (float *)(ftot + iaddr); int *idst = (int *)(dst + 7); warp_reduce_float8(tmp1, tmp2, dst); warp_reduce_int(itmp, idst); } # endif #else __shared__ Force fshare[NYREDUCE][NXREDUCE]; if(xid < NJBLOCK){ fshare[yid][xid] = fpart[iaddr][xid]; }else{ fshare[yid][xid].clear(); } Force *fs = fshare[yid]; #if NXREDUCE==32 if(xid < 16) fs[xid] += fs[xid + 16]; #endif if(xid < 8) fs[xid] += fs[xid + 8]; if(xid < 4) fs[xid] += fs[xid + 4]; if(xid < 2) fs[xid] += fs[xid + 2]; if(xid < 1) fs[xid] += fs[xid + 1]; if(iaddr < ni){ ftot[iaddr] = fs[0]; } #endif } __global__ void gather_nb_kernel( const int ni, const int nj, const int joff, const Force fpart[][NJBLOCK], const Force ftot [], const int nboff[], const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK], __out int nblist[]) { const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; if(iaddr >= ni) return; if(ftot[iaddr].nnb < 0) return; const int mynnb = (xid < NJBLOCK) ? fpart[iaddr][xid].nnb : 0; // now performe prefix sum #if __CUDA_ARCH__ >= 300 int ix = mynnb; #pragma unroll for(int ioff=1; ioff<NXREDUCE; ioff*=2){ int iy = __shfl_up(ix, ioff); if(xid>=ioff) ix += iy; } int iz = __shfl_up(ix, 1); const int off = (xid == 0) ? 0 : iz; #else __shared__ int ishare[NYREDUCE][NXREDUCE]; ishare[yid][xid] = mynnb; volatile int *ish = ishare[yid]; if(xid>=1) ish[xid] += ish[xid-1]; if(xid>=2) ish[xid] += ish[xid-2]; if(xid>=4) ish[xid] += ish[xid-4]; if(xid>=8) ish[xid] += ish[xid-8]; #if NXREDUCE==32 if(xid>=16) ish[xid] += ish[xid-16]; #endif const int off = (xid == 0) ? 
0 : ish[xid-1]; #endif int *nbdst = nblist + nboff[iaddr] + off; const int jstart = (nj * xid) / NJBLOCK; if(xid < NJBLOCK){ for(int k=0; k<mynnb; k++){ const int nbid = (joff + jstart) + int(nbpart[iaddr][xid][k]); // const int nbid = iaddr * 1000 + k; nbdst[k] = nbid; } } } // Host Part #ifdef PROFILE #include <sys/time.h> static double get_wtime(){ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + 1.e-6 * tv.tv_usec; } #else static double get_wtime(){ return 0.0; } #endif static double time_send, time_grav, time_reduce; static long long numInter; static cudaPointer <Jparticle> jpbuf[MAX_GPU]; static cudaPointer <Iparticle> ipbuf[MAX_GPU]; static cudaPointer <Force[NJBLOCK]> fpart[MAX_GPU]; static cudaPointer <Force> ftot [MAX_GPU]; static cudaPointer <uint16[NJBLOCK][NNB_PER_BLOCK]> nbpart[MAX_GPU]; static cudaPointer <int> nblist [MAX_GPU]; static cudaPointer <int> nboff [MAX_GPU]; static int numCPU, numGPU; static int joff[MAX_GPU + 1]; static int nbody, nbodymax; static int devid[MAX_GPU]; static bool is_open = false; static bool devinit = false; void GPUNB_devinit(){ if(devinit) return; assert(NXREDUCE >= NJBLOCK); assert(NXREDUCE <= 32); hipGetDeviceCount(&numGPU); assert(numGPU <= MAX_GPU); char *gpu_list = getenv("GPU_LIST"); if(gpu_list){ // get GPU list from environment variable numGPU = 0; char *p = strtok(gpu_list, " "); while(p){ devid[numGPU++] = atoi(p); p = strtok(NULL, " "); assert(numGPU <= MAX_GPU); } }else{ // use all GPUs for(int i=0; i<numGPU; i++){ devid[i] = i; } } // numGPU = 1; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid == 0) numCPU = omp_get_num_threads(); } assert(numCPU <= MAX_CPU); assert(numGPU <= numCPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ hipSetDevice(devid[tid]); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Initializing NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); #if 1 for(int i=0; i<numGPU; i++){ hipDeviceProp_t prop; hipGetDeviceProperties(&prop, devid[i]); fprintf(stderr, " device %d: %s\n", devid[i], prop.name); } #endif fprintf(stderr, "***********************\n"); #endif devinit = true; } void GPUNB_open(int nbmax){ time_send = time_grav = time_reduce = 0.0; numInter = 0; nbodymax = nbmax; GPUNB_devinit(); if(is_open){ fprintf(stderr, "gpunb: it is already open\n"); return; } is_open = true; for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbmax) / numGPU; } // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ hipSetDevice(devid[tid]); int nj = joff[tid+1] - joff[tid]; jpbuf [tid].allocate(nj + NTHREAD); ipbuf [tid].allocate(NIMAX); fpart [tid].allocate(NIMAX); ftot [tid].allocate(NIMAX); nbpart[tid].allocate(NIMAX); nblist[tid].allocate(NB_BUF_SIZE); // total ganged nblist nboff [tid].allocate(NIMAX+1); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Opened NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); for(int i=0; i<numGPU+1; i++){ fprintf(stderr, " %d", joff[i]); } fprintf(stderr, "\n"); fprintf(stderr, "nbmax = %d\n", nbmax); fprintf(stderr, "***********************\n"); #endif } void GPUNB_close(){ if(!is_open){ fprintf(stderr, "gpunb: it is already 
close\n"); return; } is_open = false; // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ jpbuf [tid].free(); ipbuf [tid].free(); fpart [tid].free(); ftot [tid].free(); nbpart[tid].free(); nblist[tid].free(); nboff [tid].free(); } } // omp_set_num_threads(numCPU); nbodymax = 0; #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Closed NBODY6/GPU library\n"); fprintf(stderr, "time send : %f sec\n", time_send); fprintf(stderr, "time grav : %f sec\n", time_grav); fprintf(stderr, "time reduce : %f sec\n", time_reduce); fprintf(stderr, "time regtot : %f sec\n", time_send + time_grav + time_reduce); fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav); fprintf(stderr, "***********************\n"); #endif } void GPUNB_send( int _nbody, double mj[], double xj[][3], double vj[][3]){ assert(is_open); nbody = _nbody; assert(nbody <= nbodymax); time_send -= get_wtime(); for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbody) / numGPU; } #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ int nj = joff[tid+1] - joff[tid]; for(int j=0; j<nj; j++){ int jj = j + joff[tid]; jpbuf[tid][j] = Jparticle(mj[jj], xj[jj], vj[jj]); } jpbuf[tid].htod(nj); } } time_send += get_wtime(); } void GPUNB_regf( int ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int lmax, int nnbmax, int *listbase){ assert(is_open); time_grav -= get_wtime(); numInter += ni * nbody; assert(0 < ni && ni <= NIMAX); // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ // hipSetDevice(device_id[tid]); for(int i=0; i<ni; i++){ ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]); } // set i-particles ipbuf[tid].htod(ni); // gravity kernel int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); int nj = joff[tid+1] - joff[tid]; hipLaunchKernelGGL(( gravity_kernel) , dim3(grid), dim3(threads) , 0, 0, nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]); // CUDA_SAFE_THREAD_SYNC(); #if 0 dim3 rgrid(niblock, 1, 1); hipLaunchKernelGGL(( reduce_kernel) , dim3(rgrid), dim3(threads) , 0, 0, nj, joff[tid], fpart[tid], nbpart[tid], ftot[tid], nbtot[tid]); #else const int ni8 = 1 + (ni-1) / NYREDUCE; dim3 rgrid (ni8, 1, 1); dim3 rthreads(NXREDUCE, NYREDUCE, 1); hipLaunchKernelGGL(( force_reduce_kernel) , dim3(rgrid), dim3(rthreads) , 0, 0, ni, fpart[tid], ftot[tid]); #endif // CUDA_SAFE_THREAD_SYNC(); ftot [tid].dtoh(ni); // now make prefix sum int nbsum = 0; for(int i=0; i<ni; i++){ nboff[tid][i] = nbsum; const int nnb = ftot[tid][i].nnb; // assert(nnb >= 0); if(nnb >= 0) nbsum += nnb; } assert(nbsum <= NB_BUF_SIZE); nboff[tid].htod(ni); // debugging // for(int k=0; k<nbsum; k++) nblist[tid][k] = -1; // nblist[tid].htod(nbsum); hipLaunchKernelGGL(( gather_nb_kernel) , dim3(rgrid), dim3(rthreads), 0, 0, ni, nj, joff[tid], fpart[tid], ftot[tid], nboff[tid], nbpart[tid], nblist[tid]); // CUDA_SAFE_THREAD_SYNC(); nblist[tid].dtoh(nbsum); } } const double wt = get_wtime(); time_grav += wt; time_reduce -= wt; // reduction phase // omp_set_num_threads(numCPU); #pragma omp parallel for for(int i=0; i<ni; i++){ double ax=0.0, ay=0.0, az=0.0; double jx=0.0, jy=0.0, jz=0.0; double po=0.0; for(int id=0; id<numGPU; id++){ Force &fo = ftot[id][i]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; po += fo.pot; } acc[i][0] = ax; acc[i][1] 
= ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; pot[i] = po; } #pragma omp parallel for for(int i=0; i<ni; i++){ bool overflow = false; int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = 0; for(int id=0; id<numGPU; id++){ const int nnb_part = ftot[id][i].nnb; if(nnb_part < 0){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part); } // assert(!overflow); nnb += nnb_part; if(nnb > nnbmax){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nnbmax); } // assert(!overflow); if(!overflow){ const int off = nboff[id][i]; for(int k=0; k<nnb_part; k++){ *nblistp++ = nblist[id][off + k]; } } } if(overflow){ // *nnbp = -1; *nnbp = nnb ? -abs(nnb) : -9999; }else{ *nnbp = nnb; } } time_reduce += get_wtime(); } extern "C" { void gpunb_devinit_(){ GPUNB_devinit(); } void gpunb_open_(int *nbmax){ GPUNB_open(*nbmax); } void gpunb_close_(){ GPUNB_close(); } void gpunb_send_( int *nj, double mj[], double xj[][3], double vj[][3]){ GPUNB_send(*nj, mj, xj, vj); } void gpunb_regf_( int *ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list){ // list[][lmax] GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nbmax, list); } }
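/*
 * Sketch: Force::reduce_with and warp_reduce_float8 in the library above combine
 * per-block partial forces inside a warp with XOR (butterfly) shuffles. The library
 * uses the pre-CUDA-9 __shfl_xor intrinsic; the standalone illustration below uses
 * the current __shfl_xor_sync variant and sums 32 floats in one warp without shared
 * memory. It is a self-contained demo, not code from the library.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void warp_sum(const float *in, float *out) {
    const unsigned full_mask = 0xffffffffu;
    float v = in[threadIdx.x];
    // After each step every lane holds the sum over a group twice as large,
    // so after log2(32) steps all 32 lanes hold the full total.
    for (int mask = 1; mask < 32; mask <<= 1) {
        v += __shfl_xor_sync(full_mask, v, mask);
    }
    if (threadIdx.x == 0) *out = v;
}

int main() {
    float h_in[32], h_out = 0.0f;
    for (int i = 0; i < 32; ++i) h_in[i] = 1.0f;   // expected sum: 32
    float *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    warp_sum<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("warp sum = %f (expected 32)\n", h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}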
61ca23b138eceded260715a85517badc4df0ecba.cu
#include <cstdio> #include <cmath> #include <cassert> #include <cstdlib> #include <omp.h> #ifdef WITH_CUDA5 # include <helper_cuda.h> # define CUDA_SAFE_CALL checkCudaErrors #else # include <cutil.h> #endif #include "cuda_pointer.h" #define NTHREAD 64 // 64 or 128 // #define NJBLOCK 14 // for GTX 470 #define NJBLOCK 28 // for GTX660Ti #define NIBLOCK 32 // 16 or 32 #define NIMAX (NTHREAD * NIBLOCK) // 2048 #define NXREDUCE 32 // must be >NJBLOCK #define NYREDUCE 8 #define NNB_PER_BLOCK 256 // NNB per block, must be power of 2 #define NB_BUF_SIZE (1<<20) // #define NNB_MAX 384 // total NNB at reduced #define MAX_CPU 16 #define MAX_GPU 4 // for clearity, for myself #define __out #define PROFILE #define NAN_CHECK(val) assert((val) == (val)); typedef unsigned short uint16; struct Jparticle{ float3 pos; float mass; float3 vel; float pad; Jparticle() {} Jparticle(double mj, double xj[3], double vj[3]){ pos.x = xj[0]; pos.y = xj[1]; pos.z = xj[2]; mass = mj; vel.x = vj[0]; vel.y = vj[1]; vel.z = vj[2]; NAN_CHECK(xj[0]); NAN_CHECK(xj[1]); NAN_CHECK(xj[2]); NAN_CHECK(mj); NAN_CHECK(vj[0]); NAN_CHECK(vj[1]); NAN_CHECK(vj[2]); } __device__ Jparticle(const float4 *buf){ float4 tmp1 = buf[0]; float4 tmp2 = buf[1]; pos.x = tmp1.x; pos.y = tmp1.y; pos.z = tmp1.z; mass = tmp1.w; vel.x = tmp2.x; vel.y = tmp2.y; vel.z = tmp2.z; } }; struct Iparticle{ float3 pos; float h2; float3 vel; float dtr; Iparticle() {} Iparticle(double h2i, double dtri, double xi[3], double vi[3]){ pos.x = xi[0]; pos.y = xi[1]; pos.z = xi[2]; h2 = h2i; vel.x = vi[0]; vel.y = vi[1]; vel.z = vi[2]; dtr = dtri; NAN_CHECK(xi[0]); NAN_CHECK(xi[1]); NAN_CHECK(xi[2]); NAN_CHECK(h2i); NAN_CHECK(vi[0]); NAN_CHECK(vi[1]); NAN_CHECK(vi[2]); } }; struct Force{ float3 acc; float pot; float3 jrk; int nnb; // 8 words __device__ void clear(){ acc.x = acc.y = acc.z = 0.f; jrk.x = jrk.y = jrk.z = 0.f; pot = 0.f; nnb = 0; } __device__ void operator+=(const Force &rhs){ acc.x += rhs.acc.x; acc.y += rhs.acc.y; acc.z += rhs.acc.z; pot += rhs.pot; jrk.x += rhs.jrk.x; jrk.y += rhs.jrk.y; jrk.z += rhs.jrk.z; if(nnb>=0 && rhs.nnb>=0){ nnb += rhs.nnb; }else{ nnb = -1; } } #if __CUDA_ARCH__ >= 300 __device__ void reduce_with(const int mask){ acc.x += __shfl_xor(acc.x, mask); acc.y += __shfl_xor(acc.y, mask); acc.z += __shfl_xor(acc.z, mask); pot += __shfl_xor(pot , mask); jrk.x += __shfl_xor(jrk.x, mask); jrk.y += __shfl_xor(jrk.y, mask); jrk.z += __shfl_xor(jrk.z, mask); int ntmp = __shfl_xor(nnb, mask); if(nnb>=0 && ntmp>=0){ nnb += ntmp; }else{ nnb = -1; } } #endif }; __device__ void dev_gravity( const int jidx, const Iparticle &ip, const Jparticle &jp, __out Force &fo, __out uint16 nblist[]){ float dx = jp.pos.x - ip.pos.x; float dy = jp.pos.y - ip.pos.y; float dz = jp.pos.z - ip.pos.z; float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float r2 = dx*dx + dy*dy + dz*dz; #if 1 float dxp = dx + ip.dtr * dvx; float dyp = dy + ip.dtr * dvy; float dzp = dz + ip.dtr * dvz; float r2p = dxp*dxp + dyp*dyp + dzp*dzp; #else float r2p = r2; #endif float rv = dx*dvx + dy*dvy + dz*dvz; float rinv1 = rsqrtf(r2); if(min(r2, r2p) < ip.h2){ // fo.neib[fo.nnb++ % NBMAX] = j; nblist[fo.nnb & (NNB_PER_BLOCK-1)] = (uint16)jidx; fo.nnb++; rinv1 = 0.f; } float rinv2 = rinv1 * rinv1; float mrinv1 = jp.mass * rinv1; float mrinv3 = mrinv1 * rinv2; rv *= -3.f * rinv2; #ifdef POTENTIAL fo.pot += mrinv1; #endif fo.acc.x += mrinv3 * dx; fo.acc.y += mrinv3 * dy; fo.acc.z += mrinv3 * dz; // fo.acc.z += 1.0; fo.jrk.x += mrinv3 * (dvx + rv * 
dx); fo.jrk.y += mrinv3 * (dvy + rv * dy); fo.jrk.z += mrinv3 * (dvz + rv * dz); } __global__ void gravity_kernel( const int nbody, const Iparticle ipbuf[], const Jparticle jpbuf[], __out Force fobuf[][NJBLOCK], __out uint16 nbbuf[][NJBLOCK][NNB_PER_BLOCK]){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + blockDim.x * ibid; int jstart = (nbody * (jbid )) / NJBLOCK; int jend = (nbody * (jbid+1)) / NJBLOCK; Iparticle ip = ipbuf[iaddr]; Force fo; fo.clear(); uint16 *nblist = nbbuf[iaddr][jbid]; #if __CUDA_ARCH__ >= 300 // just some trial for(int j=jstart; j<jend; j+=32){ __shared__ Jparticle jpshare[32]; __syncthreads(); float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[tid] = src[tid]; __syncthreads(); if(jend-j < 32){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ const Jparticle jp = jpshare[jj]; // const Jparticle jp( (float4 *)jpshare + 2*jj); dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 8 for(int jj=0; jj<32; jj++){ const Jparticle jp = jpshare[jj]; // const Jparticle jp( (float4 *)jpshare + 2*jj); dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } #else for(int j=jstart; j<jend; j+=NTHREAD){ __shared__ Jparticle jpshare[NTHREAD]; __syncthreads(); float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[ tid] = src[ tid]; dst[NTHREAD+tid] = src[NTHREAD+tid]; __syncthreads(); if(jend-j < NTHREAD){ #pragma unroll 4 for(int jj=0; jj<jend-j; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } }else{ #pragma unroll 8 for(int jj=0; jj<NTHREAD; jj++){ Jparticle jp = jpshare[jj]; dev_gravity(j-jstart+jj, ip, jp, fo, nblist); } } } #endif if(fo.nnb > NNB_PER_BLOCK) fo.nnb = -1; fobuf[iaddr][jbid] = fo; } #if __CUDA_ARCH__ >= 300 __device__ void warp_reduce_int(int inp, int *out){ inp += __shfl_xor(inp, 1); inp += __shfl_xor(inp, 2); inp += __shfl_xor(inp, 4); inp += __shfl_xor(inp, 8); # if NXREDUCE==32 inp += __shfl_xor(inp, 16); # endif *out = inp; } __device__ void warp_reduce_float8(float4 inp1, float4 inp2, float *out){ const int tid = threadIdx.x; float4 tmp4L = (4&tid) ? inp2 : inp1; float4 tmp4R = (4&tid) ? inp1 : inp2; tmp4L.x += __shfl_xor(tmp4R.x, 4); tmp4L.y += __shfl_xor(tmp4R.y, 4); tmp4L.z += __shfl_xor(tmp4R.z, 4); tmp4L.w += __shfl_xor(tmp4R.w, 4); float4 tmp4; tmp4.x = (2&tid) ? tmp4L.z : tmp4L.x; tmp4.y = (2&tid) ? tmp4L.w : tmp4L.y; tmp4.z = (2&tid) ? tmp4L.x : tmp4L.z; tmp4.w = (2&tid) ? tmp4L.y : tmp4L.w; tmp4.x += __shfl_xor(tmp4.z, 2); tmp4.y += __shfl_xor(tmp4.w, 2); float2 tmp2; tmp2.x = (1&tid) ? tmp4.y : tmp4.x; tmp2.y = (1&tid) ? 
tmp4.x : tmp4.y; tmp2.x += __shfl_xor(tmp2.y, 1); tmp2.x += __shfl_xor(tmp2.x, 8); # if NXREDUCE==32 tmp2.x += __shfl_xor(tmp2.x, 16); # endif if(tid < 8){ out[tid] = tmp2.x; } } #endif __global__ void force_reduce_kernel( const int ni, const Force fpart[][NJBLOCK], __out Force ftot []){ const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; #if __CUDA_ARCH__ >= 300 Force f; if(xid < NJBLOCK){ f = fpart[iaddr][xid]; }else{ f.clear(); } # if 0 # pragma unroll for(int mask=1; mask<NXREDUCE; mask*=2){ f.reduce_with(mask); } if(iaddr < ni && xid == 0){ ftot[iaddr] = f; } # else if(iaddr < ni){ const float4 tmp1 = make_float4(f.acc.x, f.acc.y, f.acc.z, f.pot); const float4 tmp2 = make_float4(f.jrk.x, f.jrk.y, f.jrk.z, 0.0f); const int itmp = f.nnb; float *dst = (float *)(ftot + iaddr); int *idst = (int *)(dst + 7); warp_reduce_float8(tmp1, tmp2, dst); warp_reduce_int(itmp, idst); } # endif #else __shared__ Force fshare[NYREDUCE][NXREDUCE]; if(xid < NJBLOCK){ fshare[yid][xid] = fpart[iaddr][xid]; }else{ fshare[yid][xid].clear(); } Force *fs = fshare[yid]; #if NXREDUCE==32 if(xid < 16) fs[xid] += fs[xid + 16]; #endif if(xid < 8) fs[xid] += fs[xid + 8]; if(xid < 4) fs[xid] += fs[xid + 4]; if(xid < 2) fs[xid] += fs[xid + 2]; if(xid < 1) fs[xid] += fs[xid + 1]; if(iaddr < ni){ ftot[iaddr] = fs[0]; } #endif } __global__ void gather_nb_kernel( const int ni, const int nj, const int joff, const Force fpart[][NJBLOCK], const Force ftot [], const int nboff[], const uint16 nbpart[][NJBLOCK][NNB_PER_BLOCK], __out int nblist[]) { const int xid = threadIdx.x; const int yid = threadIdx.y; const int bid = blockIdx.x; const int iaddr = yid + blockDim.y * bid; if(iaddr >= ni) return; if(ftot[iaddr].nnb < 0) return; const int mynnb = (xid < NJBLOCK) ? fpart[iaddr][xid].nnb : 0; // now performe prefix sum #if __CUDA_ARCH__ >= 300 int ix = mynnb; #pragma unroll for(int ioff=1; ioff<NXREDUCE; ioff*=2){ int iy = __shfl_up(ix, ioff); if(xid>=ioff) ix += iy; } int iz = __shfl_up(ix, 1); const int off = (xid == 0) ? 0 : iz; #else __shared__ int ishare[NYREDUCE][NXREDUCE]; ishare[yid][xid] = mynnb; volatile int *ish = ishare[yid]; if(xid>=1) ish[xid] += ish[xid-1]; if(xid>=2) ish[xid] += ish[xid-2]; if(xid>=4) ish[xid] += ish[xid-4]; if(xid>=8) ish[xid] += ish[xid-8]; #if NXREDUCE==32 if(xid>=16) ish[xid] += ish[xid-16]; #endif const int off = (xid == 0) ? 
0 : ish[xid-1]; #endif int *nbdst = nblist + nboff[iaddr] + off; const int jstart = (nj * xid) / NJBLOCK; if(xid < NJBLOCK){ for(int k=0; k<mynnb; k++){ const int nbid = (joff + jstart) + int(nbpart[iaddr][xid][k]); // const int nbid = iaddr * 1000 + k; nbdst[k] = nbid; } } } // Host Part #ifdef PROFILE #include <sys/time.h> static double get_wtime(){ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + 1.e-6 * tv.tv_usec; } #else static double get_wtime(){ return 0.0; } #endif static double time_send, time_grav, time_reduce; static long long numInter; static cudaPointer <Jparticle> jpbuf[MAX_GPU]; static cudaPointer <Iparticle> ipbuf[MAX_GPU]; static cudaPointer <Force[NJBLOCK]> fpart[MAX_GPU]; static cudaPointer <Force> ftot [MAX_GPU]; static cudaPointer <uint16[NJBLOCK][NNB_PER_BLOCK]> nbpart[MAX_GPU]; static cudaPointer <int> nblist [MAX_GPU]; static cudaPointer <int> nboff [MAX_GPU]; static int numCPU, numGPU; static int joff[MAX_GPU + 1]; static int nbody, nbodymax; static int devid[MAX_GPU]; static bool is_open = false; static bool devinit = false; void GPUNB_devinit(){ if(devinit) return; assert(NXREDUCE >= NJBLOCK); assert(NXREDUCE <= 32); cudaGetDeviceCount(&numGPU); assert(numGPU <= MAX_GPU); char *gpu_list = getenv("GPU_LIST"); if(gpu_list){ // get GPU list from environment variable numGPU = 0; char *p = strtok(gpu_list, " "); while(p){ devid[numGPU++] = atoi(p); p = strtok(NULL, " "); assert(numGPU <= MAX_GPU); } }else{ // use all GPUs for(int i=0; i<numGPU; i++){ devid[i] = i; } } // numGPU = 1; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid == 0) numCPU = omp_get_num_threads(); } assert(numCPU <= MAX_CPU); assert(numGPU <= numCPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ cudaSetDevice(devid[tid]); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Initializing NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); #if 1 for(int i=0; i<numGPU; i++){ cudaDeviceProp prop; cudaGetDeviceProperties(&prop, devid[i]); fprintf(stderr, " device %d: %s\n", devid[i], prop.name); } #endif fprintf(stderr, "***********************\n"); #endif devinit = true; } void GPUNB_open(int nbmax){ time_send = time_grav = time_reduce = 0.0; numInter = 0; nbodymax = nbmax; GPUNB_devinit(); if(is_open){ fprintf(stderr, "gpunb: it is already open\n"); return; } is_open = true; for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbmax) / numGPU; } // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ cudaSetDevice(devid[tid]); int nj = joff[tid+1] - joff[tid]; jpbuf [tid].allocate(nj + NTHREAD); ipbuf [tid].allocate(NIMAX); fpart [tid].allocate(NIMAX); ftot [tid].allocate(NIMAX); nbpart[tid].allocate(NIMAX); nblist[tid].allocate(NB_BUF_SIZE); // total ganged nblist nboff [tid].allocate(NIMAX+1); } } #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Opened NBODY6/GPU library\n"); fprintf(stderr, "#CPU %d, #GPU %d\n", numCPU, numGPU); fprintf(stderr, " device:"); for(int i=0; i<numGPU; i++){ fprintf(stderr, " %d", devid[i]); } fprintf(stderr, "\n"); for(int i=0; i<numGPU+1; i++){ fprintf(stderr, " %d", joff[i]); } fprintf(stderr, "\n"); fprintf(stderr, "nbmax = %d\n", nbmax); fprintf(stderr, "***********************\n"); #endif } void GPUNB_close(){ if(!is_open){ fprintf(stderr, "gpunb: it is already 
close\n"); return; } is_open = false; // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ jpbuf [tid].free(); ipbuf [tid].free(); fpart [tid].free(); ftot [tid].free(); nbpart[tid].free(); nblist[tid].free(); nboff [tid].free(); } } // omp_set_num_threads(numCPU); nbodymax = 0; #ifdef PROFILE fprintf(stderr, "***********************\n"); fprintf(stderr, "Closed NBODY6/GPU library\n"); fprintf(stderr, "time send : %f sec\n", time_send); fprintf(stderr, "time grav : %f sec\n", time_grav); fprintf(stderr, "time reduce : %f sec\n", time_reduce); fprintf(stderr, "time regtot : %f sec\n", time_send + time_grav + time_reduce); fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav); fprintf(stderr, "***********************\n"); #endif } void GPUNB_send( int _nbody, double mj[], double xj[][3], double vj[][3]){ assert(is_open); nbody = _nbody; assert(nbody <= nbodymax); time_send -= get_wtime(); for(int id=0; id<numGPU + 1; id++){ joff[id] = (id * nbody) / numGPU; } #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ int nj = joff[tid+1] - joff[tid]; for(int j=0; j<nj; j++){ int jj = j + joff[tid]; jpbuf[tid][j] = Jparticle(mj[jj], xj[jj], vj[jj]); } jpbuf[tid].htod(nj); } } time_send += get_wtime(); } void GPUNB_regf( int ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int lmax, int nnbmax, int *listbase){ assert(is_open); time_grav -= get_wtime(); numInter += ni * nbody; assert(0 < ni && ni <= NIMAX); // omp_set_num_threads(numGPU); #pragma omp parallel { int tid = omp_get_thread_num(); if(tid < numGPU){ // cudaSetDevice(device_id[tid]); for(int i=0; i<ni; i++){ ipbuf[tid][i] = Iparticle(h2[i], dtr[i], xi[i], vi[i]); } // set i-particles ipbuf[tid].htod(ni); // gravity kernel int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); int nj = joff[tid+1] - joff[tid]; gravity_kernel <<< grid, threads >>> (nj, ipbuf[tid], jpbuf[tid], fpart[tid], nbpart[tid]); // CUDA_SAFE_THREAD_SYNC(); #if 0 dim3 rgrid(niblock, 1, 1); reduce_kernel <<< rgrid, threads >>> (nj, joff[tid], fpart[tid], nbpart[tid], ftot[tid], nbtot[tid]); #else const int ni8 = 1 + (ni-1) / NYREDUCE; dim3 rgrid (ni8, 1, 1); dim3 rthreads(NXREDUCE, NYREDUCE, 1); force_reduce_kernel <<< rgrid, rthreads >>> (ni, fpart[tid], ftot[tid]); #endif // CUDA_SAFE_THREAD_SYNC(); ftot [tid].dtoh(ni); // now make prefix sum int nbsum = 0; for(int i=0; i<ni; i++){ nboff[tid][i] = nbsum; const int nnb = ftot[tid][i].nnb; // assert(nnb >= 0); if(nnb >= 0) nbsum += nnb; } assert(nbsum <= NB_BUF_SIZE); nboff[tid].htod(ni); // debugging // for(int k=0; k<nbsum; k++) nblist[tid][k] = -1; // nblist[tid].htod(nbsum); gather_nb_kernel <<< rgrid, rthreads>>> (ni, nj, joff[tid], fpart[tid], ftot[tid], nboff[tid], nbpart[tid], nblist[tid]); // CUDA_SAFE_THREAD_SYNC(); nblist[tid].dtoh(nbsum); } } const double wt = get_wtime(); time_grav += wt; time_reduce -= wt; // reduction phase // omp_set_num_threads(numCPU); #pragma omp parallel for for(int i=0; i<ni; i++){ double ax=0.0, ay=0.0, az=0.0; double jx=0.0, jy=0.0, jz=0.0; double po=0.0; for(int id=0; id<numGPU; id++){ Force &fo = ftot[id][i]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; po += fo.pot; } acc[i][0] = ax; acc[i][1] = ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; pot[i] = po; } #pragma omp parallel for for(int i=0; i<ni; i++){ 
bool overflow = false; int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = 0; for(int id=0; id<numGPU; id++){ const int nnb_part = ftot[id][i].nnb; if(nnb_part < 0){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_part=%d\n", i, id, nnb_part); } // assert(!overflow); nnb += nnb_part; if(nnb > nnbmax){ overflow = true; fprintf(stderr, "!!!overflow : i=%d, id=%d, nnb_tot =%d, nnbmax=%d\n", i, id, nnb, nnbmax); } // assert(!overflow); if(!overflow){ const int off = nboff[id][i]; for(int k=0; k<nnb_part; k++){ *nblistp++ = nblist[id][off + k]; } } } if(overflow){ // *nnbp = -1; *nnbp = nnb ? -abs(nnb) : -9999; }else{ *nnbp = nnb; } } time_reduce += get_wtime(); } extern "C" { void gpunb_devinit_(){ GPUNB_devinit(); } void gpunb_open_(int *nbmax){ GPUNB_open(*nbmax); } void gpunb_close_(){ GPUNB_close(); } void gpunb_send_( int *nj, double mj[], double xj[][3], double vj[][3]){ GPUNB_send(*nj, mj, xj, vj); } void gpunb_regf_( int *ni, double h2[], double dtr[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list){ // list[][lmax] GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nbmax, list); } }
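/*
 * Sketch: gather_nb_kernel above finds, for each i-particle, where every j-block's
 * neighbour indices start in the ganged list by running an intra-warp prefix sum of
 * the per-block counts with __shfl_up, then shifting by one lane to make it exclusive.
 * The standalone version below reproduces that scan with the current __shfl_up_sync
 * intrinsic for a single warp; it is a demo, not code from the library.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void warp_exclusive_scan(const int *counts, int *offsets) {
    const unsigned full_mask = 0xffffffffu;
    const int lane = threadIdx.x;              // 0..31
    int x = counts[lane];

    // Inclusive scan: lane i ends up with counts[0] + ... + counts[i].
    for (int d = 1; d < 32; d <<= 1) {
        int y = __shfl_up_sync(full_mask, x, d);
        if (lane >= d) x += y;
    }
    // Shift right by one lane to obtain the exclusive scan (write offsets).
    int prev = __shfl_up_sync(full_mask, x, 1);
    offsets[lane] = (lane == 0) ? 0 : prev;
}

int main() {
    int h_counts[32], h_offsets[32];
    for (int i = 0; i < 32; ++i) h_counts[i] = i % 4;   // 0,1,2,3,0,1,...
    int *d_counts, *d_offsets;
    cudaMalloc(&d_counts, sizeof(h_counts));
    cudaMalloc(&d_offsets, sizeof(h_offsets));
    cudaMemcpy(d_counts, h_counts, sizeof(h_counts), cudaMemcpyHostToDevice);
    warp_exclusive_scan<<<1, 32>>>(d_counts, d_offsets);
    cudaMemcpy(h_offsets, d_offsets, sizeof(h_offsets), cudaMemcpyDeviceToHost);
    int expect = 0;
    for (int i = 0; i < 32; ++i) {
        printf("lane %2d: count %d -> offset %d (expected %d)\n",
               i, h_counts[i], h_offsets[i], expect);
        expect += h_counts[i];
    }
    cudaFree(d_counts);
    cudaFree(d_offsets);
    return 0;
}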
8ea581776952d50f9943b50803ae20f9bdd933d8.hip
// !!! This is a file automatically generated by hipify!!! // Include files #include <ctime> #include "hip/hip_runtime.h" #include "math.h" #include <stdio.h> #include <stdlib.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <time.h> #include<string.h> // Parameters #define N_ATOMS 343 #define MASS_ATOM 1.0f #define time_step 0.01f #define L 10.5f #define T 0.728f #define NUM_STEPS 10000 const int BLOCK_SIZE = 1024; //const int L = ; const int scheme = 1; // 0 for explicit, 1 for implicit /*************************************************************************************************************/ /************* INITIALIZATION CODE **********/ /*************************************************************************************************************/ __global__ void init_r(float* r, int N_cube){ int ix = threadIdx.x + blockDim.x* blockIdx.x; int iy = threadIdx.y + blockDim.y* blockIdx.y; int iz = threadIdx.z + blockDim.z* blockIdx.z; int index = ix + iy*N_cube + iz * N_cube * N_cube; if (ix < N_cube && iy < N_cube && iz<N_cube && index < N_ATOMS){ r[index] = L / 2.0 * (1.0 - float(2 * ix + 1) / N_cube); r[index + N_ATOMS] = L / 2.0 * (1.0 - float(2 * iy + 1) / N_cube); r[index + 2 * N_ATOMS] = L / 2.0 * (1.0 - float(2 * iz + 1) / N_cube); } } void init_vel(float* vel){ srand(time(NULL)); float netP[3] = { 0.0f, 0.0f, 0.0f }; float netE = 0.0; int i; float a, b, c; for (i = 0; i < N_ATOMS; i++){ a = ((float)rand() / RAND_MAX) - 0.5; b = ((float)rand() / RAND_MAX) - 0.5; c = ((float)rand() / RAND_MAX) - 0.5; vel[i] = a; vel[i + N_ATOMS] = b; vel[i + 2 * N_ATOMS] = c; netE += a*a + b*b + c*c; netP[0] += a; netP[1] += b; netP[2] += c; } netP[0] *= 1.0f / N_ATOMS; netP[1] *= 1.0f / N_ATOMS; netP[2] *= 1.0f / N_ATOMS; float vscale = sqrtf(3 * N_ATOMS*T / netE); for (i = 0; i < N_ATOMS; i++){ vel[i] = (vel[i] - netP[0])*vscale; vel[i + N_ATOMS] = (vel[i + N_ATOMS] - netP[1])*vscale; vel[i + 2 * N_ATOMS] = (vel[i + 2 * N_ATOMS] - netP[2])*vscale; } //netP[0] = 0.0f; //netP[1] = 0.0f; //netP[2] = 0.0f; //for (i = 0; i < N_ATOMS; i++){ //netP[0] += vel[i]; //netP[1] += vel[i + N_ATOMS]; //netP[2] += vel[i + 2 * N_ATOMS]; //} //printf("netP in x %f \n", netP[0]); //printf("netP in y %f \n", netP[1]); //printf("netP in z %f \n", netP[2]); } /*************************************************************************************************************/ /************* COMPUTATION KERNELS **********/ /*************************************************************************************************************/ __global__ void makePairs(float* positions, float* pairpos){ __shared__ float pos[N_ATOMS * 3]; float del; int iatom1, iatom2, i; int tx = threadIdx.x; int n = 0, breachindex = -1; int index = tx + blockDim.x*blockIdx.x; i = tx; while (i < N_ATOMS * 3){ pos[i] = positions[i]; i += blockDim.x; } __syncthreads(); if (index < N_ATOMS*N_ATOMS){ iatom1 = int(index / N_ATOMS); iatom2 = index - iatom1*N_ATOMS; #pragma unroll for (i = 0; i < 3; i++){ del = pos[iatom1 + i*N_ATOMS] - pos[iatom2 + i*N_ATOMS]; if (fabs(del) > L / 2.0f) { del += (2 * (del<0) - 1)*L; } if (fabs(del)> L / 2.0f){ n = 1 + i; breachindex = index; } pairpos[index + i*N_ATOMS*N_ATOMS] = del; } } __syncthreads(); //if (index == breachindex) { printf("Number of breaches: %i\n", n); } //if (index < N_ATOMS*N_ATOMS){ // del = sqrtf(__powf(pairpos[index], 2) + __powf(pairpos[index + N_ATOMS*N_ATOMS], 2) + __powf(pairpos[index + 2 * 
N_ATOMS*N_ATOMS], 2)); // if (del <1.0f && del != 0.0f){ // printf("distance breached at index %i\tatom 1: %i\tatom 2 : %i\tdistance: %f (%f, %f, %f)\n", // index, iatom1, iatom2, del, pairpos[index], pairpos[index + N_ATOMS*N_ATOMS], pairpos[index + 2* N_ATOMS*N_ATOMS]); // index = iatom2*N_ATOMS + iatom1; // printf("Opposite distance at index %i: (%f, %f, %f)\n", index, pairpos[index], pairpos[index + N_ATOMS*N_ATOMS], pairpos[index + 2 * N_ATOMS*N_ATOMS]); // } //} } __device__ float PutInBox(float r){ if (fabs(r) > L / 2.0) r += (2 * (r < 0) - 1)*ceil((fabs(r) - L / 2.0f) / L)*L; return r; } __global__ void potForce(float * PairWise, int N, float * PotOut, float * ForceOut) { /* PairWise - PairWise distances between atoms passed from global N - # atoms RowSize - # PairWise distances per block RowCumSize - # nonzero RowSize array elements = # blocks launched in parallel PotOut - Store the output Potential in global memory ForceOut - Store the output Force in global memory along x, 1D array size N*N */ int bx = blockIdx.x; int tx = threadIdx.x; //Register variables to store pairwise separation float delx; float dely; float delz; float delr2, delrm6; float Potential; float Forcex; float Forcey; float Forcez; int row = tx + bx*BLOCK_SIZE; //if (row == 0) printf("I'm in 1! \n"); if (row < N*N) { delx = PairWise[row]; dely = PairWise[row + N*N]; delz = PairWise[row + N*N * 2]; delr2 = delx*delx + dely*dely + delz*delz; delrm6 = __powf(delr2, (float)-3); if (delr2 == 0.0) { Potential = 0; Forcex = 0; Forcey = 0; Forcez = 0; } else{ Potential = 4 * __fadd_rn(delrm6*delrm6, -1 * delrm6); Forcex = -(delx / delr2) * 24 * __fadd_rn(2 * delrm6*delrm6, -1 * delrm6); Forcey = -(dely / delr2) * 24 * __fadd_rn(2 * delrm6*delrm6, -1 * delrm6); Forcez = -(delz / delr2) * 24 * __fadd_rn(2 * delrm6*delrm6, -1 * delrm6); } PotOut[row] = Potential; ForceOut[row] = Forcex; ForceOut[row + N*N] = Forcey; ForceOut[row + N*N * 2] = Forcez; } } /*************************************************************************************************************/ /************* EXPLICIT KERNEL KINEMATICS **********/ /*************************************************************************************************************/ __global__ void kinematics(float* positions, float* force, float* vel, int len){ int tx = threadIdx.x; int bx = blockIdx.x; int index = bx*blockDim.x + tx; float tempr; //if (index == 0){ printf("You have been trolled! \n"); } if (index < len){ tempr = positions[index] + 0.5f * force[index] / MASS_ATOM * time_step*time_step + vel[index] * time_step; positions[index] = PutInBox(tempr); vel[index] += force[index] / MASS_ATOM * time_step; } } /*************************************************************************************************************/ /************* IMPLICIT KERNEL KINEMATICS **********/ /*************************************************************************************************************/ __global__ void kinematics_phase1(float* positions, float* force, float* vel, int len){ int tx = threadIdx.x; int bx = blockIdx.x; int index = bx*blockDim.x + tx; float tempr, tempa, tempvel; //if (index == 0){ printf("You have been trolled! 
\n"); } if (index < len){ tempa = force[index] / MASS_ATOM; tempvel = vel[index]; tempr = positions[index] + 0.5f * tempa * time_step*time_step + tempvel * time_step; positions[index] = PutInBox(tempr); vel[index] = tempvel + 0.5*tempa*time_step; } } __global__ void kinematics_phase2(float* force, float* vel, int len){ int tx = threadIdx.x; int bx = blockIdx.x; int index = bx*blockDim.x + tx; //if (index == 0){ printf("You have been trolled! \n"); } if (index < len){ vel[index] += 0.5 * force[index] / MASS_ATOM * time_step; } } /*************************************************************************************************************/ /************* REDUCTION KERNELS **********/ /*************************************************************************************************************/ __global__ void total(float *input, float *output, int len) { //@@ Load a segment of the input vector into shared memory //@@ Traverse the reduction tree //@@ Write the computed sum of the block to the output vector at the //@@ correct index __shared__ float partSum[2 * BLOCK_SIZE]; unsigned int tx = threadIdx.x; unsigned int start = 2 * blockIdx.x * BLOCK_SIZE; //Loading input floats to shared memory //Take care of the boundary conditions if (start + tx < len){ partSum[tx] = input[start + tx]; if (start + BLOCK_SIZE + tx <len) partSum[BLOCK_SIZE + tx] = input[start + BLOCK_SIZE + tx]; else partSum[BLOCK_SIZE + tx] = 0; } else{ partSum[tx] = 0; partSum[BLOCK_SIZE + tx] = 0; } unsigned int stride; for (stride = BLOCK_SIZE; stride > 0; stride = stride / 2){ __syncthreads(); if (tx < stride) partSum[tx] += partSum[tx + stride]; } if (tx == 0){ output[blockIdx.x] = partSum[0]; } } __global__ void forcered_simple(float * force, float * forcered){ int index = threadIdx.x + blockDim.x*blockIdx.x; int i = 0; int findex; __shared__ float forcered_sh[3 * N_ATOMS]; //if (index == 0){ printf("In force reduction kernel! 
\n"); } if (index < 3 * N_ATOMS){ forcered_sh[index] = 0.0f; } __syncthreads(); if (index < 3 * N_ATOMS){ findex = int(index / N_ATOMS)*N_ATOMS*N_ATOMS + index % N_ATOMS; for (i = 0; i < N_ATOMS; i++){ forcered_sh[index] += force[findex + i*N_ATOMS]; } } __syncthreads(); if (index < 3 * N_ATOMS){ forcered[index] = forcered_sh[index]; } /*if (index == 0){ printf("forcered [0]= %f \n", forcered[0]); printf("forcered [2]= %f \n", forcered[2]); printf("forcered [4]= %f \n \n", forcered[4]); }*/ } //__global__ void newForceReduction(float *input, float *output, int startunit, int len) //{ // unsigned int tx = threadIdx.x; // unsigned int start = blockIdx.x *N_ATOMS; // // __shared__ float partSum[BLOCK_SIZE1]; // // if (tx == 0) printf("Length of the shared memory array - %i \n", N_ATOMS); // // //Loading input floats to shared memory // //Take care of the boundary conditions // partSum[tx] = input[start + tx] + input[start + tx + BLOCK_SIZE1]; // if (tx == 0){ // if (N_ATOMS%2) partSum[0] += input[start + N_ATOMS - 1]; // } // __syncthreads(); // // //Reduction Kernel for each dimension // unsigned int stride, stride1 = BLOCK_SIZE1; // for (stride = BLOCK_SIZE1/2; stride > 0; stride = stride / 2){ // if (tx < stride) { partSum[tx] += partSum[tx + stride]; // if (tx==0){ if (stride1%2) partSum[0] += partSum[stride1-1];} // } // __syncthreads(); // stride1 = stride; // } // // if (tx == 0){ // output[blockIdx.x] = -partSum[0]; // } //} __global__ void newForceReduction(float *input, float *output, int startunit, int len) { unsigned int tx = threadIdx.x; unsigned int start = blockIdx.x *N_ATOMS; __shared__ float partSum[BLOCK_SIZE]; // if (tx == 0) printf("Length of the shared memory array - %i \n", N_ATOMS); //Loading input floats to shared memory //Take care of the boundary conditions if (tx < N_ATOMS) { partSum[tx] = input[start + tx]; } else{ partSum[tx] = 0.0f; } __syncthreads(); //Reduction Kernel for each dimension if (tx < 512){ partSum[tx] += partSum[tx + 512]; } __syncthreads(); if (tx < 256){ partSum[tx] += partSum[tx + 256]; } __syncthreads(); if (tx < 128){ partSum[tx] += partSum[tx + 128]; } __syncthreads(); if (tx < 64){ partSum[tx] += partSum[tx + 64]; } __syncthreads(); if (tx < 32){ partSum[tx] += partSum[tx + 32]; partSum[tx] += partSum[tx + 16]; partSum[tx] += partSum[tx + 8]; partSum[tx] += partSum[tx + 4]; partSum[tx] += partSum[tx + 2]; partSum[tx] += partSum[tx + 1]; } if (tx == 0){ output[blockIdx.x] = -partSum[0]; } } /*************************************************************************************************************/ /************* HOST CODE GENERATING DATA **********/ /*************************************************************************************************************/ void create_csv(char *filename, float *data, int Row, int Column){ printf("\n Creating %s.csv file", filename); FILE *fp; int i, j; filename = strcat(filename, ".csv"); fp = fopen(filename, "w+"); //fprintf(fp,"Student Id, Physics, Chemistry, Maths"); for (i = 0; i<Row; i++){ //fprintf(fp,"%d",i+1); for (j = 0; j<Column; j++) //Assuming Row major way of storage fprintf(fp, ",%f", *(data + i*Column + j)); fprintf(fp, "\n"); } fclose(fp); printf("\n %sfile created \n", filename); } void create_dump(char *filename, float *data, int steps, int nparticles){ printf("\n Creating %s.txt file", filename); FILE *fp; int i, j; filename = strcat(filename, ".txt"); fp = fopen(filename, "w+"); int stride; //fprintf(fp,"Student Id, Physics, Chemistry, Maths"); /*ITEM: TIMESTEP 0 ITEM: NUMBER OF ATOMS 
130 ITEM: BOX BOUNDS ff pp pp 0 30 0 30 -0.5 0.5 ITEM: ATOMS id type x y z ix iy iz*/ for (i = 0; i<steps; i++){ fprintf(fp, "ITEM: TIMESTEP \n%i\nITEM: NUMBER OF ATOMS\n%i\nITEM: BOX BOUNDS\n%f %f\n%f %f\n%f %f\nITEM: ATOMS id type x y z ix iy iz\n", i, N_ATOMS, -L / 2.0f, L / 2.0f, -L / 2.0f, L / 2.0f, -L / 2.0f, L / 2.0f); stride = i * 3 * N_ATOMS; for (j = 0; j<nparticles; j++) fprintf(fp, "%i %i %f %f %f 0 0 0\n", j + 1, j + 1, data[stride + j], data[stride + j + N_ATOMS], data[stride + j + 2 * N_ATOMS]); } fclose(fp); printf("\n %sfile created \n", filename); } /*************************************************************************************************************/ /************* MAIN FUNCTION **********/ /*************************************************************************************************************/ int main(){ float * d_PotOut; float * d_ForceOut; float * d_PotRedOut; float * d_ForceOutRed; float * PotRedOut; float * ForceOut; float * PotOut; float * ForceOutRed; float* d_r; float* r; float* vel; float* d_vel; float* d_pairpos; float* pairpos; float* rpairpos; char str[100]; float PEtrace[NUM_STEPS]; float* R; //Output for OVITO const int size = sizeof(float) * N_ATOMS * 3; clock_t start, diff; hipError_t state; // Declare space for storing positions and velocities on RAM r = (float *)malloc(size); vel = (float *)malloc(size); pairpos = (float *)malloc(size*N_ATOMS); rpairpos = (float *)malloc(size*N_ATOMS); ForceOut = (float *)malloc(size*N_ATOMS); PotOut = (float *)malloc(size*N_ATOMS / 3); PotRedOut = (float *)malloc(sizeof(float)*ceil(float(N_ATOMS * N_ATOMS) / BLOCK_SIZE)); //sizeof (float) needed to properly allocate memory in potredout ForceOutRed = (float *)malloc(size); R = (float *)malloc(size*NUM_STEPS); //put for OVITO // Declare space for storing positions and velocities on GPU DRAM hipMalloc((void **)&d_r, size); hipMalloc((void **)&d_vel, size); hipMalloc((void **)&d_PotOut, size*N_ATOMS / 3); hipMalloc((void **)&d_ForceOut, size*N_ATOMS); hipMalloc((void **)&d_PotRedOut, sizeof(float)*ceil(float(N_ATOMS * N_ATOMS) / BLOCK_SIZE)); hipMalloc((void **)&d_ForceOutRed, size); hipMalloc((void **)&d_pairpos, size*N_ATOMS); int N_cube = int(cbrt(float(N_ATOMS))); int gd = int(ceil(double(N_ATOMS) / N_cube)); dim3 gridSize(1, 1, 1); dim3 blockSize(N_cube, N_cube, N_cube); init_r << <gridSize, blockSize >> >(d_r, N_cube); // check r //hipMemcpy(r, d_r, size, hipMemcpyDeviceToHost); //for (int ii = 0; ii < N_ATOMS * 3; ii++){ //printf("%f \t", r[ii]); //if ((ii + 1) % N_ATOMS == 0) //printf("\n"); //} //printf("\n All r printed \n"); //printf("Generation of r succeeded! 
\n"); //char str[100]; //printf("\n Enter the filename :"); //gets(str); //create_csv(str, r , 3, N_ATOMS); // Initialise velocity init_vel(vel); state = hipDeviceSynchronize(); if (state != hipSuccess){ printf("Init_r did not succeed: %s \n", hipGetErrorString(state)); } // Check velocity //for (int ii = 0; ii < N_ATOMS*3; ii++){ //printf("%f \t", vel[ii]); //if ((ii+1) % N_ATOMS == 0) //printf("\n"); //} //printf("\n All vel printed \n"); // Copy velocity data to GPU hipMemcpy(d_vel, vel, size, hipMemcpyHostToDevice); for (int t = 0; t<NUM_STEPS; t++){ // Create pairs if ((t == 0 && scheme == 1) || (scheme == 0)){ makePairs << <ceil(N_ATOMS*N_ATOMS / float(BLOCK_SIZE)), BLOCK_SIZE >> >(d_r, d_pairpos); state = hipDeviceSynchronize(); if (state != hipSuccess){ printf("make pairs did not succeed: %s \n", hipGetErrorString(state)); } } // printing r again when looping //hipMemcpy(r, d_r, size, hipMemcpyDeviceToHost); //printf("\n Enter the filename :"); //gets(str); //create_csv(str, r, 3, N_ATOMS); // print pair distances //state = hipMemcpy(pairpos, d_pairpos, size*N_ATOMS, hipMemcpyDeviceToHost); //if (state != hipSuccess){ // printf("make pairs memcpy did not succeed: %s \n", hipGetErrorString(state)); //} //float distance; //for (int i = 0; i < N_ATOMS*N_ATOMS; i++){ // //rpairpos[i] = pairpos[i]; // distance = sqrtf(pairpos[i] * pairpos[i] // + pairpos[i + N_ATOMS*N_ATOMS] * pairpos[i + N_ATOMS*N_ATOMS] // + pairpos[i + 2 * N_ATOMS*N_ATOMS] * pairpos[i + 2 * N_ATOMS*N_ATOMS]); // printf("%f \t", distance); // if ((i + 1) % N_ATOMS == 0) printf("\n"); //} //printf("\n All pair r printed \n"); //printf("\n Enter the filename :"); //gets(str); //create_csv(str, pairpos, N_ATOMS*3, N_ATOMS); int gridDim = ceil((N_ATOMS*N_ATOMS) / (float)BLOCK_SIZE); potForce << <gridDim, BLOCK_SIZE >> >(d_pairpos, N_ATOMS, d_PotOut, d_ForceOut); hipDeviceSynchronize(); // Check potential and force //hipMemcpy(PotOut, d_PotOut, size*N_ATOMS / 3, hipMemcpyDeviceToHost); //hipMemcpy(ForceOut, d_ForceOut, size*N_ATOMS, hipMemcpyDeviceToHost); //printf("\n Now printing non-reduced force array \n"); //for (int ii = 0; ii < N_ATOMS*N_ATOMS * 3; ii++){ // printf("%f \t", ForceOut[ii]); // if ((ii + 1) % N_ATOMS == 0) // printf("\n"); //} //printf("\n Enter the filename :"); //gets(str); //create_csv(str, ForceOut , 3*N_ATOMS, N_ATOMS); total << < gridDim, BLOCK_SIZE >> >(d_PotOut, d_PotRedOut, N_ATOMS*N_ATOMS); forcered_simple << <ceil(3 * N_ATOMS / float(BLOCK_SIZE)), BLOCK_SIZE >> >(d_ForceOut, d_ForceOutRed); //newForceReduction << <3 * N_ATOMS, BLOCK_SIZE1 >> >(d_ForceOut, d_ForceOutRed, N_ATOMS, 3 * N_ATOMS*N_ATOMS); hipDeviceSynchronize(); int len = int(ceil(N_ATOMS*N_ATOMS / float(BLOCK_SIZE))); state = hipMemcpyAsync(PotRedOut, d_PotRedOut, len*sizeof(float), hipMemcpyDeviceToHost); if (state != hipSuccess) printf("Reduction of potential memcpy failed %s \n", hipGetErrorString(state)); for (int i = 1; i < len; i++){ PotRedOut[0] += PotRedOut[i]; } //printf("Potential energy is %f\n", PotRedOut[0]/2.0f); PEtrace[t] = PotRedOut[0] / 2.0; //if (PotRedOut[0]>0.0){ printf("Simulation crashed!"); break; } if (t % 1000 == 0){ printf("Potential energy at iteration [%i] is %1.10f \n", t, PEtrace[t]); } //check force reduction //state = hipMemcpy(ForceOutRed, d_ForceOutRed, size, hipMemcpyDeviceToHost); //if (state != hipSuccess) // printf("Force memcpy failed: %s \n", hipGetErrorString(state)); //printf("Now printing reduced force array \n"); //for (int ii = 0; ii < N_ATOMS * 3; ii++){ // printf("%f \t", 
ForceOutRed[ii]); // if ((ii + 1) % N_ATOMS == 0) // printf("\n"); //} //char str[100]; //printf("\n Enter the filename :"); //gets(str); //create_csv(str, ForceOutRed , 3, N_ATOMS); //Explicit scheme if (scheme == 0){ kinematics << < ceil((N_ATOMS * 3) / (float)BLOCK_SIZE), BLOCK_SIZE >> >(d_r, d_ForceOutRed, d_vel, 3 * N_ATOMS); hipDeviceSynchronize(); //state = hipMemcpy(r, d_r, size, hipMemcpyDeviceToHost); //if (state != hipSuccess) // printf("r memcpy failed: %s \n", hipGetErrorString(state)); //printf("Now printing r after iteration %i \n", t); //for (int ii = 0; ii <3 * N_ATOMS; ii++){ // printf("%f \t", r[ii]); // if ((ii + 1) % N_ATOMS == 0) // printf("\n"); //} } //Implicit scheme else{ kinematics_phase1 << <ceil((N_ATOMS * 3) / (float)BLOCK_SIZE), BLOCK_SIZE >> >(d_r, d_ForceOutRed, d_vel, 3 * N_ATOMS); makePairs << <ceil(N_ATOMS*N_ATOMS / float(BLOCK_SIZE)), BLOCK_SIZE >> >(d_r, d_pairpos); hipDeviceSynchronize(); potForce << <gridDim, BLOCK_SIZE >> >(d_pairpos, N_ATOMS, d_PotOut, d_ForceOut); hipDeviceSynchronize(); forcered_simple << <ceil(3 * N_ATOMS / float(BLOCK_SIZE)), BLOCK_SIZE >> >(d_ForceOut, d_ForceOutRed); //newForceReduction << <3 * N_ATOMS, BLOCK_SIZE >> >(d_ForceOut, d_ForceOutRed, N_ATOMS, 3 * N_ATOMS*N_ATOMS); state = hipDeviceSynchronize(); if (state != hipSuccess) printf("Force kernel failed: %s \n", hipGetErrorString(state)); kinematics_phase2 << < ceil((N_ATOMS * 3) / (float)BLOCK_SIZE), BLOCK_SIZE >> >(d_ForceOutRed, d_vel, 3 * N_ATOMS); hipDeviceSynchronize(); } hipMemcpyAsync(&R[0 + t * 3 * N_ATOMS], d_r, size, hipMemcpyDeviceToHost); } // } for time integration for loop*/ //printf("Enter name for creating positions dump: "); //gets(str); //create_dump(str, R, NUM_STEPS, N_ATOMS); // Print final pair distances //state = hipMemcpy(pairpos, d_pairpos, size*N_ATOMS, hipMemcpyDeviceToHost); //if (state != hipSuccess){ // printf("make pairs memcpy did not succeed: %s \n", hipGetErrorString(state)); //} //float distance; //for (int i = 0; i < N_ATOMS*N_ATOMS; i++){ // //rpairpos[i] = pairpos[i]; // distance = sqrtf(pairpos[i] * pairpos[i] // + pairpos[i + N_ATOMS*N_ATOMS] * pairpos[i + N_ATOMS*N_ATOMS] // + pairpos[i + 2 * N_ATOMS*N_ATOMS] * pairpos[i + 2 * N_ATOMS*N_ATOMS]); // printf("%f \t", distance); // if ((i + 1) % N_ATOMS == 0) printf("\n"); //} //printf("\n All pair r printed \n"); //printf("\n Enter the filename :"); //gets(str); //create_csv(str, PEtrace, 1, NUM_STEPS); //printf("\n Enter the filename :"); //gets(str); //create_csv(str, r , 3, N_ATOMS); // Free up host memory free(r); free(vel); free(pairpos); free(rpairpos); //free(PEtrace); // Free up device memory hipFree(d_ForceOut); hipFree(d_ForceOutRed); hipFree(d_pairpos); hipFree(d_PotOut); hipFree(d_PotRedOut); hipFree(d_r); hipFree(d_vel); return 0; }
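/*
 * Sketch: the potForce kernel above evaluates the Lennard-Jones pair interaction in
 * reduced units, V(r) = 4 (r^-12 - r^-6), with force components proportional to
 * 24 (2 r^-12 - r^-6) / r^2 times the separation components. The host-side check
 * below reproduces the same formulas and verifies the expected minimum at
 * r = 2^(1/6), where V = -1 and the force vanishes. It is an illustration, not part
 * of the simulation code.
 */
#include <cmath>
#include <cstdio>

static void lj_pair(double r2, double *pot, double *f_over_r) {
    const double inv_r6 = 1.0 / (r2 * r2 * r2);                   // r^-6, as __powf(delr2, -3)
    *pot = 4.0 * (inv_r6 * inv_r6 - inv_r6);                       // 4 (r^-12 - r^-6)
    *f_over_r = 24.0 * (2.0 * inv_r6 * inv_r6 - inv_r6) / r2;      // per-component force factor
}

int main() {
    const double rmin = std::pow(2.0, 1.0 / 6.0);                  // LJ minimum in reduced units
    double pot, f;
    lj_pair(rmin * rmin, &pot, &f);
    printf("r = 2^(1/6): V = %f (expected -1), F/r = %e (expected ~0)\n", pot, f);
    lj_pair(1.0, &pot, &f);                                        // at r = 1: V = 0, repulsive force
    printf("r = 1:       V = %f (expected 0),  F/r = %f (expected 24)\n", pot, f);
    return 0;
}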
8ea581776952d50f9943b50803ae20f9bdd933d8.cu
// Include files #include <ctime> #include "cuda_runtime.h" #include "math.h" #include <stdio.h> #include <stdlib.h> #include <device_launch_parameters.h> #include <device_functions.h> #include <curand.h> #include <curand_kernel.h> #include <time.h> #include<string.h> // Parameters #define N_ATOMS 343 #define MASS_ATOM 1.0f #define time_step 0.01f #define L 10.5f #define T 0.728f #define NUM_STEPS 10000 const int BLOCK_SIZE = 1024; //const int L = ; const int scheme = 1; // 0 for explicit, 1 for implicit /*************************************************************************************************************/ /************* INITIALIZATION CODE **********/ /*************************************************************************************************************/ __global__ void init_r(float* r, int N_cube){ int ix = threadIdx.x + blockDim.x* blockIdx.x; int iy = threadIdx.y + blockDim.y* blockIdx.y; int iz = threadIdx.z + blockDim.z* blockIdx.z; int index = ix + iy*N_cube + iz * N_cube * N_cube; if (ix < N_cube && iy < N_cube && iz<N_cube && index < N_ATOMS){ r[index] = L / 2.0 * (1.0 - float(2 * ix + 1) / N_cube); r[index + N_ATOMS] = L / 2.0 * (1.0 - float(2 * iy + 1) / N_cube); r[index + 2 * N_ATOMS] = L / 2.0 * (1.0 - float(2 * iz + 1) / N_cube); } } void init_vel(float* vel){ srand(time(NULL)); float netP[3] = { 0.0f, 0.0f, 0.0f }; float netE = 0.0; int i; float a, b, c; for (i = 0; i < N_ATOMS; i++){ a = ((float)rand() / RAND_MAX) - 0.5; b = ((float)rand() / RAND_MAX) - 0.5; c = ((float)rand() / RAND_MAX) - 0.5; vel[i] = a; vel[i + N_ATOMS] = b; vel[i + 2 * N_ATOMS] = c; netE += a*a + b*b + c*c; netP[0] += a; netP[1] += b; netP[2] += c; } netP[0] *= 1.0f / N_ATOMS; netP[1] *= 1.0f / N_ATOMS; netP[2] *= 1.0f / N_ATOMS; float vscale = sqrtf(3 * N_ATOMS*T / netE); for (i = 0; i < N_ATOMS; i++){ vel[i] = (vel[i] - netP[0])*vscale; vel[i + N_ATOMS] = (vel[i + N_ATOMS] - netP[1])*vscale; vel[i + 2 * N_ATOMS] = (vel[i + 2 * N_ATOMS] - netP[2])*vscale; } //netP[0] = 0.0f; //netP[1] = 0.0f; //netP[2] = 0.0f; //for (i = 0; i < N_ATOMS; i++){ //netP[0] += vel[i]; //netP[1] += vel[i + N_ATOMS]; //netP[2] += vel[i + 2 * N_ATOMS]; //} //printf("netP in x %f \n", netP[0]); //printf("netP in y %f \n", netP[1]); //printf("netP in z %f \n", netP[2]); } /*************************************************************************************************************/ /************* COMPUTATION KERNELS **********/ /*************************************************************************************************************/ __global__ void makePairs(float* positions, float* pairpos){ __shared__ float pos[N_ATOMS * 3]; float del; int iatom1, iatom2, i; int tx = threadIdx.x; int n = 0, breachindex = -1; int index = tx + blockDim.x*blockIdx.x; i = tx; while (i < N_ATOMS * 3){ pos[i] = positions[i]; i += blockDim.x; } __syncthreads(); if (index < N_ATOMS*N_ATOMS){ iatom1 = int(index / N_ATOMS); iatom2 = index - iatom1*N_ATOMS; #pragma unroll for (i = 0; i < 3; i++){ del = pos[iatom1 + i*N_ATOMS] - pos[iatom2 + i*N_ATOMS]; if (fabs(del) > L / 2.0f) { del += (2 * (del<0) - 1)*L; } if (fabs(del)> L / 2.0f){ n = 1 + i; breachindex = index; } pairpos[index + i*N_ATOMS*N_ATOMS] = del; } } __syncthreads(); //if (index == breachindex) { printf("Number of breaches: %i\n", n); } //if (index < N_ATOMS*N_ATOMS){ // del = sqrtf(__powf(pairpos[index], 2) + __powf(pairpos[index + N_ATOMS*N_ATOMS], 2) + __powf(pairpos[index + 2 * N_ATOMS*N_ATOMS], 2)); // if (del <1.0f && del != 0.0f){ // printf("distance breached at 
index %i\tatom 1: %i\tatom 2 : %i\tdistance: %f (%f, %f, %f)\n", // index, iatom1, iatom2, del, pairpos[index], pairpos[index + N_ATOMS*N_ATOMS], pairpos[index + 2* N_ATOMS*N_ATOMS]); // index = iatom2*N_ATOMS + iatom1; // printf("Opposite distance at index %i: (%f, %f, %f)\n", index, pairpos[index], pairpos[index + N_ATOMS*N_ATOMS], pairpos[index + 2 * N_ATOMS*N_ATOMS]); // } //} } __device__ float PutInBox(float r){ if (fabs(r) > L / 2.0) r += (2 * (r < 0) - 1)*ceil((fabs(r) - L / 2.0f) / L)*L; return r; } __global__ void potForce(float * PairWise, int N, float * PotOut, float * ForceOut) { /* PairWise - PairWise distances between atoms passed from global N - # atoms RowSize - # PairWise distances per block RowCumSize - # nonzero RowSize array elements = # blocks launched in parallel PotOut - Store the output Potential in global memory ForceOut - Store the output Force in global memory along x, 1D array size N*N */ int bx = blockIdx.x; int tx = threadIdx.x; //Register variables to store pairwise separation float delx; float dely; float delz; float delr2, delrm6; float Potential; float Forcex; float Forcey; float Forcez; int row = tx + bx*BLOCK_SIZE; //if (row == 0) printf("I'm in 1! \n"); if (row < N*N) { delx = PairWise[row]; dely = PairWise[row + N*N]; delz = PairWise[row + N*N * 2]; delr2 = delx*delx + dely*dely + delz*delz; delrm6 = __powf(delr2, (float)-3); if (delr2 == 0.0) { Potential = 0; Forcex = 0; Forcey = 0; Forcez = 0; } else{ Potential = 4 * __fadd_rn(delrm6*delrm6, -1 * delrm6); Forcex = -(delx / delr2) * 24 * __fadd_rn(2 * delrm6*delrm6, -1 * delrm6); Forcey = -(dely / delr2) * 24 * __fadd_rn(2 * delrm6*delrm6, -1 * delrm6); Forcez = -(delz / delr2) * 24 * __fadd_rn(2 * delrm6*delrm6, -1 * delrm6); } PotOut[row] = Potential; ForceOut[row] = Forcex; ForceOut[row + N*N] = Forcey; ForceOut[row + N*N * 2] = Forcez; } } /*************************************************************************************************************/ /************* EXPLICIT KERNEL KINEMATICS **********/ /*************************************************************************************************************/ __global__ void kinematics(float* positions, float* force, float* vel, int len){ int tx = threadIdx.x; int bx = blockIdx.x; int index = bx*blockDim.x + tx; float tempr; //if (index == 0){ printf("You have been trolled! \n"); } if (index < len){ tempr = positions[index] + 0.5f * force[index] / MASS_ATOM * time_step*time_step + vel[index] * time_step; positions[index] = PutInBox(tempr); vel[index] += force[index] / MASS_ATOM * time_step; } } /*************************************************************************************************************/ /************* IMPLICIT KERNEL KINEMATICS **********/ /*************************************************************************************************************/ __global__ void kinematics_phase1(float* positions, float* force, float* vel, int len){ int tx = threadIdx.x; int bx = blockIdx.x; int index = bx*blockDim.x + tx; float tempr, tempa, tempvel; //if (index == 0){ printf("You have been trolled! \n"); } if (index < len){ tempa = force[index] / MASS_ATOM; tempvel = vel[index]; tempr = positions[index] + 0.5f * tempa * time_step*time_step + tempvel * time_step; positions[index] = PutInBox(tempr); vel[index] = tempvel + 0.5*tempa*time_step; } } __global__ void kinematics_phase2(float* force, float* vel, int len){ int tx = threadIdx.x; int bx = blockIdx.x; int index = bx*blockDim.x + tx; //if (index == 0){ printf("You have been trolled! 
\n"); } if (index < len){ vel[index] += 0.5 * force[index] / MASS_ATOM * time_step; } } /*************************************************************************************************************/ /************* REDUCTION KERNELS **********/ /*************************************************************************************************************/ __global__ void total(float *input, float *output, int len) { //@@ Load a segment of the input vector into shared memory //@@ Traverse the reduction tree //@@ Write the computed sum of the block to the output vector at the //@@ correct index __shared__ float partSum[2 * BLOCK_SIZE]; unsigned int tx = threadIdx.x; unsigned int start = 2 * blockIdx.x * BLOCK_SIZE; //Loading input floats to shared memory //Take care of the boundary conditions if (start + tx < len){ partSum[tx] = input[start + tx]; if (start + BLOCK_SIZE + tx <len) partSum[BLOCK_SIZE + tx] = input[start + BLOCK_SIZE + tx]; else partSum[BLOCK_SIZE + tx] = 0; } else{ partSum[tx] = 0; partSum[BLOCK_SIZE + tx] = 0; } unsigned int stride; for (stride = BLOCK_SIZE; stride > 0; stride = stride / 2){ __syncthreads(); if (tx < stride) partSum[tx] += partSum[tx + stride]; } if (tx == 0){ output[blockIdx.x] = partSum[0]; } } __global__ void forcered_simple(float * force, float * forcered){ int index = threadIdx.x + blockDim.x*blockIdx.x; int i = 0; int findex; __shared__ float forcered_sh[3 * N_ATOMS]; //if (index == 0){ printf("In force reduction kernel! \n"); } if (index < 3 * N_ATOMS){ forcered_sh[index] = 0.0f; } __syncthreads(); if (index < 3 * N_ATOMS){ findex = int(index / N_ATOMS)*N_ATOMS*N_ATOMS + index % N_ATOMS; for (i = 0; i < N_ATOMS; i++){ forcered_sh[index] += force[findex + i*N_ATOMS]; } } __syncthreads(); if (index < 3 * N_ATOMS){ forcered[index] = forcered_sh[index]; } /*if (index == 0){ printf("forcered [0]= %f \n", forcered[0]); printf("forcered [2]= %f \n", forcered[2]); printf("forcered [4]= %f \n \n", forcered[4]); }*/ } //__global__ void newForceReduction(float *input, float *output, int startunit, int len) //{ // unsigned int tx = threadIdx.x; // unsigned int start = blockIdx.x *N_ATOMS; // // __shared__ float partSum[BLOCK_SIZE1]; // // if (tx == 0) printf("Length of the shared memory array - %i \n", N_ATOMS); // // //Loading input floats to shared memory // //Take care of the boundary conditions // partSum[tx] = input[start + tx] + input[start + tx + BLOCK_SIZE1]; // if (tx == 0){ // if (N_ATOMS%2) partSum[0] += input[start + N_ATOMS - 1]; // } // __syncthreads(); // // //Reduction Kernel for each dimension // unsigned int stride, stride1 = BLOCK_SIZE1; // for (stride = BLOCK_SIZE1/2; stride > 0; stride = stride / 2){ // if (tx < stride) { partSum[tx] += partSum[tx + stride]; // if (tx==0){ if (stride1%2) partSum[0] += partSum[stride1-1];} // } // __syncthreads(); // stride1 = stride; // } // // if (tx == 0){ // output[blockIdx.x] = -partSum[0]; // } //} __global__ void newForceReduction(float *input, float *output, int startunit, int len) { unsigned int tx = threadIdx.x; unsigned int start = blockIdx.x *N_ATOMS; __shared__ float partSum[BLOCK_SIZE]; // if (tx == 0) printf("Length of the shared memory array - %i \n", N_ATOMS); //Loading input floats to shared memory //Take care of the boundary conditions if (tx < N_ATOMS) { partSum[tx] = input[start + tx]; } else{ partSum[tx] = 0.0f; } __syncthreads(); //Reduction Kernel for each dimension if (tx < 512){ partSum[tx] += partSum[tx + 512]; } __syncthreads(); if (tx < 256){ partSum[tx] += partSum[tx + 256]; } 
__syncthreads(); if (tx < 128){ partSum[tx] += partSum[tx + 128]; } __syncthreads(); if (tx < 64){ partSum[tx] += partSum[tx + 64]; } __syncthreads(); if (tx < 32){ partSum[tx] += partSum[tx + 32]; partSum[tx] += partSum[tx + 16]; partSum[tx] += partSum[tx + 8]; partSum[tx] += partSum[tx + 4]; partSum[tx] += partSum[tx + 2]; partSum[tx] += partSum[tx + 1]; } if (tx == 0){ output[blockIdx.x] = -partSum[0]; } } /*************************************************************************************************************/ /************* HOST CODE GENERATING DATA **********/ /*************************************************************************************************************/ void create_csv(char *filename, float *data, int Row, int Column){ printf("\n Creating %s.csv file", filename); FILE *fp; int i, j; filename = strcat(filename, ".csv"); fp = fopen(filename, "w+"); //fprintf(fp,"Student Id, Physics, Chemistry, Maths"); for (i = 0; i<Row; i++){ //fprintf(fp,"%d",i+1); for (j = 0; j<Column; j++) //Assuming Row major way of storage fprintf(fp, ",%f", *(data + i*Column + j)); fprintf(fp, "\n"); } fclose(fp); printf("\n %sfile created \n", filename); } void create_dump(char *filename, float *data, int steps, int nparticles){ printf("\n Creating %s.txt file", filename); FILE *fp; int i, j; filename = strcat(filename, ".txt"); fp = fopen(filename, "w+"); int stride; //fprintf(fp,"Student Id, Physics, Chemistry, Maths"); /*ITEM: TIMESTEP 0 ITEM: NUMBER OF ATOMS 130 ITEM: BOX BOUNDS ff pp pp 0 30 0 30 -0.5 0.5 ITEM: ATOMS id type x y z ix iy iz*/ for (i = 0; i<steps; i++){ fprintf(fp, "ITEM: TIMESTEP \n%i\nITEM: NUMBER OF ATOMS\n%i\nITEM: BOX BOUNDS\n%f %f\n%f %f\n%f %f\nITEM: ATOMS id type x y z ix iy iz\n", i, N_ATOMS, -L / 2.0f, L / 2.0f, -L / 2.0f, L / 2.0f, -L / 2.0f, L / 2.0f); stride = i * 3 * N_ATOMS; for (j = 0; j<nparticles; j++) fprintf(fp, "%i %i %f %f %f 0 0 0\n", j + 1, j + 1, data[stride + j], data[stride + j + N_ATOMS], data[stride + j + 2 * N_ATOMS]); } fclose(fp); printf("\n %sfile created \n", filename); } /*************************************************************************************************************/ /************* MAIN FUNCTION **********/ /*************************************************************************************************************/ int main(){ float * d_PotOut; float * d_ForceOut; float * d_PotRedOut; float * d_ForceOutRed; float * PotRedOut; float * ForceOut; float * PotOut; float * ForceOutRed; float* d_r; float* r; float* vel; float* d_vel; float* d_pairpos; float* pairpos; float* rpairpos; char str[100]; float PEtrace[NUM_STEPS]; float* R; //Output for OVITO const int size = sizeof(float) * N_ATOMS * 3; clock_t start, diff; cudaError_t state; // Declare space for storing positions and velocities on RAM r = (float *)malloc(size); vel = (float *)malloc(size); pairpos = (float *)malloc(size*N_ATOMS); rpairpos = (float *)malloc(size*N_ATOMS); ForceOut = (float *)malloc(size*N_ATOMS); PotOut = (float *)malloc(size*N_ATOMS / 3); PotRedOut = (float *)malloc(sizeof(float)*ceil(float(N_ATOMS * N_ATOMS) / BLOCK_SIZE)); //sizeof (float) needed to properly allocate memory in potredout ForceOutRed = (float *)malloc(size); R = (float *)malloc(size*NUM_STEPS); //put for OVITO // Declare space for storing positions and velocities on GPU DRAM cudaMalloc((void **)&d_r, size); cudaMalloc((void **)&d_vel, size); cudaMalloc((void **)&d_PotOut, size*N_ATOMS / 3); cudaMalloc((void **)&d_ForceOut, size*N_ATOMS); cudaMalloc((void **)&d_PotRedOut, 
sizeof(float)*ceil(float(N_ATOMS * N_ATOMS) / BLOCK_SIZE)); cudaMalloc((void **)&d_ForceOutRed, size); cudaMalloc((void **)&d_pairpos, size*N_ATOMS); int N_cube = int(cbrt(float(N_ATOMS))); int gd = int(ceil(double(N_ATOMS) / N_cube)); dim3 gridSize(1, 1, 1); dim3 blockSize(N_cube, N_cube, N_cube); init_r << <gridSize, blockSize >> >(d_r, N_cube); // check r //cudaMemcpy(r, d_r, size, cudaMemcpyDeviceToHost); //for (int ii = 0; ii < N_ATOMS * 3; ii++){ //printf("%f \t", r[ii]); //if ((ii + 1) % N_ATOMS == 0) //printf("\n"); //} //printf("\n All r printed \n"); //printf("Generation of r succeeded! \n"); //char str[100]; //printf("\n Enter the filename :"); //gets(str); //create_csv(str, r , 3, N_ATOMS); // Initialise velocity init_vel(vel); state = cudaDeviceSynchronize(); if (state != cudaSuccess){ printf("Init_r did not succeed: %s \n", cudaGetErrorString(state)); } // Check velocity //for (int ii = 0; ii < N_ATOMS*3; ii++){ //printf("%f \t", vel[ii]); //if ((ii+1) % N_ATOMS == 0) //printf("\n"); //} //printf("\n All vel printed \n"); // Copy velocity data to GPU cudaMemcpy(d_vel, vel, size, cudaMemcpyHostToDevice); for (int t = 0; t<NUM_STEPS; t++){ // Create pairs if ((t == 0 && scheme == 1) || (scheme == 0)){ makePairs << <ceil(N_ATOMS*N_ATOMS / float(BLOCK_SIZE)), BLOCK_SIZE >> >(d_r, d_pairpos); state = cudaDeviceSynchronize(); if (state != cudaSuccess){ printf("make pairs did not succeed: %s \n", cudaGetErrorString(state)); } } // printing r again when looping //cudaMemcpy(r, d_r, size, cudaMemcpyDeviceToHost); //printf("\n Enter the filename :"); //gets(str); //create_csv(str, r, 3, N_ATOMS); // print pair distances //state = cudaMemcpy(pairpos, d_pairpos, size*N_ATOMS, cudaMemcpyDeviceToHost); //if (state != cudaSuccess){ // printf("make pairs memcpy did not succeed: %s \n", cudaGetErrorString(state)); //} //float distance; //for (int i = 0; i < N_ATOMS*N_ATOMS; i++){ // //rpairpos[i] = pairpos[i]; // distance = sqrtf(pairpos[i] * pairpos[i] // + pairpos[i + N_ATOMS*N_ATOMS] * pairpos[i + N_ATOMS*N_ATOMS] // + pairpos[i + 2 * N_ATOMS*N_ATOMS] * pairpos[i + 2 * N_ATOMS*N_ATOMS]); // printf("%f \t", distance); // if ((i + 1) % N_ATOMS == 0) printf("\n"); //} //printf("\n All pair r printed \n"); //printf("\n Enter the filename :"); //gets(str); //create_csv(str, pairpos, N_ATOMS*3, N_ATOMS); int gridDim = ceil((N_ATOMS*N_ATOMS) / (float)BLOCK_SIZE); potForce << <gridDim, BLOCK_SIZE >> >(d_pairpos, N_ATOMS, d_PotOut, d_ForceOut); cudaDeviceSynchronize(); // Check potential and force //cudaMemcpy(PotOut, d_PotOut, size*N_ATOMS / 3, cudaMemcpyDeviceToHost); //cudaMemcpy(ForceOut, d_ForceOut, size*N_ATOMS, cudaMemcpyDeviceToHost); //printf("\n Now printing non-reduced force array \n"); //for (int ii = 0; ii < N_ATOMS*N_ATOMS * 3; ii++){ // printf("%f \t", ForceOut[ii]); // if ((ii + 1) % N_ATOMS == 0) // printf("\n"); //} //printf("\n Enter the filename :"); //gets(str); //create_csv(str, ForceOut , 3*N_ATOMS, N_ATOMS); total << < gridDim, BLOCK_SIZE >> >(d_PotOut, d_PotRedOut, N_ATOMS*N_ATOMS); forcered_simple << <ceil(3 * N_ATOMS / float(BLOCK_SIZE)), BLOCK_SIZE >> >(d_ForceOut, d_ForceOutRed); //newForceReduction << <3 * N_ATOMS, BLOCK_SIZE1 >> >(d_ForceOut, d_ForceOutRed, N_ATOMS, 3 * N_ATOMS*N_ATOMS); cudaDeviceSynchronize(); int len = int(ceil(N_ATOMS*N_ATOMS / float(BLOCK_SIZE))); state = cudaMemcpyAsync(PotRedOut, d_PotRedOut, len*sizeof(float), cudaMemcpyDeviceToHost); if (state != cudaSuccess) printf("Reduction of potential memcpy failed %s \n", cudaGetErrorString(state)); for 
(int i = 1; i < len; i++){ PotRedOut[0] += PotRedOut[i]; } //printf("Potential energy is %f\n", PotRedOut[0]/2.0f); PEtrace[t] = PotRedOut[0] / 2.0; //if (PotRedOut[0]>0.0){ printf("Simulation crashed!"); break; } if (t % 1000 == 0){ printf("Potential energy at iteration [%i] is %1.10f \n", t, PEtrace[t]); } //check force reduction //state = cudaMemcpy(ForceOutRed, d_ForceOutRed, size, cudaMemcpyDeviceToHost); //if (state != cudaSuccess) // printf("Force memcpy failed: %s \n", cudaGetErrorString(state)); //printf("Now printing reduced force array \n"); //for (int ii = 0; ii < N_ATOMS * 3; ii++){ // printf("%f \t", ForceOutRed[ii]); // if ((ii + 1) % N_ATOMS == 0) // printf("\n"); //} //char str[100]; //printf("\n Enter the filename :"); //gets(str); //create_csv(str, ForceOutRed , 3, N_ATOMS); //Explicit scheme if (scheme == 0){ kinematics << < ceil((N_ATOMS * 3) / (float)BLOCK_SIZE), BLOCK_SIZE >> >(d_r, d_ForceOutRed, d_vel, 3 * N_ATOMS); cudaDeviceSynchronize(); //state = cudaMemcpy(r, d_r, size, cudaMemcpyDeviceToHost); //if (state != cudaSuccess) // printf("r memcpy failed: %s \n", cudaGetErrorString(state)); //printf("Now printing r after iteration %i \n", t); //for (int ii = 0; ii <3 * N_ATOMS; ii++){ // printf("%f \t", r[ii]); // if ((ii + 1) % N_ATOMS == 0) // printf("\n"); //} } //Implicit scheme else{ kinematics_phase1 << <ceil((N_ATOMS * 3) / (float)BLOCK_SIZE), BLOCK_SIZE >> >(d_r, d_ForceOutRed, d_vel, 3 * N_ATOMS); makePairs << <ceil(N_ATOMS*N_ATOMS / float(BLOCK_SIZE)), BLOCK_SIZE >> >(d_r, d_pairpos); cudaDeviceSynchronize(); potForce << <gridDim, BLOCK_SIZE >> >(d_pairpos, N_ATOMS, d_PotOut, d_ForceOut); cudaDeviceSynchronize(); forcered_simple << <ceil(3 * N_ATOMS / float(BLOCK_SIZE)), BLOCK_SIZE >> >(d_ForceOut, d_ForceOutRed); //newForceReduction << <3 * N_ATOMS, BLOCK_SIZE >> >(d_ForceOut, d_ForceOutRed, N_ATOMS, 3 * N_ATOMS*N_ATOMS); state = cudaDeviceSynchronize(); if (state != cudaSuccess) printf("Force kernel failed: %s \n", cudaGetErrorString(state)); kinematics_phase2 << < ceil((N_ATOMS * 3) / (float)BLOCK_SIZE), BLOCK_SIZE >> >(d_ForceOutRed, d_vel, 3 * N_ATOMS); cudaDeviceSynchronize(); } cudaMemcpyAsync(&R[0 + t * 3 * N_ATOMS], d_r, size, cudaMemcpyDeviceToHost); } // } for time integration for loop*/ //printf("Enter name for creating positions dump: "); //gets(str); //create_dump(str, R, NUM_STEPS, N_ATOMS); // Print final pair distances //state = cudaMemcpy(pairpos, d_pairpos, size*N_ATOMS, cudaMemcpyDeviceToHost); //if (state != cudaSuccess){ // printf("make pairs memcpy did not succeed: %s \n", cudaGetErrorString(state)); //} //float distance; //for (int i = 0; i < N_ATOMS*N_ATOMS; i++){ // //rpairpos[i] = pairpos[i]; // distance = sqrtf(pairpos[i] * pairpos[i] // + pairpos[i + N_ATOMS*N_ATOMS] * pairpos[i + N_ATOMS*N_ATOMS] // + pairpos[i + 2 * N_ATOMS*N_ATOMS] * pairpos[i + 2 * N_ATOMS*N_ATOMS]); // printf("%f \t", distance); // if ((i + 1) % N_ATOMS == 0) printf("\n"); //} //printf("\n All pair r printed \n"); //printf("\n Enter the filename :"); //gets(str); //create_csv(str, PEtrace, 1, NUM_STEPS); //printf("\n Enter the filename :"); //gets(str); //create_csv(str, r , 3, N_ATOMS); // Free up host memory free(r); free(vel); free(pairpos); free(rpairpos); //free(PEtrace); // Free up device memory cudaFree(d_ForceOut); cudaFree(d_ForceOutRed); cudaFree(d_pairpos); cudaFree(d_PotOut); cudaFree(d_PotRedOut); cudaFree(d_r); cudaFree(d_vel); return 0; }
6cd4d5a2117be4f932a600dabefde26feed4a9a8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define MAX_SHAREDSIZE 2048

__global__ void LoadStoreViaSharedMemory(int *In, int *Out)
{
#if 1
    int LoadStoreSize = MAX_SHAREDSIZE/blockDim.x;
    int beginIndex = threadIdx.x * LoadStoreSize;
    int endIndex = beginIndex + LoadStoreSize;

    // Allocate shared memory
    __shared__ int SharedMemory[MAX_SHAREDSIZE];

    int i;
    for(i = beginIndex; i < endIndex; i++)
        SharedMemory[i] = In[i];
    __syncthreads();

    for(i = beginIndex; i < endIndex; i++)
        Out[i] = SharedMemory[i];
    __syncthreads();
#else
    __shared__ int SharedMemory[MAX_SHAREDSIZE];
    int idx = blockDim.x*blockIdx.x+threadIdx.x;
    SharedMemory[idx] = In[idx];
    Out[idx] = SharedMemory[idx];
#endif
}
6cd4d5a2117be4f932a600dabefde26feed4a9a8.cu
#include "includes.h" #define MAX_SHAREDSIZE 2048 __global__ void LoadStoreViaSharedMemory(int *In, int *Out) { #if 1 int LoadStoreSize = MAX_SHAREDSIZE/blockDim.x; int beginIndex = threadIdx.x * LoadStoreSize; int endIndex = beginIndex + LoadStoreSize; // °øÀ¯ ¸Þ¸ð¸® ÇÒ´ç __shared__ int SharedMemory[MAX_SHAREDSIZE]; int i; for(i = beginIndex; i < endIndex; i++) SharedMemory[i] = In[i]; __syncthreads(); for(i = beginIndex; i < endIndex; i++) Out[i] = SharedMemory[i]; __syncthreads(); #else __shared__ int SharedMemory[MAX_SHAREDSIZE]; int idx = blockDim.x*blockIdx.x+threadIdx.x; SharedMemory[idx] = In[idx]; Out[idx] = SharedMemory[idx]; #endif }
ac6861ce115f42215cf789e91efe50f9fee99c69.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/***********************************************************
By Huahua Wang, the University of Minnesota, twin cities
***********************************************************/

__global__ void dual( float* err, float* Y, float* X, float* Z, unsigned int size)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    float temp;

    err[idx] = 0.0;
    for (unsigned int i = idx; i < size; i += stride) {
        temp = X[i] - Z[i];
        Y[i] += temp;
        err[idx] += temp*temp;
    }
    // __syncthreads();
}
ac6861ce115f42215cf789e91efe50f9fee99c69.cu
#include "includes.h" /*********************************************************** By Huahua Wang, the University of Minnesota, twin cities ***********************************************************/ __global__ void dual( float* err, float* Y, float* X, float* Z, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; float temp; err[idx] = 0.0; for (unsigned int i = idx; i < size; i += stride) { temp = X[i] - Z[i]; Y[i] += temp; err[idx] += temp*temp; } // __syncthreads(); }
8b132aad16561d5bfd4fa9c515ecba5854c42124.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "srad.h" // includes, project #include <hip/hip_runtime.h> // includes, kernels #include "srad_kernel.hip" void random_matrix(float *I, int rows, int cols); void runTest( int argc, char** argv); void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]); fprintf(stderr, "\t<rows> - number of rows\n"); fprintf(stderr, "\t<cols> - number of cols\n"); fprintf(stderr, "\t<y1> - y1 value of the speckle\n"); fprintf(stderr, "\t<y2> - y2 value of the speckle\n"); fprintf(stderr, "\t<x1> - x1 value of the speckle\n"); fprintf(stderr, "\t<x2> - x2 value of the speckle\n"); fprintf(stderr, "\t<lamda> - lambda (0,1)\n"); fprintf(stderr, "\t<no. of iter> - number of iterations\n"); exit(1); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } void runTest( int argc, char** argv) { int rows, cols, size_I, size_R, niter = 10, iter; float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ; #ifdef CPU float Jc, G2, L, num, den, qsqr; int *iN,*iS,*jE,*jW, k; float *dN,*dS,*dW,*dE; float cN,cS,cW,cE,D; #endif #ifdef GPU float *J_cuda; float *C_cuda; float *E_C, *W_C, *N_C, *S_C; #endif unsigned int r1, r2, c1, c2; float *c; if (argc == 9) { rows = atoi(argv[1]); //number of rows in the domain cols = atoi(argv[2]); //number of cols in the domain if ((rows%16!=0) || (cols%16!=0)){ fprintf(stderr, "rows and cols must be multiples of 16\n"); exit(1); } r1 = atoi(argv[3]); //y1 position of the speckle r2 = atoi(argv[4]); //y2 position of the speckle c1 = atoi(argv[5]); //x1 position of the speckle c2 = atoi(argv[6]); //x2 position of the speckle lambda = atof(argv[7]); //Lambda value niter = atoi(argv[8]); //number of iterations } else{ usage(argc, argv); } size_I = cols * rows; size_R = (r2-r1+1)*(c2-c1+1); I = (float *)malloc( size_I * sizeof(float) ); J = (float *)malloc( size_I * sizeof(float) ); c = (float *)malloc(sizeof(float)* size_I) ; #ifdef CPU iN = (int *)malloc(sizeof(unsigned int*) * rows) ; iS = (int *)malloc(sizeof(unsigned int*) * rows) ; jW = (int *)malloc(sizeof(unsigned int*) * cols) ; jE = (int *)malloc(sizeof(unsigned int*) * cols) ; dN = (float *)malloc(sizeof(float)* size_I) ; dS = (float *)malloc(sizeof(float)* size_I) ; dW = (float *)malloc(sizeof(float)* size_I) ; dE = (float *)malloc(sizeof(float)* size_I) ; for (int i=0; i< rows; i++) { iN[i] = i-1; iS[i] = i+1; } for (int j=0; j< cols; j++) { jW[j] = j-1; jE[j] = j+1; } iN[0] = 0; iS[rows-1] = rows-1; jW[0] = 0; jE[cols-1] = cols-1; #endif #ifdef GPU //Allocate device memory hipMalloc((void**)& J_cuda, sizeof(float)* size_I); hipMalloc((void**)& C_cuda, sizeof(float)* size_I); hipMalloc((void**)& E_C, sizeof(float)* size_I); hipMalloc((void**)& W_C, sizeof(float)* size_I); hipMalloc((void**)& S_C, sizeof(float)* size_I); hipMalloc((void**)& N_C, sizeof(float)* size_I); #endif printf("Randomizing the input matrix\n"); //Generate a random matrix random_matrix(I, rows, cols); for (int k = 0; k < size_I; k++ ) { J[k] = (float)exp(I[k]) ; } printf("Start the SRAD main loop\n"); for (iter=0; iter< niter; iter++){ sum=0; sum2=0; for (int i=r1; i<=r2; i++) { for (int j=c1; j<=c2; j++) { tmp = J[i * 
cols + j]; sum += tmp ; sum2 += tmp*tmp; } } meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI*meanROI; q0sqr = varROI / (meanROI*meanROI); #ifdef CPU for (int i = 0 ; i < rows ; i++) { for (int j = 0; j < cols; j++) { k = i * cols + j; Jc = J[k]; // directional derivates dN[k] = J[iN[i] * cols + j] - Jc; dS[k] = J[iS[i] * cols + j] - Jc; dW[k] = J[i * cols + jW[j]] - Jc; dE[k] = J[i * cols + jE[j]] - Jc; G2 = (dN[k]*dN[k] + dS[k]*dS[k] + dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc); L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc; num = (0.5*G2) - ((1.0/16.0)*(L*L)) ; den = 1 + (.25*L); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c[k] = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c[k] < 0) {c[k] = 0;} else if (c[k] > 1) {c[k] = 1;} } } for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { // current index k = i * cols + j; // diffusion coefficent cN = c[k]; cS = c[iS[i] * cols + j]; cW = c[k]; cE = c[i * cols + jE[j]]; // divergence (equ 58) D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // image update (equ 61) J[k] = J[k] + 0.25*lambda*D; } } #endif // CPU #ifdef GPU //Currently the input size must be divided by 16 - the block size int block_x = cols/BLOCK_SIZE ; int block_y = rows/BLOCK_SIZE ; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(block_x , block_y); //Copy data from main memory to device memory hipMemcpy(J_cuda, J, sizeof(float) * size_I, hipMemcpyHostToDevice); //Run kernels hipLaunchKernelGGL(( srad_cuda_1), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr); hipLaunchKernelGGL(( srad_cuda_2), dim3(dimGrid), dim3(dimBlock), 0, 0, E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr); //Copy data from device memory to main memory hipMemcpy(J, J_cuda, sizeof(float) * size_I, hipMemcpyDeviceToHost); #endif } hipDeviceSynchronize(); #ifdef OUTPUT //Printing output printf("Printing Output:\n"); for( int i = 0 ; i < rows ; i++){ for ( int j = 0 ; j < cols ; j++){ printf("%.5f ", J[i * cols + j]); } printf("\n"); } #endif printf("Computation Done\n"); free(I); free(J); #ifdef CPU free(iN); free(iS); free(jW); free(jE); free(dN); free(dS); free(dW); free(dE); #endif #ifdef GPU hipFree(C_cuda); hipFree(J_cuda); hipFree(E_C); hipFree(W_C); hipFree(N_C); hipFree(S_C); #endif free(c); } void random_matrix(float *I, int rows, int cols){ srand(7); for( int i = 0 ; i < rows ; i++){ for ( int j = 0 ; j < cols ; j++){ I[i * cols + j] = rand()/(float)RAND_MAX ; } } }
8b132aad16561d5bfd4fa9c515ecba5854c42124.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "srad.h" // includes, project #include <cuda.h> // includes, kernels #include "srad_kernel.cu" void random_matrix(float *I, int rows, int cols); void runTest( int argc, char** argv); void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]); fprintf(stderr, "\t<rows> - number of rows\n"); fprintf(stderr, "\t<cols> - number of cols\n"); fprintf(stderr, "\t<y1> - y1 value of the speckle\n"); fprintf(stderr, "\t<y2> - y2 value of the speckle\n"); fprintf(stderr, "\t<x1> - x1 value of the speckle\n"); fprintf(stderr, "\t<x2> - x2 value of the speckle\n"); fprintf(stderr, "\t<lamda> - lambda (0,1)\n"); fprintf(stderr, "\t<no. of iter> - number of iterations\n"); exit(1); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } void runTest( int argc, char** argv) { int rows, cols, size_I, size_R, niter = 10, iter; float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ; #ifdef CPU float Jc, G2, L, num, den, qsqr; int *iN,*iS,*jE,*jW, k; float *dN,*dS,*dW,*dE; float cN,cS,cW,cE,D; #endif #ifdef GPU float *J_cuda; float *C_cuda; float *E_C, *W_C, *N_C, *S_C; #endif unsigned int r1, r2, c1, c2; float *c; if (argc == 9) { rows = atoi(argv[1]); //number of rows in the domain cols = atoi(argv[2]); //number of cols in the domain if ((rows%16!=0) || (cols%16!=0)){ fprintf(stderr, "rows and cols must be multiples of 16\n"); exit(1); } r1 = atoi(argv[3]); //y1 position of the speckle r2 = atoi(argv[4]); //y2 position of the speckle c1 = atoi(argv[5]); //x1 position of the speckle c2 = atoi(argv[6]); //x2 position of the speckle lambda = atof(argv[7]); //Lambda value niter = atoi(argv[8]); //number of iterations } else{ usage(argc, argv); } size_I = cols * rows; size_R = (r2-r1+1)*(c2-c1+1); I = (float *)malloc( size_I * sizeof(float) ); J = (float *)malloc( size_I * sizeof(float) ); c = (float *)malloc(sizeof(float)* size_I) ; #ifdef CPU iN = (int *)malloc(sizeof(unsigned int*) * rows) ; iS = (int *)malloc(sizeof(unsigned int*) * rows) ; jW = (int *)malloc(sizeof(unsigned int*) * cols) ; jE = (int *)malloc(sizeof(unsigned int*) * cols) ; dN = (float *)malloc(sizeof(float)* size_I) ; dS = (float *)malloc(sizeof(float)* size_I) ; dW = (float *)malloc(sizeof(float)* size_I) ; dE = (float *)malloc(sizeof(float)* size_I) ; for (int i=0; i< rows; i++) { iN[i] = i-1; iS[i] = i+1; } for (int j=0; j< cols; j++) { jW[j] = j-1; jE[j] = j+1; } iN[0] = 0; iS[rows-1] = rows-1; jW[0] = 0; jE[cols-1] = cols-1; #endif #ifdef GPU //Allocate device memory cudaMalloc((void**)& J_cuda, sizeof(float)* size_I); cudaMalloc((void**)& C_cuda, sizeof(float)* size_I); cudaMalloc((void**)& E_C, sizeof(float)* size_I); cudaMalloc((void**)& W_C, sizeof(float)* size_I); cudaMalloc((void**)& S_C, sizeof(float)* size_I); cudaMalloc((void**)& N_C, sizeof(float)* size_I); #endif printf("Randomizing the input matrix\n"); //Generate a random matrix random_matrix(I, rows, cols); for (int k = 0; k < size_I; k++ ) { J[k] = (float)exp(I[k]) ; } printf("Start the SRAD main loop\n"); for (iter=0; iter< niter; iter++){ sum=0; sum2=0; for (int i=r1; i<=r2; i++) { for (int j=c1; j<=c2; j++) { tmp = J[i * cols + j]; sum += tmp ; sum2 += tmp*tmp; } } meanROI = sum / 
size_R; varROI = (sum2 / size_R) - meanROI*meanROI; q0sqr = varROI / (meanROI*meanROI); #ifdef CPU for (int i = 0 ; i < rows ; i++) { for (int j = 0; j < cols; j++) { k = i * cols + j; Jc = J[k]; // directional derivates dN[k] = J[iN[i] * cols + j] - Jc; dS[k] = J[iS[i] * cols + j] - Jc; dW[k] = J[i * cols + jW[j]] - Jc; dE[k] = J[i * cols + jE[j]] - Jc; G2 = (dN[k]*dN[k] + dS[k]*dS[k] + dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc); L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc; num = (0.5*G2) - ((1.0/16.0)*(L*L)) ; den = 1 + (.25*L); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c[k] = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c[k] < 0) {c[k] = 0;} else if (c[k] > 1) {c[k] = 1;} } } for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { // current index k = i * cols + j; // diffusion coefficent cN = c[k]; cS = c[iS[i] * cols + j]; cW = c[k]; cE = c[i * cols + jE[j]]; // divergence (equ 58) D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]; // image update (equ 61) J[k] = J[k] + 0.25*lambda*D; } } #endif // CPU #ifdef GPU //Currently the input size must be divided by 16 - the block size int block_x = cols/BLOCK_SIZE ; int block_y = rows/BLOCK_SIZE ; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(block_x , block_y); //Copy data from main memory to device memory cudaMemcpy(J_cuda, J, sizeof(float) * size_I, cudaMemcpyHostToDevice); //Run kernels srad_cuda_1<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, q0sqr); srad_cuda_2<<<dimGrid, dimBlock>>>(E_C, W_C, N_C, S_C, J_cuda, C_cuda, cols, rows, lambda, q0sqr); //Copy data from device memory to main memory cudaMemcpy(J, J_cuda, sizeof(float) * size_I, cudaMemcpyDeviceToHost); #endif } cudaThreadSynchronize(); #ifdef OUTPUT //Printing output printf("Printing Output:\n"); for( int i = 0 ; i < rows ; i++){ for ( int j = 0 ; j < cols ; j++){ printf("%.5f ", J[i * cols + j]); } printf("\n"); } #endif printf("Computation Done\n"); free(I); free(J); #ifdef CPU free(iN); free(iS); free(jW); free(jE); free(dN); free(dS); free(dW); free(dE); #endif #ifdef GPU cudaFree(C_cuda); cudaFree(J_cuda); cudaFree(E_C); cudaFree(W_C); cudaFree(N_C); cudaFree(S_C); #endif free(c); } void random_matrix(float *I, int rows, int cols){ srand(7); for( int i = 0 ; i < rows ; i++){ for ( int j = 0 ; j < cols ; j++){ I[i * cols + j] = rand()/(float)RAND_MAX ; } } }
9dbba86f7fab99c67429d107f7cdb15a8061520b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { template <typename X, typename Y> static _CUDA_G void scatterSimpleKernel(void *vx, const Nd4jLong *xTadShape, const Nd4jLong *xTadOffsets, Nd4jLong xLength, Nd4jLong numTads, const void *vi, const Nd4jLong *iShapeInfo, Nd4jLong iLength, const void *vu, const Nd4jLong *uShapeInfo, Nd4jLong uLength) { auto u = reinterpret_cast<const X*>(vu); auto indices = reinterpret_cast<const Y*>(vi); auto tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < iLength; i += blockDim.x * gridDim.x) { auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i]; auto idx = indices[shape::getIndexOffset(i, iShapeInfo)]; x[shape::getIndexOffset(idx, xTadShape)] = u[shape::getIndexOffset(i, uShapeInfo)]; } } template <typename X, typename Y> void scatterSimple_(sd::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) { auto dims = ShapeUtils::evalDimsToExclude(input.rankOf(), dimensions); auto packX = ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), dims); auto xLength = shape::length(packX.primaryShapeInfo()); auto iLength = indices.lengthOf(); auto uLength = updates.lengthOf(); hipLaunchKernelGGL(( scatterSimpleKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), xLength, packX.numberOfTads(), indices.specialBuffer(), indices.specialShapeInfo(), iLength, updates.specialBuffer(), updates.specialShapeInfo(), uLength); } ND4J_LOCAL void scatterSimple(sd::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) { auto xType = input.dataType(); auto yType = indices.dataType(); if (opId != 6) throw std::runtime_error("scatterSimple: only copy op is supported"); NDArray::prepareSpecialUse({&input}, {&updates, &indices}); BUILD_DOUBLE_SELECTOR(xType, yType, scatterSimple_, (context, opId, input, updates, indices, dimensions), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&input}, {&updates, &indices}); } } } }
9dbba86f7fab99c67429d107f7cdb15a8061520b.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { template <typename X, typename Y> static _CUDA_G void scatterSimpleKernel(void *vx, const Nd4jLong *xTadShape, const Nd4jLong *xTadOffsets, Nd4jLong xLength, Nd4jLong numTads, const void *vi, const Nd4jLong *iShapeInfo, Nd4jLong iLength, const void *vu, const Nd4jLong *uShapeInfo, Nd4jLong uLength) { auto u = reinterpret_cast<const X*>(vu); auto indices = reinterpret_cast<const Y*>(vi); auto tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < iLength; i += blockDim.x * gridDim.x) { auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i]; auto idx = indices[shape::getIndexOffset(i, iShapeInfo)]; x[shape::getIndexOffset(idx, xTadShape)] = u[shape::getIndexOffset(i, uShapeInfo)]; } } template <typename X, typename Y> void scatterSimple_(sd::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) { auto dims = ShapeUtils::evalDimsToExclude(input.rankOf(), dimensions); auto packX = ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), dims); auto xLength = shape::length(packX.primaryShapeInfo()); auto iLength = indices.lengthOf(); auto uLength = updates.lengthOf(); scatterSimpleKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), xLength, packX.numberOfTads(), indices.specialBuffer(), indices.specialShapeInfo(), iLength, updates.specialBuffer(), updates.specialShapeInfo(), uLength); } ND4J_LOCAL void scatterSimple(sd::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) { auto xType = input.dataType(); auto yType = indices.dataType(); if (opId != 6) throw std::runtime_error("scatterSimple: only copy op is supported"); NDArray::prepareSpecialUse({&input}, {&updates, &indices}); BUILD_DOUBLE_SELECTOR(xType, yType, scatterSimple_, (context, opId, input, updates, indices, dimensions), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&input}, {&updates, &indices}); } } } }
d5fc51e30aeb2fdbc4999e5d421a8a5de1941b51.hip
// !!! This is a file automatically generated by hipify!!! #include "WT.cuh" WTAll::WTAll(int argmaxWTLength, int argWordLength, int argNumChunks, int argMaxChunkWTLength, int argNumOfWordS) { maxWTLength = argmaxWTLength; wordLength = argWordLength; numChunks = argNumChunks; maxChunkWTLength = argMaxChunkWTLength; numOfWordS = argNumOfWordS; /*WTLengthVec = new int[numChunks]; WTRowSum = new int[K]; NZWTCount = new int[numOfWordS]; WTIndex = new unsigned short int[maxWTLength]; WTValue = new unsigned short int[maxWTLength]; WTCount = new int[wordLength]; WTOffset = new int[wordLength];*/ hipHostMalloc((void**)&WTLengthVec, numChunks * sizeof(int)); hipHostMalloc((void**)&WTRowSum, K * sizeof(int)); hipHostMalloc((void**)&NZWTCount, numOfWordS * sizeof(int)); hipHostMalloc((void**)&WTIndex, maxWTLength * sizeof(unsigned short int)); hipHostMalloc((void**)&WTValue, maxWTLength * sizeof(unsigned short int)); hipHostMalloc((void**)&WTCount, wordLength * sizeof(int)); hipHostMalloc((void**)&WTOffset, wordLength * sizeof(int)); //////-----chunkWT-----for test-------- //chunkNZWTCount = new int[numOfWordS]; //chunkWTIndex = new int[maxChunkWTLength]; //chunkWTValue = new int[maxChunkWTLength]; //////-----chunkWT-----for test-------- hipHostMalloc((void**)&chunkNZWTCount, numOfWordS * sizeof(int)); hipHostMalloc((void**)&chunkWTIndex, maxChunkWTLength * sizeof(int)); hipHostMalloc((void**)&chunkWTValue, maxChunkWTLength * sizeof(int)); memset(chunkNZWTCount, 0, numOfWordS * sizeof(int)); memset(chunkWTIndex, 0, maxChunkWTLength * sizeof(unsigned short int)); memset(chunkWTValue, 0, maxChunkWTLength * sizeof(unsigned short int)); memset(NZWTCount, 0, numOfWordS * sizeof(int)); memset(WTIndex, 0, maxWTLength * sizeof(unsigned short int)); memset(WTValue, 0, maxWTLength * sizeof(unsigned short int)); memset(WTRowSum, 0, K * sizeof(int)); } void WTAll::CPUMemSet() { memset(NZWTCount, 0, numOfWordS * sizeof(int)); memset(WTIndex, 0, maxWTLength * sizeof(unsigned short int)); memset(WTValue, 0, maxWTLength * sizeof(unsigned short int)); memset(WTCount, 0, wordLength * sizeof(int)); memset(WTOffset, 0, wordLength * sizeof(int)); memset(WTRowSum, 0, K * sizeof(int)); } void WTAll::GPUMemAllocate() { hipMalloc((void**)&deviceNZWTCount, (numOfWordS) * sizeof(int)); hipMalloc((void**)&deviceWTIndex, (maxWTLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceWTValue, (maxWTLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceWTCount, (wordLength) * sizeof(int)); hipMalloc((void**)&deviceWTOffset, (wordLength) * sizeof(int)); hipMalloc((void**)&deviceWTRowSum, (K) * sizeof(int)); hipMalloc((void**)&deviceBlockCount, (1) * sizeof(int)); hipMalloc((void**)&deviceWarpCount, (1) * sizeof(int)); for (int i = 0; i < numStreams; i++) { hipMalloc((void**)&deviceChunkWTCount[i], (numOfWordS) * sizeof(int)); hipMalloc((void**)&deviceChunkWTOffset[i], (numOfWordS) * sizeof(int)); hipMalloc((void**)&deviceChunkNZWTCount[i], (numOfWordS) * sizeof(int)); hipMalloc((void**)&deviceChunkWTIndex[i], (maxChunkWTLength) * sizeof(unsigned short int)); hipMalloc((void**)&deviceChunkWTValue[i], (maxChunkWTLength) * sizeof(unsigned short int)); } WTMemory = (6 * wordLength + 2 * maxWTLength + K + 2 * maxChunkWTLength) /1000000000.0 * sizeof(int); printf("WT memory usage(Sparse):%f GB\n", WTMemory); WTMemory = K /1000000000.0 * wordLength * sizeof(int); printf("WT memory usage(Dense):%f GB\n", WTMemory); } void WTAll::GPUMemset(hipStream_t& stream) { /*hipMemsetAsync(deviceNZWTCount, 0, (numOfWordS) * 
sizeof(int), stream); hipMemsetAsync(deviceWTIndex, 0, (maxWTLength) * sizeof(unsigned short int), stream); hipMemsetAsync(deviceWTValue, 0, (maxWTLength) * sizeof(unsigned short int), stream); hipMemsetAsync(deviceWTRowSum, 0, (K) * sizeof(int), stream);*/ hipMemcpyAsync(deviceNZWTCount, NZWTCount, (numOfWordS) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceWTIndex, WTIndex, (maxWTLength) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceWTValue, WTValue, (maxWTLength) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceWTRowSum, WTRowSum, (K) * sizeof(int), hipMemcpyHostToDevice, stream); } void WTAll::chunkGPUMemset(int argStreamId, hipStream_t& stream) { /*hipMemsetAsync(deviceChunkNZWTCount[argStreamId], 0, (numOfWordS) * sizeof(int), stream); hipMemsetAsync(deviceChunkWTIndex[argStreamId], 0, (maxChunkWTLength) * sizeof(unsigned short int), stream); hipMemsetAsync(deviceChunkWTValue[argStreamId], 0, (maxChunkWTLength) * sizeof(unsigned short int), stream);*/ hipMemcpyAsync(deviceChunkNZWTCount[argStreamId], chunkNZWTCount, (numOfWordS) * sizeof(int), hipMemcpyHostToDevice,stream); hipMemcpyAsync(deviceChunkWTIndex[argStreamId], chunkWTIndex, (maxChunkWTLength) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceChunkWTValue[argStreamId], chunkWTValue, (maxChunkWTLength) * sizeof(unsigned short int), hipMemcpyHostToDevice,stream); //hipMemset(deviceWTRowSum, 0, (K) * sizeof(int)); } void WTAll::loadWTLength(string argFilePrefix) { ifstream WTLength((argFilePrefix + string("/WTLength.txt")).c_str(), ios::binary);//store max Doc and DT length for (int chunkId = 0; chunkId < numChunks; chunkId++) { WTLength >> WTLengthVec[chunkId]; } WTLength.close(); } void WTAll::loadWTCountOffset(string argFilePrefix) { //--------load chunkWTCountOffset-------------- for (int chunkId = 0; chunkId < numChunks; chunkId++) { WTChunkData chunkWTData(chunkId, wordLength, maxChunkWTLength, WTLengthVec[chunkId], numOfWordS); chunkWTData.CPUMemSet(); chunkWTData.loadWTCountOffset(argFilePrefix); WTChunkVec.push_back(chunkWTData); } //--------load chunkWTCountOffset-------------- //--------load WTCountOffset-------------- ifstream WTCountOffset((argFilePrefix + string("/WTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL blockCount = 0; for (int i = 0; i < wordLength; i++) { WTCountOffset >> WTCount[i] >> WTOffset[i]; if (i >= wordLength - numOfWordS) { if (WTCount[i] > 32) { blockCount++; } } } WTCountOffset.close(); warpCount = numOfWordS - blockCount; printf("WT Count and Offset loaded!...\n"); //--------load WTCountOffset-------------- } void WTAll::blockWarpCountCPU2GPU() { hipMemcpy(deviceBlockCount, &blockCount, (1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(deviceWarpCount, &warpCount, (1) * sizeof(int), hipMemcpyHostToDevice); } void WTAll::CPU2GPUCountOffset(hipStream_t& stream) { hipMemcpyAsync(deviceWTCount, WTCount, (wordLength) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceWTOffset, WTOffset, (wordLength) * sizeof(int), hipMemcpyHostToDevice, stream); } void WTAll::WTCPU2GPU(hipStream_t& stream) { hipMemcpyAsync(deviceNZWTCount, NZWTCount, (numOfWordS) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceWTIndex, WTIndex, (maxWTLength) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceWTValue, WTValue, (maxWTLength) * sizeof(unsigned short int), hipMemcpyHostToDevice, stream); } void 
WTAll::WTGPU2CPU() { hipMemcpy(NZWTCount, deviceNZWTCount, (numOfWordS) * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(WTIndex, deviceWTIndex, (maxWTLength) * sizeof(unsigned short int), hipMemcpyDeviceToHost); hipMemcpy(WTValue, deviceWTValue, (maxWTLength) * sizeof(unsigned short int), hipMemcpyDeviceToHost); hipMemcpy(WTRowSum, deviceWTRowSum, (K) * sizeof(int), hipMemcpyDeviceToHost); } void WTAll::chunkCPU2GPUCountOffset(int argChunkId, int argStreamId, hipStream_t& stream) { /*int chunkId = argChunkId;*/ hipMemcpyAsync(deviceChunkWTCount[argStreamId], WTChunkVec[argChunkId].WTCount, (numOfWordS) * sizeof(int), hipMemcpyHostToDevice, stream); hipMemcpyAsync(deviceChunkWTOffset[argStreamId], WTChunkVec[argChunkId].WTOffset, (numOfWordS) * sizeof(int), hipMemcpyHostToDevice, stream); } void WTAll::chunkWTCPU2GPU(int argChunkId, int argStreamId, hipStream_t& stream) { /*int chunkId = argChunkId;*/ hipMemcpyAsync(deviceChunkNZWTCount[argStreamId], WTChunkVec[argChunkId].NZWTCount, (numOfWordS) * sizeof(int), hipMemcpyHostToDevice,stream); hipMemcpyAsync(deviceChunkWTIndex[argStreamId], WTChunkVec[argChunkId].WTIndex, (WTLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice,stream); hipMemcpyAsync(deviceChunkWTValue[argStreamId], WTChunkVec[argChunkId].WTValue, (WTLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyHostToDevice,stream); } void WTAll::chunkWTGPU2CPU(int argChunkId, int argStreamId, hipStream_t& stream) { /*int chunkId = argChunkId;*/ hipMemcpyAsync(WTChunkVec[argChunkId].NZWTCount, deviceChunkNZWTCount[argStreamId], (numOfWordS) * sizeof(int), hipMemcpyDeviceToHost, stream); hipMemcpyAsync(WTChunkVec[argChunkId].WTIndex, deviceChunkWTIndex[argStreamId], (WTLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost, stream); hipMemcpyAsync(WTChunkVec[argChunkId].WTValue, deviceChunkWTValue[argStreamId], (WTLengthVec[argChunkId]) * sizeof(unsigned short int), hipMemcpyDeviceToHost, stream); } void WTAll::CPU2Disk(string argFilePrefix) { ofstream OutputNZWTCount((argFilePrefix + string("/NZWTCount.txt")).c_str(), ios::binary); for (int i = 0; i < numOfWordS; i++) { OutputNZWTCount << NZWTCount[i] << "\n"; } OutputNZWTCount.close(); ofstream OutputWTIndexValue((argFilePrefix + string("/WTIndexValue.txt")).c_str(), ios::binary); for (int i = 0; i < maxWTLength; i++) { OutputWTIndexValue << WTIndex[i] << " " << WTValue[i] << "\n"; } OutputWTIndexValue.close(); ofstream OutputWTRowSum((argFilePrefix + string("/WTRowSum.txt")).c_str(), ios::binary); for (int i = 0; i < K; i++) { OutputWTRowSum << WTRowSum[i]<< "\n"; } OutputWTRowSum.close(); } void WTAll::CPU2DiskChunk(string argFilePrefix, int argChunkId) { int chunkId = argChunkId; string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId); ofstream OutputNZWTCount((chunkFolderName + string("/NZWTCount.txt")).c_str(), ios::binary); for (int i = 0; i < numOfWordS; i++) { OutputNZWTCount << WTChunkVec[chunkId].NZWTCount[i] << "\n"; } OutputNZWTCount.close(); ofstream OutputWTIndexValue((chunkFolderName + string("/WTIndexValue.txt")).c_str(), ios::binary); for (int i = 0; i < WTLengthVec[chunkId]; i++) { OutputWTIndexValue << WTChunkVec[chunkId].WTIndex[i] << " " << WTChunkVec[chunkId].WTValue[i] << "\n"; } OutputWTIndexValue.close(); }
d5fc51e30aeb2fdbc4999e5d421a8a5de1941b51.cu
#include "WT.cuh" WTAll::WTAll(int argmaxWTLength, int argWordLength, int argNumChunks, int argMaxChunkWTLength, int argNumOfWordS) { maxWTLength = argmaxWTLength; wordLength = argWordLength; numChunks = argNumChunks; maxChunkWTLength = argMaxChunkWTLength; numOfWordS = argNumOfWordS; /*WTLengthVec = new int[numChunks]; WTRowSum = new int[K]; NZWTCount = new int[numOfWordS]; WTIndex = new unsigned short int[maxWTLength]; WTValue = new unsigned short int[maxWTLength]; WTCount = new int[wordLength]; WTOffset = new int[wordLength];*/ cudaMallocHost((void**)&WTLengthVec, numChunks * sizeof(int)); cudaMallocHost((void**)&WTRowSum, K * sizeof(int)); cudaMallocHost((void**)&NZWTCount, numOfWordS * sizeof(int)); cudaMallocHost((void**)&WTIndex, maxWTLength * sizeof(unsigned short int)); cudaMallocHost((void**)&WTValue, maxWTLength * sizeof(unsigned short int)); cudaMallocHost((void**)&WTCount, wordLength * sizeof(int)); cudaMallocHost((void**)&WTOffset, wordLength * sizeof(int)); //////-----chunkWT-----for test-------- //chunkNZWTCount = new int[numOfWordS]; //chunkWTIndex = new int[maxChunkWTLength]; //chunkWTValue = new int[maxChunkWTLength]; //////-----chunkWT-----for test-------- cudaMallocHost((void**)&chunkNZWTCount, numOfWordS * sizeof(int)); cudaMallocHost((void**)&chunkWTIndex, maxChunkWTLength * sizeof(int)); cudaMallocHost((void**)&chunkWTValue, maxChunkWTLength * sizeof(int)); memset(chunkNZWTCount, 0, numOfWordS * sizeof(int)); memset(chunkWTIndex, 0, maxChunkWTLength * sizeof(unsigned short int)); memset(chunkWTValue, 0, maxChunkWTLength * sizeof(unsigned short int)); memset(NZWTCount, 0, numOfWordS * sizeof(int)); memset(WTIndex, 0, maxWTLength * sizeof(unsigned short int)); memset(WTValue, 0, maxWTLength * sizeof(unsigned short int)); memset(WTRowSum, 0, K * sizeof(int)); } void WTAll::CPUMemSet() { memset(NZWTCount, 0, numOfWordS * sizeof(int)); memset(WTIndex, 0, maxWTLength * sizeof(unsigned short int)); memset(WTValue, 0, maxWTLength * sizeof(unsigned short int)); memset(WTCount, 0, wordLength * sizeof(int)); memset(WTOffset, 0, wordLength * sizeof(int)); memset(WTRowSum, 0, K * sizeof(int)); } void WTAll::GPUMemAllocate() { cudaMalloc((void**)&deviceNZWTCount, (numOfWordS) * sizeof(int)); cudaMalloc((void**)&deviceWTIndex, (maxWTLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceWTValue, (maxWTLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceWTCount, (wordLength) * sizeof(int)); cudaMalloc((void**)&deviceWTOffset, (wordLength) * sizeof(int)); cudaMalloc((void**)&deviceWTRowSum, (K) * sizeof(int)); cudaMalloc((void**)&deviceBlockCount, (1) * sizeof(int)); cudaMalloc((void**)&deviceWarpCount, (1) * sizeof(int)); for (int i = 0; i < numStreams; i++) { cudaMalloc((void**)&deviceChunkWTCount[i], (numOfWordS) * sizeof(int)); cudaMalloc((void**)&deviceChunkWTOffset[i], (numOfWordS) * sizeof(int)); cudaMalloc((void**)&deviceChunkNZWTCount[i], (numOfWordS) * sizeof(int)); cudaMalloc((void**)&deviceChunkWTIndex[i], (maxChunkWTLength) * sizeof(unsigned short int)); cudaMalloc((void**)&deviceChunkWTValue[i], (maxChunkWTLength) * sizeof(unsigned short int)); } WTMemory = (6 * wordLength + 2 * maxWTLength + K + 2 * maxChunkWTLength) /1000000000.0 * sizeof(int); printf("WT memory usage(Sparse):%f GB\n", WTMemory); WTMemory = K /1000000000.0 * wordLength * sizeof(int); printf("WT memory usage(Dense):%f GB\n", WTMemory); } void WTAll::GPUMemset(cudaStream_t& stream) { /*cudaMemsetAsync(deviceNZWTCount, 0, (numOfWordS) * sizeof(int), stream); 
cudaMemsetAsync(deviceWTIndex, 0, (maxWTLength) * sizeof(unsigned short int), stream); cudaMemsetAsync(deviceWTValue, 0, (maxWTLength) * sizeof(unsigned short int), stream); cudaMemsetAsync(deviceWTRowSum, 0, (K) * sizeof(int), stream);*/ cudaMemcpyAsync(deviceNZWTCount, NZWTCount, (numOfWordS) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceWTIndex, WTIndex, (maxWTLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceWTValue, WTValue, (maxWTLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceWTRowSum, WTRowSum, (K) * sizeof(int), cudaMemcpyHostToDevice, stream); } void WTAll::chunkGPUMemset(int argStreamId, cudaStream_t& stream) { /*cudaMemsetAsync(deviceChunkNZWTCount[argStreamId], 0, (numOfWordS) * sizeof(int), stream); cudaMemsetAsync(deviceChunkWTIndex[argStreamId], 0, (maxChunkWTLength) * sizeof(unsigned short int), stream); cudaMemsetAsync(deviceChunkWTValue[argStreamId], 0, (maxChunkWTLength) * sizeof(unsigned short int), stream);*/ cudaMemcpyAsync(deviceChunkNZWTCount[argStreamId], chunkNZWTCount, (numOfWordS) * sizeof(int), cudaMemcpyHostToDevice,stream); cudaMemcpyAsync(deviceChunkWTIndex[argStreamId], chunkWTIndex, (maxChunkWTLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceChunkWTValue[argStreamId], chunkWTValue, (maxChunkWTLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice,stream); //cudaMemset(deviceWTRowSum, 0, (K) * sizeof(int)); } void WTAll::loadWTLength(string argFilePrefix) { ifstream WTLength((argFilePrefix + string("/WTLength.txt")).c_str(), ios::binary);//store max Doc and DT length for (int chunkId = 0; chunkId < numChunks; chunkId++) { WTLength >> WTLengthVec[chunkId]; } WTLength.close(); } void WTAll::loadWTCountOffset(string argFilePrefix) { //--------load chunkWTCountOffset-------------- for (int chunkId = 0; chunkId < numChunks; chunkId++) { WTChunkData chunkWTData(chunkId, wordLength, maxChunkWTLength, WTLengthVec[chunkId], numOfWordS); chunkWTData.CPUMemSet(); chunkWTData.loadWTCountOffset(argFilePrefix); WTChunkVec.push_back(chunkWTData); } //--------load chunkWTCountOffset-------------- //--------load WTCountOffset-------------- ifstream WTCountOffset((argFilePrefix + string("/WTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL blockCount = 0; for (int i = 0; i < wordLength; i++) { WTCountOffset >> WTCount[i] >> WTOffset[i]; if (i >= wordLength - numOfWordS) { if (WTCount[i] > 32) { blockCount++; } } } WTCountOffset.close(); warpCount = numOfWordS - blockCount; printf("WT Count and Offset loaded!...\n"); //--------load WTCountOffset-------------- } void WTAll::blockWarpCountCPU2GPU() { cudaMemcpy(deviceBlockCount, &blockCount, (1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceWarpCount, &warpCount, (1) * sizeof(int), cudaMemcpyHostToDevice); } void WTAll::CPU2GPUCountOffset(cudaStream_t& stream) { cudaMemcpyAsync(deviceWTCount, WTCount, (wordLength) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceWTOffset, WTOffset, (wordLength) * sizeof(int), cudaMemcpyHostToDevice, stream); } void WTAll::WTCPU2GPU(cudaStream_t& stream) { cudaMemcpyAsync(deviceNZWTCount, NZWTCount, (numOfWordS) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceWTIndex, WTIndex, (maxWTLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceWTValue, WTValue, (maxWTLength) * sizeof(unsigned short int), cudaMemcpyHostToDevice, 
stream); } void WTAll::WTGPU2CPU() { cudaMemcpy(NZWTCount, deviceNZWTCount, (numOfWordS) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(WTIndex, deviceWTIndex, (maxWTLength) * sizeof(unsigned short int), cudaMemcpyDeviceToHost); cudaMemcpy(WTValue, deviceWTValue, (maxWTLength) * sizeof(unsigned short int), cudaMemcpyDeviceToHost); cudaMemcpy(WTRowSum, deviceWTRowSum, (K) * sizeof(int), cudaMemcpyDeviceToHost); } void WTAll::chunkCPU2GPUCountOffset(int argChunkId, int argStreamId, cudaStream_t& stream) { /*int chunkId = argChunkId;*/ cudaMemcpyAsync(deviceChunkWTCount[argStreamId], WTChunkVec[argChunkId].WTCount, (numOfWordS) * sizeof(int), cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(deviceChunkWTOffset[argStreamId], WTChunkVec[argChunkId].WTOffset, (numOfWordS) * sizeof(int), cudaMemcpyHostToDevice, stream); } void WTAll::chunkWTCPU2GPU(int argChunkId, int argStreamId, cudaStream_t& stream) { /*int chunkId = argChunkId;*/ cudaMemcpyAsync(deviceChunkNZWTCount[argStreamId], WTChunkVec[argChunkId].NZWTCount, (numOfWordS) * sizeof(int), cudaMemcpyHostToDevice,stream); cudaMemcpyAsync(deviceChunkWTIndex[argStreamId], WTChunkVec[argChunkId].WTIndex, (WTLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice,stream); cudaMemcpyAsync(deviceChunkWTValue[argStreamId], WTChunkVec[argChunkId].WTValue, (WTLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyHostToDevice,stream); } void WTAll::chunkWTGPU2CPU(int argChunkId, int argStreamId, cudaStream_t& stream) { /*int chunkId = argChunkId;*/ cudaMemcpyAsync(WTChunkVec[argChunkId].NZWTCount, deviceChunkNZWTCount[argStreamId], (numOfWordS) * sizeof(int), cudaMemcpyDeviceToHost, stream); cudaMemcpyAsync(WTChunkVec[argChunkId].WTIndex, deviceChunkWTIndex[argStreamId], (WTLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost, stream); cudaMemcpyAsync(WTChunkVec[argChunkId].WTValue, deviceChunkWTValue[argStreamId], (WTLengthVec[argChunkId]) * sizeof(unsigned short int), cudaMemcpyDeviceToHost, stream); } void WTAll::CPU2Disk(string argFilePrefix) { ofstream OutputNZWTCount((argFilePrefix + string("/NZWTCount.txt")).c_str(), ios::binary); for (int i = 0; i < numOfWordS; i++) { OutputNZWTCount << NZWTCount[i] << "\n"; } OutputNZWTCount.close(); ofstream OutputWTIndexValue((argFilePrefix + string("/WTIndexValue.txt")).c_str(), ios::binary); for (int i = 0; i < maxWTLength; i++) { OutputWTIndexValue << WTIndex[i] << " " << WTValue[i] << "\n"; } OutputWTIndexValue.close(); ofstream OutputWTRowSum((argFilePrefix + string("/WTRowSum.txt")).c_str(), ios::binary); for (int i = 0; i < K; i++) { OutputWTRowSum << WTRowSum[i]<< "\n"; } OutputWTRowSum.close(); } void WTAll::CPU2DiskChunk(string argFilePrefix, int argChunkId) { int chunkId = argChunkId; string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId); ofstream OutputNZWTCount((chunkFolderName + string("/NZWTCount.txt")).c_str(), ios::binary); for (int i = 0; i < numOfWordS; i++) { OutputNZWTCount << WTChunkVec[chunkId].NZWTCount[i] << "\n"; } OutputNZWTCount.close(); ofstream OutputWTIndexValue((chunkFolderName + string("/WTIndexValue.txt")).c_str(), ios::binary); for (int i = 0; i < WTLengthVec[chunkId]; i++) { OutputWTIndexValue << WTChunkVec[chunkId].WTIndex[i] << " " << WTChunkVec[chunkId].WTValue[i] << "\n"; } OutputWTIndexValue.close(); }
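// --- Illustrative sketch (not part of WT.cuh) ----------------------------------
// The WTAll code above keeps its host-side staging buffers in pinned memory
// (cudaMallocHost) instead of the plain new[] allocations left in its comments.
// cudaMemcpyAsync can only overlap with kernels and other streams when the host
// buffer is page-locked, which is what the per-stream chunk transfers rely on.
// The minimal pattern below shows that idea in isolation; the buffer names and
// sizes are made up for the example and are not taken from the file above.
#include <cuda_runtime.h>
int pinned_async_copy_demo() {
  const size_t n = 1 << 20;          // hypothetical element count
  int *h_buf = nullptr, *d_buf = nullptr;
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  cudaMallocHost((void**)&h_buf, n * sizeof(int));   // page-locked host memory
  cudaMalloc((void**)&d_buf, n * sizeof(int));
  for (size_t i = 0; i < n; ++i) h_buf[i] = 0;
  // Because h_buf is pinned, this copy can proceed asynchronously on `stream`.
  cudaMemcpyAsync(d_buf, h_buf, n * sizeof(int), cudaMemcpyHostToDevice, stream);
  cudaStreamSynchronize(stream);     // wait before reusing or freeing h_buf
  cudaFree(d_buf);
  cudaFreeHost(h_buf);
  cudaStreamDestroy(stream);
  return 0;
}
// --------------------------------------------------------------------------------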
392f02219687929e8d2a93f4777fe0cd6de0a897.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <hip/hip_runtime.h> #include <stdio.h> #include "scanLargeArray.h" #define UINT32_MAX 4294967295 #define BITS 4 #define LNB 4 #define SORT_BS 256 #define CONFLICT_FREE_OFFSET(index) ((index) >> LNB + (index) >> (2*LNB)) #define BLOCK_P_OFFSET (4*SORT_BS+1+(4*SORT_BS+1)/16+(4*SORT_BS+1)/64) __device__ void scan (unsigned int s_data[BLOCK_P_OFFSET]){ unsigned int thid = threadIdx.x; __syncthreads(); s_data[2*thid+1+CONFLICT_FREE_OFFSET(2*thid+1)] += s_data[2*thid+CONFLICT_FREE_OFFSET(2*thid)]; s_data[2*(blockDim.x+thid)+1+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid)+1)] += s_data[2*(blockDim.x+thid)+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid))]; unsigned int stride = 2; for (unsigned int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } if (thid == 0){ unsigned int last = 4*blockDim.x-1; last += CONFLICT_FREE_OFFSET(last); s_data[4*blockDim.x+CONFLICT_FREE_OFFSET(4*blockDim.x)] = s_data[last]; s_data[last] = 0; } for (unsigned int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } __syncthreads(); unsigned int temp = s_data[2*thid+CONFLICT_FREE_OFFSET(2*thid)]; s_data[2*thid+CONFLICT_FREE_OFFSET(2*thid)] = s_data[2*thid+1+CONFLICT_FREE_OFFSET(2*thid+1)]; s_data[2*thid+1+CONFLICT_FREE_OFFSET(2*thid+1)] += temp; unsigned int temp2 = s_data[2*(blockDim.x+thid)+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid))]; s_data[2*(blockDim.x+thid)+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid))] = s_data[2*(blockDim.x+thid)+1+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid)+1)]; s_data[2*(blockDim.x+thid)+1+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid)+1)] += temp2; __syncthreads(); } __global__ static void splitSort(int numElems, int iter, unsigned int* keys, unsigned int* values, unsigned int* histo) { __shared__ unsigned int flags[BLOCK_P_OFFSET]; __shared__ unsigned int histo_s[1<<BITS]; const unsigned int tid = threadIdx.x; const unsigned int gid = blockIdx.x*4*SORT_BS+4*threadIdx.x; // Copy input to shared mem. 
Assumes input is always even numbered uint4 lkey = { UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX}; uint4 lvalue; if (gid < numElems){ lkey = *((uint4*)(keys+gid)); lvalue = *((uint4*)(values+gid)); } if(tid < (1<<BITS)){ histo_s[tid] = 0; } __syncthreads(); atomicAdd(histo_s+((lkey.x&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)),1); atomicAdd(histo_s+((lkey.y&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)),1); atomicAdd(histo_s+((lkey.z&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)),1); atomicAdd(histo_s+((lkey.w&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)),1); uint4 index = {4*tid, 4*tid+1, 4*tid+2, 4*tid+3}; for (int i=BITS*iter; i<BITS*(iter+1);i++){ const uint4 flag = {(lkey.x>>i)&0x1,(lkey.y>>i)&0x1,(lkey.z>>i)&0x1,(lkey.w>>i)&0x1}; flags[index.x+CONFLICT_FREE_OFFSET(index.x)] = 1<<(16*flag.x); flags[index.y+CONFLICT_FREE_OFFSET(index.y)] = 1<<(16*flag.y); flags[index.z+CONFLICT_FREE_OFFSET(index.z)] = 1<<(16*flag.z); flags[index.w+CONFLICT_FREE_OFFSET(index.w)] = 1<<(16*flag.w); scan (flags); index.x = (flags[index.x+CONFLICT_FREE_OFFSET(index.x)]>>(16*flag.x))&0xFFFF; index.y = (flags[index.y+CONFLICT_FREE_OFFSET(index.y)]>>(16*flag.y))&0xFFFF; index.z = (flags[index.z+CONFLICT_FREE_OFFSET(index.z)]>>(16*flag.z))&0xFFFF; index.w = (flags[index.w+CONFLICT_FREE_OFFSET(index.w)]>>(16*flag.w))&0xFFFF; unsigned short offset = flags[4*blockDim.x+CONFLICT_FREE_OFFSET(4*blockDim.x)]&0xFFFF; index.x += (flag.x) ? offset : 0; index.y += (flag.y) ? offset : 0; index.z += (flag.z) ? offset : 0; index.w += (flag.w) ? offset : 0; __syncthreads(); } // Write result. if (gid < numElems){ keys[blockIdx.x*4*SORT_BS+index.x] = lkey.x; keys[blockIdx.x*4*SORT_BS+index.y] = lkey.y; keys[blockIdx.x*4*SORT_BS+index.z] = lkey.z; keys[blockIdx.x*4*SORT_BS+index.w] = lkey.w; values[blockIdx.x*4*SORT_BS+index.x] = lvalue.x; values[blockIdx.x*4*SORT_BS+index.y] = lvalue.y; values[blockIdx.x*4*SORT_BS+index.z] = lvalue.z; values[blockIdx.x*4*SORT_BS+index.w] = lvalue.w; } if (tid < (1<<BITS)){ histo[gridDim.x*threadIdx.x+blockIdx.x] = histo_s[tid]; } } __global__ void splitRearrange (int numElems, int iter, unsigned int* keys_i, unsigned int* keys_o, unsigned int* values_i, unsigned int* values_o, unsigned int* histo){ __shared__ unsigned int histo_s[(1<<BITS)]; __shared__ unsigned int array_s[4*SORT_BS]; int index = blockIdx.x*4*SORT_BS + 4*threadIdx.x; if (threadIdx.x < (1<<BITS)){ histo_s[threadIdx.x] = histo[gridDim.x*threadIdx.x+blockIdx.x]; } uint4 mine, value; if (index < numElems){ mine = *((uint4*)(keys_i+index)); value = *((uint4*)(values_i+index)); } else { mine.x = UINT32_MAX; mine.y = UINT32_MAX; mine.z = UINT32_MAX; mine.w = UINT32_MAX; } uint4 masks = {(mine.x&((1<<(BITS*(iter+1)))-1))>>(BITS*iter), (mine.y&((1<<(BITS*(iter+1)))-1))>>(BITS*iter), (mine.z&((1<<(BITS*(iter+1)))-1))>>(BITS*iter), (mine.w&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)}; ((uint4*)array_s)[threadIdx.x] = masks; __syncthreads(); uint4 new_index = {histo_s[masks.x],histo_s[masks.y],histo_s[masks.z],histo_s[masks.w]}; int i = 4*threadIdx.x-1; while (i >= 0){ if (array_s[i] == masks.x){ new_index.x++; i--; } else { break; } } new_index.y = (masks.y == masks.x) ? new_index.x+1 : new_index.y; new_index.z = (masks.z == masks.y) ? new_index.y+1 : new_index.z; new_index.w = (masks.w == masks.z) ? 
new_index.z+1 : new_index.w; if (index < numElems){ keys_o[new_index.x] = mine.x; values_o[new_index.x] = value.x; keys_o[new_index.y] = mine.y; values_o[new_index.y] = value.y; keys_o[new_index.z] = mine.z; values_o[new_index.z] = value.z; keys_o[new_index.w] = mine.w; values_o[new_index.w] = value.w; } } void sort (int numElems, unsigned int max_value, unsigned int* &dkeys, unsigned int* &dvalues){ dim3 grid ((numElems+4*SORT_BS-1)/(4*SORT_BS)); dim3 block (SORT_BS); unsigned int iterations = 0; while(max_value > 0){ max_value >>= BITS; iterations++; } unsigned int *dhisto; unsigned int *dkeys_o, *dvalues_o; hipMalloc((void**)&dhisto, (1<<BITS)*grid.x*sizeof(unsigned int)); hipMalloc((void**)&dkeys_o, numElems*sizeof(unsigned int)); hipMalloc((void**)&dvalues_o, numElems*sizeof(unsigned int)); for (int i=0; i<iterations; i++){ hipLaunchKernelGGL(( splitSort), dim3(grid),dim3(block), 0, 0, numElems, i, dkeys, dvalues, dhisto); scanLargeArray(grid.x*(1<<BITS), dhisto); hipLaunchKernelGGL(( splitRearrange), dim3(grid),dim3(block), 0, 0, numElems, i, dkeys, dkeys_o, dvalues, dvalues_o, dhisto); unsigned int* temp = dkeys; dkeys = dkeys_o; dkeys_o = temp; temp = dvalues; dvalues = dvalues_o; dvalues_o = temp; } hipFree(dkeys_o); hipFree(dvalues_o); hipFree(dhisto); }
392f02219687929e8d2a93f4777fe0cd6de0a897.cu
/*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <cuda.h> #include <stdio.h> #include "scanLargeArray.h" #define UINT32_MAX 4294967295 #define BITS 4 #define LNB 4 #define SORT_BS 256 #define CONFLICT_FREE_OFFSET(index) ((index) >> LNB + (index) >> (2*LNB)) #define BLOCK_P_OFFSET (4*SORT_BS+1+(4*SORT_BS+1)/16+(4*SORT_BS+1)/64) __device__ void scan (unsigned int s_data[BLOCK_P_OFFSET]){ unsigned int thid = threadIdx.x; __syncthreads(); s_data[2*thid+1+CONFLICT_FREE_OFFSET(2*thid+1)] += s_data[2*thid+CONFLICT_FREE_OFFSET(2*thid)]; s_data[2*(blockDim.x+thid)+1+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid)+1)] += s_data[2*(blockDim.x+thid)+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid))]; unsigned int stride = 2; for (unsigned int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } if (thid == 0){ unsigned int last = 4*blockDim.x-1; last += CONFLICT_FREE_OFFSET(last); s_data[4*blockDim.x+CONFLICT_FREE_OFFSET(4*blockDim.x)] = s_data[last]; s_data[last] = 0; } for (unsigned int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } __syncthreads(); unsigned int temp = s_data[2*thid+CONFLICT_FREE_OFFSET(2*thid)]; s_data[2*thid+CONFLICT_FREE_OFFSET(2*thid)] = s_data[2*thid+1+CONFLICT_FREE_OFFSET(2*thid+1)]; s_data[2*thid+1+CONFLICT_FREE_OFFSET(2*thid+1)] += temp; unsigned int temp2 = s_data[2*(blockDim.x+thid)+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid))]; s_data[2*(blockDim.x+thid)+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid))] = s_data[2*(blockDim.x+thid)+1+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid)+1)]; s_data[2*(blockDim.x+thid)+1+CONFLICT_FREE_OFFSET(2*(blockDim.x+thid)+1)] += temp2; __syncthreads(); } __global__ static void splitSort(int numElems, int iter, unsigned int* keys, unsigned int* values, unsigned int* histo) { __shared__ unsigned int flags[BLOCK_P_OFFSET]; __shared__ unsigned int histo_s[1<<BITS]; const unsigned int tid = threadIdx.x; const unsigned int gid = blockIdx.x*4*SORT_BS+4*threadIdx.x; // Copy input to shared mem. 
Assumes input is always even numbered uint4 lkey = { UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX}; uint4 lvalue; if (gid < numElems){ lkey = *((uint4*)(keys+gid)); lvalue = *((uint4*)(values+gid)); } if(tid < (1<<BITS)){ histo_s[tid] = 0; } __syncthreads(); atomicAdd(histo_s+((lkey.x&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)),1); atomicAdd(histo_s+((lkey.y&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)),1); atomicAdd(histo_s+((lkey.z&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)),1); atomicAdd(histo_s+((lkey.w&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)),1); uint4 index = {4*tid, 4*tid+1, 4*tid+2, 4*tid+3}; for (int i=BITS*iter; i<BITS*(iter+1);i++){ const uint4 flag = {(lkey.x>>i)&0x1,(lkey.y>>i)&0x1,(lkey.z>>i)&0x1,(lkey.w>>i)&0x1}; flags[index.x+CONFLICT_FREE_OFFSET(index.x)] = 1<<(16*flag.x); flags[index.y+CONFLICT_FREE_OFFSET(index.y)] = 1<<(16*flag.y); flags[index.z+CONFLICT_FREE_OFFSET(index.z)] = 1<<(16*flag.z); flags[index.w+CONFLICT_FREE_OFFSET(index.w)] = 1<<(16*flag.w); scan (flags); index.x = (flags[index.x+CONFLICT_FREE_OFFSET(index.x)]>>(16*flag.x))&0xFFFF; index.y = (flags[index.y+CONFLICT_FREE_OFFSET(index.y)]>>(16*flag.y))&0xFFFF; index.z = (flags[index.z+CONFLICT_FREE_OFFSET(index.z)]>>(16*flag.z))&0xFFFF; index.w = (flags[index.w+CONFLICT_FREE_OFFSET(index.w)]>>(16*flag.w))&0xFFFF; unsigned short offset = flags[4*blockDim.x+CONFLICT_FREE_OFFSET(4*blockDim.x)]&0xFFFF; index.x += (flag.x) ? offset : 0; index.y += (flag.y) ? offset : 0; index.z += (flag.z) ? offset : 0; index.w += (flag.w) ? offset : 0; __syncthreads(); } // Write result. if (gid < numElems){ keys[blockIdx.x*4*SORT_BS+index.x] = lkey.x; keys[blockIdx.x*4*SORT_BS+index.y] = lkey.y; keys[blockIdx.x*4*SORT_BS+index.z] = lkey.z; keys[blockIdx.x*4*SORT_BS+index.w] = lkey.w; values[blockIdx.x*4*SORT_BS+index.x] = lvalue.x; values[blockIdx.x*4*SORT_BS+index.y] = lvalue.y; values[blockIdx.x*4*SORT_BS+index.z] = lvalue.z; values[blockIdx.x*4*SORT_BS+index.w] = lvalue.w; } if (tid < (1<<BITS)){ histo[gridDim.x*threadIdx.x+blockIdx.x] = histo_s[tid]; } } __global__ void splitRearrange (int numElems, int iter, unsigned int* keys_i, unsigned int* keys_o, unsigned int* values_i, unsigned int* values_o, unsigned int* histo){ __shared__ unsigned int histo_s[(1<<BITS)]; __shared__ unsigned int array_s[4*SORT_BS]; int index = blockIdx.x*4*SORT_BS + 4*threadIdx.x; if (threadIdx.x < (1<<BITS)){ histo_s[threadIdx.x] = histo[gridDim.x*threadIdx.x+blockIdx.x]; } uint4 mine, value; if (index < numElems){ mine = *((uint4*)(keys_i+index)); value = *((uint4*)(values_i+index)); } else { mine.x = UINT32_MAX; mine.y = UINT32_MAX; mine.z = UINT32_MAX; mine.w = UINT32_MAX; } uint4 masks = {(mine.x&((1<<(BITS*(iter+1)))-1))>>(BITS*iter), (mine.y&((1<<(BITS*(iter+1)))-1))>>(BITS*iter), (mine.z&((1<<(BITS*(iter+1)))-1))>>(BITS*iter), (mine.w&((1<<(BITS*(iter+1)))-1))>>(BITS*iter)}; ((uint4*)array_s)[threadIdx.x] = masks; __syncthreads(); uint4 new_index = {histo_s[masks.x],histo_s[masks.y],histo_s[masks.z],histo_s[masks.w]}; int i = 4*threadIdx.x-1; while (i >= 0){ if (array_s[i] == masks.x){ new_index.x++; i--; } else { break; } } new_index.y = (masks.y == masks.x) ? new_index.x+1 : new_index.y; new_index.z = (masks.z == masks.y) ? new_index.y+1 : new_index.z; new_index.w = (masks.w == masks.z) ? 
new_index.z+1 : new_index.w; if (index < numElems){ keys_o[new_index.x] = mine.x; values_o[new_index.x] = value.x; keys_o[new_index.y] = mine.y; values_o[new_index.y] = value.y; keys_o[new_index.z] = mine.z; values_o[new_index.z] = value.z; keys_o[new_index.w] = mine.w; values_o[new_index.w] = value.w; } } void sort (int numElems, unsigned int max_value, unsigned int* &dkeys, unsigned int* &dvalues){ dim3 grid ((numElems+4*SORT_BS-1)/(4*SORT_BS)); dim3 block (SORT_BS); unsigned int iterations = 0; while(max_value > 0){ max_value >>= BITS; iterations++; } unsigned int *dhisto; unsigned int *dkeys_o, *dvalues_o; cudaMalloc((void**)&dhisto, (1<<BITS)*grid.x*sizeof(unsigned int)); cudaMalloc((void**)&dkeys_o, numElems*sizeof(unsigned int)); cudaMalloc((void**)&dvalues_o, numElems*sizeof(unsigned int)); for (int i=0; i<iterations; i++){ splitSort<<<grid,block>>>(numElems, i, dkeys, dvalues, dhisto); scanLargeArray(grid.x*(1<<BITS), dhisto); splitRearrange<<<grid,block>>>(numElems, i, dkeys, dkeys_o, dvalues, dvalues_o, dhisto); unsigned int* temp = dkeys; dkeys = dkeys_o; dkeys_o = temp; temp = dvalues; dvalues = dvalues_o; dvalues_o = temp; } cudaFree(dkeys_o); cudaFree(dvalues_o); cudaFree(dhisto); }
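// --- Illustrative sketch (not part of the benchmark above) ----------------------
// splitSort/splitRearrange implement one pass of an LSD radix sort that consumes
// BITS (= 4) bits of the key per iteration. The digit of key k examined in pass
// `iter` is (k & ((1u << (BITS*(iter+1))) - 1)) >> (BITS*iter), i.e. bits
// [BITS*iter, BITS*(iter+1)). The host-side reference below performs the same
// pass sequentially (a stable counting sort on that digit); it is a simplified
// sketch for checking the GPU kernels, not the benchmark's own code.
#include <vector>
#include <cstdint>
static void radix_pass_reference(std::vector<uint32_t>& keys,
                                 std::vector<uint32_t>& values, int iter) {
  const int BITS = 4;
  const uint32_t mask = (1u << (BITS * (iter + 1))) - 1;
  std::vector<size_t> count(1u << BITS, 0);
  for (uint32_t k : keys) count[(k & mask) >> (BITS * iter)]++;
  std::vector<size_t> offset(1u << BITS, 0);           // exclusive prefix sum,
  for (size_t d = 1; d < offset.size(); ++d)           // same role as the histo
    offset[d] = offset[d - 1] + count[d - 1];          // scanned by scanLargeArray
  std::vector<uint32_t> k_out(keys.size()), v_out(values.size());
  for (size_t i = 0; i < keys.size(); ++i) {           // stable scatter, like
    uint32_t d = (keys[i] & mask) >> (BITS * iter);    // splitRearrange
    k_out[offset[d]] = keys[i];
    v_out[offset[d]] = values[i];
    offset[d]++;
  }
  keys.swap(k_out);
  values.swap(v_out);
}
// --------------------------------------------------------------------------------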
350c598c35c5335a16f8dbd1131d2018665416bd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdbool.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define CUDA_CHECK_RETURN(value) {\ hipError_t _m_cudaStat = value;\ if (_m_cudaStat != hipSuccess) {\ fprintf(stderr, "Error %s at line %d in file %s\n",\ hipGetErrorString(_m_cudaStat),__LINE__,__FILE__);\ exit(1);\ }} float cuda_host_alloc_test(int size, int niter, bool up) { hipEvent_t start, stop; int *a, *dev_a; float elapsed_time; CUDA_CHECK_RETURN(hipEventCreate(&start)); CUDA_CHECK_RETURN(hipEventCreate(&stop)); CUDA_CHECK_RETURN(hipHostMalloc((void**)&a, size * sizeof(*a), hipHostMallocDefault)); CUDA_CHECK_RETURN(hipMalloc((void**)&dev_a, size * sizeof(*dev_a))); CUDA_CHECK_RETURN(hipEventRecord(start, 0)); for (int i = 0; i < niter; i++) { if (up == true) { CUDA_CHECK_RETURN(hipMemcpy(dev_a, a, size * sizeof(*a), hipMemcpyHostToDevice)); } else { CUDA_CHECK_RETURN(hipMemcpy(a, dev_a, size * sizeof(*dev_a), hipMemcpyDeviceToHost)); } } CUDA_CHECK_RETURN(hipEventRecord(stop, 0)); CUDA_CHECK_RETURN(hipEventSynchronize(stop)); CUDA_CHECK_RETURN(hipEventElapsedTime(&elapsed_time, start, stop)); CUDA_CHECK_RETURN(hipHostFree(a)); CUDA_CHECK_RETURN(hipFree(dev_a)); CUDA_CHECK_RETURN(hipEventDestroy(start)); CUDA_CHECK_RETURN(hipEventDestroy(stop)); return elapsed_time; } float cuda_malloc_test(int size, int niter, bool up) { hipEvent_t start, stop; int *a, *dev_a; float elapsed_time; CUDA_CHECK_RETURN(hipEventCreate(&start)); CUDA_CHECK_RETURN(hipEventCreate(&stop)); a = (int*)malloc(size * sizeof(*a)); CUDA_CHECK_RETURN(hipMalloc((void**)&dev_a, size * sizeof(*dev_a))); CUDA_CHECK_RETURN(hipEventRecord(start, 0)); for (int i = 0; i < niter; i++) { if (up == true) { CUDA_CHECK_RETURN(hipMemcpy(dev_a, a, size * sizeof(*a), hipMemcpyHostToDevice)); } else CUDA_CHECK_RETURN(hipMemcpy(a, dev_a, size * sizeof(*dev_a), hipMemcpyDeviceToHost)); } CUDA_CHECK_RETURN(hipEventRecord(stop, 0)); CUDA_CHECK_RETURN(hipEventSynchronize(stop)); CUDA_CHECK_RETURN(hipEventElapsedTime(&elapsed_time, start, stop)); free(a); CUDA_CHECK_RETURN(hipFree(dev_a)); CUDA_CHECK_RETURN(hipEventDestroy(start)); CUDA_CHECK_RETURN(hipEventDestroy(stop)); return elapsed_time; } int main(int argc, char const *argv[]) { const int size = (10 * pow(1024, 2)); const int niter = 100; float elapsed_time; float MB = (float)niter * size * sizeof(int) / 1024 / 1024; elapsed_time = cuda_malloc_test(size, niter, true); printf("Time using hipMalloc: %.6f\n", elapsed_time); printf("Speed CPU-->GPU: %.6f MB/s\n\n", MB / (elapsed_time / 1000)); elapsed_time = cuda_malloc_test(size, niter, false); printf("Time using hipMalloc: %.6f\n", elapsed_time); printf("Speed GPU-->CPU: %.6f MB/s\n\n", MB / (elapsed_time / 1000)); elapsed_time = cuda_host_alloc_test(size, niter, true); printf("Time using hipHostMalloc: %.6f\n", elapsed_time); printf("Speed CPU-->GPU: %.6f MB/s\n\n", MB / (elapsed_time / 1000)); elapsed_time = cuda_host_alloc_test(size, niter, false); printf("Time using hipHostMalloc: %.6f ms\n", elapsed_time); printf("Speed GPU-->CPU: %.6f MB/s\n\n", MB / (elapsed_time / 1000)); return 0; }
350c598c35c5335a16f8dbd1131d2018665416bd.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdbool.h> #include <cuda.h> #include <cuda_runtime.h> #define CUDA_CHECK_RETURN(value) {\ cudaError_t _m_cudaStat = value;\ if (_m_cudaStat != cudaSuccess) {\ fprintf(stderr, "Error %s at line %d in file %s\n",\ cudaGetErrorString(_m_cudaStat),__LINE__,__FILE__);\ exit(1);\ }} float cuda_host_alloc_test(int size, int niter, bool up) { cudaEvent_t start, stop; int *a, *dev_a; float elapsed_time; CUDA_CHECK_RETURN(cudaEventCreate(&start)); CUDA_CHECK_RETURN(cudaEventCreate(&stop)); CUDA_CHECK_RETURN(cudaHostAlloc((void**)&a, size * sizeof(*a), cudaHostAllocDefault)); CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a, size * sizeof(*dev_a))); CUDA_CHECK_RETURN(cudaEventRecord(start, 0)); for (int i = 0; i < niter; i++) { if (up == true) { CUDA_CHECK_RETURN(cudaMemcpy(dev_a, a, size * sizeof(*a), cudaMemcpyHostToDevice)); } else { CUDA_CHECK_RETURN(cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost)); } } CUDA_CHECK_RETURN(cudaEventRecord(stop, 0)); CUDA_CHECK_RETURN(cudaEventSynchronize(stop)); CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsed_time, start, stop)); CUDA_CHECK_RETURN(cudaFreeHost(a)); CUDA_CHECK_RETURN(cudaFree(dev_a)); CUDA_CHECK_RETURN(cudaEventDestroy(start)); CUDA_CHECK_RETURN(cudaEventDestroy(stop)); return elapsed_time; } float cuda_malloc_test(int size, int niter, bool up) { cudaEvent_t start, stop; int *a, *dev_a; float elapsed_time; CUDA_CHECK_RETURN(cudaEventCreate(&start)); CUDA_CHECK_RETURN(cudaEventCreate(&stop)); a = (int*)malloc(size * sizeof(*a)); CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a, size * sizeof(*dev_a))); CUDA_CHECK_RETURN(cudaEventRecord(start, 0)); for (int i = 0; i < niter; i++) { if (up == true) { CUDA_CHECK_RETURN(cudaMemcpy(dev_a, a, size * sizeof(*a), cudaMemcpyHostToDevice)); } else CUDA_CHECK_RETURN(cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost)); } CUDA_CHECK_RETURN(cudaEventRecord(stop, 0)); CUDA_CHECK_RETURN(cudaEventSynchronize(stop)); CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsed_time, start, stop)); free(a); CUDA_CHECK_RETURN(cudaFree(dev_a)); CUDA_CHECK_RETURN(cudaEventDestroy(start)); CUDA_CHECK_RETURN(cudaEventDestroy(stop)); return elapsed_time; } int main(int argc, char const *argv[]) { const int size = (10 * pow(1024, 2)); const int niter = 100; float elapsed_time; float MB = (float)niter * size * sizeof(int) / 1024 / 1024; elapsed_time = cuda_malloc_test(size, niter, true); printf("Time using cudaMalloc: %.6f\n", elapsed_time); printf("Speed CPU-->GPU: %.6f MB/s\n\n", MB / (elapsed_time / 1000)); elapsed_time = cuda_malloc_test(size, niter, false); printf("Time using cudaMalloc: %.6f\n", elapsed_time); printf("Speed GPU-->CPU: %.6f MB/s\n\n", MB / (elapsed_time / 1000)); elapsed_time = cuda_host_alloc_test(size, niter, true); printf("Time using cudaHostAlloc: %.6f\n", elapsed_time); printf("Speed CPU-->GPU: %.6f MB/s\n\n", MB / (elapsed_time / 1000)); elapsed_time = cuda_host_alloc_test(size, niter, false); printf("Time using cudaHostAlloc: %.6f ms\n", elapsed_time); printf("Speed GPU-->CPU: %.6f MB/s\n\n", MB / (elapsed_time / 1000)); return 0; }
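// --- Illustrative sketch (not part of the test above) ---------------------------
// The throughput reported by the test above is MB / (elapsed_time / 1000), where
// MB = niter * size * sizeof(int) / 1024 / 1024 is the total data moved across all
// niter copies (in MiB) and elapsed_time is the cudaEvent time in milliseconds, so
// the printed value is MiB per second. A small helper making the unit conversion
// explicit (the helper itself is hypothetical, not from the file above):
static float transfer_rate_mib_per_s(int size_elems, int niter, float elapsed_ms) {
  float total_mib = (float)niter * size_elems * sizeof(int) / 1024.0f / 1024.0f;
  return total_mib / (elapsed_ms / 1000.0f); // e.g. 4000 MiB in 2000 ms -> 2000 MiB/s
}
// --------------------------------------------------------------------------------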
3ab29cc699cc784c72de3004f866784cd34911e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <float.h> #include <iostream> #include <tuple> #include "utils/dispatch.cuh" #include "utils/mink.cuh" // A chunk of work is blocksize-many points of P1. // The number of potential chunks to do is N*(1+(P1-1)/blocksize) // call (1+(P1-1)/blocksize) chunks_per_cloud // These chunks are divided among the gridSize-many blocks. // In block b, we work on chunks b, b+gridSize, b+2*gridSize etc . // In chunk i, we work on cloud i/chunks_per_cloud on points starting from // blocksize*(i%chunks_per_cloud). template <typename scalar_t> __global__ void KNearestNeighborKernelV0( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2, const size_t D, const size_t K) { // Store both dists and indices for knn in global memory. const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); const int64_t chunks_to_do = N * chunks_per_cloud; for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { const int64_t n = chunk / chunks_per_cloud; const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); int64_t p1 = start_point + threadIdx.x; if (p1 >= lengths1[n]) continue; int offset = n * P1 * K + p1 * K; int64_t length2 = lengths2[n]; MinK<scalar_t, int64_t> mink(dists + offset, idxs + offset, K); for (int p2 = 0; p2 < length2; ++p2) { // Find the distance between points1[n, p1] and points[n, p2] scalar_t dist = 0; for (int d = 0; d < D; ++d) { scalar_t coord1 = points1[n * P1 * D + p1 * D + d]; scalar_t coord2 = points2[n * P2 * D + p2 * D + d]; scalar_t diff = coord1 - coord2; dist += diff * diff; } mink.add(dist, p2); } } } template <typename scalar_t, int64_t D> __global__ void KNearestNeighborKernelV1( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2, const size_t K) { // Same idea as the previous version, but hoist D into a template argument // so we can cache the current point in a thread-local array. We still store // the current best K dists and indices in global memory, so this should work // for very large K and fairly large D. 
scalar_t cur_point[D]; const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); const int64_t chunks_to_do = N * chunks_per_cloud; for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { const int64_t n = chunk / chunks_per_cloud; const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); int64_t p1 = start_point + threadIdx.x; if (p1 >= lengths1[n]) continue; for (int d = 0; d < D; ++d) { cur_point[d] = points1[n * P1 * D + p1 * D + d]; } int offset = n * P1 * K + p1 * K; int64_t length2 = lengths2[n]; MinK<scalar_t, int64_t> mink(dists + offset, idxs + offset, K); for (int p2 = 0; p2 < length2; ++p2) { // Find the distance between cur_point and points[n, p2] scalar_t dist = 0; for (int d = 0; d < D; ++d) { scalar_t diff = cur_point[d] - points2[n * P2 * D + p2 * D + d]; dist += diff * diff; } mink.add(dist, p2); } } } // This is a shim functor to allow us to dispatch using DispatchKernel1D template <typename scalar_t, int64_t D> struct KNearestNeighborV1Functor { static void run( size_t blocks, size_t threads, const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2, const size_t K) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( KNearestNeighborKernelV1<scalar_t, D>), dim3(blocks), dim3(threads), 0, stream, points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, K); } }; template <typename scalar_t, int64_t D, int64_t K> __global__ void KNearestNeighborKernelV2( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const int64_t N, const int64_t P1, const int64_t P2) { // Same general implementation as V2, but also hoist K into a template arg. 
scalar_t cur_point[D]; scalar_t min_dists[K]; int min_idxs[K]; const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); const int64_t chunks_to_do = N * chunks_per_cloud; for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { const int64_t n = chunk / chunks_per_cloud; const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); int64_t p1 = start_point + threadIdx.x; if (p1 >= lengths1[n]) continue; for (int d = 0; d < D; ++d) { cur_point[d] = points1[n * P1 * D + p1 * D + d]; } int64_t length2 = lengths2[n]; MinK<scalar_t, int> mink(min_dists, min_idxs, K); for (int p2 = 0; p2 < length2; ++p2) { scalar_t dist = 0; for (int d = 0; d < D; ++d) { int offset = n * P2 * D + p2 * D + d; scalar_t diff = cur_point[d] - points2[offset]; dist += diff * diff; } mink.add(dist, p2); } for (int k = 0; k < mink.size(); ++k) { idxs[n * P1 * K + p1 * K + k] = min_idxs[k]; dists[n * P1 * K + p1 * K + k] = min_dists[k]; } } } // This is a shim so we can dispatch using DispatchKernel2D template <typename scalar_t, int64_t D, int64_t K> struct KNearestNeighborKernelV2Functor { static void run( size_t blocks, size_t threads, const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const int64_t N, const int64_t P1, const int64_t P2) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( KNearestNeighborKernelV2<scalar_t, D, K>), dim3(blocks), dim3(threads), 0, stream, points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2); } }; template <typename scalar_t, int D, int K> __global__ void KNearestNeighborKernelV3( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2) { // Same idea as V2, but use register indexing for thread-local arrays. // Enabling sorting for this version leads to huge slowdowns; I suspect // that it forces min_dists into local memory rather than registers. // As a result this version is always unsorted. 
scalar_t cur_point[D]; scalar_t min_dists[K]; int min_idxs[K]; const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); const int64_t chunks_to_do = N * chunks_per_cloud; for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { const int64_t n = chunk / chunks_per_cloud; const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); int64_t p1 = start_point + threadIdx.x; if (p1 >= lengths1[n]) continue; for (int d = 0; d < D; ++d) { cur_point[d] = points1[n * P1 * D + p1 * D + d]; } int64_t length2 = lengths2[n]; RegisterMinK<scalar_t, int, K> mink(min_dists, min_idxs); for (int p2 = 0; p2 < length2; ++p2) { scalar_t dist = 0; for (int d = 0; d < D; ++d) { int offset = n * P2 * D + p2 * D + d; scalar_t diff = cur_point[d] - points2[offset]; dist += diff * diff; } mink.add(dist, p2); } for (int k = 0; k < mink.size(); ++k) { idxs[n * P1 * K + p1 * K + k] = min_idxs[k]; dists[n * P1 * K + p1 * K + k] = min_dists[k]; } } } // This is a shim so we can dispatch using DispatchKernel2D template <typename scalar_t, int64_t D, int64_t K> struct KNearestNeighborKernelV3Functor { static void run( size_t blocks, size_t threads, const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( KNearestNeighborKernelV3<scalar_t, D, K>), dim3(blocks), dim3(threads), 0, stream, points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2); } }; constexpr int V1_MIN_D = 1; constexpr int V1_MAX_D = 32; constexpr int V2_MIN_D = 1; constexpr int V2_MAX_D = 8; constexpr int V2_MIN_K = 1; constexpr int V2_MAX_K = 32; constexpr int V3_MIN_D = 1; constexpr int V3_MAX_D = 8; constexpr int V3_MIN_K = 1; constexpr int V3_MAX_K = 4; bool InBounds(const int64_t min, const int64_t x, const int64_t max) { return min <= x && x <= max; } bool KnnCheckVersion(int version, const int64_t D, const int64_t K) { if (version == 0) { return true; } else if (version == 1) { return InBounds(V1_MIN_D, D, V1_MAX_D); } else if (version == 2) { return InBounds(V2_MIN_D, D, V2_MAX_D) && InBounds(V2_MIN_K, K, V2_MAX_K); } else if (version == 3) { return InBounds(V3_MIN_D, D, V3_MAX_D) && InBounds(V3_MIN_K, K, V3_MAX_K); } return false; } int ChooseVersion(const int64_t D, const int64_t K) { for (int version = 3; version >= 1; version--) { if (KnnCheckVersion(version, D, K)) { return version; } } return 0; } std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdxCuda( const at::Tensor& p1, const at::Tensor& p2, const at::Tensor& lengths1, const at::Tensor& lengths2, int K, int version) { // Check inputs are on the same device at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2}, lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4}; at::CheckedFrom c = "KNearestNeighborIdxCuda"; at::checkAllSameGPU(c, {p1_t, p2_t, lengths1_t, lengths2_t}); at::checkAllSameType(c, {p1_t, p2_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(p1.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const auto N = p1.size(0); const auto P1 = p1.size(1); const auto P2 = p2.size(1); const auto D = p2.size(2); const int64_t K_64 = K; TORCH_CHECK(p2.size(2) == D, "Point sets must have the same last dimension"); auto long_dtype = 
lengths1.options().dtype(at::kLong); auto idxs = at::zeros({N, P1, K}, long_dtype); auto dists = at::zeros({N, P1, K}, p1.options()); if (idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(idxs, dists); } if (version < 0) { version = ChooseVersion(D, K); } else if (!KnnCheckVersion(version, D, K)) { int new_version = ChooseVersion(D, K); std::cout << "WARNING: Requested KNN version " << version << " is not compatible with D = " << D << "; K = " << K << ". Falling back to version = " << new_version << std::endl; version = new_version; } // At this point we should have a valid version no matter what data the user // gave us. But we can check once more to be sure; however this time // assert fail since failing at this point means we have a bug in our version // selection or checking code. AT_ASSERTM(KnnCheckVersion(version, D, K), "Invalid version"); const size_t threads = 256; const size_t blocks = 256; if (version == 0) { AT_DISPATCH_FLOATING_TYPES( p1.scalar_type(), "knn_kernel_cuda", ([&] { hipLaunchKernelGGL(( KNearestNeighborKernelV0<scalar_t>), dim3(blocks), dim3(threads), 0, stream, p1.contiguous().data_ptr<scalar_t>(), p2.contiguous().data_ptr<scalar_t>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), dists.data_ptr<scalar_t>(), idxs.data_ptr<int64_t>(), N, P1, P2, D, K); })); } else if (version == 1) { AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { DispatchKernel1D< KNearestNeighborV1Functor, scalar_t, V1_MIN_D, V1_MAX_D>( D, blocks, threads, p1.contiguous().data_ptr<scalar_t>(), p2.contiguous().data_ptr<scalar_t>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), dists.data_ptr<scalar_t>(), idxs.data_ptr<int64_t>(), N, P1, P2, K); })); } else if (version == 2) { AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { DispatchKernel2D< KNearestNeighborKernelV2Functor, scalar_t, V2_MIN_D, V2_MAX_D, V2_MIN_K, V2_MAX_K>( D, K_64, blocks, threads, p1.contiguous().data_ptr<scalar_t>(), p2.contiguous().data_ptr<scalar_t>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), dists.data_ptr<scalar_t>(), idxs.data_ptr<int64_t>(), N, P1, P2); })); } else if (version == 3) { AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { DispatchKernel2D< KNearestNeighborKernelV3Functor, scalar_t, V3_MIN_D, V3_MAX_D, V3_MIN_K, V3_MAX_K>( D, K_64, blocks, threads, p1.contiguous().data_ptr<scalar_t>(), p2.contiguous().data_ptr<scalar_t>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), dists.data_ptr<scalar_t>(), idxs.data_ptr<int64_t>(), N, P1, P2); })); } AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(idxs, dists); } // ------------------------------------------------------------- // // Backward Operators // // ------------------------------------------------------------- // // TODO(gkioxari) support all data types once AtomicAdd supports doubles. // Currently, support is for floats only. 
__global__ void KNearestNeighborBackwardKernel( const float* __restrict__ p1, // (N, P1, D) const float* __restrict__ p2, // (N, P2, D) const int64_t* __restrict__ lengths1, // (N,) const int64_t* __restrict__ lengths2, // (N,) const int64_t* __restrict__ idxs, // (N, P1, K) const float* __restrict__ grad_dists, // (N, P1, K) float* __restrict__ grad_p1, // (N, P1, D) float* __restrict__ grad_p2, // (N, P2, D) const size_t N, const size_t P1, const size_t P2, const size_t K, const size_t D) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = gridDim.x * blockDim.x; for (size_t i = tid; i < N * P1 * K * D; i += stride) { const size_t n = i / (P1 * K * D); // batch index size_t rem = i % (P1 * K * D); const size_t p1_idx = rem / (K * D); // index of point in p1 rem = rem % (K * D); const size_t k = rem / D; // k-th nearest neighbor const size_t d = rem % D; // d-th dimension in the feature vector const size_t num1 = lengths1[n]; // number of valid points in p1 in batch const size_t num2 = lengths2[n]; // number of valid points in p2 in batch if ((p1_idx < num1) && (k < num2)) { const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k]; // index of point in p2 corresponding to the k-th nearest neighbor const size_t p2_idx = idxs[n * P1 * K + p1_idx * K + k]; // If the index is the pad value of -1 then ignore it if (p2_idx == -1) { continue; } const float diff = 2.0 * grad_dist * (p1[n * P1 * D + p1_idx * D + d] - p2[n * P2 * D + p2_idx * D + d]); atomicAdd(grad_p1 + n * P1 * D + p1_idx * D + d, diff); atomicAdd(grad_p2 + n * P2 * D + p2_idx * D + d, -1.0f * diff); } } } std::tuple<at::Tensor, at::Tensor> KNearestNeighborBackwardCuda( const at::Tensor& p1, const at::Tensor& p2, const at::Tensor& lengths1, const at::Tensor& lengths2, const at::Tensor& idxs, const at::Tensor& grad_dists) { // Check inputs are on the same device at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2}, lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4}, idxs_t{idxs, "idxs", 5}, grad_dists_t{grad_dists, "grad_dists", 6}; at::CheckedFrom c = "KNearestNeighborBackwardCuda"; at::checkAllSameGPU( c, {p1_t, p2_t, lengths1_t, lengths2_t, idxs_t, grad_dists_t}); at::checkAllSameType(c, {p1_t, p2_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(p1.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const auto N = p1.size(0); const auto P1 = p1.size(1); const auto P2 = p2.size(1); const auto D = p2.size(2); const auto K = idxs.size(2); TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension"); TORCH_CHECK(idxs.size(0) == N, "KNN idxs must have the same batch dimension"); TORCH_CHECK( idxs.size(1) == P1, "KNN idxs must have the same point dimension as p1"); TORCH_CHECK(grad_dists.size(0) == N); TORCH_CHECK(grad_dists.size(1) == P1); TORCH_CHECK(grad_dists.size(2) == K); auto grad_p1 = at::zeros({N, P1, D}, p1.options()); auto grad_p2 = at::zeros({N, P2, D}, p2.options()); if (grad_p1.numel() == 0 || grad_p2.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(grad_p1, grad_p2); } const int blocks = 64; const int threads = 512; hipLaunchKernelGGL(( KNearestNeighborBackwardKernel), dim3(blocks), dim3(threads), 0, stream, p1.contiguous().data_ptr<float>(), p2.contiguous().data_ptr<float>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), idxs.contiguous().data_ptr<int64_t>(), 
grad_dists.contiguous().data_ptr<float>(), grad_p1.data_ptr<float>(), grad_p2.data_ptr<float>(), N, P1, P2, K, D); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(grad_p1, grad_p2); }
3ab29cc699cc784c72de3004f866784cd34911e6.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <float.h> #include <iostream> #include <tuple> #include "utils/dispatch.cuh" #include "utils/mink.cuh" // A chunk of work is blocksize-many points of P1. // The number of potential chunks to do is N*(1+(P1-1)/blocksize) // call (1+(P1-1)/blocksize) chunks_per_cloud // These chunks are divided among the gridSize-many blocks. // In block b, we work on chunks b, b+gridSize, b+2*gridSize etc . // In chunk i, we work on cloud i/chunks_per_cloud on points starting from // blocksize*(i%chunks_per_cloud). template <typename scalar_t> __global__ void KNearestNeighborKernelV0( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2, const size_t D, const size_t K) { // Store both dists and indices for knn in global memory. const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); const int64_t chunks_to_do = N * chunks_per_cloud; for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { const int64_t n = chunk / chunks_per_cloud; const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); int64_t p1 = start_point + threadIdx.x; if (p1 >= lengths1[n]) continue; int offset = n * P1 * K + p1 * K; int64_t length2 = lengths2[n]; MinK<scalar_t, int64_t> mink(dists + offset, idxs + offset, K); for (int p2 = 0; p2 < length2; ++p2) { // Find the distance between points1[n, p1] and points[n, p2] scalar_t dist = 0; for (int d = 0; d < D; ++d) { scalar_t coord1 = points1[n * P1 * D + p1 * D + d]; scalar_t coord2 = points2[n * P2 * D + p2 * D + d]; scalar_t diff = coord1 - coord2; dist += diff * diff; } mink.add(dist, p2); } } } template <typename scalar_t, int64_t D> __global__ void KNearestNeighborKernelV1( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2, const size_t K) { // Same idea as the previous version, but hoist D into a template argument // so we can cache the current point in a thread-local array. We still store // the current best K dists and indices in global memory, so this should work // for very large K and fairly large D. 
scalar_t cur_point[D]; const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); const int64_t chunks_to_do = N * chunks_per_cloud; for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { const int64_t n = chunk / chunks_per_cloud; const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); int64_t p1 = start_point + threadIdx.x; if (p1 >= lengths1[n]) continue; for (int d = 0; d < D; ++d) { cur_point[d] = points1[n * P1 * D + p1 * D + d]; } int offset = n * P1 * K + p1 * K; int64_t length2 = lengths2[n]; MinK<scalar_t, int64_t> mink(dists + offset, idxs + offset, K); for (int p2 = 0; p2 < length2; ++p2) { // Find the distance between cur_point and points[n, p2] scalar_t dist = 0; for (int d = 0; d < D; ++d) { scalar_t diff = cur_point[d] - points2[n * P2 * D + p2 * D + d]; dist += diff * diff; } mink.add(dist, p2); } } } // This is a shim functor to allow us to dispatch using DispatchKernel1D template <typename scalar_t, int64_t D> struct KNearestNeighborV1Functor { static void run( size_t blocks, size_t threads, const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2, const size_t K) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); KNearestNeighborKernelV1<scalar_t, D><<<blocks, threads, 0, stream>>>( points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, K); } }; template <typename scalar_t, int64_t D, int64_t K> __global__ void KNearestNeighborKernelV2( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const int64_t N, const int64_t P1, const int64_t P2) { // Same general implementation as V2, but also hoist K into a template arg. 
scalar_t cur_point[D]; scalar_t min_dists[K]; int min_idxs[K]; const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); const int64_t chunks_to_do = N * chunks_per_cloud; for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { const int64_t n = chunk / chunks_per_cloud; const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); int64_t p1 = start_point + threadIdx.x; if (p1 >= lengths1[n]) continue; for (int d = 0; d < D; ++d) { cur_point[d] = points1[n * P1 * D + p1 * D + d]; } int64_t length2 = lengths2[n]; MinK<scalar_t, int> mink(min_dists, min_idxs, K); for (int p2 = 0; p2 < length2; ++p2) { scalar_t dist = 0; for (int d = 0; d < D; ++d) { int offset = n * P2 * D + p2 * D + d; scalar_t diff = cur_point[d] - points2[offset]; dist += diff * diff; } mink.add(dist, p2); } for (int k = 0; k < mink.size(); ++k) { idxs[n * P1 * K + p1 * K + k] = min_idxs[k]; dists[n * P1 * K + p1 * K + k] = min_dists[k]; } } } // This is a shim so we can dispatch using DispatchKernel2D template <typename scalar_t, int64_t D, int64_t K> struct KNearestNeighborKernelV2Functor { static void run( size_t blocks, size_t threads, const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const int64_t N, const int64_t P1, const int64_t P2) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); KNearestNeighborKernelV2<scalar_t, D, K><<<blocks, threads, 0, stream>>>( points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2); } }; template <typename scalar_t, int D, int K> __global__ void KNearestNeighborKernelV3( const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2) { // Same idea as V2, but use register indexing for thread-local arrays. // Enabling sorting for this version leads to huge slowdowns; I suspect // that it forces min_dists into local memory rather than registers. // As a result this version is always unsorted. 
scalar_t cur_point[D]; scalar_t min_dists[K]; int min_idxs[K]; const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); const int64_t chunks_to_do = N * chunks_per_cloud; for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { const int64_t n = chunk / chunks_per_cloud; const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); int64_t p1 = start_point + threadIdx.x; if (p1 >= lengths1[n]) continue; for (int d = 0; d < D; ++d) { cur_point[d] = points1[n * P1 * D + p1 * D + d]; } int64_t length2 = lengths2[n]; RegisterMinK<scalar_t, int, K> mink(min_dists, min_idxs); for (int p2 = 0; p2 < length2; ++p2) { scalar_t dist = 0; for (int d = 0; d < D; ++d) { int offset = n * P2 * D + p2 * D + d; scalar_t diff = cur_point[d] - points2[offset]; dist += diff * diff; } mink.add(dist, p2); } for (int k = 0; k < mink.size(); ++k) { idxs[n * P1 * K + p1 * K + k] = min_idxs[k]; dists[n * P1 * K + p1 * K + k] = min_dists[k]; } } } // This is a shim so we can dispatch using DispatchKernel2D template <typename scalar_t, int64_t D, int64_t K> struct KNearestNeighborKernelV3Functor { static void run( size_t blocks, size_t threads, const scalar_t* __restrict__ points1, const scalar_t* __restrict__ points2, const int64_t* __restrict__ lengths1, const int64_t* __restrict__ lengths2, scalar_t* __restrict__ dists, int64_t* __restrict__ idxs, const size_t N, const size_t P1, const size_t P2) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); KNearestNeighborKernelV3<scalar_t, D, K><<<blocks, threads, 0, stream>>>( points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2); } }; constexpr int V1_MIN_D = 1; constexpr int V1_MAX_D = 32; constexpr int V2_MIN_D = 1; constexpr int V2_MAX_D = 8; constexpr int V2_MIN_K = 1; constexpr int V2_MAX_K = 32; constexpr int V3_MIN_D = 1; constexpr int V3_MAX_D = 8; constexpr int V3_MIN_K = 1; constexpr int V3_MAX_K = 4; bool InBounds(const int64_t min, const int64_t x, const int64_t max) { return min <= x && x <= max; } bool KnnCheckVersion(int version, const int64_t D, const int64_t K) { if (version == 0) { return true; } else if (version == 1) { return InBounds(V1_MIN_D, D, V1_MAX_D); } else if (version == 2) { return InBounds(V2_MIN_D, D, V2_MAX_D) && InBounds(V2_MIN_K, K, V2_MAX_K); } else if (version == 3) { return InBounds(V3_MIN_D, D, V3_MAX_D) && InBounds(V3_MIN_K, K, V3_MAX_K); } return false; } int ChooseVersion(const int64_t D, const int64_t K) { for (int version = 3; version >= 1; version--) { if (KnnCheckVersion(version, D, K)) { return version; } } return 0; } std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdxCuda( const at::Tensor& p1, const at::Tensor& p2, const at::Tensor& lengths1, const at::Tensor& lengths2, int K, int version) { // Check inputs are on the same device at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2}, lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4}; at::CheckedFrom c = "KNearestNeighborIdxCuda"; at::checkAllSameGPU(c, {p1_t, p2_t, lengths1_t, lengths2_t}); at::checkAllSameType(c, {p1_t, p2_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(p1.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const auto N = p1.size(0); const auto P1 = p1.size(1); const auto P2 = p2.size(1); const auto D = p2.size(2); const int64_t K_64 = K; TORCH_CHECK(p2.size(2) == D, "Point sets must have the same last dimension"); auto long_dtype = lengths1.options().dtype(at::kLong); auto idxs = at::zeros({N, P1, K}, 
long_dtype); auto dists = at::zeros({N, P1, K}, p1.options()); if (idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(idxs, dists); } if (version < 0) { version = ChooseVersion(D, K); } else if (!KnnCheckVersion(version, D, K)) { int new_version = ChooseVersion(D, K); std::cout << "WARNING: Requested KNN version " << version << " is not compatible with D = " << D << "; K = " << K << ". Falling back to version = " << new_version << std::endl; version = new_version; } // At this point we should have a valid version no matter what data the user // gave us. But we can check once more to be sure; however this time // assert fail since failing at this point means we have a bug in our version // selection or checking code. AT_ASSERTM(KnnCheckVersion(version, D, K), "Invalid version"); const size_t threads = 256; const size_t blocks = 256; if (version == 0) { AT_DISPATCH_FLOATING_TYPES( p1.scalar_type(), "knn_kernel_cuda", ([&] { KNearestNeighborKernelV0<scalar_t><<<blocks, threads, 0, stream>>>( p1.contiguous().data_ptr<scalar_t>(), p2.contiguous().data_ptr<scalar_t>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), dists.data_ptr<scalar_t>(), idxs.data_ptr<int64_t>(), N, P1, P2, D, K); })); } else if (version == 1) { AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { DispatchKernel1D< KNearestNeighborV1Functor, scalar_t, V1_MIN_D, V1_MAX_D>( D, blocks, threads, p1.contiguous().data_ptr<scalar_t>(), p2.contiguous().data_ptr<scalar_t>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), dists.data_ptr<scalar_t>(), idxs.data_ptr<int64_t>(), N, P1, P2, K); })); } else if (version == 2) { AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { DispatchKernel2D< KNearestNeighborKernelV2Functor, scalar_t, V2_MIN_D, V2_MAX_D, V2_MIN_K, V2_MAX_K>( D, K_64, blocks, threads, p1.contiguous().data_ptr<scalar_t>(), p2.contiguous().data_ptr<scalar_t>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), dists.data_ptr<scalar_t>(), idxs.data_ptr<int64_t>(), N, P1, P2); })); } else if (version == 3) { AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { DispatchKernel2D< KNearestNeighborKernelV3Functor, scalar_t, V3_MIN_D, V3_MAX_D, V3_MIN_K, V3_MAX_K>( D, K_64, blocks, threads, p1.contiguous().data_ptr<scalar_t>(), p2.contiguous().data_ptr<scalar_t>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), dists.data_ptr<scalar_t>(), idxs.data_ptr<int64_t>(), N, P1, P2); })); } AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(idxs, dists); } // ------------------------------------------------------------- // // Backward Operators // // ------------------------------------------------------------- // // TODO(gkioxari) support all data types once AtomicAdd supports doubles. // Currently, support is for floats only. 
__global__ void KNearestNeighborBackwardKernel( const float* __restrict__ p1, // (N, P1, D) const float* __restrict__ p2, // (N, P2, D) const int64_t* __restrict__ lengths1, // (N,) const int64_t* __restrict__ lengths2, // (N,) const int64_t* __restrict__ idxs, // (N, P1, K) const float* __restrict__ grad_dists, // (N, P1, K) float* __restrict__ grad_p1, // (N, P1, D) float* __restrict__ grad_p2, // (N, P2, D) const size_t N, const size_t P1, const size_t P2, const size_t K, const size_t D) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = gridDim.x * blockDim.x; for (size_t i = tid; i < N * P1 * K * D; i += stride) { const size_t n = i / (P1 * K * D); // batch index size_t rem = i % (P1 * K * D); const size_t p1_idx = rem / (K * D); // index of point in p1 rem = rem % (K * D); const size_t k = rem / D; // k-th nearest neighbor const size_t d = rem % D; // d-th dimension in the feature vector const size_t num1 = lengths1[n]; // number of valid points in p1 in batch const size_t num2 = lengths2[n]; // number of valid points in p2 in batch if ((p1_idx < num1) && (k < num2)) { const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k]; // index of point in p2 corresponding to the k-th nearest neighbor const size_t p2_idx = idxs[n * P1 * K + p1_idx * K + k]; // If the index is the pad value of -1 then ignore it if (p2_idx == -1) { continue; } const float diff = 2.0 * grad_dist * (p1[n * P1 * D + p1_idx * D + d] - p2[n * P2 * D + p2_idx * D + d]); atomicAdd(grad_p1 + n * P1 * D + p1_idx * D + d, diff); atomicAdd(grad_p2 + n * P2 * D + p2_idx * D + d, -1.0f * diff); } } } std::tuple<at::Tensor, at::Tensor> KNearestNeighborBackwardCuda( const at::Tensor& p1, const at::Tensor& p2, const at::Tensor& lengths1, const at::Tensor& lengths2, const at::Tensor& idxs, const at::Tensor& grad_dists) { // Check inputs are on the same device at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2}, lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4}, idxs_t{idxs, "idxs", 5}, grad_dists_t{grad_dists, "grad_dists", 6}; at::CheckedFrom c = "KNearestNeighborBackwardCuda"; at::checkAllSameGPU( c, {p1_t, p2_t, lengths1_t, lengths2_t, idxs_t, grad_dists_t}); at::checkAllSameType(c, {p1_t, p2_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(p1.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const auto N = p1.size(0); const auto P1 = p1.size(1); const auto P2 = p2.size(1); const auto D = p2.size(2); const auto K = idxs.size(2); TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension"); TORCH_CHECK(idxs.size(0) == N, "KNN idxs must have the same batch dimension"); TORCH_CHECK( idxs.size(1) == P1, "KNN idxs must have the same point dimension as p1"); TORCH_CHECK(grad_dists.size(0) == N); TORCH_CHECK(grad_dists.size(1) == P1); TORCH_CHECK(grad_dists.size(2) == K); auto grad_p1 = at::zeros({N, P1, D}, p1.options()); auto grad_p2 = at::zeros({N, P2, D}, p2.options()); if (grad_p1.numel() == 0 || grad_p2.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(grad_p1, grad_p2); } const int blocks = 64; const int threads = 512; KNearestNeighborBackwardKernel<<<blocks, threads, 0, stream>>>( p1.contiguous().data_ptr<float>(), p2.contiguous().data_ptr<float>(), lengths1.contiguous().data_ptr<int64_t>(), lengths2.contiguous().data_ptr<int64_t>(), idxs.contiguous().data_ptr<int64_t>(), grad_dists.contiguous().data_ptr<float>(), 
grad_p1.data_ptr<float>(), grad_p2.data_ptr<float>(), N, P1, P2, K, D); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(grad_p1, grad_p2); }
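The KNearestNeighborBackwardKernel above spreads each incoming gradient grad_dists[n, p1_idx, k] onto both clouds via the derivative of a squared Euclidean distance (2*(p1 - p2) toward grad_p1, its negation toward grad_p2), using atomicAdd because many (p1_idx, k) pairs can hit the same p2 point. A minimal serial reference of the same accumulation, assuming that distance convention, zero-initialized outputs, and fully populated clouds (the lengths1/lengths2 masking is omitted); the name knn_backward_cpu and the flat std::vector layout are illustrative only, not part of the source:

#include <cstdint>
#include <vector>

void knn_backward_cpu(const std::vector<float>& p1,          // (N, P1, D)
                      const std::vector<float>& p2,          // (N, P2, D)
                      const std::vector<int64_t>& idxs,      // (N, P1, K), -1 = padding
                      const std::vector<float>& grad_dists,  // (N, P1, K)
                      std::vector<float>& grad_p1,           // (N, P1, D), zero-initialized
                      std::vector<float>& grad_p2,           // (N, P2, D), zero-initialized
                      int N, int P1, int P2, int D, int K) {
  for (int n = 0; n < N; ++n)
    for (int i = 0; i < P1; ++i)
      for (int k = 0; k < K; ++k) {
        const int64_t j = idxs[(n * P1 + i) * K + k];
        if (j < 0) continue;                       // same -1 padding test as the kernel
        const float g = grad_dists[(n * P1 + i) * K + k];
        for (int d = 0; d < D; ++d) {
          const float diff = 2.0f * g * (p1[(n * P1 + i) * D + d] -
                                         p2[(n * P2 + j) * D + d]);
          grad_p1[(n * P1 + i) * D + d] += diff;   // serial loop, so no atomics needed
          grad_p2[(n * P2 + j) * D + d] -= diff;
        }
      }
}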
4128112d0b783bd23b5bcfd908ddce7a412095f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex alver(hipComplex z) { hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex alvir(hipComplex z) { hipComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex hexva(int m, hipComplex z) { hipComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex ahilv(hipComplex z) { hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex aciwa(hipComplex z) { hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thy(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); 
hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(ahilv(q*z),ahilv(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale =20; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex rhuva(3.0,0.0); hipComplex rarva(8.0,0.0); hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); 
for(v=0;v<10;v++) { /* stripes all over the place */ /*cue =cue -aon*ahilv(hilva(cue))-uon*hilva(ahilv(cue))/(uon*ahilv(hilva(q))-aon*ai*hilva(ahilv(cue)));*/ /*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/ cue = nue; cue =cue - helva(cue)/(alver(cue+aon*helva(cue))/ahilv(cue)-uon); accume = accume + origo(hilva(cue*nue),uon*fixon); } cue = accume; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
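For reference, the infinite products and sums that qpoch, they3 and thess truncate are, in standard notation (note the loop in qpoch starts at Q = q, so the k = 0 factor (1 - a) is never multiplied in; whether that is intentional is not stated in the source):

(a; q)_\infty = \prod_{k=0}^{\infty} \bigl(1 - a q^{k}\bigr), \qquad
(a; q)_n = \prod_{k=0}^{n-1} \bigl(1 - a q^{k}\bigr), \qquad |q| < 1,

\vartheta_3(z \mid q) = \sum_{n=-\infty}^{\infty} q^{n^{2}} e^{2 i n z}
  = \prod_{m=1}^{\infty} \bigl(1 - q^{2m}\bigr)\bigl(1 + 2 q^{2m-1}\cos 2z + q^{4m-2}\bigr).

thess matches the product form term for term (qoo/q = q^{2m-1} and qoo*qoo/(q*q) = q^{4m-2}), while they3 sums q^{n^2} e^{i n z}, i.e. a halved-argument convention relative to the series above.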
4128112d0b783bd23b5bcfd908ddce7a412095f8.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex alver(cuComplex z) { cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex alvir(cuComplex z) { cuComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex hexva(int m, cuComplex z) { cuComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex ahilv(cuComplex z) { cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex aciwa(cuComplex z) { cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thy(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q 
* q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(ahilv(q*z),ahilv(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale =20; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex rhuva(3.0,0.0); cuComplex rarva(8.0,0.0); cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<10;v++) { /* stripes all over the place */ /*cue =cue -aon*ahilv(hilva(cue))-uon*hilva(ahilv(cue))/(uon*ahilv(hilva(q))-aon*ai*hilva(ahilv(cue)));*/ /*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/ cue = nue; cue =cue - helva(cue)/(alver(cue+aon*helva(cue))/ahilv(cue)-uon); accume = accume + origo(hilva(cue*nue),uon*fixon); } cue = accume; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) 
(255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
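kernelLauncher above only configures the grid and launches distanceKernel; the host loop that owns d_out lives elsewhere (kernel.h and the original project's display code). A hedged sketch of a standalone driver, where the DIM x DIM output size, the buffer name d_img and the centred pos are assumptions for illustration, not the project's actual setup:

#include <cuda_runtime.h>

void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos);  // defined above

int main() {
  const int w = 2100, h = 2100;                 // assumed equal to DIM
  uchar4 *d_img = nullptr;
  cudaMalloc(&d_img, (size_t)w * h * sizeof(uchar4));

  int2 pos = make_int2(w / 2, h / 2);           // the "mouse" position that drives aon/uon
  kernelLauncher(d_img, w, h, pos);
  cudaDeviceSynchronize();

  // ... cudaMemcpy the image back and write it to disk in a format of your choice ...
  cudaFree(d_img);
  return 0;
}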
0e7d9ac7a276e1e3f4fa9c24aaaf2579ae011f5f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _DEV_PREDICT_CU_
#define _DEV_PREDICT_CU_

#include "dev_textures.h"

__global__ void dev_predictor(int dt,
                              float4 *bodies_pos,
                              float4 *bodies_vel,
                              float4 *hydro_data,
                              float4 *bodies_dots,
                              float4 *grav_data,
                              float4 *bodies_old_vel) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;

  // per-body state loaded from global memory
  float4 body_pos   = bodies_pos[index];
  float4 body_vel   = bodies_vel[index];
  float4 body_dots  = bodies_dots[index];
  float4 body_hydro = hydro_data[index];        // was a reference to an undeclared bodies_hydro_data
  float4 old_vel    = bodies_old_vel[index];    // initializer reconstructed; the fragment broke off here

  float dth = 0.5f * dt;                        // half time step (unused in this fragment)

  // drift: advance positions by a full step of the current velocity
  body_pos.x += body_vel.x * dt;
  body_pos.y += body_vel.y * dt;
  body_pos.z += body_vel.z * dt;

  // neighbour bookkeeping from the textures bound in dev_textures.h
  int n_ngb  = tex1Dfetch(n_ngb_tex, index);
  int offset = tex1Dfetch(ngb_offset_tex, index);

  bodies_pos[index] = body_pos;                 // store the predicted position
}
#endif // _DEV_PREDICT_CU_
0e7d9ac7a276e1e3f4fa9c24aaaf2579ae011f5f.cu
#ifndef _DEV_PREDICT_CU_
#define _DEV_PREDICT_CU_

#include "dev_textures.h"

__global__ void dev_predictor(int dt,
                              float4 *bodies_pos,
                              float4 *bodies_vel,
                              float4 *hydro_data,
                              float4 *bodies_dots,
                              float4 *grav_data,
                              float4 *bodies_old_vel) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;

  // per-body state loaded from global memory
  float4 body_pos   = bodies_pos[index];
  float4 body_vel   = bodies_vel[index];
  float4 body_dots  = bodies_dots[index];
  float4 body_hydro = hydro_data[index];        // was a reference to an undeclared bodies_hydro_data
  float4 old_vel    = bodies_old_vel[index];    // initializer reconstructed; the fragment broke off here

  float dth = 0.5f * dt;                        // half time step (unused in this fragment)

  // drift: advance positions by a full step of the current velocity
  body_pos.x += body_vel.x * dt;
  body_pos.y += body_vel.y * dt;
  body_pos.z += body_vel.z * dt;

  // neighbour bookkeeping from the textures bound in dev_textures.h
  int n_ngb  = tex1Dfetch(n_ngb_tex, index);
  int offset = tex1Dfetch(ngb_offset_tex, index);

  bodies_pos[index] = body_pos;                 // store the predicted position
}
#endif // _DEV_PREDICT_CU_
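The fragment above computes dth = 0.5*dt but never uses it and breaks off before most state is written back, so the intended integration scheme can only be guessed at. A hedged kick-drift sketch of what such a predictor step often looks like; interpreting grav_data as an acceleration and adding an n_bodies bound are assumptions, not something the fragment states:

// Hypothetical predictor step; grav_data read as acceleration is an assumption.
__global__ void dev_predictor_sketch(float dt,
                                     float4 *bodies_pos, float4 *bodies_vel,
                                     const float4 *grav_data, int n_bodies) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n_bodies) return;                    // bounds check the original fragment lacks

  float4 pos = bodies_pos[i];
  float4 vel = bodies_vel[i];
  float4 acc = grav_data[i];
  float  dth = 0.5f * dt;

  // kick: half-step velocity update, then drift: full-step position update
  vel.x += acc.x * dth;  vel.y += acc.y * dth;  vel.z += acc.z * dth;
  pos.x += vel.x * dt;   pos.y += vel.y * dt;   pos.z += vel.z * dt;

  bodies_pos[i] = pos;
  bodies_vel[i] = vel;
}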
58617ab79bb085fb6d96d973f4092b52f12a30bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include <assert.h> inline hipError_t checkCuda(hipError_t result) { if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } return result; } // Simple define to index into a 1D array from 2D space #define I2D(num, c, r) ((r)*(num)+(c)) /* * `step_kernel_mod` is currently a direct copy of the CPU reference solution * `step_kernel_ref` below. Accelerate it to run as a CUDA kernel. */ __global__ void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out) { int i00, im10, ip10, i0m1, i0p1; float d2tdx2, d2tdy2; int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; // loop over all points in domain (except boundary) if ( j < nj - 1 && i < ni - 1 && j > 0 && i > 0 ){ // find indices into linear memory // for central point and neighbours i00 = I2D(ni, i, j); im10 = I2D(ni, i-1, j); ip10 = I2D(ni, i+1, j); i0m1 = I2D(ni, i, j-1); i0p1 = I2D(ni, i, j+1); // evaluate derivatives d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10]; d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1]; // update temperatures temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2); } } void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out) { int i00, im10, ip10, i0m1, i0p1; float d2tdx2, d2tdy2; // loop over all points in domain (except boundary) for ( int j=1; j < nj-1; j++ ) { for ( int i=1; i < ni-1; i++ ) { // find indices into linear memory // for central point and neighbours i00 = I2D(ni, i, j); im10 = I2D(ni, i-1, j); ip10 = I2D(ni, i+1, j); i0m1 = I2D(ni, i, j-1); i0p1 = I2D(ni, i, j+1); // evaluate derivatives d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10]; d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1]; // update temperatures temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2); } } } int main() { int istep; int nstep = 200; // number of time steps // Specify our 2D dimensions const int ni = 200; const int nj = 100; float tfac = 8.418e-5; // thermal diffusivity of silver float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp; const int size = ni * nj * sizeof(float); temp1_ref = (float*)malloc(size); temp2_ref = (float*)malloc(size); hipMallocManaged ( &temp1, size); hipMallocManaged ( &temp2, size); dim3 threads_per_block (8,8,1); dim3 number_of_blocks ((nj + threads_per_block.x - 1)/ threads_per_block.x, (ni + threads_per_block.y - 1) / threads_per_block.y, 1); // Initialize with random data for( int i = 0; i < ni*nj; ++i) { temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f); } // Execute the CPU-only reference version for (istep=0; istep < nstep; istep++) { step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref); // swap the temperature pointers temp_tmp = temp1_ref; temp1_ref = temp2_ref; temp2_ref= temp_tmp; } // Execute the modified version using same data for (istep=0; istep < nstep; istep++) { hipLaunchKernelGGL(( step_kernel_mod), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, ni, nj, tfac, temp1, temp2); checkCuda(hipGetLastError()); checkCuda(hipDeviceSynchronize()); // swap the temperature pointers temp_tmp = temp1; temp1 = temp2; temp2= temp_tmp; } float maxError = 0; // Output should always be stored in the temp1 and temp1_ref at this point for( int i = 0; i < ni*nj; ++i ) { if (abs(temp1[i]-temp1_ref[i]) > maxError) { maxError = abs(temp1[i]-temp1_ref[i]); } } 
// Check and see if our maxError is greater than an error bound if (maxError > 0.0005f) printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError); else printf("The Max Error of %.5f is within acceptable bounds.\n", maxError); free( temp1_ref ); free( temp2_ref ); hipFree( temp1 ); hipFree( temp2 ); return 0; }
58617ab79bb085fb6d96d973f4092b52f12a30bb.cu
#include <stdio.h> #include <math.h> #include <assert.h> inline cudaError_t checkCuda(cudaError_t result) { if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } return result; } // Simple define to index into a 1D array from 2D space #define I2D(num, c, r) ((r)*(num)+(c)) /* * `step_kernel_mod` is currently a direct copy of the CPU reference solution * `step_kernel_ref` below. Accelerate it to run as a CUDA kernel. */ __global__ void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out) { int i00, im10, ip10, i0m1, i0p1; float d2tdx2, d2tdy2; int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; // loop over all points in domain (except boundary) if ( j < nj - 1 && i < ni - 1 && j > 0 && i > 0 ){ // find indices into linear memory // for central point and neighbours i00 = I2D(ni, i, j); im10 = I2D(ni, i-1, j); ip10 = I2D(ni, i+1, j); i0m1 = I2D(ni, i, j-1); i0p1 = I2D(ni, i, j+1); // evaluate derivatives d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10]; d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1]; // update temperatures temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2); } } void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out) { int i00, im10, ip10, i0m1, i0p1; float d2tdx2, d2tdy2; // loop over all points in domain (except boundary) for ( int j=1; j < nj-1; j++ ) { for ( int i=1; i < ni-1; i++ ) { // find indices into linear memory // for central point and neighbours i00 = I2D(ni, i, j); im10 = I2D(ni, i-1, j); ip10 = I2D(ni, i+1, j); i0m1 = I2D(ni, i, j-1); i0p1 = I2D(ni, i, j+1); // evaluate derivatives d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10]; d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1]; // update temperatures temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2); } } } int main() { int istep; int nstep = 200; // number of time steps // Specify our 2D dimensions const int ni = 200; const int nj = 100; float tfac = 8.418e-5; // thermal diffusivity of silver float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp; const int size = ni * nj * sizeof(float); temp1_ref = (float*)malloc(size); temp2_ref = (float*)malloc(size); cudaMallocManaged ( &temp1, size); cudaMallocManaged ( &temp2, size); dim3 threads_per_block (8,8,1); dim3 number_of_blocks ((nj + threads_per_block.x - 1)/ threads_per_block.x, (ni + threads_per_block.y - 1) / threads_per_block.y, 1); // Initialize with random data for( int i = 0; i < ni*nj; ++i) { temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f); } // Execute the CPU-only reference version for (istep=0; istep < nstep; istep++) { step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref); // swap the temperature pointers temp_tmp = temp1_ref; temp1_ref = temp2_ref; temp2_ref= temp_tmp; } // Execute the modified version using same data for (istep=0; istep < nstep; istep++) { step_kernel_mod<<< number_of_blocks, threads_per_block>>> (ni, nj, tfac, temp1, temp2); checkCuda(cudaGetLastError()); checkCuda(cudaDeviceSynchronize()); // swap the temperature pointers temp_tmp = temp1; temp1 = temp2; temp2= temp_tmp; } float maxError = 0; // Output should always be stored in the temp1 and temp1_ref at this point for( int i = 0; i < ni*nj; ++i ) { if (abs(temp1[i]-temp1_ref[i]) > maxError) { maxError = abs(temp1[i]-temp1_ref[i]); } } // Check and see if our maxError is greater than an error bound if (maxError > 0.0005f) printf("Problem! 
The Max Error of %.5f is NOT within acceptable bounds.\n", maxError); else printf("The Max Error of %.5f is within acceptable bounds.\n", maxError); free( temp1_ref ); free( temp2_ref ); cudaFree( temp1 ); cudaFree( temp2 ); return 0; }
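The launch configuration above maps threadIdx.x to the j (nj-wide) direction and threadIdx.y to the i (ni-wide) direction, rounding both block counts up so the grid covers the whole 200 x 100 domain; the in-kernel bounds test then discards the overhang and the boundary ring. The arithmetic for the sizes used above, as a quick standalone check:

#include <cassert>

int main() {
  const int ni = 200, nj = 100, bx = 8, by = 8;
  const int blocks_x = (nj + bx - 1) / bx;   // 13 blocks * 8 = 104 threads cover j = 0..99
  const int blocks_y = (ni + by - 1) / by;   // 25 blocks * 8 = 200 threads cover i = 0..199
  assert(blocks_x == 13 && blocks_y == 25);
  assert(blocks_x * bx >= nj && blocks_y * by >= ni);
  return 0;
}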
73f5573b21caeaba83e31f3afcc04fbd70d9ef1b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> //#include <cutil.h> #define TILE_WIDTH 64 #define WIDTH_PER_THREAD 8 #define SW TILE_WIDTH/WIDTH_PER_THREAD #define N 2048 texture<float, 2, hipReadModeElementType> tex_A; texture<float, 2, hipReadModeElementType> tex_B; surface<void, 2> surf_C; void err_handling(hipError_t *err, const char *str) { if (*err != hipSuccess) { printf("%s\n", str); exit(EXIT_FAILURE); } } __global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n) { __shared__ float sA_bf[2][8*64]; __shared__ float sB_bf[2][8*64]; float *A_pref, *A_now; float *B_pref, *B_now; int x = threadIdx.x; int y = threadIdx.y; int bx = blockIdx.x*64; int by = blockIdx.y*64; int id = y*8+x; int inv_id = (id%32)/4*8 + id%4 + (id < 32 ? 0 : 4); int glbA_id = by + inv_id; int glbB_id = bx + inv_id; int row = by + y*8; int col = bx + x*8; float a0[8]; float a1[8]; float b0[8]; float b1[8]; float c00 = 0.0; float c01 = 0.0; float c02 = 0.0; float c03 = 0.0, c04 = 0.0; float c05 = 0.0; float c06 = 0.0; float c07 = 0.0; float c10 = 0.0; float c11 = 0.0; float c12 = 0.0; float c13 = 0.0, c14 = 0.0; float c15 = 0.0; float c16 = 0.0; float c17 = 0.0; float c20 = 0.0; float c21 = 0.0; float c22 = 0.0; float c23 = 0.0, c24 = 0.0; float c25 = 0.0; float c26 = 0.0; float c27 = 0.0; float c30 = 0.0; float c31 = 0.0; float c32 = 0.0; float c33 = 0.0, c34 = 0.0; float c35 = 0.0; float c36 = 0.0; float c37 = 0.0; float c40 = 0.0; float c41 = 0.0; float c42 = 0.0; float c43 = 0.0, c44 = 0.0; float c45 = 0.0; float c46 = 0.0; float c47 = 0.0; float c50 = 0.0; float c51 = 0.0; float c52 = 0.0; float c53 = 0.0, c54 = 0.0; float c55 = 0.0; float c56 = 0.0; float c57 = 0.0; float c60 = 0.0; float c61 = 0.0; float c62 = 0.0; float c63 = 0.0, c64 = 0.0; float c65 = 0.0; float c66 = 0.0; float c67 = 0.0; float c70 = 0.0; float c71 = 0.0; float c72 = 0.0; float c73 = 0.0, c74 = 0.0; float c75 = 0.0; float c76 = 0.0; float c77 = 0.0; sA_bf[0][0*64+id] = tex2D(tex_A, 0, glbA_id); sA_bf[0][1*64+id] = tex2D(tex_A, 1, glbA_id); sA_bf[0][2*64+id] = tex2D(tex_A, 2, glbA_id); sA_bf[0][3*64+id] = tex2D(tex_A, 3, glbA_id); sA_bf[0][4*64+id] = tex2D(tex_A, 4, glbA_id); sA_bf[0][5*64+id] = tex2D(tex_A, 5, glbA_id); sA_bf[0][6*64+id] = tex2D(tex_A, 6, glbA_id); sA_bf[0][7*64+id] = tex2D(tex_A, 7, glbA_id); sB_bf[0][0*64+id] = tex2D(tex_B, glbB_id, 0); sB_bf[0][1*64+id] = tex2D(tex_B, glbB_id, 1); sB_bf[0][2*64+id] = tex2D(tex_B, glbB_id, 2); sB_bf[0][3*64+id] = tex2D(tex_B, glbB_id, 3); sB_bf[0][4*64+id] = tex2D(tex_B, glbB_id, 4); sB_bf[0][5*64+id] = tex2D(tex_B, glbB_id, 5); sB_bf[0][6*64+id] = tex2D(tex_B, glbB_id, 6); sB_bf[0][7*64+id] = tex2D(tex_B, glbB_id, 7); A_pref = sA_bf[1]; B_pref = sB_bf[1]; A_now = sA_bf[0]; B_now = sB_bf[0]; int track_bf = 0; for (int t = 8; t < k; t += 8) { __syncthreads(); A_pref[id] = tex2D(tex_A, t, glbA_id); B_pref[id] = tex2D(tex_B, glbB_id, t); ((float4*)a0)[0] = ((float4*)A_now)[y]; ((float4*)a0)[1] = ((float4*)A_now)[y+8]; ((float4*)b0)[0] = ((float4*)B_now)[x]; ((float4*)b0)[1] = ((float4*)B_now)[x+8]; #pragma unroll for (int i = 1; i < 8; ++i) { int base = i * 16; A_pref[i*64+id] = tex2D(tex_A, t+i, glbA_id); B_pref[i*64+id] = tex2D(tex_B, glbB_id, t+i); if (i&1) { c00 += a0[0] * b0[0]; c01 += a0[0] * b0[1]; c02 += a0[0] * b0[2]; c03 += a0[0] * b0[3]; c04 += a0[0] * b0[4]; c05 += a0[0] * b0[5]; c06 += a0[0] * b0[6]; c07 += a0[0] * b0[7]; ((float4*)a1)[0] = ((float4*)A_now)[base+y]; c10 += a0[1] * b0[0]; 
c11 += a0[1] * b0[1]; c12 += a0[1] * b0[2]; c13 += a0[1] * b0[3]; c14 += a0[1] * b0[4]; c15 += a0[1] * b0[5]; c16 += a0[1] * b0[6]; c17 += a0[1] * b0[7]; c20 += a0[2] * b0[0]; c21 += a0[2] * b0[1]; c22 += a0[2] * b0[2]; c23 += a0[2] * b0[3]; c24 += a0[2] * b0[4]; c25 += a0[2] * b0[5]; c26 += a0[2] * b0[6]; c27 += a0[2] * b0[7]; ((float4*)b1)[0] = ((float4*)B_now)[base+x]; c30 += a0[3] * b0[0]; c31 += a0[3] * b0[1]; c32 += a0[3] * b0[2]; c33 += a0[3] * b0[3]; c34 += a0[3] * b0[4]; c35 += a0[3] * b0[5]; c36 += a0[3] * b0[6]; c37 += a0[3] * b0[7]; c40 += a0[4] * b0[0]; c41 += a0[4] * b0[1]; c42 += a0[4] * b0[2]; c43 += a0[4] * b0[3]; c44 += a0[4] * b0[4]; c45 += a0[4] * b0[5]; c46 += a0[4] * b0[6]; c47 += a0[4] * b0[7]; ((float4*)a1)[1] = ((float4*)A_now)[base+y+8]; c50 += a0[5] * b0[0]; c51 += a0[5] * b0[1]; c52 += a0[5] * b0[2]; c53 += a0[5] * b0[3]; c54 += a0[5] * b0[4]; c55 += a0[5] * b0[5]; c56 += a0[5] * b0[6]; c57 += a0[5] * b0[7]; c60 += a0[6] * b0[0]; c61 += a0[6] * b0[1]; c62 += a0[6] * b0[2]; c63 += a0[6] * b0[3]; c64 += a0[6] * b0[4]; c65 += a0[6] * b0[5]; c66 += a0[6] * b0[6]; c67 += a0[6] * b0[7]; ((float4*)b1)[1] = ((float4*)B_now)[base+x+8]; c70 += a0[7] * b0[0]; c71 += a0[7] * b0[1]; c72 += a0[7] * b0[2]; c73 += a0[7] * b0[3]; c74 += a0[7] * b0[4]; c75 += a0[7] * b0[5]; c76 += a0[7] * b0[6]; c77 += a0[7] * b0[7]; } else { c00 += a1[0] * b1[0]; c01 += a1[0] * b1[1]; c02 += a1[0] * b1[2]; c03 += a1[0] * b1[3]; c04 += a1[0] * b1[4]; c05 += a1[0] * b1[5]; c06 += a1[0] * b1[6]; c07 += a1[0] * b1[7]; ((float4*)a0)[0] = ((float4*)A_now)[base+y]; c10 += a1[1] * b1[0]; c11 += a1[1] * b1[1]; c12 += a1[1] * b1[2]; c13 += a1[1] * b1[3]; c14 += a1[1] * b1[4]; c15 += a1[1] * b1[5]; c16 += a1[1] * b1[6]; c17 += a1[1] * b1[7]; c20 += a1[2] * b1[0]; c21 += a1[2] * b1[1]; c22 += a1[2] * b1[2]; c23 += a1[2] * b1[3]; c24 += a1[2] * b1[4]; c25 += a1[2] * b1[5]; c26 += a1[2] * b1[6]; c27 += a1[2] * b1[7]; ((float4*)b0)[0] = ((float4*)B_now)[base+x]; c30 += a1[3] * b1[0]; c31 += a1[3] * b1[1]; c32 += a1[3] * b1[2]; c33 += a1[3] * b1[3]; c34 += a1[3] * b1[4]; c35 += a1[3] * b1[5]; c36 += a1[3] * b1[6]; c37 += a1[3] * b1[7]; c40 += a1[4] * b1[0]; c41 += a1[4] * b1[1]; c42 += a1[4] * b1[2]; c43 += a1[4] * b1[3]; c44 += a1[4] * b1[4]; c45 += a1[4] * b1[5]; c46 += a1[4] * b1[6]; c47 += a1[4] * b1[7]; ((float4*)a0)[1] = ((float4*)A_now)[base+y+8]; c50 += a1[5] * b1[0]; c51 += a1[5] * b1[1]; c52 += a1[5] * b1[2]; c53 += a1[5] * b1[3]; c54 += a1[5] * b1[4]; c55 += a1[5] * b1[5]; c56 += a1[5] * b1[6]; c57 += a1[5] * b1[7]; c60 += a1[6] * b1[0]; c61 += a1[6] * b1[1]; c62 += a1[6] * b1[2]; c63 += a1[6] * b1[3]; c64 += a1[6] * b1[4]; c65 += a1[6] * b1[5]; c66 += a1[6] * b1[6]; c67 += a1[6] * b1[7]; ((float4*)b0)[1] = ((float4*)B_now)[base+x+8]; c70 += a1[7] * b1[0]; c71 += a1[7] * b1[1]; c72 += a1[7] * b1[2]; c73 += a1[7] * b1[3]; c74 += a1[7] * b1[4]; c75 += a1[7] * b1[5]; c76 += a1[7] * b1[6]; c77 += a1[7] * b1[7]; } } c00 += a1[0] * b1[0]; c01 += a1[0] * b1[1]; c02 += a1[0] * b1[2]; c03 += a1[0] * b1[3]; c04 += a1[0] * b1[4]; c05 += a1[0] * b1[5]; c06 += a1[0] * b1[6]; c07 += a1[0] * b1[7]; c10 += a1[1] * b1[0]; c11 += a1[1] * b1[1]; c12 += a1[1] * b1[2]; c13 += a1[1] * b1[3]; c14 += a1[1] * b1[4]; c15 += a1[1] * b1[5]; c16 += a1[1] * b1[6]; c17 += a1[1] * b1[7]; c20 += a1[2] * b1[0]; c21 += a1[2] * b1[1]; c22 += a1[2] * b1[2]; c23 += a1[2] * b1[3]; c24 += a1[2] * b1[4]; c25 += a1[2] * b1[5]; c26 += a1[2] * b1[6]; c27 += a1[2] * b1[7]; c30 += a1[3] * b1[0]; c31 += a1[3] * b1[1]; c32 += a1[3] * b1[2]; c33 += 
a1[3] * b1[3]; c34 += a1[3] * b1[4]; c35 += a1[3] * b1[5]; c36 += a1[3] * b1[6]; c37 += a1[3] * b1[7]; c40 += a1[4] * b1[0]; c41 += a1[4] * b1[1]; c42 += a1[4] * b1[2]; c43 += a1[4] * b1[3]; c44 += a1[4] * b1[4]; c45 += a1[4] * b1[5]; c46 += a1[4] * b1[6]; c47 += a1[4] * b1[7]; c50 += a1[5] * b1[0]; c51 += a1[5] * b1[1]; c52 += a1[5] * b1[2]; c53 += a1[5] * b1[3]; c54 += a1[5] * b1[4]; c55 += a1[5] * b1[5]; c56 += a1[5] * b1[6]; c57 += a1[5] * b1[7]; c60 += a1[6] * b1[0]; c61 += a1[6] * b1[1]; c62 += a1[6] * b1[2]; c63 += a1[6] * b1[3]; c64 += a1[6] * b1[4]; c65 += a1[6] * b1[5]; c66 += a1[6] * b1[6]; c67 += a1[6] * b1[7]; c70 += a1[7] * b1[0]; c71 += a1[7] * b1[1]; c72 += a1[7] * b1[2]; c73 += a1[7] * b1[3]; c74 += a1[7] * b1[4]; c75 += a1[7] * b1[5]; c76 += a1[7] * b1[6]; c77 += a1[7] * b1[7]; A_pref = sA_bf[track_bf]; B_pref = sB_bf[track_bf]; A_now = sA_bf[1-track_bf]; B_now = sB_bf[1-track_bf]; track_bf = 1 - track_bf; } __syncthreads(); ((float4*)a0)[0] = ((float4*)A_now)[y]; ((float4*)a0)[1] = ((float4*)A_now)[y+8]; ((float4*)b0)[0] = ((float4*)B_now)[x]; ((float4*)b0)[1] = ((float4*)B_now)[x+8]; #pragma unroll for (int i = 1; i < 8; ++i) { int base = i * 16; if (i&1) { c00 += a0[0] * b0[0]; c01 += a0[0] * b0[1]; c02 += a0[0] * b0[2]; c03 += a0[0] * b0[3]; c04 += a0[0] * b0[4]; c05 += a0[0] * b0[5]; c06 += a0[0] * b0[6]; c07 += a0[0] * b0[7]; ((float4*)a1)[0] = ((float4*)A_now)[base+y]; c10 += a0[1] * b0[0]; c11 += a0[1] * b0[1]; c12 += a0[1] * b0[2]; c13 += a0[1] * b0[3]; c14 += a0[1] * b0[4]; c15 += a0[1] * b0[5]; c16 += a0[1] * b0[6]; c17 += a0[1] * b0[7]; c20 += a0[2] * b0[0]; c21 += a0[2] * b0[1]; c22 += a0[2] * b0[2]; c23 += a0[2] * b0[3]; c24 += a0[2] * b0[4]; c25 += a0[2] * b0[5]; c26 += a0[2] * b0[6]; c27 += a0[2] * b0[7]; ((float4*)b1)[0] = ((float4*)B_now)[base+x]; c30 += a0[3] * b0[0]; c31 += a0[3] * b0[1]; c32 += a0[3] * b0[2]; c33 += a0[3] * b0[3]; c34 += a0[3] * b0[4]; c35 += a0[3] * b0[5]; c36 += a0[3] * b0[6]; c37 += a0[3] * b0[7]; c40 += a0[4] * b0[0]; c41 += a0[4] * b0[1]; c42 += a0[4] * b0[2]; c43 += a0[4] * b0[3]; c44 += a0[4] * b0[4]; c45 += a0[4] * b0[5]; c46 += a0[4] * b0[6]; c47 += a0[4] * b0[7]; ((float4*)a1)[1] = ((float4*)A_now)[base+y+8]; c50 += a0[5] * b0[0]; c51 += a0[5] * b0[1]; c52 += a0[5] * b0[2]; c53 += a0[5] * b0[3]; c54 += a0[5] * b0[4]; c55 += a0[5] * b0[5]; c56 += a0[5] * b0[6]; c57 += a0[5] * b0[7]; c60 += a0[6] * b0[0]; c61 += a0[6] * b0[1]; c62 += a0[6] * b0[2]; c63 += a0[6] * b0[3]; c64 += a0[6] * b0[4]; c65 += a0[6] * b0[5]; c66 += a0[6] * b0[6]; c67 += a0[6] * b0[7]; ((float4*)b1)[1] = ((float4*)B_now)[base+x+8]; c70 += a0[7] * b0[0]; c71 += a0[7] * b0[1]; c72 += a0[7] * b0[2]; c73 += a0[7] * b0[3]; c74 += a0[7] * b0[4]; c75 += a0[7] * b0[5]; c76 += a0[7] * b0[6]; c77 += a0[7] * b0[7]; } else { c00 += a1[0] * b1[0]; c01 += a1[0] * b1[1]; c02 += a1[0] * b1[2]; c03 += a1[0] * b1[3]; c04 += a1[0] * b1[4]; c05 += a1[0] * b1[5]; c06 += a1[0] * b1[6]; c07 += a1[0] * b1[7]; ((float4*)a0)[0] = ((float4*)A_now)[base+y]; c10 += a1[1] * b1[0]; c11 += a1[1] * b1[1]; c12 += a1[1] * b1[2]; c13 += a1[1] * b1[3]; c14 += a1[1] * b1[4]; c15 += a1[1] * b1[5]; c16 += a1[1] * b1[6]; c17 += a1[1] * b1[7]; c20 += a1[2] * b1[0]; c21 += a1[2] * b1[1]; c22 += a1[2] * b1[2]; c23 += a1[2] * b1[3]; c24 += a1[2] * b1[4]; c25 += a1[2] * b1[5]; c26 += a1[2] * b1[6]; c27 += a1[2] * b1[7]; ((float4*)b0)[0] = ((float4*)B_now)[base+x]; c30 += a1[3] * b1[0]; c31 += a1[3] * b1[1]; c32 += a1[3] * b1[2]; c33 += a1[3] * b1[3]; c34 += a1[3] * b1[4]; c35 += a1[3] * b1[5]; c36 += 
a1[3] * b1[6]; c37 += a1[3] * b1[7]; c40 += a1[4] * b1[0]; c41 += a1[4] * b1[1]; c42 += a1[4] * b1[2]; c43 += a1[4] * b1[3]; c44 += a1[4] * b1[4]; c45 += a1[4] * b1[5]; c46 += a1[4] * b1[6]; c47 += a1[4] * b1[7]; ((float4*)a0)[1] = ((float4*)A_now)[base+y+8]; c50 += a1[5] * b1[0]; c51 += a1[5] * b1[1]; c52 += a1[5] * b1[2]; c53 += a1[5] * b1[3]; c54 += a1[5] * b1[4]; c55 += a1[5] * b1[5]; c56 += a1[5] * b1[6]; c57 += a1[5] * b1[7]; c60 += a1[6] * b1[0]; c61 += a1[6] * b1[1]; c62 += a1[6] * b1[2]; c63 += a1[6] * b1[3]; c64 += a1[6] * b1[4]; c65 += a1[6] * b1[5]; c66 += a1[6] * b1[6]; c67 += a1[6] * b1[7]; ((float4*)b0)[1] = ((float4*)B_now)[base+x+8]; c70 += a1[7] * b1[0]; c71 += a1[7] * b1[1]; c72 += a1[7] * b1[2]; c73 += a1[7] * b1[3]; c74 += a1[7] * b1[4]; c75 += a1[7] * b1[5]; c76 += a1[7] * b1[6]; c77 += a1[7] * b1[7]; } } c00 += a1[0] * b1[0]; c01 += a1[0] * b1[1]; c02 += a1[0] * b1[2]; c03 += a1[0] * b1[3]; c04 += a1[0] * b1[4]; c05 += a1[0] * b1[5]; c06 += a1[0] * b1[6]; c07 += a1[0] * b1[7]; c10 += a1[1] * b1[0]; c11 += a1[1] * b1[1]; c12 += a1[1] * b1[2]; c13 += a1[1] * b1[3]; c14 += a1[1] * b1[4]; c15 += a1[1] * b1[5]; c16 += a1[1] * b1[6]; c17 += a1[1] * b1[7]; c20 += a1[2] * b1[0]; c21 += a1[2] * b1[1]; c22 += a1[2] * b1[2]; c23 += a1[2] * b1[3]; c24 += a1[2] * b1[4]; c25 += a1[2] * b1[5]; c26 += a1[2] * b1[6]; c27 += a1[2] * b1[7]; c30 += a1[3] * b1[0]; c31 += a1[3] * b1[1]; c32 += a1[3] * b1[2]; c33 += a1[3] * b1[3]; c34 += a1[3] * b1[4]; c35 += a1[3] * b1[5]; c36 += a1[3] * b1[6]; c37 += a1[3] * b1[7]; c40 += a1[4] * b1[0]; c41 += a1[4] * b1[1]; c42 += a1[4] * b1[2]; c43 += a1[4] * b1[3]; c44 += a1[4] * b1[4]; c45 += a1[4] * b1[5]; c46 += a1[4] * b1[6]; c47 += a1[4] * b1[7]; c50 += a1[5] * b1[0]; c51 += a1[5] * b1[1]; c52 += a1[5] * b1[2]; c53 += a1[5] * b1[3]; c54 += a1[5] * b1[4]; c55 += a1[5] * b1[5]; c56 += a1[5] * b1[6]; c57 += a1[5] * b1[7]; c60 += a1[6] * b1[0]; c61 += a1[6] * b1[1]; c62 += a1[6] * b1[2]; c63 += a1[6] * b1[3]; c64 += a1[6] * b1[4]; c65 += a1[6] * b1[5]; c66 += a1[6] * b1[6]; c67 += a1[6] * b1[7]; c70 += a1[7] * b1[0]; c71 += a1[7] * b1[1]; c72 += a1[7] * b1[2]; c73 += a1[7] * b1[3]; c74 += a1[7] * b1[4]; c75 += a1[7] * b1[5]; c76 += a1[7] * b1[6]; c77 += a1[7] * b1[7]; /* for (int i = 0; i < 7; i++) { for (int j = 0; j < 7; j++) { surf2Dwrite(c00, surf_C, (col )*sizeof(float), row , hipBoundaryModeZero); } } */ surf2Dwrite(c00, surf_C, (col )*sizeof(float), row , hipBoundaryModeZero); surf2Dwrite(c01, surf_C, (col+1)*sizeof(float), row , hipBoundaryModeZero); surf2Dwrite(c02, surf_C, (col+2)*sizeof(float), row , hipBoundaryModeZero); surf2Dwrite(c03, surf_C, (col+3)*sizeof(float), row , hipBoundaryModeZero); surf2Dwrite(c04, surf_C, (col+4)*sizeof(float), row , hipBoundaryModeZero); surf2Dwrite(c05, surf_C, (col+5)*sizeof(float), row , hipBoundaryModeZero); surf2Dwrite(c06, surf_C, (col+6)*sizeof(float), row , hipBoundaryModeZero); surf2Dwrite(c07, surf_C, (col+7)*sizeof(float), row , hipBoundaryModeZero); surf2Dwrite(c10, surf_C, (col )*sizeof(float), row+1 , hipBoundaryModeZero); surf2Dwrite(c11, surf_C, (col+1)*sizeof(float), row+1 , hipBoundaryModeZero); surf2Dwrite(c12, surf_C, (col+2)*sizeof(float), row+1 , hipBoundaryModeZero); surf2Dwrite(c13, surf_C, (col+3)*sizeof(float), row+1 , hipBoundaryModeZero); surf2Dwrite(c14, surf_C, (col+4)*sizeof(float), row+1 , hipBoundaryModeZero); surf2Dwrite(c15, surf_C, (col+5)*sizeof(float), row+1 , hipBoundaryModeZero); surf2Dwrite(c16, surf_C, (col+6)*sizeof(float), row+1 , hipBoundaryModeZero); 
surf2Dwrite(c17, surf_C, (col+7)*sizeof(float), row+1 , hipBoundaryModeZero); surf2Dwrite(c20, surf_C, (col )*sizeof(float), row+2 , hipBoundaryModeZero); surf2Dwrite(c21, surf_C, (col+1)*sizeof(float), row+2 , hipBoundaryModeZero); surf2Dwrite(c22, surf_C, (col+2)*sizeof(float), row+2 , hipBoundaryModeZero); surf2Dwrite(c23, surf_C, (col+3)*sizeof(float), row+2 , hipBoundaryModeZero); surf2Dwrite(c24, surf_C, (col+4)*sizeof(float), row+2 , hipBoundaryModeZero); surf2Dwrite(c25, surf_C, (col+5)*sizeof(float), row+2 , hipBoundaryModeZero); surf2Dwrite(c26, surf_C, (col+6)*sizeof(float), row+2 , hipBoundaryModeZero); surf2Dwrite(c27, surf_C, (col+7)*sizeof(float), row+2 , hipBoundaryModeZero); surf2Dwrite(c30, surf_C, (col )*sizeof(float), row+3 , hipBoundaryModeZero); surf2Dwrite(c31, surf_C, (col+1)*sizeof(float), row+3 , hipBoundaryModeZero); surf2Dwrite(c32, surf_C, (col+2)*sizeof(float), row+3 , hipBoundaryModeZero); surf2Dwrite(c33, surf_C, (col+3)*sizeof(float), row+3 , hipBoundaryModeZero); surf2Dwrite(c34, surf_C, (col+4)*sizeof(float), row+3 , hipBoundaryModeZero); surf2Dwrite(c35, surf_C, (col+5)*sizeof(float), row+3 , hipBoundaryModeZero); surf2Dwrite(c36, surf_C, (col+6)*sizeof(float), row+3 , hipBoundaryModeZero); surf2Dwrite(c37, surf_C, (col+7)*sizeof(float), row+3 , hipBoundaryModeZero); surf2Dwrite(c40, surf_C, (col )*sizeof(float), row+4 , hipBoundaryModeZero); surf2Dwrite(c41, surf_C, (col+1)*sizeof(float), row+4 , hipBoundaryModeZero); surf2Dwrite(c42, surf_C, (col+2)*sizeof(float), row+4 , hipBoundaryModeZero); surf2Dwrite(c43, surf_C, (col+3)*sizeof(float), row+4 , hipBoundaryModeZero); surf2Dwrite(c44, surf_C, (col+4)*sizeof(float), row+4 , hipBoundaryModeZero); surf2Dwrite(c45, surf_C, (col+5)*sizeof(float), row+4 , hipBoundaryModeZero); surf2Dwrite(c46, surf_C, (col+6)*sizeof(float), row+4 , hipBoundaryModeZero); surf2Dwrite(c47, surf_C, (col+7)*sizeof(float), row+4 , hipBoundaryModeZero); surf2Dwrite(c50, surf_C, (col )*sizeof(float), row+5 , hipBoundaryModeZero); surf2Dwrite(c51, surf_C, (col+1)*sizeof(float), row+5 , hipBoundaryModeZero); surf2Dwrite(c52, surf_C, (col+2)*sizeof(float), row+5 , hipBoundaryModeZero); surf2Dwrite(c53, surf_C, (col+3)*sizeof(float), row+5 , hipBoundaryModeZero); surf2Dwrite(c54, surf_C, (col+4)*sizeof(float), row+5 , hipBoundaryModeZero); surf2Dwrite(c55, surf_C, (col+5)*sizeof(float), row+5 , hipBoundaryModeZero); surf2Dwrite(c56, surf_C, (col+6)*sizeof(float), row+5 , hipBoundaryModeZero); surf2Dwrite(c57, surf_C, (col+7)*sizeof(float), row+5 , hipBoundaryModeZero); surf2Dwrite(c60, surf_C, (col )*sizeof(float), row+6 , hipBoundaryModeZero); surf2Dwrite(c61, surf_C, (col+1)*sizeof(float), row+6 , hipBoundaryModeZero); surf2Dwrite(c62, surf_C, (col+2)*sizeof(float), row+6 , hipBoundaryModeZero); surf2Dwrite(c63, surf_C, (col+3)*sizeof(float), row+6 , hipBoundaryModeZero); surf2Dwrite(c64, surf_C, (col+4)*sizeof(float), row+6 , hipBoundaryModeZero); surf2Dwrite(c65, surf_C, (col+5)*sizeof(float), row+6 , hipBoundaryModeZero); surf2Dwrite(c66, surf_C, (col+6)*sizeof(float), row+6 , hipBoundaryModeZero); surf2Dwrite(c67, surf_C, (col+7)*sizeof(float), row+6 , hipBoundaryModeZero); surf2Dwrite(c70, surf_C, (col )*sizeof(float), row+7 , hipBoundaryModeZero); surf2Dwrite(c71, surf_C, (col+1)*sizeof(float), row+7 , hipBoundaryModeZero); surf2Dwrite(c72, surf_C, (col+2)*sizeof(float), row+7 , hipBoundaryModeZero); surf2Dwrite(c73, surf_C, (col+3)*sizeof(float), row+7 , hipBoundaryModeZero); surf2Dwrite(c74, surf_C, (col+4)*sizeof(float), 
row+7 , hipBoundaryModeZero); surf2Dwrite(c75, surf_C, (col+5)*sizeof(float), row+7 , hipBoundaryModeZero); surf2Dwrite(c76, surf_C, (col+6)*sizeof(float), row+7 , hipBoundaryModeZero); surf2Dwrite(c77, surf_C, (col+7)*sizeof(float), row+7 , hipBoundaryModeZero); } int main(void) { hipError_t err = hipSuccess; int m = N; int n = N; int k = N; float *A = (float*)malloc(m*k*sizeof(float)); float *B = (float*)malloc(k*n*sizeof(float)); float *C = (float*)malloc(m*n*sizeof(float)); if (A == NULL || B == NULL || C == NULL) { printf("allocate host error!\n"); return 1; } for (int i = 0; i < m*k; ++i) { A[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX; } for (int i = 0; i < k*n; ++i) { B[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX; } float *dev_A = NULL; float *dev_B = NULL; float *dev_C = NULL; err = hipMalloc((void**)&dev_A, m*k*sizeof(float)); err_handling(&err, "allocate device error A!"); err = hipMalloc((void**)&dev_B, k*n*sizeof(float)); err_handling(&err, "allocate device error B!"); err = hipMalloc((void**)&dev_C, m*n*sizeof(float)); err_handling(&err, "allocate device error C!"); err = hipMemcpy(dev_A, A, m*k*sizeof(float), hipMemcpyHostToDevice); err_handling(&err, "memcpy to A error!"); err = hipMemcpy(dev_B, B, k*n*sizeof(float), hipMemcpyHostToDevice); err_handling(&err, "memcpy to B error!"); hipChannelFormatDesc ADesc = hipCreateChannelDesc<float>(); hipChannelFormatDesc BDesc = hipCreateChannelDesc<float>(); hipChannelFormatDesc CDesc = hipCreateChannelDesc<float>(); hipArray *A_array, *B_array, *C_array; hipMallocArray(&A_array, &ADesc, k, m); hipMallocArray(&B_array, &BDesc, n, k); hipMallocArray(&C_array, &CDesc, n, m, hipArraySurfaceLoadStore); hipMemcpyToArray(A_array, 0, 0, A, m*k*sizeof(float), hipMemcpyHostToDevice); hipMemcpyToArray(B_array, 0, 0, B, k*n*sizeof(float), hipMemcpyHostToDevice); hipBindTextureToArray(tex_A, A_array); hipBindTextureToArray(tex_B, B_array); hipBindSurfaceToArray(surf_C, C_array); tex_A.addressMode[0] = hipAddressModeBorder; tex_A.addressMode[1] = hipAddressModeBorder; tex_B.addressMode[0] = hipAddressModeBorder; tex_B.addressMode[1] = hipAddressModeBorder; dim3 dimGrid((n-1)/TILE_WIDTH+1, (m-1)/TILE_WIDTH+1, 1); dim3 dimBlock(TILE_WIDTH/WIDTH_PER_THREAD, TILE_WIDTH/WIDTH_PER_THREAD, 1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( matMul), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_A, dev_B, dev_C, m, k, n); hipEventRecord(stop, 0); hipEventSynchronize(start); hipEventSynchronize(stop); float time_elapsed = 0; hipEventElapsedTime(&time_elapsed, start, stop); printf("%fms\n", time_elapsed); err = hipMemcpyFromArray(C, C_array, 0, 0, m*n*sizeof(float), hipMemcpyDeviceToHost); //err = hipMemcpy(C, dev_C, m*n*sizeof(float), hipMemcpyDeviceToHost); err_handling(&err, "memcpy to host C error!"); FILE *fp = fopen("gpu.out", "w"); for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { fprintf(fp, "%f\n", C[i*N+j]); } } fclose(fp); err = hipFree(dev_A); err_handling(&err, "mem free A error!"); err = hipFree(dev_B); err_handling(&err, "mem free B error!"); err = hipFree(dev_C); err_handling(&err, "mem free C error!"); err = hipDeviceReset(); err_handling(&err, "device reset error!"); return 0; }
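The hipified main() above still drives its inputs through the deprecated array/texture-reference path (hipMemcpyToArray, hipBindTextureToArray, hipBindSurfaceToArray). As a hedged illustration only -- this helper is not part of the original file -- the sketch below shows how the tex_A binding could instead be expressed with the texture-object API; the kernel would then take a hipTextureObject_t parameter and read it with tex2D<float>(tex, x, y).

// Hedged sketch, not in the original source: build a texture object for
// A_array instead of binding the global texture reference tex_A.
// Assumes A_array was allocated with hipMallocArray as in main() above.
static hipTextureObject_t make_texture_object(hipArray_t array) {
    hipResourceDesc resDesc = {};
    resDesc.resType = hipResourceTypeArray;
    resDesc.res.array.array = array;

    hipTextureDesc texDesc = {};
    texDesc.addressMode[0]   = hipAddressModeBorder;   // same border behaviour as tex_A
    texDesc.addressMode[1]   = hipAddressModeBorder;
    texDesc.filterMode       = hipFilterModePoint;
    texDesc.readMode         = hipReadModeElementType;
    texDesc.normalizedCoords = 0;

    hipTextureObject_t tex = 0;
    hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);  // error check omitted
    return tex;   // pass to the kernel and sample with tex2D<float>(tex, x, y)
}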
73f5573b21caeaba83e31f3afcc04fbd70d9ef1b.cu
#include <stdio.h> #include <cuda_runtime.h> //#include <cutil.h> #define TILE_WIDTH 64 #define WIDTH_PER_THREAD 8 #define SW TILE_WIDTH/WIDTH_PER_THREAD #define N 2048 texture<float, 2, cudaReadModeElementType> tex_A; texture<float, 2, cudaReadModeElementType> tex_B; surface<void, 2> surf_C; void err_handling(cudaError_t *err, const char *str) { if (*err != cudaSuccess) { printf("%s\n", str); exit(EXIT_FAILURE); } } __global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n) { __shared__ float sA_bf[2][8*64]; __shared__ float sB_bf[2][8*64]; float *A_pref, *A_now; float *B_pref, *B_now; int x = threadIdx.x; int y = threadIdx.y; int bx = blockIdx.x*64; int by = blockIdx.y*64; int id = y*8+x; int inv_id = (id%32)/4*8 + id%4 + (id < 32 ? 0 : 4); int glbA_id = by + inv_id; int glbB_id = bx + inv_id; int row = by + y*8; int col = bx + x*8; float a0[8]; float a1[8]; float b0[8]; float b1[8]; float c00 = 0.0; float c01 = 0.0; float c02 = 0.0; float c03 = 0.0, c04 = 0.0; float c05 = 0.0; float c06 = 0.0; float c07 = 0.0; float c10 = 0.0; float c11 = 0.0; float c12 = 0.0; float c13 = 0.0, c14 = 0.0; float c15 = 0.0; float c16 = 0.0; float c17 = 0.0; float c20 = 0.0; float c21 = 0.0; float c22 = 0.0; float c23 = 0.0, c24 = 0.0; float c25 = 0.0; float c26 = 0.0; float c27 = 0.0; float c30 = 0.0; float c31 = 0.0; float c32 = 0.0; float c33 = 0.0, c34 = 0.0; float c35 = 0.0; float c36 = 0.0; float c37 = 0.0; float c40 = 0.0; float c41 = 0.0; float c42 = 0.0; float c43 = 0.0, c44 = 0.0; float c45 = 0.0; float c46 = 0.0; float c47 = 0.0; float c50 = 0.0; float c51 = 0.0; float c52 = 0.0; float c53 = 0.0, c54 = 0.0; float c55 = 0.0; float c56 = 0.0; float c57 = 0.0; float c60 = 0.0; float c61 = 0.0; float c62 = 0.0; float c63 = 0.0, c64 = 0.0; float c65 = 0.0; float c66 = 0.0; float c67 = 0.0; float c70 = 0.0; float c71 = 0.0; float c72 = 0.0; float c73 = 0.0, c74 = 0.0; float c75 = 0.0; float c76 = 0.0; float c77 = 0.0; sA_bf[0][0*64+id] = tex2D(tex_A, 0, glbA_id); sA_bf[0][1*64+id] = tex2D(tex_A, 1, glbA_id); sA_bf[0][2*64+id] = tex2D(tex_A, 2, glbA_id); sA_bf[0][3*64+id] = tex2D(tex_A, 3, glbA_id); sA_bf[0][4*64+id] = tex2D(tex_A, 4, glbA_id); sA_bf[0][5*64+id] = tex2D(tex_A, 5, glbA_id); sA_bf[0][6*64+id] = tex2D(tex_A, 6, glbA_id); sA_bf[0][7*64+id] = tex2D(tex_A, 7, glbA_id); sB_bf[0][0*64+id] = tex2D(tex_B, glbB_id, 0); sB_bf[0][1*64+id] = tex2D(tex_B, glbB_id, 1); sB_bf[0][2*64+id] = tex2D(tex_B, glbB_id, 2); sB_bf[0][3*64+id] = tex2D(tex_B, glbB_id, 3); sB_bf[0][4*64+id] = tex2D(tex_B, glbB_id, 4); sB_bf[0][5*64+id] = tex2D(tex_B, glbB_id, 5); sB_bf[0][6*64+id] = tex2D(tex_B, glbB_id, 6); sB_bf[0][7*64+id] = tex2D(tex_B, glbB_id, 7); A_pref = sA_bf[1]; B_pref = sB_bf[1]; A_now = sA_bf[0]; B_now = sB_bf[0]; int track_bf = 0; for (int t = 8; t < k; t += 8) { __syncthreads(); A_pref[id] = tex2D(tex_A, t, glbA_id); B_pref[id] = tex2D(tex_B, glbB_id, t); ((float4*)a0)[0] = ((float4*)A_now)[y]; ((float4*)a0)[1] = ((float4*)A_now)[y+8]; ((float4*)b0)[0] = ((float4*)B_now)[x]; ((float4*)b0)[1] = ((float4*)B_now)[x+8]; #pragma unroll for (int i = 1; i < 8; ++i) { int base = i * 16; A_pref[i*64+id] = tex2D(tex_A, t+i, glbA_id); B_pref[i*64+id] = tex2D(tex_B, glbB_id, t+i); if (i&1) { c00 += a0[0] * b0[0]; c01 += a0[0] * b0[1]; c02 += a0[0] * b0[2]; c03 += a0[0] * b0[3]; c04 += a0[0] * b0[4]; c05 += a0[0] * b0[5]; c06 += a0[0] * b0[6]; c07 += a0[0] * b0[7]; ((float4*)a1)[0] = ((float4*)A_now)[base+y]; c10 += a0[1] * b0[0]; c11 += a0[1] * b0[1]; c12 += a0[1] * b0[2]; c13 += a0[1] * 
b0[3]; c14 += a0[1] * b0[4]; c15 += a0[1] * b0[5]; c16 += a0[1] * b0[6]; c17 += a0[1] * b0[7]; c20 += a0[2] * b0[0]; c21 += a0[2] * b0[1]; c22 += a0[2] * b0[2]; c23 += a0[2] * b0[3]; c24 += a0[2] * b0[4]; c25 += a0[2] * b0[5]; c26 += a0[2] * b0[6]; c27 += a0[2] * b0[7]; ((float4*)b1)[0] = ((float4*)B_now)[base+x]; c30 += a0[3] * b0[0]; c31 += a0[3] * b0[1]; c32 += a0[3] * b0[2]; c33 += a0[3] * b0[3]; c34 += a0[3] * b0[4]; c35 += a0[3] * b0[5]; c36 += a0[3] * b0[6]; c37 += a0[3] * b0[7]; c40 += a0[4] * b0[0]; c41 += a0[4] * b0[1]; c42 += a0[4] * b0[2]; c43 += a0[4] * b0[3]; c44 += a0[4] * b0[4]; c45 += a0[4] * b0[5]; c46 += a0[4] * b0[6]; c47 += a0[4] * b0[7]; ((float4*)a1)[1] = ((float4*)A_now)[base+y+8]; c50 += a0[5] * b0[0]; c51 += a0[5] * b0[1]; c52 += a0[5] * b0[2]; c53 += a0[5] * b0[3]; c54 += a0[5] * b0[4]; c55 += a0[5] * b0[5]; c56 += a0[5] * b0[6]; c57 += a0[5] * b0[7]; c60 += a0[6] * b0[0]; c61 += a0[6] * b0[1]; c62 += a0[6] * b0[2]; c63 += a0[6] * b0[3]; c64 += a0[6] * b0[4]; c65 += a0[6] * b0[5]; c66 += a0[6] * b0[6]; c67 += a0[6] * b0[7]; ((float4*)b1)[1] = ((float4*)B_now)[base+x+8]; c70 += a0[7] * b0[0]; c71 += a0[7] * b0[1]; c72 += a0[7] * b0[2]; c73 += a0[7] * b0[3]; c74 += a0[7] * b0[4]; c75 += a0[7] * b0[5]; c76 += a0[7] * b0[6]; c77 += a0[7] * b0[7]; } else { c00 += a1[0] * b1[0]; c01 += a1[0] * b1[1]; c02 += a1[0] * b1[2]; c03 += a1[0] * b1[3]; c04 += a1[0] * b1[4]; c05 += a1[0] * b1[5]; c06 += a1[0] * b1[6]; c07 += a1[0] * b1[7]; ((float4*)a0)[0] = ((float4*)A_now)[base+y]; c10 += a1[1] * b1[0]; c11 += a1[1] * b1[1]; c12 += a1[1] * b1[2]; c13 += a1[1] * b1[3]; c14 += a1[1] * b1[4]; c15 += a1[1] * b1[5]; c16 += a1[1] * b1[6]; c17 += a1[1] * b1[7]; c20 += a1[2] * b1[0]; c21 += a1[2] * b1[1]; c22 += a1[2] * b1[2]; c23 += a1[2] * b1[3]; c24 += a1[2] * b1[4]; c25 += a1[2] * b1[5]; c26 += a1[2] * b1[6]; c27 += a1[2] * b1[7]; ((float4*)b0)[0] = ((float4*)B_now)[base+x]; c30 += a1[3] * b1[0]; c31 += a1[3] * b1[1]; c32 += a1[3] * b1[2]; c33 += a1[3] * b1[3]; c34 += a1[3] * b1[4]; c35 += a1[3] * b1[5]; c36 += a1[3] * b1[6]; c37 += a1[3] * b1[7]; c40 += a1[4] * b1[0]; c41 += a1[4] * b1[1]; c42 += a1[4] * b1[2]; c43 += a1[4] * b1[3]; c44 += a1[4] * b1[4]; c45 += a1[4] * b1[5]; c46 += a1[4] * b1[6]; c47 += a1[4] * b1[7]; ((float4*)a0)[1] = ((float4*)A_now)[base+y+8]; c50 += a1[5] * b1[0]; c51 += a1[5] * b1[1]; c52 += a1[5] * b1[2]; c53 += a1[5] * b1[3]; c54 += a1[5] * b1[4]; c55 += a1[5] * b1[5]; c56 += a1[5] * b1[6]; c57 += a1[5] * b1[7]; c60 += a1[6] * b1[0]; c61 += a1[6] * b1[1]; c62 += a1[6] * b1[2]; c63 += a1[6] * b1[3]; c64 += a1[6] * b1[4]; c65 += a1[6] * b1[5]; c66 += a1[6] * b1[6]; c67 += a1[6] * b1[7]; ((float4*)b0)[1] = ((float4*)B_now)[base+x+8]; c70 += a1[7] * b1[0]; c71 += a1[7] * b1[1]; c72 += a1[7] * b1[2]; c73 += a1[7] * b1[3]; c74 += a1[7] * b1[4]; c75 += a1[7] * b1[5]; c76 += a1[7] * b1[6]; c77 += a1[7] * b1[7]; } } c00 += a1[0] * b1[0]; c01 += a1[0] * b1[1]; c02 += a1[0] * b1[2]; c03 += a1[0] * b1[3]; c04 += a1[0] * b1[4]; c05 += a1[0] * b1[5]; c06 += a1[0] * b1[6]; c07 += a1[0] * b1[7]; c10 += a1[1] * b1[0]; c11 += a1[1] * b1[1]; c12 += a1[1] * b1[2]; c13 += a1[1] * b1[3]; c14 += a1[1] * b1[4]; c15 += a1[1] * b1[5]; c16 += a1[1] * b1[6]; c17 += a1[1] * b1[7]; c20 += a1[2] * b1[0]; c21 += a1[2] * b1[1]; c22 += a1[2] * b1[2]; c23 += a1[2] * b1[3]; c24 += a1[2] * b1[4]; c25 += a1[2] * b1[5]; c26 += a1[2] * b1[6]; c27 += a1[2] * b1[7]; c30 += a1[3] * b1[0]; c31 += a1[3] * b1[1]; c32 += a1[3] * b1[2]; c33 += a1[3] * b1[3]; c34 += a1[3] * b1[4]; c35 += a1[3] * b1[5]; 
c36 += a1[3] * b1[6]; c37 += a1[3] * b1[7]; c40 += a1[4] * b1[0]; c41 += a1[4] * b1[1]; c42 += a1[4] * b1[2]; c43 += a1[4] * b1[3]; c44 += a1[4] * b1[4]; c45 += a1[4] * b1[5]; c46 += a1[4] * b1[6]; c47 += a1[4] * b1[7]; c50 += a1[5] * b1[0]; c51 += a1[5] * b1[1]; c52 += a1[5] * b1[2]; c53 += a1[5] * b1[3]; c54 += a1[5] * b1[4]; c55 += a1[5] * b1[5]; c56 += a1[5] * b1[6]; c57 += a1[5] * b1[7]; c60 += a1[6] * b1[0]; c61 += a1[6] * b1[1]; c62 += a1[6] * b1[2]; c63 += a1[6] * b1[3]; c64 += a1[6] * b1[4]; c65 += a1[6] * b1[5]; c66 += a1[6] * b1[6]; c67 += a1[6] * b1[7]; c70 += a1[7] * b1[0]; c71 += a1[7] * b1[1]; c72 += a1[7] * b1[2]; c73 += a1[7] * b1[3]; c74 += a1[7] * b1[4]; c75 += a1[7] * b1[5]; c76 += a1[7] * b1[6]; c77 += a1[7] * b1[7]; A_pref = sA_bf[track_bf]; B_pref = sB_bf[track_bf]; A_now = sA_bf[1-track_bf]; B_now = sB_bf[1-track_bf]; track_bf = 1 - track_bf; } __syncthreads(); ((float4*)a0)[0] = ((float4*)A_now)[y]; ((float4*)a0)[1] = ((float4*)A_now)[y+8]; ((float4*)b0)[0] = ((float4*)B_now)[x]; ((float4*)b0)[1] = ((float4*)B_now)[x+8]; #pragma unroll for (int i = 1; i < 8; ++i) { int base = i * 16; if (i&1) { c00 += a0[0] * b0[0]; c01 += a0[0] * b0[1]; c02 += a0[0] * b0[2]; c03 += a0[0] * b0[3]; c04 += a0[0] * b0[4]; c05 += a0[0] * b0[5]; c06 += a0[0] * b0[6]; c07 += a0[0] * b0[7]; ((float4*)a1)[0] = ((float4*)A_now)[base+y]; c10 += a0[1] * b0[0]; c11 += a0[1] * b0[1]; c12 += a0[1] * b0[2]; c13 += a0[1] * b0[3]; c14 += a0[1] * b0[4]; c15 += a0[1] * b0[5]; c16 += a0[1] * b0[6]; c17 += a0[1] * b0[7]; c20 += a0[2] * b0[0]; c21 += a0[2] * b0[1]; c22 += a0[2] * b0[2]; c23 += a0[2] * b0[3]; c24 += a0[2] * b0[4]; c25 += a0[2] * b0[5]; c26 += a0[2] * b0[6]; c27 += a0[2] * b0[7]; ((float4*)b1)[0] = ((float4*)B_now)[base+x]; c30 += a0[3] * b0[0]; c31 += a0[3] * b0[1]; c32 += a0[3] * b0[2]; c33 += a0[3] * b0[3]; c34 += a0[3] * b0[4]; c35 += a0[3] * b0[5]; c36 += a0[3] * b0[6]; c37 += a0[3] * b0[7]; c40 += a0[4] * b0[0]; c41 += a0[4] * b0[1]; c42 += a0[4] * b0[2]; c43 += a0[4] * b0[3]; c44 += a0[4] * b0[4]; c45 += a0[4] * b0[5]; c46 += a0[4] * b0[6]; c47 += a0[4] * b0[7]; ((float4*)a1)[1] = ((float4*)A_now)[base+y+8]; c50 += a0[5] * b0[0]; c51 += a0[5] * b0[1]; c52 += a0[5] * b0[2]; c53 += a0[5] * b0[3]; c54 += a0[5] * b0[4]; c55 += a0[5] * b0[5]; c56 += a0[5] * b0[6]; c57 += a0[5] * b0[7]; c60 += a0[6] * b0[0]; c61 += a0[6] * b0[1]; c62 += a0[6] * b0[2]; c63 += a0[6] * b0[3]; c64 += a0[6] * b0[4]; c65 += a0[6] * b0[5]; c66 += a0[6] * b0[6]; c67 += a0[6] * b0[7]; ((float4*)b1)[1] = ((float4*)B_now)[base+x+8]; c70 += a0[7] * b0[0]; c71 += a0[7] * b0[1]; c72 += a0[7] * b0[2]; c73 += a0[7] * b0[3]; c74 += a0[7] * b0[4]; c75 += a0[7] * b0[5]; c76 += a0[7] * b0[6]; c77 += a0[7] * b0[7]; } else { c00 += a1[0] * b1[0]; c01 += a1[0] * b1[1]; c02 += a1[0] * b1[2]; c03 += a1[0] * b1[3]; c04 += a1[0] * b1[4]; c05 += a1[0] * b1[5]; c06 += a1[0] * b1[6]; c07 += a1[0] * b1[7]; ((float4*)a0)[0] = ((float4*)A_now)[base+y]; c10 += a1[1] * b1[0]; c11 += a1[1] * b1[1]; c12 += a1[1] * b1[2]; c13 += a1[1] * b1[3]; c14 += a1[1] * b1[4]; c15 += a1[1] * b1[5]; c16 += a1[1] * b1[6]; c17 += a1[1] * b1[7]; c20 += a1[2] * b1[0]; c21 += a1[2] * b1[1]; c22 += a1[2] * b1[2]; c23 += a1[2] * b1[3]; c24 += a1[2] * b1[4]; c25 += a1[2] * b1[5]; c26 += a1[2] * b1[6]; c27 += a1[2] * b1[7]; ((float4*)b0)[0] = ((float4*)B_now)[base+x]; c30 += a1[3] * b1[0]; c31 += a1[3] * b1[1]; c32 += a1[3] * b1[2]; c33 += a1[3] * b1[3]; c34 += a1[3] * b1[4]; c35 += a1[3] * b1[5]; c36 += a1[3] * b1[6]; c37 += a1[3] * b1[7]; c40 += a1[4] * b1[0]; 
c41 += a1[4] * b1[1]; c42 += a1[4] * b1[2]; c43 += a1[4] * b1[3]; c44 += a1[4] * b1[4]; c45 += a1[4] * b1[5]; c46 += a1[4] * b1[6]; c47 += a1[4] * b1[7]; ((float4*)a0)[1] = ((float4*)A_now)[base+y+8]; c50 += a1[5] * b1[0]; c51 += a1[5] * b1[1]; c52 += a1[5] * b1[2]; c53 += a1[5] * b1[3]; c54 += a1[5] * b1[4]; c55 += a1[5] * b1[5]; c56 += a1[5] * b1[6]; c57 += a1[5] * b1[7]; c60 += a1[6] * b1[0]; c61 += a1[6] * b1[1]; c62 += a1[6] * b1[2]; c63 += a1[6] * b1[3]; c64 += a1[6] * b1[4]; c65 += a1[6] * b1[5]; c66 += a1[6] * b1[6]; c67 += a1[6] * b1[7]; ((float4*)b0)[1] = ((float4*)B_now)[base+x+8]; c70 += a1[7] * b1[0]; c71 += a1[7] * b1[1]; c72 += a1[7] * b1[2]; c73 += a1[7] * b1[3]; c74 += a1[7] * b1[4]; c75 += a1[7] * b1[5]; c76 += a1[7] * b1[6]; c77 += a1[7] * b1[7]; } } c00 += a1[0] * b1[0]; c01 += a1[0] * b1[1]; c02 += a1[0] * b1[2]; c03 += a1[0] * b1[3]; c04 += a1[0] * b1[4]; c05 += a1[0] * b1[5]; c06 += a1[0] * b1[6]; c07 += a1[0] * b1[7]; c10 += a1[1] * b1[0]; c11 += a1[1] * b1[1]; c12 += a1[1] * b1[2]; c13 += a1[1] * b1[3]; c14 += a1[1] * b1[4]; c15 += a1[1] * b1[5]; c16 += a1[1] * b1[6]; c17 += a1[1] * b1[7]; c20 += a1[2] * b1[0]; c21 += a1[2] * b1[1]; c22 += a1[2] * b1[2]; c23 += a1[2] * b1[3]; c24 += a1[2] * b1[4]; c25 += a1[2] * b1[5]; c26 += a1[2] * b1[6]; c27 += a1[2] * b1[7]; c30 += a1[3] * b1[0]; c31 += a1[3] * b1[1]; c32 += a1[3] * b1[2]; c33 += a1[3] * b1[3]; c34 += a1[3] * b1[4]; c35 += a1[3] * b1[5]; c36 += a1[3] * b1[6]; c37 += a1[3] * b1[7]; c40 += a1[4] * b1[0]; c41 += a1[4] * b1[1]; c42 += a1[4] * b1[2]; c43 += a1[4] * b1[3]; c44 += a1[4] * b1[4]; c45 += a1[4] * b1[5]; c46 += a1[4] * b1[6]; c47 += a1[4] * b1[7]; c50 += a1[5] * b1[0]; c51 += a1[5] * b1[1]; c52 += a1[5] * b1[2]; c53 += a1[5] * b1[3]; c54 += a1[5] * b1[4]; c55 += a1[5] * b1[5]; c56 += a1[5] * b1[6]; c57 += a1[5] * b1[7]; c60 += a1[6] * b1[0]; c61 += a1[6] * b1[1]; c62 += a1[6] * b1[2]; c63 += a1[6] * b1[3]; c64 += a1[6] * b1[4]; c65 += a1[6] * b1[5]; c66 += a1[6] * b1[6]; c67 += a1[6] * b1[7]; c70 += a1[7] * b1[0]; c71 += a1[7] * b1[1]; c72 += a1[7] * b1[2]; c73 += a1[7] * b1[3]; c74 += a1[7] * b1[4]; c75 += a1[7] * b1[5]; c76 += a1[7] * b1[6]; c77 += a1[7] * b1[7]; /* for (int i = 0; i < 7; i++) { for (int j = 0; j < 7; j++) { surf2Dwrite(c00, surf_C, (col )*sizeof(float), row , cudaBoundaryModeZero); } } */ surf2Dwrite(c00, surf_C, (col )*sizeof(float), row , cudaBoundaryModeZero); surf2Dwrite(c01, surf_C, (col+1)*sizeof(float), row , cudaBoundaryModeZero); surf2Dwrite(c02, surf_C, (col+2)*sizeof(float), row , cudaBoundaryModeZero); surf2Dwrite(c03, surf_C, (col+3)*sizeof(float), row , cudaBoundaryModeZero); surf2Dwrite(c04, surf_C, (col+4)*sizeof(float), row , cudaBoundaryModeZero); surf2Dwrite(c05, surf_C, (col+5)*sizeof(float), row , cudaBoundaryModeZero); surf2Dwrite(c06, surf_C, (col+6)*sizeof(float), row , cudaBoundaryModeZero); surf2Dwrite(c07, surf_C, (col+7)*sizeof(float), row , cudaBoundaryModeZero); surf2Dwrite(c10, surf_C, (col )*sizeof(float), row+1 , cudaBoundaryModeZero); surf2Dwrite(c11, surf_C, (col+1)*sizeof(float), row+1 , cudaBoundaryModeZero); surf2Dwrite(c12, surf_C, (col+2)*sizeof(float), row+1 , cudaBoundaryModeZero); surf2Dwrite(c13, surf_C, (col+3)*sizeof(float), row+1 , cudaBoundaryModeZero); surf2Dwrite(c14, surf_C, (col+4)*sizeof(float), row+1 , cudaBoundaryModeZero); surf2Dwrite(c15, surf_C, (col+5)*sizeof(float), row+1 , cudaBoundaryModeZero); surf2Dwrite(c16, surf_C, (col+6)*sizeof(float), row+1 , cudaBoundaryModeZero); surf2Dwrite(c17, surf_C, (col+7)*sizeof(float), row+1 
, cudaBoundaryModeZero); surf2Dwrite(c20, surf_C, (col )*sizeof(float), row+2 , cudaBoundaryModeZero); surf2Dwrite(c21, surf_C, (col+1)*sizeof(float), row+2 , cudaBoundaryModeZero); surf2Dwrite(c22, surf_C, (col+2)*sizeof(float), row+2 , cudaBoundaryModeZero); surf2Dwrite(c23, surf_C, (col+3)*sizeof(float), row+2 , cudaBoundaryModeZero); surf2Dwrite(c24, surf_C, (col+4)*sizeof(float), row+2 , cudaBoundaryModeZero); surf2Dwrite(c25, surf_C, (col+5)*sizeof(float), row+2 , cudaBoundaryModeZero); surf2Dwrite(c26, surf_C, (col+6)*sizeof(float), row+2 , cudaBoundaryModeZero); surf2Dwrite(c27, surf_C, (col+7)*sizeof(float), row+2 , cudaBoundaryModeZero); surf2Dwrite(c30, surf_C, (col )*sizeof(float), row+3 , cudaBoundaryModeZero); surf2Dwrite(c31, surf_C, (col+1)*sizeof(float), row+3 , cudaBoundaryModeZero); surf2Dwrite(c32, surf_C, (col+2)*sizeof(float), row+3 , cudaBoundaryModeZero); surf2Dwrite(c33, surf_C, (col+3)*sizeof(float), row+3 , cudaBoundaryModeZero); surf2Dwrite(c34, surf_C, (col+4)*sizeof(float), row+3 , cudaBoundaryModeZero); surf2Dwrite(c35, surf_C, (col+5)*sizeof(float), row+3 , cudaBoundaryModeZero); surf2Dwrite(c36, surf_C, (col+6)*sizeof(float), row+3 , cudaBoundaryModeZero); surf2Dwrite(c37, surf_C, (col+7)*sizeof(float), row+3 , cudaBoundaryModeZero); surf2Dwrite(c40, surf_C, (col )*sizeof(float), row+4 , cudaBoundaryModeZero); surf2Dwrite(c41, surf_C, (col+1)*sizeof(float), row+4 , cudaBoundaryModeZero); surf2Dwrite(c42, surf_C, (col+2)*sizeof(float), row+4 , cudaBoundaryModeZero); surf2Dwrite(c43, surf_C, (col+3)*sizeof(float), row+4 , cudaBoundaryModeZero); surf2Dwrite(c44, surf_C, (col+4)*sizeof(float), row+4 , cudaBoundaryModeZero); surf2Dwrite(c45, surf_C, (col+5)*sizeof(float), row+4 , cudaBoundaryModeZero); surf2Dwrite(c46, surf_C, (col+6)*sizeof(float), row+4 , cudaBoundaryModeZero); surf2Dwrite(c47, surf_C, (col+7)*sizeof(float), row+4 , cudaBoundaryModeZero); surf2Dwrite(c50, surf_C, (col )*sizeof(float), row+5 , cudaBoundaryModeZero); surf2Dwrite(c51, surf_C, (col+1)*sizeof(float), row+5 , cudaBoundaryModeZero); surf2Dwrite(c52, surf_C, (col+2)*sizeof(float), row+5 , cudaBoundaryModeZero); surf2Dwrite(c53, surf_C, (col+3)*sizeof(float), row+5 , cudaBoundaryModeZero); surf2Dwrite(c54, surf_C, (col+4)*sizeof(float), row+5 , cudaBoundaryModeZero); surf2Dwrite(c55, surf_C, (col+5)*sizeof(float), row+5 , cudaBoundaryModeZero); surf2Dwrite(c56, surf_C, (col+6)*sizeof(float), row+5 , cudaBoundaryModeZero); surf2Dwrite(c57, surf_C, (col+7)*sizeof(float), row+5 , cudaBoundaryModeZero); surf2Dwrite(c60, surf_C, (col )*sizeof(float), row+6 , cudaBoundaryModeZero); surf2Dwrite(c61, surf_C, (col+1)*sizeof(float), row+6 , cudaBoundaryModeZero); surf2Dwrite(c62, surf_C, (col+2)*sizeof(float), row+6 , cudaBoundaryModeZero); surf2Dwrite(c63, surf_C, (col+3)*sizeof(float), row+6 , cudaBoundaryModeZero); surf2Dwrite(c64, surf_C, (col+4)*sizeof(float), row+6 , cudaBoundaryModeZero); surf2Dwrite(c65, surf_C, (col+5)*sizeof(float), row+6 , cudaBoundaryModeZero); surf2Dwrite(c66, surf_C, (col+6)*sizeof(float), row+6 , cudaBoundaryModeZero); surf2Dwrite(c67, surf_C, (col+7)*sizeof(float), row+6 , cudaBoundaryModeZero); surf2Dwrite(c70, surf_C, (col )*sizeof(float), row+7 , cudaBoundaryModeZero); surf2Dwrite(c71, surf_C, (col+1)*sizeof(float), row+7 , cudaBoundaryModeZero); surf2Dwrite(c72, surf_C, (col+2)*sizeof(float), row+7 , cudaBoundaryModeZero); surf2Dwrite(c73, surf_C, (col+3)*sizeof(float), row+7 , cudaBoundaryModeZero); surf2Dwrite(c74, surf_C, (col+4)*sizeof(float), row+7 , 
cudaBoundaryModeZero); surf2Dwrite(c75, surf_C, (col+5)*sizeof(float), row+7 , cudaBoundaryModeZero); surf2Dwrite(c76, surf_C, (col+6)*sizeof(float), row+7 , cudaBoundaryModeZero); surf2Dwrite(c77, surf_C, (col+7)*sizeof(float), row+7 , cudaBoundaryModeZero); } int main(void) { cudaError_t err = cudaSuccess; int m = N; int n = N; int k = N; float *A = (float*)malloc(m*k*sizeof(float)); float *B = (float*)malloc(k*n*sizeof(float)); float *C = (float*)malloc(m*n*sizeof(float)); if (A == NULL || B == NULL || C == NULL) { printf("allocate host error!\n"); return 1; } for (int i = 0; i < m*k; ++i) { A[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX; } for (int i = 0; i < k*n; ++i) { B[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX; } float *dev_A = NULL; float *dev_B = NULL; float *dev_C = NULL; err = cudaMalloc((void**)&dev_A, m*k*sizeof(float)); err_handling(&err, "allocate device error A!"); err = cudaMalloc((void**)&dev_B, k*n*sizeof(float)); err_handling(&err, "allocate device error B!"); err = cudaMalloc((void**)&dev_C, m*n*sizeof(float)); err_handling(&err, "allocate device error C!"); err = cudaMemcpy(dev_A, A, m*k*sizeof(float), cudaMemcpyHostToDevice); err_handling(&err, "memcpy to A error!"); err = cudaMemcpy(dev_B, B, k*n*sizeof(float), cudaMemcpyHostToDevice); err_handling(&err, "memcpy to B error!"); cudaChannelFormatDesc ADesc = cudaCreateChannelDesc<float>(); cudaChannelFormatDesc BDesc = cudaCreateChannelDesc<float>(); cudaChannelFormatDesc CDesc = cudaCreateChannelDesc<float>(); cudaArray *A_array, *B_array, *C_array; cudaMallocArray(&A_array, &ADesc, k, m); cudaMallocArray(&B_array, &BDesc, n, k); cudaMallocArray(&C_array, &CDesc, n, m, cudaArraySurfaceLoadStore); cudaMemcpyToArray(A_array, 0, 0, A, m*k*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyToArray(B_array, 0, 0, B, k*n*sizeof(float), cudaMemcpyHostToDevice); cudaBindTextureToArray(tex_A, A_array); cudaBindTextureToArray(tex_B, B_array); cudaBindSurfaceToArray(surf_C, C_array); tex_A.addressMode[0] = cudaAddressModeBorder; tex_A.addressMode[1] = cudaAddressModeBorder; tex_B.addressMode[0] = cudaAddressModeBorder; tex_B.addressMode[1] = cudaAddressModeBorder; dim3 dimGrid((n-1)/TILE_WIDTH+1, (m-1)/TILE_WIDTH+1, 1); dim3 dimBlock(TILE_WIDTH/WIDTH_PER_THREAD, TILE_WIDTH/WIDTH_PER_THREAD, 1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); matMul<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, m, k, n); cudaEventRecord(stop, 0); cudaEventSynchronize(start); cudaEventSynchronize(stop); float time_elapsed = 0; cudaEventElapsedTime(&time_elapsed, start, stop); printf("%fms\n", time_elapsed); err = cudaMemcpyFromArray(C, C_array, 0, 0, m*n*sizeof(float), cudaMemcpyDeviceToHost); //err = cudaMemcpy(C, dev_C, m*n*sizeof(float), cudaMemcpyDeviceToHost); err_handling(&err, "memcpy to host C error!"); FILE *fp = fopen("gpu.out", "w"); for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { fprintf(fp, "%f\n", C[i*N+j]); } } fclose(fp); err = cudaFree(dev_A); err_handling(&err, "mem free A error!"); err = cudaFree(dev_B); err_handling(&err, "mem free B error!"); err = cudaFree(dev_C); err_handling(&err, "mem free C error!"); err = cudaDeviceReset(); err_handling(&err, "device reset error!"); return 0; }
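The CUDA main() above dumps the GPU result to gpu.out but never checks it against a reference. The routine below is a minimal sketch added here for illustration (it is not part of the original benchmark); it assumes the same row-major N x N layout as the file above and needs <math.h> for fabsf.

// Compare the GPU result C against a straightforward CPU GEMM.
// For N = 2048 a full sweep is slow, so stop after the first few mismatches
// (or restrict i to a handful of rows when iterating quickly).
static int verify_result(const float *A, const float *B, const float *C, int n) {
    int mismatches = 0;
    for (int i = 0; i < n && mismatches < 10; i++) {
        for (int j = 0; j < n && mismatches < 10; j++) {
            float ref = 0.0f;
            for (int kk = 0; kk < n; kk++) ref += A[i * n + kk] * B[kk * n + j];
            // loose tolerance: single-precision sums over 2048 products drift
            if (fabsf(ref - C[i * n + j]) > 1e-2f * fabsf(ref) + 1e-3f) {
                printf("mismatch at (%d,%d): cpu=%f gpu=%f\n", i, j, ref, C[i * n + j]);
                mismatches++;
            }
        }
    }
    return mismatches;
}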
0a10fbdd21dd2fa8d12397150e1bb303bf0ced28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_4_right; int xdim0_update_halo_kernel5_plus_4_right_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_4_right; int ydim0_update_halo_kernel5_plus_4_right_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_4_right; int xdim1_update_halo_kernel5_plus_4_right_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_4_right; int ydim1_update_halo_kernel5_plus_4_right_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_plus_4_right * (y) + \ xdim0_update_halo_kernel5_plus_4_right * \ ydim0_update_halo_kernel5_plus_4_right * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_plus_4_right * (y) + \ xdim1_update_halo_kernel5_plus_4_right * \ ydim1_update_halo_kernel5_plus_4_right * (z)) // user function __device__ inline void update_halo_kernel5_plus_4_right_gpu(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(-4, 0, 0)]); if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(-4, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_4_right(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right + idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right * ydim0_update_halo_kernel5_plus_4_right; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right + idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right * ydim1_update_halo_kernel5_plus_4_right; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_4_right_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 135)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(135, "update_halo_kernel5_plus_4_right"); OPS_kernels[135].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = 
args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h || ydim0 != ydim0_update_halo_kernel5_plus_4_right_h || xdim1 != xdim1_update_halo_kernel5_plus_4_right_h || ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_right, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_plus_4_right_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_right, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_plus_4_right_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_right, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_plus_4_right_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_right, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_plus_4_right_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[135].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_right), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[135].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[135].mpi_time += t2 - t1; OPS_kernels[135].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[135].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
0a10fbdd21dd2fa8d12397150e1bb303bf0ced28.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_4_right; int xdim0_update_halo_kernel5_plus_4_right_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_4_right; int ydim0_update_halo_kernel5_plus_4_right_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_4_right; int xdim1_update_halo_kernel5_plus_4_right_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_4_right; int ydim1_update_halo_kernel5_plus_4_right_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_plus_4_right * (y) + \ xdim0_update_halo_kernel5_plus_4_right * \ ydim0_update_halo_kernel5_plus_4_right * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_plus_4_right * (y) + \ xdim1_update_halo_kernel5_plus_4_right * \ ydim1_update_halo_kernel5_plus_4_right * (z)) // user function __device__ inline void update_halo_kernel5_plus_4_right_gpu(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(-4, 0, 0)]); if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(-4, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_4_right(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right + idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right * ydim0_update_halo_kernel5_plus_4_right; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right + idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right * ydim1_update_halo_kernel5_plus_4_right; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_4_right_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 135)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(135, "update_halo_kernel5_plus_4_right"); OPS_kernels[135].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 
= args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h || ydim0 != ydim0_update_halo_kernel5_plus_4_right_h || xdim1 != xdim1_update_halo_kernel5_plus_4_right_h || ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_right, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_plus_4_right_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_right, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_plus_4_right_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_right, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_plus_4_right_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_right, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_plus_4_right_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[135].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel5_plus_4_right<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[135].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[135].mpi_time += t2 - t1; OPS_kernels[135].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[135].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
1eda901a9fbe8684888af960ab2a9e8ef2c47518.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include "para.h" struct kernel_para{ volatile int *A, *B; volatile int *C; volatile int size; volatile int block; volatile int thread; volatile int warp; volatile int req; volatile int funcId; volatile int taskId; volatile int doneHost; int doneGPU; }; struct kernel_para_GPU{ int warpId; int baseId; int taskId; }; __device__ void init_queue(struct kernel_para_GPU *warpPool){ int warpIdxx = (blockIdx.x*blockDim.x+threadIdx.x)/32; if((threadIdx.x) != 0){ warpPool[warpIdxx+threadIdx.x].warpId = 0; }else{ warpPool[warpIdxx+threadIdx.x].warpId = 1; } } __device__ void MatMul_kernel(int *A, int *B, int *C, int Size, int baseTid){ #if 1 int row = baseTid + (threadIdx.x & 0x1f); for (int j = 0; j < Size; j++){ int sum = 0; for (int k = 0; k < Size; k++){ int a = A[row * Size + k]; int b = B[k * Size + j]; sum += a * b; } C[row * Size + j] = sum; } #endif } __device__ void VecAdd_kernel(int *A, int *B, int *C, int size, int baseTid) { int i = baseTid + (threadIdx.x & 0x1f); //printf("In vec add with tid %d from block %d\n",i, blockIdx.x); // for(int j=0; j<200000; j++) if (i < size) C[i] = A[i] + B[i]; } __global__ void deviceRT(volatile int *done, volatile int *totalExecTasks, struct kernel_para_GPU *warpPool, volatile struct kernel_para *taskBuffer, struct kernel_para *taskArgs, volatile int *exec, volatile int *totalScheTasks){ int warpIdxx = (blockIdx.x*blockDim.x + threadIdx.x)/32; __shared__ int warp; __shared__ int warpQPointer; __shared__ int base; int threadDone; // Init warp queue contents and pointers #if 1 if(threadIdx.x < QSize){ init_queue(warpPool); warp = 0; warpQPointer = 0; base = 0; } __syncthreads(); #endif // scheduling in master warps if(threadIdx.x < 32) { while(!(*done)){ if(threadIdx.x < QSize){ // if(*done) return; if(taskBuffer[warpQPointer*BSize+blockIdx.x].req == 1 && !(*done)){ warp = taskBuffer[warpQPointer*BSize+blockIdx.x].warp; // search free warps while(1){ threadDone = 0; if(warpPool[warpIdxx+threadIdx.x].warpId == 0){ if(atomicSub(&warp, 1) > 0){ warpPool[warpIdxx+threadIdx.x].taskId = taskBuffer[warpQPointer*BSize+blockIdx.x].taskId; warpPool[warpIdxx+threadIdx.x].baseId = atomicAdd(&base, 1)*32; warpPool[warpIdxx+threadIdx.x].warpId = 1; __threadfence_block(); }// End if(warp > 0) }// End if (warpQ->contents) if(warp <= 0){ threadDone = 1; } if(__all(threadDone == 1) != 0){ if(threadIdx.x == 0){ taskBuffer[warpQPointer*BSize+blockIdx.x].req = 0; base = 0; // atomicAdd((int*)&totalExecTasks[0],1); } break; }// End warp vote }//End while(1) } // End taskBuffer if if(threadIdx.x == 0) { warpQPointer++; if(warpQPointer == SSize){ warpQPointer = 0; } } }// End while(!(*done)) }//End if(threadIdx.x < 32) } #if 1 else{ #if 1 while(!(*exec)){ // if(*exec) return; if(warpPool[warpIdxx].warpId == 1 && !(*exec)){ MatMul_kernel((int*)taskArgs[warpPool[warpIdxx].taskId].A, (int*)taskArgs[warpPool[warpIdxx].taskId].B, (int*)taskArgs[warpPool[warpIdxx].taskId].C, taskArgs[warpPool[warpIdxx].taskId].size, warpPool[warpIdxx].baseId); if((threadIdx.x & 0x1f) == 0){ if((atomicSub((int*)&taskArgs[warpPool[warpIdxx].taskId].doneGPU,1)) ==1){ taskArgs[warpPool[warpIdxx].taskId].doneHost = 0; //atomicAdd((int*)&totalExecTasks[blockIdx.x],1); atomicAdd((int*)&totalScheTasks[0],1); } warpPool[warpIdxx].warpId = 0; __threadfence_block(); } } } #endif }// End else #endif }
1eda901a9fbe8684888af960ab2a9e8ef2c47518.cu
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include "para.h" struct kernel_para{ volatile int *A, *B; volatile int *C; volatile int size; volatile int block; volatile int thread; volatile int warp; volatile int req; volatile int funcId; volatile int taskId; volatile int doneHost; int doneGPU; }; struct kernel_para_GPU{ int warpId; int baseId; int taskId; }; __device__ void init_queue(struct kernel_para_GPU *warpPool){ int warpIdxx = (blockIdx.x*blockDim.x+threadIdx.x)/32; if((threadIdx.x) != 0){ warpPool[warpIdxx+threadIdx.x].warpId = 0; }else{ warpPool[warpIdxx+threadIdx.x].warpId = 1; } } __device__ void MatMul_kernel(int *A, int *B, int *C, int Size, int baseTid){ #if 1 int row = baseTid + (threadIdx.x & 0x1f); for (int j = 0; j < Size; j++){ int sum = 0; for (int k = 0; k < Size; k++){ int a = A[row * Size + k]; int b = B[k * Size + j]; sum += a * b; } C[row * Size + j] = sum; } #endif } __device__ void VecAdd_kernel(int *A, int *B, int *C, int size, int baseTid) { int i = baseTid + (threadIdx.x & 0x1f); //printf("In vec add with tid %d from block %d\n",i, blockIdx.x); // for(int j=0; j<200000; j++) if (i < size) C[i] = A[i] + B[i]; } __global__ void deviceRT(volatile int *done, volatile int *totalExecTasks, struct kernel_para_GPU *warpPool, volatile struct kernel_para *taskBuffer, struct kernel_para *taskArgs, volatile int *exec, volatile int *totalScheTasks){ int warpIdxx = (blockIdx.x*blockDim.x + threadIdx.x)/32; __shared__ int warp; __shared__ int warpQPointer; __shared__ int base; int threadDone; // Init warp queue contents and pointers #if 1 if(threadIdx.x < QSize){ init_queue(warpPool); warp = 0; warpQPointer = 0; base = 0; } __syncthreads(); #endif // scheduling in master warps if(threadIdx.x < 32) { while(!(*done)){ if(threadIdx.x < QSize){ // if(*done) return; if(taskBuffer[warpQPointer*BSize+blockIdx.x].req == 1 && !(*done)){ warp = taskBuffer[warpQPointer*BSize+blockIdx.x].warp; // search free warps while(1){ threadDone = 0; if(warpPool[warpIdxx+threadIdx.x].warpId == 0){ if(atomicSub(&warp, 1) > 0){ warpPool[warpIdxx+threadIdx.x].taskId = taskBuffer[warpQPointer*BSize+blockIdx.x].taskId; warpPool[warpIdxx+threadIdx.x].baseId = atomicAdd(&base, 1)*32; warpPool[warpIdxx+threadIdx.x].warpId = 1; __threadfence_block(); }// End if(warp > 0) }// End if (warpQ->contents) if(warp <= 0){ threadDone = 1; } if(__all(threadDone == 1) != 0){ if(threadIdx.x == 0){ taskBuffer[warpQPointer*BSize+blockIdx.x].req = 0; base = 0; // atomicAdd((int*)&totalExecTasks[0],1); } break; }// End warp vote }//End while(1) } // End taskBuffer if if(threadIdx.x == 0) { warpQPointer++; if(warpQPointer == SSize){ warpQPointer = 0; } } }// End while(!(*done)) }//End if(threadIdx.x < 32) } #if 1 else{ #if 1 while(!(*exec)){ // if(*exec) return; if(warpPool[warpIdxx].warpId == 1 && !(*exec)){ MatMul_kernel((int*)taskArgs[warpPool[warpIdxx].taskId].A, (int*)taskArgs[warpPool[warpIdxx].taskId].B, (int*)taskArgs[warpPool[warpIdxx].taskId].C, taskArgs[warpPool[warpIdxx].taskId].size, warpPool[warpIdxx].baseId); if((threadIdx.x & 0x1f) == 0){ if((atomicSub((int*)&taskArgs[warpPool[warpIdxx].taskId].doneGPU,1)) ==1){ taskArgs[warpPool[warpIdxx].taskId].doneHost = 0; //atomicAdd((int*)&totalExecTasks[blockIdx.x],1); atomicAdd((int*)&totalScheTasks[0],1); } warpPool[warpIdxx].warpId = 0; __threadfence_block(); } } } #endif }// End else #endif }
5e44ca03c1fdb2b30e9942eee8a059ef84107d51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------- // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision: 5633 $ // $Date: 2009-07-01 15:02:51 +1000 (Wed, 01 Jul 2009) $ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * scan_cta.cu * * @brief CUDPP CTA-level scan routines */ /** \defgroup cudpp_cta CUDPP CTA-Level API * The CUDPP CTA-Level API contains functions that run on the GPU * device. These are CUDA \c __device__ functions that are called * from within other CUDA device functions (typically * \link cudpp_kernel CUDPP Kernel-Level API\endlink functions). * They are called CTA-level functions because they typically process * s_data "owned" by each CTA within shared memory, and are agnostic of * any other CTAs that may be running (or how many CTAs are running), * other than to compute appropriate global memory addresses. * @{ */ /** @name Scan Functions * @{ */ #include "cudpp_globals.h" #include "cudpp_util.h" #include <math.h> #include "cudpp.h" /** * @brief Macro to insert necessary __syncthreads() in device emulation mode */ #ifdef __DEVICE_EMULATION__ #define __EMUSYNC __syncthreads() #else #define __EMUSYNC #endif /** * @brief Template class containing compile-time parameters to the scan functions * * ScanTraits is passed as a template parameter to all scan functions. By * using these compile-time functions we can enable generic code while * maintaining the highest performance. This is crucial for the performance * of low-level workhorse algorithms like scan. * * @param T The datatype of the scan * @param oper The ::CUDPPOperator to use for the scan (add, max, etc.) * @param multiRow True if this is a multi-row scan * @param unroll True if scan inner loops should be unrolled * @param sums True if each block should write it's sum to the d_blockSums array (false for single-block scans) * @param backward True if this is a backward scan * @param fullBlock True if all blocks in this scan are full (CTA_SIZE * SCAN_ELEMENTS_PER_THREAD elements) * @param exclusive True for exclusive scans, false for inclusive scans */ template <class T, CUDPPOperator oper, bool backward, bool exclusive, bool multiRow, bool sums, bool fullBlock> class ScanTraits { public: //! Returns true if this is a backward scan static __device__ bool isBackward() { return backward; }; //! Returns true if this is an exclusive scan static __device__ bool isExclusive() { return exclusive; }; //! Returns true if this a multi-row scan. static __device__ bool isMultiRow() { return multiRow; }; //! Returns true if this scan writes the sum of each block to the d_blockSums array (multi-block scans) static __device__ bool writeSums() { return sums; }; //! Returns true if this is a full scan -- all blocks process CTA_SIZE * SCAN_ELEMENTS_PER_THREAD elements static __device__ bool isFullBlock() { return fullBlock; }; //! The operator function used for the scan static __device__ T op(const T a, const T b) { return Operator<T, oper>::op(a, b); } //! The identity value used by the scan static __device__ T identity() { return Operator<T, oper>::identity(); } }; //! This is used to insert syncthreads to avoid perf loss caused by 128-bit //! 
load overlap that happens on G80. This gives about a 15% boost on scans on //! G80. //! @todo Parameterize this in case this perf detail changes on future GPUs. #define DISALLOW_LOADSTORE_OVERLAP 1 /** * @brief Handles loading input s_data from global memory to shared memory * (vec4 version) * * Load a chunk of 8*blockDim.x elements from global memory into a * shared memory array. Each thread loads two T4 elements (where * T4 is, e.g. int4 or float4), computes the scan of those two vec4s in * thread local arrays (in registers), and writes the two total sums of the * vec4s into shared memory, where they will be cooperatively scanned with * the other partial sums by all threads in the CTA. * * @param[out] s_out The output (shared) memory array * @param[out] threadScan0 Intermediate per-thread partial sums array 1 * @param[out] threadScan1 Intermediate per-thread partial sums array 2 * @param[in] d_in The input (device) memory array * @param[in] numElements The number of elements in the array being scanned * @param[in] iDataOffset the offset of the input array in global memory for this * thread block * @param[out] ai The shared memory address for the thread's first element * (returned for reuse) * @param[out] bi The shared memory address for the thread's second element * (returned for reuse) * @param[out] aiDev The device memory address for this thread's first element * (returned for reuse) * @param[out] biDev The device memory address for this thread's second element * (returned for reuse) */ template <class T, class traits> __device__ void loadSharedChunkFromMem4(T *s_out, T threadScan0[4], T threadScan1[4], const T *d_in, int numElements, int iDataOffset, int &ai, int &bi, int &aiDev, int &biDev) { int thid = threadIdx.x; aiDev = iDataOffset + thid; biDev = aiDev + blockDim.x; // convert to 4-vector typename typeToVector<T,4>::Result tempData; typename typeToVector<T,4>::Result* inData = (typename typeToVector<T,4>::Result*)d_in; ai = thid; bi = thid + blockDim.x; // read into tempData; if (traits::isBackward()) { int i = aiDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { tempData = inData[aiDev]; threadScan0[3] = tempData.w; threadScan0[2] = traits::op(tempData.z, threadScan0[3]); threadScan0[1] = traits::op(tempData.y, threadScan0[2]); threadScan0[0] = s_out[ai] = traits::op(tempData.x, threadScan0[1]); } else { threadScan0[3] = traits::identity(); threadScan0[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan0[3]); threadScan0[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan0[2]); threadScan0[0] = s_out[ai] = traits::op((i < numElements) ? d_in[i] : traits::identity(), threadScan0[1]); } #ifdef DISALLOW_LOADSTORE_OVERLAP __syncthreads(); #endif i = biDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { tempData = inData[biDev]; threadScan1[3] = tempData.w; threadScan1[2] = traits::op(tempData.z, threadScan1[3]); threadScan1[1] = traits::op(tempData.y, threadScan1[2]); threadScan1[0] = s_out[bi] = traits::op(tempData.x, threadScan1[1]); } else { threadScan1[3] = traits::identity(); threadScan1[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan1[3]); threadScan1[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan1[2]); threadScan1[0] = s_out[bi] = traits::op((i < numElements) ? 
d_in[i] : traits::identity(), threadScan1[1]); } __syncthreads(); // reverse s_data in shared memory if (ai < CTA_SIZE) { unsigned int leftIdx = ai; unsigned int rightIdx = (2 * CTA_SIZE - 1) - ai; if (leftIdx < rightIdx) { T tmp = s_out[leftIdx]; s_out[leftIdx] = s_out[rightIdx]; s_out[rightIdx] = tmp; } } __syncthreads(); } else { int i = aiDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { tempData = inData[aiDev]; threadScan0[0] = tempData.x; threadScan0[1] = traits::op(tempData.y, threadScan0[0]); threadScan0[2] = traits::op(tempData.z, threadScan0[1]); threadScan0[3] = s_out[ai] = traits::op(tempData.w, threadScan0[2]); } else { threadScan0[0] = (i < numElements) ? d_in[i] : traits::identity(); threadScan0[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan0[0]); threadScan0[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan0[1]); threadScan0[3] = s_out[ai] = traits::op(((i+3) < numElements) ? d_in[i+3] : traits::identity(), threadScan0[2]); } #ifdef DISALLOW_LOADSTORE_OVERLAP __syncthreads(); #endif i = biDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { tempData = inData[biDev]; threadScan1[0] = tempData.x; threadScan1[1] = traits::op(tempData.y, threadScan1[0]); threadScan1[2] = traits::op(tempData.z, threadScan1[1]); threadScan1[3] = s_out[bi] = traits::op(tempData.w, threadScan1[2]); } else { threadScan1[0] = (i < numElements) ? d_in[i] : traits::identity(); threadScan1[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan1[0]); threadScan1[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan1[1]); threadScan1[3] = s_out[bi] = traits::op(((i+3) < numElements) ? d_in[i+3] : traits::identity(), threadScan1[2]); } __syncthreads(); } } /** * @brief Handles storing result s_data from shared memory to global memory * (vec4 version) * * Store a chunk of SCAN_ELTS_PER_THREAD*blockDim.x elements from shared memory * into a device memory array. Each thread stores reads two elements from shared * memory, adds them to the intermediate sums computed in * loadSharedChunkFromMem4(), and writes two T4 elements (where * T4 is, e.g. int4 or float4) to global memory. 
* * @param[out] d_out The output (device) memory array * @param[in] threadScan0 Intermediate per-thread partial sums array 1 * (contents computed in loadSharedChunkFromMem4()) * @param[in] threadScan1 Intermediate per-thread partial sums array 2 * (contents computed in loadSharedChunkFromMem4()) * @param[in] s_in The input (shared) memory array * @param[in] numElements The number of elements in the array being scanned * @param[in] oDataOffset the offset of the output array in global memory * for this thread block * @param[in] ai The shared memory address for the thread's first element * (computed in loadSharedChunkFromMem4()) * @param[in] bi The shared memory address for the thread's second element * (computed in loadSharedChunkFromMem4()) * @param[in] aiDev The device memory address for this thread's first element * (computed in loadSharedChunkFromMem4()) * @param[in] biDev The device memory address for this thread's second element * (computed in loadSharedChunkFromMem4()) */ template <class T, class traits> __device__ void storeSharedChunkToMem4(T *d_out, T threadScan0[4], T threadScan1[4], T *s_in, int numElements, int oDataOffset, int ai, int bi, int aiDev, int biDev) { // Convert to 4-vector typename typeToVector<T,4>::Result tempData; typename typeToVector<T,4>::Result* outData = (typename typeToVector<T,4>::Result*)d_out; // write results to global memory if (traits::isBackward()) { if (ai < CTA_SIZE) { unsigned int leftIdx = ai; unsigned int rightIdx = (2 * CTA_SIZE - 1) - ai; if (leftIdx < rightIdx) { T tmp = s_in[leftIdx]; s_in[leftIdx] = s_in[rightIdx]; s_in[rightIdx] = tmp; } } __syncthreads(); T temp = s_in[ai]; if (traits::isExclusive()) { tempData.w = temp; tempData.z = traits::op(temp, threadScan0[3]); tempData.y = traits::op(temp, threadScan0[2]); tempData.x = traits::op(temp, threadScan0[1]); } else { tempData.w = traits::op(temp, threadScan0[3]); tempData.z = traits::op(temp, threadScan0[2]); tempData.y = traits::op(temp, threadScan0[1]); tempData.x = traits::op(temp, threadScan0[0]); } int i = aiDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { outData[aiDev] = tempData; } else { if (i < numElements) { d_out[i] = tempData.x; if (i+1 < numElements) { d_out[i+1] = tempData.y; if (i+2 < numElements) { d_out[i+2] = tempData.z; }}} } #ifdef DISALLOW_LOADSTORE_OVERLAP __syncthreads(); #endif temp = s_in[bi]; if (traits::isExclusive()) { tempData.w = temp; tempData.z = traits::op(temp, threadScan1[3]); tempData.y = traits::op(temp, threadScan1[2]); tempData.x = traits::op(temp, threadScan1[1]); } else { tempData.w = traits::op(temp, threadScan1[3]); tempData.z = traits::op(temp, threadScan1[2]); tempData.y = traits::op(temp, threadScan1[1]); tempData.x = traits::op(temp, threadScan1[0]); } i = biDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { outData[biDev] = tempData; } else { if (i < numElements) { d_out[i] = tempData.x; if (i+1 < numElements) { d_out[i+1] = tempData.y; if (i+2 < numElements) { d_out[i+2] = tempData.z; }}} } } else { T temp; temp = s_in[ai]; if (traits::isExclusive()) { tempData.x = temp; tempData.y = traits::op(temp, threadScan0[0]); tempData.z = traits::op(temp, threadScan0[1]); tempData.w = traits::op(temp, threadScan0[2]); } else { tempData.x = traits::op(temp, threadScan0[0]); tempData.y = traits::op(temp, threadScan0[1]); tempData.z = traits::op(temp, threadScan0[2]); tempData.w = traits::op(temp, threadScan0[3]); } int i = aiDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { outData[aiDev] = tempData; } else { // we 
can't use vec4 because the original array isn't a multiple of // 4 elements if ( i < numElements) { d_out[i] = tempData.x; if ((i+1) < numElements) { d_out[i+1] = tempData.y; if ((i+2) < numElements) { d_out[i+2] = tempData.z; } } } } #ifdef DISALLOW_LOADSTORE_OVERLAP __syncthreads(); #endif temp = s_in[bi]; if (traits::isExclusive()) { tempData.x = temp; tempData.y = traits::op(temp, threadScan1[0]); tempData.z = traits::op(temp, threadScan1[1]); tempData.w = traits::op(temp, threadScan1[2]); } else { tempData.x = traits::op(temp, threadScan1[0]); tempData.y = traits::op(temp, threadScan1[1]); tempData.z = traits::op(temp, threadScan1[2]); tempData.w = traits::op(temp, threadScan1[3]); } i = biDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { outData[biDev] = tempData; } else { // we can't use vec4 because the original array isn't a multiple of // 4 elements if ( i < numElements) { d_out[i] = tempData.x; if ((i+1) < numElements) { d_out[i+1] = tempData.y; if ((i+2) < numElements) { d_out[i+2] = tempData.z; } } } } } } /** @brief Scan all warps of a CTA without synchronization * * The warp-scan algorithm breaks a block of data into warp-sized chunks, and * scans the chunks independently with a warp of threads each. Because warps * execute instructions in SIMD fashion, there is no need to synchronize in * order to share data within a warp (only across warps). Also, in SIMD the * most efficient algorithm is a step-efficient algorithm. Therefore, within * each warp we use a Hillis-and-Steele-style scan that takes log2(N) steps * to scan the warp [Daniel Hillis and Guy Steele 1986], rather than the * work-efficient tree-based algorithm described by Guy Blelloch [1990] that * takes 2 * log(N) steps and is in general more complex to implement. * Previous versions of CUDPP used the Blelloch algorithm. For current GPUs, * the warp size is 32, so this takes five steps per warp. * * Each thread is responsible for a single element of the array to be scanned. * Each thread inputs a single value to the scan via \a val and returns * its own scanned result element. The threads of each warp cooperate * via the shared memory array \a s_data to scan WARP_SIZE elements. * * Template parameter \a maxlevel allows this warpscan to be performed on * partial warps. For example, if only the first 8 elements of each warp need * to be scanned, then warpscan only performs log2(8)=3 steps rather than 5. * * The computation uses 2 * WARP_SIZE elements of shared memory per warp to * enable warps to offset beyond their input data and receive the identity * element without using any branch instructions. * * \note s_data is declared volatile here to prevent the compiler from * optimizing away writes to shared memory, and ensure correct intrawarp * communication in the absence of __syncthreads. 
* * @return The result of the warp scan for the current thread * @param[in] val The current threads's input to the scan * @param[in,out] s_data A pointer to a temporary shared array of 2*CTA_SIZE * elements used to compute the warp scans */ template<class T, class traits,int maxlevel> __device__ T warpscan(T val, volatile T* s_data) { // The following is the same as 2 * 32 * warpId + threadInWarp = // 64*(threadIdx.x >> 5) + (threadIdx.x & (WARP_SIZE-1)) int idx = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE-1)); s_data[idx] = traits::identity(); idx += WARP_SIZE; T t = s_data[idx] = val; __EMUSYNC; // This code is needed because the warp size of device emulation // is only 1 thread, so sync-less cooperation within a warp doesn't // work. #ifdef __DEVICE_EMULATION__ t = s_data[idx - 1]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; t = s_data[idx - 2]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; t = s_data[idx - 4]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; t = s_data[idx - 8]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; t = s_data[idx - 16]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; #else if (0 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 1]); } if (1 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 2]); } if (2 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 4]); } if (3 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 8]); } if (4 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx -16]); } #endif return s_data[idx-1]; // convert inclusive -> exclusive } /** @brief Perform a full CTA scan using the warp-scan algorithm * * As described in the comment for warpscan(), the warp-scan algorithm breaks * a block of data into warp-sized chunks, and scans the chunks independently * with a warp of threads each. To complete the scan, each warp <i>j</i> then * writes its last element to element <i>j</i> of a temporary shared array. * Then a single warp exclusive-scans these "warp sums". Finally, each thread * adds the result of the warp sum scan to the result of the scan from the * first pass. * * Because we scan 2*CTA_SIZE elements per thread, we have to call warpscan * twice. * * @param x The first input value for the current thread * @param y The second input value for the current thread * @param s_data Temporary shared memory space of 2*CTA_SIZE elements for * performing the scan */ template <class T, class traits> __device__ void scanWarps(T x, T y, T *s_data) { T val = warpscan<T, traits, 4>(x, s_data); __syncthreads(); T val2 = warpscan<T, traits, 4>(y, s_data); int idx = threadIdx.x; if ((idx & 31)==31) { s_data[idx >> 5] = traits::op(val, x); s_data[(idx + blockDim.x) >> 5] = traits::op(val2, y); } __syncthreads(); #ifndef __DEVICE_EMULATION__ if (idx < 32) #endif { s_data[idx] = warpscan<T,traits,(LOG_CTA_SIZE-LOG_WARP_SIZE+1)>(s_data[idx], s_data); } __syncthreads(); val = traits::op(val, s_data[idx >> 5]); val2 = traits::op(val2, s_data[(idx + blockDim.x) >> 5]); __syncthreads(); s_data[idx] = val; s_data[idx+blockDim.x] = val2; } /** * @brief CTA-level scan routine; scans s_data in shared memory in each thread block * * This function is the main CTA-level scan function. It may be called by other * CUDA __global__ or __device__ functions. This function scans 2 * CTA_SIZE elements. * Each thread is responsible for one element in each half of the input array. * \note This code is intended to be run on a CTA of 128 threads. 
Other sizes are * untested. * * @param[in] s_data The array to be scanned in shared memory * @param[out] d_blockSums Array of per-block sums * @param[in] blockSumIndex Location in \a d_blockSums to which to write this block's sum */ template <class T, class traits> __device__ void scanCTA(T *s_data, T *d_blockSums, unsigned int blockSumIndex) { T val = s_data[threadIdx.x]; T val2 = s_data[threadIdx.x + blockDim.x]; __syncthreads(); scanWarps<T,traits>(val, val2, s_data); __syncthreads(); if (traits::writeSums() && threadIdx.x == blockDim.x - 1) { d_blockSums[blockSumIndex] = traits::op(val2, s_data[threadIdx.x + blockDim.x]); } #ifdef __DEVICE_EMULATION__ // must sync in emulation mode when doing backward scans, because otherwise the // shared memory array will get reversed before the block sums are read! if (traits::isBackward()) __syncthreads(); #endif } /** @} */ // end scan functions /** @} */ // end cudpp_cta
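/* Illustrative host-side sketch, not part of CUDPP: a serial reference
 * exclusive add-scan that the CTA-level routines above can be validated
 * against. Function and parameter names here are hypothetical; the add
 * operator with identity 0 is assumed. */
static void referenceExclusiveScan(const float *in, float *out, int numElements)
{
    float running = 0.0f;            // identity element of the add operator
    for (int i = 0; i < numElements; ++i)
    {
        out[i] = running;            // exclusive: out[i] excludes in[i]
        running += in[i];
    }
}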
5e44ca03c1fdb2b30e9942eee8a059ef84107d51.cu
// ------------------------------------------------------------- // cuDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision: 5633 $ // $Date: 2009-07-01 15:02:51 +1000 (Wed, 01 Jul 2009) $ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * scan_cta.cu * * @brief CUDPP CTA-level scan routines */ /** \defgroup cudpp_cta CUDPP CTA-Level API * The CUDPP CTA-Level API contains functions that run on the GPU * device. These are CUDA \c __device__ functions that are called * from within other CUDA device functions (typically * \link cudpp_kernel CUDPP Kernel-Level API\endlink functions). * They are called CTA-level functions because they typically process * s_data "owned" by each CTA within shared memory, and are agnostic of * any other CTAs that may be running (or how many CTAs are running), * other than to compute appropriate global memory addresses. * @{ */ /** @name Scan Functions * @{ */ #include "cudpp_globals.h" #include "cudpp_util.h" #include <math.h> #include "cudpp.h" /** * @brief Macro to insert necessary __syncthreads() in device emulation mode */ #ifdef __DEVICE_EMULATION__ #define __EMUSYNC __syncthreads() #else #define __EMUSYNC #endif /** * @brief Template class containing compile-time parameters to the scan functions * * ScanTraits is passed as a template parameter to all scan functions. By * using these compile-time functions we can enable generic code while * maintaining the highest performance. This is crucial for the performance * of low-level workhorse algorithms like scan. * * @param T The datatype of the scan * @param oper The ::CUDPPOperator to use for the scan (add, max, etc.) * @param multiRow True if this is a multi-row scan * @param unroll True if scan inner loops should be unrolled * @param sums True if each block should write it's sum to the d_blockSums array (false for single-block scans) * @param backward True if this is a backward scan * @param fullBlock True if all blocks in this scan are full (CTA_SIZE * SCAN_ELEMENTS_PER_THREAD elements) * @param exclusive True for exclusive scans, false for inclusive scans */ template <class T, CUDPPOperator oper, bool backward, bool exclusive, bool multiRow, bool sums, bool fullBlock> class ScanTraits { public: //! Returns true if this is a backward scan static __device__ bool isBackward() { return backward; }; //! Returns true if this is an exclusive scan static __device__ bool isExclusive() { return exclusive; }; //! Returns true if this a multi-row scan. static __device__ bool isMultiRow() { return multiRow; }; //! Returns true if this scan writes the sum of each block to the d_blockSums array (multi-block scans) static __device__ bool writeSums() { return sums; }; //! Returns true if this is a full scan -- all blocks process CTA_SIZE * SCAN_ELEMENTS_PER_THREAD elements static __device__ bool isFullBlock() { return fullBlock; }; //! The operator function used for the scan static __device__ T op(const T a, const T b) { return Operator<T, oper>::op(a, b); } //! The identity value used by the scan static __device__ T identity() { return Operator<T, oper>::identity(); } }; //! This is used to insert syncthreads to avoid perf loss caused by 128-bit //! load overlap that happens on G80. This gives about a 15% boost on scans on //! G80. //! 
@todo Parameterize this in case this perf detail changes on future GPUs. #define DISALLOW_LOADSTORE_OVERLAP 1 /** * @brief Handles loading input s_data from global memory to shared memory * (vec4 version) * * Load a chunk of 8*blockDim.x elements from global memory into a * shared memory array. Each thread loads two T4 elements (where * T4 is, e.g. int4 or float4), computes the scan of those two vec4s in * thread local arrays (in registers), and writes the two total sums of the * vec4s into shared memory, where they will be cooperatively scanned with * the other partial sums by all threads in the CTA. * * @param[out] s_out The output (shared) memory array * @param[out] threadScan0 Intermediate per-thread partial sums array 1 * @param[out] threadScan1 Intermediate per-thread partial sums array 2 * @param[in] d_in The input (device) memory array * @param[in] numElements The number of elements in the array being scanned * @param[in] iDataOffset the offset of the input array in global memory for this * thread block * @param[out] ai The shared memory address for the thread's first element * (returned for reuse) * @param[out] bi The shared memory address for the thread's second element * (returned for reuse) * @param[out] aiDev The device memory address for this thread's first element * (returned for reuse) * @param[out] biDev The device memory address for this thread's second element * (returned for reuse) */ template <class T, class traits> __device__ void loadSharedChunkFromMem4(T *s_out, T threadScan0[4], T threadScan1[4], const T *d_in, int numElements, int iDataOffset, int &ai, int &bi, int &aiDev, int &biDev) { int thid = threadIdx.x; aiDev = iDataOffset + thid; biDev = aiDev + blockDim.x; // convert to 4-vector typename typeToVector<T,4>::Result tempData; typename typeToVector<T,4>::Result* inData = (typename typeToVector<T,4>::Result*)d_in; ai = thid; bi = thid + blockDim.x; // read into tempData; if (traits::isBackward()) { int i = aiDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { tempData = inData[aiDev]; threadScan0[3] = tempData.w; threadScan0[2] = traits::op(tempData.z, threadScan0[3]); threadScan0[1] = traits::op(tempData.y, threadScan0[2]); threadScan0[0] = s_out[ai] = traits::op(tempData.x, threadScan0[1]); } else { threadScan0[3] = traits::identity(); threadScan0[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan0[3]); threadScan0[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan0[2]); threadScan0[0] = s_out[ai] = traits::op((i < numElements) ? d_in[i] : traits::identity(), threadScan0[1]); } #ifdef DISALLOW_LOADSTORE_OVERLAP __syncthreads(); #endif i = biDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { tempData = inData[biDev]; threadScan1[3] = tempData.w; threadScan1[2] = traits::op(tempData.z, threadScan1[3]); threadScan1[1] = traits::op(tempData.y, threadScan1[2]); threadScan1[0] = s_out[bi] = traits::op(tempData.x, threadScan1[1]); } else { threadScan1[3] = traits::identity(); threadScan1[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan1[3]); threadScan1[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan1[2]); threadScan1[0] = s_out[bi] = traits::op((i < numElements) ? 
d_in[i] : traits::identity(), threadScan1[1]); } __syncthreads(); // reverse s_data in shared memory if (ai < CTA_SIZE) { unsigned int leftIdx = ai; unsigned int rightIdx = (2 * CTA_SIZE - 1) - ai; if (leftIdx < rightIdx) { T tmp = s_out[leftIdx]; s_out[leftIdx] = s_out[rightIdx]; s_out[rightIdx] = tmp; } } __syncthreads(); } else { int i = aiDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { tempData = inData[aiDev]; threadScan0[0] = tempData.x; threadScan0[1] = traits::op(tempData.y, threadScan0[0]); threadScan0[2] = traits::op(tempData.z, threadScan0[1]); threadScan0[3] = s_out[ai] = traits::op(tempData.w, threadScan0[2]); } else { threadScan0[0] = (i < numElements) ? d_in[i] : traits::identity(); threadScan0[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan0[0]); threadScan0[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan0[1]); threadScan0[3] = s_out[ai] = traits::op(((i+3) < numElements) ? d_in[i+3] : traits::identity(), threadScan0[2]); } #ifdef DISALLOW_LOADSTORE_OVERLAP __syncthreads(); #endif i = biDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { tempData = inData[biDev]; threadScan1[0] = tempData.x; threadScan1[1] = traits::op(tempData.y, threadScan1[0]); threadScan1[2] = traits::op(tempData.z, threadScan1[1]); threadScan1[3] = s_out[bi] = traits::op(tempData.w, threadScan1[2]); } else { threadScan1[0] = (i < numElements) ? d_in[i] : traits::identity(); threadScan1[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan1[0]); threadScan1[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan1[1]); threadScan1[3] = s_out[bi] = traits::op(((i+3) < numElements) ? d_in[i+3] : traits::identity(), threadScan1[2]); } __syncthreads(); } } /** * @brief Handles storing result s_data from shared memory to global memory * (vec4 version) * * Store a chunk of SCAN_ELTS_PER_THREAD*blockDim.x elements from shared memory * into a device memory array. Each thread stores reads two elements from shared * memory, adds them to the intermediate sums computed in * loadSharedChunkFromMem4(), and writes two T4 elements (where * T4 is, e.g. int4 or float4) to global memory. 
* * @param[out] d_out The output (device) memory array * @param[in] threadScan0 Intermediate per-thread partial sums array 1 * (contents computed in loadSharedChunkFromMem4()) * @param[in] threadScan1 Intermediate per-thread partial sums array 2 * (contents computed in loadSharedChunkFromMem4()) * @param[in] s_in The input (shared) memory array * @param[in] numElements The number of elements in the array being scanned * @param[in] oDataOffset the offset of the output array in global memory * for this thread block * @param[in] ai The shared memory address for the thread's first element * (computed in loadSharedChunkFromMem4()) * @param[in] bi The shared memory address for the thread's second element * (computed in loadSharedChunkFromMem4()) * @param[in] aiDev The device memory address for this thread's first element * (computed in loadSharedChunkFromMem4()) * @param[in] biDev The device memory address for this thread's second element * (computed in loadSharedChunkFromMem4()) */ template <class T, class traits> __device__ void storeSharedChunkToMem4(T *d_out, T threadScan0[4], T threadScan1[4], T *s_in, int numElements, int oDataOffset, int ai, int bi, int aiDev, int biDev) { // Convert to 4-vector typename typeToVector<T,4>::Result tempData; typename typeToVector<T,4>::Result* outData = (typename typeToVector<T,4>::Result*)d_out; // write results to global memory if (traits::isBackward()) { if (ai < CTA_SIZE) { unsigned int leftIdx = ai; unsigned int rightIdx = (2 * CTA_SIZE - 1) - ai; if (leftIdx < rightIdx) { T tmp = s_in[leftIdx]; s_in[leftIdx] = s_in[rightIdx]; s_in[rightIdx] = tmp; } } __syncthreads(); T temp = s_in[ai]; if (traits::isExclusive()) { tempData.w = temp; tempData.z = traits::op(temp, threadScan0[3]); tempData.y = traits::op(temp, threadScan0[2]); tempData.x = traits::op(temp, threadScan0[1]); } else { tempData.w = traits::op(temp, threadScan0[3]); tempData.z = traits::op(temp, threadScan0[2]); tempData.y = traits::op(temp, threadScan0[1]); tempData.x = traits::op(temp, threadScan0[0]); } int i = aiDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { outData[aiDev] = tempData; } else { if (i < numElements) { d_out[i] = tempData.x; if (i+1 < numElements) { d_out[i+1] = tempData.y; if (i+2 < numElements) { d_out[i+2] = tempData.z; }}} } #ifdef DISALLOW_LOADSTORE_OVERLAP __syncthreads(); #endif temp = s_in[bi]; if (traits::isExclusive()) { tempData.w = temp; tempData.z = traits::op(temp, threadScan1[3]); tempData.y = traits::op(temp, threadScan1[2]); tempData.x = traits::op(temp, threadScan1[1]); } else { tempData.w = traits::op(temp, threadScan1[3]); tempData.z = traits::op(temp, threadScan1[2]); tempData.y = traits::op(temp, threadScan1[1]); tempData.x = traits::op(temp, threadScan1[0]); } i = biDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { outData[biDev] = tempData; } else { if (i < numElements) { d_out[i] = tempData.x; if (i+1 < numElements) { d_out[i+1] = tempData.y; if (i+2 < numElements) { d_out[i+2] = tempData.z; }}} } } else { T temp; temp = s_in[ai]; if (traits::isExclusive()) { tempData.x = temp; tempData.y = traits::op(temp, threadScan0[0]); tempData.z = traits::op(temp, threadScan0[1]); tempData.w = traits::op(temp, threadScan0[2]); } else { tempData.x = traits::op(temp, threadScan0[0]); tempData.y = traits::op(temp, threadScan0[1]); tempData.z = traits::op(temp, threadScan0[2]); tempData.w = traits::op(temp, threadScan0[3]); } int i = aiDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { outData[aiDev] = tempData; } else { // we 
can't use vec4 because the original array isn't a multiple of // 4 elements if ( i < numElements) { d_out[i] = tempData.x; if ((i+1) < numElements) { d_out[i+1] = tempData.y; if ((i+2) < numElements) { d_out[i+2] = tempData.z; } } } } #ifdef DISALLOW_LOADSTORE_OVERLAP __syncthreads(); #endif temp = s_in[bi]; if (traits::isExclusive()) { tempData.x = temp; tempData.y = traits::op(temp, threadScan1[0]); tempData.z = traits::op(temp, threadScan1[1]); tempData.w = traits::op(temp, threadScan1[2]); } else { tempData.x = traits::op(temp, threadScan1[0]); tempData.y = traits::op(temp, threadScan1[1]); tempData.z = traits::op(temp, threadScan1[2]); tempData.w = traits::op(temp, threadScan1[3]); } i = biDev * 4; if (traits::isFullBlock() || i + 3 < numElements) { outData[biDev] = tempData; } else { // we can't use vec4 because the original array isn't a multiple of // 4 elements if ( i < numElements) { d_out[i] = tempData.x; if ((i+1) < numElements) { d_out[i+1] = tempData.y; if ((i+2) < numElements) { d_out[i+2] = tempData.z; } } } } } } /** @brief Scan all warps of a CTA without synchronization * * The warp-scan algorithm breaks a block of data into warp-sized chunks, and * scans the chunks independently with a warp of threads each. Because warps * execute instructions in SIMD fashion, there is no need to synchronize in * order to share data within a warp (only across warps). Also, in SIMD the * most efficient algorithm is a step-efficient algorithm. Therefore, within * each warp we use a Hillis-and-Steele-style scan that takes log2(N) steps * to scan the warp [Daniel Hillis and Guy Steele 1986], rather than the * work-efficient tree-based algorithm described by Guy Blelloch [1990] that * takes 2 * log(N) steps and is in general more complex to implement. * Previous versions of CUDPP used the Blelloch algorithm. For current GPUs, * the warp size is 32, so this takes five steps per warp. * * Each thread is responsible for a single element of the array to be scanned. * Each thread inputs a single value to the scan via \a val and returns * its own scanned result element. The threads of each warp cooperate * via the shared memory array \a s_data to scan WARP_SIZE elements. * * Template parameter \a maxlevel allows this warpscan to be performed on * partial warps. For example, if only the first 8 elements of each warp need * to be scanned, then warpscan only performs log2(8)=3 steps rather than 5. * * The computation uses 2 * WARP_SIZE elements of shared memory per warp to * enable warps to offset beyond their input data and receive the identity * element without using any branch instructions. * * \note s_data is declared volatile here to prevent the compiler from * optimizing away writes to shared memory, and ensure correct intrawarp * communication in the absence of __syncthreads. 
* * @return The result of the warp scan for the current thread * @param[in] val The current threads's input to the scan * @param[in,out] s_data A pointer to a temporary shared array of 2*CTA_SIZE * elements used to compute the warp scans */ template<class T, class traits,int maxlevel> __device__ T warpscan(T val, volatile T* s_data) { // The following is the same as 2 * 32 * warpId + threadInWarp = // 64*(threadIdx.x >> 5) + (threadIdx.x & (WARP_SIZE-1)) int idx = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE-1)); s_data[idx] = traits::identity(); idx += WARP_SIZE; T t = s_data[idx] = val; __EMUSYNC; // This code is needed because the warp size of device emulation // is only 1 thread, so sync-less cooperation within a warp doesn't // work. #ifdef __DEVICE_EMULATION__ t = s_data[idx - 1]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; t = s_data[idx - 2]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; t = s_data[idx - 4]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; t = s_data[idx - 8]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; t = s_data[idx - 16]; __EMUSYNC; s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC; #else if (0 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 1]); } if (1 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 2]); } if (2 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 4]); } if (3 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 8]); } if (4 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx -16]); } #endif return s_data[idx-1]; // convert inclusive -> exclusive } /** @brief Perform a full CTA scan using the warp-scan algorithm * * As described in the comment for warpscan(), the warp-scan algorithm breaks * a block of data into warp-sized chunks, and scans the chunks independently * with a warp of threads each. To complete the scan, each warp <i>j</i> then * writes its last element to element <i>j</i> of a temporary shared array. * Then a single warp exclusive-scans these "warp sums". Finally, each thread * adds the result of the warp sum scan to the result of the scan from the * first pass. * * Because we scan 2*CTA_SIZE elements per thread, we have to call warpscan * twice. * * @param x The first input value for the current thread * @param y The second input value for the current thread * @param s_data Temporary shared memory space of 2*CTA_SIZE elements for * performing the scan */ template <class T, class traits> __device__ void scanWarps(T x, T y, T *s_data) { T val = warpscan<T, traits, 4>(x, s_data); __syncthreads(); T val2 = warpscan<T, traits, 4>(y, s_data); int idx = threadIdx.x; if ((idx & 31)==31) { s_data[idx >> 5] = traits::op(val, x); s_data[(idx + blockDim.x) >> 5] = traits::op(val2, y); } __syncthreads(); #ifndef __DEVICE_EMULATION__ if (idx < 32) #endif { s_data[idx] = warpscan<T,traits,(LOG_CTA_SIZE-LOG_WARP_SIZE+1)>(s_data[idx], s_data); } __syncthreads(); val = traits::op(val, s_data[idx >> 5]); val2 = traits::op(val2, s_data[(idx + blockDim.x) >> 5]); __syncthreads(); s_data[idx] = val; s_data[idx+blockDim.x] = val2; } /** * @brief CTA-level scan routine; scans s_data in shared memory in each thread block * * This function is the main CTA-level scan function. It may be called by other * CUDA __global__ or __device__ functions. This function scans 2 * CTA_SIZE elements. * Each thread is responsible for one element in each half of the input array. * \note This code is intended to be run on a CTA of 128 threads. 
Other sizes are * untested. * * @param[in] s_data The array to be scanned in shared memory * @param[out] d_blockSums Array of per-block sums * @param[in] blockSumIndex Location in \a d_blockSums to which to write this block's sum */ template <class T, class traits> __device__ void scanCTA(T *s_data, T *d_blockSums, unsigned int blockSumIndex) { T val = s_data[threadIdx.x]; T val2 = s_data[threadIdx.x + blockDim.x]; __syncthreads(); scanWarps<T,traits>(val, val2, s_data); __syncthreads(); if (traits::writeSums() && threadIdx.x == blockDim.x - 1) { d_blockSums[blockSumIndex] = traits::op(val2, s_data[threadIdx.x + blockDim.x]); } #ifdef __DEVICE_EMULATION__ // must sync in emulation mode when doing backward scans, because otherwise the // shared memory array will get reversed before the block sums are read! if (traits::isBackward()) __syncthreads(); #endif } /** @} */ // end scan functions /** @} */ // end cudpp_cta
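/* Illustrative sketch, not CUDPP code: the same Hillis-Steele intra-warp scan
 * that warpscan() above implements through padded shared memory, written with
 * warp shuffle intrinsics instead (CUDA 9+). Assumes a warp size of 32, all 32
 * lanes active, and the add operator with identity 0. */
__device__ float warpExclusiveScanShfl(float val)
{
    const unsigned mask = 0xffffffffu;
    const int lane = threadIdx.x & 31;
    float x = val;                                    // running inclusive prefix
    for (int offset = 1; offset < 32; offset <<= 1)   // log2(32) = 5 steps
    {
        float n = __shfl_up_sync(mask, x, offset);    // value held by lane - offset
        if (lane >= offset)
            x += n;
    }
    return x - val;                                   // convert inclusive -> exclusive
}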
02b21080f129c7856dd9d6b36aef3e23f1f88e1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 16 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define C(y,x) C[(y)*WIDTH_C+(x)] __global__ void vectormul(float * A, float * B, float * C, int width) { __shared__ float shared_0[16]; float sum_0; float sum_1; float sum_2; float sum_3; float sum_4; float sum_5; float sum_6; float sum_7; float sum_8; float sum_9; float sum_10; float sum_11; float sum_12; float sum_13; float sum_14; float sum_15; float a_0; float a_1; float a_2; float a_3; float a_4; float a_5; float a_6; float a_7; float a_8; float a_9; float a_10; float a_11; float a_12; float a_13; float a_14; float a_15; float b; sum_0=0; sum_1=0; sum_2=0; sum_3=0; sum_4=0; sum_5=0; sum_6=0; sum_7=0; sum_8=0; sum_9=0; sum_10=0; sum_11=0; sum_12=0; sum_13=0; sum_14=0; sum_15=0; if ((tidx<16)) { { shared_0[(tidx+0)]=A[(coalesced_idy+tidx)]; } } __syncthreads(); a_0=shared_0[((((bidy*16)+tidy)+0)+(( - 1)*coalesced_idy))]; a_1=shared_0[((((bidy*16)+tidy)+1)+(( - 1)*coalesced_idy))]; a_2=shared_0[((((bidy*16)+tidy)+2)+(( - 1)*coalesced_idy))]; a_3=shared_0[((((bidy*16)+tidy)+3)+(( - 1)*coalesced_idy))]; a_4=shared_0[((((bidy*16)+tidy)+4)+(( - 1)*coalesced_idy))]; a_5=shared_0[((((bidy*16)+tidy)+5)+(( - 1)*coalesced_idy))]; a_6=shared_0[((((bidy*16)+tidy)+6)+(( - 1)*coalesced_idy))]; a_7=shared_0[((((bidy*16)+tidy)+7)+(( - 1)*coalesced_idy))]; a_8=shared_0[((((bidy*16)+tidy)+8)+(( - 1)*coalesced_idy))]; a_9=shared_0[((((bidy*16)+tidy)+9)+(( - 1)*coalesced_idy))]; a_10=shared_0[((((bidy*16)+tidy)+10)+(( - 1)*coalesced_idy))]; a_11=shared_0[((((bidy*16)+tidy)+11)+(( - 1)*coalesced_idy))]; a_12=shared_0[((((bidy*16)+tidy)+12)+(( - 1)*coalesced_idy))]; a_13=shared_0[((((bidy*16)+tidy)+13)+(( - 1)*coalesced_idy))]; a_14=shared_0[((((bidy*16)+tidy)+14)+(( - 1)*coalesced_idy))]; a_15=shared_0[((((bidy*16)+tidy)+15)+(( - 1)*coalesced_idy))]; __syncthreads(); __syncthreads(); { b=B[idx]; } sum_0+=(a_0*b); sum_1+=(a_1*b); sum_2+=(a_2*b); sum_3+=(a_3*b); sum_4+=(a_4*b); sum_5+=(a_5*b); sum_6+=(a_6*b); sum_7+=(a_7*b); sum_8+=(a_8*b); sum_9+=(a_9*b); sum_10+=(a_10*b); sum_11+=(a_11*b); sum_12+=(a_12*b); sum_13+=(a_13*b); sum_14+=(a_14*b); sum_15+=(a_15*b); { C((((bidy*16)+tidy)+0), idx)+=sum_0; } { C((((bidy*16)+tidy)+1), idx)+=sum_1; } { C((((bidy*16)+tidy)+2), idx)+=sum_2; } { C((((bidy*16)+tidy)+3), idx)+=sum_3; } { C((((bidy*16)+tidy)+4), idx)+=sum_4; } { C((((bidy*16)+tidy)+5), idx)+=sum_5; } { C((((bidy*16)+tidy)+6), idx)+=sum_6; } { C((((bidy*16)+tidy)+7), idx)+=sum_7; } { C((((bidy*16)+tidy)+8), idx)+=sum_8; } { C((((bidy*16)+tidy)+9), idx)+=sum_9; } { C((((bidy*16)+tidy)+10), idx)+=sum_10; } { C((((bidy*16)+tidy)+11), idx)+=sum_11; } { C((((bidy*16)+tidy)+12), idx)+=sum_12; } { C((((bidy*16)+tidy)+13), idx)+=sum_13; } { C((((bidy*16)+tidy)+14), idx)+=sum_14; } { C((((bidy*16)+tidy)+15), idx)+=sum_15; } }
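/* Illustrative sketch, not part of the original file: the computation the
 * vectormul kernel above performs, written serially on the host. Each block
 * covers 16 rows (bidy*16 + 0..15) and 512 columns, accumulating the outer
 * product C[y][x] += A[y] * B[x]. The sketch assumes the compile-time constant
 * WIDTH_C (used by the C(y,x) macro but not defined in this file) equals the
 * row stride `width`. Names are hypothetical. */
static void referenceVectormul(const float *A, const float *B, float *C,
                               int height, int width)
{
    for (int y = 0; y < height; ++y)
        for (int x = 0; x < width; ++x)
            C[y * width + x] += A[y] * B[x];   // rank-1 (outer-product) update
}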
02b21080f129c7856dd9d6b36aef3e23f1f88e1e.cu
#define COALESCED_NUM 16 #define blockDimX 512 #define blockDimY 1 #define gridDimX (gridDim.x) #define gridDimY (gridDim.y) #define idx (blockIdx.x*blockDimX+threadIdx.x) #define idy (blockIdx.y*blockDimY+threadIdx.y) #define bidy (blockIdx.y) #define bidx (blockIdx.x) #define tidx (threadIdx.x) #define tidy (threadIdx.y) #define merger_y 16 #define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM) #define C(y,x) C[(y)*WIDTH_C+(x)] __global__ void vectormul(float * A, float * B, float * C, int width) { __shared__ float shared_0[16]; float sum_0; float sum_1; float sum_2; float sum_3; float sum_4; float sum_5; float sum_6; float sum_7; float sum_8; float sum_9; float sum_10; float sum_11; float sum_12; float sum_13; float sum_14; float sum_15; float a_0; float a_1; float a_2; float a_3; float a_4; float a_5; float a_6; float a_7; float a_8; float a_9; float a_10; float a_11; float a_12; float a_13; float a_14; float a_15; float b; sum_0=0; sum_1=0; sum_2=0; sum_3=0; sum_4=0; sum_5=0; sum_6=0; sum_7=0; sum_8=0; sum_9=0; sum_10=0; sum_11=0; sum_12=0; sum_13=0; sum_14=0; sum_15=0; if ((tidx<16)) { { shared_0[(tidx+0)]=A[(coalesced_idy+tidx)]; } } __syncthreads(); a_0=shared_0[((((bidy*16)+tidy)+0)+(( - 1)*coalesced_idy))]; a_1=shared_0[((((bidy*16)+tidy)+1)+(( - 1)*coalesced_idy))]; a_2=shared_0[((((bidy*16)+tidy)+2)+(( - 1)*coalesced_idy))]; a_3=shared_0[((((bidy*16)+tidy)+3)+(( - 1)*coalesced_idy))]; a_4=shared_0[((((bidy*16)+tidy)+4)+(( - 1)*coalesced_idy))]; a_5=shared_0[((((bidy*16)+tidy)+5)+(( - 1)*coalesced_idy))]; a_6=shared_0[((((bidy*16)+tidy)+6)+(( - 1)*coalesced_idy))]; a_7=shared_0[((((bidy*16)+tidy)+7)+(( - 1)*coalesced_idy))]; a_8=shared_0[((((bidy*16)+tidy)+8)+(( - 1)*coalesced_idy))]; a_9=shared_0[((((bidy*16)+tidy)+9)+(( - 1)*coalesced_idy))]; a_10=shared_0[((((bidy*16)+tidy)+10)+(( - 1)*coalesced_idy))]; a_11=shared_0[((((bidy*16)+tidy)+11)+(( - 1)*coalesced_idy))]; a_12=shared_0[((((bidy*16)+tidy)+12)+(( - 1)*coalesced_idy))]; a_13=shared_0[((((bidy*16)+tidy)+13)+(( - 1)*coalesced_idy))]; a_14=shared_0[((((bidy*16)+tidy)+14)+(( - 1)*coalesced_idy))]; a_15=shared_0[((((bidy*16)+tidy)+15)+(( - 1)*coalesced_idy))]; __syncthreads(); __syncthreads(); { b=B[idx]; } sum_0+=(a_0*b); sum_1+=(a_1*b); sum_2+=(a_2*b); sum_3+=(a_3*b); sum_4+=(a_4*b); sum_5+=(a_5*b); sum_6+=(a_6*b); sum_7+=(a_7*b); sum_8+=(a_8*b); sum_9+=(a_9*b); sum_10+=(a_10*b); sum_11+=(a_11*b); sum_12+=(a_12*b); sum_13+=(a_13*b); sum_14+=(a_14*b); sum_15+=(a_15*b); { C((((bidy*16)+tidy)+0), idx)+=sum_0; } { C((((bidy*16)+tidy)+1), idx)+=sum_1; } { C((((bidy*16)+tidy)+2), idx)+=sum_2; } { C((((bidy*16)+tidy)+3), idx)+=sum_3; } { C((((bidy*16)+tidy)+4), idx)+=sum_4; } { C((((bidy*16)+tidy)+5), idx)+=sum_5; } { C((((bidy*16)+tidy)+6), idx)+=sum_6; } { C((((bidy*16)+tidy)+7), idx)+=sum_7; } { C((((bidy*16)+tidy)+8), idx)+=sum_8; } { C((((bidy*16)+tidy)+9), idx)+=sum_9; } { C((((bidy*16)+tidy)+10), idx)+=sum_10; } { C((((bidy*16)+tidy)+11), idx)+=sum_11; } { C((((bidy*16)+tidy)+12), idx)+=sum_12; } { C((((bidy*16)+tidy)+13), idx)+=sum_13; } { C((((bidy*16)+tidy)+14), idx)+=sum_14; } { C((((bidy*16)+tidy)+15), idx)+=sum_15; } }
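/* Hypothetical host-side launch sketch, not from the original file. Assumptions:
 * WIDTH_C is supplied at compile time (e.g. -DWIDTH_C=<width>) and equals `width`;
 * `height` is a multiple of 16 and `width` a multiple of 512, matching the
 * blockDimX/merger_y macros above; C starts from zero because the kernel
 * accumulates with +=. */
#include <cuda_runtime.h>

void launchVectormul(const float *h_A, const float *h_B, float *h_C,
                     int height, int width)
{
    float *d_A, *d_B, *d_C;
    size_t bytesC = (size_t)height * width * sizeof(float);
    cudaMalloc((void**)&d_A, height * sizeof(float));
    cudaMalloc((void**)&d_B, width * sizeof(float));
    cudaMalloc((void**)&d_C, bytesC);

    cudaMemcpy(d_A, h_A, height * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, width * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_C, 0, bytesC);              // kernel does C(y,x) += sum

    dim3 block(512, 1);                      // blockDimX x blockDimY
    dim3 grid(width / 512, height / 16);     // 512 columns x 16 rows per block
    vectormul<<<grid, block>>>(d_A, d_B, d_C, width);

    cudaMemcpy(h_C, d_C, bytesC, cudaMemcpyDeviceToHost);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}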
026c7bfd938275255a9bbc688bd62feaf26d52c9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include"pathalg.h" static const int WORK_SIZE =258; void BFSor::copydata(int s,vector<edge>&edges,int nodenum){ }; void BFSor::dellocate(){ }; void BFSor::allocate(int maxn,int maxedge){ } void BFSor::topsort() { }; __global__ void cleanb(int *d,int *p,int N) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=N)return; d[i]=100000; p[i]=-1; }; void BFSor::updatE(vector<vector<int>>&esigns) { int count=0; for(int k=0;k<LY;k++) for(int i=0;i<nodenum;i++) for(int j=0;j<nein[i].size();j++) { if(esigns[k][neie[i][j]]<0) te[count]=-1; else te[count]=nein[i][j]; count++; } hipMemcpy(dev_te,te,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice); }; void BFSor::updatS(vector<vector<Sot>>&stpair) { L[0]=0; L[1]=LY1; L[2]=LY2; S[0]=stpair[0].size(); S[1]=stpair[1].size(); stps=stpair; int count=0; ncount=L[1]*S[0]+L[2]*S[1]; int bigN=ncount*nodenum; hipLaunchKernelGGL(( cleanb), dim3(bigN/512+1),dim3(512),0, 0, dev_d,dev_p,bigN); hipMemcpy(d,dev_d,ncount*nodenum*sizeof(int),hipMemcpyDeviceToHost); for(int k=0;k<L[1];k++) { for(int j=0;j<stpair[0].size();j++) { d[count*nodenum+stpair[0][j].s]=0; count++; } } for(int k=0;k<L[2];k++) { for(int j=0;j<stpair[1].size();j++) { d[count*nodenum+stpair[1][j].s]=0; count++; } } Size[0]=edges.size()*L[1]*S[0]; Size[1]=edges.size()*L[2]*S[1]; hipMemcpy(dev_d,d,ncount*nodenum*sizeof(int),hipMemcpyHostToDevice); } void BFSor::init(pair<vector<edge>,vector<vector<int>>>ext,vector<pair<int,int>>stpair,int _nodenum) { //cout<<"in paraller BFS init"<<endl; nodenum=_nodenum; edges=ext.first; vector<vector<int>>esigns; esigns=ext.second; stp=stpair; mark=new int; *mark=0; W=WD+1; st=new int[edges.size()*LY]; te=new int[edges.size()*LY]; stid=new int[edges.size()*LY]; d=new int[nodenum*LY*YE]; p=new int[nodenum*LY*YE]; esignes=new int[edges.size()*LY]; vector<vector<int>>ein(nodenum*LY,vector<int>()); neibn=ein; vector<vector<int>>eie(nodenum,vector<int>()); neie=eie; for(int i=0;i<edges.size();i++) { int s=edges[i].s; int t=edges[i].t; neibn[s].push_back(t); neie[s].push_back(i); } nein=neibn; int count=0; for(int k=0;k<LY;k++) for(int i=0;i<nodenum;i++) for(int j=0;j<neibn[i].size();j++) { st[count]=i; if(esigns[k][neie[i][j]]<0) te[count]=-1; else te[count]=neibn[i][j]; stid[count]=neie[i][j]; count++; } for(int i=0;i<nodenum*LY*YE;i++) d[i]=2*WD+1,p[i]=-1; hipMalloc((void**)&dev_st,LY*edges.size()*sizeof(int)); hipMalloc((void**)&dev_te,LY*edges.size()*sizeof(int)); hipMalloc((void**)&dev_stid,LY*edges.size()*sizeof(int)); hipMalloc((void**)&dev_d,YE*LY*nodenum*sizeof(int)); hipMalloc((void**)&dev_p,YE*LY*nodenum*sizeof(int)); hipMemcpy(dev_te,te,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_st,st,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_stid,stid,LY*edges.size()*sizeof(int),hipMemcpyHostToDevice); }; BFSor::BFSor():L(PC+1,0),S(PC,0),NF(PC,0),Size(2,0) { }; __global__ void BFSfast(int *st,int *te,int *d,int* p,int *stid,int E,int N,int size,int round,int Leveloff,int numoff,int ye,int ly) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=size)return; int eid=(i%(E*ly)); int eeid=eid+Leveloff; int s=st[eeid],t=te[eeid]; if(t<0)return; int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff; if(d[s+off]==round-1&&d[t+off]>round) { d[t+off]=round; //p[t+off]=stid[eeid]; } } __global__ void BFScolor(int *st,int *te,int *d,int* p,int *stid,int E,int N,int size,int round,int Leveloff,int numoff,int ye,int ly) { int 
i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=size)return; int eid=(i%(E*ly)); int eeid=eid+Leveloff; int s=st[eeid],t=te[eeid]; if(t<0)return; int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff; if(d[s+off]==d[t+off]-1) { //d[t+off]=round; p[t+off]=stid[eeid]; } } vector<vector<Rout>> BFSor::routalg(int s,int t,int bw) { //cout<<"blasting "<<endl; int kk=1; time_t start,end; start=clock(); int size=edges.size()*LY*YE; hipStream_t stream0; hipStreamCreate(&stream0); hipStream_t stream1; hipStreamCreate(&stream1); int leoff=edges.size()*L[1]; int nuoff=L[1]*S[0]*nodenum; for(int i=1;i<WD+1;i++) { hipLaunchKernelGGL(( BFSfast), dim3(Size[0]/512+1),dim3(512),0,stream0, dev_st,dev_te,dev_d,dev_p,dev_stid,edges.size(),nodenum,Size[0],i,0,0,S[0],L[1]); hipLaunchKernelGGL(( BFSfast), dim3(Size[1]/512+1),dim3(512),0,stream1, dev_st,dev_te,dev_d,dev_p,dev_stid,edges.size(),nodenum,Size[1],i,leoff,nuoff,S[1],L[2]); } hipLaunchKernelGGL(( BFScolor), dim3(Size[0]/512+1),dim3(512),0,stream0, dev_st,dev_te,dev_d,dev_p,dev_stid,edges.size(),nodenum,Size[0],0,0,0,S[0],L[1]); hipLaunchKernelGGL(( BFScolor), dim3(Size[1]/512+1),dim3(512),0,stream1, dev_st,dev_te,dev_d,dev_p,dev_stid,edges.size(),nodenum,Size[1],0,leoff,nuoff,S[1],L[2]); hipStreamSynchronize(stream1); hipStreamSynchronize(stream0); hipMemcpy(d,dev_d,ncount*nodenum*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(p,dev_p,ncount*nodenum*sizeof(int),hipMemcpyDeviceToHost); vector<vector<Rout>>result(2,vector<Rout>()); int offer=L[1]*nodenum*stps[0].size(); vector<int>LL(3,0); LL=L; LL[2]+=LL[1]; int count=0; for(int y=1;y<PC+1;y++) for(int k=LL[y-1];k<LL[y];k++) { for(int l=0;l<stps[y-1].size();l++) { int s=stps[y-1][l].s; vector<int>ters=stps[y-1][l].ters; int off=count*nodenum; for(int i=0;i<ters.size();i++) { int id=stps[y-1][l].mmpid[ters[i]]; int t=ters[i]; int ds=d[off+t]; if(ds>WD)continue; Rout S(s,t,id,ds,off,k,ds); result[y-1].push_back(S); } count++; } } end=clock(); //cout<<"GPU time is : "<<end-start<<endl; //cout<<"over!"<<endl; //hipFree(dev_te); //hipFree(dev_st); //hipFree(dev_d); //cout<<"before return"<<endl; return result; }; /*__global__ void BFSfast(int *st,int *te,int *d,int round,int E,int N,int size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>size)return; int eid=(i%(E*LY)); int s=st[eid],t=te[eid]; int off=(i/(E*LY))*N+(eid/E)*N*YE; if(d[s+off]==round-1&&d[t+off]>round) d[t+off]=round; }*/
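/* Simplified illustration, not from pathalg: the relaxation rule that BFSfast
 * above applies once per level, reduced to a single unlayered graph given as
 * edge lists src[]/dst[]. d[] is assumed pre-initialized to 0 at source
 * vertices and a large value elsewhere, much as updatS() prepares it. Written
 * with plain CUDA launch syntax; all names are hypothetical. */
__global__ void relaxEdges(const int *src, const int *dst, int *d,
                           int numEdges, int level)
{
    int e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= numEdges) return;
    int s = src[e], t = dst[e];
    if (t < 0) return;                        // edge disabled for this layer, as in BFSfast
    if (d[s] == level - 1 && d[t] > level)
        d[t] = level;                         // benign race: every writer stores `level`
}

/* Host loop (maxDepth plays the role of WD in the original):
 *   for (int level = 1; level <= maxDepth; ++level)
 *       relaxEdges<<<(numEdges + 511) / 512, 512>>>(d_src, d_dst, d_dist, numEdges, level);
 */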
026c7bfd938275255a9bbc688bd62feaf26d52c9.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include"pathalg.h" static const int WORK_SIZE =258; void BFSor::copydata(int s,vector<edge>&edges,int nodenum){ }; void BFSor::dellocate(){ }; void BFSor::allocate(int maxn,int maxedge){ } void BFSor::topsort() { }; __global__ void cleanb(int *d,int *p,int N) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=N)return; d[i]=100000; p[i]=-1; }; void BFSor::updatE(vector<vector<int>>&esigns) { int count=0; for(int k=0;k<LY;k++) for(int i=0;i<nodenum;i++) for(int j=0;j<nein[i].size();j++) { if(esigns[k][neie[i][j]]<0) te[count]=-1; else te[count]=nein[i][j]; count++; } cudaMemcpy(dev_te,te,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice); }; void BFSor::updatS(vector<vector<Sot>>&stpair) { L[0]=0; L[1]=LY1; L[2]=LY2; S[0]=stpair[0].size(); S[1]=stpair[1].size(); stps=stpair; int count=0; ncount=L[1]*S[0]+L[2]*S[1]; int bigN=ncount*nodenum; cleanb<<<bigN/512+1,512,0>>>(dev_d,dev_p,bigN); cudaMemcpy(d,dev_d,ncount*nodenum*sizeof(int),cudaMemcpyDeviceToHost); for(int k=0;k<L[1];k++) { for(int j=0;j<stpair[0].size();j++) { d[count*nodenum+stpair[0][j].s]=0; count++; } } for(int k=0;k<L[2];k++) { for(int j=0;j<stpair[1].size();j++) { d[count*nodenum+stpair[1][j].s]=0; count++; } } Size[0]=edges.size()*L[1]*S[0]; Size[1]=edges.size()*L[2]*S[1]; cudaMemcpy(dev_d,d,ncount*nodenum*sizeof(int),cudaMemcpyHostToDevice); } void BFSor::init(pair<vector<edge>,vector<vector<int>>>ext,vector<pair<int,int>>stpair,int _nodenum) { //cout<<"in paraller BFS init"<<endl; nodenum=_nodenum; edges=ext.first; vector<vector<int>>esigns; esigns=ext.second; stp=stpair; mark=new int; *mark=0; W=WD+1; st=new int[edges.size()*LY]; te=new int[edges.size()*LY]; stid=new int[edges.size()*LY]; d=new int[nodenum*LY*YE]; p=new int[nodenum*LY*YE]; esignes=new int[edges.size()*LY]; vector<vector<int>>ein(nodenum*LY,vector<int>()); neibn=ein; vector<vector<int>>eie(nodenum,vector<int>()); neie=eie; for(int i=0;i<edges.size();i++) { int s=edges[i].s; int t=edges[i].t; neibn[s].push_back(t); neie[s].push_back(i); } nein=neibn; int count=0; for(int k=0;k<LY;k++) for(int i=0;i<nodenum;i++) for(int j=0;j<neibn[i].size();j++) { st[count]=i; if(esigns[k][neie[i][j]]<0) te[count]=-1; else te[count]=neibn[i][j]; stid[count]=neie[i][j]; count++; } for(int i=0;i<nodenum*LY*YE;i++) d[i]=2*WD+1,p[i]=-1; cudaMalloc((void**)&dev_st,LY*edges.size()*sizeof(int)); cudaMalloc((void**)&dev_te,LY*edges.size()*sizeof(int)); cudaMalloc((void**)&dev_stid,LY*edges.size()*sizeof(int)); cudaMalloc((void**)&dev_d,YE*LY*nodenum*sizeof(int)); cudaMalloc((void**)&dev_p,YE*LY*nodenum*sizeof(int)); cudaMemcpy(dev_te,te,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_st,st,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_stid,stid,LY*edges.size()*sizeof(int),cudaMemcpyHostToDevice); }; BFSor::BFSor():L(PC+1,0),S(PC,0),NF(PC,0),Size(2,0) { }; __global__ void BFSfast(int *st,int *te,int *d,int* p,int *stid,int E,int N,int size,int round,int Leveloff,int numoff,int ye,int ly) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=size)return; int eid=(i%(E*ly)); int eeid=eid+Leveloff; int s=st[eeid],t=te[eeid]; if(t<0)return; int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff; if(d[s+off]==round-1&&d[t+off]>round) { d[t+off]=round; //p[t+off]=stid[eeid]; } } __global__ void BFScolor(int *st,int *te,int *d,int* p,int *stid,int E,int N,int size,int round,int Leveloff,int numoff,int ye,int ly) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=size)return; int eid=(i%(E*ly)); int 
eeid=eid+Leveloff; int s=st[eeid],t=te[eeid]; if(t<0)return; int off=(i/(E*ly))*N+(eid/E)*N*ye+numoff; if(d[s+off]==d[t+off]-1) { //d[t+off]=round; p[t+off]=stid[eeid]; } } vector<vector<Rout>> BFSor::routalg(int s,int t,int bw) { //cout<<"blasting "<<endl; int kk=1; time_t start,end; start=clock(); int size=edges.size()*LY*YE; cudaStream_t stream0; cudaStreamCreate(&stream0); cudaStream_t stream1; cudaStreamCreate(&stream1); int leoff=edges.size()*L[1]; int nuoff=L[1]*S[0]*nodenum; for(int i=1;i<WD+1;i++) { BFSfast<<<Size[0]/512+1,512,0,stream0>>>(dev_st,dev_te,dev_d,dev_p,dev_stid,edges.size(),nodenum,Size[0],i,0,0,S[0],L[1]); BFSfast<<<Size[1]/512+1,512,0,stream1>>>(dev_st,dev_te,dev_d,dev_p,dev_stid,edges.size(),nodenum,Size[1],i,leoff,nuoff,S[1],L[2]); } BFScolor<<<Size[0]/512+1,512,0,stream0>>>(dev_st,dev_te,dev_d,dev_p,dev_stid,edges.size(),nodenum,Size[0],0,0,0,S[0],L[1]); BFScolor<<<Size[1]/512+1,512,0,stream1>>>(dev_st,dev_te,dev_d,dev_p,dev_stid,edges.size(),nodenum,Size[1],0,leoff,nuoff,S[1],L[2]); cudaStreamSynchronize(stream1); cudaStreamSynchronize(stream0); cudaMemcpy(d,dev_d,ncount*nodenum*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(p,dev_p,ncount*nodenum*sizeof(int),cudaMemcpyDeviceToHost); vector<vector<Rout>>result(2,vector<Rout>()); int offer=L[1]*nodenum*stps[0].size(); vector<int>LL(3,0); LL=L; LL[2]+=LL[1]; int count=0; for(int y=1;y<PC+1;y++) for(int k=LL[y-1];k<LL[y];k++) { for(int l=0;l<stps[y-1].size();l++) { int s=stps[y-1][l].s; vector<int>ters=stps[y-1][l].ters; int off=count*nodenum; for(int i=0;i<ters.size();i++) { int id=stps[y-1][l].mmpid[ters[i]]; int t=ters[i]; int ds=d[off+t]; if(ds>WD)continue; Rout S(s,t,id,ds,off,k,ds); result[y-1].push_back(S); } count++; } } end=clock(); //cout<<"GPU time is : "<<end-start<<endl; //cout<<"over!"<<endl; //cudaFree(dev_te); //cudaFree(dev_st); //cudaFree(dev_d); //cout<<"before return"<<endl; return result; }; /*__global__ void BFSfast(int *st,int *te,int *d,int round,int E,int N,int size) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>size)return; int eid=(i%(E*LY)); int s=st[eid],t=te[eid]; int off=(i/(E*LY))*N+(eid/E)*N*YE; if(d[s+off]==round-1&&d[t+off]>round) d[t+off]=round; }*/
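/* Hypothetical standalone sketch, not from pathalg: the two-stream launch
 * pattern that routalg() above uses for its two partitions, with the error
 * checking the original omits. The addOne kernel is only a stand-in for the
 * independent per-level work issued on each stream. */
#include <cstdio>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess)                                            \
            fprintf(stderr, "CUDA error: %s (%s:%d)\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
    } while (0)

__global__ void addOne(int *v, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] += 1;
}

int main()
{
    const int n = 1 << 20;
    int *a, *b;
    CUDA_CHECK(cudaMalloc((void**)&a, n * sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&b, n * sizeof(int)));
    CUDA_CHECK(cudaMemset(a, 0, n * sizeof(int)));
    CUDA_CHECK(cudaMemset(b, 0, n * sizeof(int)));

    cudaStream_t s0, s1;
    CUDA_CHECK(cudaStreamCreate(&s0));
    CUDA_CHECK(cudaStreamCreate(&s1));

    for (int level = 0; level < 8; ++level)   // independent work on each stream may overlap
    {
        addOne<<<(n + 511) / 512, 512, 0, s0>>>(a, n);
        addOne<<<(n + 511) / 512, 512, 0, s1>>>(b, n);
    }
    CUDA_CHECK(cudaGetLastError());           // catches bad launch configurations
    CUDA_CHECK(cudaStreamSynchronize(s0));
    CUDA_CHECK(cudaStreamSynchronize(s1));

    CUDA_CHECK(cudaStreamDestroy(s0));
    CUDA_CHECK(cudaStreamDestroy(s1));
    CUDA_CHECK(cudaFree(a));
    CUDA_CHECK(cudaFree(b));
    return 0;
}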