#include "acc.cuh"

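// each thread handles one element of dst: if its flat index i falls inside the
// strided view (strides nb1/nb2, start offset; dims ne10/ne11/ne12 of y), it
// adds the matching element of y to x[i]; otherwise it copies x[i] through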
static __global__ void acc_f32(const float * x, const float * y, float * dst, const int ne,
                               const int ne10, const int ne11, const int ne12,
                               const int nb1, const int nb2, int offset) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= ne) {
        return;
    }
    // flat index of this element relative to the start of the view
    int src1_idx = i - offset;
    // decompose the flat index into (ox, oy, oz) view coordinates
    int oz = src1_idx / nb2;
    int oy = (src1_idx - (oz * nb2)) / nb1;
    int ox = src1_idx % nb1;
    if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) {
        // inside the view: accumulate the matching src1 element
        dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11];
    } else {
        // outside the view: pass src0 through unchanged
        dst[i] = x[i];
    }
}

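// host-side launcher: computes the grid size and launches acc_f32 on the given stream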
static void acc_f32_cuda(const float * x, const float * y, float * dst, const int n_elements,
                         const int ne10, const int ne11, const int ne12,
                         const int nb1, const int nb2, const int offset, cudaStream_t stream) {
    // one thread per dst element, rounded up to whole blocks of CUDA_ACC_BLOCK_SIZE threads
    int num_blocks = (n_elements + CUDA_ACC_BLOCK_SIZE - 1) / CUDA_ACC_BLOCK_SIZE;
    acc_f32<<<num_blocks, CUDA_ACC_BLOCK_SIZE, 0, stream>>>(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset);
}

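// backend entry point for the acc op: dst = src0 with src1 accumulated into
// the view of dst described by op_params (nb1, nb2, offset)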
void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    const float * src0_d = (const float *)src0->data;
    const float * src1_d = (const float *)src1->data;
    float * dst_d = (float *)dst->data;

    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->ne[3] == 1); // only up to 3 non-trivial dimensions are handled

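    // the view parameters arrive through dst->op_params in bytes; divide by
    // sizeof(float) == 4 to convert them to element counts for the kernel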
    int nb1 = dst->op_params[0] / 4; // row stride of the view, in elements
    int nb2 = dst->op_params[1] / 4; // plane stride of the view, in elements
    // dst->op_params[2] (nb3) goes unused here: dst->ne[3] == 1 is asserted above
    int offset = dst->op_params[3] / 4; // start of the view, in elements

    acc_f32_cuda(src0_d, src1_d, dst_d, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], nb1, nb2, offset, stream);
}