// NOTE(review): the lines below are Hugging Face page chrome captured when this
// file was saved from the web UI; kept as comments so the translation unit compiles.
// iris / llama.cpp /ggml /src /ggml-cuda /template-instances /fattn-vec-f16-instance-hs128-q4_1-q4_0.cu
// Mat17892's picture
// llamacpp
// b664585 verified
// raw
// history blame contribute delete
// 179 Bytes
// This file has been autogenerated by generate_cu_files.py, do not edit manually.
#include "../fattn-vec-f16.cuh"
// Instantiates one flash-attention vector (f16 accumulation) kernel case in its
// own translation unit to keep per-file compile time and memory bounded.
// Arguments: 128 is the head size; the two GGML types select the quantization
// formats for the K and V caches — presumably K = Q4_1, V = Q4_0, matching the
// "hs128-q4_1-q4_0" filename (macro defined in fattn-vec-f16.cuh; confirm there).
DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0);