#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"
#include "ggml.h"

#include <cstdio>
#include <string>
#include <vector>

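// scratch buffer for the eval callback: tensor data is copied here when the
// tensor lives in a non-host (e.g. GPU) backend buffer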
struct callback_data {
    std::vector<uint8_t> data;
};

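// format a tensor's dimensions (ne) as "d0, d1, d2, d3"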
static std::string ggml_ne_string(const ggml_tensor * t) {
    std::string str;
    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        str += std::to_string(t->ne[i]);
        if (i + 1 < GGML_MAX_DIMS) {
            str += ", ";
        }
    }
    return str;
}

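// pretty-print a tensor's contents with nested brackets, showing at most n
// leading and n trailing entries per dimension and eliding the middle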
static void ggml_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne, const size_t * nb, int64_t n) {
    GGML_ASSERT(n > 0);
    float sum = 0;
    for (int64_t i3 = 0; i3 < ne[3]; i3++) {
        LOG(" [\n");
        for (int64_t i2 = 0; i2 < ne[2]; i2++) {
            // keep only the first and last n entries of each dimension, eliding the middle
            if (i2 == n && ne[2] > 2*n) {
                LOG(" ..., \n");
                i2 = ne[2] - n;
            }
            LOG(" [\n");
            for (int64_t i1 = 0; i1 < ne[1]; i1++) {
                if (i1 == n && ne[1] > 2*n) {
                    LOG(" ..., \n");
                    i1 = ne[1] - n;
                }
                LOG(" [");
                for (int64_t i0 = 0; i0 < ne[0]; i0++) {
                    if (i0 == n && ne[0] > 2*n) {
                        LOG("..., ");
                        i0 = ne[0] - n;
                    }
                    // nb[] holds strides in bytes, so i is a byte offset into data
                    size_t i = i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
                    float v;
                    if (type == GGML_TYPE_F16) {
                        v = ggml_fp16_to_fp32(*(ggml_fp16_t *) &data[i]);
                    } else if (type == GGML_TYPE_F32) {
                        v = *(float *) &data[i];
                    } else if (type == GGML_TYPE_I32) {
                        v = (float) *(int32_t *) &data[i];
                    } else if (type == GGML_TYPE_I16) {
                        v = (float) *(int16_t *) &data[i];
                    } else if (type == GGML_TYPE_I8) {
                        v = (float) *(int8_t *) &data[i];
                    } else {
                        GGML_ABORT("fatal error");
                    }
                    LOG("%12.4f", v);
                    sum += v;
                    if (i0 < ne[0] - 1) LOG(", ");
                }
                LOG("],\n");
            }
            LOG(" ],\n");
        }
        LOG(" ]\n");
        LOG(" sum = %f\n", sum);
    }
}

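/**
 * GGML operations callback, invoked for each node during graph execution.
 *
 * @param t         the tensor for the current node
 * @param ask       when true, the scheduler is asking whether we want this tensor's
 *                  data; returning true triggers a follow-up call with ask == false
 *                  in which the actual collection happens
 * @param user_data the callback_data passed via params.cb_eval_user_data
 * @return true to receive the data / continue the graph, false otherwise
 */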
static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
    auto * cb_data = (callback_data *) user_data;

    const struct ggml_tensor * src0 = t->src[0];
    const struct ggml_tensor * src1 = t->src[1];

    if (ask) {
        return true; // always retrieve data
    }

    char src1_str[128] = {0};
    if (src1) {
        snprintf(src1_str, sizeof(src1_str), "%s{%s}", src1->name, ggml_ne_string(src1).c_str());
    }

    LOG("%s: %24s = (%s) %10s(%s{%s}, %s) = {%s}\n", __func__,
        t->name, ggml_type_name(t->type), ggml_op_desc(t),
        src0->name, ggml_ne_string(src0).c_str(),
        src1 ? src1_str : "",
        ggml_ne_string(t).c_str());

    // copy the data from device memory if the tensor does not live on the host
    const bool is_host = ggml_backend_buffer_is_host(t->buffer);

    if (!is_host) {
        auto n_bytes = ggml_nbytes(t);
        cb_data->data.resize(n_bytes);
        ggml_backend_tensor_get(t, cb_data->data.data(), 0, n_bytes);
    }

    if (!ggml_is_quantized(t->type)) {
        uint8_t * data = is_host ? (uint8_t *) t->data : cb_data->data.data();
        ggml_print_tensor(data, t->type, t->ne, t->nb, 3);
    }

    return true;
}

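// tokenize the prompt and run a single decode pass; every graph node evaluated
// during the decode goes through the ggml_debug callback above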
static bool run(llama_context * ctx, const common_params & params) {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    const bool add_bos = llama_vocab_get_add_bos(vocab);

    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);

    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
        LOG_ERR("%s : failed to eval\n", __func__);
        return false;
    }

    return true;
}

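// entry point: parse the common llama.cpp arguments, install the eval callback,
// load the model, and evaluate the prompt once, e.g. (illustrative invocation):
//
//   llama-eval-callback -m model.gguf -p "hello world"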
int main(int argc, char ** argv) {
    callback_data cb_data;

    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
        return 1;
    }

    common_init();

    llama_backend_init();
    llama_numa_init(params.numa);

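    // pass the callback to the backend scheduler; it will be invoked for each
    // node during graph computation (warmup is disabled so the callback does
    // not also fire for the warmup run)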
    params.cb_eval = ggml_debug;
    params.cb_eval_user_data = &cb_data;
    params.warmup = false;

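    // load the model and create the context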
    common_init_result llama_init = common_init_from_params(params);

    llama_model * model = llama_init.model.get();
    llama_context * ctx = llama_init.context.get();

    if (model == nullptr || ctx == nullptr) {
        LOG_ERR("%s : failed to init\n", __func__);
        return 1;
    }

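    // print system information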
    {
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
        LOG_INF("\n");
    }

    bool OK = run(ctx, params);
    if (!OK) {
        return 1;
    }

    LOG("\n");
    llama_perf_context_print(ctx);

    llama_backend_free();

    return 0;
}