From d72a23e2f1b062abb30223495358b3569e78c7c9 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Wed, 16 Aug 2023 14:37:07 +0300
Subject: [PATCH] gguf : better type names

---
 ggml.c         | 19 ++++++++++---------
 gguf-llama.cpp |  2 +-
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/ggml.c b/ggml.c
index 77f57a3fd..4df1945c0 100644
--- a/ggml.c
+++ b/ggml.c
@@ -18584,17 +18584,18 @@ static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
 static_assert(GGUF_TYPE_COUNT == 10, "GGUF_TYPE_COUNT != 10");
 
 static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
-    [GGUF_TYPE_UINT8]   = "uint8",
-    [GGUF_TYPE_INT8]    = "int8",
-    [GGUF_TYPE_UINT16]  = "uint16",
-    [GGUF_TYPE_INT16]   = "int16",
-    [GGUF_TYPE_UINT32]  = "uint32",
-    [GGUF_TYPE_INT32]   = "int32",
-    [GGUF_TYPE_FLOAT32] = "float32",
+    [GGUF_TYPE_UINT8]   = "u8",
+    [GGUF_TYPE_INT8]    = "i8",
+    [GGUF_TYPE_UINT16]  = "u16",
+    [GGUF_TYPE_INT16]   = "i16",
+    [GGUF_TYPE_UINT32]  = "u32",
+    [GGUF_TYPE_INT32]   = "i32",
+    [GGUF_TYPE_FLOAT32] = "f32",
     [GGUF_TYPE_BOOL]    = "bool",
-    [GGUF_TYPE_STRING]  = "string",
-    [GGUF_TYPE_ARRAY]   = "array",
+    [GGUF_TYPE_STRING]  = "str",
+    [GGUF_TYPE_ARRAY]   = "arr",
 };
+static_assert(GGUF_TYPE_COUNT == 10, "GGUF_TYPE_COUNT != 10");
 
 union gguf_value {
     uint8_t  uint8;
diff --git a/gguf-llama.cpp b/gguf-llama.cpp
index d99d752ec..5007fe8a7 100644
--- a/gguf-llama.cpp
+++ b/gguf-llama.cpp
@@ -1047,7 +1047,7 @@ struct llama_model_loader {
             const char * name = gguf_get_tensor_name(ctx_gguf, i);
             struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, name);
 
-            LLAMA_LOG_INFO("%s: - tensor %3d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str());
+            LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str());
         }
 
         for (int i = 0; i < n_kv; i++) {
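
Usage sketch (illustration only, not part of the patch): a minimal standalone C program that mirrors the updated GGUF_TYPE_NAME table, to show how the shortened names render. The gguf_type enum layout is assumed from the GGUF_TYPE_COUNT == 10 assert in the hunk above; the name strings are copied from the patch, and this program is not part of the ggml API.

    #include <stdio.h>

    // gguf_type layout assumed from the patch's GGUF_TYPE_COUNT == 10 assert.
    enum gguf_type {
        GGUF_TYPE_UINT8,
        GGUF_TYPE_INT8,
        GGUF_TYPE_UINT16,
        GGUF_TYPE_INT16,
        GGUF_TYPE_UINT32,
        GGUF_TYPE_INT32,
        GGUF_TYPE_FLOAT32,
        GGUF_TYPE_BOOL,
        GGUF_TYPE_STRING,
        GGUF_TYPE_ARRAY,
        GGUF_TYPE_COUNT,
    };

    // Short names as introduced by this patch.
    static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
        [GGUF_TYPE_UINT8]   = "u8",
        [GGUF_TYPE_INT8]    = "i8",
        [GGUF_TYPE_UINT16]  = "u16",
        [GGUF_TYPE_INT16]   = "i16",
        [GGUF_TYPE_UINT32]  = "u32",
        [GGUF_TYPE_INT32]   = "i32",
        [GGUF_TYPE_FLOAT32] = "f32",
        [GGUF_TYPE_BOOL]    = "bool",
        [GGUF_TYPE_STRING]  = "str",
        [GGUF_TYPE_ARRAY]   = "arr",
    };

    int main(void) {
        // Print each GGUF metadata value type with its shortened name,
        // padded to 8 columns for aligned output.
        for (int i = 0; i < GGUF_TYPE_COUNT; i++) {
            printf("kv type %2d: %-8s\n", i, GGUF_TYPE_NAME[i]);
        }
        return 0;
    }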