From 229cd05f0df3576ae18b38099239f4cad0bc68ac Mon Sep 17 00:00:00 2001
From: FirstTimeEZ <179362031+FirstTimeEZ@users.noreply.github.com>
Date: Fri, 15 Nov 2024 00:58:53 +1300
Subject: [PATCH] removes the implicit cast

---
 src/llama.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 40ef7c9f7..206f547ca 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2907,10 +2907,10 @@ struct llama_model {
 
     // for quantize-stats only
    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
 
-    uint64_t n_bytes    = 0;
-    uint64_t n_elements = 0;
+    int64_t n_elements = 0;
+    size_t  n_bytes    = 0;
 
-    int64_t t_load_us = 0;
+    int64_t t_load_us  = 0;
     int64_t t_start_us = 0;
 
     // keep track of loaded lora adapters
@@ -19958,11 +19958,11 @@ int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t bu
         llama_model_ftype_name(model->ftype).c_str());
 }
 
-uint64_t llama_model_size(const struct llama_model *model) {
+size_t llama_model_size(const struct llama_model *model) {
     return model->n_bytes;
 }
 
-uint64_t llama_model_n_params(const struct llama_model *model) {
+int64_t llama_model_n_params(const struct llama_model *model) {
     return model->n_elements;
 }
 