From 15176afd7da6f45ad011dc2ce572b5441bd629c6 Mon Sep 17 00:00:00 2001 From: FirstTimeEZ <179362031+FirstTimeEZ@users.noreply.github.com> Date: Sat, 16 Nov 2024 13:33:40 +1300 Subject: [PATCH] llama_model_size is explicitly sized as uint64_t previously this was an implicit cast --- include/llama.h | 2 +- src/llama.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/llama.h b/include/llama.h index 02d271352..5e742642e 100644 --- a/include/llama.h +++ b/include/llama.h @@ -469,7 +469,7 @@ extern "C" { LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size); // Returns the total size of all the tensors in the model in bytes - LLAMA_API size_t llama_model_size(const struct llama_model * model); + LLAMA_API uint64_t llama_model_size(const struct llama_model * model); // Returns the total number of parameters in the model LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); diff --git a/src/llama.cpp b/src/llama.cpp index 40562a339..dc5dfba0c 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -19965,7 +19965,7 @@ int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t bu llama_model_ftype_name(model->ftype).c_str()); } -size_t llama_model_size(const struct llama_model * model) { +uint64_t llama_model_size(const struct llama_model * model) { return model->n_bytes; }