From 970f1515c9c5975a11b8da333c9f09de262b8a9c Mon Sep 17 00:00:00 2001
From: FirstTimeEZ <179362031+FirstTimeEZ@users.noreply.github.com>
Date: Fri, 15 Nov 2024 01:23:17 +1300
Subject: [PATCH] total size of tensors is size_t

total size of tensors is size_t
total number of parameters is int64_t
---
 include/llama.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/llama.h b/include/llama.h
index 5e742642e..0fe725108 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -469,10 +469,10 @@ extern "C" {
     LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
 
     // Returns the total size of all the tensors in the model in bytes
-    LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
+    LLAMA_API size_t llama_model_size(const struct llama_model * model);
 
     // Returns the total number of parameters in the model
-    LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
+    LLAMA_API int64_t llama_model_n_params(const struct llama_model * model);
 
     // Get a llama model tensor
     LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);
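
A minimal caller sketch (not part of the patch) showing how the new return types reach user code: size_t is printed with %zu and int64_t with PRId64, rather than PRIu64 for both as before. It assumes the llama.cpp API as of this revision (llama_model_default_params, llama_load_model_from_file, llama_free_model); the model path is a placeholder.

#include <inttypes.h>
#include <stdio.h>

#include "llama.h"

int main(void) {
    struct llama_model_params mparams = llama_model_default_params();

    // "model.gguf" is a placeholder path for illustration only
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // llama_model_size now returns size_t, llama_model_n_params returns int64_t
    printf("tensor bytes: %zu\n", llama_model_size(model));
    printf("parameters:   %" PRId64 "\n", llama_model_n_params(model));

    llama_free_model(model);
    return 0;
}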