diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp
index 11cc2bd43..6d518022d 100644
--- a/ggml/src/gguf.cpp
+++ b/ggml/src/gguf.cpp
@@ -407,7 +407,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
             }
             for (size_t j = 0; ok && j < ctx->kv.size(); ++j) {
                 if (key == ctx->kv[j].key) {
-                    fprintf(stderr, "%s: duplicate key '%s' for tensors %" PRIi64 " and %" PRIi64 " \n", __func__, key.c_str(), j, i);
+                    fprintf(stderr, "%s: duplicate key '%s' for tensors %zu and %" PRIi64 " \n", __func__, key.c_str(), j, i);
                     ok = false;
                 }
             }
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index 33e4ae137..1c4e30878 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -554,7 +554,7 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap,
             const enum gguf_type type = gguf_get_kv_type(meta.get(), i);
             const std::string type_name =
                 type == GGUF_TYPE_ARRAY
-                ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i))
+                ? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i))
                 : gguf_type_name(type);
 
             std::string value = gguf_kv_to_str(meta.get(), i);
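
Rationale for the format-specifier changes: in the gguf.cpp hunk the inner loop index `j` is declared `size_t`, so it needs the `%zu` conversion, while the outer index `i` stays `int64_t` and keeps `PRIi64`; in the llama-model-loader.cpp hunk, `%zu` likewise matches a `size_t` argument from `gguf_get_arr_n` (inferred from the change itself), where `%d` expected an `int`. A minimal standalone sketch of the matching conversions, not part of the patch:

#include <cstdio>
#include <cinttypes>

int main() {
    size_t  j = 3;   // %zu is the printf conversion for size_t
    int64_t i = 7;   // PRIi64 expands to the conversion for int64_t
    std::printf("duplicate key for tensors %zu and %" PRIi64 " \n", j, i);
    return 0;
}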