From 7e11fe088077501136d9861954407ef2a32aea54 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Mon, 29 Jan 2024 12:52:54 -0500
Subject: [PATCH] kompute : remove llama_load_model_from_file_internal

---
 llama.cpp | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index cd0122f8e..9631506c6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4146,6 +4146,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
             )
         )) {
             // disable Vulkan due to unsupported model architecture or quantization type
+            // TODO(cebtenzzre): propagate this error outside of llama_load_model_from_file
             params.n_gpu_layers = 0;
         }
 #endif
@@ -10118,11 +10119,9 @@ int64_t llama_time_us(void) {
     return ggml_time_us();
 }
 
-static struct llama_model * llama_load_model_from_file_internal(
-    const char * path_model, struct llama_model_params * params_p
-) {
-    auto & params = *params_p;
-
+struct llama_model * llama_load_model_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
     ggml_time_init();
 
     llama_model * model = new llama_model;
@@ -10159,10 +10158,6 @@ static struct llama_model * llama_load_model_from_file_internal(
     return model;
 }
 
-struct llama_model * llama_load_model_from_file(const char * path_model, struct llama_model_params params) {
-    return llama_load_model_from_file_internal(path_model, &params);
-}
-
 void llama_free_model(struct llama_model * model) {
     delete model;
 }
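
The caller-facing effect of this change is that llama_load_model_from_file now
takes llama_model_params by value itself, rather than forwarding a pointer to
the removed internal wrapper. A minimal sketch of a caller, assuming the
llama.h public API of this era (llama_model_default_params, llama_free_model);
the model path is illustrative only:

    #include "llama.h"

    int main(void) {
        // Start from the library defaults and adjust as needed.
        struct llama_model_params mparams = llama_model_default_params();
        mparams.n_gpu_layers = 99; // request GPU offload; per the hunk at line 4146,
                                   // a Kompute build may still reset this to 0 for an
                                   // unsupported architecture or quantization type

        // Params are passed by value; no pointer indirection is needed.
        struct llama_model * model =
            llama_load_model_from_file("models/7B/ggml-model-q4_0.gguf", mparams);
        if (model == NULL) {
            return 1;
        }

        llama_free_model(model);
        return 0;
    }

Because the function receives its own copy of the params struct, it can mutate
fields such as n_gpu_layers internally (as the Kompute fallback does) without
affecting the caller's copy, which is why the TODO about propagating that
error outside llama_load_model_from_file remains open.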