From 81dabd8edd4c74abb668d6f8211e4f42cd7c62c0 Mon Sep 17 00:00:00 2001
From: Andrew Godfrey
Date: Mon, 23 Oct 2023 19:28:05 -0700
Subject: [PATCH] Tweak an error message

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 61f30c398..9d26b2dd2 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -8605,7 +8605,7 @@ static int llama_apply_lora_from_file_internal(
         if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
             if (dest_t->type != GGML_TYPE_F16) {
                 throw std::runtime_error(format(
-                    "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models", __func__));
+                    "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models. dest_t->type: %d", __func__, dest_t->type));
             }
             offload_func = ggml_cuda_assign_buffers;
             offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;