From a1f82956f76e61f7abae9776af8281db1bf1f426 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Wed, 8 Jan 2025 15:14:19 +0100
Subject: [PATCH] add some hints

---
 src/llama-adapter.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp
index c8aaa39a6..d4879b778 100644
--- a/src/llama-adapter.cpp
+++ b/src/llama-adapter.cpp
@@ -264,7 +264,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char
         // device buft and device ctx
         auto * model_tensor = llama_model_get_tensor(model, name.c_str());
         if (!model_tensor) {
-            throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model");
+            throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)");
         }
 
         struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer));
@@ -272,11 +272,11 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char
         if (is_token_embd) {
             // expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd()
             if (model_tensor->ne[0] != w.b->ne[1] || model_tensor->ne[1] != w.a->ne[1]) {
-                throw std::runtime_error("tensor '" + name + "' has incorrect shape");
+                throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
             }
         } else {
             if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
-                throw std::runtime_error("tensor '" + name + "' has incorrect shape");
+                throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
             }
             if (w.a->ne[1] != w.b->ne[0]) {
                 throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
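
Note on what the new hints guard: the checks in this hunk require lora_a to share ne[0] with the base weight and lora_b to share ne[1], with a.ne[1] == b.ne[0] being the LoRA rank (ggml stores the innermost dimension in ne[0]); an adapter exported against a different base model fails the first comparison, which is why the "maybe wrong base model?" hint is attached there. Below is a minimal, self-contained sketch of the same validation logic, not llama.cpp code; the tensor_dims struct and validate_lora_shapes helper are hypothetical names used only for illustration:

    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Hypothetical stand-in for the first two dims of a ggml tensor (ne[0] is innermost).
    struct tensor_dims {
        int64_t ne[2];
    };

    // Mirrors the shape checks from llama_lora_adapter_init_impl() in the patch above.
    static void validate_lora_shapes(const std::string & name, const tensor_dims & model_tensor,
                                     const tensor_dims & a, const tensor_dims & b, bool is_token_embd) {
        if (is_token_embd) {
            // token_embd is used non-transposed, so A and B swap roles here
            if (model_tensor.ne[0] != b.ne[1] || model_tensor.ne[1] != a.ne[1]) {
                throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
            }
        } else {
            if (model_tensor.ne[0] != a.ne[0] || model_tensor.ne[1] != b.ne[1]) {
                throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
            }
            if (a.ne[1] != b.ne[0]) { // the shared dim of A and B is the LoRA rank r
                throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
            }
        }
    }

    int main() {
        // A (4096 x 32000) weight with a rank-16 adapter passes: a = (4096, 16), b = (16, 32000).
        validate_lora_shapes("output.weight", {{4096, 32000}}, {{4096, 16}}, {{16, 32000}}, false);
        // The same adapter against a 5120-wide base weight (wrong base model) throws:
        try {
            validate_lora_shapes("output.weight", {{5120, 32000}}, {{4096, 16}}, {{16, 32000}}, false);
        } catch (const std::exception & e) {
            std::printf("%s\n", e.what()); // prints the hint added by this patch
        }
        return 0;
    }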