From 9eda98d14be77de1b898350b49af7cc1717a5e35 Mon Sep 17 00:00:00 2001
From: jon-chuang
Date: Thu, 27 Apr 2023 00:41:12 +0800
Subject: [PATCH] fix

---
 llama.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 8c1385ad6..ef08e3afd 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2017,7 +2017,8 @@ int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char *
         ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
         ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
 
-        if (base_t->ne[0] != loraB->ne[1] || base_t->ne[1] != loraA->ne[1]) {
+        // base indim = loraA transposed indim, base outdim = loraB outdim
+        if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
             fprintf(stderr, "%s: incompatible tensor dimensions (outdims: %" PRId64 ", %" PRId64 ", indims: %" PRId64 ", %" PRId64 ");"
                     " are you sure that this adapter is for this model?\n", __func__,
                     base_t->ne[1], loraB->ne[1], base_t->ne[0], loraA->ne[1]);
             return 1;
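
Note: the corrected condition encodes the LoRA shape invariant that the adapter delta loraB * loraA^T must have the same shape as the base weight: loraA is stored transposed (rank x indim), so the base tensor's indim (ne[0]) must match loraA->ne[1], and its outdim (ne[1]) must match loraB->ne[1]. Below is a minimal standalone sketch of that check; the toy_tensor struct and the example dimensions are hypothetical illustrations, not the ggml/llama.cpp API.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the first two entries of ggml_tensor::ne.
// For a 2-D weight, ne[0] is the input dimension, ne[1] the output dimension.
struct toy_tensor {
    int64_t ne[2];
};

// Mirrors the corrected check: base indim == loraA transposed indim,
// base outdim == loraB outdim, so loraB * loraA^T has the shape of base.
static bool lora_shapes_compatible(const toy_tensor & base,
                                   const toy_tensor & loraA,
                                   const toy_tensor & loraB) {
    return base.ne[0] == loraA.ne[1] && base.ne[1] == loraB.ne[1];
}

int main() {
    const int64_t n_in = 4096, n_out = 4096, r = 8; // r = LoRA rank (example values)
    toy_tensor base  = {{n_in, n_out}};
    toy_tensor loraA = {{r, n_in}};  // stored transposed: rank x indim
    toy_tensor loraB = {{r, n_out}}; // rank x outdim
    printf("compatible: %d\n", lora_shapes_compatible(base, loraA, loraB));
    return 0;
}

Under this reading, the pre-patch condition compared base ne[0] against loraB->ne[1] and base ne[1] against loraA->ne[1], which only passes for square weights and wrongly rejects valid adapters for non-square ones; the error message's argument order already assumed the corrected pairing.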