jon-chuang 2023-04-27 00:41:12 +08:00
parent b80bc36ab0
commit 9eda98d14b

@@ -2017,7 +2017,8 @@ int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char *
     ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
     ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
-    if (base_t->ne[0] != loraB->ne[1] || base_t->ne[1] != loraA->ne[1]) {
+    // base indim = loraA transposed indim, base outdim = loraB outdim
+    if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
         fprintf(stderr, "%s: incompatible tensor dimensions (outdims: %" PRId64 ", %" PRId64 ", indims: %" PRId64 ", %" PRId64 ");"
                 " are you sure that this adapter is for this model?\n", __func__, base_t->ne[1], loraB->ne[1], base_t->ne[0], loraA->ne[1]);
         return 1;
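
For context: elsewhere in llama_apply_lora_from_file_internal the LoRA delta is built from loraA and loraB (roughly ggml_mul_mat(loraA, loraB)) and then added onto base_t. Under ggml's shape convention, that product has ne[0] == loraA->ne[1] and ne[1] == loraB->ne[1], which is what the corrected check compares against base_t's dimensions. The sketch below is not part of the commit; it only models that shape rule with plain structs, and the rank, the weight shapes, and the delta_shape helper are illustrative assumptions rather than values from a real adapter.

// Sketch only: models the ggml convention that ggml_mul_mat(a, b) yields a
// tensor with ne[0] == a->ne[1] and ne[1] == b->ne[1] (shared inner dimension
// a->ne[0] == b->ne[0]). All shapes below are hypothetical.
#include <cstdint>
#include <cstdio>

struct Shape2D {
    int64_t ne[2]; // ggml-style layout: ne[0] is the fastest-varying dimension
};

// Shape of the LoRA delta produced by something like ggml_mul_mat(loraA, loraB)
// under the convention stated above (hypothetical helper, not a ggml API).
static Shape2D delta_shape(const Shape2D & loraA, const Shape2D & loraB) {
    return { { loraA.ne[1], loraB.ne[1] } };
}

int main() {
    // Hypothetical non-square base weight: indim 4096, outdim 11008.
    const Shape2D base  = { { 4096, 11008 } };
    // Rank-8 LoRA factors: loraA is rank x indim, loraB is rank x outdim.
    const Shape2D loraA = { { 8, 4096 } };
    const Shape2D loraB = { { 8, 11008 } };

    const Shape2D delta = delta_shape(loraA, loraB);

    // Old check compared base->ne[0] with loraB->ne[1] and base->ne[1] with loraA->ne[1].
    const bool old_check_ok = base.ne[0] == loraB.ne[1] && base.ne[1] == loraA.ne[1];
    // Corrected check compares base->ne[0] with loraA->ne[1] and base->ne[1] with loraB->ne[1].
    const bool new_check_ok = base.ne[0] == loraA.ne[1] && base.ne[1] == loraB.ne[1];
    // Adding the delta onto the base weight is only well-formed when the shapes match.
    const bool shapes_match = base.ne[0] == delta.ne[0] && base.ne[1] == delta.ne[1];

    printf("old check: %s, corrected check: %s, base/delta shapes match: %s\n",
           old_check_ok ? "pass" : "fail",
           new_check_ok ? "pass" : "fail",
           shapes_match ? "yes" : "no");
    return 0;
}

With this hypothetical non-square base weight (4096 x 11008), the old comparison rejects a well-formed adapter while the corrected one accepts it, which is the mismatch this commit addresses; for square weights both comparisons happen to agree, which is why the inverted check could go unnoticed.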