diff --git a/llama.cpp b/llama.cpp
index ef08e3afd..8532a7ba7 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2019,7 +2019,7 @@ int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char *
             // base indim = loraA transposed indim, base outdim = loraB outdim
             if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
-                fprintf(stderr, "%s: incompatible tensor dimensions (outdims: %" PRId64 ", %" PRId64 ", indims: %" PRId64 ", %" PRId64 ");"
+                fprintf(stderr, "%s: incompatible tensor dimensions (outdim: %" PRId64 ", %" PRId64 ", indim: %" PRId64 ", %" PRId64 ");"
                                 " are you sure that this adapter is for this model?\n",
                                 __func__, base_t->ne[1], loraB->ne[1], base_t->ne[0], loraA->ne[1]);
                return 1;
            }