diff --git a/convert_lora_to_gguf.py b/convert_lora_to_gguf.py
index 83415ba8e..8b381d2b4 100755
--- a/convert_lora_to_gguf.py
+++ b/convert_lora_to_gguf.py
@@ -433,8 +433,8 @@ if __name__ == '__main__':
                 assert isinstance(dest_data, LoraTorchTensor)
                 lora_a, lora_b = dest_data.get_lora_A_B()
 
-                # token_embd A and B are already transposed by mergekit-extract-lora
-                # we transpose A back again because it is used by llm_build_inp_embd()
+                # note: mergekit-extract-lora flips and transposes A and B
+                # here we only need to transpose token_embd.lora_a, see llm_build_inp_embd()
                 if "token_embd.weight" in dest_name:
                     lora_a = lora_a.T
 
diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp
index f5a6c24ec..c8aaa39a6 100644
--- a/src/llama-adapter.cpp
+++ b/src/llama-adapter.cpp
@@ -270,7 +270,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char
         struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer));
         // validate tensor shape
         if (is_token_embd) {
-            // expect B to be transposed, see llm_build_inp_embd()
+            // expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd()
            if (model_tensor->ne[0] != w.b->ne[1] || model_tensor->ne[1] != w.a->ne[1]) {
                throw std::runtime_error("tensor '" + name + "' has incorrect shape");
            }
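For reference, here is a rough numpy sketch (not part of the patch; sizes, variable names, and the numpy translation are illustrative) of the shape convention the updated comments describe: for token_embd the A/B roles are flipped relative to a regular linear-layer LoRA, lora_a ends up with ggml ne[1] == n_vocab so it can be indexed per token id, and lora_b stays non-transposed so the matmul in llm_build_inp_embd produces n_embd-sized rows. The C++ shape check is re-expressed here in row-major (torch/numpy) shapes, assuming ggml's ne[0]/ne[1] correspond to the last/second-to-last torch dimensions.

    # Illustrative sketch only, with made-up dimensions.
    import numpy as np

    n_vocab, n_embd, rank = 1000, 64, 8

    # Base embedding matrix, same layout as token_embd.weight in torch:
    # (n_vocab, n_embd) -> ggml ne = (n_embd, n_vocab).
    token_embd = np.random.randn(n_vocab, n_embd).astype(np.float32)

    # Flipped roles for token_embd, as the updated comments describe:
    # lora_a: (n_vocab, rank) -> ggml ne = (rank, n_vocab), one row per token id
    # lora_b: (n_embd, rank)  -> ggml ne = (rank, n_embd), left non-transposed
    lora_a = np.random.randn(n_vocab, rank).astype(np.float32)
    lora_b = np.random.randn(n_embd, rank).astype(np.float32)

    # Shape check from llama-adapter.cpp, rewritten with numpy shapes:
    # model_tensor->ne[0] == w.b->ne[1]  <=>  token_embd.shape[1] == lora_b.shape[0]
    # model_tensor->ne[1] == w.a->ne[1]  <=>  token_embd.shape[0] == lora_a.shape[0]
    assert token_embd.shape[1] == lora_b.shape[0]
    assert token_embd.shape[0] == lora_a.shape[0]

    # Roughly what llm_build_inp_embd does for the adapter path:
    # ggml_get_rows(lora_a, tokens), then ggml_mul_mat with lora_b, scaled.
    tokens = np.array([1, 5, 42])
    scale = 1.0
    delta = scale * (lora_a[tokens] @ lora_b.T)   # (n_tokens, n_embd)
    inp_embd = token_embd[tokens] + delta         # LoRA-adjusted input embeddings

This is why the converter only transposes token_embd.lora_a: with that single transpose, the per-token row lookup and the untouched lora_b line up with the shape check above.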