From 1eb4de0f0d3d3e95be06f7a20c8b40799ec53de6 Mon Sep 17 00:00:00 2001 From: xaedes Date: Fri, 29 Sep 2023 16:28:25 +0200 Subject: [PATCH] make sure KQ_pos is not reallocated in finetune --- examples/finetune/finetune.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index 23ff1c10f..8ca1874da 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -787,6 +787,8 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one)); GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL); ggml_allocr_alloc(alloc, t36->grad); + // make sure KQ_pos is not reallocated by referencing it in the backward graph + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, one)); // make sure base model tensors data cannot be used in viewable operations ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, one));