From 6c9864003576d86cddbda8d938fe3c7d9622ff0a Mon Sep 17 00:00:00 2001
From: xaedes
Date: Fri, 18 Aug 2023 20:10:04 +0200
Subject: [PATCH] bug fix: make sure finetune input gradient is allocated at
 begin and kept until end

---
 examples/finetune/finetune.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 375c8e4ba..23d9b2bfd 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1224,6 +1224,10 @@ struct ggml_tensor * llama_build_lora_finetune_graphs(
     // output tensors
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, one));
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, one));
+    // input gradient
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one));
+    GGML_ASSERT(t36->grad->data == NULL && !ggml_is_view(t36->grad));
+    ggml_allocr_alloc(alloc, t36->grad);

     // make sure base model tensors data cannot be used in viewable operations
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, one));