bug fix: make sure the finetune input gradient is allocated at the beginning and kept until the end

xaedes 2023-08-18 20:10:04 +02:00
parent 63cb374a99
commit 6c98640035


@@ -1224,6 +1224,10 @@ struct ggml_tensor * llama_build_lora_finetune_graphs(
     // output tensors
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t35, one));
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36, one));
+    // input gradient
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one));
+    GGML_ASSERT(t36->grad->data == NULL && !ggml_is_view(t36->grad));
+    ggml_allocr_alloc(alloc, t36->grad);
     // make sure base model tensors data cannot be used in viewable operations
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, one));
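
For context, the pattern behind the fix: with the ggml graph allocator (ggml_allocr), a tensor's buffer is recycled as soon as its last use in the graph has been processed, and a tensor that is not an output of the graph may not get a stable allocation at all. The patch therefore allocates t36->grad explicitly up front and expands a no-op scale-by-one of it into the backward graph, so its last use sits at the very end of the graph and its memory stays valid until the input gradient is read. Below is a minimal, self-contained sketch of that pattern, assuming a ggml revision where ggml_allocr, ggml_new_graph, and the tensor-argument ggml_scale_inplace coexist; the tensor names, sizes, and the toy graph are hypothetical, only the keep-alive trick mirrors the commit.

#include "ggml.h"
#include "ggml-alloc.h"
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    // context holds tensor/graph metadata only; data placement is left to ggml_allocr
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(params);

    // compute buffer managed by the graph allocator
    const size_t buf_size = 16*1024*1024;
    void * buf = malloc(buf_size);
    struct ggml_allocr * alloc = ggml_allocr_new(buf, buf_size, 32);

    // hypothetical stand-ins for the finetune input and its gradient (t36, t36->grad)
    struct ggml_tensor * input = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    struct ggml_tensor * grad  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);

    // scalar 1.0f used for the no-op scale (the `one` tensor in the patch)
    struct ggml_tensor * one = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
    ggml_allocr_alloc(alloc, one);
    ggml_set_f32(one, 1.0f);

    // a toy graph; on its own it would let the allocator recycle grad's memory
    // as soon as this node has been processed
    struct ggml_tensor * out = ggml_add(ctx, input, grad);

    struct ggml_cgraph * gb = ggml_new_graph(ctx);
    ggml_build_forward_expand(gb, out);

    // the pattern from the commit: pin grad until the end of the graph with a
    // no-op scale-by-one, and allocate it explicitly before the graph pass
    // hands out (and reuses) the remaining offsets
    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, grad, one));
    GGML_ASSERT(grad->data == NULL);   // the patch additionally asserts it is not a view
    ggml_allocr_alloc(alloc, grad);

    ggml_allocr_alloc(alloc, input);
    ggml_allocr_alloc_graph(alloc, gb);

    // grad->data now points into buf and stays valid for the whole graph
    printf("grad allocated at %p\n", grad->data);

    ggml_allocr_free(alloc);
    ggml_free(ctx);
    free(buf);
    return 0;
}

Without the scale-by-one expand, the allocator would consider grad dead after its only consumer and could hand the same offset to a later node, which is the symptom this commit fixes for the finetune input gradient.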