make sure KQ_pos is not reallocated in finetune

xaedes 2023-09-29 16:28:25 +02:00
parent 70e4a997ae
commit 1eb4de0f0d

@@ -787,6 +787,8 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one));
     GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL);
     ggml_allocr_alloc(alloc, t36->grad);
+    // KQ_pos
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, one));
     // make sure base model tensors data cannot be used in viewable operations
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, one));
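
For context, the change relies on a ggml graph-allocator trick: scaling a tensor by one computes nothing new, but it appends a node that reads the tensor, so ggml_allocr treats the tensor's buffer as live for the rest of the graph and will not reuse it for other tensors. Below is a minimal sketch of that pattern, assuming the ggml/ggml-alloc API of this era, where ggml_scale_inplace takes a 1-element f32 scale tensor; keep_alive is a hypothetical helper name, not part of the commit.

    #include "ggml.h"

    // Hypothetical helper illustrating the pattern used in the diff:
    // append a dummy dependency so the graph allocator keeps `t` alive
    // instead of handing its buffer to later tensors.
    // `one` is a 1-element f32 tensor, e.g. ggml_new_f32(ctx, 1.0f).
    static void keep_alive(struct ggml_context * ctx,
                           struct ggml_cgraph  * gb,
                           struct ggml_tensor  * t,
                           struct ggml_tensor  * one) {
        // scale-by-one is a no-op numerically, but the resulting node
        // depends on `t`, extending its lifetime across the graph `gb`
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t, one));
    }

In the finetune graph this matters because KQ_pos holds the RoPE positions used by every layer; without a node pinning it, the allocator could reallocate its storage mid-graph.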