make sure KQ_pos is not reallocated in finetune
commit 1eb4de0f0d
parent 70e4a997ae
1 changed file with 2 additions and 0 deletions
@@ -787,6 +787,8 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, one));
     GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL);
     ggml_allocr_alloc(alloc, t36->grad);
+    // KQ_pos
+    ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, one));
 
     // make sure base model tensors data cannot be used in viewable operations
     ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, model->tok_embeddings, one));
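Context on the pattern above: ggml's graph allocator (ggml_allocr) reuses a tensor's buffer as soon as no later node in the graph reads it. KQ_pos is an input tensor whose data is written from outside the graph, so if the allocator recycled its memory during graph execution, the position values would be clobbered. Appending a no-op in-place scale by 1.0 gives the tensor one more reader, which pins its buffer for the lifetime of the graph. A minimal sketch of that keep-alive trick, assuming the ggml API of this era (ggml_allocr, tensor-valued scale argument); the pin_tensor helper name is ours, the commit inlines the call:

    #include "ggml.h"

    // Append a dummy node that reads `t`, so the graph allocator treats
    // the tensor as still in use and does not hand its buffer to another
    // tensor. `one` is a scalar tensor holding 1.0f, so the scale itself
    // is a no-op.
    static void pin_tensor(struct ggml_context * ctx,
                           struct ggml_cgraph  * gb,
                           struct ggml_tensor  * t,
                           struct ggml_tensor  * one) {
        ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t, one));
    }

    // usage, mirroring the hunk above:
    //   struct ggml_tensor * one = ggml_new_f32(ctx, 1.0f);
    //   pin_tensor(ctx, gb, KQ_pos, one);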