sync : update graph copies to new ggml API
parent 16e819d53c
commit e2349ec13b
2 changed files with 2 additions and 2 deletions
@@ -772,7 +772,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
         if (enable_checkpointing) {
             ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints.data(), (int) checkpoints.size());
         } else {
-            *gb = *gf;
+            ggml_graph_cpy(gf, gb);
             ggml_build_backward_expand(ctx, gf, gb, true);
         }
@@ -436,7 +436,7 @@ static struct ggml_tensor * llama_build_train_graphs(
         if (enable_checkpointing) {
             ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints.data(), (int) checkpoints.size());
         } else {
-            *gb = *gf;
+            ggml_graph_cpy(gf, gb);
             ggml_build_backward_expand(ctx, gf, gb, true);
         }
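Both hunks make the same change: the shallow struct assignment `*gb = *gf` is replaced with `ggml_graph_cpy(gf, gb)`. A minimal sketch of the intended pattern under the new ggml API follows; the helper function and its comments are illustrative assumptions, not part of this commit.

```c
#include "ggml.h"

// Sketch: seed the backward graph from the forward graph under the new API.
// With dynamically sized graphs, ggml_cgraph holds pointers to externally
// allocated node/leaf/grad arrays, so `*gb = *gf` would only copy those
// pointers and leave both graphs aliasing the same storage. ggml_graph_cpy
// copies the entries into the destination graph's own arrays instead.
static void build_backward(struct ggml_context * ctx,
                           struct ggml_cgraph  * gf,
                           struct ggml_cgraph  * gb) {
    ggml_graph_cpy(gf, gb);                          // gb starts as a copy of gf
    ggml_build_backward_expand(ctx, gf, gb, true);   // append backward ops to gb
}
```

For the copy to be valid, the destination graph must have been created with at least as much capacity as the source, e.g. via `ggml_new_graph_custom(ctx, size, /*grads=*/true)`.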