sync : ggml (backend v2) (#3912)
* sync : ggml (backend v2) (wip)
* sync : migrate examples and llama.cpp to dynamic graphs (wip)
* sync : update tests + fix max op params to 64 ggml-ci
* sync : ggml-cuda ggml-ci
* llama : fix save/load state context size ggml-ci
* sync : try to fix build on tvOS
* sync : pass custom graph sizes in training examples
* sync : update graph copies to new ggml API
* sync : update sync-ggml.sh with new files
* scripts : fix header in sync script
* train : fix context size calculations
* llama : increase inference graph size up to 4096 nodes
* train : allocate grads for backward graphs
* train : allocate grads for gb_tmp
parent bb50a792ec
commit 4760e7cc0b
22 changed files with 1994 additions and 864 deletions
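The common thread in these items is ggml's move from fixed-size graph objects to graphs allocated dynamically inside a ggml_context, with node capacity and gradient storage chosen at creation time. A minimal sketch of the new allocation call, assuming the post-sync ggml.h API; the helper name is hypothetical, and the 4096 figure is taken from the inference-graph item above:

#include "ggml.h"

// Hypothetical helper (not from the commit): allocate an inference graph
// sized for up to 4096 nodes, the new llama.cpp inference graph limit.
// The third argument requests per-node gradient storage, so inference
// passes false; training code passes true (see the hunk below).
static struct ggml_cgraph * new_inference_graph(struct ggml_context * ctx) {
    return ggml_new_graph_custom(ctx, 4096, /*grads=*/false);
}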
@@ -231,9 +231,10 @@ static bool check_gradient(
         printf("GGML_N_THREADS = %d\n", n_threads);
     }
 
-    struct ggml_cgraph * gf = ggml_build_forward_ctx(ctx0, f);
-    struct ggml_cgraph * gb = ggml_new_graph(ctx0);
-    *gb = *gf;
+    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, GGML_DEFAULT_GRAPH_SIZE, true);
+    struct ggml_cgraph * gb = ggml_new_graph_custom(ctx0, GGML_DEFAULT_GRAPH_SIZE, true);
+    ggml_build_forward_expand(gf, f);
+    ggml_graph_cpy(gf, gb);
     ggml_build_backward_expand(ctx0, gf, gb, false);
 
     ggml_graph_compute_with_ctx(ctx0, gf, n_threads);
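To see the whole new flow in one place, here is a minimal self-contained sketch in the spirit of check_gradient: build a forward graph for a scalar loss, copy it into a backward graph, expand the gradient ops, and run both passes. This assumes the post-sync API only; the toy loss f = sum(x*x), the buffer size, and the thread count are illustrative, not taken from the commit.

#include <stdio.h>
#include "ggml.h"

int main(void) {
    // context that owns the tensors, graphs, and gradient storage
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx0 = ggml_init(params);

    // toy loss f = sum(x*x); x is marked as a trainable parameter
    struct ggml_tensor * x = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 8);
    ggml_set_param(ctx0, x);
    ggml_set_f32(x, 2.0f);
    struct ggml_tensor * f = ggml_sum(ctx0, ggml_mul(ctx0, x, x));

    // graphs are now allocated explicitly; `true` reserves gradient slots
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, GGML_DEFAULT_GRAPH_SIZE, true);
    struct ggml_cgraph * gb = ggml_new_graph_custom(ctx0, GGML_DEFAULT_GRAPH_SIZE, true);

    ggml_build_forward_expand(gf, f);                 // forward graph ending in f
    ggml_graph_cpy(gf, gb);                           // backward graph starts as a copy
    ggml_build_backward_expand(ctx0, gf, gb, false);  // append gradient ops to gb

    ggml_graph_compute_with_ctx(ctx0, gf, /*n_threads=*/1);  // forward pass

    ggml_graph_reset(gf);         // zero all gradients
    ggml_set_f32(f->grad, 1.0f);  // seed df/df = 1
    ggml_graph_compute_with_ctx(ctx0, gb, /*n_threads=*/1);  // backward pass

    // x->grad now holds df/dx = 2*x = 4.0 for every element
    printf("df/dx[0] = %f\n", ggml_get_f32_1d(x->grad, 0));

    ggml_free(ctx0);
    return 0;
}

Copying gf into gb before expanding is what the removed *gb = *gf assignment used to do implicitly; with dynamically sized graphs the copy has to go through ggml_graph_cpy. The final false is the keep flag of ggml_build_backward_expand, which (if my reading of the ggml source is right) detaches gradient tensors from the forward graph when set.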