diff --git a/ggml-backend.h b/ggml-backend.h
index c097cbcbe..f13c69bff 100644
--- a/ggml-backend.h
+++ b/ggml-backend.h
@@ -80,6 +80,7 @@ extern "C" {
     //
     // CPU backend
     //
+
     GGML_API ggml_backend_t ggml_backend_cpu_init(void);
 
     GGML_API GGML_CALL bool ggml_backend_is_cpu                (ggml_backend_t backend);
diff --git a/llama.cpp b/llama.cpp
index e1ac2600d..9876e5cee 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1948,7 +1948,7 @@ struct llama_context {
     std::vector<uint8_t> buf_compute_meta;
     ggml_backend_sched_t sched = nullptr;
 
-    ggml_abort_callback abort_callback = nullptr;
+    ggml_abort_callback abort_callback      = nullptr;
     void *              abort_callback_data = nullptr;
 
     // input tensors