From de3d90dd3b3119f8b7d1189b61603452285b4c98 Mon Sep 17 00:00:00 2001
From: Michael Podvitskiy
Date: Mon, 26 Feb 2024 14:59:54 +0100
Subject: [PATCH] format fix

---
 ggml-backend.h | 1 +
 llama.cpp      | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/ggml-backend.h b/ggml-backend.h
index c097cbcbe..f13c69bff 100644
--- a/ggml-backend.h
+++ b/ggml-backend.h
@@ -80,6 +80,7 @@ extern "C" {
     //
     // CPU backend
     //
+
     GGML_API ggml_backend_t ggml_backend_cpu_init(void);
 
     GGML_API GGML_CALL bool ggml_backend_is_cpu                (ggml_backend_t backend);
diff --git a/llama.cpp b/llama.cpp
index e1ac2600d..9876e5cee 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1948,7 +1948,7 @@ struct llama_context {
     std::vector<uint8_t> buf_compute_meta;
     ggml_backend_sched_t sched = nullptr;
 
-    ggml_abort_callback abort_callback = nullptr;
+    ggml_abort_callback abort_callback      = nullptr;
     void *              abort_callback_data = nullptr;
 
     // input tensors
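
Editor's note, not part of the patch: the hunks above touch the CPU backend section of ggml-backend.h and the abort_callback/abort_callback_data pair stored in llama_context. The sketch below shows how such a callback pair is typically wired to the CPU backend. It assumes ggml-backend.h from this branch is on the include path and that ggml_backend_cpu_set_abort_callback (added in the same line of work) is available; should_abort and stop_requested are hypothetical names used only for illustration.

    // Minimal sketch: request cancellation of CPU backend computation via an
    // abort callback. Assumes ggml-backend.h is available; names below are
    // illustrative, not from the patch.
    #include <stdbool.h>
    #include <stdio.h>
    #include "ggml-backend.h"

    // ggml_abort_callback: returning true aborts the current graph computation.
    static bool should_abort(void * data) {
        const bool * stop_requested = (const bool *) data;  // passed as abort_callback_data
        return *stop_requested;
    }

    int main(void) {
        bool stop_requested = false;

        ggml_backend_t cpu = ggml_backend_cpu_init();       // declaration shown in the hunk above
        if (!ggml_backend_is_cpu(cpu)) {
            fprintf(stderr, "unexpected backend\n");
            return 1;
        }

        ggml_backend_cpu_set_n_threads(cpu, 4);
        // Register callback + user data; llama_context stores the same kind of pair.
        ggml_backend_cpu_set_abort_callback(cpu, should_abort, &stop_requested);

        // ... build and compute a ggml graph here; setting stop_requested = true
        // from another thread would make the next callback invocation return true ...

        ggml_backend_free(cpu);
        return 0;
    }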