From bead7d47fbe90bd333e5d911beae55b25aba3ec8 Mon Sep 17 00:00:00 2001
From: Max Krasnyansky
Date: Tue, 27 Aug 2024 22:33:03 -0700
Subject: [PATCH] threadpool: minor indent fixes

---
 ggml/src/ggml-backend.c | 8 ++++----
 src/llama.cpp           | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/ggml/src/ggml-backend.c b/ggml/src/ggml-backend.c
index 99ec15a0f..04c7feb89 100644
--- a/ggml/src/ggml-backend.c
+++ b/ggml/src/ggml-backend.c
@@ -722,11 +722,11 @@ ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
 #endif
 
 struct ggml_backend_cpu_context {
-    int n_threads;
-    ggml_threadpool_t threadpool;
+    int                 n_threads;
+    ggml_threadpool_t   threadpool;
 
-    void * work_data;
-    size_t work_size;
+    void *              work_data;
+    size_t              work_size;
 
     ggml_abort_callback abort_callback;
     void *              abort_callback_data;
diff --git a/src/llama.cpp b/src/llama.cpp
index 9bf6e22af..fe1942c5d 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -15497,9 +15497,9 @@ static void llama_output_reorder(struct llama_context * ctx) {
 }
 
 static void llama_graph_compute(
-        llama_context & lctx,
-          ggml_cgraph * gf,
-                  int   n_threads,
+          llama_context & lctx,
+            ggml_cgraph * gf,
+                    int   n_threads,
         ggml_threadpool * threadpool) {
 #ifdef GGML_USE_METAL
     if (ggml_backend_is_metal(lctx.backend_metal)) {