From b88957e519c3bdd5cf231522d41163b72e5d5b12 Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 6 Jun 2024 00:35:55 +0200 Subject: [PATCH] rename GGML_USE_OPENBLAS to GGML_USE_BLAS --- CMakeLists.txt | 2 +- Makefile | 6 +++--- ggml-blas.c | 5 +---- ggml.c | 2 +- llama.cpp | 8 ++++---- 5 files changed, 10 insertions(+), 13 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2933e7148..e4eaed070 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -374,7 +374,7 @@ if (LLAMA_BLAS) add_compile_options(${BLAS_LINKER_FLAGS}) - add_compile_definitions(GGML_USE_OPENBLAS) + add_compile_definitions(GGML_USE_BLAS) if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel")) add_compile_definitions(GGML_BLAS_USE_MKL) diff --git a/Makefile b/Makefile index d45b2759b..59dd85336 100644 --- a/Makefile +++ b/Makefile @@ -419,21 +419,21 @@ ifndef LLAMA_NO_OPENMP endif # LLAMA_NO_OPENMP ifdef LLAMA_OPENBLAS - MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas) + MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas) MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas) MK_LDFLAGS += $(shell pkg-config --libs openblas) OBJS += ggml-blas.o endif # LLAMA_OPENBLAS ifdef LLAMA_OPENBLAS64 - MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas64) + MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas64) MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas64) MK_LDFLAGS += $(shell pkg-config --libs openblas64) OBJS += ggml-blas.o endif # LLAMA_OPENBLAS64 ifdef LLAMA_BLIS - MK_CPPFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis + MK_CPPFLAGS += -DGGML_USE_BLAS -I/usr/local/include/blis -I/usr/include/blis MK_LDFLAGS += -lblis -L/usr/local/lib OBJS += ggml-blas.o endif # LLAMA_BLIS diff --git a/ggml-blas.c b/ggml-blas.c index dab6fcf47..6d527c041 100644 --- a/ggml-blas.c +++ 
b/ggml-blas.c @@ -5,7 +5,7 @@ #if defined(GGML_USE_ACCELERATE) # include <Accelerate/Accelerate.h> -#elif defined(GGML_USE_OPENBLAS) +#elif defined(GGML_USE_BLAS) # if defined(GGML_BLAS_USE_MKL) # include <mkl.h> # else @@ -25,9 +25,6 @@ static bool ggml_compute_forward_mul_mat_use_blas(const struct ggml_tensor * dst const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; - //const int64_t ne00 = src0->ne[0]; - //const int64_t ne01 = src0->ne[1]; - const int64_t ne10 = src1->ne[0]; const int64_t ne0 = dst->ne[0]; diff --git a/ggml.c b/ggml.c index 0724b3b49..e4ef34f25 100644 --- a/ggml.c +++ b/ggml.c @@ -22645,7 +22645,7 @@ int ggml_cpu_has_wasm_simd(void) { } int ggml_cpu_has_blas(void) { -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_SYCL) +#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_BLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_SYCL) return 1; #else return 0; diff --git a/llama.cpp b/llama.cpp index 8208786be..57d007f33 100644 --- a/llama.cpp +++ b/llama.cpp @@ -21,7 +21,7 @@ # include "ggml-kompute.h" #endif -#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE) +#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE) # include "ggml-blas.h" #endif @@ -2303,7 +2303,7 @@ struct llama_context { #ifdef GGML_USE_METAL ggml_backend_t backend_metal = nullptr; #endif -#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE) +#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE) ggml_backend_t backend_blas = nullptr; #endif ggml_backend_t backend_cpu = nullptr; @@ -12025,7 +12025,7 @@ static void llama_graph_compute( ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads); ggml_backend_cpu_set_abort_callback(lctx.backend_cpu, lctx.abort_callback, lctx.abort_callback_data); } -#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE) +#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE) if
(lctx.backend_blas != nullptr) { ggml_backend_blas_set_n_threads(lctx.backend_blas, n_threads); } @@ -16240,7 +16240,7 @@ struct llama_context * llama_new_context_with_model( } #endif -#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE) +#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE) ctx->backend_blas = ggml_backend_blas_init(); if (ctx->backend_blas == nullptr) { LLAMA_LOG_WARN("%s: failed to initialize BLAS backend\n", __func__);