rename GGML_USE_OPENBLAS to GGML_USE_BLAS

This commit is contained in:
slaren 2024-06-06 00:35:55 +02:00
parent 7f58793c56
commit b88957e519
5 changed files with 10 additions and 13 deletions

View file

@@ -374,7 +374,7 @@ if (LLAMA_BLAS)
 add_compile_options(${BLAS_LINKER_FLAGS})
-add_compile_definitions(GGML_USE_OPENBLAS)
+add_compile_definitions(GGML_USE_BLAS)
 if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel"))
 add_compile_definitions(GGML_BLAS_USE_MKL)

View file

@@ -419,21 +419,21 @@ ifndef LLAMA_NO_OPENMP
 endif # LLAMA_NO_OPENMP
 ifdef LLAMA_OPENBLAS
-MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas)
+MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas)
 MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas)
 MK_LDFLAGS += $(shell pkg-config --libs openblas)
 OBJS += ggml-blas.o
 endif # LLAMA_OPENBLAS
 ifdef LLAMA_OPENBLAS64
-MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas64)
+MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas64)
 MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas64)
 MK_LDFLAGS += $(shell pkg-config --libs openblas64)
 OBJS += ggml-blas.o
 endif # LLAMA_OPENBLAS64
 ifdef LLAMA_BLIS
-MK_CPPFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
+MK_CPPFLAGS += -DGGML_USE_BLAS -I/usr/local/include/blis -I/usr/include/blis
 MK_LDFLAGS += -lblis -L/usr/local/lib
 OBJS += ggml-blas.o
 endif # LLAMA_BLIS

View file

@@ -5,7 +5,7 @@
 #if defined(GGML_USE_ACCELERATE)
 # include <Accelerate/Accelerate.h>
-#elif defined(GGML_USE_OPENBLAS)
+#elif defined(GGML_USE_BLAS)
 # if defined(GGML_BLAS_USE_MKL)
 # include <mkl.h>
 # else
@@ -25,9 +25,6 @@ static bool ggml_compute_forward_mul_mat_use_blas(const struct ggml_tensor * dst
const struct ggml_tensor * src0 = dst->src[0];
const struct ggml_tensor * src1 = dst->src[1];
//const int64_t ne00 = src0->ne[0];
//const int64_t ne01 = src0->ne[1];
const int64_t ne10 = src1->ne[0];
const int64_t ne0 = dst->ne[0];

ggml.c (2 changed lines)
View file

@@ -22645,7 +22645,7 @@ int ggml_cpu_has_wasm_simd(void) {
 }
 int ggml_cpu_has_blas(void) {
-#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_SYCL)
+#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_BLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_SYCL)
 return 1;
 #else
 return 0;

View file

@@ -21,7 +21,7 @@
 # include "ggml-kompute.h"
 #endif
-#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE)
+#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE)
 # include "ggml-blas.h"
 #endif
@@ -2303,7 +2303,7 @@ struct llama_context {
 #ifdef GGML_USE_METAL
 ggml_backend_t backend_metal = nullptr;
 #endif
-#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE)
+#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE)
 ggml_backend_t backend_blas = nullptr;
 #endif
 ggml_backend_t backend_cpu = nullptr;
@@ -12025,7 +12025,7 @@ static void llama_graph_compute(
 ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads);
 ggml_backend_cpu_set_abort_callback(lctx.backend_cpu, lctx.abort_callback, lctx.abort_callback_data);
 }
-#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE)
+#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE)
 if (lctx.backend_blas != nullptr) {
 ggml_backend_blas_set_n_threads(lctx.backend_blas, n_threads);
 }
@@ -16240,7 +16240,7 @@ struct llama_context * llama_new_context_with_model(
 }
 #endif
-#if defined(GGML_USE_OPENBLAS) || defined(GGML_USE_ACCELERATE)
+#if defined(GGML_USE_BLAS) || defined(GGML_USE_ACCELERATE)
 ctx->backend_blas = ggml_backend_blas_init();
 if (ctx->backend_blas == nullptr) {
 LLAMA_LOG_WARN("%s: failed to initialize BLAS backend\n", __func__);