From eeb20c083dc163722fcf89a3ebd667499a7abcdd Mon Sep 17 00:00:00 2001
From: Kunshang Ji
Date: Thu, 31 Aug 2023 01:39:15 +0000
Subject: [PATCH] add cpu hbm support

---
 CMakeLists.txt |  8 ++++++++
 ggml.c         | 15 ++++++++++++++-
 llama.cpp      | 14 ++++++++++++--
 3 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index d4fa5c261..f8cee71c8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -557,6 +557,11 @@ endif()
 
 # ggml
 
+if (GGML_USE_CPU_HBM)
+    add_definitions(-DGGML_USE_CPU_HBM)
+    find_library(memkind memkind REQUIRED)
+endif()
+
 add_library(ggml OBJECT
             ggml.c
             ggml.h
@@ -572,6 +577,9 @@ add_library(ggml OBJECT
 target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
 target_compile_features(ggml PUBLIC c_std_11) # don't bump
 target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
+if (GGML_USE_CPU_HBM)
+    target_link_libraries(ggml PUBLIC memkind)
+endif()
 
 add_library(ggml_static STATIC $<TARGET_OBJECTS:ggml>)
 if (BUILD_SHARED_LIBS)
diff --git a/ggml.c b/ggml.c
index 8a677ab2a..04f938db3 100644
--- a/ggml.c
+++ b/ggml.c
@@ -103,6 +103,9 @@ typedef void * thread_ret_t;
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
+#endif
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
 #endif
 
 // __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
@@ -193,7 +196,13 @@ typedef void * thread_ret_t;
 #else
 inline static void * ggml_aligned_malloc(size_t size) {
     void * aligned_memory = NULL;
-#ifdef GGML_USE_METAL
+#ifdef GGML_USE_CPU_HBM
+    if (size == 0) {
+        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes with hbw_posix_memalign!\n");
+        return NULL;
+    }
+    int result = hbw_posix_memalign(&aligned_memory, 16, size);
+#elif GGML_USE_METAL
     int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
 #else
     int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
@@ -215,8 +224,12 @@ inline static void * ggml_aligned_malloc(size_t size) {
     return aligned_memory;
 }
 #define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
+#ifdef GGML_USE_CPU_HBM
+#define GGML_ALIGNED_FREE(ptr) if (NULL != ptr) hbw_free(ptr)
+#else
 #define GGML_ALIGNED_FREE(ptr) free(ptr)
 #endif
+#endif
 
 #define UNUSED GGML_UNUSED
 #define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)
diff --git a/llama.cpp b/llama.cpp
index 208dcef0e..9f59092fc 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -126,6 +126,9 @@ void replace_all(std::string & s, const std::string & search, const std::string
     }
     s = std::move(result);
 }
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
+#endif
 
 static void zeros(std::ofstream & file, size_t n) {
     char zero = 0;
@@ -450,6 +453,9 @@ static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph *
 #elif GGML_USE_METAL
 #  define llama_host_malloc(n)  ggml_metal_host_malloc(n)
 #  define llama_host_free(data) ggml_metal_host_free(data)
+#elif GGML_USE_CPU_HBM
+#  define llama_host_malloc(n)  hbw_malloc(n)
+#  define llama_host_free(data) if (data != NULL) hbw_free(data)
 #else
 #  define llama_host_malloc(n)  malloc(n)
 #  define llama_host_free(data) free(data)
@@ -1489,7 +1495,11 @@ struct llama_model_loader {
         // allocate temp buffer if not using mmap
         if (!use_mmap && cur->data == NULL) {
             GGML_ASSERT(cur->backend != GGML_BACKEND_CPU);
-            cur->data = malloc(ggml_nbytes(cur));
+            #ifdef GGML_USE_CPU_HBM
+            cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur));
+            #else
+            cur->data = (uint8_t*)malloc(ggml_nbytes(cur));
+            #endif
         }
 
         load_data_for(cur);
@@ -5396,7 +5406,7 @@ void llama_backend_init(bool numa) {
 
     // needed to initialize f16 tables
     {
-        struct ggml_init_params params = { 0, NULL, false };
+        struct ggml_init_params params = { 1, NULL, false };
         struct ggml_context * ctx = ggml_init(params);
         ggml_free(ctx);
     }
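
For anyone wanting to sanity-check a memkind install before building with
-DGGML_USE_CPU_HBM=ON, here is a minimal standalone sketch of the same
allocation pattern the patch wires into ggml_aligned_malloc(). It uses only
documented hbwmalloc.h calls (hbw_check_available, hbw_posix_memalign,
hbw_free); the file name, buffer size, and messages are illustrative, not
part of the patch. Build with: cc hbm_check.c -lmemkind

/* hbm_check.c - sketch of the hbwmalloc allocation pattern used above. */
#include <stdio.h>
#include <hbwmalloc.h>

int main(void) {
    /* hbw_check_available() returns 0 only when high-bandwidth memory
     * nodes are actually present on this machine. */
    if (hbw_check_available() != 0) {
        fprintf(stderr, "no HBM available; ggml would need the plain posix_memalign path\n");
        return 1;
    }

    void * buf = NULL;
    /* Same call the patch uses in ggml_aligned_malloc(): 16-byte alignment.
     * The patch's explicit size == 0 check exists because a zero-byte
     * hbw_posix_memalign() has no portable, well-defined result. */
    int result = hbw_posix_memalign(&buf, 16, 1024);
    if (result != 0) {
        fprintf(stderr, "hbw_posix_memalign failed (%d)\n", result);
        return 1;
    }

    /* ... use the 1 KiB HBM buffer ... */

    /* HBM allocations must be released with hbw_free(), never free();
     * this is why the patch branches GGML_ALIGNED_FREE as well. */
    hbw_free(buf);
    return 0;
}

The zero-size caveat also explains the one-byte change at the end of the
patch: llama_backend_init() used to create a throwaway context with
mem_size == 0, which under GGML_USE_CPU_HBM would now trip the new warning
and return NULL, so the patch bumps it to 1.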