add cpu hbm support

Kunshang Ji 2023-08-31 01:39:15 +00:00
parent 6336d834ec
commit eeb20c083d
3 changed files with 34 additions and 3 deletions

CMakeLists.txt

@@ -557,6 +557,11 @@ endif()
 # ggml
+if (GGML_USE_CPU_HBM)
+    add_definitions(-DGGML_USE_CPU_HBM)
+    find_library(memkind memkind REQUIRED)
+endif()
+
 add_library(ggml OBJECT
             ggml.c
             ggml.h
@@ -572,6 +577,9 @@ add_library(ggml OBJECT
 target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
 target_compile_features(ggml PUBLIC c_std_11) # don't bump
 target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
+if (GGML_USE_CPU_HBM)
+    target_link_libraries(ggml PUBLIC memkind)
+endif()
 add_library(ggml_static STATIC $<TARGET_OBJECTS:ggml>)
 if (BUILD_SHARED_LIBS)
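With these CMake changes, configuring with -DGGML_USE_CPU_HBM=ON defines GGML_USE_CPU_HBM for the whole build and links ggml against libmemkind. Before building, it can be worth confirming that memkind actually sees high-bandwidth memory nodes on the target machine. A minimal standalone check, illustrative and not part of this commit, assuming only that memkind's hbwmalloc.h is installed:

// hbm_check.c, build with: cc hbm_check.c -lmemkind
#include <stdio.h>
#include <hbwmalloc.h>

int main(void) {
    // hbw_check_available() returns 0 when HBM nodes are detected
    if (hbw_check_available() != 0) {
        fprintf(stderr, "no high-bandwidth memory nodes detected\n");
        return 1;
    }
    void * p = hbw_malloc(1024);   // 1 KiB from high-bandwidth memory
    printf("hbw_malloc -> %p\n", p);
    hbw_free(p);                   // HBM pointers must be released with hbw_free
    return 0;
}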

ggml.c

@@ -103,6 +103,9 @@ typedef void * thread_ret_t;
 #include <sys/stat.h>
 #include <unistd.h>
 #endif
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
+#endif
 // __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
@@ -193,7 +196,13 @@ typedef void * thread_ret_t;
 #else
 inline static void * ggml_aligned_malloc(size_t size) {
     void * aligned_memory = NULL;
-#ifdef GGML_USE_METAL
+#ifdef GGML_USE_CPU_HBM
+    if (size == 0) {
+        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for hbw_posix_memalign!\n");
+        return NULL;
+    }
+    int result = hbw_posix_memalign(&aligned_memory, 16, size);
+#elif GGML_USE_METAL
     int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
 #else
     int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
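hbw_posix_memalign mirrors posix_memalign: it returns 0 on success and an errno value on failure, and the behavior for a size of 0 is not well defined, which is why the new branch returns NULL early instead of calling it. A condensed standalone sketch of the same compile-time dispatch (illustrative; my_aligned_malloc is a made-up name, not the commit's function):

#include <stdlib.h>
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

static void * my_aligned_malloc(size_t size) {
    void * mem = NULL;
    if (size == 0) {
        return NULL;  // avoid implementation-defined size-0 behavior
    }
#ifdef GGML_USE_CPU_HBM
    // 16-byte alignment, pages come from high-bandwidth memory
    int result = hbw_posix_memalign(&mem, 16, size);
#else
    // ordinary DRAM path, same posix_memalign-style contract
    int result = posix_memalign(&mem, 16, size);
#endif
    return result == 0 ? mem : NULL;
}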
@@ -215,8 +224,12 @@ inline static void * ggml_aligned_malloc(size_t size) {
     return aligned_memory;
 }
 #define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
+#ifdef GGML_USE_CPU_HBM
+#define GGML_ALIGNED_FREE(ptr) if(NULL != ptr) hbw_free(ptr)
+#else
 #define GGML_ALIGNED_FREE(ptr) free(ptr)
+#endif
 #endif
 #define UNUSED GGML_UNUSED
 #define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)
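Because hbw_posix_memalign draws from memkind's HBM pool, the matching release must go through hbw_free rather than free, which is why GGML_ALIGNED_FREE is switched alongside the allocator. One caveat with the macro as written: a bare if inside a function-like macro can bind to a following else at the call site. A hypothetical hardened variant, not what this commit does, would use the do/while(0) idiom that the SWAP macro just above already follows:

// hypothetical variant: safe inside if/else chains at every call site
#define GGML_ALIGNED_FREE(ptr)      \
    do {                            \
        if ((ptr) != NULL) {        \
            hbw_free(ptr);          \
        }                           \
    } while (0)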

llama.cpp

@@ -126,6 +126,9 @@ void replace_all(std::string & s, const std::string & search, const std::string & replace) {
     }
     s = std::move(result);
 }
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
+#endif
 
 static void zeros(std::ofstream & file, size_t n) {
     char zero = 0;
@@ -450,6 +453,9 @@ static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph *
 #elif GGML_USE_METAL
 #  define llama_host_malloc(n)  ggml_metal_host_malloc(n)
 #  define llama_host_free(data) ggml_metal_host_free(data)
+#elif GGML_USE_CPU_HBM
+#  define llama_host_malloc(n)  hbw_malloc(n)
+#  define llama_host_free(data) if (data != NULL) hbw_free(data)
 #else
 #  define llama_host_malloc(n)  malloc(n)
 #  define llama_host_free(data) free(data)
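The llama_host_malloc/llama_host_free pair gains an HBM branch next to the accelerator-specific ones. Worth knowing: under memkind's default HBW_POLICY_PREFERRED policy, hbw_malloc falls back to regular DRAM when the HBM pool is exhausted, so allocation failures here are rarer than the small HBM capacity might suggest. Usage stays symmetric no matter which branch is compiled in; a short usage sketch (the buffer size is illustrative):

// any buffer obtained through llama_host_malloc must be returned
// through llama_host_free, since the backing allocator is fixed at
// compile time
uint8_t * buf = (uint8_t *) llama_host_malloc(32u * 1024u * 1024u);
if (buf != NULL) {
    // ... use the 32 MiB host buffer ...
    llama_host_free(buf);
}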
@@ -1489,7 +1495,11 @@ struct llama_model_loader {
             // allocate temp buffer if not using mmap
             if (!use_mmap && cur->data == NULL) {
                 GGML_ASSERT(cur->backend != GGML_BACKEND_CPU);
-                cur->data = malloc(ggml_nbytes(cur));
+#ifdef GGML_USE_CPU_HBM
+                cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur));
+#else
+                cur->data = (uint8_t*)malloc(ggml_nbytes(cur));
+#endif
             }
             load_data_for(cur);
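The loader's temporary tensor buffer follows the same pattern: HBM when the flag is set, plain malloc otherwise. The matching release site is outside this hunk, but a buffer that came from hbw_malloc must go back through hbw_free, never free. A sketch of the symmetric pair worth keeping in mind (hypothetical helper names, not in the commit):

#include <stdlib.h>
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif

// allocation and release must agree on the allocator chosen at compile time
static void * tensor_buf_alloc(size_t n) {
#ifdef GGML_USE_CPU_HBM
    return hbw_malloc(n);
#else
    return malloc(n);
#endif
}

static void tensor_buf_free(void * p) {
#ifdef GGML_USE_CPU_HBM
    hbw_free(p);
#else
    free(p);
#endif
}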
@@ -5396,7 +5406,7 @@ void llama_backend_init(bool numa) {
     // needed to initialize f16 tables
     {
-        struct ggml_init_params params = { 0, NULL, false };
+        struct ggml_init_params params = { 1, NULL, false };
         struct ggml_context * ctx = ggml_init(params);
         ggml_free(ctx);
     }
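The last change is a consequence of the new allocator: llama_backend_init creates a throwaway ggml context purely to initialize the f16 tables, and it used to request a zero-byte memory pool. With GGML_USE_CPU_HBM, ggml_aligned_malloc now returns NULL for size 0 (the guard added in ggml.c above), which would break this warm-up, so the pool size is bumped to a single byte. For reference, a sketch of the call with the field layout as in ggml.h of this era:

// struct ggml_init_params {
//     size_t mem_size;    // pool size in bytes; 0 now trips the HBM guard
//     void * mem_buffer;  // NULL: let ggml allocate the pool itself
//     bool   no_alloc;    // skip allocating tensor data
// };
struct ggml_init_params params = { 1, NULL, false };  // a 1-byte pool suffices
struct ggml_context * ctx = ggml_init(params);        // initializes f16 tables
ggml_free(ctx);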