ggml : add NUMA support (#1556)

* detect NUMA systems and pin work threads to nodes (linux); see the sketch below

* disable mmap prefetch/readahead for NUMA systems; see the madvise sketch below

* avoid sending finalize op to thread pool if it does nothing

* silence robot

* fix args

* make --numa a param

* recommendation that n_nodes evenly divide n_threads did not warrant such aggressive enforcement

* lower synchronization overhead

* statically allocate

* move numa state to g_state

* add description for --numa

* ggml : minor style changes

* ggml : minor style + try fix sanitizer build

* llama : allow initializing the backend with NUMA support

* llama : avoid ggml include in llama-util.h

* ggml : style / formatting

* ggml : fix handling of ops with n_threads > n_tasks > 1

* server : utilize numa parameter

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Commit b853d45601 (parent 9225baef71) by zrm, 2023-06-26 13:57:59 -04:00, committed via GitHub
14 changed files with 339 additions and 236 deletions
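The first bullet above is the core mechanism: detect how many NUMA nodes the machine has and pin worker threads so each one stays on a single node. Below is a minimal sketch of how that can be done on Linux. It is not the commit's code: count_numa_nodes, pin_thread_to_cpu, and the cpus array are hypothetical names, and ggml's actual implementation (ggml_numa_init and friends in ggml.c) differs in detail.

    // Hedged sketch: count NUMA nodes via sysfs and pin the calling thread
    // to a CPU of "its" node, chosen round-robin by thread index.
    // All names here are hypothetical, not ggml's.
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/stat.h>

    // Linux exposes one directory per NUMA node: /sys/devices/system/node/nodeN
    static uint32_t count_numa_nodes(void) {
        uint32_t n = 0;
        char path[128];
        struct stat st;
        for (;; n++) {
            snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", n);
            if (stat(path, &st) != 0) {
                break; // nodeN does not exist -> n nodes total
            }
        }
        return n;
    }

    // Pin the calling thread to one CPU belonging to a node.
    // cpus/n_cpus would come from parsing /sys/devices/system/node/nodeN/cpulist.
    static void pin_thread_to_cpu(const uint32_t * cpus, uint32_t n_cpus, uint32_t thread_idx) {
        cpu_set_t set;
        CPU_ZERO(&set);
        CPU_SET(cpus[thread_idx % n_cpus], &set);
        sched_setaffinity(0, sizeof(set), &set); // pid 0 = calling thread
    }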


@@ -774,7 +774,7 @@ struct llama_model_loader {
         }
 
         if (use_mmap) {
-            mapping.reset(new llama_mmap(&file_loaders.at(0)->file, prefetch_size));
+            mapping.reset(new llama_mmap(&file_loaders.at(0)->file, prefetch_size, ggml_is_numa()));
             if (lmlock) {
                 lmlock->init(mapping->addr);
             }
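This hunk passes the new ggml_is_numa() flag into llama_mmap, which implements the second bullet: with readahead enabled, the kernel would fault the whole model into the memory of whichever node touched the mapping first. A hedged sketch of the policy, using a hypothetical helper around the standard madvise(2) hints:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    // Hypothetical helper illustrating the prefetch/readahead policy.
    // With NUMA active, skip the eager prefetch and mark access as random,
    // so each page faults in on the node of the thread that first touches it.
    static void hint_model_mapping(void * addr, size_t len, size_t prefetch, bool numa) {
        if (numa) {
            prefetch = 0; // eager prefetch would fill a single node's memory
        }
        if (prefetch > 0) {
            madvise(addr, prefetch, MADV_WILLNEED); // non-NUMA case: warm the page cache
        }
        if (numa) {
            madvise(addr, len, MADV_RANDOM); // defeat kernel readahead
        }
    }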
@@ -977,7 +977,7 @@ bool llama_mlock_supported() {
     return llama_mlock::SUPPORTED;
 }
 
-void llama_init_backend() {
+void llama_init_backend(bool numa) {
     ggml_time_init();
 
     // needed to initialize f16 tables
@@ -986,6 +986,10 @@ void llama_init_backend() {
         struct ggml_context * ctx = ggml_init(params);
         ggml_free(ctx);
     }
+
+    if (numa) {
+        ggml_numa_init();
+    }
 }
 
 int64_t llama_time_us() {
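With the signature change above, callers opt into NUMA support explicitly. A caller-side sketch, assuming only the llama_init_backend(bool) signature shown in the hunk; the flag parsing is illustrative of the --numa param this commit adds:

    #include <stdbool.h>
    #include <string.h>
    #include "llama.h"

    int main(int argc, char ** argv) {
        bool numa = false;
        for (int i = 1; i < argc; i++) {
            if (strcmp(argv[i], "--numa") == 0) {
                numa = true; // user asked for NUMA-aware thread placement
            }
        }
        llama_init_backend(numa); // initializes NUMA state only when requested
        // ... load the model and run as usual ...
        return 0;
    }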
@@ -2899,7 +2903,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 
         // maybe this should in llama_model_loader
         if (model_loader->use_mmap) {
-            model_loader->mapping.reset(new llama_mmap(&model_loader->file_loaders.at(0)->file, /* prefetch */ 0));
+            model_loader->mapping.reset(new llama_mmap(&model_loader->file_loaders.at(0)->file, /* prefetch */ 0, ggml_is_numa()));
         }
     }