From 16eff5af6971205c5eff94e18d3c510b35ddf0e9 Mon Sep 17 00:00:00 2001
From: Branden Butler
Date: Mon, 25 Sep 2023 17:41:57 -0500
Subject: [PATCH] Disable warmup under MPI

---
 common/common.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/common/common.cpp b/common/common.cpp
index 0d9b19cbe..a6bdae68f 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1402,10 +1402,17 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
     {
         LOG("warming up the model with an empty run\n");
 
+#ifndef GGML_USE_MPI
+        // When using MPI, llama_eval() enters into an infinite loop
+        // on non-head nodes. Thus, we only want to warmup the model here
+        // if we aren't using MPI.
+        // FIXME have a way to terminate the infinite loop so we can warmup the model
+        // in MPI mode
         std::vector<llama_token> tmp = { llama_token_bos(model), llama_token_eos(model), };
         llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
         llama_kv_cache_clear(lctx);
         llama_synchronize(lctx);
+#endif
 
         llama_reset_timings(lctx);
     }
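
One possible direction for the FIXME: the head node could broadcast a sentinel token count that tells worker ranks to leave their eval loop, letting warmup run under MPI as well. Below is a rough sketch of that idea using plain MPI calls; worker_loop(), end_worker_loop(), and STOP_SENTINEL are illustrative names only, not existing llama.cpp or ggml-mpi APIs, and the actual eval participation is elided.

    #include <mpi.h>

    static const int STOP_SENTINEL = -1;

    // Worker ranks (rank > 0) block here, joining each distributed eval
    // the head node announces, until it broadcasts the stop sentinel.
    static void worker_loop() {
        for (;;) {
            int n_tokens = 0;
            MPI_Bcast(&n_tokens, 1, MPI_INT, 0, MPI_COMM_WORLD);
            if (n_tokens == STOP_SENTINEL) {
                break; // head node released us; warmup (or shutdown) is done
            }
            // ... participate in the distributed eval of n_tokens ...
        }
    }

    // Head node (rank 0) calls this after the warmup decode so workers
    // fall out of worker_loop() and can re-enter it for real generation.
    static void end_worker_loop() {
        int sentinel = STOP_SENTINEL;
        MPI_Bcast(&sentinel, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }

    int main(int argc, char ** argv) {
        MPI_Init(&argc, &argv);
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        if (rank == 0) {
            // ... warmup decode would go here ...
            end_worker_loop();
        } else {
            worker_loop();
        }
        MPI_Finalize();
        return 0;
    }

With a mechanism like this in place, the #ifndef GGML_USE_MPI guard in the patch could be dropped: rank 0 would run the warmup decode and then broadcast the sentinel so every rank returns to a clean state before generation starts.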