From d41b53ca7bb71ed75625b834a034b539e45a9889 Mon Sep 17 00:00:00 2001
From: slaren
Date: Fri, 22 Sep 2023 00:58:45 +0200
Subject: [PATCH] fix mpi build

---
 llama.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index aa8a36ff3..64bb20479 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3680,6 +3680,7 @@ static bool llama_eval_internal(
     GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
 
     const auto & cparams = lctx.cparams;
 
+    const int n_ctx     = cparams.n_ctx;
     const int n_batch   = cparams.n_batch;
 
@@ -6456,7 +6457,7 @@ struct llama_context * llama_new_context_with_model(
     if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
         // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
         const std::vector<llama_token> tmp(cparams.n_ctx, llama_token_bos(ctx));
-        while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
+        while (!llama_eval(ctx, tmp.data(), tmp.size(), 0)) {};
         llama_backend_free();
         exit(1);
     }
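
---

Note (not part of the patch): a minimal sketch of how a caller drives
the four-argument llama_eval() after this fix, where the thread count
lives in llama_context_params instead of being passed per call. The
model path "model.gguf", the n_threads value, and the prompt length
below are illustrative assumptions, not taken from this patch; the
return convention (0 on success) follows the `!llama_eval(...)` loop
condition in the hunk above.

    // illustrative caller for the 4-argument llama_eval(); assumes the
    // llama.h API as of this patch (after the model/context params split)
    #include <vector>
    #include "llama.h"

    int main() {
        llama_backend_init(false); // numa = false

        llama_model_params mparams = llama_model_default_params();
        // "model.gguf" is a hypothetical path
        llama_model * model = llama_load_model_from_file("model.gguf", mparams);
        if (model == NULL) {
            return 1;
        }

        llama_context_params cparams = llama_context_default_params();
        cparams.n_threads = 4; // threads are configured here, not per llama_eval() call
        llama_context * ctx = llama_new_context_with_model(model, cparams);

        // evaluate a dummy BOS-token prompt, mirroring the MPI worker loop
        // in the patch; llama_eval() returns 0 on success
        const std::vector<llama_token> tmp(8, llama_token_bos(ctx));
        const int ret = llama_eval(ctx, tmp.data(), tmp.size(), /*n_past=*/0);

        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
        return ret == 0 ? 0 : 1;
    }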