llama : fix MPI build
ggml-ci
This commit is contained in:
parent 5d2656d670
commit a4ad2bf35c
1 changed file with 1 addition and 1 deletion
@@ -4419,7 +4419,7 @@ struct llama_context * llama_new_context_with_model(
     if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
         // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
-        const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos());
+        const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
         while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
         llama_backend_free();
         exit(1);
     }
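Context on the rank > 0 branch above: with the MPI backend, only rank 0 returns from llama_new_context_with_model to drive generation; every other rank stays inside this loop, feeding dummy BOS tokens into llama_eval so the distributed graph keeps being served, and exits once eval stops succeeding. The build fix itself is the one-line change: llama_token_bos evidently gained a context parameter, and this MPI-only path still used the old zero-argument call. Below is a minimal sketch of the same driver/worker shape in raw MPI; the integer payload and the negative-count shutdown signal are invented for illustration and are not ggml-mpi's actual protocol.

// Illustrative driver/worker sketch (not llama.cpp code): rank 0 drives,
// all other ranks block in a loop until told to stop.
#include <mpi.h>

int main(int argc, char ** argv) {
    MPI_Init(&argc, &argv);

    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Hypothetical per-step payload; a negative value signals shutdown.
    int n_tokens = 0;

    if (rank > 0) {
        // Workers never reach the application logic below, mirroring how
        // rank > 0 never returns from llama_new_context_with_model.
        for (;;) {
            MPI_Bcast(&n_tokens, 1, MPI_INT, 0, MPI_COMM_WORLD);
            if (n_tokens < 0) break;
            // ... evaluate this rank's slice of the compute graph ...
        }
        MPI_Finalize();
        return 1; // analogous to llama_backend_free() + exit(1)
    }

    // Rank 0 drives a few steps, then broadcasts the stop signal.
    for (int step = 0; step < 3; ++step) {
        n_tokens = 8;
        MPI_Bcast(&n_tokens, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }
    n_tokens = -1;
    MPI_Bcast(&n_tokens, 1, MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Finalize();
    return 0;
}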