From 015e1bfe6453ac535c4be305607e19b7898bc83d Mon Sep 17 00:00:00 2001
From: slaren
Date: Wed, 13 Mar 2024 14:59:32 +0100
Subject: [PATCH] llama : do not limit n_batch to n_ctx with non-causal attn

---
 llama.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 50c744ad8..b05d362f6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -12773,7 +12773,8 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_base   = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
     cparams.rope_freq_scale  = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
 
-    cparams.n_batch          = std::min(cparams.n_ctx, params.n_batch);
+    // with causal attention, the batch size is limited by the context size
+    cparams.n_batch          = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
 
     cparams.n_ubatch         = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
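
Note: as a minimal standalone sketch of the logic this patch introduces (not part of the patch itself; effective_n_batch is a hypothetical helper name used only for illustration): with causal attention every token in a batch must fit into the n_ctx-sized context, so n_batch is clamped to n_ctx; non-causal models (e.g. embedding models) have no such dependency, so the user-requested batch size is kept as-is.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring the patched assignment of cparams.n_batch.
static uint32_t effective_n_batch(bool causal_attn, uint32_t n_ctx, uint32_t n_batch) {
    return causal_attn ? std::min(n_ctx, n_batch) : n_batch;
}

int main() {
    // causal: batch clamped to the context size -> prints 512
    printf("%u\n", (unsigned) effective_n_batch(true,  512, 4096));
    // non-causal (e.g. embeddings): requested value kept -> prints 4096
    printf("%u\n", (unsigned) effective_n_batch(false, 512, 4096));
    return 0;
}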