Update src/llama.cpp

Co-authored-by: slaren <slarengh@gmail.com>
This commit is contained in:
Andrei 2024-06-28 16:07:47 -04:00 committed by GitHub
parent f4424c150f
commit 3a2471811f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -17402,11 +17402,12 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }

-    if (params.flash_attn && model->arch == LLM_ARCH_GEMMA2) {
-        LLAMA_LOG_WARN("%s: flash_attn is not compatible with Gemma2 - forcing off\n", __func__);
+    if (params.flash_attn && model->hparams.attn_soft_cap) {
+        LLAMA_LOG_WARN("%s: flash_attn is not compatible with attn_soft_cap - forcing off\n", __func__);
         params.flash_attn = false;
     }

     if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
         LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
         params.flash_attn = false;