From d8a0b870911f7e457cceb61c4764fd23673c4ea9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?=
Date: Fri, 31 May 2024 18:34:38 +0200
Subject: [PATCH] error if type_v != FP16 and not flash_attn

---
 llama.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index f67cb7e23..c48b79ecc 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -15866,6 +15866,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
+        LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
+        return nullptr;
+    }
+
     llama_context * ctx = new llama_context(*model);
 
     const auto & hparams = model->hparams;
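
Note (not part of the patch): a minimal sketch of how a caller would trip the new
check, assuming the public llama.h API at the time of this commit. The model path
is a placeholder; any quantized V-cache type (e.g. GGML_TYPE_Q4_0) combined with
flash_attn == false should hit the new error path.

    #include "llama.h"
    #include <cstdio>

    int main() {
        llama_backend_init();

        llama_model_params mparams = llama_model_default_params();
        llama_model * model = llama_load_model_from_file("model.gguf", mparams); // placeholder path
        if (model == nullptr) {
            return 1;
        }

        llama_context_params cparams = llama_context_default_params();
        cparams.type_v     = GGML_TYPE_Q4_0; // quantized V cache ...
        cparams.flash_attn = false;          // ... without flash attention enabled

        // With this patch, context creation logs
        // "V cache quantization requires flash_attn" and returns nullptr
        // instead of continuing with a V-cache type the non-FA path does not support.
        llama_context * ctx = llama_new_context_with_model(model, cparams);
        if (ctx == nullptr) {
            fprintf(stderr, "context creation rejected, as expected\n");
        }

        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
        return 0;
    }

The design choice here is fail-fast: rather than warning and silently falling back
(as is done for flash_attn itself just above), an unsupported type_v/flash_attn
combination aborts context creation so the caller sees the problem immediately.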