diff --git a/llama.cpp b/llama.cpp
index f67cb7e23..c48b79ecc 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -15866,6 +15866,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
+        LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
+        return nullptr;
+    }
+
     llama_context * ctx = new llama_context(*model);
 
     const auto & hparams = model->hparams;
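
For reference, a minimal sketch of how a caller would trip the new guard through the public C API. This is an illustration, not part of the patch: it assumes the llama.cpp API as of this change (`llama_load_model_from_file`, `llama_new_context_with_model`, the `type_v` and `flash_attn` fields of `llama_context_params`), and "model.gguf" is a placeholder path.

```cpp
#include "llama.h"
#include <cstdio>

int main() {
    llama_backend_init();

    // Load any model; the path here is a placeholder.
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        return 1;
    }

    // Request a quantized V cache while leaving flash attention off.
    llama_context_params cparams = llama_context_default_params();
    cparams.type_v     = GGML_TYPE_Q4_0;
    cparams.flash_attn = false;

    // With this patch, context creation now fails instead of silently
    // producing an unsupported configuration.
    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == nullptr) {
        fprintf(stderr, "context creation rejected: V cache quantization requires flash_attn\n");
    }

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```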