error if type_v != FP16 and not flash_attn

Johannes Gäßler 2024-05-31 18:34:38 +02:00
parent cc7aef6829
commit d8a0b87091

@@ -15866,6 +15866,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
+        LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
+        return nullptr;
+    }
+
     llama_context * ctx = new llama_context(*model);
 
     const auto & hparams = model->hparams;
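
A minimal caller sketch of how this check surfaces, assuming the llama.cpp C API around the time of this commit (llama_load_model_from_file, llama_new_context_with_model); "model.gguf" is a placeholder path. Requesting a quantized V cache while flash attention is disabled now makes context creation fail cleanly with the error above instead of proceeding with an unsupported configuration.

// Hypothetical caller sketch; API names per llama.h at the time of this commit.
#include "llama.h"
#include <stdio.h>

int main(void) {
    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams); // placeholder path
    if (model == NULL) {
        llama_backend_free();
        return 1;
    }

    struct llama_context_params cparams = llama_context_default_params();
    cparams.type_v     = GGML_TYPE_Q4_0; // quantized V cache ...
    cparams.flash_attn = false;          // ... but flash attention left off

    struct llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        // The new check fires and logs "V cache quantization requires flash_attn".
        fprintf(stderr, "context creation failed\n");
    } else {
        llama_free(ctx);
    }

    llama_free_model(model);
    llama_backend_free();
    return 0;
}

Enabling flash_attn or leaving type_v at GGML_TYPE_F16 avoids the error path.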