CUDA: quantized KV support for FA vec (#7527)

* CUDA: quantized KV support for FA vec

* try CI fix

* fix commented-out kernel variants

* add q8_0 q4_0 tests

* fix nwarps > batch size

* split fattn compile via extern templates

* fix flake8

* fix metal tests

* fix cmake

* make generate_cu_files.py executable

* add autogenerated .cu files

* fix AMD

* error if type_v != FP16 and not flash_attn

* remove obsolete code
This commit is contained in:
Johannes Gäßler 2024-06-01 08:44:14 +02:00 committed by GitHub
parent a323ec60af
commit 9b596417af
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
110 changed files with 2697 additions and 1200 deletions

View file

@@ -16221,6 +16221,11 @@ struct llama_context * llama_new_context_with_model(
params.flash_attn = false;
}
if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
return nullptr;
}
llama_context * ctx = new llama_context(*model);
const auto & hparams = model->hparams;