fixup! fixup! CUDA: add FP32 FlashAttention vector kernel

Johannes Gäßler 2024-05-11 15:32:46 +02:00
parent 41f5f3a4e4
commit f3c3eafa6e


@@ -312,7 +312,7 @@ void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tens
     ggml_tensor * KQV = dst;
-    const int32_t precision = KQV->op_params[1];
+    const int32_t precision = KQV->op_params[2];
     GGML_ASSERT(precision == GGML_PREC_DEFAULT);
     constexpr int cols_per_block = 1;
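
For context on what the hunk changes: ggml attaches per-op integer parameters to the destination tensor in a small op_params array, and this fixup corrects which slot the kernel dispatcher reads the precision flag from (slot 2 rather than slot 1). Below is a minimal self-contained sketch of that pattern, assuming the general ggml convention; the array size, enum names, and enum values here are illustrative stand-ins, not the real definitions from ggml.h.

    // Sketch: reading a per-op int32 parameter from a tensor's op_params array.
    // All names and values below are assumptions for illustration only.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    constexpr int MAX_OP_PARAMS = 16;      // assumed capacity of the param array

    enum prec : int32_t {
        PREC_DEFAULT = 0,                  // assumed value
        PREC_F32     = 1,                  // assumed value
    };

    struct tensor {
        int32_t op_params[MAX_OP_PARAMS] = {};
    };

    int main() {
        tensor KQV;
        KQV.op_params[2] = PREC_DEFAULT;   // producer stores precision in slot 2

        // Consumer side, mirroring the fixed hunk: read slot 2, not slot 1.
        const int32_t precision = KQV.op_params[2];
        assert(precision == PREC_DEFAULT);
        std::printf("precision = %d\n", precision);
        return 0;
    }

The point of the fix is purely an indexing one: if the producer writes the precision flag into one slot and the consumer reads a different slot, the GGML_ASSERT on the precision value can fire (or silently pass on a stale zero), so both sides must agree on the slot index.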