From aa9cbd76608e8aacf5e02e9568d935e9c4e9fbfe Mon Sep 17 00:00:00 2001
From: Johannes Gäßler
Date: Sat, 11 May 2024 15:34:09 +0200
Subject: [PATCH] fixup! fixup! fixup! CUDA: add FP32 FlashAttention vector
 kernel

---
 ggml-cuda/fattn-vec-f16.cu | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml-cuda/fattn-vec-f16.cu b/ggml-cuda/fattn-vec-f16.cu
index 54307fcfb..cbf5f7835 100644
--- a/ggml-cuda/fattn-vec-f16.cu
+++ b/ggml-cuda/fattn-vec-f16.cu
@@ -342,7 +342,7 @@ void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, gg
 
     ggml_tensor * KQV = dst;
 
-    const int32_t precision = KQV->op_params[1];
+    const int32_t precision = KQV->op_params[2];
     GGML_ASSERT(precision == GGML_PREC_DEFAULT);
 
     GGML_ASSERT(Q->ne[0] == 64 || Q->ne[0] == 128 && "FlashAttention without tensor cores only supports head sizes 64 and 128.");