fixup! fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
This commit is contained in:
parent
f3c3eafa6e
commit
aa9cbd7660
1 changed file with 1 addition and 1 deletion
@@ -342,7 +342,7 @@ void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, gg
 
     ggml_tensor * KQV = dst;
 
-    const int32_t precision = KQV->op_params[1];
+    const int32_t precision = KQV->op_params[2];
     GGML_ASSERT(precision == GGML_PREC_DEFAULT);
     GGML_ASSERT(Q->ne[0] == 64 || Q->ne[0] == 128 && "FlashAttention without tensor cores only supports head sizes 64 and 128.");
 
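
For context, here is a minimal C++ sketch of the pattern the corrected index relies on: a precision flag is written into a fixed slot of a tensor's int32 op_params block when the graph is built, and the backend launcher reads the same slot back. This is an illustration only, not the ggml sources; the helper names, the 16-slot block, and the use of slot 2 are assumptions made to mirror the read shown in the diff above.

// Hypothetical stand-ins for ggml's tensor/op_params plumbing, for illustration only.
#include <cassert>
#include <cstdint>

enum prec_t : int32_t { PREC_DEFAULT = 0, PREC_F32 = 10 };

struct tensor_t {
    int32_t op_params[16] = {0}; // small per-op parameter block, indexed by convention
};

// Writer side: graph-building code stores the requested precision in a fixed slot.
static void set_prec(tensor_t & t, prec_t prec) {
    t.op_params[2] = (int32_t) prec; // slot 2 assumed here, matching the corrected read above
}

// Reader side: the kernel launcher reads the same slot back.
static int32_t get_prec(const tensor_t & t) {
    return t.op_params[2];
}

int main() {
    tensor_t KQV;
    set_prec(KQV, PREC_DEFAULT);
    const int32_t precision = get_prec(KQV);
    assert(precision == PREC_DEFAULT); // mirrors the GGML_ASSERT in the diff
    return 0;
}

The point of the fixup is that writer and reader must agree on the slot index: when another parameter is inserted earlier in op_params, every downstream read of later slots has to shift accordingly, which is what the one-line change above does.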