fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
parent 41f5f3a4e4
commit f3c3eafa6e
1 changed file with 1 addition and 1 deletion
@@ -312,7 +312,7 @@ void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tens
     ggml_tensor * KQV = dst;
 
-    const int32_t precision = KQV->op_params[1];
+    const int32_t precision = KQV->op_params[2];
     GGML_ASSERT(precision == GGML_PREC_DEFAULT);
 
     constexpr int cols_per_block = 1;
 
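The one-line change reads the precision flag from a different slot of the tensor's op_params array. As a point of context (an assumption based on this fix, not spelled out in the diff): GGML_OP_FLASH_ATTN_EXT packs its parameters into raw int32 slots, with float parameters bit-copied in; once a second float (max_bias) occupies slot 1, the precision enum lands in slot 2, so reading op_params[1] would return the raw bits of max_bias instead. A minimal self-contained sketch of that layout, with hypothetical stand-in types mirroring ggml names:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumed enum values matching ggml's ggml_prec.
enum ggml_prec { GGML_PREC_DEFAULT = 0, GGML_PREC_F32 = 1 };

// Stand-in for ggml_tensor: op params are stored as raw int32 slots.
struct fake_tensor {
    int32_t op_params[16];
};

// Floats are bit-copied into a slot, not value-cast.
static void set_f32_param(fake_tensor * t, int i, float v) {
    memcpy(&t->op_params[i], &v, sizeof(v));
}

int main() {
    fake_tensor kqv = {};
    set_f32_param(&kqv, 0, 0.125f);                  // slot 0: scale
    set_f32_param(&kqv, 1, 8.0f);                    // slot 1: max_bias (assumed layout)
    kqv.op_params[2] = (int32_t) GGML_PREC_DEFAULT;  // slot 2: precision flag

    // The read this commit fixes: slot 1 now holds max_bias's raw bits,
    // so the precision must come from slot 2.
    const int32_t precision = kqv.op_params[2];
    printf("precision = %d\n", precision);
    return 0;
}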