Merge branch 'master' into gg/flash-attn
commit 013721df2b
157 changed files with 19090 additions and 15488 deletions
ggml-cuda/fattn.cuh (new file, 6 lines added)
@@ -0,0 +1,6 @@
+#include "common.cuh"
+
+void ggml_cuda_flash_attn_ext(
+        ggml_backend_cuda_context & ctx,
+        const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V,
+        const ggml_tensor * mask, ggml_tensor * KQV);
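The new header declares the single entry point the CUDA backend uses to launch the fused flash-attention kernels: given Q, K, V and an optional mask, it writes the attention result into KQV. A minimal sketch of how a backend dispatcher might route the op to this function follows; only ggml_cuda_flash_attn_ext() comes from this diff, while the GGML_OP_FLASH_ATTN_EXT case label, the src[] ordering, and the wrapper function name are assumptions based on ggml's usual op-dispatch pattern.

// Hedged sketch, not part of the diff: routing a flash-attention op to the
// entry point declared in fattn.cuh.
#include "common.cuh"
#include "fattn.cuh"

// dst is assumed to be the KQV output tensor whose sources are Q, K, V and
// an optional attention mask (possibly a null pointer).
static bool ggml_cuda_dispatch_op_sketch(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    switch (dst->op) {
        case GGML_OP_FLASH_ATTN_EXT:
            ggml_cuda_flash_attn_ext(ctx, dst->src[0], dst->src[1], dst->src[2], dst->src[3], dst);
            return true;
        default:
            return false; // other ops are handled by the rest of the backend
    }
}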