CUDA: add FP32 FlashAttention vector kernel (#7188)

* CUDA: add FP32 FlashAttention vector kernel

* fixup! CUDA: add FP32 FlashAttention vector kernel

* fixup! fixup! CUDA: add FP32 FlashAttention vector kernel

* fixup! fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
Author: Johannes Gäßler
Date:   2024-05-12 19:40:45 +02:00 (committed by GitHub)
Commit: dc685be466 (parent 6f1b63606f)
9 changed files with 899 additions and 458 deletions


@@ -321,6 +321,10 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
#define FP16_MMA_AVAILABLE !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA

static bool fast_fp16_available(const int cc) {
    return cc >= CC_PASCAL && cc != 610;
}

static bool fp16_mma_available(const int cc) {
    return cc < CC_OFFSET_AMD && cc >= CC_VOLTA;
}
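
The two helpers added above gate kernel selection by compute capability: fast_fp16_available() excludes compute capability 6.1 (consumer Pascal, which has low FP16 throughput), and fp16_mma_available() requires NVIDIA Volta or newer for tensor-core MMA. Below is a minimal, hypothetical host-side sketch of how such checks could choose between an FP16 path and the FP32 vector kernel this commit adds. It is not the actual llama.cpp dispatch code; the constant values mirror those in ggml-cuda, and flash_attn_vec_f16 / flash_attn_vec_f32 are placeholder launchers.

#include <cstdio>

// Constants assumed to match the ggml-cuda conventions.
#define CC_PASCAL     600
#define CC_VOLTA      700
#define CC_OFFSET_AMD 1000000 // AMD GPUs are encoded above this offset

static bool fast_fp16_available(const int cc) {
    return cc >= CC_PASCAL && cc != 610;
}

static bool fp16_mma_available(const int cc) {
    return cc < CC_OFFSET_AMD && cc >= CC_VOLTA;
}

// Placeholder launchers standing in for the real FlashAttention kernels.
static void flash_attn_vec_f16() { std::puts("FP16 vector kernel"); }
static void flash_attn_vec_f32() { std::puts("FP32 vector kernel"); }

// Dispatch sketch: take the FP16 path only where fast FP16 math exists;
// otherwise fall back to the FP32 vector kernel.
static void launch_flash_attn(const int cc) {
    if (fast_fp16_available(cc)) {
        flash_attn_vec_f16();
    } else {
        flash_attn_vec_f32();
    }
}

int main() {
    launch_flash_attn(610); // consumer Pascal: slow FP16 -> FP32 fallback
    launch_flash_attn(800); // Ampere: fast FP16 path
    return 0;
}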