fixup! Add basic chipStar support

Quinten Kock 2023-12-17 01:52:36 +01:00
parent 2a86c00ffa
commit 21b68f3032


@@ -8326,153 +8326,155 @@ static __global__ void k_compute_batched_ptrs(
ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst_f16 + i12* nb2/2 + i13* nb3/2;
}
// static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
// GGML_ASSERT(!ggml_is_transposed(src0));
// GGML_ASSERT(!ggml_is_transposed(src1));
//
// GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
// GGML_ASSERT(src0->type == GGML_TYPE_F16);
// GGML_ASSERT(src1->type == GGML_TYPE_F32);
//
// const int64_t ne00 = src0->ne[0]; GGML_UNUSED(ne00);
// const int64_t ne01 = src0->ne[1];
// const int64_t ne02 = src0->ne[2];
// const int64_t ne03 = src0->ne[3];
//
// const int64_t nb01 = src0->nb[1];
// const int64_t nb02 = src0->nb[2]; GGML_UNUSED(nb02);
// const int64_t nb03 = src0->nb[3]; GGML_UNUSED(nb03);
//
// const int64_t ne10 = src1->ne[0];
// const int64_t ne11 = src1->ne[1];
// const int64_t ne12 = src1->ne[2];
// const int64_t ne13 = src1->ne[3];
//
// const int64_t nb11 = src1->nb[1];
// const int64_t nb12 = src1->nb[2]; GGML_UNUSED(nb12);
// const int64_t nb13 = src1->nb[3]; GGML_UNUSED(nb13);
//
// const int64_t ne1 = ggml_nelements(src1);
// const int64_t ne = ggml_nelements(dst);
//
// CUDA_CHECK(ggml_cuda_set_device(g_main_device));
// cudaStream_t main_stream = g_cudaStreams[g_main_device][0];
//
// CUBLAS_CHECK(cublasSetStream(g_cublas_handles[g_main_device], main_stream));
//
// ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
// void * src0_ddq = src0_extra->data_device[g_main_device];
// half * src0_as_f16 = (half *) src0_ddq;
//
// ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
// float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
//
// ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
// float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
//
// // convert src1 to fp16
// const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
// GGML_ASSERT(to_fp16_cuda != nullptr);
//
// size_t src1_as = 0;
// half * src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne1 * sizeof(half), &src1_as);
// to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream);
//
// size_t dst_as = 0;
// half * dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as);
//
// GGML_ASSERT(ne12 % ne02 == 0);
// GGML_ASSERT(ne13 % ne03 == 0);
//
// // broadcast factors
// const int64_t r2 = ne12/ne02;
// const int64_t r3 = ne13/ne03;
//
// const half alpha_f16 = 1.0f;
// const half beta_f16 = 0.0f;
//
// #if 0
// // use cublasGemmEx
// {
// for (int i13 = 0; i13 < ne13; ++i13) {
// for (int i12 = 0; i12 < ne12; ++i12) {
// int i03 = i13 / r3;
// int i02 = i12 / r2;
//
// CUBLAS_CHECK(
// cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
// ne01, ne11, ne10,
// &alpha_f16, (const char *) src0_as_f16 + i02*src0->nb[2] + i03*src0->nb[3] , CUDA_R_16F, nb01/sizeof(half),
// (const char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2, CUDA_R_16F, nb11/sizeof(float),
// &beta_f16, ( char *) dst_f16 + i12* dst->nb[2]/2 + i13* dst->nb[3]/2, CUDA_R_16F, ne01,
// CUBLAS_COMPUTE_16F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// }
// }
// }
// #else
// if (r2 == 1 && r3 == 1 && src0->nb[2]*src0->ne[2] == src0->nb[3] && src1->nb[2]*src1->ne[2] == src1->nb[3]) {
// // there is no broadcast and src0, src1 are contiguous across dims 2, 3
// // use cublasGemmStridedBatchedEx
// CUBLAS_CHECK(
// cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
// ne01, ne11, ne10,
// &alpha_f16, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA
// (const char *) src1_as_f16, CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB
// &beta_f16, ( char *) dst_f16, CUDA_R_16F, ne01, dst->nb[2]/sizeof(float), // strideC
// ne12*ne13,
// CUBLAS_COMPUTE_16F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// } else {
// // use cublasGemmBatchedEx
// const int ne23 = ne12*ne13;
//
// const void ** ptrs_src = nullptr;
// void ** ptrs_dst = nullptr;
//
// size_t ptrs_src_s = 0;
// size_t ptrs_dst_s = 0;
//
// ptrs_src = (const void **) ggml_cuda_pool_malloc(2*ne23*sizeof(void *), &ptrs_src_s);
// ptrs_dst = ( void **) ggml_cuda_pool_malloc(1*ne23*sizeof(void *), &ptrs_dst_s);
//
// dim3 block_dims(ne13, ne12);
// k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>(
// src0_as_f16, src1_as_f16, dst_f16,
// ptrs_src, ptrs_dst,
// ne12, ne13,
// ne23,
// nb02, nb03,
// nb12, nb13,
// dst->nb[2], dst->nb[3],
// r2, r3);
// CUDA_CHECK(cudaGetLastError());
//
// CUBLAS_CHECK(
// cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
// ne01, ne11, ne10,
// &alpha_f16, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, nb01/sizeof(half),
// (const void **) (ptrs_src + 1*ne23), CUDA_R_16F, nb11/sizeof(float),
// &beta_f16, ( void **) (ptrs_dst + 0*ne23), CUDA_R_16F, ne01,
// ne23,
// CUBLAS_COMPUTE_16F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// if (ptrs_src_s != 0) {
// ggml_cuda_pool_free(ptrs_src, ptrs_src_s);
// }
// if (ptrs_dst_s != 0) {
// ggml_cuda_pool_free(ptrs_dst, ptrs_dst_s);
// }
// }
// #endif
//
// const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
// to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream);
//
// ggml_cuda_pool_free(src1_as_f16, src1_as);
// ggml_cuda_pool_free(dst_f16, dst_as);
// }
#ifndef GGML_USE_CHIPSTAR
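// the fast path below depends on cublasGemmStridedBatchedEx / cublasGemmBatchedEx,
// which chipStar's CUDA compatibility layer presumably does not cover yet, so the
// whole function is compiled out for chipStar builds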
static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(!ggml_is_transposed(src0));
GGML_ASSERT(!ggml_is_transposed(src1));
GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
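// everything below runs in half precision: src1 is converted to F16 up front,
// the GEMMs accumulate into an F16 scratch buffer, and the result is converted
// back to F32 at the very end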
const int64_t ne00 = src0->ne[0]; GGML_UNUSED(ne00);
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
const int64_t ne03 = src0->ne[3];
const int64_t nb01 = src0->nb[1];
const int64_t nb02 = src0->nb[2]; GGML_UNUSED(nb02);
const int64_t nb03 = src0->nb[3]; GGML_UNUSED(nb03);
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
const int64_t ne12 = src1->ne[2];
const int64_t ne13 = src1->ne[3];
const int64_t nb11 = src1->nb[1];
const int64_t nb12 = src1->nb[2]; GGML_UNUSED(nb12);
const int64_t nb13 = src1->nb[3]; GGML_UNUSED(nb13);
const int64_t ne1 = ggml_nelements(src1);
const int64_t ne = ggml_nelements(dst);
CUDA_CHECK(ggml_cuda_set_device(g_main_device));
cudaStream_t main_stream = g_cudaStreams[g_main_device][0];
CUBLAS_CHECK(cublasSetStream(g_cublas_handles[g_main_device], main_stream));
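// all work is issued to stream 0 of the main device, with cuBLAS bound to the same stream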
ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
void * src0_ddq = src0_extra->data_device[g_main_device];
half * src0_as_f16 = (half *) src0_ddq;
ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
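// raw device pointers: src0 already lives on the device as F16, src1 and dst as F32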
// convert src1 to fp16
const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
GGML_ASSERT(to_fp16_cuda != nullptr);
size_t src1_as = 0;
half * src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne1 * sizeof(half), &src1_as);
to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream);
size_t dst_as = 0;
half * dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as);
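// F16 scratch buffer for the GEMM output; ne is the total element count of dst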
GGML_ASSERT(ne12 % ne02 == 0);
GGML_ASSERT(ne13 % ne03 == 0);
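// ggml broadcasting: src1's batch dimensions must be integer multiples of src0's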
// broadcast factors
const int64_t r2 = ne12/ne02;
const int64_t r3 = ne13/ne03;
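// each src0 matrix is shared by r2*r3 consecutive (i12, i13) positions of src1/dst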
const half alpha_f16 = 1.0f;
const half beta_f16 = 0.0f;
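// alpha and beta must be half because the compute type below is CUBLAS_COMPUTE_16F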
#if 0
// use cublasGemmEx
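// (disabled reference path: one cublasGemmEx call per (i12, i13) matrix pair)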
{
for (int i13 = 0; i13 < ne13; ++i13) {
for (int i12 = 0; i12 < ne12; ++i12) {
int i03 = i13 / r3;
int i02 = i12 / r2;
CUBLAS_CHECK(
cublasGemmEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
ne01, ne11, ne10,
&alpha_f16, (const char *) src0_as_f16 + i02*src0->nb[2] + i03*src0->nb[3] , CUDA_R_16F, nb01/sizeof(half),
(const char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2, CUDA_R_16F, nb11/sizeof(float),
&beta_f16, ( char *) dst_f16 + i12* dst->nb[2]/2 + i13* dst->nb[3]/2, CUDA_R_16F, ne01,
CUBLAS_COMPUTE_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
}
}
#else
if (r2 == 1 && r3 == 1 && src0->nb[2]*src0->ne[2] == src0->nb[3] && src1->nb[2]*src1->ne[2] == src1->nb[3]) {
// there is no broadcast and src0, src1 are contiguous across dims 2, 3
// use cublasGemmStridedBatchedEx
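// cuBLAS takes leading dimensions and batch strides in elements, not bytes, so the
// ggml byte strides are divided by the element size of the tensor they came from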
CUBLAS_CHECK(
cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
ne01, ne11, ne10,
&alpha_f16, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA
(const char *) src1_as_f16, CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB
&beta_f16, ( char *) dst_f16, CUDA_R_16F, ne01, dst->nb[2]/sizeof(float), // strideC
ne12*ne13,
CUBLAS_COMPUTE_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
} else {
// use cublasGemmBatchedEx
const int ne23 = ne12*ne13;
const void ** ptrs_src = nullptr;
void ** ptrs_dst = nullptr;
size_t ptrs_src_s = 0;
size_t ptrs_dst_s = 0;
ptrs_src = (const void **) ggml_cuda_pool_malloc(2*ne23*sizeof(void *), &ptrs_src_s);
ptrs_dst = ( void **) ggml_cuda_pool_malloc(1*ne23*sizeof(void *), &ptrs_dst_s);
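// ptrs_src packs ne23 src0 pointers followed by ne23 src1 pointers;
// ptrs_dst holds the ne23 output pointers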
dim3 block_dims(ne13, ne12);
k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>(
src0_as_f16, src1_as_f16, dst_f16,
ptrs_src, ptrs_dst,
ne12, ne13,
ne23,
nb02, nb03,
nb12, nb13,
dst->nb[2], dst->nb[3],
r2, r3);
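// one thread per (i12, i13) pair fills in the three pointers, applying the
// broadcast mapping i02 = i12/r2, i03 = i13/r3 on the source side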
CUDA_CHECK(cudaGetLastError());
CUBLAS_CHECK(
cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
ne01, ne11, ne10,
&alpha_f16, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, nb01/sizeof(half),
(const void **) (ptrs_src + 1*ne23), CUDA_R_16F, nb11/sizeof(float),
&beta_f16, ( void **) (ptrs_dst + 0*ne23), CUDA_R_16F, ne01,
ne23,
CUBLAS_COMPUTE_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
if (ptrs_src_s != 0) {
ggml_cuda_pool_free(ptrs_src, ptrs_src_s);
}
if (ptrs_dst_s != 0) {
ggml_cuda_pool_free(ptrs_dst, ptrs_dst_s);
}
}
#endif
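// convert the F16 results back into dst's F32 buffer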
const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream);
ggml_cuda_pool_free(src1_as_f16, src1_as);
ggml_cuda_pool_free(dst_f16, dst_as);
}
#endif
static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
const bool all_on_device =
@@ -8509,10 +8511,14 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
} else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
// KQV single-batch
ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
} else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
}
#ifndef GGML_USE_CHIPSTAR
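// chipStar builds skip this fast path entirely and fall through to the generic
// ggml_cuda_op_mul_mat paths below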
else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
// KQ + KQV multi-batch
// ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
} else if (src0->type == GGML_TYPE_F32) {
ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
}
#endif
else if (src0->type == GGML_TYPE_F32) {
ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false);
} else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) {