mmvq in cuda path

This commit is contained in:
Meng, Hengyu 2024-08-19 05:42:49 +00:00
parent 5b6d224695
commit 299412e6bc

View file

@@ -3581,7 +3581,8 @@ static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor
 bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
     && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
-    && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE && src1->ne[1] > MMVQ_MIN_BATCH_SIZE;
+    && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE
+    && ctx.stream()->get_backend() == sycl::backend::ext_oneapi_cuda ? true: src1->ne[1] > MMVQ_MIN_BATCH_SIZE;
 bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type)
     && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;