From d9bc43c555ef617d0bf5de6d1a4f70152b30a2d6 Mon Sep 17 00:00:00 2001
From: Henri Vasserman
Date: Fri, 28 Apr 2023 17:38:22 +0300
Subject: [PATCH] Cuda: non-contiguous tensor support

---
 ggml-cuda.cu | 38 ++++++++++++++++++++++++++++++++++++++
 ggml-cuda.h  |  7 +++++++
 ggml.c       | 29 +++++++++++++++--------------
 3 files changed, 60 insertions(+), 14 deletions(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index b1bd29b10..2869c8032 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -339,3 +339,41 @@ void ggml_init_cublas(void) {
         // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, NULL));
     }
 }
+
+void * ggml_cuda_host_malloc(size_t size) {
+    void * ptr;
+    CUDA_CHECK(cudaMallocHost((void **) &ptr, size));
+    return ptr;
+}
+
+void ggml_cuda_host_free(void * ptr) {
+    CUDA_CHECK(cudaFreeHost(ptr));
+}
+
+cudaError_t ggml_cuda_cpy_tensor2D(void * dst, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cudaStream_t stream) {
+    const uint64_t ne0 = src->ne[0];
+    const uint64_t ne1 = src->ne[1];
+    const uint64_t nb0 = src->nb[0];
+    const uint64_t nb1 = src->nb[1];
+    const uint64_t nb2 = src->nb[2];
+    const uint64_t nb3 = src->nb[3];
+    const enum ggml_type type = src->type;
+    const size_t ts = ggml_type_size(type);
+    const size_t bs = ggml_blck_size(type);
+
+    const void * x = (const void *) ((const char *) src->data + i2*nb2 + i3*nb3);
+    if (nb0 == ts && nb1 == ts*ne0/bs) {
+        return cudaMemcpyAsync(dst, x, ne1*nb1, cudaMemcpyHostToDevice, stream);
+    } else if (nb0 == ts) {
+        return cudaMemcpy2DAsync(dst, ts*ne0/bs, x, nb1, ts*ne0/bs, ne1, cudaMemcpyHostToDevice, stream);
+    } else {
+        for (uint64_t i1 = 0; i1 < ne1; i1++) {
+            const void * rx = (const void *) ((const char *) x + i1*nb1);
+            void * rd = (void *) ((char *) dst + i1*ts*ne0/bs);
+            // pretend the row is a matrix with cols=1
+            cudaError_t r = cudaMemcpy2DAsync(rd, ts/bs, rx, nb0, ts/bs, ne0, cudaMemcpyHostToDevice, stream);
+            if (r != cudaSuccess) return r;
+        }
+        return cudaSuccess;
+    }
+}
diff --git a/ggml-cuda.h b/ggml-cuda.h
index ed9b44184..151cbd239 100644
--- a/ggml-cuda.h
+++ b/ggml-cuda.h
@@ -1,5 +1,6 @@
 #include <cublas_v2.h>
 #include <cuda_runtime.h>
+#include "ggml.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -39,6 +40,12 @@ void dequantize_row_q5_0_cuda(const void * vx, float * y, int k, cudaStream_t st
 void dequantize_row_q5_1_cuda(const void * vx, float * y, int k, cudaStream_t stream);
 void dequantize_row_q8_0_cuda(const void * vx, float * y, int k, cudaStream_t stream);
 
+void ggml_cuda_convert_fp16_to_fp32(const ggml_fp16_t * x, float * y, int n, cudaStream_t stream);
+cudaError_t ggml_cuda_cpy_tensor2D(void * dst, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cudaStream_t stream);
+
+typedef void (*dequantize_row_q_cuda_t)(const void * x, float * y, int k, cudaStream_t stream);
+dequantize_row_q_cuda_t ggml_get_dequantize_row_q_cuda(enum ggml_type type);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/ggml.c b/ggml.c
index 1fbf2955d..c1bfe5c8b 100644
--- a/ggml.c
+++ b/ggml.c
@@ -8120,8 +8120,12 @@ static bool ggml_compute_forward_mul_mat_use_blas(
     const int64_t ne1 = dst->ne[1];
 
     // TODO: find the optimal values for these
-    if (ggml_is_contiguous(src0) &&
-        ggml_is_contiguous(src1) && ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32))) {
+    if (
+#if !defined(GGML_USE_CUBLAS)
+        ggml_is_contiguous(src0) &&
+        ggml_is_contiguous(src1) &&
+#endif
+        ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32))) {
 
         /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
         return true;
@@ -8230,15 +8234,12 @@ static void ggml_compute_forward_mul_mat_f32(
 
     for (int64_t i03 = 0; i03 < ne03; i03++) {
         for (int64_t i02 = 0; i02 < ne02; i02++) {
-            const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
-            const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
-
             float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
 
 #if defined(GGML_USE_CUBLAS)
             // copy data to device
-            CUDA_CHECK(cudaMemcpyAsync(d_X, x, sizeof(float) * x_ne, cudaMemcpyHostToDevice, g_cudaStream));
-            CUDA_CHECK(cudaMemcpyAsync(d_Y, y, sizeof(float) * y_ne, cudaMemcpyHostToDevice, g_cudaStream));
+            CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_X, src0, i03, i02, g_cudaStream));
+            CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_Y, src1, i03, i02, g_cudaStream));
 
             // compute
             CUBLAS_CHECK(
@@ -8251,6 +8252,9 @@ static void ggml_compute_forward_mul_mat_f32(
             // copy data to host
             CUDA_CHECK(cudaMemcpyAsync(d, d_D, sizeof(float) * d_ne, cudaMemcpyDeviceToHost, g_cudaStream));
 #else
+            const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
+            const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
+
             // zT = y * xT
             cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                     ne11, ne01, ne10,
@@ -8457,7 +8461,7 @@ static void ggml_compute_forward_mul_mat_f16_f32(
             float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
 
             // copy data to device
-            CUDA_CHECK(cudaMemcpyAsync(d_X, x, sizeof(ggml_fp16_t) * x_ne, cudaMemcpyHostToDevice, g_cudaStream));
+            CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_X, src0, i03, i02, g_cudaStream));
             CUDA_CHECK(cudaMemcpyAsync(d_Y, y, sizeof(ggml_fp16_t) * y_ne, cudaMemcpyHostToDevice, g_cudaStream));
 
             // compute
@@ -8705,19 +8709,16 @@ static void ggml_compute_forward_mul_mat_q_f32(
 
     for (int64_t i03 = 0; i03 < ne03; i03++) {
         for (int64_t i02 = 0; i02 < ne02; i02++) {
-            const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
-
             float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
 
 #if defined(GGML_USE_CUBLAS)
             // copy and dequantize on device
-            CUDA_CHECK(
-                cudaMemcpyAsync(d_Q, (char *) src0->data + i03*nb03 + i02*nb02,
-                    GGML_TYPE_SIZE[type] * x_ne / GGML_BLCK_SIZE[type], cudaMemcpyHostToDevice, g_cudaStream));
+            CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_Q, src0, i03, i02, g_cudaStream));
 
             dequantize_row_q_cuda(d_Q, d_X, ne01 * ne00, g_cudaStream);
             CUDA_CHECK(cudaGetLastError());
 #else
+            const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
             {
                 size_t id = 0;
                 for (int64_t i01 = 0; i01 < ne01; ++i01) {
@@ -8731,7 +8732,7 @@ static void ggml_compute_forward_mul_mat_q_f32(
 
 #if defined(GGML_USE_CUBLAS)
             // copy data to device
-            CUDA_CHECK(cudaMemcpyAsync(d_Y, y, sizeof(float) * y_ne, cudaMemcpyHostToDevice, g_cudaStream));
+            CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_Y, src1, i03, i02, g_cudaStream));
 
             // compute
             CUBLAS_CHECK(
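
Note on the copy strategy (illustrative, not part of the patch): ggml_cuda_cpy_tensor2D picks between three copies depending on layout. Fully contiguous rows get a single cudaMemcpyAsync; contiguous elements with padded rows (nb1 > ts*ne0/bs) get a single pitched cudaMemcpy2DAsync; strided elements (nb0 != ts) get one cudaMemcpy2DAsync per row, treating the row as an ne0-high, ts/bs-wide "matrix" with source pitch nb0. The standalone sketch below shows the same pitched-copy trick on a toy case, gathering a strided column of a row-major float matrix into a dense device vector. It assumes only the CUDA runtime; the sizes and the CHECK macro are made up for the example.

// Standalone sketch: gather a strided column with cudaMemcpy2DAsync.
// Hypothetical toy example, not part of ggml; sizes are made up.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK(call) do { cudaError_t e = (call); if (e != cudaSuccess) { \
    fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(e)); exit(1); } } while (0)

int main(void) {
    const int rows = 4, cols = 3;

    // pinned host buffer, as ggml_cuda_host_malloc would provide
    float * h;
    CHECK(cudaMallocHost((void **) &h, rows * cols * sizeof(float)));
    for (int i = 0; i < rows * cols; i++) h[i] = (float) i;

    float * d;
    CHECK(cudaMalloc((void **) &d, rows * sizeof(float)));

    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // Column 1 of the row-major matrix is a "2D" region: each copied row
    // is one float wide, there are `rows` of them, and the source pitch
    // is the matrix row stride in bytes -- the same idea as the nb0 != ts
    // case in ggml_cuda_cpy_tensor2D.
    CHECK(cudaMemcpy2DAsync(d, sizeof(float),
                            h + 1, cols * sizeof(float),
                            sizeof(float), rows,
                            cudaMemcpyHostToDevice, stream));

    float out[4];
    CHECK(cudaMemcpyAsync(out, d, rows * sizeof(float), cudaMemcpyDeviceToHost, stream));
    CHECK(cudaStreamSynchronize(stream));
    for (int i = 0; i < rows; i++) printf("%g\n", out[i]); // prints 1 4 7 10

    CHECK(cudaFree(d));
    CHECK(cudaFreeHost(h));
    CHECK(cudaStreamDestroy(stream));
    return 0;
}

In the patch's nb0 != ts branch the same call is issued once per tensor row with width ts/bs, height ne0, and source pitch nb0, which is what the "pretend the row is a matrix with cols=1" comment refers to.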