diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index fbf12044d..5714320b8 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -340,7 +340,7 @@ void ggml_init_cublas(void) {
     }
 }
 
-cudaError_t ggml_cuda_cpy_tensor2D(void * dst, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cudaStream_t stream) {
+cudaError_t ggml_cuda_h2d_tensor_2d(void * dst, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cudaStream_t stream) {
     const uint64_t ne0 = src->ne[0];
     const uint64_t ne1 = src->ne[1];
     const uint64_t nb0 = src->nb[0];
diff --git a/ggml-cuda.h b/ggml-cuda.h
index 2db34609f..06e2a2886 100644
--- a/ggml-cuda.h
+++ b/ggml-cuda.h
@@ -40,7 +40,7 @@ void dequantize_row_q5_0_cuda(const void * vx, float * y, int k, cudaStream_t st
 void dequantize_row_q5_1_cuda(const void * vx, float * y, int k, cudaStream_t stream);
 void dequantize_row_q8_0_cuda(const void * vx, float * y, int k, cudaStream_t stream);
 
-cudaError_t ggml_cuda_cpy_tensor2D(void * dst, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cudaStream_t stream);
+cudaError_t ggml_cuda_h2d_tensor_2d(void * dst, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cudaStream_t stream);
 
 #ifdef __cplusplus
 }
diff --git a/ggml.c b/ggml.c
index c1bfe5c8b..00932a7fe 100644
--- a/ggml.c
+++ b/ggml.c
@@ -8238,8 +8238,8 @@ static void ggml_compute_forward_mul_mat_f32(
 
 #if defined(GGML_USE_CUBLAS)
                 // copy data to device
-                CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_X, src0, i03, i02, g_cudaStream));
-                CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_Y, src1, i03, i02, g_cudaStream));
+                CUDA_CHECK(ggml_cuda_h2d_tensor_2d(d_X, src0, i03, i02, g_cudaStream));
+                CUDA_CHECK(ggml_cuda_h2d_tensor_2d(d_Y, src1, i03, i02, g_cudaStream));
 
                 // compute
                 CUBLAS_CHECK(
@@ -8461,7 +8461,7 @@ static void ggml_compute_forward_mul_mat_f16_f32(
                 float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
 
                 // copy data to device
-                CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_X, src0, i03, i02, g_cudaStream));
+                CUDA_CHECK(ggml_cuda_h2d_tensor_2d(d_X, src0, i03, i02, g_cudaStream));
                 CUDA_CHECK(cudaMemcpyAsync(d_Y, y, sizeof(ggml_fp16_t) * y_ne, cudaMemcpyHostToDevice, g_cudaStream));
 
                 // compute
@@ -8713,7 +8713,7 @@ static void ggml_compute_forward_mul_mat_q_f32(
 
 #if defined(GGML_USE_CUBLAS)
                 // copy and dequantize on device
-                CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_Q, src0, i03, i02, g_cudaStream));
+                CUDA_CHECK(ggml_cuda_h2d_tensor_2d(d_Q, src0, i03, i02, g_cudaStream));
 
                 dequantize_row_q_cuda(d_Q, d_X, ne01 * ne00, g_cudaStream);
                 CUDA_CHECK(cudaGetLastError());
@@ -8732,7 +8732,7 @@ static void ggml_compute_forward_mul_mat_q_f32(
 
 #if defined(GGML_USE_CUBLAS)
                 // copy data to device
-                CUDA_CHECK(ggml_cuda_cpy_tensor2D(d_Y, src1, i03, i02, g_cudaStream));
+                CUDA_CHECK(ggml_cuda_h2d_tensor_2d(d_Y, src1, i03, i02, g_cudaStream));
 
                 // compute
                 CUBLAS_CHECK(