From 64bb149d53e9023ddc5cad1e9ce488ce11e2ab6d Mon Sep 17 00:00:00 2001
From: Eve <139727413+netrunnereve@users.noreply.github.com>
Date: Sun, 29 Dec 2024 17:05:19 -0500
Subject: [PATCH] data b cache example, slower than original

---
 .../vulkan-shaders/mul_mat_vec_q6_k.comp      | 25 +++++++++++--------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
index 25919ce85..4f60aa8be 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
@@ -12,6 +12,7 @@ layout (constant_id = 1) const uint NUM_ROWS = 1;
 shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE];
 shared FLOAT_TYPE sccache[BLOCK_SIZE/16][16];
 shared block_q6_K_packed16 blkcache[BLOCK_SIZE/16];
+shared B_TYPE bycache[BLOCK_SIZE/16][QUANT_K];
 
 void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
     uint a_offset, b_offset, d_offset;
@@ -34,7 +35,6 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
     const uint ql_offset = 64*v_im + l0;
     const uint qh_offset = 32*v_im + l0;
     const uint s_offset = 8*v_im + is;
-    const uint y_offset = 128*v_im + l0;
     const uint bcs_offset = (itid%2 == 1) ? 8 : 0;
 
     FLOAT_TYPE temp[NUM_ROWS];
@@ -45,12 +45,16 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
 
     [[unroll]] for (uint i0 = 0; i0 < num_blocks_per_row; i0 += it_size) {
         uint i = i0 + ix; // 16 thread group specific counter
-        const uint y_idx = i * QUANT_K + y_offset;
+        const uint y_idx = i0 * QUANT_K;
+        const int blim = min(int(num_blocks_per_row) - int(i0), 4);
 
-        B_TYPE_VEC4 by0 = data_b_v4[(b_offset + y_idx) / 4];
-        B_TYPE_VEC4 by32 = data_b_v4[(b_offset + y_idx) / 4 + 8];
-        B_TYPE_VEC4 by64 = data_b_v4[(b_offset + y_idx) / 4 + 16];
-        B_TYPE_VEC4 by96 = data_b_v4[(b_offset + y_idx) / 4 + 24];
+        // assume 64 threads
+        [[unroll]] for (int n = 0; n < blim; ++n) {
+            [[unroll]] for (int l = 0; l < 4; ++l) {
+                bycache[n][tid + 64*l] = data_b[b_offset + y_idx + QUANT_K*n + tid + 64*l];
+            }
+        }
+        barrier();
 
         uint ibi = first_row*num_blocks_per_row;
         [[unroll]] for (uint n = 0; n < num_rows; ++n) {
@@ -59,7 +63,6 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
 
             // cache full superblock into shared memory with coalesced reads
             // we assume 64 threads here!
-            const int blim = min(int(num_blocks_per_row) - int(i0), 4);
             // this is required as this loop is super sensitive to unrolling with hardcoded 4
             if (blim == 4) {
                 if (tid < 52) {
@@ -109,10 +112,10 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
 
             FLOAT_TYPE sum[4] = {0, 0, 0, 0};
             [[unroll]] for (uint l = 0; l < 4; ++l) {
-                sum[0] = fma(FLOAT_TYPE(by0[l]), FLOAT_TYPE(int8_t(q0[l]) - 32), sum[0]);
-                sum[1] = fma(FLOAT_TYPE(by32[l]), FLOAT_TYPE(int8_t(q1[l]) - 32), sum[1]);
-                sum[2] = fma(FLOAT_TYPE(by64[l]), FLOAT_TYPE(int8_t(q2[l]) - 32), sum[2]);
-                sum[3] = fma(FLOAT_TYPE(by96[l]), FLOAT_TYPE(int8_t(q3[l]) - 32), sum[3]);
+                sum[0] = fma(FLOAT_TYPE(bycache[ix][128*v_im + l0 + l]), FLOAT_TYPE(int8_t(q0[l]) - 32), sum[0]);
+                sum[1] = fma(FLOAT_TYPE(bycache[ix][128*v_im + l0 + 32 + l]), FLOAT_TYPE(int8_t(q1[l]) - 32), sum[1]);
+                sum[2] = fma(FLOAT_TYPE(bycache[ix][128*v_im + l0 + 64 + l]), FLOAT_TYPE(int8_t(q2[l]) - 32), sum[2]);
+                sum[3] = fma(FLOAT_TYPE(bycache[ix][128*v_im + l0 + 96 + l]), FLOAT_TYPE(int8_t(q3[l]) - 32), sum[3]);
             }
 
             [[unroll]] for (uint l = 0; l < 4; ++l)
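
For reference, below is a minimal, self-contained GLSL sketch (not taken from the patch) of the cooperative shared-memory caching pattern the diff introduces for data_b: every thread of an assumed 64-wide workgroup loads a strided slice of one QUANT_K-sized superblock into shared memory, and a barrier() makes the cached values visible before any thread reads them back. The buffer names, bindings, and the toy consumer loop are illustrative assumptions, not ggml-vulkan's actual interface.

#version 450
#extension GL_EXT_control_flow_attributes : enable

layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;

layout(binding = 0) readonly  buffer B { float data_b[]; };
layout(binding = 1) writeonly buffer D { float data_d[]; };

const uint QUANT_K = 256;       // assumed superblock size, as for Q6_K
shared float bycache[QUANT_K];  // one cached superblock of B per workgroup

void main() {
    const uint tid = gl_LocalInvocationID.x;
    const uint blk = gl_WorkGroupID.x;

    // coalesced cooperative load: 64 threads x 4 strided elements = 256 values
    [[unroll]] for (uint l = 0; l < QUANT_K / 64; ++l) {
        bycache[tid + 64*l] = data_b[blk*QUANT_K + tid + 64*l];
    }
    barrier();  // whole superblock must be written before any thread reads it

    // toy consumer: each thread accumulates its strided slice from shared memory
    float acc = 0.0;
    [[unroll]] for (uint l = 0; l < QUANT_K / 64; ++l) {
        acc += bycache[tid + 64*l];
    }
    data_d[blk*64 + tid] = acc;
}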