From f5b6ed315e24242589a6be8380ffaaae35c449a7 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Fri, 9 Jun 2023 08:35:49 +0300
Subject: [PATCH] metal : Q3_K support

Something is not quite right yet.
---
 ggml-metal.m     |  25 ++--
 ggml-metal.metal | 219 +++++++++++++++++++++++++++++++++++++++++++----
 llama.cpp        |  10 +--
 3 files changed, 224 insertions(+), 30 deletions(-)

diff --git a/ggml-metal.m b/ggml-metal.m
index 16a362fd7..92142e880 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -52,6 +52,7 @@ struct ggml_metal_context {
     GGML_METAL_DECL_KERNEL(get_rows_q4_0);
     GGML_METAL_DECL_KERNEL(get_rows_q4_1);
     GGML_METAL_DECL_KERNEL(get_rows_q2_k);
+    GGML_METAL_DECL_KERNEL(get_rows_q3_k);
     GGML_METAL_DECL_KERNEL(get_rows_q4_k);
     GGML_METAL_DECL_KERNEL(get_rows_q6_k);
     GGML_METAL_DECL_KERNEL(rms_norm);
@@ -59,6 +60,7 @@ struct ggml_metal_context {
     GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32);
     GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32);
     GGML_METAL_DECL_KERNEL(mul_mat_q2_k_f32);
+    GGML_METAL_DECL_KERNEL(mul_mat_q3_k_f32);
     GGML_METAL_DECL_KERNEL(mul_mat_q4_k_f32);
     GGML_METAL_DECL_KERNEL(mul_mat_q6_k_f32);
     GGML_METAL_DECL_KERNEL(rope);
@@ -152,6 +154,7 @@ struct ggml_metal_context * ggml_metal_init(void) {
         GGML_METAL_ADD_KERNEL(get_rows_q4_0);
         GGML_METAL_ADD_KERNEL(get_rows_q4_1);
         GGML_METAL_ADD_KERNEL(get_rows_q2_k);
+        GGML_METAL_ADD_KERNEL(get_rows_q3_k);
         GGML_METAL_ADD_KERNEL(get_rows_q4_k);
         GGML_METAL_ADD_KERNEL(get_rows_q6_k);
         GGML_METAL_ADD_KERNEL(rms_norm);
@@ -159,6 +162,7 @@ struct ggml_metal_context * ggml_metal_init(void) {
         GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32);
         GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32);
         GGML_METAL_ADD_KERNEL(mul_mat_q2_k_f32);
+        GGML_METAL_ADD_KERNEL(mul_mat_q3_k_f32);
         GGML_METAL_ADD_KERNEL(mul_mat_q4_k_f32);
         GGML_METAL_ADD_KERNEL(mul_mat_q6_k_f32);
         GGML_METAL_ADD_KERNEL(rope);
@@ -574,6 +578,15 @@ void ggml_metal_graph_compute(
                             nth1 = 16;
                             [encoder setComputePipelineState:ctx->pipeline_mul_mat_q2_k_f32];
                         } break;
+                    case GGML_TYPE_Q3_K:
+                        {
+                            GGML_ASSERT(ne02 == 1);
+                            GGML_ASSERT(ne12 == 1);
+
+                            nth0 = 4;
+                            nth1 = 16;
+                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q3_k_f32];
+                        } break;
                     case GGML_TYPE_Q4_K:
                         {
                             GGML_ASSERT(ne02 == 1);
@@ -619,15 +632,12 @@ void ggml_metal_graph_compute(
                 if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1) {
                     [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0];
                     [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
-                } else if (src0t == GGML_TYPE_Q2_K) {
+                } else if (src0t == GGML_TYPE_Q2_K ||
+                           src0t == GGML_TYPE_Q3_K ||
+                           src0t == GGML_TYPE_Q4_K ||
+                           src0t == GGML_TYPE_Q6_K) {
                     [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0];
                     [encoder dispatchThreadgroups:MTLSizeMake(ne01, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
-                } else if (src0t == GGML_TYPE_Q4_K) {
-                    [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0];
-                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
-                } else if (src0t == GGML_TYPE_Q6_K) {
-                    [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0];
-                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                 } else {
                     [encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0];
                     [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
@@ -645,6 +655,7 @@ void ggml_metal_graph_compute(
                     case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break;
                     case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break;
                     case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_k]; break;
+                    case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_k]; break;
                     case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_k]; break;
                     case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_k]; break;
                     default: GGML_ASSERT(false && "not implemented");
diff --git a/ggml-metal.metal b/ggml-metal.metal
index fd990a4f5..821de5767 100644
--- a/ggml-metal.metal
+++ b/ggml-metal.metal
@@ -318,8 +318,8 @@ kernel void kernel_mul_mat_q4_0_f32(
     device const block_q4_0 * x = (device const block_q4_0 *) src0 + r0*nb;
     device const float      * y = (device const float      *) src1 + r1*ne10;

-    const uint nth = tptg.x*tptg.y;
-    const uint ith = tptg.y*tpitg.x + tpitg.y;
+    const int nth = tptg.x*tptg.y;
+    const int ith = tptg.y*tpitg.x + tpitg.y;

     const int ix = tpitg.y/4;       // 0 or 1
     const int iy = tpitg.y - 4*ix;  // 0...3
@@ -635,6 +635,13 @@ typedef struct {
     half dmin;          // super-block scale for quantized mins
 } block_q2_k;

+typedef struct {
+    uint8_t hmask[QK_K/8];     // quants - high bit
+    uint8_t qs[QK_K/4];        // quants - low 2 bits
+    uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
+    half d;                    // super-block scale
+} block_q3_k;
+
 typedef struct {
     half d;             // super-block scale for quantized scales
     half dmin;          // super-block scale for quantized mins
@@ -698,6 +705,64 @@ static void dequantize_row_q2_k(device const block_q2_k * x, device float * y, i
     }
 }

+static void dequantize_row_q3_k(device const block_q3_k * x, device float * y, int k) {
+    assert(k % QK_K == 0);
+    const int nb = k / QK_K;
+
+    const uint32_t kmask1 = 0x03030303;
+    const uint32_t kmask2 = 0x0f0f0f0f;
+
+    uint32_t aux[4];
+
+    for (int i = 0; i < nb; i++) {
+
+        const float d_all = (float)x[i].d;
+
+        device const uint8_t * q = x[i].qs;
+        device const uint8_t * hm = x[i].hmask;
+        uint8_t m = 1;
+
+        device const uint32_t * a = (device const uint32_t *)x[i].scales;
+        uint32_t tmp = a[2];
+        aux[2] = ((a[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
+        aux[3] = ((a[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
+        aux[0] = (a[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
+        aux[1] = (a[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
+
+        char4 scales;
+
+        int ia = 0;
+        int is = 4;
+        float dl;
+        for (int n = 0; n < QK_K; n += 128) {
+            int shift = 0;
+            for (int j = 0; j < 4; ++j) {
+
+                if (is == 4) {
+                    scales = as_type<char4>(aux[ia++]);
+                    is = 0;
+                }
+
+                dl = d_all * (scales[is++] - 32);
+                for (int l = 0; l < 16; ++l) {
+                    *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
+                }
+
+                dl = d_all * (scales[is++] - 32);
+                for (int l = 0; l < 16; ++l) {
+                    *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
+                }
+
+                shift += 2;
+                m <<= 1;
+            }
+            q += 32;
+        }
+
+    }
+
+}
+
 static void dequantize_row_q4_k(device const block_q4_k * x, device float * y, int k) {
     assert(k % QK_K == 0);
     const int nb = k / QK_K;
@@ -771,6 +836,22 @@ kernel void kernel_get_rows_q2_k(
             (device float *) ((device char *) dst + i*nb1), ne00);
 }

+kernel void kernel_get_rows_q3_k(
+        device const  void * src0,
+        device const   int * src1,
+        device       float * dst,
+        constant   int64_t & ne00,
+        constant  uint64_t & nb01,
+        constant  uint64_t & nb1,
+        uint tpig[[thread_position_in_grid]]) {
+    const int i = tpig;
+    const int r = ((device int32_t *) src1)[i];
+
+    dequantize_row_q3_k(
+            (device const block_q3_k *) ((device char *) src0 + r*nb01),
+            (device float *) ((device char *) dst + i*nb1), ne00);
+}
+
 kernel void kernel_get_rows_q4_k(
         device const  void * src0,
         device const   int * src1,
@@ -903,19 +984,123 @@ kernel void kernel_mul_mat_q2_k_f32(
         for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
         dst[r1*ne0 + r0] = sum[0];
     }
+}

-    //// accumulate the sum from all threads in the threadgroup
-    //threadgroup_barrier(mem_flags::mem_threadgroup);
-    //for (uint i = nth/2; i > 0; i /= 2) {
-    //    if (ith < i) {
-    //        sum[ith] += sum[ith + i];
-    //    }
-    //    threadgroup_barrier(mem_flags::mem_threadgroup);
-    //}
+kernel void kernel_mul_mat_q3_k_f32(
+        device const  void * src0,
+        device const float * src1,
+        device       float * dst,
+        constant   int64_t & ne00,
+        constant   int64_t & ne01,
+        constant  uint64_t & nb00,
+        constant  uint64_t & nb01,
+        constant  uint64_t & nb02,
+        constant   int64_t & ne10,
+        constant   int64_t & ne11,
+        constant  uint64_t & nb10,
+        constant  uint64_t & nb11,
+        constant  uint64_t & nb12,
+        constant   int64_t & ne0,
+        constant   int64_t & ne1,
+        threadgroup float  * sum [[threadgroup(0)]],
+        uint2 tgpig[[threadgroup_position_in_grid]],
+        uint2 tpig[[thread_position_in_grid]],  // we don't use this for now
+        uint2 tpitg[[thread_position_in_threadgroup]],
+        uint2 tptg[[threads_per_threadgroup]]) {
+
+    const int nb = ne00/QK_K;
+
+    const int64_t r0 = tgpig.x;
+    const int64_t r1 = tgpig.y;
+
+    device const block_q3_k * x = (device const block_q3_k *) src0 + r0*nb;
+    device const float     * yy = (device const float      *) src1 + r1*ne10;
+
+    const int nth = tptg.x*tptg.y;
+    const int ith = tptg.y*tpitg.x + tpitg.y;
+
+    const int step = QK_K / tptg.y;     // we expect this to be 16
+    const int iqs  = step * tpitg.y;    // 0...240 in steps of 16
+    const int ip   = iqs / 128;         // 0 or 1
+    const int il   = (iqs - 128*ip)/16; // 0...7
+    const int n    = 4;
+    const int l0   = n * il;
+    const int is   = l0/16;
+    const uint8_t m = 1 << (4*ip);
+    //const int shift1 = 4*ip;
+    //const int shift2 = 4*ip + 2;
+
+    int8_t sc[4];
+
+    float sumf = 0;
+    for (int i = tpitg.x; i < nb; i += tptg.x) {
+
+        device const uint8_t * q = x[i].qs + 32*ip + l0;
+        device const uint8_t * hm = x[i].hmask + l0;
+        device const uint8_t * scales = x[i].scales + is;
+
+        device const float * y = yy + i * QK_K + 128*ip + l0;
+
+        const float dall = x[i].d;
+
+        //sc[0] = ((scales[ 8] >> shift1) & 3) << 4;
+        //sc[1] = ((scales[10] >> shift1) & 3) << 4;
+        //sc[2] = ((scales[ 8] >> shift2) & 3) << 4;
+        //sc[3] = ((scales[10] >> shift2) & 3) << 4;
+        //if (ip == 0) {
+        //    sc[0] |= (scales[0] & 0xF);
+        //    sc[1] |= (scales[2] & 0xF);
+        //    sc[2] |= (scales[4] & 0xF);
+        //    sc[3] |= (scales[6] & 0xF);
+        //} else {
+        //    sc[0] |= (scales[0] >> 4);
+        //    sc[1] |= (scales[2] >> 4);
+        //    sc[2] |= (scales[4] >> 4);
+        //    sc[3] |= (scales[6] >> 4);
+        //}
+        if (ip == 0) {
+            sc[0] = (scales[0] & 0xF) | (((scales[ 8] >> 0) & 3) << 4);
+            sc[1] = (scales[2] & 0xF) | (((scales[10] >> 0) & 3) << 4);
+            sc[2] = (scales[4] & 0xF) | (((scales[ 8] >> 2) & 3) << 4);
+            sc[3] = (scales[6] & 0xF) | (((scales[10] >> 2) & 3) << 4);
+        } else {
+            sc[0] = (scales[0] >> 4) | (((scales[ 8] >> 4) & 3) << 4);
+            sc[1] = (scales[2] >> 4) | (((scales[10] >> 4) & 3) << 4);
+            sc[2] = (scales[4] >> 4) | (((scales[ 8] >> 6) & 3) << 4);
+            sc[3] = (scales[6] >> 4) | (((scales[10] >> 6) & 3) << 4);
+        }
+
+        float4 sums = {0.f, 0.f, 0.f, 0.f};
+        for (int l = 0; l < n; ++l) {
+            sums[0] += y[l+ 0] * ((int8_t)((q[l] >> 0) & 3) - (hm[l] & (m << 0) ? 0 : 4));
+            sums[1] += y[l+32] * ((int8_t)((q[l] >> 2) & 3) - (hm[l] & (m << 1) ? 0 : 4));
+            sums[2] += y[l+64] * ((int8_t)((q[l] >> 4) & 3) - (hm[l] & (m << 2) ? 0 : 4));
+            sums[3] += y[l+96] * ((int8_t)((q[l] >> 6) & 3) - (hm[l] & (m << 3) ? 0 : 4));
+        }
+
+        sumf += dall * (sums[0] * (sc[0] - 32) + sums[1] * (sc[1] - 32) + sums[2] * (sc[2] - 32) + sums[3] * (sc[3] - 32));
+
+    }
+
+    sum[ith] = sumf;
+
+    //
+    // Accumulate the sum from all threads in the threadgroup
+    //
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+    if (ith%4 == 0) {
+        for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i];
+    }
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+    if (ith%16 == 0) {
+        for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i];
+    }
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+    if (ith == 0) {
+        for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
+        dst[r1*ne0 + r0] = sum[0];
+    }

-    //if (ith == 0) {
-    //    dst[r1*ne0 + r0] = sum[0];
-    //}
 }

 kernel void kernel_mul_mat_q4_k_f32(
@@ -942,8 +1127,8 @@ kernel void kernel_mul_mat_q4_k_f32(
     device const block_q4_k * x = (device const block_q4_k *) src0 + r0*nb;
     device const float     * yy = (device const float      *) src1 + r1*ne10;

-    const uint nth = tptg.x*tptg.y;
-    const uint ith = tptg.y*tpitg.x + tpitg.y;
+    const int nth = tptg.x*tptg.y;
+    const int ith = tptg.y*tpitg.x + tpitg.y;

     const int tid = tpitg.y;   // 0...16
     const int il  = tid/4;     // 0...3
@@ -1051,8 +1236,8 @@ kernel void kernel_mul_mat_q6_k_f32(
     device const block_q6_k * x = (device const block_q6_k *) src0 + r0*nb;
     device const float     * yy = (device const float      *) src1 + r1*ne10;

-    const uint nth = tptg.x*tptg.y;
-    const uint ith = tptg.y*tpitg.x + tpitg.y;
+    const int nth = tptg.x*tptg.y;
+    const int ith = tptg.y*tpitg.x + tpitg.y;

     // Note: we absolutely assume that tptg.y = 16 and QK_K = 256!
     const int iqs = 16 * tpitg.y;
diff --git a/llama.cpp b/llama.cpp
index e100e2bc9..32bee5b70 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2390,12 +2390,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
         } else {
             new_type = quantized_type;
-            // TODO: temporary disabled until Metal / OpenCL support is available
-            // ref: https://github.com/ggerganov/llama.cpp/issues/1711
-            //if (tensor.name == "output.weight") {
-            //    new_type = GGML_TYPE_Q6_K;
-            //}
-            if (tensor.name.find("attention.wv.weight") != std::string::npos) {
+            if (tensor.name == "output.weight") {
+                new_type = GGML_TYPE_Q6_K;
+            }
+            else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
                 if      (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                 else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&