From 27a69d6a75f8f412365a2d35e277fbbdb3a95d12 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Sun, 11 Jun 2023 19:00:57 +0300 Subject: [PATCH] metal : q3_K finally working Not optimized at all. What was the issue? The scales are not 4-bytes aligned, and I was accessing them with a uint32_t pointer. When I tried that on CUDA, I got an error (illegal memory access) and added a memcpy to a local array of 3 uint32_t's. But on Metal it told me there is no memcpy, so I tried accessing directly. There is no error, just garbage results. At some point I did try accessing the scales with an uint16_t pointer (the scales are for sure 2-byte aligned), but was still getting garbage. I guess, there must have been another bug. Now access to scales is via a uint16_t pointer and, after starting from scratch from the C dequantize function, it finally works. --- ggml-metal.metal | 151 ++++++++++++++++++++++++----------------------- 1 file changed, 78 insertions(+), 73 deletions(-) diff --git a/ggml-metal.metal b/ggml-metal.metal index 23c0803f9..e985f8a9e 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -727,39 +727,48 @@ static void dequantize_row_q3_k(device const block_q3_k * x, device float * y, i const uint32_t kmask1 = 0x03030303; const uint32_t kmask2 = 0x0f0f0f0f; - uint32_t aux[4]; - thread const int8_t * scales = (thread const int8_t *)aux; + //uint32_t aux[4]; + uint16_t aux[8]; + thread const int8_t * scales = (thread const int8_t*)aux; for (int i = 0; i < nb; i++) { - const float d_all = (float)x[i].d; + const float d_all = (float)(x[i].d); device const uint8_t * q = x[i].qs; - device const uint8_t * hm = x[i].hmask; + device const uint8_t * h = x[i].hmask; uint8_t m = 1; - device const uint32_t * a = (device const uint32_t *)x[i].scales; - uint32_t tmp = a[2]; - aux[2] = ((a[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); - aux[3] = ((a[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); - aux[0] = (a[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); - aux[1] = 
(a[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + //device const uint32_t * a = (device const uint32_t *)x[i].scales; + //aux[0] = (a[0] & kmask2) | (((a[2] >> 0) & kmask1) << 4); + //aux[1] = (a[1] & kmask2) | (((a[2] >> 2) & kmask1) << 4); + //aux[2] = ((a[0] >> 4) & kmask2) | (((a[2] >> 4) & kmask1) << 4); + //aux[3] = ((a[1] >> 4) & kmask2) | (((a[2] >> 6) & kmask1) << 4); + + device const uint16_t * a = (device const uint16_t *)x[i].scales; + aux[0] = (a[0] & kmask2) | (((a[4] >> 0) & kmask1) << 4); + aux[1] = (a[1] & kmask2) | (((a[5] >> 0) & kmask1) << 4); + aux[2] = (a[2] & kmask2) | (((a[4] >> 2) & kmask1) << 4); + aux[3] = (a[3] & kmask2) | (((a[5] >> 2) & kmask1) << 4); + aux[4] = ((a[0] >> 4) & kmask2) | (((a[4] >> 4) & kmask1) << 4); + aux[5] = ((a[1] >> 4) & kmask2) | (((a[5] >> 4) & kmask1) << 4); + aux[6] = ((a[2] >> 4) & kmask2) | (((a[4] >> 6) & kmask1) << 4); + aux[7] = ((a[3] >> 4) & kmask2) | (((a[5] >> 6) & kmask1) << 4); int is = 0; float dl; for (int n = 0; n < QK_K; n += 128) { - int shift = 0; for (int j = 0; j < 4; ++j) { dl = d_all * (scales[is++] - 32); for (int l = 0; l < 16; ++l) { - *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4)); + *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((h[l+ 0] & m) ? 0 : 4)); } dl = d_all * (scales[is++] - 32); for (int l = 0; l < 16; ++l) { - *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4)); + *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((h[l+16] & m) ? 
0 : 4)); } shift += 2; @@ -1052,8 +1061,10 @@ kernel void kernel_mul_mat_q3_k_f32( uint2 tpitg[[thread_position_in_threadgroup]], uint2 tptg[[threads_per_threadgroup]]) { - const uint32_t kmask1 = 0x03030303; - const uint32_t kmask2 = 0x0f0f0f0f; + //const uint32_t kmask1 = 0x03030303; + //const uint32_t kmask2 = 0x0f0f0f0f; + const uint16_t kmask1 = 0x0303; + const uint16_t kmask2 = 0x0f0f; const uint8_t m3 = 3; const int8_t m4 = 4; @@ -1069,80 +1080,74 @@ kernel void kernel_mul_mat_q3_k_f32( const int nth = tptg.x*tptg.y; const int ith = tptg.y*tpitg.x + tpitg.y; - const int tid = tpitg.y; - const int il = tid/4; // 0...3 0 -> 0...63, 1 -> 64...127, 2 -> 128...191, 3 -> 192...255 - const int ip = il / 2; // 0 or 1 0 -> use 1st 32 q's (0...127), 1 -> 2nd 32 (128...255) - const int is = il % 2; // 0 or 1 0 -> 0...63, 128...191, 1 -> 64...127, 192...255 - const int ir = tid - 4*il; // 0...3 - const int n = 4; - const int l0 = n * ir; // first index for this thread within a group of 32 (0, 4, 8, 12) - // 0...31 use 1<<0, 32...63 use 1<<1, 64...95 use 1<<2, 96...128 use 1<<3, etc. - // we process 64*il...64*il+63 -> 1st mask is 1<<(2*il), second is 1<<(2*il+1) - // masks for high bit - const uint8_t m = 1 << (2*il); - const uchar2 mask = {m, (uint8_t)(m << 1)}; + const int tid = tpitg.y; // expecting 16 + const int ip = tid/8; // 0 or 1 + const int il = tid/2 - 4*ip; // 0...3 + const int ir = tid%2; + const int n = 8; + const int l0 = n*ir; - const int shift1 = 4*ip; // 1st shift for scale. must be 0 (0...127) or 4 (128...255) - const int shift2 = 2*il; // 2nd shift for scale. 0, 2, 4, or 6 - // 1st shift for quants must be 0 in 0...31, 2 in 32...64, 4 in 64...96, 6 in 96...128, then agsin 0, 2, etc. 
- const int shift3 = 4*is; - const int shift4 = shift3 + 2; - - const int q_offset = 32*ip + l0; - const int y_offset = 64*il + l0; + uint16_t aux[8]; + thread const int8_t * scales = (thread const int8_t*)aux; float sumf = 0; for (int i = tpitg.x; i < nb; i += tptg.x) { - // Copied from the C de-quantization code - //aux[0] = ((a[0] >> 0) & kmask2) | (((a[2] >> 0) & kmask1) << 4); - //aux[1] = ((a[1] >> 0) & kmask2) | (((a[2] >> 2) & kmask1) << 4); + const float d_all = (float)(x[i].d); + + device const uint8_t * q = x[i].qs + 32*ip + l0; + device const uint8_t * h = x[i].hmask + l0; + device const float * y = yy + i * QK_K + 128*ip + 32*il + l0; + + //device const uint32_t * a = (device const uint32_t *)x[i].scales; + //aux[0] = (a[0] & kmask2) | (((a[2] >> 0) & kmask1) << 4); + //aux[1] = (a[1] & kmask2) | (((a[2] >> 2) & kmask1) << 4); //aux[2] = ((a[0] >> 4) & kmask2) | (((a[2] >> 4) & kmask1) << 4); //aux[3] = ((a[1] >> 4) & kmask2) | (((a[2] >> 6) & kmask1) << 4); - //// 0....63 we need a[0] with shift=0, a[2] with shift 0 - //// 64...127 we need a[1] with shift=0, a[2] with shift 2 - ////128...191 we need a[0] with shift=4, a[2] with shift 4 - ////192...255 we need a[1] with shift=4, a[2] with shift 6 - //// a[is] >> (4*ip) & 0xF | a[2] >> (2*il) & 3 - device const uint32_t * a = (device const uint32_t *)x[i].scales; - const char4 sc = as_type(((a[is] >> shift1) & kmask2) | (((a[2] >> shift2) & kmask1) << 4)); + device const uint16_t * a = (device const uint16_t *)x[i].scales; + aux[0] = (a[0] & kmask2) | (((a[4] >> 0) & kmask1) << 4); + aux[1] = (a[1] & kmask2) | (((a[5] >> 0) & kmask1) << 4); + aux[2] = (a[2] & kmask2) | (((a[4] >> 2) & kmask1) << 4); + aux[3] = (a[3] & kmask2) | (((a[5] >> 2) & kmask1) << 4); + aux[4] = ((a[0] >> 4) & kmask2) | (((a[4] >> 4) & kmask1) << 4); + aux[5] = ((a[1] >> 4) & kmask2) | (((a[5] >> 4) & kmask1) << 4); + aux[6] = ((a[2] >> 4) & kmask2) | (((a[4] >> 6) & kmask1) << 4); + aux[7] = ((a[3] >> 4) & kmask2) | (((a[5] >> 
6) & kmask1) << 4); - // Here I was thinking "what if the above is not processed correctly because x[i].scales is not 4-byte - // aligned?". If that was the issue, using a uint16_t pointer should solve it as x[i].scales is 2-byte aligned. - // It does not solve the problem, it just makes it run slower. - //device const uint16_t * a = (device const uint16_t *)x[i].scales; - //const char2 sc1 = as_type((uint16_t)(((a[2*is+0] >> shift1) & kmask2) | (((a[4] >> shift2) & kmask1) << 4))); - //const char2 sc2 = as_type((uint16_t)(((a[2*is+1] >> shift1) & kmask2) | (((a[5] >> shift2) & kmask1) << 4))); + uint8_t m = 1 << (4*ip + il); + int is = 8*ip + 2*il; + float dl; + //for (int n = 0; n < QK_K; n += 128) { + int shift = 2*il; + //for (int j = 0; j < 4; ++j) { - device const uint8_t * q = x[i].qs + q_offset; - device const uint8_t * h = x[i].hmask + l0; + dl = d_all * (scales[is++] - 32); + for (int l = 0; l < n; ++l) { + sumf += y[l+ 0] * dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((h[l+ 0] & m) ? 0 : 4)); + } - device const float * y = yy + i * QK_K + y_offset; - - const float dall = (float)x[i].d; - - float4 sums = {0.f, 0.f, 0.f, 0.f}; - for (int l = 0; l < n; ++l) { - sums[0] += y[l+ 0] * ((int8_t)((q[l+ 0] >> shift3) & m3) - ((h[l+ 0] & mask[0]) ? 0 : m4)); - sums[1] += y[l+16] * ((int8_t)((q[l+16] >> shift3) & m3) - ((h[l+16] & mask[0]) ? 0 : m4)); - sums[2] += y[l+32] * ((int8_t)((q[l+ 0] >> shift4) & m3) - ((h[l+ 0] & mask[1]) ? 0 : m4)); - sums[3] += y[l+48] * ((int8_t)((q[l+16] >> shift4) & m3) - ((h[l+16] & mask[1]) ? 0 : m4)); - } - - sumf += dall * (sums[0] * (sc[0] - 32) - + sums[1] * (sc[1] - 32) - + sums[2] * (sc[2] - 32) - + sums[3] * (sc[3] - 32)); - //sumf += dall * (sums[0] * (sc1[0] - 32) - // + sums[1] * (sc1[1] - 32) - // + sums[2] * (sc2[0] - 32) - // + sums[3] * (sc2[1] - 32)); + dl = d_all * (scales[is++] - 32); + for (int l = 0; l < n; ++l) { + sumf += y[l+16] * dl * ((int8_t)((q[l+16] >> shift) & 3) - ((h[l+16] & m) ? 
0 : 4)); + } + y += 32; + shift += 2; + m <<= 1; + //} + //q += 32; + //} } sum[ith] = sumf; + //threadgroup_barrier(mem_flags::mem_threadgroup); + //if (ith == 0) { + // for (int i = 1; i < nth; ++i) sum[0] += sum[i]; + // dst[r1*ne0 + r0] = sum[0]; + //} + // // Accumulate the sum from all threads in the threadgroup //