From 5e1c4089d82142da2ef2560925fca76de8b660f3 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 7 Sep 2023 14:11:21 +0300
Subject: [PATCH 1/5] metal : fix kernel_norm

ggml-ci
---
 ggml-metal.m     |  8 ++++++--
 ggml-metal.metal | 39 ++++++++++++++++++---------------------
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/ggml-metal.m b/ggml-metal.m
index 521ca180f..5135e1cbb 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -995,8 +995,12 @@ void ggml_metal_graph_compute(
                             else if (src0t == GGML_TYPE_Q6_K) {
                                 [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                             } else {
-                                int64_t ny = (ne11 + 3)/4;
-                                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                                [encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0];
+                                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+
+                                // TODO: this breaks for Q4_0 - understand why and fix it
+                                //int64_t ny = (ne11 + 3)/4;
+                                //[encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                             }
                         }
                     } break;
diff --git a/ggml-metal.metal b/ggml-metal.metal
index 119fcbeb6..a107e7d97 100644
--- a/ggml-metal.metal
+++ b/ggml-metal.metal
@@ -111,7 +111,7 @@ kernel void kernel_soft_max(
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3 tpitg[[thread_position_in_threadgroup]],
         uint3   ntg[[threads_per_threadgroup]]) {
-    const int64_t i03 = tgpig[2];
+    const int64_t i03 = tgpig[2];
     const int64_t i02 = tgpig[1];
     const int64_t i01 = tgpig[0];
 
@@ -220,27 +220,26 @@ kernel void kernel_norm(
         }
         threadgroup_barrier(mem_flags::mem_threadgroup);
     }
-    //// broadcast
-    //if (tpitg == 0) {
-    //    sum[0] /= ne00;
-    //}
-    //threadgroup_barrier(mem_flags::mem_threadgroup);
+    // broadcast
+    if (tpitg == 0) {
+        sum[0] /= ne00;
+    }
+    threadgroup_barrier(mem_flags::mem_threadgroup);
     const float mean = sum[0];
 
-    // recenter and VARIANCE
+    // recenter
     device float * y = dst + tgpig*ne00;
-    sum[tpitg] = 0.0f;
     for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
         y[i00] = x[i00] - mean;
+    }
+
+    // VARIANCE
+    // parallel sum
+    sum[tpitg] = 0.0f;
+    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
         sum[tpitg] += y[i00] * y[i00];
     }
 
-    //// VARIANCE
-    //// parallel sum
-    //sum[tpitg] = 0.0f;
-    //for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
-    //    sum[tpitg] += y[i00] * y[i00];
-    //}
     // reduce
     threadgroup_barrier(mem_flags::mem_threadgroup);
     for (uint i = ntg/2; i > 0; i /= 2) {
@@ -249,11 +248,11 @@ kernel void kernel_norm(
         }
         threadgroup_barrier(mem_flags::mem_threadgroup);
     }
-    //// broadcast
-    //if (tpitg == 0) {
-    //    sum[0] /= ne00;
-    //}
-    //threadgroup_barrier(mem_flags::mem_threadgroup);
+    // broadcast
+    if (tpitg == 0) {
+        sum[0] /= ne00;
+    }
+    threadgroup_barrier(mem_flags::mem_threadgroup);
     const float variance = sum[0];
 
     const float scale = 1.0f/sqrt(variance + eps);
@@ -262,7 +261,6 @@ kernel void kernel_norm(
     }
 }
 
-
 kernel void kernel_rms_norm(
         device const void * src0,
         device       float * dst,
@@ -630,7 +628,6 @@ kernel void kernel_mul_mat_f16_f32(
             }
         }
     }
-
 }
 
 kernel void kernel_alibi_f32(

From ed92c3d4b20df7fc95d4cecb2208aa7302c61a89 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 7 Sep 2023 14:59:48 +0300
Subject: [PATCH 2/5] metal : put warning in kernel_norm to not combine the loops

---
 ggml-metal.metal | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ggml-metal.metal b/ggml-metal.metal
index a107e7d97..b8e67b2c5 100644
--- a/ggml-metal.metal
+++ b/ggml-metal.metal
@@ -235,6 +235,12 @@ kernel void kernel_norm(
 
     // VARIANCE
     // parallel sum
+    //
+    // WARNING: combining this loop with the one above will give you wrong results for nth == 256
+    //          I have no idea why, so for now I am keeping them separate. But this behavior is very concerning.
+    //          Tested with:
+    //            ./perplexity -m ./falcon-7b/ggml-model-q4_0.gguf -f wiki.test.raw -ngl 1 -t 4
+    //
     sum[tpitg] = 0.0f;
     for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
         sum[tpitg] += y[i00] * y[i00];

From 783379670a38cb5f748073d58e820d9352a5a0f3 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 7 Sep 2023 15:20:07 +0300
Subject: [PATCH 3/5] metal : restore original F16 mat-vec multiplication

It works after the norm fixes
---
 ggml-metal.m | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/ggml-metal.m b/ggml-metal.m
index 5135e1cbb..521ca180f 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -995,12 +995,8 @@ void ggml_metal_graph_compute(
                             else if (src0t == GGML_TYPE_Q6_K) {
                                 [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                             } else {
-                                [encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0];
-                                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
-
-                                // TODO: this breaks for Q4_0 - understand why and fix it
-                                //int64_t ny = (ne11 + 3)/4;
-                                //[encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                                int64_t ny = (ne11 + 3)/4;
+                                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                             }
                         }
                     } break;

From efac2d469fa257c9320ac8af77b34e3cc389cc20 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 7 Sep 2023 15:32:19 +0300
Subject: [PATCH 4/5] common : don't do warm-up with more than n_batch tokens (close #3058)

ggml-ci
---
 common/common.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/common/common.cpp b/common/common.cpp
index 22f65ac46..28b7c6300 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -773,7 +773,7 @@ std::tuple llama_init_from_gpt_par
         LOG("warming up the model with an empty run\n");
 
         const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
-        llama_eval(lctx, tmp.data(), tmp.size(), 0, params.n_threads);
+        llama_eval(lctx, tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, params.n_threads);
         llama_reset_timings(lctx);
     }
 

From 2f689dee06592eedab53fcb822463eac1b238b87 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 7 Sep 2023 15:33:21 +0300
Subject: [PATCH 5/5] metal : minor

---
 ggml-metal.metal | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml-metal.metal b/ggml-metal.metal
index b8e67b2c5..d66ff340a 100644
--- a/ggml-metal.metal
+++ b/ggml-metal.metal
@@ -111,7 +111,7 @@ kernel void kernel_soft_max(
         uint3 tgpig[[threadgroup_position_in_grid]],
         uint3 tpitg[[thread_position_in_threadgroup]],
         uint3   ntg[[threads_per_threadgroup]]) {
-    const int64_t i03 = tgpig[2];
+    const int64_t i03 = tgpig[2];
     const int64_t i02 = tgpig[1];
     const int64_t i01 = tgpig[0];
 
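Note (illustration only, not part of the patch series): the kernel_norm changes above all revolve around one pattern - per-row mean/variance computed with partial sums in threadgroup memory, a tree reduction, a single-thread broadcast of the reduced value, and (per PATCH 2/5) a variance pass kept separate from the recenter pass. The standalone C++20 sketch below mirrors those steps on the CPU, with std::thread and std::barrier standing in for the Metal threadgroup and threadgroup_barrier. The row length (ne00), thread count (ntg), input values, epsilon, and file name are arbitrary choices for the example, not values taken from ggml.

// norm_sketch.cpp (hypothetical file name) - build with: g++ -std=c++20 -pthread norm_sketch.cpp
#include <barrier>
#include <cmath>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
    const int ne00 = 1024; // row length (assumed for the example)
    const int ntg  = 32;   // "threads per threadgroup" (assumed; must be a power of two here)

    std::vector<float> x(ne00), y(ne00);
    for (int i = 0; i < ne00; ++i) x[i] = 0.001f*i;

    std::vector<float> sum(ntg, 0.0f); // plays the role of the threadgroup buffer
    std::barrier sync(ntg);            // plays the role of threadgroup_barrier

    auto worker = [&](int tpitg) {
        // MEAN: per-thread partial sums, then a tree reduction
        sum[tpitg] = 0.0f;
        for (int i00 = tpitg; i00 < ne00; i00 += ntg) sum[tpitg] += x[i00];
        sync.arrive_and_wait();
        for (int i = ntg/2; i > 0; i /= 2) {
            if (tpitg < i) sum[tpitg] += sum[tpitg + i];
            sync.arrive_and_wait();
        }
        // broadcast: one thread turns the total into the mean (the step PATCH 1/5 re-enables)
        if (tpitg == 0) sum[0] /= ne00;
        sync.arrive_and_wait();
        const float mean = sum[0];

        // recenter
        for (int i00 = tpitg; i00 < ne00; i00 += ntg) y[i00] = x[i00] - mean;
        sync.arrive_and_wait(); // ensures every thread has read the mean before sum[] is reused below

        // VARIANCE: a second, separate reduction over y*y (loops kept apart, as PATCH 2/5 insists)
        sum[tpitg] = 0.0f;
        for (int i00 = tpitg; i00 < ne00; i00 += ntg) sum[tpitg] += y[i00] * y[i00];
        sync.arrive_and_wait();
        for (int i = ntg/2; i > 0; i /= 2) {
            if (tpitg < i) sum[tpitg] += sum[tpitg + i];
            sync.arrive_and_wait();
        }
        if (tpitg == 0) sum[0] /= ne00;
        sync.arrive_and_wait();
        const float variance = sum[0];

        // scale
        const float scale = 1.0f/std::sqrt(variance + 1e-5f);
        for (int i00 = tpitg; i00 < ne00; i00 += ntg) y[i00] *= scale;
    };

    std::vector<std::thread> threads;
    for (int t = 0; t < ntg; ++t) threads.emplace_back(worker, t);
    for (auto & t : threads) t.join();

    printf("y[0] = %f, y[%d] = %f\n", y[0], ne00 - 1, y[ne00 - 1]);
    return 0;
}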