From 059e99066d95d73d1ca26c3375d47c0e35596229 Mon Sep 17 00:00:00 2001 From: Aisuko Date: Sun, 11 Jun 2023 00:08:11 +1000 Subject: [PATCH 01/11] doc : fix wrong address of BLIS.md (#1772) Signed-off-by: Aisuko --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0c87af6ee..cc3bd5394 100644 --- a/README.md +++ b/README.md @@ -308,7 +308,7 @@ Building the program with BLAS support may lead to some performance improvements - #### BLIS - Check [BLIS.md](BLIS.md) for more information. + Check [BLIS.md](docs/BLIS.md) for more information. - #### Intel MKL From 303f5809f1b4ec49823dbe70cacd2124ec1d0df0 Mon Sep 17 00:00:00 2001 From: Andrei Date: Sat, 10 Jun 2023 10:47:34 -0400 Subject: [PATCH 02/11] metal : fix issue with ggml-metal.metal path. Closes #1769 (#1782) * Fix issue with ggml-metal.metal path * Add ggml-metal.metal as a resource for llama target * Update flake.nix metal kernel substitution --- CMakeLists.txt | 6 ++++++ flake.nix | 2 +- ggml-metal.m | 9 ++++++++- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 41f5bb737..84e2a88cb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -218,6 +218,9 @@ if (LLAMA_METAL) # copy ggml-metal.metal to bin directory configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY) + if (LLAMA_METAL) + set_target_properties(llama PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") + endif() set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${FOUNDATION_LIBRARY} @@ -432,6 +435,9 @@ target_link_libraries(llama PRIVATE if (BUILD_SHARED_LIBS) set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON) target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD) + if (LLAMA_METAL) + set_target_properties(llama PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") + endif() endif() if (GGML_SOURCES_CUDA) diff --git a/flake.nix b/flake.nix index 619100449..f3180c841 100644 --- a/flake.nix +++ b/flake.nix @@ -28,7 +28,7 @@ postPatch = if isM1 then '' substituteInPlace ./ggml-metal.m \ - --replace '[[NSBundle mainBundle] pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/ggml-metal.metal\";" + --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/ggml-metal.metal\";" '' else ""; nativeBuildInputs = with pkgs; [ cmake ]; buildInputs = osSpecific; diff --git a/ggml-metal.m b/ggml-metal.m index 167ebd467..16a362fd7 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -73,6 +73,12 @@ struct ggml_metal_context { // for now it is easier to work in a separate file static NSString * const msl_library_source = @"see metal.metal"; +// Here to assist with NSBundle Path Hack +@interface GGMLMetalClass : NSObject +@end +@implementation GGMLMetalClass +@end + struct ggml_metal_context * ggml_metal_init(void) { fprintf(stderr, "%s: allocating\n", __func__); @@ -108,7 +114,8 @@ struct ggml_metal_context * ggml_metal_init(void) { NSError * error = nil; //NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"]; - NSString * path = [[NSBundle mainBundle] pathForResource:@"ggml-metal" ofType:@"metal"]; + NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]]; + NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; fprintf(stderr, "%s: loading '%s'\n", __func__, [path UTF8String]); NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error]; From 3f1223155a462477ac933474ebc4eab0ce3ca264 Mon Sep 
17 00:00:00 2001 From: Artyom Lebedev Date: Sat, 10 Jun 2023 22:51:36 +0300 Subject: [PATCH 03/11] k-quants : GCC12 compilation fix (#1792) --- k_quants.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/k_quants.c b/k_quants.c index 4d524494d..a48c82171 100644 --- a/k_quants.c +++ b/k_quants.c @@ -1519,7 +1519,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri const uint8x16_t m4b = vdupq_n_u8(0xf); #ifdef __ARM_FEATURE_DOTPROD - const uint32x4_t mzero = vdupq_n_s32(0); + const int32x4_t mzero = vdupq_n_s32(0); #endif int8x16x2_t q4bytes; @@ -1745,7 +1745,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri #ifdef __ARM_NEON const uint8x16_t m4b = vdupq_n_u8(0xf); - const uint32x4_t mzero = vdupq_n_u32(0); + const int32x4_t mzero = vdupq_n_s32(0); const uint8x16_t mone = vdupq_n_u8(1); const uint8x16_t mtwo = vdupq_n_u8(2); @@ -2242,5 +2242,3 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri *s = sumf; #endif } - - From 4de0334f5cabf4696eced2e5d6e279fdfaa6c0f2 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 10 Jun 2023 22:56:53 +0300 Subject: [PATCH 04/11] cmake : fix Metal build (close #1791) --- CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 84e2a88cb..19cd42dd2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -218,9 +218,6 @@ if (LLAMA_METAL) # copy ggml-metal.metal to bin directory configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY) - if (LLAMA_METAL) - set_target_properties(llama PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") - endif() set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${FOUNDATION_LIBRARY} From 31d2b5f4a4bae081e59b36ab37c6ff6f5b5940ad Mon Sep 17 00:00:00 2001 From: Ryan Landay Date: Sun, 11 Jun 2023 17:38:53 +0800 Subject: [PATCH 05/11] Update SHA256SUMS with current hashes for models quantized using q4_0 (#1798) --- SHA256SUMS | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/SHA256SUMS b/SHA256SUMS index 593c8efaa..ca4d5a4a5 100644 --- a/SHA256SUMS +++ b/SHA256SUMS @@ -1,6 +1,6 @@ 700df0d3013b703a806d2ae7f1bfb8e59814e3d06ae78be0c66368a50059f33d models/7B/consolidated.00.pth 666a4bb533b303bdaf89e1b6a3b6f93535d868de31d903afdc20983dc526c847 models/7B/ggml-model-f16.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/7B/ggml-model-q4_0.bin +ec2f2d1f0dfb73b72a4cbac7fa121abbe04c37ab327125a38248f930c0f09ddf models/7B/ggml-model-q4_0.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/7B/ggml-model-q4_1.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/7B/ggml-model-q5_0.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/7B/ggml-model-q5_1.bin @@ -8,7 +8,7 @@ ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/7B/ggml 745bf4e29a4dd6f411e72976d92b452da1b49168a4f41c951cfcc8051823cf08 models/13B/consolidated.00.pth d5ccbcc465c71c0de439a5aeffebe8344c68a519bce70bc7f9f92654ee567085 models/13B/consolidated.01.pth 2b206e9b21fb1076f11cafc624e2af97c9e48ea09312a0962153acc20d45f808 models/13B/ggml-model-f16.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/13B/ggml-model-q4_0.bin +fad169e6f0f575402cf75945961cb4a8ecd824ba4da6be2af831f320c4348fa5 models/13B/ggml-model-q4_0.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/13B/ggml-model-q4_1.bin 
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/13B/ggml-model-q5_0.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/13B/ggml-model-q5_1.bin @@ -18,7 +18,7 @@ e23294a58552d8cdec5b7e8abb87993b97ea6eced4178ff2697c02472539d067 models/30B/con 24a87f01028cbd3a12de551dcedb712346c0b5cbdeff1454e0ddf2df9b675378 models/30B/consolidated.02.pth 1adfcef71420886119544949767f6a56cb6339b4d5fcde755d80fe68b49de93b models/30B/consolidated.03.pth 7e1b524061a9f4b27c22a12d6d2a5bf13b8ebbea73e99f218809351ed9cf7d37 models/30B/ggml-model-f16.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/30B/ggml-model-q4_0.bin +d2a441403944819492ec8c2002cc36fa38468149bfb4b7b4c52afc7bd9a7166d models/30B/ggml-model-q4_0.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/30B/ggml-model-q4_1.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/30B/ggml-model-q5_0.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/30B/ggml-model-q5_1.bin @@ -32,7 +32,7 @@ a287c0dfe49081626567c7fe87f74cce5831f58e459b427b5e05567641f47b78 models/65B/con 72b4eba67a1a3b18cb67a85b70f8f1640caae9b40033ea943fb166bd80a7b36b models/65B/consolidated.06.pth d27f5b0677d7ff129ceacd73fd461c4d06910ad7787cf217b249948c3f3bc638 models/65B/consolidated.07.pth 60758f2384d74e423dffddfd020ffed9d3bb186ebc54506f9c4a787d0f5367b0 models/65B/ggml-model-f16.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/65B/ggml-model-q4_0.bin +cde053439fa4910ae454407e2717cc46cc2c2b4995c00c93297a2b52e790fa92 models/65B/ggml-model-q4_0.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/65B/ggml-model-q4_1.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/65B/ggml-model-q5_0.bin ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/65B/ggml-model-q5_1.bin From 12b063f0ecf280e98028e444fc492ee6222cdcdc Mon Sep 17 00:00:00 2001 From: Kyle Liang Date: Sun, 11 Jun 2023 21:20:52 +0800 Subject: [PATCH 06/11] Fixed WSL cuda's OOM error (#1594) * In the function ggml_cuda_host_malloc, add the cuda error bypass. * remove excessive code and prints --------- Co-authored-by: liang --- ggml-cuda.cu | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index a62f26e1e..4f2195f77 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -1105,6 +1105,9 @@ void * ggml_cuda_host_malloc(size_t size) { void * ptr = nullptr; cudaError_t err = cudaMallocHost((void **) &ptr, size); if (err != cudaSuccess) { + // The allocation error can be bypassed. A null ptr will be assigned out of this function. + // This can fix the OOM error in WSL. + cudaGetLastError(); fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n", size/1024.0/1024.0, cudaGetErrorString(err)); return nullptr; From fa84c4b3e80199a5683438f062009c031a06c4fa Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Sun, 11 Jun 2023 08:19:17 -0600 Subject: [PATCH 07/11] Fix issue where interactive mode crashes when input exceeds ctx size (#1789) * Fix issue where interactive mode in the main example crashes when input exceeds ctx size * Ensure the context size is at least 8 tokens in the main example.
Closes #1768 --- examples/common.cpp | 3 +++ examples/common.h | 3 ++- examples/main/main.cpp | 16 ++++++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/examples/common.cpp b/examples/common.cpp index f5d886acf..df69f2736 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -632,6 +632,9 @@ void console_set_color(console_state & con_st, console_color_t color) { case CONSOLE_COLOR_USER_INPUT: fprintf(con_st.out, ANSI_BOLD ANSI_COLOR_GREEN); break; + case CONSOLE_COLOR_ERROR: + fprintf(con_st.out, ANSI_BOLD ANSI_COLOR_RED); + break; } con_st.color = color; fflush(con_st.out); diff --git a/examples/common.h b/examples/common.h index 826e2ae59..6fedb414a 100644 --- a/examples/common.h +++ b/examples/common.h @@ -112,7 +112,8 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params); enum console_color_t { CONSOLE_COLOR_DEFAULT=0, CONSOLE_COLOR_PROMPT, - CONSOLE_COLOR_USER_INPUT + CONSOLE_COLOR_USER_INPUT, + CONSOLE_COLOR_ERROR }; struct console_state { diff --git a/examples/main/main.cpp b/examples/main/main.cpp index de63faa3e..66d563143 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -81,6 +81,9 @@ int main(int argc, char ** argv) { if (params.n_ctx > 2048) { fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);" "expect poor results\n", __func__, params.n_ctx); + } else if (params.n_ctx < 8) { + fprintf(stderr, "%s: warning: minimum context size is 8, using minimum size.\n", __func__); + params.n_ctx = 8; } fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); @@ -331,6 +334,19 @@ int main(int argc, char ** argv) { while ((n_remain != 0 && !is_antiprompt) || params.interactive) { // predict if (embd.size() > 0) { + // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via + // --prompt or --file which uses the same value. + auto max_embd_size = n_ctx - 4; + // Ensure the input doesn't exceed the context size by truncating embd if necessary. + if ((int)embd.size() > max_embd_size) { + auto skipped_tokens = embd.size() - max_embd_size; + console_set_color(con_st, CONSOLE_COLOR_ERROR); + printf("<<input too long: skipped %zu token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : ""); + console_set_color(con_st, CONSOLE_COLOR_DEFAULT); + fflush(stdout); + embd.resize(max_embd_size); + } + // infinite text generation via context swapping // if we run out of context: // - take the n_keep first tokens from the original prompt (via n_past) From 8c0a10e64dbf60fd9946c0cd5e6f59690800b123 Mon Sep 17 00:00:00 2001 From: Kawrakow <48489457+ikawrakow@users.noreply.github.com> Date: Mon, 12 Jun 2023 14:31:36 +0300 Subject: [PATCH 08/11] metal : fix failure to load model (#1817) The number of buffers in the ggml context was left uninitialized. This leads to sporadic failures to load the model on startup. It is actually strange that the failure occurred so infrequently.
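A minimal sketch in plain C of why this kind of bug is intermittent (the struct and function names below are illustrative placeholders, not the actual ggml types): a counter in heap memory that is never written only misbehaves when the underlying bytes happen to be non-zero, and freshly mapped pages from the OS are usually zero-filled, so the garbage value is often 0 by accident.

    #include <stdlib.h>

    struct metal_ctx_sketch {
        int n_buffers;                       // was left uninitialized before this patch
    };

    struct metal_ctx_sketch * ctx_init_sketch(void) {
        struct metal_ctx_sketch * ctx = malloc(sizeof(*ctx));
        // malloc'd memory is indeterminate: brand-new pages tend to be zero-filled,
        // but recycled heap blocks are not, so ctx->n_buffers may or may not
        // already contain garbage at this point.
        ctx->n_buffers = 0;                  // the fix: make the count explicit
        return ctx;
    }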
Co-authored-by: Iwan Kawrakow --- ggml-metal.m | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml-metal.m b/ggml-metal.m index 16a362fd7..b73f51f24 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -86,6 +86,7 @@ struct ggml_metal_context * ggml_metal_init(void) { ctx->device = MTLCreateSystemDefaultDevice(); ctx->queue = [ctx->device newCommandQueue]; + ctx->n_buffers = 0; // determine if we can use MPS if (MPSSupportsMTLDevice(ctx->device)) { From 58970a4c39124a647ac2a640d9e178ea6c961e65 Mon Sep 17 00:00:00 2001 From: Howard Su Date: Mon, 12 Jun 2023 20:44:16 +0800 Subject: [PATCH 09/11] Leverage mmap for offloading tensors to GPU (#1597) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Rebase to latest * Show progress * Add assert to make sure we only allocate temp buffer for non-CPU backend tensor Co-authored-by: Johannes Gäßler --------- Co-authored-by: Johannes Gäßler --- ggml-cuda.cu | 23 ++--------- ggml-cuda.h | 3 +- ggml-opencl.cpp | 35 ++-------------- ggml-opencl.h | 3 +- llama.cpp | 107 +++++++++++++++++++++--------------------------- 5 files changed, 56 insertions(+), 115 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 4f2195f77..3b9a5ddfb 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -1713,8 +1713,7 @@ void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tens (void) dst; } -void ggml_cuda_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset) { - FILE * fp = fopen(fname, "rb"); +void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) { int nrows = ggml_nrows(tensor); const size_t nb1 = tensor->nb[1]; ggml_backend backend = tensor->backend; @@ -1748,35 +1747,19 @@ void ggml_cuda_load_data(const char * fname, struct ggml_tensor * tensor, const int64_t nrows_split = row_high - row_low; - const size_t offset_split = offset + row_low*nb1; + const size_t offset_split = row_low*nb1; const size_t size = ggml_nbytes_split(tensor, nrows_split); void * buf; CUDA_CHECK(cudaMalloc(&buf, size)); - void * buf_host = malloc(size); - -#ifdef _WIN32 - int ret = _fseeki64(fp, (__int64) offset_split, SEEK_SET); -#else - int ret = fseek(fp, (long) offset_split, SEEK_SET); -#endif - GGML_ASSERT(ret == 0); // same - - size_t ret2 = fread(buf_host, size, 1, fp); - if (ret2 != 1) { - fprintf(stderr, "unexpectedly reached end of file"); - exit(1); - } + void * buf_host = (char*)data + offset_split; cudaMemcpy(buf, buf_host, size, cudaMemcpyHostToDevice); - cudaDeviceSynchronize(); - free(buf_host); extra->data_device[id] = buf; } tensor->extra = extra; - fclose(fp); } void ggml_cuda_free_data(struct ggml_tensor * tensor) { diff --git a/ggml-cuda.h b/ggml-cuda.h index 3b74e32e2..fde6d4085 100644 --- a/ggml-cuda.h +++ b/ggml-cuda.h @@ -24,7 +24,8 @@ void ggml_cuda_mul_mat(const struct ggml_tensor * src0, const struct ggml_tens void * ggml_cuda_host_malloc(size_t size); void ggml_cuda_host_free(void * ptr); -void ggml_cuda_load_data(const char * fname, struct ggml_tensor * tensors, size_t offset); +void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor); + void ggml_cuda_free_data(struct ggml_tensor * tensor); void ggml_cuda_assign_buffers(struct ggml_tensor * tensor); void ggml_cuda_set_main_device(int main_device); diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp index 7b6daf4a8..5df922abd 100644 --- a/ggml-opencl.cpp +++ b/ggml-opencl.cpp @@ -1167,7 +1167,7 @@ size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct g return 0; } -void 
ggml_cl_transform_tensor(ggml_tensor * tensor) { +void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) { const int64_t ne0 = tensor->ne[0]; const int64_t ne1 = tensor->ne[1]; const int64_t ne2 = tensor->ne[2]; @@ -1179,6 +1179,7 @@ void ggml_cl_transform_tensor(ggml_tensor * tensor) { size_t q_size; cl_mem dst = ggml_cl_pool_malloc(q_sz, &q_size); + tensor->data = data; // copy tensor to device for (int64_t i3 = 0; i3 < ne3; i3++) { for (int64_t i2 = 0; i2 < ne2; i2++) { @@ -1190,35 +1191,5 @@ void ggml_cl_transform_tensor(ggml_tensor * tensor) { CL_CHECK(clFinish(queue)); tensor->data = dst; - tensor->backend = GGML_BACKEND_GPU; -} - -void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset) { - cl_int err; - FILE * fp = fopen(fname, "rb"); - - const size_t size = ggml_nbytes(tensor); - - cl_mem dst; - CL_CHECK((dst = clCreateBuffer(context, CL_MEM_READ_ONLY, size, nullptr, &err), err)); - void * buf_host = malloc(size); - -#ifdef _WIN32 - int ret = _fseeki64(fp, (__int64) offset, SEEK_SET); -#else - int ret = fseek(fp, (long) offset, SEEK_SET); -#endif - GGML_ASSERT(ret == 0); // same - - size_t ret2 = fread(buf_host, size, 1, fp); - if (ret2 != 1) { - fprintf(stderr, "unexpectedly reached end of file"); - exit(1); - } - - clEnqueueWriteBuffer(queue, dst, CL_TRUE, 0, size, buf_host, 0, nullptr, nullptr); - - tensor->data = dst; - free(buf_host); - fclose(fp); + GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); } diff --git a/ggml-opencl.h b/ggml-opencl.h index bf95e5cd0..a92b445c9 100644 --- a/ggml-opencl.h +++ b/ggml-opencl.h @@ -18,8 +18,7 @@ void ggml_cl_host_free(void * ptr); void ggml_cl_free_data(const struct ggml_tensor* tensor); -void ggml_cl_transform_tensor(struct ggml_tensor * tensor); -void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset); +void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor); #ifdef __cplusplus } diff --git a/llama.cpp b/llama.cpp index e100e2bc9..a9a7794ae 100644 --- a/llama.cpp +++ b/llama.cpp @@ -707,6 +707,9 @@ struct llama_model_loader { struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) { struct ggml_tensor * tensor; + if (backend != GGML_BACKEND_CPU) { + ggml_set_no_alloc(ggml_ctx, true); + } if (lt.ne.size() == 2) { tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1)); } else { @@ -716,6 +719,9 @@ struct llama_model_loader { ggml_set_name(tensor, lt.name.c_str()); LLAMA_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor + if (backend != GGML_BACKEND_CPU) { + ggml_set_no_alloc(ggml_ctx, use_mmap); + } tensor->backend = backend; lt.ggml_tensor = tensor; num_ggml_tensors_created++; @@ -731,6 +737,7 @@ struct llama_model_loader { void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) { size_t data_size = 0; size_t prefetch_size = 0; + size_t lock_size = 0; for (const llama_load_tensor & lt : tensors_map.tensors) { data_size += lt.size; if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) { @@ -740,11 +747,6 @@ struct llama_model_loader { if (use_mmap) { mapping.reset(new llama_mmap(&file_loaders.at(0)->file, prefetch_size)); - if (!lmlock) { - // Don't call the callback since the actual loading will be lazy - // and we can't measure it. 
- progress_callback = NULL; - } if (lmlock) { lmlock->init(mapping->addr); } @@ -752,20 +754,49 @@ struct llama_model_loader { size_t done_size = 0; for (llama_load_tensor & lt : tensors_map.tensors) { - if (lt.ggml_tensor->backend != GGML_BACKEND_CPU) { - continue; - } if (progress_callback) { progress_callback((float) done_size / data_size, progress_callback_user_data); } LLAMA_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already lt.data = (uint8_t *) lt.ggml_tensor->data; - load_data_for(lt); - lt.ggml_tensor->data = lt.data; - done_size += lt.size; - if (use_mmap && lmlock) { - lmlock->grow_to(done_size); + + // allocate temp buffer if not using mmap + if (!use_mmap && lt.data == NULL) { + GGML_ASSERT(lt.ggml_tensor->backend != GGML_BACKEND_CPU); + lt.data = (uint8_t*)malloc(ggml_nbytes(lt.ggml_tensor)); } + + load_data_for(lt); + + switch(lt.ggml_tensor->backend) { + case GGML_BACKEND_CPU: + lt.ggml_tensor->data = lt.data; + if (use_mmap && lmlock) { + lock_size += lt.size; + lmlock->grow_to(lock_size); + } + break; +#if defined(GGML_USE_CUBLAS) + case GGML_BACKEND_GPU: + case GGML_BACKEND_GPU_SPLIT: + ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor); + if (!use_mmap) { + free(lt.data); + } + break; +#elif defined(GGML_USE_CLBLAST) + case GGML_BACKEND_GPU: + ggml_cl_transform_tensor(lt.data, lt.ggml_tensor); + if (!use_mmap) { + free(lt.data); + } + break; +#endif + default: + continue; + } + + done_size += lt.size; } } @@ -1141,7 +1172,7 @@ static void llama_model_load_internal( if (backend == GGML_BACKEND_GPU) { vram_weights += ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + - ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.attention_norm) + + ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3); } } @@ -1196,58 +1227,14 @@ static void llama_model_load_internal( model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor); } - ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? 
&lctx.model.mlock_mmap : NULL); - #if defined(GGML_USE_CUBLAS) { ggml_cuda_set_tensor_split(tensor_split); - - size_t done_size = 0; - size_t data_size = 0; - for (llama_load_tensor & lt : ml->tensors_map.tensors) { - data_size += lt.size; - if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) { - done_size += lt.size; - } - } - for (llama_load_tensor & lt : ml->tensors_map.tensors) { - ggml_backend backend = lt.ggml_tensor->backend; - if (backend != GGML_BACKEND_GPU && backend != GGML_BACKEND_GPU_SPLIT) { - continue; - } - if (progress_callback) { - progress_callback((float) done_size / data_size, progress_callback_user_data); - } - ggml_cuda_load_data(fname.c_str(), lt.ggml_tensor, lt.shards.at(0).file_off); - done_size += lt.size; - } } -#elif defined(GGML_USE_CLBLAST) - { - size_t done_size = 0; - size_t data_size = 0; - for (llama_load_tensor & lt : ml->tensors_map.tensors) { - data_size += lt.size; - if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) { - done_size += lt.size; - } - } - for (llama_load_tensor & lt : ml->tensors_map.tensors) { - if (lt.ggml_tensor->backend != GGML_BACKEND_GPU) { - continue; - } - if (progress_callback) { - progress_callback((float) done_size / data_size, progress_callback_user_data); - } - ggml_cl_load_data(fname.c_str(), lt.ggml_tensor, lt.shards.at(0).file_off); - done_size += lt.size; - } - } -#else - (void) n_batch; - (void) tensor_split; #endif + ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &lctx.model.mlock_mmap : NULL); + if (progress_callback) { progress_callback(1.0f, progress_callback_user_data); } From e4caa8da59c1c97dc23fa336f4d726984a20560f Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 12 Jun 2023 19:12:47 +0200 Subject: [PATCH 10/11] ci : run when changing only the CUDA sources (#1800) --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c98cbcbbe..b87ea76bc 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,10 +10,10 @@ on: push: branches: - master - paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp'] + paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu'] pull_request: types: [opened, synchronize, reopened] - paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp'] + paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu'] env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} From 74a6d922f12ccfe16b0c265f43be8978c6f25e98 Mon Sep 17 00:00:00 2001 From: Kawrakow <48489457+ikawrakow@users.noreply.github.com> Date: Mon, 12 Jun 2023 22:39:21 +0300 Subject: [PATCH 11/11] Metal implementation for all k_quants (#1807) * metal : improve q4_K 28.3 -> 26.0 ms/token by avoiding a branch in the calculation of the scales. * metal : small improvement for Q4_K * metal : still optimizing Q4_K This commit pushes it down to 25.3 ms / token. The crazy idea of using 6 bits for the scales is really costly on Metal: if I remove the bit fiddling necessary to make the block scales, time goes almost to the Q4_0 23 ms/token. Before pushing the k-quants upstream I had a Q4_K variant that had used 8-bit scales. 
It wasn't more accurate, used 0.125 bits more per weight, was running slightly slower on the CPU (due to the larger model size and being memory bound there), and the difference was entirely negligible under CUDA. So, I decided to publish the version with 6-bit scales. Perhaps I should reconsider and change to 8-bit scales? * metal : some more optimizations Q2_K: 25.4 ms/token Q6_K: 27.3 ms/token Q4_0: 22.8 ms/token Q4_1: 23.1 ms/token * metal : Q3_K support Something is not quite right yet. * metal : Q5_K support Initial version achieves 31.2 ms/token, 210 GB/s * metal : still not able to figure out why q3_K does not work * Minor * metal : yet another failed attempt to make q3_K work * metal : optimize Q5_K 31.2 ms -> 27.8 ms. 250 GB/s. * metal : q3_K still not working Adding a heavily commented q3_K metal kernel to explain my obviously faulty logic. Perhaps someone could spot the issue? * metal : q3_K finally working Not optimized at all. What was the issue? The scales are not 4-byte aligned, and I was accessing them with a uint32_t pointer. When I tried that on CUDA, I got an error (illegal memory access) and added a memcpy to a local array of 3 uint32_t's. But on Metal it told me there is no memcpy, so I tried accessing directly. There is no error, just garbage results. At some point I did try accessing the scales with a uint16_t pointer (the scales are for sure 2-byte aligned), but was still getting garbage. I guess there must have been another bug. Now access to scales is via a uint16_t pointer and, after starting from scratch from the C dequantize function, it finally works. * metal : Q3_K 1st optimization pass * metal : Q3_K second optimization pass - 29.6 ms/token * metal : Q3_K cleanup * metal : fixed accidentally broken Q2_K --------- Co-authored-by: Iwan Kawrakow --- ggml-metal.m | 41 +++- ggml-metal.metal | 547 ++++++++++++++++++++++++++++++++++++----------- llama.cpp | 10 +- 3 files changed, 463 insertions(+), 135 deletions(-) diff --git a/ggml-metal.m b/ggml-metal.m index b73f51f24..658c392e0 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -52,14 +52,18 @@ struct ggml_metal_context { GGML_METAL_DECL_KERNEL(get_rows_q4_0); GGML_METAL_DECL_KERNEL(get_rows_q4_1); GGML_METAL_DECL_KERNEL(get_rows_q2_k); + GGML_METAL_DECL_KERNEL(get_rows_q3_k); GGML_METAL_DECL_KERNEL(get_rows_q4_k); + GGML_METAL_DECL_KERNEL(get_rows_q5_k); GGML_METAL_DECL_KERNEL(get_rows_q6_k); GGML_METAL_DECL_KERNEL(rms_norm); GGML_METAL_DECL_KERNEL(mul_mat_f16_f32); GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32); GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32); GGML_METAL_DECL_KERNEL(mul_mat_q2_k_f32); + GGML_METAL_DECL_KERNEL(mul_mat_q3_k_f32); GGML_METAL_DECL_KERNEL(mul_mat_q4_k_f32); + GGML_METAL_DECL_KERNEL(mul_mat_q5_k_f32); GGML_METAL_DECL_KERNEL(mul_mat_q6_k_f32); GGML_METAL_DECL_KERNEL(rope); GGML_METAL_DECL_KERNEL(cpy_f32_f16); @@ -153,14 +157,18 @@ struct ggml_metal_context * ggml_metal_init(void) { GGML_METAL_ADD_KERNEL(get_rows_q4_0); GGML_METAL_ADD_KERNEL(get_rows_q4_1); GGML_METAL_ADD_KERNEL(get_rows_q2_k); + GGML_METAL_ADD_KERNEL(get_rows_q3_k); GGML_METAL_ADD_KERNEL(get_rows_q4_k); + GGML_METAL_ADD_KERNEL(get_rows_q5_k); GGML_METAL_ADD_KERNEL(get_rows_q6_k); GGML_METAL_ADD_KERNEL(rms_norm); GGML_METAL_ADD_KERNEL(mul_mat_f16_f32); GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32); GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32); GGML_METAL_ADD_KERNEL(mul_mat_q2_k_f32); + GGML_METAL_ADD_KERNEL(mul_mat_q3_k_f32); GGML_METAL_ADD_KERNEL(mul_mat_q4_k_f32); + GGML_METAL_ADD_KERNEL(mul_mat_q5_k_f32);
GGML_METAL_ADD_KERNEL(mul_mat_q6_k_f32); GGML_METAL_ADD_KERNEL(rope); GGML_METAL_ADD_KERNEL(cpy_f32_f16); @@ -575,6 +583,15 @@ void ggml_metal_graph_compute( nth1 = 16; [encoder setComputePipelineState:ctx->pipeline_mul_mat_q2_k_f32]; } break; + case GGML_TYPE_Q3_K: + { + GGML_ASSERT(ne02 == 1); + GGML_ASSERT(ne12 == 1); + + nth0 = 4; + nth1 = 16; + [encoder setComputePipelineState:ctx->pipeline_mul_mat_q3_k_f32]; + } break; case GGML_TYPE_Q4_K: { GGML_ASSERT(ne02 == 1); @@ -584,6 +601,15 @@ void ggml_metal_graph_compute( nth1 = 16; [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_k_f32]; } break; + case GGML_TYPE_Q5_K: + { + GGML_ASSERT(ne02 == 1); + GGML_ASSERT(ne12 == 1); + + nth0 = 4; + nth1 = 16; + [encoder setComputePipelineState:ctx->pipeline_mul_mat_q5_k_f32]; + } break; case GGML_TYPE_Q6_K: { GGML_ASSERT(ne02 == 1); @@ -620,15 +646,14 @@ void ggml_metal_graph_compute( if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1) { [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0]; [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } else if (src0t == GGML_TYPE_Q2_K) { + } + else if (src0t == GGML_TYPE_Q2_K || + src0t == GGML_TYPE_Q3_K || + src0t == GGML_TYPE_Q4_K || + src0t == GGML_TYPE_Q5_K || + src0t == GGML_TYPE_Q6_K) { [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0]; [encoder dispatchThreadgroups:MTLSizeMake(ne01, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } else if (src0t == GGML_TYPE_Q4_K) { - [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } else if (src0t == GGML_TYPE_Q6_K) { - [encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; } else { [encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0]; [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; @@ -646,7 +671,9 @@ void ggml_metal_graph_compute( case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break; case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break; case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_k]; break; + case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_k]; break; case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_k]; break; + case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_k]; break; case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_k]; break; default: GGML_ASSERT(false && "not implemented"); } diff --git a/ggml-metal.metal b/ggml-metal.metal index ccd36386b..09e12a879 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -304,34 +304,22 @@ kernel void kernel_mul_mat_q4_0_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne01, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, constant int64_t & ne10, - constant int64_t & ne11, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, constant int64_t & ne0, - constant int64_t & ne1, threadgroup float * sum [[threadgroup(0)]], uint2 tgpig[[threadgroup_position_in_grid]], - uint2 
tpig[[thread_position_in_grid]], uint2 tpitg[[thread_position_in_threadgroup]], uint2 tptg[[threads_per_threadgroup]]) { const int nb = ne00/QK4_0; - const int8_t m8 = 8; - const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; device const block_q4_0 * x = (device const block_q4_0 *) src0 + r0*nb; device const float * y = (device const float *) src1 + r1*ne10; - const uint nth = tptg.x*tptg.y; - const uint ith = tptg.y*tpitg.x + tpitg.y; + const int nth = tptg.x*tptg.y; + const int ith = tptg.y*tpitg.x + tpitg.y; const int ix = tpitg.y/4; // 0 or 1 const int iy = tpitg.y - 4*ix; // 0...3 @@ -351,47 +339,32 @@ kernel void kernel_mul_mat_q4_0_f32( for (int j = 0; j < 4; ++j) { - acc[0] += yl[j+ 0] * ((int8_t)(xl[j] & 0xF) - m8); - acc[1] += yl[j+16] * ((int8_t)(xl[j] >> 4) - m8); + acc[0] += yl[j] * (xl[j] & 0xF) + yl[j+16] * (xl[j] >> 4); + acc[1] += yl[j] + yl[j+16]; } - sumf += d * (acc[0] + acc[1]); + sumf += d * (acc[0] - 8.f*acc[1]); } sum[ith] = sumf; // // Accumulate the sum from all threads in the threadgroup - // This version is slightly faster than the commented out one below, - // which I copy-pasted from ggerganov's q4_0 dot product for metal. // threadgroup_barrier(mem_flags::mem_threadgroup); if (ith%4 == 0) { - for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i]; + sum[ith] += sum[ith+1] + sum[ith+2] + sum[ith+3]; } threadgroup_barrier(mem_flags::mem_threadgroup); if (ith%16 == 0) { - for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i]; + sum[ith] += sum[ith+4] + sum[ith+8] + sum[ith+12]; } threadgroup_barrier(mem_flags::mem_threadgroup); if (ith == 0) { - for (int i = 16; i < nth; i += 16) sum[0] += sum[i]; + for (uint i = 16; i < nth; i += 16) sum[0] += sum[i]; dst[r1*ne0 + r0] = sum[0]; } - - //// accumulate the sum from all threads in the threadgroup - //threadgroup_barrier(mem_flags::mem_threadgroup); - //for (uint i = nth/2; i > 0; i /= 2) { - // if (ith < i) { - // sum[ith] += sum[ith + i]; - // } - // threadgroup_barrier(mem_flags::mem_threadgroup); - //} - - //if (ith == 0) { - // dst[r1*ne0 + r0] = sum[0]; - //} } kernel void kernel_mul_mat_q4_1_f32( @@ -399,20 +372,10 @@ kernel void kernel_mul_mat_q4_1_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne01, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, constant int64_t & ne10, - constant int64_t & ne11, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, constant int64_t & ne0, - constant int64_t & ne1, threadgroup float * sum [[threadgroup(0)]], uint2 tgpig[[threadgroup_position_in_grid]], - uint2 tpig[[thread_position_in_grid]], uint2 tpitg[[thread_position_in_threadgroup]], uint2 tptg[[threads_per_threadgroup]]) { const int nb = ne00/QK4_1; @@ -460,11 +423,11 @@ kernel void kernel_mul_mat_q4_1_f32( // threadgroup_barrier(mem_flags::mem_threadgroup); if (ith%4 == 0) { - for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i]; + sum[ith] += sum[ith+1] + sum[ith+2] + sum[ith+3]; } threadgroup_barrier(mem_flags::mem_threadgroup); if (ith%16 == 0) { - for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i]; + sum[ith] += sum[ith+4] + sum[ith+8] + sum[ith+12]; } threadgroup_barrier(mem_flags::mem_threadgroup); if (ith == 0) { @@ -671,6 +634,15 @@ typedef struct { half d; // super-block scale for quantized scales half dmin; // super-block scale for quantized mins } block_q2_k; +// 84 bytes / block + +typedef struct { + uint8_t hmask[QK_K/8]; // quants - high bit + uint8_t qs[QK_K/4]; // quants - 
low 2 bits + uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits + half d; // super-block scale +} block_q3_k; +// 110 bytes / block typedef struct { half d; // super-block scale for quantized scales @@ -678,6 +650,16 @@ typedef struct { uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_k; +// 144 bytes / block + +typedef struct { + half d; // super-block scale for quantized scales + half dmin; // super-block scale for quantized mins + uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits + uint8_t qh[QK_K/8]; // quants, high bit + uint8_t qs[QK_K/2]; // quants, low 4 bits +} block_q5_k; +// 176 bytes / block typedef struct { uint8_t ql[QK_K/2]; // quants, lower 4 bits @@ -685,16 +667,19 @@ typedef struct { int8_t scales[QK_K/16]; // scales, quantized with 8 bits half d; // super-block scale } block_q6_k; +// 210 bytes / block static inline uchar4 get_scale_min_k4(int j, device const uint8_t * q) { uchar4 r; if (j < 4) { - r[0] = q[j+0] & 63; r[1] = q[j+4] & 63; - r[2] = q[j+1] & 63; r[3] = q[j+5] & 63; + r[0] = q[j+0] & 63; + r[2] = q[j+1] & 63; + r[1] = q[j+4] & 63; + r[3] = q[j+5] & 63; } else { r[0] = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); - r[1] = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); r[2] = (q[j+5] & 0xF) | ((q[j-3] >> 6) << 4); + r[1] = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); r[3] = (q[j+5] >> 4) | ((q[j+1] >> 6) << 4); } return r; @@ -735,10 +720,65 @@ static void dequantize_row_q2_k(device const block_q2_k * x, device float * y, i } } +static void dequantize_row_q3_k(device const block_q3_k * x, device float * y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + const uint16_t kmask1 = 0x0303; + const uint16_t kmask2 = 0x0f0f; + + uint16_t aux[8]; + thread const int8_t * scales = (thread const int8_t*)aux; + + for (int i = 0; i < nb; i++) { + + const float d_all = (float)(x[i].d); + + device const uint8_t * q = x[i].qs; + device const uint8_t * h = x[i].hmask; + uint8_t m = 1; + + device const uint16_t * a = (device const uint16_t *)x[i].scales; + aux[0] = (a[0] & kmask2) | (((a[4] >> 0) & kmask1) << 4); + aux[1] = (a[1] & kmask2) | (((a[5] >> 0) & kmask1) << 4); + aux[2] = (a[2] & kmask2) | (((a[4] >> 2) & kmask1) << 4); + aux[3] = (a[3] & kmask2) | (((a[5] >> 2) & kmask1) << 4); + aux[4] = ((a[0] >> 4) & kmask2) | (((a[4] >> 4) & kmask1) << 4); + aux[5] = ((a[1] >> 4) & kmask2) | (((a[5] >> 4) & kmask1) << 4); + aux[6] = ((a[2] >> 4) & kmask2) | (((a[4] >> 6) & kmask1) << 4); + aux[7] = ((a[3] >> 4) & kmask2) | (((a[5] >> 6) & kmask1) << 4); + + int is = 0; + float dl; + for (int n = 0; n < QK_K; n += 128) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + + dl = d_all * (scales[is++] - 32); + for (int l = 0; l < 16; ++l) { + *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((h[l+ 0] & m) ? 0 : 4)); + } + + dl = d_all * (scales[is++] - 32); + for (int l = 0; l < 16; ++l) { + *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((h[l+16] & m) ? 
0 : 4)); + } + + shift += 2; + m <<= 1; + } + q += 32; + } + + } + +} + static void dequantize_row_q4_k(device const block_q4_k * x, device float * y, int k) { assert(k % QK_K == 0); const int nb = k / QK_K; + for (int i = 0; i < nb; i++) { const float d = x[i].d; @@ -760,6 +800,33 @@ static void dequantize_row_q4_k(device const block_q4_k * x, device float * y, i } } +static void dequantize_row_q5_k(device const block_q5_k * x, device float * y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + for (int i = 0; i < nb; i++) { + + const float d = (float)(x[i].d); + const float min = (float)(x[i].dmin); + + device const uint8_t * ql = x[i].qs; + device const uint8_t * qh = x[i].qh; + + int is = 0; + uint8_t u1 = 1, u2 = 2; + for (int j = 0; j < QK_K; j += 64) { + const uchar4 sc = get_scale_min_k4(is, x[i].scales); + const float d1 = d * sc[0]; const float m1 = min * sc[1]; + const float d2 = d * sc[2]; const float m2 = min * sc[3]; + for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1; + for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2; + ql += 32; is += 2; + u1 <<= 2; u2 <<= 2; + } + } + +} + static void dequantize_row_q6_k(device const block_q6_k * x, device float * y, int k) { assert(k % QK_K == 0); const int nb = k / QK_K; @@ -808,6 +875,22 @@ kernel void kernel_get_rows_q2_k( (device float *) ((device char *) dst + i*nb1), ne00); } +kernel void kernel_get_rows_q3_k( + device const void * src0, + device const int * src1, + device float * dst, + constant int64_t & ne00, + constant uint64_t & nb01, + constant uint64_t & nb1, + uint tpig[[thread_position_in_grid]]) { + const int i = tpig; + const int r = ((device int32_t *) src1)[i]; + + dequantize_row_q3_k( + (device const block_q3_k *) ((device char *) src0 + r*nb01), + (device float *) ((device char *) dst + i*nb1), ne00); +} + kernel void kernel_get_rows_q4_k( device const void * src0, device const int * src1, @@ -824,6 +907,22 @@ kernel void kernel_get_rows_q4_k( (device float *) ((device char *) dst + i*nb1), ne00); } +kernel void kernel_get_rows_q5_k( + device const void * src0, + device const int * src1, + device float * dst, + constant int64_t & ne00, + constant uint64_t & nb01, + constant uint64_t & nb1, + uint tpig[[thread_position_in_grid]]) { + const int i = tpig; + const int r = ((device int32_t *) src1)[i]; + + dequantize_row_q5_k( + (device const block_q5_k *) ((device char *) src0 + r*nb01), + (device float *) ((device char *) dst + i*nb1), ne00); +} + kernel void kernel_get_rows_q6_k( device const void * src0, device const int * src1, @@ -847,20 +946,10 @@ kernel void kernel_mul_mat_q2_k_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne01, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, constant int64_t & ne10, - constant int64_t & ne11, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, constant int64_t & ne0, - constant int64_t & ne1, threadgroup float * sum [[threadgroup(0)]], uint2 tgpig[[threadgroup_position_in_grid]], - uint2 tpig[[thread_position_in_grid]], // we don't use this for now uint2 tpitg[[thread_position_in_threadgroup]], uint2 tptg[[threads_per_threadgroup]]) { @@ -875,7 +964,6 @@ kernel void kernel_mul_mat_q2_k_f32( const int nth = tptg.x*tptg.y; const int ith = tptg.y*tpitg.x + tpitg.y; - const int tid = tpitg.y; // 0...16 const int il = tid/4; // 0...3 const int ir = tid%4; // 0...3 @@ -885,35 
+973,54 @@ kernel void kernel_mul_mat_q2_k_f32( const int n = 8; const int is = 4*il + (n*ir)/16; + const int y_offset = 64*il + n*ir; + const int q_offset = 32*ip + n*ir; + sum[ith] = 0.0f; float sumf = 0; for (int i = tpitg.x; i < nb; i += tptg.x) { - device const uint8_t * q = x[i].qs + 32*ip + n*ir; + device const uint8_t * q = x[i].qs + q_offset; device const uint8_t * scales = x[i].scales + is; uint8_t d1 = scales[0] & 0xF; - uint8_t m1 = scales[0] >> 4; uint8_t d2 = scales[2] & 0xF; + uint8_t m1 = scales[0] >> 4; uint8_t m2 = scales[2] >> 4; - device const float * y = yy + i*QK_K + 64*il + n*ir; + device const float * y = yy + i*QK_K + y_offset; + + //float4 s = {0.f, 0.f, 0.f, 0.f}; + float2 s = {0.f, 0.f}; + float smin = 0; + for (int l = 0; l < n; ++l) { + s[0] += y[l+ 0] * ((q[l] >> shift1) & 3); + s[1] += y[l+32] * ((q[l] >> shift2) & 3); + smin += y[l+ 0] * m1 + y[l+32] * m2; + } const float dall = (float)x[i].d; const float dmin = (float)x[i].dmin; - float4 s = {0.f, 0.f, 0.f, 0.f}; - for (int l = 0; l < n; ++l) { - s[0] += y[l+ 0] * ((q[l] >> shift1) & 3); s[1] += y[l+ 0]; - s[2] += y[l+32] * ((q[l] >> shift2) & 3); s[3] += y[l+32]; - } - sumf += dall * (s[0] * d1 + s[2] * d2) - dmin * (s[1] * m1 + s[3] * m2); - + sumf += dall * (s[0] * d1 + s[1] * d2) - dmin * smin; } sum[ith] = sumf; + //int mask1 = (ith%4 == 0); + //int mask2 = (ith%16 == 0); + + //threadgroup_barrier(mem_flags::mem_threadgroup); + //for (int i = 1; i < 4; ++i) sum[ith] += mask1 * sum[ith + i]; + //threadgroup_barrier(mem_flags::mem_threadgroup); + //for (int i = 4; i < 16; i += 4) sum[ith] += mask2 * sum[ith + i]; + //threadgroup_barrier(mem_flags::mem_threadgroup); + //if (ith == 0) { + // for (int i = 16; i < nth; i += 16) sum[0] += sum[i]; + // dst[r1*ne0 + r0] = sum[0]; + //} + // // Accumulate the sum from all threads in the threadgroup // This version is slightly faster than the commented out one below, @@ -932,19 +1039,109 @@ kernel void kernel_mul_mat_q2_k_f32( for (int i = 16; i < nth; i += 16) sum[0] += sum[i]; dst[r1*ne0 + r0] = sum[0]; } +} - //// accumulate the sum from all threads in the threadgroup - //threadgroup_barrier(mem_flags::mem_threadgroup); - //for (uint i = nth/2; i > 0; i /= 2) { - // if (ith < i) { - // sum[ith] += sum[ith + i]; - // } - // threadgroup_barrier(mem_flags::mem_threadgroup); - //} +kernel void kernel_mul_mat_q3_k_f32( + device const void * src0, + device const float * src1, + device float * dst, + constant int64_t & ne00, + constant int64_t & ne10, + constant int64_t & ne0, + constant int64_t & ne1, + threadgroup float * sum [[threadgroup(0)]], + uint2 tgpig[[threadgroup_position_in_grid]], + uint2 tpitg[[thread_position_in_threadgroup]], + uint2 tptg[[threads_per_threadgroup]]) { + + const uint16_t kmask1 = 0x0303; + const uint16_t kmask2 = 0x0f0f; + + const uint8_t m3 = 3; + const int8_t m4 = 4; + + const int nb = ne00/QK_K; + + const int64_t r0 = tgpig.x; + const int64_t r1 = tgpig.y; + + device const block_q3_k * x = (device const block_q3_k *) src0 + r0*nb; + device const float * yy = (device const float *) src1 + r1*ne10; + + const int nth = tptg.x*tptg.y; + const int ith = tptg.y*tpitg.x + tpitg.y; + + const int tid = tpitg.y; // expecting 16 + const int ip = tid/8; // 0 or 1 + const int il = tid/2 - 4*ip; // 0...3 + const int ir = tid%2; + const int n = 8; + const int l0 = n*ir; + + const uint8_t m = 1 << (4*ip + il); + + const int shift = 2*il; + + const uint16_t s_shift1 = 4*ip; + const uint16_t s_shift2 = s_shift1 + 2*(il/2); + const int ik = 4 + 
(il%2); + + const int q_offset = 32*ip + l0; + const int y_offset = 128*ip + 32*il + l0; + + //float sumf = 0; + float sumf1 = 0, sumf2 = 0; + for (int i = tpitg.x; i < nb; i += tptg.x) { + + const float d_all = (float)(x[i].d); + + device const uint8_t * q = x[i].qs + q_offset; + device const uint8_t * h = x[i].hmask + l0; + device const float * y = yy + i * QK_K + y_offset; + + device const uint16_t * a = (device const uint16_t *)x[i].scales; + const char2 scales = as_type((uint16_t)(((a[il] >> s_shift1) & kmask2) | (((a[ik] >> s_shift2) & kmask1) << 4))); + + float s = 0; + for (int l = 0; l < n; ++l) { + s += y[l+ 0] * ((int8_t)((q[l+ 0] >> shift) & m3) - ((h[l+ 0] & m) ? 0 : m4)); + } + float d = d_all * s; + sumf1 += d * scales[0]; + sumf2 += d; + //sumf += d_all * s * (scales[0] - 32); + + s = 0; + for (int l = 0; l < n; ++l) { + s += y[l+16] * ((int8_t)((q[l+16] >> shift) & m3) - ((h[l+16] & m) ? 0 : m4)); + } + d = d_all * s; + sumf1 += d * scales[1]; + sumf2 += d; + //sumf += d_all * s * (scales[1] - 32); + + } + + //sum[ith] = sumf; + sum[ith] = sumf1 - 32.f*sumf2; + + // + // Accumulate the sum from all threads in the threadgroup + // + threadgroup_barrier(mem_flags::mem_threadgroup); + if (ith%4 == 0) { + for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i]; + } + threadgroup_barrier(mem_flags::mem_threadgroup); + if (ith%16 == 0) { + for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i]; + } + threadgroup_barrier(mem_flags::mem_threadgroup); + if (ith == 0) { + for (int i = 16; i < nth; i += 16) sum[0] += sum[i]; + dst[r1*ne0 + r0] = sum[0]; + } - //if (ith == 0) { - // dst[r1*ne0 + r0] = sum[0]; - //} } kernel void kernel_mul_mat_q4_k_f32( @@ -952,23 +1149,17 @@ kernel void kernel_mul_mat_q4_k_f32( device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne01, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, constant int64_t & ne10, - constant int64_t & ne11, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, constant int64_t & ne0, - constant int64_t & ne1, threadgroup float * sum [[threadgroup(0)]], uint2 tgpig[[threadgroup_position_in_grid]], - uint2 tpig[[thread_position_in_grid]], // we don't use this for now uint2 tpitg[[thread_position_in_threadgroup]], uint2 tptg[[threads_per_threadgroup]]) { + const uint16_t kmask1 = 0x3f3f; + const uint16_t kmask2 = 0x0f0f; + const uint16_t kmask3 = 0xc0c0; + const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; @@ -977,37 +1168,55 @@ kernel void kernel_mul_mat_q4_k_f32( device const block_q4_k * x = (device const block_q4_k *) src0 + r0*nb; device const float * yy = (device const float *) src1 + r1*ne10; - const uint nth = tptg.x*tptg.y; - const uint ith = tptg.y*tpitg.x + tpitg.y; + const int nth = tptg.x*tptg.y; + const int ith = tptg.y*tpitg.x + tpitg.y; const int tid = tpitg.y; // 0...16 const int il = tid/4; // 0...3 - const int ir = tid%4; // 0...3 - const int n = 8; - const int is = 2*il; + const int ir = tid - 4*il;// 0...3 + const int n = 4; + + const int im = il/2; // 0 or 1. 
0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 + const int in = il%2; + + const int l0 = n*(2*ir + in); + const int q_offset = 32*im + l0; + const int y_offset = 64*im + l0; sum[ith] = 0.0f; + uchar2 sc1, sc2, sc3, sc4; + float sumf = 0; for (int i = tpitg.x; i < nb; i += tptg.x) { - device const uint8_t * q = (x + i)->qs + 32*il + n*ir; - device const float * y = yy + i*QK_K + 64*il + n*ir; - device const uint8_t * scales = (x + i)->scales; + device const uint8_t * q1 = (x + i)->qs + q_offset; + device const uint8_t * q2 = q1 + 64; + device const float * y1 = yy + i*QK_K + y_offset; + device const float * y2 = y1 + 128; const float dall = (float)((x + i)->d); const float dmin = (float)((x + i)->dmin); - const uchar4 sc = get_scale_min_k4(is, scales); + device const uint16_t * a = (device const uint16_t *)(x + i)->scales; + sc1 = as_type((uint16_t)(a[im+0] & kmask1)); + sc2 = as_type((uint16_t)(a[im+2] & kmask1)); + sc3 = as_type((uint16_t)(((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2))); + sc4 = as_type((uint16_t)(((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2))); float4 s = {0.f, 0.f, 0.f, 0.f}; + float smin = 0; for (int l = 0; l < n; ++l) { - s[0] += y[l+ 0] * (q[l] & 0xF); s[1] += y[l+ 0]; - s[2] += y[l+32] * (q[l] >> 4); s[3] += y[l+32]; + + s[0] += y1[l] * (q1[l] & 0xF); s[1] += y1[l+32] * (q1[l] >> 4); + s[2] += y2[l] * (q2[l] & 0xF); s[3] += y2[l+32] * (q2[l] >> 4); + smin += y1[l] * sc2[0] + y1[l+32] * sc2[1] + y2[l] * sc4[0] + y2[l+32] * sc4[1]; + } - sumf += dall * (s[0] * sc[0] + s[2] * sc[2]) - dmin * (s[1] * sc[1] + s[3] * sc[3]); + sumf += dall * (s[0] * sc1[0] + s[1] * sc1[1] + s[2] * sc3[0] + s[3] * sc3[1]) - dmin * smin; } + sum[ith] = sumf; // @@ -1043,25 +1252,114 @@ kernel void kernel_mul_mat_q4_k_f32( //} } +kernel void kernel_mul_mat_q5_k_f32( + device const void * src0, + device const float * src1, + device float * dst, + constant int64_t & ne00, + constant int64_t & ne10, + constant int64_t & ne0, + threadgroup float * sum [[threadgroup(0)]], + uint2 tgpig[[threadgroup_position_in_grid]], + uint2 tpitg[[thread_position_in_threadgroup]], + uint2 tptg[[threads_per_threadgroup]]) { + + const uint16_t kmask1 = 0x3f3f; + const uint16_t kmask2 = 0x0f0f; + const uint16_t kmask3 = 0xc0c0; + + const int nb = ne00/QK_K; + + const int64_t r0 = tgpig.x; + const int64_t r1 = tgpig.y; + + device const block_q5_k * x = (device const block_q5_k *) src0 + r0*nb; + device const float * yy = (device const float *) src1 + r1*ne10; + + const int nth = tptg.x*tptg.y; + const int ith = tptg.y*tpitg.x + tpitg.y; + + const int tid = tpitg.y; // 0...16 + const int il = tid/4; // 0...3 + const int ir = tid - 4*il;// 0...3 + const int n = 4; + + const int im = il/2; // 0 or 1. 
0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 + const int in = il%2; + + const int l0 = n*(2*ir + in); + const int q_offset = 32*im + l0; + const int y_offset = 64*im + l0; + + const uint8_t hm1 = 1u << (2*im); + const uint8_t hm2 = hm1 << 1; + const uint8_t hm3 = hm1 << 4; + const uint8_t hm4 = hm2 << 4; + + uchar2 sc1, sc2, sc3, sc4; + + float sumf = 0; + for (int i = tpitg.x; i < nb; i += tptg.x) { + + device const uint8_t * q1 = (x + i)->qs + q_offset; + device const uint8_t * q2 = q1 + 64; + device const uint8_t * qh = (x + i)->qh + l0; + device const float * y1 = yy + i*QK_K + y_offset; + device const float * y2 = y1 + 128; + + const float dall = (float)((x + i)->d); + const float dmin = (float)((x + i)->dmin); + + device const uint16_t * a = (device const uint16_t *)(x + i)->scales; + sc1 = as_type((uint16_t)(a[im+0] & kmask1)); + sc2 = as_type((uint16_t)(a[im+2] & kmask1)); + sc3 = as_type((uint16_t)(((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2))); + sc4 = as_type((uint16_t)(((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2))); + + float4 s = {0.f, 0.f, 0.f, 0.f}; + float smin = 0; + for (int l = 0; l < n; ++l) { + + s[0] += y1[l+ 0] * ((q1[l] & 0xF) + (qh[l] & hm1 ? 16 : 0)); + s[1] += y1[l+32] * ((q1[l] >> 4) + (qh[l] & hm2 ? 16 : 0)); + s[2] += y2[l+ 0] * ((q2[l] & 0xF) + (qh[l] & hm3 ? 16 : 0)); + s[3] += y2[l+32] * ((q2[l] >> 4) + (qh[l] & hm4 ? 16 : 0)); + smin += y1[l] * sc2[0] + y1[l+32] * sc2[1] + y2[l] * sc4[0] + y2[l+32] * sc4[1]; + + } + sumf += dall * (s[0] * sc1[0] + s[1] * sc1[1] + s[2] * sc3[0] + s[3] * sc3[1]) - dmin * smin; + + } + sum[ith] = sumf; + + // + // Accumulate the sum from all threads in the threadgroup + // + threadgroup_barrier(mem_flags::mem_threadgroup); + if (ith%4 == 0) { + sum[ith] += sum[ith+1] + sum[ith+2] + sum[ith+3]; + } + threadgroup_barrier(mem_flags::mem_threadgroup); + if (ith%16 == 0) { + sum[ith] += sum[ith+4] + sum[ith+8] + sum[ith+12]; + } + threadgroup_barrier(mem_flags::mem_threadgroup); + if (ith == 0) { + for (int i = 16; i < nth; i += 16) sum[0] += sum[i]; + dst[r1*ne0 + r0] = sum[0]; + } + +} + kernel void kernel_mul_mat_q6_k_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, - constant int64_t & ne01, - constant uint64_t & nb00, - constant uint64_t & nb01, - constant uint64_t & nb02, constant int64_t & ne10, - constant int64_t & ne11, - constant uint64_t & nb10, - constant uint64_t & nb11, - constant uint64_t & nb12, constant int64_t & ne0, - constant int64_t & ne1, threadgroup float * sum [[threadgroup(0)]], uint2 tgpig[[threadgroup_position_in_grid]], - uint2 tpig[[thread_position_in_grid]], // we don't use this for now uint2 tpitg[[thread_position_in_threadgroup]], uint2 tptg[[threads_per_threadgroup]]) { @@ -1078,24 +1376,29 @@ kernel void kernel_mul_mat_q6_k_f32( device const block_q6_k * x = (device const block_q6_k *) src0 + r0*nb; device const float * yy = (device const float *) src1 + r1*ne10; - const uint nth = tptg.x*tptg.y; - const uint ith = tptg.y*tpitg.x + tpitg.y; + const int nth = tptg.x*tptg.y; + const int ith = tptg.y*tpitg.x + tpitg.y; - const int step = QK_K / tptg.y; // we expect this to be 16 - const int iqs = step * tpitg.y; // 0...240 in steps of 16 + // Note: we absolutely assume that tptg.y = 16 and QK_K = 256! 
+ const int iqs = 16 * tpitg.y; const int ip = iqs / 128; // 0 or 1 const int il = (iqs - 128*ip)/16; // 0...7 const int n = 4; - const int is = 8*ip + (n*il)/16; + const int l0 = n*il; + const int is = 8*ip + l0/16; + + const int y_offset = 128*ip + l0; + const int q_offset_l = 64*ip + l0; + const int q_offset_h = 32*ip + l0; float sumf = 0; for (int i = tpitg.x; i < nb; i += tptg.x) { - device const uint8_t * ql = x[i].ql + 64*ip + n*il; - device const uint8_t * qh = x[i].qh + 32*ip + n*il; + device const uint8_t * ql = x[i].ql + q_offset_l; + device const uint8_t * qh = x[i].qh + q_offset_h; device const int8_t * sc = x[i].scales + is; - device const float * y = yy + i * QK_K + 128*ip + n*il; + device const float * y = yy + i * QK_K + y_offset; const float dall = x[i].d; diff --git a/llama.cpp b/llama.cpp index a9a7794ae..f0f9124d8 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2377,12 +2377,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0); } else { new_type = quantized_type; - // TODO: temporary disabled until Metal / OpenCL support is available - // ref: https://github.com/ggerganov/llama.cpp/issues/1711 - //if (tensor.name == "output.weight") { - // new_type = GGML_TYPE_Q6_K; - //} - if (tensor.name.find("attention.wv.weight") != std::string::npos) { + if (tensor.name == "output.weight") { + new_type = GGML_TYPE_Q6_K; + } + else if (tensor.name.find("attention.wv.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
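For reference, the 6-bit scale/min packing that the Q4_K and Q5_K kernels above unpack with get_scale_min_k4 in ggml-metal.metal can be restated in plain C. This is only an illustrative sketch mirroring the Metal helper shown in the diff (the function name and out-parameter signature below are chosen for the example, not taken from the patch): each super-block carries a 12-byte scales array holding eight 6-bit scales and eight 6-bit mins, and for the last four pairs the upper two bits are stashed in the high bits of the first eight bytes.

    #include <stdint.h>

    // Unpack the j-th (scale, min) pair, 0 <= j < 8, from the 12-byte
    // per-super-block scales array used by Q4_K and Q5_K.
    static void get_scale_min_k4_sketch(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
        if (j < 4) {
            // pairs 0..3: the low 6 bits of bytes 0..3 (scales) and 4..7 (mins)
            *d = q[j + 0] & 63;
            *m = q[j + 4] & 63;
        } else {
            // pairs 4..7: the low nibble of bytes 8..11 gives the scale, the high
            // nibble the min; the top 2 bits of each come from the high bits of
            // bytes 0..3 (scales) and 4..7 (mins)
            *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
            *m = (q[j + 4] >>  4) | ((q[j - 0] >> 6) << 4);
        }
    }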