From ea79e549f0982053aad636d832d181dcc3d5863b Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Wed, 5 Jul 2023 17:29:35 +0800
Subject: [PATCH] fixed refusing to quantize some models

---
 llama.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index b335d2b4a..626979741 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2404,9 +2404,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             int ny = tensor.ne.at(1);
             if (nx % QK_K != 0 || ny % QK_K != 0) {
                 fprintf(stderr, "\n\n========================= Tensor sizes %d x %d are not divisible by %d\n",nx,ny,QK_K);
-                fprintf(stderr, "This is required to be able to use k-quants for now!\n");
+                fprintf(stderr, "Verify before using\n");
                 fprintf(stderr, "========================================================================================\n\n");
-                throw std::runtime_error("Unsupported tensor size encountered\n");
+                // throw std::runtime_error("Unsupported tensor size encountered\n");
             }
         }
         if (tensor.name == "output.weight") {