From 59fb174678c65bb6cf2bd26ce9bd8bdd353234bb Mon Sep 17 00:00:00 2001
From: Concedo <39025047+LostRuins@users.noreply.github.com>
Date: Mon, 24 Apr 2023 23:20:06 +0800
Subject: [PATCH] fixed compile errors, made mmap automatic when lora is
 selected, added updated quantizers and quantization handling for gpt neox
 gpt 2 and gptj

---
 Makefile                          |   6 +-
 ggml_clblast_dequant.cl           |   6 +-
 koboldcpp.py                      |   4 +-
 otherarch/gpt2_v2.cpp             |  34 ++----
 otherarch/gptj_v2.cpp             |  37 ++----
 otherarch/neox.cpp                |  27 +----
 otherarch/tools/common-ggml.cpp   | 192 ++++++++++++++++++++++++++++++
 otherarch/tools/common-ggml.h     |  23 ++++
 otherarch/tools/gpt2_quantize.cpp | 187 +++--------------------------
 otherarch/tools/gptj_quantize.cpp | 180 +++------------------------
 otherarch/tools/neox_quantize.cpp | 191 ++--------------------------
 11 files changed, 297 insertions(+), 590 deletions(-)
 create mode 100644 otherarch/tools/common-ggml.cpp
 create mode 100644 otherarch/tools/common-ggml.h

diff --git a/Makefile b/Makefile
index ac9fc877d..079c68996 100644
--- a/Makefile
+++ b/Makefile
@@ -269,13 +269,13 @@ koboldcpp_clblast: ggml_clblast.o ggml_rwkv.o ggml_v1.o expose.o common.o gpttyp
 quantize_llama: examples/quantize/quantize.cpp ggml.o llama.o
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 
-quantize_gptj: ggml.o llama.o otherarch/tools/gptj_quantize.cpp
+quantize_gptj: ggml.o llama.o otherarch/tools/gptj_quantize.cpp otherarch/tools/common-ggml.cpp
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 
-quantize_gpt2: ggml.o llama.o otherarch/tools/gpt2_quantize.cpp
+quantize_gpt2: ggml.o llama.o otherarch/tools/gpt2_quantize.cpp otherarch/tools/common-ggml.cpp
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 
-quantize_neox: ggml.o llama.o otherarch/tools/neox_quantize.cpp
+quantize_neox: ggml.o llama.o otherarch/tools/neox_quantize.cpp otherarch/tools/common-ggml.cpp
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 
 quantize-stats: examples/quantize-stats/quantize-stats.cpp ggml.o llama.o $(OBJS)

diff --git a/ggml_clblast_dequant.cl b/ggml_clblast_dequant.cl
index 99474fdb3..191b2e575 100644
--- a/ggml_clblast_dequant.cl
+++ b/ggml_clblast_dequant.cl
@@ -51,7 +51,7 @@ __kernel void dequantize_row_q4_2(__global struct block_q4_2* blocks, __global f
     const uint i = get_global_id(0) / 16;
     const uint l = get_local_id(0);
 
-    const float d = vload_half(0, (const half*) &blocks[i].d);;
+    const float d = vload_half(0, (__global half*) &blocks[i].d);;
 
     const uchar vi = blocks[i].qs[l];
 
@@ -71,8 +71,8 @@ __kernel void dequantize_row_q4_3(__global struct block_q4_3* blocks, __global f
     const uint i = get_global_id(0) / 16;
     const uint l = get_local_id(0);
 
-    const float d = vload_half(0, (const half*) &(blocks[i].d));
-    const float m = vload_half(0, (const half*) &(blocks[i].m));
+    const float d = vload_half(0, (__global half*) &(blocks[i].d));
+    const float m = vload_half(0, (__global half*) &(blocks[i].m));
 
     const uchar vi = blocks[i].qs[l];

diff --git a/koboldcpp.py b/koboldcpp.py
index 2a0245135..92f99e7d4 100644
--- a/koboldcpp.py
+++ b/koboldcpp.py
@@ -99,6 +99,8 @@ def load_model(model_filename):
     inputs.threads = args.threads
     inputs.f16_kv = True
     inputs.use_mmap = (not args.nommap)
+    if args.lora and args.lora!="":
+        inputs.use_mmap = False
     inputs.use_smartcontext = args.smartcontext
     inputs.unban_tokens = args.unbantokens
    inputs.blasbatchsize = args.blasbatchsize
@@ -141,7 +143,7 @@ maxctx = 2048
 maxlen = 128
 modelbusy = False
 defaultport = 5001
-KcppVersion = "1.13"
+KcppVersion = "1.13.1"
 
 class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
     sys_version = ""
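The only change in the two kernels above is the address-space qualifier on the pointer handed to `vload_half` — some OpenCL compilers reject or miscompile a plain `const half*` when the data actually lives in `__global` memory. For reference, here is a host-side C++ sketch of what the q4_2 kernel computes, assuming the block layouts ggml used at this revision (16 weights per block, 4 bits each); `half_to_float` is a stand-in for `vload_half`/`ggml_fp16_to_fp32`:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// plain bit-twiddling fp16 -> fp32 decoder (stand-in for vload_half)
static float half_to_float(uint16_t h) {
    const uint32_t sign = (h >> 15) & 1;
    const uint32_t exp  = (h >> 10) & 0x1F;
    const uint32_t man  = h & 0x3FF;
    float v;
    if (exp == 0)       v = std::ldexp((float) man, -24);               // subnormal
    else if (exp == 31) v = man ? NAN : INFINITY;                       // inf / nan
    else                v = std::ldexp((float)(man | 0x400), (int) exp - 25);
    return sign ? -v : v;
}

// assumed layouts: 16 weights per block, packed two nibbles per byte
struct block_q4_2 { uint16_t d;             uint8_t qs[8]; };
struct block_q4_3 { uint16_t d; uint16_t m; uint8_t qs[8]; };

// q4_2 decodes as x = (q - 8) * d; q4_3 as x = q * d + m (hence the extra m load)
static void dequant_q4_2(const block_q4_2 & b, float * out) {
    const float d = half_to_float(b.d);
    for (int j = 0; j < 8; ++j) {
        out[2*j + 0] = (float)((b.qs[j] & 0x0F) - 8) * d;
        out[2*j + 1] = (float)((b.qs[j] >>   4) - 8) * d;
    }
}

int main() {
    block_q4_2 b = { 0x3C00, { 0x9F, 0, 0, 0, 0, 0, 0, 0 } }; // d = 1.0
    float out[16];
    dequant_q4_2(b, out);
    printf("%f %f\n", out[0], out[1]); // 7.0 and 1.0
    return 0;
}
```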
diff --git a/otherarch/gpt2_v2.cpp b/otherarch/gpt2_v2.cpp
index ee180f86c..db230866f 100644
--- a/otherarch/gpt2_v2.cpp
+++ b/otherarch/gpt2_v2.cpp
@@ -91,6 +91,8 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
             case 1: wtype = GGML_TYPE_F16;  break;
             case 2: wtype = GGML_TYPE_Q4_0; break;
             case 3: wtype = GGML_TYPE_Q4_1; break;
+            case 5: wtype = GGML_TYPE_Q4_2; break;
+            case 6: wtype = GGML_TYPE_Q4_3; break;
             default:
                 {
                     fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
@@ -143,7 +145,6 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
 
         ctx_size += (6 + 12*n_layer)*256; // object overhead
 
-
         printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
     }
 
@@ -258,22 +259,20 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         while (true) {
             int32_t n_dims;
             int32_t length;
-            int32_t ftype;
+            int32_t ttype;
 
             fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
             fin.read(reinterpret_cast<char *>(&length), sizeof(length));
-            fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
+            fin.read(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
 
             if (fin.eof()) {
                 break;
             }
 
             int32_t nelements = 1;
-            int64_t ne[2] = { 1, 1 };
+            int32_t ne[2] = { 1, 1 };
             for (int i = 0; i < n_dims; ++i) {
-                int32_t ne_cur;
-                fin.read(reinterpret_cast<char *>(&ne_cur), sizeof(ne_cur));
-                ne[i] = ne_cur;
+                fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
                 nelements *= ne[i];
             }
 
@@ -297,24 +296,12 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
                 return ModelLoadResult::FAIL;
             }
 
+            // for debugging
             if (0) {
-                static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
-                printf("%24s - [%5lld, %5lld], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
+                printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
             }
 
-            size_t bpe = 0;
-
-            switch (ftype) {
-                case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
-                case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
-                case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
-                case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
-                default:
-                        {
-                            fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
-                            return ModelLoadResult::FAIL;
-                        }
-            };
+            const size_t bpe = ggml_type_size(ggml_type(ttype));
 
             if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
                 fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
@@ -370,8 +357,7 @@ bool gpt2_eval(
     const int n_head  = hparams.n_head;
     const int n_vocab = hparams.n_vocab;
 
-    //todo: there is a bug that causes the buffer to oom and I cannot figure it out, hack to increase size for now
-    static size_t buf_size = 256u*1024*1024;
+    static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N*1.9 > buf_size) {
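This hunk and the matching gptj/neox hunks below all make the same move: instead of a hand-maintained per-ftype switch, the bytes-per-element now comes straight from `ggml_type_size()`, and the existing block-size division makes the size check valid for any tensor type. A self-contained sketch of that invariant, with stand-in constants for the Q4 formats of this ggml era (the real code calls `ggml_blck_size()`/`ggml_type_size()`):

```cpp
#include <cstdint>
#include <cstdio>

// stand-ins for ggml's type tables at this revision (values are assumptions)
enum wtype { F32 = 0, F16 = 1, Q4_0 = 2, Q4_1 = 3, Q4_2 = 5, Q4_3 = 6 };

static int blck_size(wtype t) {
    switch (t) {
        case Q4_0: case Q4_1: return 32; // 32 weights per block
        case Q4_2: case Q4_3: return 16; // 16 weights per block
        default:              return 1;  // scalar types
    }
}

static size_t type_size(wtype t) {          // bytes per block
    switch (t) {
        case F32:  return 4;
        case F16:  return 2;
        case Q4_0: return 4 + 16;            // fp32 d + 32 nibbles
        case Q4_1: return 4 + 4 + 16;        // fp32 d, fp32 m + 32 nibbles
        case Q4_2: return 2 + 8;             // fp16 d + 16 nibbles
        case Q4_3: return 2 + 2 + 8;         // fp16 d, fp16 m + 16 nibbles
    }
    return 0;
}

// the check the loaders perform: nelements*bpe / block size == stored bytes
static size_t expected_nbytes(int64_t nelements, wtype t) {
    return (size_t)(nelements * type_size(t)) / blck_size(t);
}

int main() {
    // e.g. a 4096 x 4096 matrix quantized to q4_2:
    printf("%zu bytes\n", expected_nbytes(4096LL * 4096, Q4_2)); // 10485760
    return 0;
}
```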
diff --git a/otherarch/gptj_v2.cpp b/otherarch/gptj_v2.cpp
index 212c79a47..0f8bd8815 100644
--- a/otherarch/gptj_v2.cpp
+++ b/otherarch/gptj_v2.cpp
@@ -91,6 +91,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
             case 1: wtype = GGML_TYPE_F16;  break;
             case 2: wtype = GGML_TYPE_Q4_0; break;
             case 3: wtype = GGML_TYPE_Q4_1; break;
+            case 5: wtype = GGML_TYPE_Q4_2; break;
+            case 6: wtype = GGML_TYPE_Q4_3; break;
             default:
                 {
                     fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
@@ -254,22 +256,20 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         while (true) {
             int32_t n_dims;
             int32_t length;
-            int32_t ftype;
+            int32_t ttype;
 
             fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
             fin.read(reinterpret_cast<char *>(&length), sizeof(length));
-            fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
+            fin.read(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
 
             if (fin.eof()) {
                 break;
             }
 
-            int64_t nelements = 1;
-            int64_t ne[2] = { 1, 1 };
+            int32_t nelements = 1;
+            int32_t ne[2] = { 1, 1 };
             for (int i = 0; i < n_dims; ++i) {
-                int32_t ne_cur;
-                fin.read(reinterpret_cast<char *>(&ne_cur), sizeof(ne_cur));
-                ne[i] = ne_cur;
+                fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
                 nelements *= ne[i];
             }
 
@@ -306,24 +306,12 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
             }
 
+            // for debugging
             if (0) {
-                static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
-                printf("%24s - [%5lld, %5lld], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
+                printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
             }
 
-            size_t bpe = 0;
-
-            switch (ftype) {
-                case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
-                case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
-                case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
-                case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
-                default:
-                        {
-                            fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
-                            return ModelLoadResult::FAIL;
-                        }
-            };
+            const size_t bpe = ggml_type_size(ggml_type(ttype));
 
             if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
                 fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
@@ -333,7 +321,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
 
             fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
 
-            //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
+            //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
 
             total_size += ggml_nbytes(tensor);
             if (++n_tensors % 8 == 0) {
                 printf(".");
@@ -381,8 +369,7 @@ bool gptj_eval(
 
     const int d_key = n_embd/n_head;
 
-    //todo: there is a bug that causes the buffer to oom and I cannot figure it out, hack to increase size for now
-    static size_t buf_size = 256u*1024*1024;
+    static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N*1.9 > buf_size) {

diff --git a/otherarch/neox.cpp b/otherarch/neox.cpp
index 6866736f8..203df0983 100644
--- a/otherarch/neox.cpp
+++ b/otherarch/neox.cpp
@@ -254,11 +254,11 @@ bool stablelm_model_load(const std::string & fname, stablelm_model & model, gpt_
         while (true) {
             int32_t n_dims;
             int32_t length;
-            int32_t ftype;
+            int32_t ttype;
 
             fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
             fin.read(reinterpret_cast<char *>(&length), sizeof(length));
-            fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
+            fin.read(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
 
             if (fin.eof()) {
                 break;
@@ -291,26 +291,12 @@ bool stablelm_model_load(const std::string & fname, stablelm_model & model, gpt_
                 return false;
             }
 
+            // for debugging
             if (0) {
-                static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2", };
-                printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
+                printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
             }
 
-            size_t bpe = 0;
-
-            switch (ftype) {
-                case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
-                case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
-                case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
-                case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
-                case 5: bpe = ggml_type_size(GGML_TYPE_Q4_2); assert(ne[0] % 64 == 0); break;
-                case 6: bpe = ggml_type_size(GGML_TYPE_Q4_3); assert(ne[0] % 64 == 0); break;
-                default:
-                        {
-                            fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
-                            return false;
-                        }
-            };
+            const size_t bpe = ggml_type_size(ggml_type(ttype));
 
             if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
                 fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
@@ -320,7 +306,6 @@ bool stablelm_model_load(const std::string & fname, stablelm_model & model, gpt_
 
             fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
 
-            //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
 
             total_size += ggml_nbytes(tensor);
             if (++n_tensors % 8 == 0) {
                 printf(".");
@@ -364,7 +349,7 @@ bool stablelm_eval(
     const int n_vocab = hparams.n_vocab;
     const int n_rot   = hparams.n_rot;
 
-    static size_t buf_size = 256u*1024*1024;
+    static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N*1.9 > buf_size) {
diff --git a/otherarch/tools/common-ggml.cpp b/otherarch/tools/common-ggml.cpp
new file mode 100644
index 000000000..71406ced7
--- /dev/null
+++ b/otherarch/tools/common-ggml.cpp
@@ -0,0 +1,192 @@
+#include "otherarch/tools/common-ggml.h"
+
+#include "ggml.h"
+
+#include <regex>
+
+bool ggml_common_quantize_0(
+        std::ifstream & finp,
+        std::ofstream & fout,
+        const ggml_mtype mtype,
+        const std::vector<std::string> & to_quant,
+        const std::vector<std::string> & to_skip) {
+
+    ggml_type qtype = GGML_TYPE_F32;
+
+    switch (mtype) {
+        case 2: qtype = GGML_TYPE_Q4_0; break;
+        case 3: qtype = GGML_TYPE_Q4_1; break;
+        case 5: qtype = GGML_TYPE_Q4_2; break;
+        case 6: qtype = GGML_TYPE_Q4_3; break;
+        default:
+            {
+                fprintf(stderr, "%s: invalid model type %d\n", __func__, mtype);
+                return false;
+            }
+    };
+
+    if (!ggml_is_quantized(qtype)) {
+        fprintf(stderr, "%s: invalid quantization type %d (%s)\n", __func__, qtype, ggml_type_name(qtype));
+        return false;
+    }
+
+    size_t total_size_org = 0;
+    size_t total_size_new = 0;
+
+    std::vector<ggml_fp16_t> work;
+
+    std::vector<uint8_t>     data_u8;
+    std::vector<ggml_fp16_t> data_f16;
+    std::vector<float>       data_f32;
+
+    std::vector<int64_t> hist_all(1 << 4, 0);
+
+    while (true) {
+        int32_t n_dims;
+        int32_t length;
+        int32_t ttype;
+
+        finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
+        finp.read(reinterpret_cast<char *>(&length), sizeof(length));
+        finp.read(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
+
+        if (finp.eof()) {
+            break;
+        }
+
+        int32_t nelements = 1;
+        int32_t ne[2] = { 1, 1 };
+        for (int i = 0; i < n_dims; ++i) {
+            finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+            nelements *= ne[i];
+        }
+
+        std::string name(length, 0);
+        finp.read (&name[0], length);
+
+        printf("%64s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ggml_type_name((ggml_type) ttype));
+
+        bool quantize = false;
+
+        // check if we should quantize this tensor
+        for (const auto & s : to_quant) {
+            if (std::regex_match(name, std::regex(s))) {
+                quantize = true;
+                break;
+            }
+        }
+
+        // check if we should skip this tensor
+        for (const auto & s : to_skip) {
+            if (std::regex_match(name, std::regex(s))) {
+                quantize = false;
+                break;
+            }
+        }
+
+        // quantize only 2D tensors
+        quantize &= (n_dims == 2);
+
+        if (quantize) {
+            if (ttype != GGML_TYPE_F32 && ttype != GGML_TYPE_F16) {
+                fprintf(stderr, "%s: unsupported ttype %d (%s) for integer quantization\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
+                return false;
+            }
+
+            if (ttype == GGML_TYPE_F16) {
+                data_f16.resize(nelements);
+                finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
+                data_f32.resize(nelements);
+                for (int i = 0; i < nelements; ++i) {
+                    data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
+                }
+            } else {
+                data_f32.resize(nelements);
+                finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
+            }
+
+            ttype = qtype;
+        } else {
+            const int bpe = (ttype == 0) ? sizeof(float) : sizeof(uint16_t);
+
+            data_u8.resize(nelements*bpe);
+            finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
+        }
+
+        fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
+        fout.write(reinterpret_cast<char *>(&length), sizeof(length));
+        fout.write(reinterpret_cast<char *>(&ttype),  sizeof(ttype));
+        for (int i = 0; i < n_dims; ++i) {
+            fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+        }
+        fout.write(&name[0], length);
+
+        if (quantize) {
+            work.resize(nelements); // for quantization
+
+            size_t cur_size = 0;
+            std::vector<int64_t> hist_cur(1 << 4, 0);
+
+            switch (ttype) {
+                case GGML_TYPE_Q4_0:
+                    {
+                        cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+                    } break;
+                case GGML_TYPE_Q4_1:
+                    {
+                        cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+                    } break;
+                case GGML_TYPE_Q4_2:
+                    {
+                        cur_size = ggml_quantize_q4_2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+                    } break;
+                case GGML_TYPE_Q4_3:
+                    {
+                        cur_size = ggml_quantize_q4_3(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+                    } break;
+                default:
+                    {
+                        fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
+                        return false;
+                    }
+            }
+
+            fout.write(reinterpret_cast<char *>(work.data()), cur_size);
+            total_size_new += cur_size;
+
+            printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
+            for (int i = 0; i < hist_cur.size(); ++i) {
+                hist_all[i] += hist_cur[i];
+            }
+
+            for (int i = 0; i < hist_cur.size(); ++i) {
+                printf("%5.3f ", hist_cur[i] / (float)nelements);
+            }
+            printf("\n");
+        } else {
+            printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
+            fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
+            total_size_new += data_u8.size();
+        }
+
+        total_size_org += nelements * sizeof(float);
+    }
+
+    printf("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
+    printf("%s: quant size  = %8.2f MB | mtype = %d (%s)\n", __func__, total_size_new/1024.0/1024.0, mtype, ggml_type_name(qtype));
+
+    {
+        int64_t sum_all = 0;
+        for (int i = 0; i < hist_all.size(); ++i) {
+            sum_all += hist_all[i];
+        }
+
+        printf("%s: hist: ", __func__);
+        for (int i = 0; i < hist_all.size(); ++i) {
+            printf("%5.3f ", hist_all[i] / (float)sum_all);
+        }
+        printf("\n");
+    }
+
+    return true;
+}
\ No newline at end of file

diff --git a/otherarch/tools/common-ggml.h b/otherarch/tools/common-ggml.h
new file mode 100644
index 000000000..6299cfdb5
--- /dev/null
+++ b/otherarch/tools/common-ggml.h
@@ -0,0 +1,23 @@
+#pragma once
+
+#include <fstream>
+#include <vector>
+#include <string>
+
+// model file types
+enum ggml_mtype {
+    GGML_MTYPE_ALL_F32              = 0,
+    GGML_MTYPE_MOSTLY_F16           = 1,  // except 1d tensors
+    GGML_MTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
+    GGML_MTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
+    GGML_MTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
+    GGML_MTYPE_MOSTLY_Q4_2          = 5,  // except 1d tensors
+    GGML_MTYPE_MOSTLY_Q4_3          = 6,  // except 1d tensors
+};
+
+bool ggml_common_quantize_0(
+        std::ifstream & finp,
+        std::ofstream & fout,
+        const ggml_mtype mtype,
+        const std::vector<std::string> & to_quant,
+        const std::vector<std::string> & to_skip);
\ No newline at end of file
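With this pair of files in place, each per-architecture tool reduces to: copy hparams and vocab, then hand the rest of the stream to `ggml_common_quantize_0`. This also quietly fixes a bug visible in the deletions below — the old per-tool switches ended in `default: ... return 1;` inside a bool-returning function, so an unknown type reported success. A minimal driver sketch, assuming hypothetical file names and that the header/vocab bytes have already been copied the way the real tools do:

```cpp
#include "common-ggml.h"

#include <cstdio>
#include <fstream>
#include <string>
#include <vector>

int main() {
    // streams positioned just past the (already copied) header + vocab
    std::ifstream finp("in.bin",  std::ios::binary);
    std::ofstream fout("out.bin", std::ios::binary);

    // 2 = q4_0, 3 = q4_1, 5 = q4_2, 6 = q4_3 (see ggml_mtype above)
    const ggml_mtype mtype = GGML_MTYPE_MOSTLY_Q4_2;

    const std::vector<std::string> to_quant = { ".*weight" }; // regexes
    const std::vector<std::string> to_skip  = {};             // overrides to_quant

    if (!ggml_common_quantize_0(finp, fout, mtype, to_quant, to_skip)) {
        fprintf(stderr, "quantization failed\n");
        return 1;
    }
    return 0;
}
```

Note that the skip list takes precedence over the quant list, and 1-D tensors (biases, norms) are never quantized regardless of either list.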
diff --git a/otherarch/tools/gpt2_quantize.cpp b/otherarch/tools/gpt2_quantize.cpp
index 9eb7b79b1..196db0cfd 100644
--- a/otherarch/tools/gpt2_quantize.cpp
+++ b/otherarch/tools/gpt2_quantize.cpp
@@ -1,6 +1,7 @@
 #include "ggml.h"
 
 #include "otherarch/utils.h"
+#include "common-ggml.h"
 
 #include <cassert>
 #include <cmath>
@@ -23,20 +24,7 @@ struct gpt2_hparams {
 };
 
 // quantize a model
-bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) {
-    ggml_type type = GGML_TYPE_Q4_1;
-
-    switch (itype) {
-        case 2: type = GGML_TYPE_Q4_0; break;
-        case 3: type = GGML_TYPE_Q4_1; break;
-        default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1;
-    };
-
-    if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) {
-        fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type);
-        return false;
-    }
-
+bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_mtype mtype) {
     gpt_vocab vocab;
 
     printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
@@ -88,7 +76,7 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
         fout.write((char *) &hparams.n_embd,  sizeof(hparams.n_embd));
         fout.write((char *) &hparams.n_head,  sizeof(hparams.n_head));
         fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer));
-        fout.write((char *) &itype,           sizeof(hparams.f16));
+        fout.write((char *) &mtype,           sizeof(hparams.f16));
     }
 
     // load vocab
@@ -118,158 +106,19 @@ bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fnam
         }
     }
 
-    // load weights
-    {
-        size_t total_size_org = 0;
-        size_t total_size_new = 0;
+    // regexes of tensor names to be quantized
+    const std::vector<std::string> to_quant = {
+        "model/wte",
+        "model/lm_head",
+        "model/h.*/attn/c_attn/w",
+        "model/h.*/attn/c_proj/w",
+        "model/h.*/mlp/c_fc/w",
+        "model/h.*/mlp/c_proj/w",
+    };
 
-        std::vector<ggml_fp16_t> work;
-
-        std::vector<uint8_t>     data_u8;
-        std::vector<ggml_fp16_t> data_f16;
-        std::vector<float>       data_f32;
-
-        std::vector<int64_t> hist_all(1 << 4, 0);
-
-        while (true) {
-            int32_t n_dims;
-            int32_t length;
-            int32_t ftype;
-
-            finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            finp.read(reinterpret_cast<char *>(&length), sizeof(length));
-            finp.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-
-            if (finp.eof()) {
-                break;
-            }
-
-            int32_t nelements = 1;
-            int32_t ne[2] = { 1, 1 };
-            for (int i = 0; i < n_dims; ++i) {
-                finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-                nelements *= ne[i];
-            }
-
-            std::string name(length, 0);
-            finp.read (&name[0], length);
-
-            {
-                static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
-                printf("%24s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]);
-            }
-
-            // regexes of tensor names to be quantized
-            const std::vector<std::string> k_names = {
-                "model/wte",
-                "model/lm_head",
-                "model/h.*/attn/c_attn/w",
-                "model/h.*/attn/c_proj/w",
-                "model/h.*/mlp/c_fc/w",
-                "model/h.*/mlp/c_proj/w",
-            };
-
-            bool quantize = false;
-            for (const auto & s : k_names) {
-                if (std::regex_match(name, std::regex(s))) {
-                    quantize = true;
-                    break;
-                }
-            }
-
-            if (quantize) {
-                if (ftype != 0 && ftype != 1) {
-                    fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype);
-                    return false;
-                }
-
-                if (ftype == 1) {
-                    data_f16.resize(nelements);
-                    finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
-                    data_f32.resize(nelements);
-                    for (int i = 0; i < nelements; ++i) {
-                        data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
-                    }
-                } else {
-                    data_f32.resize(nelements);
-                    finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
-                }
-
-                ftype = itype;
-            } else {
-                const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t);
-
-                data_u8.resize(nelements*bpe);
-                finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
-            }
-
-            fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            fout.write(reinterpret_cast<char *>(&length), sizeof(length));
-            fout.write(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-            for (int i = 0; i < n_dims; ++i) {
-                fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-            }
-            fout.write(&name[0], length);
-
-            if (quantize) {
-                printf("quantizing .. ");
-                work.resize(nelements); // for quantization
-
-                size_t cur_size = 0;
-                std::vector<int64_t> hist_cur(1 << 4, 0);
-
-                switch (type) {
-                    case GGML_TYPE_Q4_0:
-                        {
-                            cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_1:
-                        {
-                            cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    default:
-                        {
-                            fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
-                            return false;
-                        }
-                }
-
-                fout.write(reinterpret_cast<char *>(work.data()), cur_size);
-                total_size_new += cur_size;
-
-                printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    hist_all[i] += hist_cur[i];
-                }
-
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    printf("%5.3f ", hist_cur[i] / (float)nelements);
-                }
-                printf("\n");
-            } else {
-                printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
-                fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
-                total_size_new += data_u8.size();
-            }
-
-            total_size_org += nelements * sizeof(float);
-        }
-
-        printf("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-        printf("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-        {
-            int64_t sum_all = 0;
-            for (int i = 0; i < hist_all.size(); ++i) {
-                sum_all += hist_all[i];
-            }
-
-            printf("%s: hist: ", __func__);
-            for (int i = 0; i < hist_all.size(); ++i) {
-                printf("%5.3f ", hist_all[i] / (float)sum_all);
-            }
-            printf("\n");
-        }
+    if (!ggml_common_quantize_0(finp, fout, mtype, to_quant, {})) {
+        fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
+        return false;
     }
 
     finp.close();
@@ -287,6 +136,8 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
         fprintf(stderr, "  type = 2 - q4_0\n");
         fprintf(stderr, "  type = 3 - q4_1\n");
+        fprintf(stderr, "  type = 5 - q4_2\n");
+        fprintf(stderr, "  type = 6 - q4_3\n");
         return 1;
     }
@@ -300,7 +151,7 @@ int main(int argc, char ** argv) {
     const std::string fname_inp = argv[1];
     const std::string fname_out = argv[2];
 
-    const int itype = atoi(argv[3]);
+    const int mtype = atoi(argv[3]);
 
     const int64_t t_main_start_us = ggml_time_us();
 
@@ -310,7 +161,7 @@ int main(int argc, char ** argv) {
     {
         const int64_t t_start_us = ggml_time_us();
 
-        if (!gpt2_model_quantize(fname_inp, fname_out, itype)) {
+        if (!gpt2_model_quantize(fname_inp, fname_out, ggml_mtype(mtype))) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }
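The gpt-2 pattern list above is the one place the tools still differ, and the matching semantics matter: `std::regex_match` anchors at both ends of the tensor name, so `model/h.*/attn/c_attn/w` covers every layer index while leaving biases (`.../b`) untouched. A quick self-contained check (tensor names are illustrative):

```cpp
#include <cstdio>
#include <regex>
#include <string>
#include <vector>

int main() {
    const std::vector<std::string> to_quant = {
        "model/wte",
        "model/h.*/attn/c_attn/w",
    };
    const std::string names[] = {
        "model/wte",              // quantized: exact match
        "model/h7/attn/c_attn/w", // quantized: ".*" absorbs any layer index
        "model/h7/attn/c_attn/b", // kept: bias, whole-string match fails
    };
    for (const auto & name : names) {
        bool quantize = false;
        for (const auto & s : to_quant) {
            if (std::regex_match(name, std::regex(s))) { quantize = true; break; }
        }
        printf("%-24s -> %s\n", name.c_str(), quantize ? "quantize" : "keep");
    }
    return 0;
}
```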
diff --git a/otherarch/tools/gptj_quantize.cpp b/otherarch/tools/gptj_quantize.cpp
index 46eb94792..f3ca21d71 100644
--- a/otherarch/tools/gptj_quantize.cpp
+++ b/otherarch/tools/gptj_quantize.cpp
@@ -1,6 +1,7 @@
 #include "ggml.h"
 
 #include "otherarch/utils.h"
+#include "common-ggml.h"
 
 #include <cassert>
 #include <cmath>
@@ -24,20 +25,7 @@ struct gptj_hparams {
 };
 
 // quantize a model
-bool gptj_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) {
-    ggml_type type = GGML_TYPE_Q4_1;
-
-    switch (itype) {
-        case 2: type = GGML_TYPE_Q4_0; break;
-        case 3: type = GGML_TYPE_Q4_1; break;
-        default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1;
-    };
-
-    if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) {
-        fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type);
-        return false;
-    }
-
+bool gptj_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_mtype mtype) {
     gpt_vocab vocab;
 
     printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
@@ -91,7 +79,7 @@ bool gptj_model_quantize(const std::string & fname_inp, const std::string & fnam
         fout.write((char *) &hparams.n_head,  sizeof(hparams.n_head));
         fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer));
         fout.write((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
-        fout.write((char *) &itype,           sizeof(hparams.f16));
+        fout.write((char *) &mtype,           sizeof(hparams.f16));
     }
 
     // load vocab
@@ -121,156 +109,14 @@ bool gptj_model_quantize(const std::string & fname_inp, const std::string & fnam
         }
     }
 
-    // load weights
-    {
-        size_t total_size_org = 0;
-        size_t total_size_new = 0;
+    // regexes of tensor names to be quantized
+    const std::vector<std::string> to_quant = {
+        ".*weight",
+    };
 
-        std::vector<ggml_fp16_t> work;
-
-        std::vector<uint8_t>     data_u8;
-        std::vector<ggml_fp16_t> data_f16;
-        std::vector<float>       data_f32;
-
-        std::vector<int64_t> hist_all(1 << 4, 0);
-
-        while (true) {
-            int32_t n_dims;
-            int32_t length;
-            int32_t ftype;
-
-            finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            finp.read(reinterpret_cast<char *>(&length), sizeof(length));
-            finp.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-
-            if (finp.eof()) {
-                break;
-            }
-
-            int32_t nelements = 1;
-            int32_t ne[2] = { 1, 1 };
-            for (int i = 0; i < n_dims; ++i) {
-                finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-                nelements *= ne[i];
-            }
-
-            std::string name(length, 0);
-            finp.read (&name[0], length);
-
-            {
-                static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
-                printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]);
-            }
-
-            // regexes of tensor names to be quantized
-            const std::vector<std::string> k_names = {
-                ".*weight",
-            };
-
-            bool quantize = false;
-            for (const auto & s : k_names) {
-                if (std::regex_match(name, std::regex(s))) {
-                    quantize = true;
-                    break;
-                }
-            }
-
-            // quantize only 2D tensors
-            quantize &= (n_dims == 2);
-
-            if (quantize) {
-                if (ftype != 0 && ftype != 1) {
-                    fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype);
-                    return false;
-                }
-
-                if (ftype == 1) {
-                    data_f16.resize(nelements);
-                    finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
-                    data_f32.resize(nelements);
-                    for (int i = 0; i < nelements; ++i) {
-                        data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
-                    }
-                } else {
-                    data_f32.resize(nelements);
-                    finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
-                }
-
-                ftype = itype;
-            } else {
-                const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t);
-
-                data_u8.resize(nelements*bpe);
-                finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
-            }
-
-            fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            fout.write(reinterpret_cast<char *>(&length), sizeof(length));
-            fout.write(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-            for (int i = 0; i < n_dims; ++i) {
-                fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-            }
-            fout.write(&name[0], length);
-
-            if (quantize) {
-                printf("quantizing .. ");
-                work.resize(nelements); // for quantization
-
-                size_t cur_size = 0;
-                std::vector<int64_t> hist_cur(1 << 4, 0);
-
-                switch (type) {
-                    case GGML_TYPE_Q4_0:
-                        {
-                            cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_1:
-                        {
-                            cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    default:
-                        {
-                            fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
-                            return false;
-                        }
-                }
-
-                fout.write(reinterpret_cast<char *>(work.data()), cur_size);
-                total_size_new += cur_size;
-
-                printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    hist_all[i] += hist_cur[i];
-                }
-
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    printf("%5.3f ", hist_cur[i] / (float)nelements);
-                }
-                printf("\n");
-            } else {
-                printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
-                fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
-                total_size_new += data_u8.size();
-            }
-
-            total_size_org += nelements * sizeof(float);
-        }
-
-        printf("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-        printf("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-        {
-            int64_t sum_all = 0;
-            for (int i = 0; i < hist_all.size(); ++i) {
-                sum_all += hist_all[i];
-            }
-
-            printf("%s: hist: ", __func__);
-            for (int i = 0; i < hist_all.size(); ++i) {
-                printf("%5.3f ", hist_all[i] / (float)sum_all);
-            }
-            printf("\n");
-        }
+    if (!ggml_common_quantize_0(finp, fout, mtype, to_quant, {})) {
+        fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
+        return false;
     }
 
     finp.close();
@@ -288,6 +134,8 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
         fprintf(stderr, "  type = 2 - q4_0\n");
         fprintf(stderr, "  type = 3 - q4_1\n");
+        fprintf(stderr, "  type = 5 - q4_2\n");
+        fprintf(stderr, "  type = 6 - q4_3\n");
         return 1;
     }
@@ -301,7 +149,7 @@ int main(int argc, char ** argv) {
     const std::string fname_inp = argv[1];
     const std::string fname_out = argv[2];
 
-    const int itype = atoi(argv[3]);
+    const int mtype = atoi(argv[3]);
 
     const int64_t t_main_start_us = ggml_time_us();
 
@@ -311,7 +159,7 @@ int main(int argc, char ** argv) {
     {
         const int64_t t_start_us = ggml_time_us();
 
-        if (!gptj_model_quantize(fname_inp, fname_out, itype)) {
+        if (!gptj_model_quantize(fname_inp, fname_out, ggml_mtype(mtype))) {
             fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
             return 1;
         }
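One detail that is easy to miss in these hunks: the quantizer writes the `ggml_mtype` integer into the header slot that previously held the f16 flag (`sizeof(hparams.f16)` is still the field width, and both are `int32_t`), which is exactly why the loaders earlier in this patch can simply grow their switch with cases 5 and 6. A sketch of that on-disk contract — field order follows the gpt-j header write above, the leading fields and all values are made up for illustration:

```cpp
#include <cstdint>
#include <cstdio>
#include <fstream>

int main() {
    std::ofstream fout("header.bin", std::ios::binary);

    // hypothetical GPT-J-like hparams; every field is a little-endian int32
    int32_t n_vocab = 50400, n_ctx = 2048, n_embd = 4096,
            n_head = 16, n_layer = 28, n_rot = 64;
    int32_t mtype = 5; // GGML_MTYPE_MOSTLY_Q4_2, stored where "f16" used to be

    fout.write((char *) &n_vocab, sizeof(n_vocab));
    fout.write((char *) &n_ctx,   sizeof(n_ctx));
    fout.write((char *) &n_embd,  sizeof(n_embd));
    fout.write((char *) &n_head,  sizeof(n_head));
    fout.write((char *) &n_layer, sizeof(n_layer));
    fout.write((char *) &n_rot,   sizeof(n_rot));
    fout.write((char *) &mtype,   sizeof(mtype));

    // loader side: switch (mtype) { ... case 5: wtype = GGML_TYPE_Q4_2; ... }
    printf("wrote %d int32 header fields\n", 7);
    return 0;
}
```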
diff --git a/otherarch/tools/neox_quantize.cpp b/otherarch/tools/neox_quantize.cpp
index 60a27df53..d32282eac 100644
--- a/otherarch/tools/neox_quantize.cpp
+++ b/otherarch/tools/neox_quantize.cpp
@@ -1,6 +1,7 @@
 #include "ggml.h"
 
 #include "otherarch/utils.h"
+#include "common-ggml.h"
 
 #include <cassert>
 #include <cmath>
@@ -24,22 +25,7 @@ struct stablelm_hparams {
 };
 
 // quantize a model
-bool stablelm_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) {
-    ggml_type type = GGML_TYPE_Q4_1;
-
-    switch (itype) {
-        case 2: type = GGML_TYPE_Q4_0; break;
-        case 3: type = GGML_TYPE_Q4_1; break;
-        case 5: type = GGML_TYPE_Q4_2; break;
-        case 6: type = GGML_TYPE_Q4_3; break;
-        default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1;
-    };
-
-    if (!ggml_is_quantized(type)) {
-        fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type);
-        return false;
-    }
-
+bool stablelm_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_mtype mtype) {
     gpt_vocab vocab;
 
     printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
@@ -93,7 +79,7 @@ bool stablelm_model_quantize(const std::string & fname_inp, const std::string &
         fout.write((char *) &hparams.n_head,  sizeof(hparams.n_head));
         fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer));
         fout.write((char *) &hparams.n_rot,   sizeof(hparams.n_rot));
-        fout.write((char *) &itype,           sizeof(hparams.ftype));
+        fout.write((char *) &mtype,           sizeof(hparams.ftype));
     }
 
     // load vocab
@@ -115,164 +101,14 @@ bool stablelm_model_quantize(const std::string & fname_inp, const std::string &
         }
     }
 
-    // load weights
-    {
-        size_t total_size_org = 0;
-        size_t total_size_new = 0;
+    // regexes of tensor names to be quantized
+    const std::vector<std::string> to_quant = {
+        ".*weight",
+    };
 
-        std::vector<ggml_fp16_t> work;
-
-        std::vector<uint8_t>     data_u8;
-        std::vector<ggml_fp16_t> data_f16;
-        std::vector<float>       data_f32;
-
-        std::vector<int64_t> hist_all(1 << 4, 0);
-
-        while (true) {
-            int32_t n_dims;
-            int32_t length;
-            int32_t ftype;
-
-            finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            finp.read(reinterpret_cast<char *>(&length), sizeof(length));
-            finp.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-
-            if (finp.eof()) {
-                break;
-            }
-
-            int32_t nelements = 1;
-            int32_t ne[2] = { 1, 1 };
-            for (int i = 0; i < n_dims; ++i) {
-                finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-                nelements *= ne[i];
-            }
-
-            std::string name(length, 0);
-            finp.read (&name[0], length);
-
-            {
-                static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", "q4_2" };
-                printf("%64s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]);
-            }
-
-            // regexes of tensor names to be quantized
-            const std::vector<std::string> k_names = {
-                ".*weight",
-            };
-
-            bool quantize = false;
-            for (const auto & s : k_names) {
-                if (std::regex_match(name, std::regex(s))) {
-                    quantize = true;
-                    break;
-                }
-            }
-
-            // quantize only 2D tensors
-            quantize &= (n_dims == 2);
-
-            if (quantize) {
-                if (ftype != 0 && ftype != 1) {
-                    fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype);
-                    return false;
-                }
-
-                if (ftype == 1) {
-                    data_f16.resize(nelements);
-                    finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
-                    data_f32.resize(nelements);
-                    for (int i = 0; i < nelements; ++i) {
-                        data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
-                    }
-                } else {
-                    data_f32.resize(nelements);
-                    finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
-                }
-
-                ftype = itype;
-            } else {
-                const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t);
-
-                data_u8.resize(nelements*bpe);
-                finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
-            }
-
-            fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
-            fout.write(reinterpret_cast<char *>(&length), sizeof(length));
-            fout.write(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
-            for (int i = 0; i < n_dims; ++i) {
-                fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
-            }
-            fout.write(&name[0], length);
-
-            if (quantize) {
-                printf("quantizing .. ");
-                work.resize(nelements); // for quantization
-
-                size_t cur_size = 0;
-                std::vector<int64_t> hist_cur(1 << 4, 0);
-
-                switch (type) {
-                    case GGML_TYPE_Q4_0:
-                        {
-                            cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_1:
-                        {
-                            cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_2:
-                        {
-                            cur_size = ggml_quantize_q4_2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    case GGML_TYPE_Q4_3:
-                        {
-                            cur_size = ggml_quantize_q4_3(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
-                        } break;
-                    default:
-                        {
-                            fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
-                            return false;
-                        }
-                }
-
-                fout.write(reinterpret_cast<char *>(work.data()), cur_size);
-                total_size_new += cur_size;
-
-                printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    hist_all[i] += hist_cur[i];
-                }
-
-                for (int i = 0; i < hist_cur.size(); ++i) {
-                    printf("%5.3f ", hist_cur[i] / (float)nelements);
-                }
-                printf("\n");
-            } else {
-                printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
-                fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
-                total_size_new += data_u8.size();
-            }
-
-            total_size_org += nelements * sizeof(float);
-        }
-
-        printf("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-        printf("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-        {
-            int64_t sum_all = 0;
-            for (int i = 0; i < hist_all.size(); ++i) {
-                sum_all += hist_all[i];
-            }
-
-            printf("%s: hist: ", __func__);
-            for (int i = 0; i < hist_all.size(); ++i) {
-                printf("%5.3f ", hist_all[i] / (float)sum_all);
-            }
-            printf("\n");
-        }
+    if (!ggml_common_quantize_0(finp, fout, mtype, to_quant, {})) {
+        fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__, fname_inp.c_str());
+        return false;
     }
 
     finp.close();
@@ -285,9 +121,6 @@ bool stablelm_model_quantize(const std::string & fname_inp, const std::string &
 // ./stablelm2-quantize models/stablelm2-117M/ggml-model.bin models/stablelm2-117M/ggml-model-quant.bin type
 //
 int main(int argc, char ** argv) {
-
-    ggml_time_init();
-
     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
         fprintf(stderr, "  type = 2 - q4_0\n");
@@ -307,7 +140,7 @@ int main(int argc, char ** argv) {
     const std::string fname_inp = argv[1];
     const std::string fname_out = argv[2];
 
-    const int itype = atoi(argv[3]);
+    const int mtype = atoi(argv[3]);
 
     const int64_t t_main_start_us = ggml_time_us();
 
@@ -317,7 +150,7 @@ int main(int argc, char ** argv) {
     {
         const int64_t t_start_us = ggml_time_us();
 
-        if (!stablelm_model_quantize(fname_inp, fname_out, itype)) {
+        if (!stablelm_model_quantize(fname_inp, fname_out, ggml_mtype(mtype))) {
             fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
             return 1;
         }