Allow "quantizing" to f16 and f32

Fix an issue where quantizing didn't respect LLAMA_NO_K_QUANTS

Add brief help to the list of quantization types in the quantize tool

Ignore case for quantization type arguments in the quantize tool
KerfuffleV2 2023-06-10 05:03:16 -06:00
parent 74a6d922f1
commit 1e361c531c
4 changed files with 154 additions and 45 deletions
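Taken together, the four changes above let the quantize tool (or anything else calling into llama.cpp) request f16 or f32 as the output "quantization", which amounts to converting or copying the weights rather than compressing them. A minimal sketch of driving that through the public API, assuming the llama_model_quantize_params interface that llama.h exposes around this commit; the thread count and file names are placeholders:

// Sketch: request an f16 "conversion" through llama_model_quantize().
// Assumes the llama.h API of this era; paths and nthread are placeholders.
#include "llama.h"
#include <cstdio>

int main() {
    llama_model_quantize_params params = llama_model_quantize_default_params();
    params.ftype   = LLAMA_FTYPE_MOSTLY_F16; // newly accepted alongside the real quant types
    params.nthread = 4;

    // For f16/f32 each 2D tensor is converted/copied instead of being quantized.
    // (The real tool also initializes the backend and parses its type argument first.)
    if (llama_model_quantize("ggml-model-f32.bin", "ggml-model-f16.bin", &params) != 0) {
        std::fprintf(stderr, "conversion failed\n");
        return 1;
    }
    return 0;
}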

Makefile

@@ -127,6 +127,7 @@ endif
ifndef LLAMA_NO_K_QUANTS
CFLAGS += -DGGML_USE_K_QUANTS
CXXFLAGS += -DGGML_USE_K_QUANTS
OBJS += k_quants.o
endif
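The Makefile hunk above is what makes the LLAMA_NO_K_QUANTS fix possible: k_quants.o is only built, and GGML_USE_K_QUANTS only defined, when the option is left unset, so the quantize tool and llama.cpp below guard all k-quant handling behind that define. A tiny sketch of the gate, assuming it is compiled with the same CXXFLAGS the Makefile sets up:

// Sketch: the compile-time gate the rest of this commit keys off.
// GGML_USE_K_QUANTS comes from CXXFLAGS above unless LLAMA_NO_K_QUANTS is set.
#include <cstdio>

int main() {
#ifdef GGML_USE_K_QUANTS
    std::puts("k-quants built in: q2_K .. q6_K are selectable");
#else
    std::puts("k-quants disabled: only q4_0/q4_1/q5_0/q5_1/q8_0, f16 and f32");
#endif
    return 0;
}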

examples/quantize/quantize.cpp

@@ -4,43 +4,137 @@
#include <cstdio>
#include <cstring>
#include <map>
#include <vector>
#include <iostream>
#include <iomanip>
#include <string>
static const std::map<std::string, llama_ftype> LLAMA_FTYPE_MAP = {
{"q4_0", LLAMA_FTYPE_MOSTLY_Q4_0},
{"q4_1", LLAMA_FTYPE_MOSTLY_Q4_1},
{"q5_0", LLAMA_FTYPE_MOSTLY_Q5_0},
{"q5_1", LLAMA_FTYPE_MOSTLY_Q5_1},
{"q8_0", LLAMA_FTYPE_MOSTLY_Q8_0},
{"q2_K", LLAMA_FTYPE_MOSTLY_Q2_K},
{"q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M},
{"q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S},
{"q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M},
{"q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L},
{"q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M},
{"q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S},
{"q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M},
{"q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M},
{"q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S},
{"q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M},
{"q6_K", LLAMA_FTYPE_MOSTLY_Q6_K},
struct quant_option {
std::string name;
llama_ftype ftype;
std::string desc;
};
bool try_parse_ftype(const std::string & ftype_str, llama_ftype & ftype, std::string & ftype_str_out) {
auto it = LLAMA_FTYPE_MAP.find(ftype_str);
if (it != LLAMA_FTYPE_MAP.end()) {
ftype = it->second;
ftype_str_out = it->first;
return true;
static const std::vector<struct quant_option> QUANT_OPTIONS = {
{
"q4_0",
LLAMA_FTYPE_MOSTLY_Q4_0,
"approx +0.2499 perplexity, 3.50G output @ 7B",
},
{
"q4_1",
LLAMA_FTYPE_MOSTLY_Q4_1,
"approx +0.1846 perplexity, 3.90G output @ 7B",
},
{
"q5_0",
LLAMA_FTYPE_MOSTLY_Q5_0,
"approx +0.0796 perplexity, 4.30G output @ 7B",
},
{
"q5_1",
LLAMA_FTYPE_MOSTLY_Q5_1,
"approx +0.0415 perplexity, 4.70G output @ 7B",
},
#ifdef GGML_USE_K_QUANTS
{
"q2_k",
LLAMA_FTYPE_MOSTLY_Q2_K,
"approx +0.8698 perplexity, 2.67G output @ 7B",
},
{
"q3_k",
LLAMA_FTYPE_MOSTLY_Q3_K_M,
"alias for q3_k_m"
},
{
"q3_k_s",
LLAMA_FTYPE_MOSTLY_Q3_K_S,
"approx +0.5505 perplexity, 2.75G output @ 7B",
},
{
"q3_k_m",
LLAMA_FTYPE_MOSTLY_Q3_K_M,
"approx +0.2437 perplexity, 3.06G output @ 7B",
},
{
"q3_k_l",
LLAMA_FTYPE_MOSTLY_Q3_K_L,
"approx +0.1803 perplexity, 3.35G output @ 7B",
},
{
"q4_k",
LLAMA_FTYPE_MOSTLY_Q4_K_M,
"alias for q4_k_m",
},
{
"q4_k_s",
LLAMA_FTYPE_MOSTLY_Q4_K_S,
"approx +0.1149 perplexity, 3.56G output @ 7B",
},
{
"q4_k_m",
LLAMA_FTYPE_MOSTLY_Q4_K_M,
"approx +0.0535 perplexity, 3.80G output @ 7B",
},
{
"q5_k",
LLAMA_FTYPE_MOSTLY_Q5_K_M,
"alias for q5_k_m",
},
{
"q5_k_s",
LLAMA_FTYPE_MOSTLY_Q5_K_S,
"approx +0.0353 perplexity, 4.33G output @ 7B",
},
{
"q5_k_m",
LLAMA_FTYPE_MOSTLY_Q5_K_M,
"approx +0.0142 perplexity, 4.45G output @ 7B",
},
{
"q6_k",
LLAMA_FTYPE_MOSTLY_Q6_K,
"approx +0.0044 perplexity, 5.15G output @ 7B",
},
#endif
{
"q8_0",
LLAMA_FTYPE_MOSTLY_Q8_0,
"approx +0.0004 perplexity, 6.70G output @ 7B",
},
{
"f16",
LLAMA_FTYPE_MOSTLY_F16,
"no significant perplexity increase, 13.00G output @ 7B",
},
{
"f32",
LLAMA_FTYPE_ALL_F32,
"full quality, 26.00G output @ 7B",
},
};
bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std::string & ftype_str_out) {
std::string ftype_str;
for (auto ch : ftype_str_in) {
ftype_str.push_back(std::tolower(ch));
}
for (auto & it : QUANT_OPTIONS) {
if (it.name == ftype_str) {
ftype = it.ftype;
ftype_str_out = it.name;
return true;
}
}
// try to parse as an integer
try {
int ftype_int = std::stoi(ftype_str);
for (auto it = LLAMA_FTYPE_MAP.begin(); it != LLAMA_FTYPE_MAP.end(); it++) {
if (it->second == ftype_int) {
ftype = it->second;
ftype_str_out = it->first;
for (auto & it : QUANT_OPTIONS) {
if (it.ftype == ftype_int) {
ftype = it.ftype;
ftype_str_out = it.name;
return true;
}
}
@@ -59,8 +153,8 @@ void usage(const char * executable) {
fprintf(stderr, " --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
fprintf(stderr, " --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
fprintf(stderr, "Allowed quantization types:\n");
for (auto it = LLAMA_FTYPE_MAP.begin(); it != LLAMA_FTYPE_MAP.end(); it++) {
fprintf(stderr, " type = \"%s\" or %d\n", it->first.c_str(), it->second);
for (auto & it : QUANT_OPTIONS) {
std::cout << " " << std::setw(2) << it.ftype << " or " << std::setw(6) << it.name << " : " << it.desc << "\n";
}
exit(1);
}
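The new parser lowercases its argument before matching names in QUANT_OPTIONS and still falls back to the numeric ftype id. A self-contained sketch of that lookup, with a stand-in enum and only two table entries (the ids here are made up, not the real llama_ftype values):

// Sketch of the case-insensitive type lookup, reduced to a standalone program.
#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

enum fake_ftype { FAKE_Q4_0 = 2, FAKE_Q4_K_M = 15 };   // hypothetical ids

struct quant_option { std::string name; fake_ftype ftype; std::string desc; };

static const std::vector<quant_option> OPTIONS = {
    { "q4_0",   FAKE_Q4_0,   "example entry" },
    { "q4_k_m", FAKE_Q4_K_M, "example entry" },
};

static bool try_parse_ftype(const std::string & in, fake_ftype & out, std::string & name_out) {
    std::string lowered;
    for (unsigned char ch : in) {
        lowered.push_back((char) std::tolower(ch));     // "Q4_K_M" -> "q4_k_m"
    }
    for (const auto & opt : OPTIONS) {                  // match by (lowercased) name first
        if (opt.name == lowered) { out = opt.ftype; name_out = opt.name; return true; }
    }
    try {                                               // then try the numeric id
        const int id = std::stoi(lowered);
        for (const auto & opt : OPTIONS) {
            if (opt.ftype == id) { out = opt.ftype; name_out = opt.name; return true; }
        }
    } catch (...) {
        // not a number either; fall through
    }
    return false;
}

int main() {
    const char * args[] = { "Q4_K_M", "q4_k_m", "15", "bogus" };
    for (const char * arg : args) {
        fake_ftype ftype; std::string name;
        std::printf("%-8s -> %s\n", arg, try_parse_ftype(arg, ftype, name) ? name.c_str() : "(unknown)");
    }
    return 0;
}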

ggml.c

@@ -16301,6 +16301,19 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i
result = ggml_quantize_q6_K(src + start, block, n, n, hist);
} break;
#endif
case GGML_TYPE_F16:
{
int elemsize = sizeof(ggml_fp16_t);
ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n);
result = n * elemsize;
} break;
case GGML_TYPE_F32:
{
int elemsize = sizeof(float);
result = n * elemsize;
memcpy((uint8_t *)dst + start * elemsize, src + start, result);
} break;
default:
assert(false);
}
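In the new F16 branch the chunk is converted element by element, and in the F32 branch it is a plain memcpy; either way the function reports the number of bytes written for that chunk, and the destination offset is the same element offset expressed two ways (pointer arithmetic on ggml_fp16_t* vs start * elemsize bytes). A standalone sketch of that bookkeeping, using crude stand-ins for ggml_fp16_t and the row converter:

// Sketch of the F16/F32 chunk bookkeeping; fp16_t and fp32_to_fp16 are
// simplified stand-ins for ggml_fp16_t and ggml_fp32_to_fp16_row.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

using fp16_t = uint16_t;                        // stand-in storage type

static fp16_t fp32_to_fp16(float f) {           // crude stand-in, not IEEE-correct
    return (fp16_t) f;
}

// Convert n elements starting at `start`; return bytes written for this chunk.
static size_t chunk_to_f16(const float * src, void * dst, int start, int n) {
    fp16_t * out = (fp16_t *) dst + start;      // element offset, like (ggml_fp16_t *)dst + start
    for (int i = 0; i < n; ++i) out[i] = fp32_to_fp16(src[start + i]);
    return (size_t) n * sizeof(fp16_t);
}

// Copy n elements starting at `start`; return bytes written for this chunk.
static size_t chunk_to_f32(const float * src, void * dst, int start, int n) {
    const size_t bytes = (size_t) n * sizeof(float);
    std::memcpy((uint8_t *) dst + (size_t) start * sizeof(float), src + start, bytes);
    return bytes;
}

int main() {
    std::vector<float>  src(8, 1.0f);
    std::vector<fp16_t> dst16(8);
    std::vector<float>  dst32(8);

    // Two chunks of 4 elements, the way a threaded quantizer might split the work.
    size_t written = chunk_to_f16(src.data(), dst16.data(), 0, 4)
                   + chunk_to_f16(src.data(), dst16.data(), 4, 4);
    std::printf("f16: %zu bytes written (expect %zu)\n", written, 8 * sizeof(fp16_t));

    written = chunk_to_f32(src.data(), dst32.data(), 0, 4)
            + chunk_to_f32(src.data(), dst32.data(), 4, 4);
    std::printf("f32: %zu bytes written (expect %zu)\n", written, 8 * sizeof(float));
    return 0;
}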

llama.cpp

@@ -2298,7 +2298,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
#ifdef GGML_USE_K_QUANTS
// K-quants
case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
case LLAMA_FTYPE_MOSTLY_Q3_K_S:
@@ -2309,6 +2312,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_Q5_K_S:
case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
#endif
default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
}
@@ -2320,6 +2324,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
/*vocab_only*/ false));
llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), params->ftype);
#ifdef GGML_USE_K_QUANTS
int n_attention_wv = 0;
int n_feed_forward_w2 = 0;
for (auto& tensor : model_loader->tensors_map.tensors) {
@@ -2333,6 +2338,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
int i_attention_wv = 0;
int i_feed_forward_w2 = 0;
#endif
size_t total_size_org = 0;
size_t total_size_new = 0;
@@ -2358,12 +2364,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
// quantize only 2D tensors
quantize &= (tensor.ne.size() == 2);
// uncomment this to keep the output layer in FP16
if (!params->quantize_output_tensor && tensor.name == "output.weight") {
quantize = false;
}
quantize = quantize && quantized_type != tensor.type;
quantize &= params->quantize_output_tensor || tensor.name != "output.weight";
quantize &= quantized_type != tensor.type;
enum ggml_type new_type;
void * new_data;
@@ -2377,29 +2379,28 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
} else {
new_type = quantized_type;
#ifdef GGML_USE_K_QUANTS
if (tensor.name == "output.weight") {
new_type = GGML_TYPE_Q6_K;
}
else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
new_type = GGML_TYPE_Q6_K;
} else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
(i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8 ||
(i_attention_wv - n_attention_wv/8)%3 == 2)) new_type = GGML_TYPE_Q6_K;
++i_attention_wv;
}
if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
} else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
(i_feed_forward_w2 < n_feed_forward_w2/8 || i_feed_forward_w2 >= 7*n_feed_forward_w2/8 ||
(i_feed_forward_w2 - n_feed_forward_w2/8)%3 == 2)) new_type = GGML_TYPE_Q6_K;
++i_feed_forward_w2;
}
if (tensor.name.find("attention.wo.weight") != std::string::npos) {
} else if (tensor.name.find("attention.wo.weight") != std::string::npos) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
}
#endif
float * f32_data;
size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
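The q4_K_M/q5_K_M branches above promote a subset of the attention.wv and feed_forward.w2 tensors to Q6_K: the first eighth, the last eighth, and every third tensor in between. A small worked example of which indices the condition picks, assuming n = 32 such tensors (a 7B-sized model; the count is an assumption for illustration):

// Worked example of the promotion rule used above for q4_K_M/q5_K_M:
//   i < n/8 || i >= 7*n/8 || (i - n/8) % 3 == 2
// n = 32 is an assumed tensor count matching a 32-layer 7B model.
#include <cstdio>

int main() {
    const int n = 32;
    int promoted = 0;
    for (int i = 0; i < n; ++i) {
        const bool q6 = i < n/8 || i >= 7*n/8 || (i - n/8) % 3 == 2;
        promoted += q6;
        std::printf("tensor %2d -> %s\n", i, q6 ? "Q6_K" : "base k-quant type");
    }
    std::printf("%d of %d tensors promoted to Q6_K\n", promoted, n);
    return 0;
}

With n = 32 this selects indices 0-3, 28-31, and 6, 9, ..., 27, i.e. 16 of the 32 tensors.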