As an alternative, to avoid failing on Metal due to its lack of Q8_0 support, quantize tok_embeddings.weight to Q4_0 and retain output.weight as F16 instead. This results in a net size increase of about 55 MB for a 7B model compared to the previous approach, but should minimize the adverse impact on model quality.
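A quick back-of-the-envelope check of the ~55 MB figure, as a sketch rather than part of the commit: it assumes LLaMA-7B shapes (n_vocab = 32000, n_embd = 4096) and the ggml block layouts of the time (Q8_0: 34 bytes per 32-weight block; Q4_0: 18 bytes per 32-weight block; F16: 2 bytes per weight).

```cpp
#include <cstdint>
#include <cstdio>

// Bytes needed to store n weights, given the byte size of one
// 32-weight ggml block.
static uint64_t tensor_bytes(uint64_t n, uint64_t bytes_per_block) {
    return n / 32 * bytes_per_block;
}

int main() {
    // tok_embeddings.weight and output.weight are each n_vocab * n_embd.
    const uint64_t n = 32000ull * 4096ull;

    // Previous fallback: both tensors at Q8_0 (34 bytes per block).
    const uint64_t old_total = 2 * tensor_bytes(n, 34);

    // New fallback: tok_embeddings at Q4_0 (18 bytes per block),
    // output kept at F16 (2 bytes per weight).
    const uint64_t new_total = tensor_bytes(n, 18) + n * 2;

    printf("old: %.1f MiB, new: %.1f MiB, delta: %+.1f MiB\n",
           old_total / 1048576.0, new_total / 1048576.0,
           ((int64_t)new_total - (int64_t)old_total) / 1048576.0);
    // Prints: old: 265.6 MiB, new: 320.3 MiB, delta: +54.7 MiB
}
```

The asymmetry reflects the trade-off stated in the commit message: the extra bytes go to output.weight (kept at full F16) while tok_embeddings.weight is compressed harder.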
This commit is contained in:
parent
048dca9809
commit
fd9a2fdfe2
1 changed file with 9 additions and 2 deletions
llama.cpp | 11 +++++++++--

@@ -2435,7 +2435,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             int ny = tensor.ne.at(1);
             if (nx % QK_K != 0 || ny % QK_K != 0) {
                 fprintf(stderr, "\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K);
-                fprintf(stderr, "Q8_0 will be used for this tensor instead.\n");
                 convert_incompatible_tensor = true;
             }
         }
@@ -2465,7 +2464,15 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
         }
         if (convert_incompatible_tensor) {
-            new_type = GGML_TYPE_Q8_0; //fall back to Q8_0 instead of just failing.
+            if (tensor.name == "output.weight") {
+                new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing.
+                fprintf(stderr, "F16 will be used for this tensor instead.\n");
+            } else if (tensor.name == "tok_embeddings.weight") {
+                new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing.
+                fprintf(stderr, "Q4_0 will be used for this tensor instead.\n");
+            } else {
+                throw std::runtime_error("Unsupported tensor size encountered\n");
+            }
         }
 #endif
 
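For context on when convert_incompatible_tensor gets set: k-quants pack weights into super-blocks of QK_K = 256, so both dimensions of a 2-D tensor must be divisible by 256. A minimal illustration of the same check, with hypothetical example shapes (an embedding width such as OpenLLaMA-3B's 3200 fails it, while LLaMA-7B's 4096 x 32000 passes):

```cpp
#include <cstdio>

constexpr int QK_K = 256; // k-quant super-block size

// Mirrors the divisibility test in llama_model_quantize_internal:
// both dimensions of a 2-D tensor must be multiples of QK_K.
static bool k_quant_compatible(int nx, int ny) {
    return nx % QK_K == 0 && ny % QK_K == 0;
}

int main() {
    printf("4096 x 32000: %s\n", k_quant_compatible(4096, 32000) ? "ok" : "fallback"); // ok
    printf("3200 x 32000: %s\n", k_quant_compatible(3200, 32000) ? "ok" : "fallback"); // fallback (3200 % 256 = 128)
}
```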