Don't print zeros/NaNs when no count histogram has been collected

Iwan Kawrakow 2023-06-01 14:07:42 +03:00
parent e51ce72e03
commit c5959d53ff


@@ -2248,13 +2248,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             }
 
             printf("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
+            int64_t tot_count = 0;
             for (size_t i = 0; i < hist_cur.size(); i++) {
                 hist_all[i] += hist_cur[i];
+                tot_count += hist_cur[i];
             }
 
+            if (tot_count > 0) {
             for (size_t i = 0; i < hist_cur.size(); i++) {
                 printf("%5.3f ", hist_cur[i] / float(nelements));
             }
+            }
             printf("\n");
         }
         total_size_org += tensor.size;
@@ -2271,6 +2275,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             sum_all += hist_all[i];
         }
 
+        if (sum_all > 0) {
         printf("%s: hist: ", __func__);
         for (size_t i = 0; i < hist_all.size(); i++) {
             printf("%5.3f ", hist_all[i] / float(sum_all));
@@ -2278,6 +2283,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         printf("\n");
     }
 }
+}
 
 //
 // interface implementation
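
For context, the failure mode the commit title refers to can be reproduced in isolation: when no count histogram has been collected, every bucket is zero, so the per-tensor printout is a row of meaningless zeros and the final summary divides by a zero total, printing nan (0/0). The guard added here skips the printout in that case. Below is a minimal standalone sketch of the same idea, not the llama.cpp code itself; the helper name print_hist and the 16-bucket size are illustrative assumptions.

```cpp
#include <cstdio>
#include <cstdint>
#include <vector>

// Illustrative helper (not part of llama.cpp): print a normalized histogram,
// but only when at least one count was actually collected.
static void print_hist(const std::vector<int64_t> & hist) {
    int64_t sum = 0;
    for (int64_t v : hist) {
        sum += v;
    }
    if (sum > 0) {                        // the guard this commit introduces
        printf("hist: ");
        for (int64_t v : hist) {
            printf("%5.3f ", v / float(sum));
        }
        printf("\n");
    }
    // Without the guard, sum == 0 would make every division 0/0 and print "nan".
}

int main() {
    std::vector<int64_t> empty(16, 0);    // no counts collected -> nothing printed
    std::vector<int64_t> some(16, 0);
    some[3] = 5;
    some[8] = 11;                         // a few counts -> normalized fractions printed
    print_hist(empty);
    print_hist(some);
    return 0;
}
```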