ggml : remove hist data from the quantization API
ggml-ci
parent a62902ac8e
commit 95ea0ff2df

5 changed files with 9 additions and 69 deletions
@@ -189,12 +189,10 @@ int main(int argc, char ** argv) {
 
     int32_t nelements = sizex*sizey;
 
-    std::vector<int64_t> hist_cur(1 << 4, 0);
-
     // Set up a the benchmark matrices
     // printf("Creating new tensor q11 & Running quantize\n");
     struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey);
-    ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements/m11->ne[0], m11->ne[0], hist_cur.data(), nullptr);
+    ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements/m11->ne[0], m11->ne[0], nullptr);
 
     // Set up a the compute graph
     // printf("Creating new tensor q31\n");
@@ -207,7 +205,7 @@ int main(int argc, char ** argv) {
     // Set up a second graph computation to make sure we override the CPU cache lines
     // printf("Creating new tensor q12 & Running quantize\n");
     struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey);
-    ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements/m12->ne[0], m12->ne[0], hist_cur.data(), nullptr);
+    ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements/m12->ne[0], m12->ne[0], nullptr);
 
     // printf("Creating new tensor q32\n");
     struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2);
@@ -1862,7 +1862,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
 
     std::vector<uint8_t> work(512);
     std::vector<float> conv_buf(512);
-    std::vector<int64_t> hist_all(1 << 4, 0);
     size_t total_size_org = 0;
     size_t total_size_new = 0;
 
@@ -1917,13 +1916,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
            }
            new_data = work.data();
 
-            std::vector<int64_t> hist_cur(1 << 4, 0);
-
-            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], hist_cur.data(), nullptr);
-
-            for (size_t j = 0; j < hist_cur.size(); ++j) {
-                hist_all[j] += hist_cur[j];
-            }
+            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr);
        } else {
            new_type = cur->type;
            new_data = cur->data;
@@ -1958,17 +1951,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
    {
        printf("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
        printf("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
-
-        int64_t sum_all = 0;
-        for (size_t i = 0; i < hist_all.size(); ++i) {
-            sum_all += hist_all[i];
-        }
-
-        printf("%s: hist: ", __func__);
-        for (size_t i = 0; i < hist_all.size(); ++i) {
-            printf("%5.3f ", hist_all[i] / (float)sum_all);
-        }
-        printf("\n");
    }
 
    return true;
ggml.c (3 changes)
@@ -20173,7 +20173,6 @@ size_t ggml_quantize_chunk(
        int start,
        int nrows,
        int n_per_row,
-        int64_t * hist,
        const float * imatrix) {
    const int n = nrows * n_per_row;
 
@@ -20232,8 +20231,6 @@ size_t ggml_quantize_chunk(
 
    GGML_ASSERT(result == nrows * row_size);
 
-    GGML_UNUSED(hist); // TODO: populate
-
    return result;
}
 
ggml.h (1 change)
@@ -2205,7 +2205,6 @@ extern "C" {
            int start,
            int nrows,
            int n_per_row,
-            int64_t * hist,
            const float * imatrix);
 
    //
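Note: the following is a minimal sketch of a call site against the updated declaration above, not code from this commit. The tensor shape and GGML_TYPE_Q4_0 are illustrative, ggml_row_size() is assumed available to size the output buffer, and nullptr now goes directly where the hist pointer used to be.

#include "ggml.h"

#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
    const int nrows     = 128;
    const int n_per_row = 4096;  // must be a multiple of the Q4_0 block size (32)

    std::vector<float>   src(nrows * n_per_row, 1.0f);                          // dummy f32 input
    std::vector<uint8_t> dst(ggml_row_size(GGML_TYPE_Q4_0, n_per_row) * nrows); // quantized output

    // start = 0, no importance matrix; the trailing int64_t * hist argument is gone
    const size_t new_size = ggml_quantize_chunk(GGML_TYPE_Q4_0, src.data(), dst.data(),
                                                /*start =*/ 0, nrows, n_per_row, /*imatrix =*/ nullptr);

    return new_size == dst.size() ? 0 : 1;  // the chunk should fill exactly nrows rows
}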
llama.cpp (48 changes)
@@ -11890,17 +11890,16 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
    return new_type;
}
 
-static int32_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, int64_t * hist_cur, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
+static int32_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
    std::mutex mutex;
    int counter = 0;
    size_t new_size = 0;
    if (nthread < 2) {
        // single-thread
-        return ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, hist_cur, imatrix);
+        return ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
    }
-    auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, chunk_size,
+    auto compute = [&mutex, &counter, &new_size, new_type, f32_data, new_data, chunk_size,
            nrows, n_per_row, imatrix]() {
-        std::array<int64_t, 1 << 4> local_hist = {};
        const int nrows_per_chunk = chunk_size / n_per_row;
        size_t local_size = 0;
        while (true) {
@@ -11908,17 +11907,13 @@ static int32_t llama_tensor_quantize_internal(enum ggml_type new_type, const flo
            int first_row = counter; counter += nrows_per_chunk;
            if (first_row >= nrows) {
                if (local_size > 0) {
-                    for (int j=0; j<int(local_hist.size()); ++j) {
-                        hist_cur[j] += local_hist[j];
-                    }
                    new_size += local_size;
                }
                break;
            }
            lock.unlock();
            const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
-            local_size += ggml_quantize_chunk(new_type, f32_data, new_data,
-                    first_row * n_per_row, this_nrow, n_per_row, local_hist.data(), imatrix);
+            local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
        }
    };
    for (int it = 0; it < nthread - 1; ++it) {
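Note: a standalone sketch of the dispatch pattern that remains after the hunk above, not llama.cpp code. A mutex-guarded row counter hands out fixed-size chunks and each worker now only folds its byte count into the shared total; quantize_rows() is a made-up stand-in for ggml_quantize_chunk(), and all sizes are illustrative.

#include <algorithm>
#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>

// Stand-in for ggml_quantize_chunk(): pretend 4-bit-ish output, half a byte per float.
static size_t quantize_rows(int /*first_row*/, int nrows, int n_per_row) {
    return (size_t) nrows * n_per_row / 2;
}

int main() {
    const int nrows = 1000, n_per_row = 4096, nrows_per_chunk = 64, nthread = 4;

    std::mutex mutex;
    int counter = 0;      // next row index to hand out
    size_t new_size = 0;  // total quantized bytes across all workers

    auto compute = [&]() {
        size_t local_size = 0;
        while (true) {
            std::unique_lock<std::mutex> lock(mutex);
            int first_row = counter; counter += nrows_per_chunk;
            if (first_row >= nrows) {
                new_size += local_size;  // publish once, still under the lock
                break;
            }
            lock.unlock();
            const int this_nrow = std::min(nrows - first_row, nrows_per_chunk);
            local_size += quantize_rows(first_row, this_nrow, n_per_row);
        }
    };

    std::vector<std::thread> workers;
    for (int it = 0; it < nthread - 1; ++it) workers.emplace_back(compute);
    compute();  // the calling thread participates as well
    for (auto & w : workers) w.join();

    return new_size == (size_t) nrows * n_per_row / 2 ? 0 : 1;
}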
@@ -12041,7 +12036,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
    size_t total_size_org = 0;
    size_t total_size_new = 0;
-    std::vector<int64_t> hist_all(1 << 4, 0);
 
    std::vector<std::thread> workers;
    workers.reserve(nthread);
@@ -12175,7 +12169,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
                work.resize(nelements * 4); // upper bound on size
            }
            new_data = work.data();
-            std::array<int64_t, 1 << 4> hist_cur = {};
 
            const int n_per_row = tensor->ne[0];
            const int nrows = nelements / n_per_row;
@@ -12185,22 +12178,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
            const int nchunk = (nelements + chunk_size - 1)/chunk_size;
            const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
-            new_size = llama_tensor_quantize_internal(new_type, f32_data, new_data, chunk_size, nrows, n_per_row, hist_cur.data(), imatrix, workers, nthread_use);
+            new_size = llama_tensor_quantize_internal(new_type, f32_data, new_data, chunk_size, nrows, n_per_row, imatrix, workers, nthread_use);
 
-            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
-            int64_t tot_count = 0;
-            for (size_t i = 0; i < hist_cur.size(); i++) {
-                hist_all[i] += hist_cur[i];
-                tot_count += hist_cur[i];
-            }
-
-            if (tot_count > 0) {
-                LLAMA_LOG_INFO(" | hist: ");
-                for (size_t i = 0; i < hist_cur.size(); i++) {
-                    LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
-                }
-            }
-            LLAMA_LOG_INFO("\n");
+            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
        }
        total_size_org += ggml_nbytes(tensor);
        total_size_new += new_size;
@@ -12229,22 +12209,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
    LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
    LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-    // print histogram for all tensors
-    {
-        int64_t sum_all = 0;
-        for (size_t i = 0; i < hist_all.size(); i++) {
-            sum_all += hist_all[i];
-        }
-
-        if (sum_all > 0) {
-            LLAMA_LOG_INFO("%s: hist: ", __func__);
-            for (size_t i = 0; i < hist_all.size(); i++) {
-                LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
-            }
-            LLAMA_LOG_INFO("\n");
-        }
-    }
 
    if (qs.n_fallback > 0) {
        LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n",
            __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);