allocate chunk counter in wdata

parallelize src1 quantization by column, so that the work can still be split across threads even when src1 has only one row
slaren 2025-02-05 16:17:33 +01:00
parent 7fd0ae588b
commit 0f0d8c3ae7

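The second change is easiest to see in the new #else branches below: instead of handing each thread whole rows of src1 to quantize, every thread converts a contiguous span of quantization blocks within each row. A minimal standalone sketch of that partition follows; the sizes are assumptions for illustration, not values from this commit, and ne10 is assumed to be a multiple of the block size, as ggml requires for quantized types.

/* Standalone sketch of the per-thread block partition used by the new
 * quantization loops: thread ith of nth handles blocks
 * [ith*(ne10/bs)/nth, (ith+1)*(ne10/bs)/nth) of every row, so the work
 * splits across threads even when src1 has a single row.
 * Toy sizes below are assumptions, for illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const int64_t ne10 = 4096; // row length in floats (assumed)
    const int64_t bs   = 32;   // block size of vec_dot_type, e.g. Q8_0 (assumed)
    const int     nth  = 6;    // number of threads (assumed)

    int64_t covered = 0;
    for (int ith = 0; ith < nth; ++ith) {
        const int64_t block_start = (ith       * (ne10/bs)) / nth;
        const int64_t block_end   = ((ith + 1) * (ne10/bs)) / nth;
        printf("thread %d: blocks [%lld, %lld) -> elements [%lld, %lld)\n",
               ith, (long long) block_start, (long long) block_end,
               (long long) (block_start*bs), (long long) (block_end*bs));
        covered += block_end - block_start;
    }
    // the per-thread ranges tile [0, ne10/bs) with no gaps or overlap
    assert(covered == ne10/bs);
    return 0;
}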

@@ -7,10 +7,8 @@
#include "ggml-cpu-impl.h"
#include "ggml-cpu.h"
#include "ggml-impl.h"
#include "ggml-quants.h"
#include "ggml-cpu-quants.h"
#include "ggml-threading.h"
#include "amx/amx.h"
#include "ggml.h"
#if defined(_MSC_VER) || defined(__MINGW32__)
@@ -1297,7 +1295,7 @@ struct ggml_threadpool {
atomic_int n_graph; // incremented when there is work to be done (i.e. each graph)
atomic_int GGML_CACHE_ALIGN n_barrier;
atomic_int GGML_CACHE_ALIGN n_barrier_passed;
atomic_int current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.
atomic_int GGML_CACHE_ALIGN current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.
// these are atomic as an annotation for thread-sanitizer
atomic_bool stop; // Used for stopping the threadpool altogether
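The GGML_CACHE_ALIGN added above keeps the shared chunk counter on its own cache line, so fetch_add traffic on it does not falsely invalidate the neighbouring barrier counters. A minimal C11 sketch of the same idea; the 64-byte line size is an assumption, ggml takes it from its own cache-line macro.

#include <stdatomic.h>

/* Illustration only: give each hot atomic its own (assumed) 64-byte cache
 * line so that updates to one counter do not evict the others from the
 * caches of the other threads. */
struct hot_counters {
    _Alignas(64) atomic_int n_barrier;
    _Alignas(64) atomic_int n_barrier_passed;
    _Alignas(64) atomic_int current_chunk;
};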
@@ -7496,6 +7494,7 @@ UseGgmlGemm1:;
if (src1->type != vec_dot_type) {
char * wdata = params->wdata;
const size_t nbw0 = ggml_type_size(vec_dot_type);
const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
const size_t nbw2 = nbw1*ne11;
const size_t nbw3 = nbw2*ne12;
@@ -7503,6 +7502,7 @@ UseGgmlGemm1:;
assert(params->wsize >= ne13*nbw3);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
#if 0
for (int64_t i13 = 0; i13 < ne13; ++i13) {
for (int64_t i12 = 0; i12 < ne12; ++i12) {
for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
@@ -7512,6 +7512,20 @@ UseGgmlGemm1:;
}
}
}
#else
for (int64_t i13 = 0; i13 < ne13; ++i13) {
for (int64_t i12 = 0; i12 < ne12; ++i12) {
for (int64_t i11 = 0; i11 < ne11; ++i11) {
size_t bs = ggml_blck_size(vec_dot_type);
int64_t ne10_block_start = (ith * ne10/bs) / nth;
int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
(void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
(ne10_block_end - ne10_block_start) * bs);
}
}
}
#endif
}
if (ith == 0) {
@@ -7611,7 +7625,7 @@ UseGgmlGemm2:;
// ggml_compute_forward_mul_mat_id
#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne12 + (i1)]
#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ids->ne[0]*ids->ne[1] + (i1)]
struct mmid_row_mapping {
int32_t i1;
@@ -7622,6 +7636,7 @@ static void ggml_compute_forward_mul_mat_id_one_chunk(
struct ggml_tensor * dst,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
const struct ggml_tensor * ids,
const int64_t cur_a,
const int64_t ir0_start,
const int64_t ir0_end,
@@ -7680,6 +7695,14 @@ static void ggml_compute_forward_mul_mat_id_one_chunk(
}
}
static void * incr_ptr_aligned(void ** p, size_t size, size_t align) {
void * ptr = *p;
ptr = (void *) GGML_PAD((uintptr_t) ptr, align);
*p = (void *) ((char *) ptr + size);
return ptr;
}
static void ggml_compute_forward_mul_mat_id(
const struct ggml_compute_params * params,
struct ggml_tensor * dst) {
@@ -7714,16 +7737,27 @@ static void ggml_compute_forward_mul_mat_id(
const int n_ids = ids->ne[0]; // n_expert_used
const int n_as = ne02; // n_expert
char * wdata_src1_end = (src1->type == vec_dot_type) ?
(char *) params->wdata :
(char *) params->wdata + GGML_PAD(ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
void * wdata_cur = params->wdata;
int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *)(matrix_row_counts + n_as); // [n_as][ne11]
if (src1->type != vec_dot_type) {
incr_ptr_aligned(&wdata_cur, ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
}
int64_t * matrix_row_counts = // [n_as]
incr_ptr_aligned(&wdata_cur, n_as*sizeof(int64_t), sizeof(int64_t));
struct mmid_row_mapping * matrix_rows = // [n_as][ids->ne[0]*ids->ne[1]]
incr_ptr_aligned(&wdata_cur, n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping), sizeof(int64_t));
char (*atomic_current_chunk)[CACHE_LINE_SIZE] = // [n_as]
incr_ptr_aligned(&wdata_cur, CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE);
GGML_ASSERT(params->wsize >= (size_t)((char *) wdata_cur - (char *) params->wdata));
if (src1->type != vec_dot_type) {
char * wdata = params->wdata;
const size_t nbw0 = ggml_type_size(vec_dot_type);
const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
const size_t nbw2 = nbw1*ne11;
const size_t nbw3 = nbw2*ne12;
@@ -7731,6 +7765,7 @@ static void ggml_compute_forward_mul_mat_id(
assert(params->wsize >= ne13*nbw3);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
#if 0
for (int64_t i13 = 0; i13 < ne13; ++i13) {
for (int64_t i12 = ith; i12 < ne12; i12 += nth) {
for (int64_t i11 = 0; i11 < ne11; ++i11) {
@@ -7740,12 +7775,23 @@ static void ggml_compute_forward_mul_mat_id(
}
}
}
#else
for (int64_t i13 = 0; i13 < ne13; ++i13) {
for (int64_t i12 = 0; i12 < ne12; ++i12) {
for (int64_t i11 = 0; i11 < ne11; ++i11) {
size_t bs = ggml_blck_size(vec_dot_type);
int64_t ne10_block_start = (ith * ne10/bs) / nth;
int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
(void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
(ne10_block_end - ne10_block_start) * bs);
}
}
}
#endif
}
if (ith == 0) {
// Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed);
// initialize matrix_row_counts
memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
@@ -7760,13 +7806,16 @@ static void ggml_compute_forward_mul_mat_id(
matrix_row_counts[i02] += 1;
}
}
} else {
// reset current_chunk
for (int cur_a = ith - 1; cur_a < n_as; cur_a += (nth - 1)) {
atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
*current_chunk_ctr = nth;
}
}
ggml_barrier(params->threadpool);
const int64_t rows_total = ggml_nelements(ids);
int64_t rows_processed = 0;
for (int cur_a = 0; cur_a < n_as; ++cur_a) {
const int64_t cne1 = matrix_row_counts[cur_a];
@@ -7774,7 +7823,7 @@ static void ggml_compute_forward_mul_mat_id(
continue;
}
rows_processed += cne1;
//rows_processed += cne1;
const char * src0_cur = (const char *) src0->data + cur_a * nb02;
const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
@@ -7783,6 +7832,7 @@ static void ggml_compute_forward_mul_mat_id(
const int64_t nr0 = ne01;
const int64_t nr1 = cne1;
//int chunk_size = (nr0 + nr1) / nth;
int chunk_size = 16;
if (nr0 == 1 || nr1 == 1) {
chunk_size = 64;
@@ -7801,6 +7851,8 @@ static void ggml_compute_forward_mul_mat_id(
int current_chunk = ith;
atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
while (current_chunk < nchunk0 * nchunk1) {
const int64_t ith0 = current_chunk % nchunk0;
const int64_t ith1 = current_chunk / nchunk0;
@@ -7812,7 +7864,7 @@ static void ggml_compute_forward_mul_mat_id(
const int64_t ir1_end = MIN(ir1_start + dr1, nr1);
ggml_compute_forward_mul_mat_id_one_chunk(
dst, src0, src1, cur_a,
dst, src0, src1, ids, cur_a,
ir0_start, ir0_end, ir1_start, ir1_end,
src0_cur, matrix_rows, row_size, src1_cont, wdata
);
@@ -7821,21 +7873,8 @@ static void ggml_compute_forward_mul_mat_id(
break;
}
current_chunk = atomic_fetch_add_explicit(&params->threadpool->current_chunk, 1, memory_order_relaxed);
current_chunk = atomic_fetch_add_explicit(current_chunk_ctr, 1, memory_order_relaxed);
}
if (rows_processed == rows_total) {
break;
}
ggml_barrier(params->threadpool);
if (ith == 0) {
// Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start.
atomic_store_explicit(&params->threadpool->current_chunk, nth, memory_order_relaxed);
}
ggml_barrier(params->threadpool);
}
}
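With the counters moved into wdata there is one chunk counter per expert, so the per-expert barriers and the rows_processed bookkeeping above can be dropped. The claiming loop itself is unchanged: every thread starts at chunk ith, and the counter is pre-set to nth, so the first atomic increment only happens once the pre-assigned chunks run out. Below is a small pthread-based sketch of that loop; pthreads and the toy sizes are assumptions here, ggml drives this from its own threadpool.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { NTH = 4, NCHUNKS = 16 };   // toy sizes (assumed)

static atomic_int counter;        // next unclaimed chunk; lives in wdata in ggml
static atomic_int processed;

static void * worker(void * arg) {
    int current = (int) (intptr_t) arg;  // every thread starts at its own index
    while (current < NCHUNKS) {
        atomic_fetch_add_explicit(&processed, 1, memory_order_relaxed); // "do the chunk"
        current = atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
    }
    return NULL;
}

int main(void) {
    atomic_store(&counter, NTH);  // chunks 0..NTH-1 are pre-assigned by thread index
    pthread_t th[NTH];
    for (intptr_t i = 0; i < NTH; ++i) {
        pthread_create(&th[i], NULL, worker, (void *) i);
    }
    for (int i = 0; i < NTH; ++i) {
        pthread_join(th[i], NULL);
    }
    printf("processed %d of %d chunks\n", atomic_load(&processed), NCHUNKS); // 16 of 16
    return 0;
}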
@@ -13773,14 +13812,19 @@ struct ggml_cplan ggml_graph_plan(
cur = 0;
const struct ggml_tensor * src0 = node->src[0];
const struct ggml_tensor * src1 = node->src[1];
const struct ggml_tensor * ids = node->src[2];
const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
if (src1->type != vec_dot_type) {
cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
}
const int n_as = src0->ne[2];
cur += GGML_PAD(cur, sizeof(int64_t)); // align
cur += n_as * sizeof(int64_t); // matrix_row_counts
cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
// src1
if (src1->type != vec_dot_type) {
cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)) + sizeof(int64_t);
}
// matrix_row_counts
cur += n_as * sizeof(int64_t) + sizeof(int64_t);
// matrix_rows
cur += n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping) + sizeof(int64_t);
// atomic_current_chunk
cur += CACHE_LINE_SIZE*n_as + CACHE_LINE_SIZE;
} break;
case GGML_OP_OUT_PROD:
{
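Taken together, the mul_mat_id scratch space is now carved out of wdata with incr_ptr_aligned, and the ggml_graph_plan hunk above reserves each region plus one alignment's worth of slack for the worst-case padding. A self-contained sketch of that bump-pointer carving, reusing the shape of the helper added in this commit; the buffer and sizes are assumptions.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))   // stand-in for GGML_PAD

// same shape as the incr_ptr_aligned helper added in this commit
static void * incr_ptr_aligned(void ** p, size_t size, size_t align) {
    void * ptr = *p;
    ptr = (void *) PAD((uintptr_t) ptr, align);
    *p = (void *) ((char *) ptr + size);
    return ptr;
}

int main(void) {
    enum { CACHE_LINE_SIZE = 64 };  // assumed line size
    const int n_as = 4;             // number of experts (assumed)
    static char buf[4096];          // stands in for params->wdata

    void * cur = buf;
    int64_t * row_counts =          // matrix_row_counts region
        incr_ptr_aligned(&cur, n_as * sizeof(int64_t), sizeof(int64_t));
    char (* chunk_ctrs)[CACHE_LINE_SIZE] =  // one cache line per expert counter
        incr_ptr_aligned(&cur, (size_t) CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE);

    // every region comes back aligned, and the total stays within what the
    // planner reserved (size of each region plus alignment slack)
    assert((uintptr_t) row_counts % sizeof(int64_t) == 0);
    assert((uintptr_t) chunk_ctrs % CACHE_LINE_SIZE == 0);
    assert((size_t) ((char *) cur - buf) <= sizeof(buf));
    memset(row_counts, 0, n_as * sizeof(int64_t));
    return 0;
}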