Arm AArch64: add optimized GEMV and GEMM asm kernels for q4_0_q8_0 quantization and refactor code to address llama.cpp pr#5780 suggestions
parent 340ef07fca
commit 81215ff43a
3 changed files with 25 additions and 284 deletions
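For reference, the computation these kernels accelerate is the blockwise q4_0 x q8_0 dot product. The scalar sketch below is illustrative only: the struct stand-ins and the fp16_to_fp32() helper are assumptions added to make the example self-contained, not code from this commit; the real scalar reference path is ggml_vec_dot_q4_0_q8_0.

#include <stdint.h>

#define QK4_0 32
#define QK8_0 32

typedef uint16_t ggml_half;  // stand-in for ggml's 16-bit float storage type

typedef struct { ggml_half d; uint8_t qs[QK4_0 / 2]; } block_q4_0;
typedef struct { ggml_half d; int8_t  qs[QK8_0];     } block_q8_0;

float fp16_to_fp32(ggml_half h);  // assumed conversion helper (hypothetical name)

// Dot product of one row of q4_0 weights with a q8_0-quantized activation
// vector, n elements in blocks of 32: each 4-bit nibble is centered by
// subtracting 8, multiplied with the matching int8 value, and the integer
// sum is scaled by the product of the two block deltas.
float vec_dot_q4_0_q8_0_scalar(int n, const block_q4_0 * x, const block_q8_0 * y) {
    const int nb = n / QK4_0;
    float sumf = 0.0f;
    for (int i = 0; i < nb; i++) {
        int sumi = 0;
        for (int j = 0; j < QK4_0 / 2; j++) {
            const int v0 = (x[i].qs[j] & 0x0F) - 8;  // low nibble
            const int v1 = (x[i].qs[j] >> 4)   - 8;  // high nibble
            sumi += v0 * y[i].qs[j] + v1 * y[i].qs[j + QK4_0 / 2];
        }
        sumf += sumi * fp16_to_fp32(x[i].d) * fp16_to_fp32(y[i].d);
    }
    return sumf;
}

The GEMV kernel covers the matrix-vector case and the GEMM kernel the matrix-matrix case; both work on weights repacked into the interleaved block_q4_0x4/block_q4_0x8 layouts introduced in the first hunk below.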
@@ -199,6 +199,30 @@ typedef struct {
 } block_q8_1;
 static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding");
 
+typedef struct {
+    ggml_half d[4];         // deltas for 4 q4_0 blocks
+    uint8_t qs[QK4_0 * 2];  // nibbles / quants for 4 q4_0 blocks
+} block_q4_0x4;
+static_assert(sizeof(block_q4_0x4) == 4 * sizeof(ggml_half) + QK4_0 * 2, "wrong q4_0x4 block size/padding");
+
+typedef struct {
+    ggml_half d[8];         // deltas for 8 q4_0 blocks
+    uint8_t qs[QK4_0 * 4];  // nibbles / quants for 8 q4_0 blocks
+} block_q4_0x8;
+static_assert(sizeof(block_q4_0x8) == 8 * sizeof(ggml_half) + QK4_0 * 4, "wrong q4_0x8 block size/padding");
+
+typedef struct {
+    ggml_half d[4];         // deltas for 4 q8_0 blocks
+    int8_t qs[QK8_0 * 4];   // quants for 4 q8_0 blocks
+} block_q8_0x4;
+static_assert(sizeof(block_q8_0x4) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong q8_0x4 block size/padding");
+
+typedef struct {
+    ggml_half d[8];         // deltas for 8 q8_0 blocks
+    int8_t qs[QK8_0 * 8];   // quants for 8 q8_0 blocks
+} block_q8_0x8;
+static_assert(sizeof(block_q8_0x8) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong q8_0x8 block size/padding");
+
 //
 // Super-block quantization structures
 //
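The x4/x8 containers above carry the data of four or eight plain q4_0 blocks, with the deltas gathered into one array and the quant bytes interleaved so a kernel can load the same byte range of several weight rows in one vector load. The sketch below shows one plausible packing order; it assumes the ggml block_q4_0 definition and the block_q4_0x4 added above, and is not necessarily the byte order produced by the real make_block_q4_0x4() helper in this commit.

#include <string.h>

// Illustrative packing of four q4_0 blocks (one from each of four consecutive
// rows) into a block_q4_0x4, interleaving the quant bytes in runs of
// `blocklen` bytes per row. The actual layout used by the kernels may differ.
static block_q4_0x4 pack_q4_0x4_sketch(const block_q4_0 * in[4], int blocklen) {
    block_q4_0x4 out;
    for (int i = 0; i < 4; i++) {
        out.d[i] = in[i]->d;                      // gather the four block deltas
    }
    int dst = 0;
    for (int off = 0; off < QK4_0 / 2; off += blocklen) {
        for (int i = 0; i < 4; i++) {             // round-robin over the four rows
            memcpy(&out.qs[dst], &in[i]->qs[off], blocklen);
            dst += blocklen;
        }
    }
    return out;
}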
@@ -700,64 +700,6 @@ void quantize_row_q4_0(const float * restrict x, void * restrict y, int64_t k) {
     quantize_row_q4_0_reference(x, y, k);
 }
 
-void quantize_row_q4_0_aarch64(const float * src, void * dst, int n, int k) {
-    int nrows_interleaved, blocklen_per_row;
-    typedef block_q4_0x8 block_q4_0xn;
-    typedef block_q4_0xn (*make_block_q4_0xn_t)(const block_q4_0 *, unsigned int, unsigned int);
-    make_block_q4_0xn_t make_block_q4_0xn = make_block_q4_0x8;
-
-    if (ggml_cpu_has_sve() && (svcntw() == 8)) {
-        nrows_interleaved = 8;
-        blocklen_per_row = 8;
-        typedef block_q4_0x8 block_q4_0xn;
-        make_block_q4_0xn = make_block_q4_0x8;
-    }
-    else if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) {
-        nrows_interleaved = 4;
-        blocklen_per_row = 8;
-        typedef block_q4_0x4 block_q4_0xn;
-        make_block_q4_0xn = make_block_q4_0x4;
-    }
-    else if (ggml_cpu_has_neon()) {
-        nrows_interleaved = 4;
-        blocklen_per_row = 4;
-        typedef block_q4_0x4 block_q4_0xn;
-        make_block_q4_0xn = make_block_q4_0x4;
-    }
-    else {
-        assert(false);
-    }
-
-    assert(k % QK4_0 == 0);
-    const int nb = k / QK4_0;
-
-    block_q4_0xn * out_ptr_B = (block_q4_0xn *) malloc(sizeof(block_q4_0xn) * nb);
-    block_q4_0xn * out_ptr_B_start = out_ptr_B;
-
-    for (int b = 0; b < n; b += nrows_interleaved * k) {
-        const block_q4_0 * in_ptrs[nrows_interleaved];
-
-        for (int i = 0; i < nrows_interleaved; i++ ) {
-            in_ptrs[i] = (block_q4_0 *) dst + (b + i * k) / QK4_0;
-            quantize_row_q4_0_reference(src + b + i * k, in_ptrs[i], k);
-        }
-
-        for (int64_t x = 0; x < nb; x++) {
-            *out_ptr_B = make_block_q4_0xn(in_ptrs, blocklen_per_row, 0x88);
-            out_ptr_B++;
-
-            for (int i = 0; i < nrows_interleaved; i++) {
-                in_ptrs[i]++;
-            }
-        }
-        out_ptr_B = out_ptr_B_start;
-        memcpy ((block_q4_0 *) dst + b / QK4_0, out_ptr_B_start, sizeof(block_q4_0xn) * nb);
-    }
-    if (out_ptr_B_start) free(out_ptr_B_start);
-
-    return (n / QK4_0 * sizeof(block_q4_0));
-}
-
 void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int64_t k) {
     const int qk = QK4_1;
 
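One property the removed routine relied on: the interleaved containers are exactly the size of the blocks they aggregate, so the repacked data could be copied straight back over the plain q4_0 output (the memcpy at the end of each row-group iteration). A quick check, assuming the block definitions shown in these hunks and C11 static_assert:

// block_q4_0   = 2-byte delta + 16 quant bytes        = 18 bytes
// block_q4_0x4 = 4 * 2-byte deltas + 64 quant bytes   = 72 bytes  = 4 * 18
// block_q4_0x8 = 8 * 2-byte deltas + 128 quant bytes  = 144 bytes = 8 * 18
static_assert(sizeof(block_q4_0x4) == 4 * sizeof(block_q4_0), "x4 repacking must preserve the row size");
static_assert(sizeof(block_q4_0x8) == 8 * sizeof(block_q4_0), "x8 repacking must preserve the row size");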
@@ -8,232 +8,6 @@
 
 // GGML internal header
 
-#include <stdint.h>
-#include <stddef.h>
-
-#define QK4_0 32
-typedef struct {
-    ggml_fp16_t d;          // delta
-    uint8_t qs[QK4_0 / 2];  // nibbles / quants
-} block_q4_0;
-static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
-
-#define QK4_1 32
-typedef struct {
-    ggml_fp16_t d;          // delta
-    ggml_fp16_t m;          // min
-    uint8_t qs[QK4_1 / 2];  // nibbles / quants
-} block_q4_1;
-static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
-
-#define QK5_0 32
-typedef struct {
-    ggml_fp16_t d;          // delta
-    uint8_t qh[4];          // 5-th bit of quants
-    uint8_t qs[QK5_0 / 2];  // nibbles / quants
-} block_q5_0;
-static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
-
-#define QK5_1 32
-typedef struct {
-    ggml_fp16_t d;          // delta
-    ggml_fp16_t m;          // min
-    uint8_t qh[4];          // 5-th bit of quants
-    uint8_t qs[QK5_1 / 2];  // nibbles / quants
-} block_q5_1;
-static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
-
-#define QK8_0 32
-typedef struct {
-    ggml_fp16_t d;          // delta
-    int8_t qs[QK8_0];       // quants
-} block_q8_0;
-static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
-
-#define QK8_1 32
-typedef struct {
-    float d;                // delta
-    float s;                // d * sum(qs[i])
-    int8_t qs[QK8_1];       // quants
-} block_q8_1;
-static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
-
-typedef struct {
-    ggml_fp16_t d[4];       // deltas for 4 q4_0 blocks
-    uint8_t qs[QK4_0 * 2];  // nibbles / quants for 4 q4_0 blocks
-} block_q4_0x4;
-static_assert(sizeof(block_q4_0x4) == 4 * sizeof(ggml_fp16_t) + QK4_0 * 2, "wrong q4_0x4 block size/padding");
-
-typedef struct {
-    ggml_fp16_t d[8];       // deltas for 8 q4_0 blocks
-    uint8_t qs[QK4_0 * 4];  // nibbles / quants for 8 q4_0 blocks
-} block_q4_0x8;
-static_assert(sizeof(block_q4_0x8) == 8 * sizeof(ggml_fp16_t) + QK4_0 * 4, "wrong q4_0x8 block size/padding");
-
-typedef struct {
-    ggml_fp16_t d[4];       // deltas for 4 q8_0 blocks
-    int8_t qs[QK8_0 * 4];   // quants for 4 q8_0 blocks
-} block_q8_0x4;
-static_assert(sizeof(block_q8_0x4) == 4 * sizeof(ggml_fp16_t) + QK8_0 * 4, "wrong q8_0x4 block size/padding");
-
-typedef struct {
-    ggml_fp16_t d[8];       // deltas for 8 q8_0 blocks
-    int8_t qs[QK8_0 * 8];   // quants for 8 q8_0 blocks
-} block_q8_0x8;
-static_assert(sizeof(block_q8_0x8) == 8 * sizeof(ggml_fp16_t) + QK8_0 * 8, "wrong q8_0x8 block size/padding");
-
-//
-// Super-block quantization structures
-//
-
-// Super-block size
-#ifdef GGML_QKK_64
-#define QK_K 64
-#define K_SCALE_SIZE 4
-#else
-#define QK_K 256
-#define K_SCALE_SIZE 12
-#endif
-
-// 2-bit quantization
-// weight is represented as x = a * q + b
-// 16 blocks of 16 elements each
-// Effectively 2.625 bits per weight
-typedef struct {
-    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
-    uint8_t qs[QK_K/4];      // quants
-    ggml_fp16_t d;           // super-block scale for quantized scales
-    ggml_fp16_t dmin;        // super-block scale for quantized mins
-} block_q2_K;
-static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
-
-// 3-bit quantization
-// weight is represented as x = a * q
-// 16 blocks of 16 elements each
-// Effectively 3.4375 bits per weight
-#ifdef GGML_QKK_64
-typedef struct {
-    uint8_t hmask[QK_K/8]; // quants - high bit
-    uint8_t qs[QK_K/4];    // quants - low 2 bits
-    uint8_t scales[2];
-    ggml_fp16_t d;         // super-block scale
-} block_q3_K;
-static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding");
-#else
-typedef struct {
-    uint8_t hmask[QK_K/8]; // quants - high bit
-    uint8_t qs[QK_K/4];    // quants - low 2 bits
-    uint8_t scales[12];    // scales, quantized with 6 bits
-    ggml_fp16_t d;         // super-block scale
-} block_q3_K;
-static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding");
-#endif
-
-// 4-bit quantization
-// 8 blocks of 32 elements each
-// weight is represented as x = a * q + b
-// Effectively 4.5 bits per weight
-#ifdef GGML_QKK_64
-typedef struct {
-    ggml_fp16_t d[2];   // super-block scales/mins
-    uint8_t scales[2];  // 4-bit block scales/mins
-    uint8_t qs[QK_K/2]; // 4--bit quants
-} block_q4_K;
-static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + QK_K/2 + 2, "wrong q4_K block size/padding");
-#else
-typedef struct {
-    ggml_fp16_t d;                // super-block scale for quantized scales
-    ggml_fp16_t dmin;             // super-block scale for quantized mins
-    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
-    uint8_t qs[QK_K/2];           // 4--bit quants
-} block_q4_K;
-static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding");
-#endif
-
-// 5-bit quantization
-// 8 blocks of 32 elements each
-// weight is represented as x = a * q + b
-// Effectively 5.5 bits per weight
-#ifdef GGML_QKK_64
-typedef struct {
-    ggml_fp16_t d;          // super-block scale
-    int8_t scales[QK_K/16]; // 8-bit block scales
-    uint8_t qh[QK_K/8];     // quants, high bit
-    uint8_t qs[QK_K/2];     // quants, low 4 bits
-} block_q5_K;
-static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
-#else
-typedef struct {
-    ggml_fp16_t d;                // super-block scale for quantized scales
-    ggml_fp16_t dmin;             // super-block scale for quantized mins
-    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
-    uint8_t qh[QK_K/8];           // quants, high bit
-    uint8_t qs[QK_K/2];           // quants, low 4 bits
-} block_q5_K;
-static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
-#endif
-
-// 6-bit quantization
-// weight is represented as x = a * q
-// 16 blocks of 16 elements each
-// Effectively 6.5625 bits per weight
-typedef struct {
-    uint8_t ql[QK_K/2];     // quants, lower 4 bits
-    uint8_t qh[QK_K/4];     // quants, upper 2 bits
-    int8_t scales[QK_K/16]; // scales, quantized with 8 bits
-    ggml_fp16_t d;          // super-block scale
-} block_q6_K;
-static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding");
-
-// This is only used for intermediate quantization and dot products
-typedef struct {
-    float   d;              // delta
-    int8_t  qs[QK_K];       // quants
-    int16_t bsums[QK_K/16]; // sum of quants in groups of 16
-} block_q8_K;
-static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");
-
-// (Almost) "true" 2-bit quantization.
-// Due to the need to use blocks as per ggml design, it ends up using
-// 2.0625 bpw because of the 16-bit scale for each block of 256.
-typedef struct {
-    ggml_fp16_t d;
-    uint16_t qs[QK_K/8];
-} block_iq2_xxs;
-static_assert(sizeof(block_iq2_xxs) == sizeof(ggml_fp16_t) + QK_K/8*sizeof(uint16_t), "wrong iq2_xxs block size/padding");
-
-// 2.3125 bpw quants
-typedef struct {
-    ggml_fp16_t d;
-    uint16_t qs[QK_K/8];
-    uint8_t scales[QK_K/32];
-} block_iq2_xs;
-static_assert(sizeof(block_iq2_xs) == sizeof(ggml_fp16_t) + QK_K/8*sizeof(uint16_t) + QK_K/32, "wrong iq2_xs block size/padding");
-
-// (Almost) "true" 3-bit quantization.
-// Due to the need to use blocks as per ggml design, it ends up using
-// 3.0625 bpw because of the 16-bit scale for each block of 256.
-typedef struct {
-    ggml_fp16_t d;
-    uint8_t qs[3*QK_K/8];
-} block_iq3_xxs;
-static_assert(sizeof(block_iq3_xxs) == sizeof(ggml_fp16_t) + 3*(QK_K/8), "wrong iq3_xxs block size/padding");
-
-typedef struct {
-    ggml_fp16_t d;
-    uint8_t qs[QK_K/8];
-    uint8_t scales[QK_K/16];
-} block_iq1_s;
-static_assert(sizeof(block_iq1_s) == sizeof(ggml_fp16_t) + QK_K/8 + QK_K/16, "wrong iq1_s block size/padding");
-
-// Non-linear quants
-#define QK4_NL 32
-typedef struct {
-    ggml_fp16_t d;
-    uint8_t qs[QK4_NL/2];
-} block_iq4_nl;
-static_assert(sizeof(block_iq4_nl) == sizeof(ggml_fp16_t) + QK4_NL/2, "wrong iq4_nl block size/padding");
-
 #ifdef __cplusplus
 extern "C" {
 #endif
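The "effectively N bits per weight" figures in the removed comments follow directly from the block layouts: bits per weight = 8 * bytes per block / weights per block. A small worked check (plain arithmetic, not code from this commit):

#include <stdio.h>

// Effective bits per weight = 8 * bytes_per_block / weights_per_block.
int main(void) {
    printf("q4_0: %.4f bpw\n", 8.0 * 18.0 / 32.0);   // 2 + 16 bytes per 32 weights      -> 4.5
    printf("q8_0: %.4f bpw\n", 8.0 * 34.0 / 32.0);   // 2 + 32 bytes per 32 weights      -> 8.5
    printf("q2_K: %.4f bpw\n", 8.0 * 84.0 / 256.0);  // 4 + 16 + 64 bytes per 256 weights -> 2.625
    printf("q4_K: %.4f bpw\n", 8.0 * 144.0 / 256.0); // 4 + 12 + 128 bytes per 256        -> 4.5
    printf("q6_K: %.4f bpw\n", 8.0 * 210.0 / 256.0); // 2 + 16 + 192 bytes per 256        -> 6.5625
    return 0;
}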