/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=4 sts=4 sw=4 fenc=utf-8                                :vi│
╚──────────────────────────────────────────────────────────────────────────────╝
│                                                                              │
│ GGML                                                                         │
│ Copyright (c) 2023 Georgi Gerganov                                           │
│                                                                              │
│ Permission is hereby granted, free of charge, to any person obtaining        │
│ a copy of this software and associated documentation files (the              │
│ "Software"), to deal in the Software without restriction, including          │
│ without limitation the rights to use, copy, modify, merge, publish,          │
│ distribute, sublicense, and/or sell copies of the Software, and to           │
│ permit persons to whom the Software is furnished to do so, subject to        │
│ the following conditions:                                                    │
│                                                                              │
│ The above copyright notice and this permission notice shall be               │
│ included in all copies or substantial portions of the Software.              │
│                                                                              │
│ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,              │
│ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF           │
│ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.       │
│ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY         │
│ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,         │
│ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE            │
│ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                       │
│                                                                              │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "third_party/ggml/ggjt.v2.q5_1.h"
#include "libc/assert.h"
#include "libc/math.h"
#include "libc/str/str.h"
#include "third_party/ggml/fp16.internal.h"
#include "third_party/ggml/ggjt.v2.internal.h"
#include "third_party/ggml/ggjt.v2.q8_1.h"

static_assert(sizeof(block_v2_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + V2_QK5_1 / 2,
              "wrong q5_1 block size/padding");

void dequantize_row_v2_q5_1(const void * restrict x_, float * restrict y, int k) {
    const block_v2_q5_1 * restrict x = x_;
    static const int qk = V2_QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        const float d = GGML_FP16_TO_FP32(x[i].d);
        const float m = GGML_FP16_TO_FP32(x[i].m);

        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        for (int j = 0; j < qk/2; ++j) {
            // bit j of qh holds the 5th bit of element j; bit j + 16
            // holds it for element j + qk/2 (shifting right by j + 12
            // lands that bit at position 0x10)
            const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
            const int x1 = (x[i].qs[j] >>   4) | xh_1;

            y[i*qk + j + 0   ] = x0*d + m;
            y[i*qk + j + qk/2] = x1*d + m;
        }
    }
}
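
// Usage sketch (illustrative, not part of this file's API surface):
// round-tripping one block of 32 floats, assuming V2_QK5_1 == 32:
//
//     float src[32] = {/* ... */}, dst[32];
//     block_v2_q5_1 blk;
//     quantize_row_v2_q5_1_reference(src, &blk, 32);
//     dequantize_row_v2_q5_1(&blk, dst, 32);
//     // each dst[j] matches src[j] to within about half the scale d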

void quantize_row_v2_q5_1(const float * restrict x, void * restrict y, int k) {
    quantize_row_v2_q5_1_reference(x, y, k);
}
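
// Note that quantize_row_v2_q5_1 merely forwards to the scalar reference
// implementation at the bottom of this file; unlike the dot product, no
// SIMD-specialized q5_1 quantizer is provided here.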

size_t ggml_quantize_v2_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) {
    assert(k % V2_QK5_1 == 0);
    const int nb = k / V2_QK5_1;

    for (int b = 0; b < n; b += k) {
        block_v2_q5_1 * restrict y = (block_v2_q5_1 *)dst + b/V2_QK5_1;

        quantize_row_v2_q5_1_reference(src + b, y, k);

        for (int i = 0; i < nb; i++) {
            uint32_t qh;
            memcpy(&qh, &y[i].qh, sizeof(qh));

            for (int j = 0; j < V2_QK5_1; j += 2) {
                // element j/2 has its 5th bit at qh bit j/2, and
                // element j/2 + 16 at qh bit j/2 + 16
                const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
                const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));

                // cast to 16 bins
                const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
                const uint8_t vi1 = ((y[i].qs[j/2] >>   4) | vh1) / 2;

                hist[vi0]++;
                hist[vi1]++;
            }
        }
    }

    return (n/V2_QK5_1*sizeof(block_v2_q5_1));
}
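
// The histogram above folds the 32 possible 5-bit codes into 16 bins
// (each reconstructed code is divided by 2), so hist must point to at
// least 16 counters. The return value is the size in bytes of the
// quantized output: one block_v2_q5_1 per V2_QK5_1 input floats.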

void ggml_vec_dot_v2_q5_1_q8_1(const int n,
                               float * restrict s,
                               const void * restrict vx,
                               const void * restrict vy) {
    const int qk = V2_QK8_1;
    const int nb = n / qk;

    assert(n % qk == 0);
    assert(nb % 2 == 0);
    assert(qk == V2_QK5_1);

    const block_v2_q5_1 * restrict x = vx;
    const block_v2_q8_1 * restrict y = vy;

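    // All branches below compute the same decomposition: with 5-bit
    // codes q_x and q8_1 quants q_y, the block dot product
    //   sum((q_x*d + m)*q_y*d_y) = d*d_y*sum(q_x*q_y) + m*(d_y*sum(q_y))
    // splits into an integer dot product scaled by d*d_y plus a per-block
    // correction m*y[i].s, where y[i].s caches d_y*sum(q_y).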
#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

    float summs0 = 0.0f;
    float summs1 = 0.0f;

    uint32_t qh0;
    uint32_t qh1;

    uint64_t tmp0[4];
    uint64_t tmp1[4];

    for (int i = 0; i < nb; i += 2) {
        const block_v2_q5_1 * restrict x0 = &x[i];
        const block_v2_q5_1 * restrict x1 = &x[i + 1];
        const block_v2_q8_1 * restrict y0 = &y[i];
        const block_v2_q8_1 * restrict y1 = &y[i + 1];

        const uint8x16_t m4b = vdupq_n_u8(0x0F);

        summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
        summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;

        // extract the 5th bit via lookup table ((b) << 4)
        memcpy(&qh0, x0->qh, sizeof(qh0));
        memcpy(&qh1, x1->qh, sizeof(qh1));

        tmp0[0] = table_b2b_0[(qh0 >>  0) & 0xFF];
        tmp0[1] = table_b2b_0[(qh0 >>  8) & 0xFF];
        tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
        tmp0[3] = table_b2b_0[(qh0 >> 24)       ];

        tmp1[0] = table_b2b_0[(qh1 >>  0) & 0xFF];
        tmp1[1] = table_b2b_0[(qh1 >>  8) & 0xFF];
        tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
        tmp1[3] = table_b2b_0[(qh1 >> 24)       ];
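
        // each 64-bit table_b2b_0 entry spreads 8 bits of qh across 8
        // bytes that hold 0x10 where the bit is set and 0x00 elsewhere,
        // so tmp0/tmp1 can be OR-ed directly onto the unpacked nibbles
        // below to restore the 5th bit of every element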

        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));

        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
        const uint8x16_t v0_1 = vld1q_u8(x1->qs);

        // 4-bit -> 8-bit
        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));

        // add high bit
        const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
        const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
        const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
        const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);

        // load y
        const int8x16_t v1_0l = vld1q_s8(y0->qs);
        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
        const int8x16_t v1_1l = vld1q_s8(y1->qs);
        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);

        const float x0d = GGML_FP16_TO_FP32(x0->d);
        const float x1d = GGML_FP16_TO_FP32(x1->d);

#if defined(__ARM_FEATURE_DOTPROD)
        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
                        vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), x0d*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
                        vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
                        vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), x1d*y1->d);
#else
        const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
        const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
        const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
        const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));

        const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
        const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
        const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
        const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));

        const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
        const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
        const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
        const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));

        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0d*y0->d);
        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), x1d*y1->d);
#endif
    }

    *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
#elif defined(__wasm_simd128__)
    v128_t sumv = wasm_f32x4_splat(0.0f);

    float summs = 0.0f;

    uint32_t qh;
    uint64_t tmp[4];

    // TODO: check if unrolling this is better
    for (int i = 0; i < nb; ++i) {
        const block_v2_q5_1 * restrict x0 = &x[i];
        const block_v2_q8_1 * restrict y0 = &y[i];

        summs += GGML_FP16_TO_FP32(x0->m) * y0->s;

        const v128_t m4b = wasm_i8x16_splat(0x0F);

        // extract the 5th bit
        memcpy(&qh, x0->qh, sizeof(qh));

        tmp[0] = table_b2b_0[(qh >>  0) & 0xFF];
        tmp[1] = table_b2b_0[(qh >>  8) & 0xFF];
        tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
        tmp[3] = table_b2b_0[(qh >> 24)       ];

        const v128_t qhl = wasm_v128_load(tmp + 0);
        const v128_t qhh = wasm_v128_load(tmp + 2);

        const v128_t v0 = wasm_v128_load(x0->qs);

        // 4-bit -> 8-bit
        const v128_t v0l = wasm_v128_and (v0, m4b);
        const v128_t v0h = wasm_u8x16_shr(v0, 4);

        // add high bit
        const v128_t v0lf = wasm_v128_or(v0l, qhl);
        const v128_t v0hf = wasm_v128_or(v0h, qhh);

        // load y
        const v128_t v1l = wasm_v128_load(y0->qs);
        const v128_t v1h = wasm_v128_load(y0->qs + 16);

        // int8x16 -> int16x8
        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);

        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
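
        // widening to 16-bit lanes is required because the dot products
        // below (wasm_i32x4_dot_i16x8) multiply i16 pairs and sum
        // adjacent products into i32 lanes; baseline wasm simd128 has no
        // 8-bit dot product instruction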

        const float x0d = GGML_FP16_TO_FP32(x0->d);

        // dot product
        sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
                        wasm_i32x4_add(
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
                                           wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
                                           wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), wasm_f32x4_splat(x0d*y0->d)));
    }

    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
#elif defined(__AVX2__)
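    // bytes_from_nibbles_32 unpacks the 16 qs bytes into 32 low nibbles
    // and bytes_from_bits_32 spreads the 32 qh bits across 32 bytes;
    // masking the latter with 0x10 and OR-ing merges in the 5th bits,
    // mirroring the scalar reconstruction in the #else branch below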
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        __m256i bxhi = bytes_from_bits_32(x[i].qh);
        bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
        bx = _mm256_or_si256(bx, bxhi);

        const __m256 dy = _mm256_broadcast_ss(&y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
    }

    *s = hsum_float_8(acc) + summs;
#elif defined(__AVX__)
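    // AVX1 lacks 256-bit integer instructions, so the high-bit merge is
    // performed on the two 128-bit halves with SSE ops, after which the
    // halves are recombined via _mm256_set_m128i for the dot product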
    // Initialize accumulator with zeros
    __m256 acc = _mm256_setzero_ps();
    __m128i mask = _mm_set1_epi8(0x10);

    float summs = 0.0f;

    // Main loop
    for (int i = 0; i < nb; i++) {
        const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));

        summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;

        __m256i bx = bytes_from_nibbles_32(x[i].qs);
        const __m256i bxhi = bytes_from_bits_32(x[i].qh);
        __m128i bxhil = _mm256_castsi256_si128(bxhi);
        __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
        bxhil = _mm_and_si128(bxhil, mask);
        bxhih = _mm_and_si128(bxhih, mask);
        __m128i bxl = _mm256_castsi256_si128(bx);
        __m128i bxh = _mm256_extractf128_si256(bx, 1);
        bxl = _mm_or_si128(bxl, bxhil);
        bxh = _mm_or_si128(bxh, bxhih);
        bx = _mm256_set_m128i(bxh, bxl);

        const __m256 dy = _mm256_broadcast_ss(&y[i].d);
        const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

        const __m256 q = mul_sum_us8_pairs_float(bx, by);

        acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
    }

    *s = hsum_float_8(acc) + summs;
#else
    // scalar
    float sumf = 0.0;

    for (int i = 0; i < nb; i++) {
        uint32_t qh;
        memcpy(&qh, x[i].qh, sizeof(qh));

        int sumi = 0;

        for (int j = 0; j < qk/2; ++j) {
            const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
            const uint8_t xh_1 = ((qh >> (j + 12))     ) & 0x10;

            const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
            const int32_t x1 = (x[i].qs[j] >>  4) | xh_1;

            sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
        }

        sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
    }

    *s = sumf;
#endif
}
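
// Reference quantizer: maps each block of V2_QK5_1 floats to 5-bit codes
// via the affine transform q = round((x - min)/d) with d = (max - min)/31,
// so that dequantization recovers x ~= q*d + min. Rounding is performed by
// adding 0.5f and truncating, which is round-half-up since the operand
// (x - min)*id is never negative.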

void quantize_row_v2_q5_1_reference(const float * restrict x, block_v2_q5_1 * restrict y, int k) {
    const int qk = V2_QK5_1;

    assert(k % qk == 0);

    const int nb = k / qk;

    for (int i = 0; i < nb; i++) {
        float min = FLT_MAX;
        float max = -FLT_MAX;

        for (int j = 0; j < qk; j++) {
            const float v = x[i*qk + j];

            if (v < min) min = v;
            if (v > max) max = v;
        }

        const float d = (max - min) / ((1 << 5) - 1);
        const float id = d ? 1.0f/d : 0.0f;

        y[i].d = GGML_FP32_TO_FP16(d);
        y[i].m = GGML_FP32_TO_FP16(min);

        uint32_t qh = 0;

        for (int j = 0; j < qk/2; ++j) {
            const float x0 = (x[i*qk + 0    + j] - min)*id;
            const float x1 = (x[i*qk + qk/2 + j] - min)*id;

            const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
            const uint8_t xi1 = (uint8_t)(x1 + 0.5f);

            y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);

            // get the 5-th bit and store it in qh at the right position
            qh |= ((xi0 & 0x10) >> 4) << (j + 0);
            qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
        }

        memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
    }
}
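
// Worked example (illustrative numbers, not taken from the source): for a
// block spanning [-1.0, 2.0], d = 3.0/31 ~= 0.0968 and min = -1.0. The
// value x = 0.5 maps to (0.5 + 1.0)*31/3 = 15.5, which rounds up to the
// code q = 16 (low nibble 0x0, high bit set in qh). Dequantizing gives
// 16*0.0968 - 1.0 ~= 0.548, i.e. an error of roughly d/2.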