Merge branch 'ggerganov:master' into master
commit 98e070c120

4 changed files with 58 additions and 61 deletions

.github/workflows/build.yml (vendored), 2 changes:
@@ -92,7 +92,7 @@ jobs:
           name: llama-bin-macos-arm64.zip
 
   macOS-latest-cmake-x64:
-    runs-on: macos-12
+    runs-on: macos-13
 
     steps:
       - name: Clone
@@ -304,6 +304,7 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
         .nrows = 1,
     },
     [GGML_TYPE_Q8_0] = {
+        .from_float_to_mat = quantize_mat_q8_0,
         .vec_dot = ggml_vec_dot_q8_0_q8_0,
         .vec_dot_type = GGML_TYPE_Q8_0,
 #if defined (__ARM_FEATURE_MATMUL_INT8)
@@ -13789,6 +13790,13 @@ int ggml_cpu_get_sve_cnt(void) {
 }
 
 void ggml_cpu_init(void) {
+    // needed to initialize f16 tables
+    {
+        struct ggml_init_params params = { 0, NULL, false };
+        struct ggml_context * ctx = ggml_init(params);
+        ggml_free(ctx);
+    }
+
     ggml_critical_section_start();
 
     static bool is_first_call = true;
@@ -13796,24 +13804,21 @@ void ggml_cpu_init(void) {
     if (is_first_call) {
         // initialize GELU, Quick GELU, SILU and EXP F32 tables
         {
-            // FIXME: this may be called before ggml_init
-            //const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
+            const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
 
             for (int i = 0; i < (1 << 16); ++i) {
                 union {
                     uint16_t u16;
                     ggml_fp16_t fp16;
                 } u = {i};
-                // FIXME: this table is used in conversion functions outside of compute
-                // current code depends on ggml_init initializing this table
-                float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(u.fp16);
+                float f = GGML_FP16_TO_FP32(u.fp16);
                 ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
                 ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
             }
 
-            //const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
+            const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
 
-            //GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0);
+            GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0);
         }
 
 #if defined(__ARM_ARCH)
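The loop above touches every 16-bit pattern exactly once, so later fp16 to fp32 conversions become a single table load. A standalone sketch of the same table-driven technique, with illustrative names and a hand-rolled software decoder (the library uses its own GGML_COMPUTE_FP16_TO_FP32; this only models the idea):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Scalar IEEE binary16 -> binary32 decode (normals, subnormals, inf/NaN).
static float fp16_to_fp32(uint16_t h) {
    uint32_t sign = (uint32_t)(h & 0x8000) << 16;
    uint32_t exp  = (h >> 10) & 0x1F;
    uint32_t mant = h & 0x3FF;
    uint32_t bits;
    if (exp == 0) {
        if (mant == 0) {
            bits = sign;                                  // signed zero
        } else {
            int e = -1;                                   // normalize a subnormal
            do { mant <<= 1; e++; } while (!(mant & 0x400));
            bits = sign | ((uint32_t)(127 - 15 - e) << 23) | ((mant & 0x3FF) << 13);
        }
    } else if (exp == 0x1F) {
        bits = sign | 0x7F800000u | (mant << 13);         // inf / NaN
    } else {
        bits = sign | ((exp - 15 + 127) << 23) | (mant << 13);
    }
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

// Hypothetical table mirroring the ggml_table_f32_f16 idea: one entry per pattern.
static float table_f32_f16[1 << 16];

int main(void) {
    for (uint32_t i = 0; i < (1u << 16); ++i) {
        table_f32_f16[i] = fp16_to_fp32((uint16_t)i);
    }
    printf("%f %f\n", table_f32_f16[0x3C00], table_f32_f16[0xC000]); // 1.0 -2.0
    return 0;
}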
@@ -9104,10 +9104,8 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 
 #elif defined __AVX__
 
-    const __m128i m4 = _mm_set1_epi8(0xF);
     const __m128i m3 = _mm_set1_epi8(3);
-    const __m128i m32s = _mm_set1_epi8(32);
-    const __m128i m2 = _mm_set1_epi8(2);
+    const __m128i m15 = _mm_set1_epi8(15);
 
     __m256 acc = _mm256_setzero_ps();
 
@@ -9119,12 +9117,20 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
         const uint8_t * restrict qh = x[i].qh;
         const int8_t * restrict q8 = y[i].qs;
 
+        // handle the q6_k -32 offset separately using bsums
+        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)y[i].bsums);
+        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)y[i].bsums + 1);
         const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+        const __m128i scales_16_0 = _mm_cvtepi8_epi16(scales);
+        const __m128i scales_16_1 = _mm_cvtepi8_epi16(_mm_bsrli_si128(scales, 8));
+        const __m128i q8sclsub_0 = _mm_slli_epi32(_mm_madd_epi16(q8sums_0, scales_16_0), 5);
+        const __m128i q8sclsub_1 = _mm_slli_epi32(_mm_madd_epi16(q8sums_1, scales_16_1), 5);
 
         __m128i sumi_0 = _mm_setzero_si128();
         __m128i sumi_1 = _mm_setzero_si128();
 
-        __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
+        int is = 0;
 
         for (int j = 0; j < QK_K/128; ++j) {
+
             const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
@@ -9132,26 +9138,26 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 
             const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
             const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
-            const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
-            const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
-            const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
-            const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
-            const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
-            const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
+            const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(12)), 2);
+            const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(12)), 2);
+            const __m128i q4h_4 = _mm_and_si128(q4bitsH_0, _mm_set1_epi8(48));
+            const __m128i q4h_5 = _mm_and_si128(q4bitsH_1, _mm_set1_epi8(48));
+            const __m128i q4h_6 = _mm_srli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(-64)), 2);
+            const __m128i q4h_7 = _mm_srli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(-64)), 2);
 
             const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
             const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
             const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
             const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
 
-            const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
-            const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
-            const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
-            const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
-            const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
-            const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
-            const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
-            const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
+            const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m15), q4h_0);
+            const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m15), q4h_1);
+            const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m15), q4h_2);
+            const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m15), q4h_3);
+            const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m15), q4h_4);
+            const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m15), q4h_5);
+            const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m15), q4h_6);
+            const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m15), q4h_7);
 
             const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
             const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
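The rewritten q4h_* extraction masks each 2-bit field where it sits and shifts by the net amount, replacing the old shift-then-mask-then-shift sequence; _mm_set1_epi8(-64) is the 0xC0 mask, and masking before the 16-bit shift guarantees nothing leaks across byte lanes. A scalar check of the three identities over every byte value:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    for (int qh = 0; qh < 256; ++qh) {
        // bits 3:2 -> positions 5:4
        uint8_t old2 = (uint8_t)(((qh >> 2) & 3) << 4);
        uint8_t new2 = (uint8_t)((qh & 12) << 2);
        // bits 5:4 -> positions 5:4 (already in place)
        uint8_t old4 = (uint8_t)(((qh >> 4) & 3) << 4);
        uint8_t new4 = (uint8_t)(qh & 48);
        // bits 7:6 -> positions 5:4
        uint8_t old6 = (uint8_t)(((qh >> 6) & 3) << 4);
        uint8_t new6 = (uint8_t)((qh & 0xC0) >> 2);
        if (old2 != new2 || old4 != new4 || old6 != new6) {
            printf("mismatch at %d\n", qh);
            return 1;
        }
    }
    printf("all 256 byte values match\n");
    return 0;
}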
@@ -9162,15 +9168,6 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
             const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
             const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
 
-            __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
-            __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
-            __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
-            __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
-            __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
-            __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
-            __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
-            __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
-
             __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
             __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
             __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
@@ -9180,32 +9177,20 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
             __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
             __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
 
-            p16_0 = _mm_sub_epi16(p16_0, q8s_0);
-            p16_1 = _mm_sub_epi16(p16_1, q8s_1);
-            p16_2 = _mm_sub_epi16(p16_2, q8s_2);
-            p16_3 = _mm_sub_epi16(p16_3, q8s_3);
-            p16_4 = _mm_sub_epi16(p16_4, q8s_4);
-            p16_5 = _mm_sub_epi16(p16_5, q8s_5);
-            p16_6 = _mm_sub_epi16(p16_6, q8s_6);
-            p16_7 = _mm_sub_epi16(p16_7, q8s_7);
-
-            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi8(shuffle, m2);
-            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi8(shuffle, m2);
-            const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi8(shuffle, m2);
-            const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi8(shuffle, m2);
+            const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
+            const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
+            const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
+            const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
+            is += 4;
 
             p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
-            p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
+            p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_0, 8)), p16_1);
             p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
-            p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
+            p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_1, 8)), p16_3);
             p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
-            p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
+            p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_2, 8)), p16_5);
             p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
-            p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
+            p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_3, 8)), p16_7);
 
             sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
             sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
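On the scale broadcasts: _mm_unpackhi_epi64(x, x) and _mm_bsrli_si128(x, 8) both bring the upper 8 bytes of the register into the low half; they differ only in what fills the upper half (a duplicate vs. zeros), and _mm_cvtepi8_epi16 widens only the low 8 bytes, so either works here. A small check with made-up scale bytes (needs SSE4.1, e.g. -msse4.1):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    const int8_t s[16] = { -3, 5, 7, -9, 11, -13, 17, 19,
                           -21, 23, 29, -31, 37, -41, 43, 47 };
    const __m128i scales = _mm_loadu_si128((const __m128i *)s);

    // old form: duplicate the high 64 bits into both halves, then widen
    const __m128i hi_a = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales, scales));
    // new form: shift the high 64 bits down (zero-filled), then widen
    const __m128i hi_b = _mm_cvtepi8_epi16(_mm_bsrli_si128(scales, 8));

    int16_t a[8], b[8];
    _mm_storeu_si128((__m128i *)a, hi_a);
    _mm_storeu_si128((__m128i *)b, hi_b);
    printf("equal: %d\n", memcmp(a, b, sizeof(a)) == 0);
    return 0;
}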
@@ -9214,8 +9199,10 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 
         }
 
-        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
-        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
+        sumi_0 = _mm_sub_epi32(sumi_0, q8sclsub_0);
+        sumi_1 = _mm_sub_epi32(sumi_1, q8sclsub_1);
+        const __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi)), acc);
     }
 
     *s = hsum_float_8(acc);
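Why dropping the per-element subtraction is safe: q6_K stores each 6-bit weight biased by +32, so the block sum is sum_g(scale_g * sum_k((q6 - 32) * q8)). The bias is constant, so this equals sum_g(scale_g * sum_k(q6 * q8)) - 32 * sum_g(scale_g * bsums_g), where y[i].bsums already holds the per-16-element sums of q8. That second term is exactly the q8sclsub value computed above (the << 5 is the multiply by 32), subtracted once per block instead of through eight maddubs/sub pairs per loop iteration. A scalar model of the identity, with hypothetical names and a q6_K-like shape (16 scale groups of 16 elements):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define GROUPS     16
#define GROUP_SIZE 16

int main(void) {
    int8_t  scales[GROUPS];
    uint8_t q6[GROUPS * GROUP_SIZE];   // biased weights in [0, 63]
    int8_t  q8[GROUPS * GROUP_SIZE];
    long    bsums[GROUPS];             // per-group sums of q8

    srand(42);
    for (int g = 0; g < GROUPS; ++g) {
        scales[g] = (int8_t)(rand() % 256 - 128);
        bsums[g] = 0;
        for (int k = 0; k < GROUP_SIZE; ++k) {
            int idx = g * GROUP_SIZE + k;
            q6[idx] = (uint8_t)(rand() % 64);
            q8[idx] = (int8_t)(rand() % 256 - 128);
            bsums[g] += q8[idx];
        }
    }

    long direct = 0;                   // subtract the bias from every weight
    long sumi = 0, sclsub = 0;         // unsigned products plus one correction
    for (int g = 0; g < GROUPS; ++g) {
        for (int k = 0; k < GROUP_SIZE; ++k) {
            int idx = g * GROUP_SIZE + k;
            direct += (long)scales[g] * ((int)q6[idx] - 32) * q8[idx];
            sumi   += (long)scales[g] * q6[idx] * q8[idx];
        }
        sclsub += bsums[g] * scales[g] * 32;   // the SIMD code uses << 5
    }

    printf("direct=%ld rewritten=%ld\n", direct, sumi - sclsub);
    return direct != sumi - sclsub;
}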
@@ -220,8 +220,10 @@ void ggml_log_callback_default(enum ggml_log_level level, const char * text, voi
 
 
 void * ggml_aligned_malloc(size_t size) {
+    const int alignment = 64;
+
 #if defined(_MSC_VER) || defined(__MINGW32__)
-    return _aligned_malloc(size, TENSOR_ALIGNMENT);
+    return _aligned_malloc(size, alignment);
 #else
     if (size == 0) {
         GGML_LOG_WARN("Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
@@ -229,8 +231,9 @@ void * ggml_aligned_malloc(size_t size) {
     }
     void * aligned_memory = NULL;
 #ifdef GGML_USE_CPU_HBM
-    int result = hbw_posix_memalign(&aligned_memory, TENSOR_ALIGNMENT, size);
+    int result = hbw_posix_memalign(&aligned_memory, alignment, size);
 #elif TARGET_OS_OSX
+    GGML_UNUSED(alignment);
     kern_return_t alloc_status = vm_allocate((vm_map_t) mach_task_self(), (vm_address_t *) &aligned_memory, size, VM_FLAGS_ANYWHERE);
     int result = EFAULT;
     switch (alloc_status) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
int result = posix_memalign(&aligned_memory, TENSOR_ALIGNMENT, size);
|
int result = posix_memalign(&aligned_memory, alignment, size);
|
||||||
#endif
|
#endif
|
||||||
if (result != 0) {
|
if (result != 0) {
|
||||||
// Handle allocation failure
|
// Handle allocation failure
|
||||||
|
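All three ggml_aligned_malloc hunks swap the compile-time TENSOR_ALIGNMENT for a local 64-byte alignment (one x86-64 cache line). A minimal sketch of the posix_memalign branch as a standalone wrapper (illustrative, not the library's API):

#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// posix_memalign requires the alignment to be a power of two and a
// multiple of sizeof(void *); 64 satisfies both.
static void * aligned_malloc_64(size_t size) {
    const size_t alignment = 64;
    void * p = NULL;
    int result = posix_memalign(&p, alignment, size);
    if (result != 0) {   // EINVAL or ENOMEM
        fprintf(stderr, "aligned alloc of %zu bytes failed: %s\n",
                size, strerror(result));
        return NULL;
    }
    return p;            // release with free()
}

int main(void) {
    void * p = aligned_malloc_64(1024);
    if (p) {
        printf("%p mod 64 = %zu\n", p, (size_t)((uintptr_t)p % 64));
        free(p);
    }
    return 0;
}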
@@ -392,6 +395,8 @@ void ggml_bf16_to_fp32_row(const ggml_bf16_t * x, float * y, int64_t n) {
                                     16)));
         }
     }
+#endif
+#if defined(__AVX2__)
     if (ggml_cpu_has_avx2()) {
         for (; i + 8 <= n; i += 8) {
             _mm256_storeu_ps(y + i,
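The added #endif closes the AVX512 block and gives the AVX2 loop its own guard, so each runtime-dispatched path is compiled only where its intrinsics exist. The conversion itself is the same in every path: bfloat16 is the upper 16 bits of an IEEE binary32 value, so widening is a plain 16-bit shift. A scalar sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// bf16 -> f32: place the 16 stored bits in the high half of a 32-bit word.
static float bf16_to_fp32(uint16_t bf16) {
    uint32_t bits = (uint32_t)bf16 << 16;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main(void) {
    printf("%f\n", bf16_to_fp32(0x3F80)); // 1.0
    printf("%f\n", bf16_to_fp32(0x4049)); // 3.140625, pi truncated to bf16
    return 0;
}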