Mirror of https://github.com/jart/cosmopolitan.git, synced 2025-02-25 07:19:02 +00:00
This change makes quantized models (e.g. q4_0) go 10% faster on Macs, but it doesn't offer much improvement on Intel PC hardware. It syncs llama.cpp 699b1ad7fe6f7b9e41d3cb41e61a8cc3ea5fc6b5, which recently made a breaking change to nearly all of its file formats without providing any migration path. Since that would break hundreds upon hundreds of models on websites like HuggingFace, llama.com will support both file formats, because llama.com will never break the GGJT file format.
27 lines · 721 B · C
#ifndef COSMOPOLITAN_THIRD_PARTY_GGML_F16_H_
#define COSMOPOLITAN_THIRD_PARTY_GGML_F16_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

#define GGML_GELU_FP16
#define GGML_SILU_FP16

#ifdef __ARM_NEON
// we use the built-in 16-bit float type
typedef __fp16 ggml_fp16_t;
#else
typedef uint16_t ggml_fp16_t;
#endif

void ggml_fp16_init(void);

// convert FP16 <-> FP32
float ggml_fp16_to_fp32(ggml_fp16_t x);
ggml_fp16_t ggml_fp32_to_fp16(float x);

void ggml_fp16_to_fp32_row(const ggml_fp16_t* x, float* y, size_t n);
void ggml_fp32_to_fp16_row(const float* x, ggml_fp16_t* y, size_t n);

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_THIRD_PARTY_GGML_F16_H_ */
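To illustrate how these declarations might be used, here is a minimal sketch. The include path and the need to call ggml_fp16_init() before converting are assumptions inferred from the include guard and the function's presence in this header, not documented requirements.

// Minimal usage sketch (not from the repo). The include path below is
// inferred from the include guard and may differ in practice; it is also
// an assumption that ggml_fp16_init() must be called once before any
// conversions take place.
#include <stdio.h>
#include "third_party/ggml/fp16.h"

int main(void) {
  ggml_fp16_init();  // assumed to set up any conversion tables

  float in[4] = {0.0f, 1.0f, -2.5f, 3.14159f};
  ggml_fp16_t half[4];
  float out[4];

  ggml_fp32_to_fp16_row(in, half, 4);   // FP32 -> FP16 (lossy)
  ggml_fp16_to_fp32_row(half, out, 4);  // FP16 -> FP32

  for (int i = 0; i < 4; i++) {
    // values round-trip only within FP16 precision
    printf("%f -> %f\n", in[i], out[i]);
  }
  return 0;
}

The row variants convert whole buffers at once, which is the common case when loading or storing quantized tensor data; the scalar ggml_fp16_to_fp32/ggml_fp32_to_fp16 pair covers one value at a time.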