Merge branch 'master' into concedo_experimental
# Conflicts:
#	CMakeLists.txt
#	Makefile
#	ggml-opencl.c
Commit 3de34ee492
11 changed files with 260 additions and 95 deletions

2  .gitignore (vendored)

@@ -28,7 +28,7 @@ models/*
 /result
 /perplexity
 /embedding
-/benchmark-q4_0-matmult
+/benchmark-matmult
 /vdot
 /Pipfile

16  Makefile

@@ -144,19 +144,21 @@ ifdef LLAMA_PERF
     CXXFLAGS += -DGGML_PERF
 endif
 ifneq ($(filter aarch64%,$(UNAME_M)),)
+    # Apple M1, M2, etc.
+    # Raspberry Pi 3, 4, Zero 2 (64-bit)
     CFLAGS += -mcpu=native
     CXXFLAGS += -mcpu=native
 endif
 ifneq ($(filter armv6%,$(UNAME_M)),)
-    # Raspberry Pi 1, 2, 3
+    # Raspberry Pi 1, Zero
     CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
 endif
 ifneq ($(filter armv7%,$(UNAME_M)),)
-    # Raspberry Pi 4
+    # Raspberry Pi 2
     CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
 endif
 ifneq ($(filter armv8%,$(UNAME_M)),)
-    # Raspberry Pi 4
+    # Raspberry Pi 3, 4, Zero 2 (32-bit)
     CFLAGS += -mfp16-format=ieee -mno-unaligned-access
 endif
@@ -249,7 +251,7 @@ gpttype_adapter.o: gpttype_adapter.cpp
     $(CXX) $(CXXFLAGS) -c $< -o $@
 
 clean:
-    rm -vf *.o main quantize_llama quantize_gpt2 quantize_gptj quantize_neox quantize-stats perplexity embedding benchmark-q4_0-matmult main.exe quantize_llama.exe quantize_gptj.exe quantize_gpt2.exe quantize_neox.exe koboldcpp.dll koboldcpp_openblas.dll koboldcpp_noavx2.dll koboldcpp_openblas_noavx2.dll koboldcpp_clblast.dll koboldcpp.so koboldcpp_openblas.so koboldcpp_noavx2.so koboldcpp_openblas_noavx2.so koboldcpp_clblast.so gptj.exe gpt2.exe
+    rm -vf *.o main quantize_llama quantize_gpt2 quantize_gptj quantize_neox quantize-stats perplexity embedding benchmark-matmult main.exe quantize_llama.exe quantize_gptj.exe quantize_gpt2.exe quantize_neox.exe koboldcpp.dll koboldcpp_openblas.dll koboldcpp_noavx2.dll koboldcpp_openblas_noavx2.dll koboldcpp_clblast.dll koboldcpp.so koboldcpp_openblas.so koboldcpp_noavx2.so koboldcpp_openblas_noavx2.so koboldcpp_clblast.so gptj.exe gpt2.exe
 
 main: examples/main/main.cpp ggml.o llama.o common.o $(OBJS)
     $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
@@ -303,9 +305,9 @@ libllama.so: llama.o ggml.o $(OBJS)
 # Tests
 #
 
-benchmark: examples/benchmark/benchmark-q4_0-matmult.c ggml.o $(OBJS)
-    $(CXX) $(CXXFLAGS) $^ -o benchmark-q4_0-matmult $(LDFLAGS)
-    ./benchmark-q4_0-matmult
+benchmark-matmult: examples/benchmark/benchmark-matmult.cpp ggml.o $(OBJS)
+    $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+    ./$@
 
 .PHONY: tests
 tests:

examples/CMakeLists.txt

@@ -35,4 +35,5 @@ else()
     add_subdirectory(perplexity)
     add_subdirectory(embedding)
     add_subdirectory(save-load-state)
+    add_subdirectory(benchmark)
 endif()

4  examples/benchmark/CMakeLists.txt (new file)

@@ -0,0 +1,4 @@
+set(TARGET benchmark)
+add_executable(${TARGET} benchmark-matmult.cpp)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)

examples/benchmark/benchmark-matmult.cpp (renamed from examples/benchmark/benchmark-q4_0-matmult.c)

@@ -1,11 +1,3 @@
-/*
-    License: MIT License
-
-    Changelog:
-    - 2023-03-31 Initial version by Sebastian Apel (https://github.com/SebastianApel)
-
-*/
-
 #include <locale.h>
 #include "ggml.h"
 #include <assert.h>
@@ -45,7 +37,7 @@ float tensor_sum_elements(struct ggml_tensor * tensor) {
 
 #define TENSOR_TYPE_AS_STR(TYPE) TYPE == GGML_TYPE_F32 ? "FP32" : TYPE == GGML_TYPE_F16 ? "FP16" : TYPE == GGML_TYPE_Q4_0 ? "Q4_0" : TYPE == GGML_TYPE_Q4_1 ? "Q4_1" : "UNKNOWN"
 
-#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - ", #TENSOR, \
+#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5ld x %5ld x %5ld, nb = (%5li, %5li, %5li) - ", #TENSOR, \
         TENSOR->type,TENSOR_TYPE_AS_STR(TENSOR->type),\
         TENSOR->ne[0], TENSOR->ne[1], TENSOR->ne[2], TENSOR->nb[0], TENSOR->nb[1], TENSOR->nb[2]); \
         { float sum = tensor_sum_elements(TENSOR); printf("Sum of tensor %s is %6.2f\n",#TENSOR, sum); }
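
Note: the %5d to %5ld change in TENSOR_DUMP fixes a printf format mismatch, since ggml_tensor.ne holds int64_t extents and printing them with %d is undefined behavior. A minimal standalone sketch of the portable way to print such extents (PRId64 also avoids the long vs. long long ambiguity that %ld still carries on LLP64 Windows):

    #include <cinttypes>
    #include <cstdio>

    int main() {
        int64_t ne[3] = {4096, 11008, 1};   // tensor extents, as in ggml_tensor.ne
        printf("ne = %5" PRId64 " x %5" PRId64 " x %5" PRId64 "\n", ne[0], ne[1], ne[2]);
        return 0;
    }
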
@@ -98,12 +90,9 @@ int main(int argc, char ** argv) {
         }
     }
 
-
     // create the ggml context
     printf("Starting Test\n");
 
-
-
     struct ggml_context * ctx;
     //const int sizex = 4096;
     //const int sizey = 11008;
@@ -125,16 +114,18 @@ int main(int argc, char ** argv) {
 #endif
 
     //printf("Memsize required = %i\n", sizex*sizex);
-    ggml_type wtype = GGML_TYPE_F32;
 
     size_t ctx_size = 0;
-    ctx_size += sizex*sizey*ggml_type_sizef(wtype);
-    ctx_size += sizex*sizey*ggml_type_sizef(wtype);
-    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
-    ctx_size += sizex*sizeof(float);
-    ctx_size += 1024*1024*100;
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
+    ctx_size += sizex*sizez*ggml_type_sizef(GGML_TYPE_F32);
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
+    ctx_size += 1024*1024*16;
 
-    printf("Allocating Memory of size %li byes, %li MB\n",ctx_size, (ctx_size/1024/1024));
+    printf("Allocating Memory of size %li bytes, %li MB\n",ctx_size, (ctx_size/1024/1024));
 
     struct ggml_init_params params = {
         /*.mem_size   =*/ ctx_size,
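
Note: the rewritten sizing reserves one slot per tensor the benchmark actually creates (two F32 inputs, an F32 result of sizex by sizez, two Q4_0 copies, and two F32 staging buffers for the BLAS path) plus 16 MB of slack, instead of the old ad-hoc estimate. ggml_type_sizef returns bytes per element, fractional for quantized types. A back-of-envelope sketch, using the commented-out sizes from this file as illustrative values and assuming this vintage's Q4_0 layout (a 4-byte float scale plus 16 packed bytes per 32 elements):

    #include <cstdio>

    int main() {
        const long sizex = 4096, sizey = 11008;       // illustrative, from the commented defaults
        const double f32_per_elt  = 4.0;              // sizeof(float)
        const double q4_0_per_elt = 20.0 / 32.0;      // assumed: 4-byte d + 16 qs bytes per 32 elements
        printf("F32 matrix : %.1f MB\n", sizex * sizey * f32_per_elt  / (1024.0 * 1024.0));
        printf("Q4_0 matrix: %.1f MB\n", sizex * sizey * q4_0_per_elt / (1024.0 * 1024.0));
        return 0;
    }
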
@@ -217,7 +208,7 @@ int main(int argc, char ** argv) {
     const int dimz = sizez;
     long long int flops_per_dot_product = dimy + dimy;
     long long int flops_per_matrix = flops_per_dot_product * dimx * dimz; ;
-    printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - aboout %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
+    printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
 
 
     // Let's use the F32 result from above as a reference for the q4_0 multiplication
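
Note: flops_per_dot_product = dimy + dimy encodes the usual counting convention that a length-dimy dot product costs dimy multiplies plus dimy adds, and the full product performs dimx * dimz such dot products. A minimal check of that arithmetic with illustrative sizes:

    #include <cstdio>

    int main() {
        const long long dimx = 4096, dimy = 11008, dimz = 128;   // illustrative only
        const long long flops_per_dot_product = 2 * dimy;        // dimy muls + dimy adds
        const long long flops_per_matrix = flops_per_dot_product * dimx * dimz;
        printf("about %.2f gFLOP per matrix multiplication\n", 1.0 * flops_per_matrix / 1e9);
        return 0;
    }
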
@@ -234,7 +225,6 @@ int main(int argc, char ** argv) {
         ggml_graph_compute(ctx, &gf31);
         long long int stop = ggml_time_us();
         long long int usec = stop-start;
-        float sec = usec/1000000;
         float flops_per_usec = (1.0f*flops_per_matrix)/usec;
         printf("%9i;%8i;%6i;%6i;%6i;%15lli;%18lli;%19.2f\n",
             i,
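
Note: the deleted float sec = usec/1000000; was unused, and it was also subtly wrong: both operands are integral, so the division truncates before the conversion to float. A sketch of the pitfall:

    #include <cstdio>

    int main() {
        long long int usec = 1500000;             // 1.5 seconds
        float sec_truncated = usec / 1000000;     // integer division happens first: 1.0
        float sec_correct   = usec / 1000000.0f;  // 1.5
        printf("%.2f vs %.2f\n", sec_truncated, sec_correct);
        return 0;
    }
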

examples/common.cpp

@@ -1,13 +1,18 @@
 #include "common.h"
 
 #include <cassert>
+#include <iostream>
 #include <cstring>
 #include <fstream>
 #include <string>
 #include <iterator>
 #include <algorithm>
 #include <sstream>
-#include <iostream>
 
+#if defined(__APPLE__) && defined(__MACH__)
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+
 #if defined (_WIN32)
 #include <fcntl.h>
@@ -25,19 +30,43 @@ extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int
 #define CP_UTF8 65001
 #endif
 
-bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
-    // determine sensible default number of threads.
-    // std::thread::hardware_concurrency may not be equal to the number of cores, or may return 0.
+int32_t get_num_physical_cores() {
 #ifdef __linux__
     std::ifstream cpuinfo("/proc/cpuinfo");
-    params.n_threads = std::count(std::istream_iterator<std::string>(cpuinfo),
-                                  std::istream_iterator<std::string>(),
-                                  std::string("processor"));
-#endif
-    if (params.n_threads == 0) {
-        params.n_threads = std::max(1, (int32_t) std::thread::hardware_concurrency());
+    std::string line;
+    while (std::getline(cpuinfo, line)) {
+        std::size_t pos = line.find("cpu cores");
+        if (pos != std::string::npos) {
+            pos = line.find(": ", pos);
+            if (pos != std::string::npos) {
+                try {
+                    // Extract the number and return it
+                    return static_cast<int32_t>(std::stoul(line.substr(pos + 2)));
+                } catch (const std::invalid_argument &) {
+                    // Ignore if we could not parse
+                }
+            }
+        }
     }
+#elif defined(__APPLE__) && defined(__MACH__)
+    int32_t num_physical_cores;
+    size_t len = sizeof(num_physical_cores);
+    int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, NULL, 0);
+    if (result == 0) {
+        return num_physical_cores;
+    }
+    result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, NULL, 0);
+    if (result == 0) {
+        return num_physical_cores;
+    }
+#elif defined(_WIN32)
+    //TODO: Implement
+#endif
+    unsigned int n_threads = std::thread::hardware_concurrency();
+    return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
+}
 
+bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
     bool invalid_param = false;
     std::string arg;
     gpt_params default_params;
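
Note: the old default counted "processor" entries in /proc/cpuinfo, which yields logical CPUs and only works on Linux. The new get_num_physical_cores prefers physical core counts: the "cpu cores" field on Linux, sysctlbyname on macOS, and otherwise a heuristic over std::thread::hardware_concurrency. A standalone sketch of just that final fallback (the platform branches above are the authoritative version):

    #include <cstdio>
    #include <thread>

    // fallback only: halve logical CPU counts above 4 as a rough proxy for physical cores
    static int default_threads() {
        unsigned int n = std::thread::hardware_concurrency();
        return n > 0 ? (n <= 4 ? (int) n : (int) n / 2) : 4;
    }

    int main() {
        printf("default n_threads = %d\n", default_threads());
        return 0;
    }
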

examples/common.h

@@ -13,11 +13,12 @@
 //
 // CLI argument parsing
 //
+int32_t get_num_physical_cores();
 
 struct gpt_params {
     int32_t seed          = -1;   // RNG seed
-    int32_t n_threads     = std::min(4, (int32_t) std::thread::hardware_concurrency());
-    int32_t n_predict     = -1;   // new tokens to predict
+    int32_t n_threads     = get_num_physical_cores();
+    int32_t n_predict     = -1;   // new tokens to predict
     int32_t n_parts       = -1;   // amount of model parts (-1 = determine from model dimensions)
     int32_t n_ctx         = 512;  // context size
     int32_t n_batch       = 512;  // batch size for prompt processing (must be >=32 to use BLAS)

189  ggml.c

@@ -331,7 +331,7 @@ static ggml_fp16_t table_exp_f16[1 << 16];
 // precomputed f32 table for f16 (256 KB)
 static float table_f32_f16[1 << 16];
 
-#if defined(__ARM_NEON)
+#if defined(__ARM_NEON) || defined(__wasm_simd128__)
 #define B1(c,s,n)  0x ## n ## c , 0x ## n ## s
 #define B2(c,s,n)  B1(c,s,n ## c), B1(c,s,n ## s)
 #define B3(c,s,n)  B2(c,s,n ## c), B2(c,s,n ## s)
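
Note: widening this guard to __wasm_simd128__ makes the B1/B2/B3 token-pasting macros, which unroll into 256-entry bit-to-byte lookup tables such as table_b2b_u, available to the new WASM SIMD paths further down. A runtime sketch of what one table entry is assumed to precompute (the exact byte ordering is an assumption; the idea is that each of the 8 index bits becomes a 0x10 or 0x00 byte):

    #include <cstdint>

    // assumed equivalent: bit i of the index becomes byte i of the entry
    static uint64_t b2b_entry(uint8_t bits) {
        uint64_t out = 0;
        for (int i = 0; i < 8; ++i) {
            if (bits & (1u << i)) {
                out |= 0x10ull << (8 * i);   // 0x10: a set bit, pre-shifted into nibble bit 4
            }
        }
        return out;
    }
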
@@ -1096,7 +1096,7 @@ static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int
             const v128_t v  = wasm_f32x4_mul(srcv[l], wasm_f32x4_splat(id));
             const v128_t vf = wasm_f32x4_add(v, wasm_f32x4_splat(8.5f));
             const v128_t vi = wasm_i32x4_trunc_sat_f32x4(vf);
-            const v128_t vc = wasm_i32x4_min_u(vi, wasm_i32x4_splat(15));
+            const v128_t vc = wasm_i32x4_min(vi, wasm_i32x4_splat(15));
 
             y[i].qs[2*l + 0] = wasm_i32x4_extract_lane(vc, 0) | (wasm_i32x4_extract_lane(vc, 1) << 4);
             y[i].qs[2*l + 1] = wasm_i32x4_extract_lane(vc, 2) | (wasm_i32x4_extract_lane(vc, 3) << 4);
@@ -1993,8 +1993,8 @@ static void dequantize_row_q5_0(const void * restrict vx, float * restrict y, in
         const uint8_t vi = pp[l/2];
 
         // extract the 5-th bit from qh
-        const uint8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
-        const uint8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
+        const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
+        const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
 
         const int8_t vi0 = (vi & 0x0F) | vh0;
         const int8_t vi1 = (vi >>  4) | vh1;
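
Note: the 1 << to 1u << changes throughout this file are a genuine fix, not cosmetics: l runs up to QK5_0 - 1 = 31, and 1 << 31 overflows a signed int, which is undefined behavior, while 1u << 31 is well defined. A minimal illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t qh = 0x80000000u;   // only the top bit set
        int l = 31;
        // (1 << l) would be signed overflow here: undefined behavior
        const uint8_t vh = ((qh & (1u << l)) >> l) << 4;   // well-defined unsigned shift
        printf("vh = %u\n", vh);     // prints 16
        return 0;
    }
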
@@ -2030,8 +2030,8 @@ static void dequantize_row_q5_1(const void * restrict vx, float * restrict y, in
         const uint8_t vi = pp[l/2];
 
         // extract the 5-th bit from qh
-        const uint8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
-        const uint8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
+        const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
+        const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
 
         const uint8_t vi0 = (vi & 0x0F) | vh0;
         const uint8_t vi1 = (vi >>  4) | vh1;
@@ -3401,6 +3401,72 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void *
     }
 
     *s = vaddvq_f32(sumv);
+#elif defined(__wasm_simd128__)
+    v128_t sumv = wasm_f32x4_splat(0.0f);
+
+    uint64_t tmp[4];
+
+    for (int i = 0; i < nb; ++i) {
+        const block_q5_0 * restrict x0 = &x[i];
+        const block_q8_0 * restrict y0 = &y[i];
+
+        const v128_t m4b  = wasm_i8x16_splat(0x0F);
+        const v128_t s16b = wasm_i8x16_splat(0x10);
+
+        // extract the 5th bit
+        uint32_t qh;
+        memcpy(&qh, x0->qh, sizeof(qh));
+
+        tmp[0] = table_b2b_u[(qh >>  0) & 0xFF];
+        tmp[1] = table_b2b_u[(qh >>  8) & 0xFF];
+        tmp[2] = table_b2b_u[(qh >> 16) & 0xFF];
+        tmp[3] = table_b2b_u[(qh >> 24)       ];
+
+        const v128_t qhl = wasm_v128_load(tmp + 0);
+        const v128_t qhh = wasm_v128_load(tmp + 2);
+
+        const v128_t v0 = wasm_v128_load(x0->qs);
+
+        // 4-bit -> 8-bit
+        const v128_t v0l = wasm_v128_and (v0, m4b);
+        const v128_t v0h = wasm_u8x16_shr(v0, 4);
+
+        // interleave
+        const v128_t v0lz = wasm_v8x16_shuffle(v0l, v0h,  0, 16,  1, 17,  2, 18,  3, 19,  4, 20,  5, 21,  6, 22,  7, 23);
+        const v128_t v0hz = wasm_v8x16_shuffle(v0l, v0h,  8, 24,  9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
+
+        // add high bit and sub 16
+        const v128_t v0lf = wasm_i8x16_sub(wasm_v128_or(v0lz, qhl), s16b);
+        const v128_t v0hf = wasm_i8x16_sub(wasm_v128_or(v0hz, qhh), s16b);
+
+        // load y
+        const v128_t v1l = wasm_v128_load(y0->qs);
+        const v128_t v1h = wasm_v128_load(y0->qs + 16);
+
+        // int8x16 -> int16x8
+        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
+        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
+        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
+        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
+
+        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
+        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
+        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
+        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
+
+        const float x0d = GGML_FP16_TO_FP32(x0->d);
+
+        // dot product
+        sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
+                        wasm_i32x4_add(
+                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
+                                           wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
+                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
+                                           wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), wasm_f32x4_splat(x0d*y0->d)));
+    }
+
+    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
+         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
 #elif defined(__AVX2__)
     // Initialize accumulator with zeros
     __m256 acc = _mm256_setzero_ps();
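
Note: the new WASM path mirrors the NEON one above it: the 32 qh bits are fanned out through table_b2b_u into per-byte 0x10 masks, OR'd onto the unpacked low/high nibbles, and 16 is subtracted (s16b) so every weight lands in [-16, 15] before the widening 16-bit dot products. A scalar sketch of the per-element decode it vectorizes, with layout assumptions taken from the scalar q5_0 hunks elsewhere in this commit:

    #include <cstdint>

    // decode element l of a q5_0 block: 4 low bits from qs, the 5th bit from qh,
    // recentred by 16 and scaled by the block scale d
    static float q5_0_decode(const uint8_t * qs, uint32_t qh, float d, int l) {
        const uint8_t nib = (l % 2 == 0) ? (uint8_t)(qs[l / 2] & 0x0F)
                                         : (uint8_t)(qs[l / 2] >> 4);
        const uint8_t hi  = (uint8_t)(((qh & (1u << l)) >> l) << 4);   // 5th bit into bit 4
        return ((int8_t)(nib | hi) - 16) * d;
    }
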
@@ -3441,8 +3507,8 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void *
         for (int j = 0; j < QK8_0/2; j++) {
             const uint8_t v0 = x0[j];
 
-            const int x0_0h = ((qh & (1 << (2*j + 0))) >> (2*j + 0)) << 4;
-            const int x1_0h = ((qh & (1 << (2*j + 1))) >> (2*j + 1)) << 4;
+            const int x0_0h = ((qh & (1u << (2*j + 0))) >> (2*j + 0)) << 4;
+            const int x1_0h = ((qh & (1u << (2*j + 1))) >> (2*j + 1)) << 4;
 
             const int x0_0 = ((v0 & 0x0F) | x0_0h) - 16;
             const int x1_0 = ((v0 >>  4) | x1_0h) - 16;
@@ -3532,6 +3598,77 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void *
     }
 
     *s = vaddvq_f32(sumv) + summs;
+#elif defined(__wasm_simd128__)
+    v128_t sumv = wasm_f32x4_splat(0.0f);
+
+    float summs = 0.0f;
+
+    uint64_t tmp[4];
+
+    for (int i = 0; i < nb; ++i) {
+        const block_q5_1 * restrict x0 = &x[i];
+        const block_q8_1 * restrict y0 = &y[i];
+
+        summs += GGML_FP16_TO_FP32(x0->m) * (y0->s0 + y0->s1);
+
+        const v128_t m4b = wasm_i8x16_splat(0x0F);
+
+        // extract the 5th bit
+        uint32_t qh;
+        memcpy(&qh, x0->qh, sizeof(qh));
+
+        tmp[0] = table_b2b_u[(qh >>  0) & 0xFF];
+        tmp[1] = table_b2b_u[(qh >>  8) & 0xFF];
+        tmp[2] = table_b2b_u[(qh >> 16) & 0xFF];
+        tmp[3] = table_b2b_u[(qh >> 24)       ];
+
+        const v128_t qhl = wasm_v128_load(tmp + 0);
+        const v128_t qhh = wasm_v128_load(tmp + 2);
+
+        const v128_t v0 = wasm_v128_load(x0->qs);
+
+        // 4-bit -> 8-bit
+        const v128_t v0l = wasm_v128_and (v0, m4b);
+        const v128_t v0h = wasm_u8x16_shr(v0, 4);
+
+        static bool x = true;
+
+        // interleave
+        const v128_t v0lz = wasm_v8x16_shuffle(v0l, v0h,  0, 16,  1, 17,  2, 18,  3, 19,  4, 20,  5, 21,  6, 22,  7, 23);
+        const v128_t v0hz = wasm_v8x16_shuffle(v0l, v0h,  8, 24,  9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
+
+        // add high bit
+        const v128_t v0lf = wasm_v128_or(v0lz, qhl);
+        const v128_t v0hf = wasm_v128_or(v0hz, qhh);
+
+        // load y
+        const v128_t v1l = wasm_v128_load(y0->qs);
+        const v128_t v1h = wasm_v128_load(y0->qs + 16);
+
+        // int8x16 -> int16x8
+        const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
+        const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
+        const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
+        const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
+
+        const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
+        const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
+        const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
+        const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
+
+        const float x0d = GGML_FP16_TO_FP32(x0->d);
+
+        // dot product
+        sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
+                        wasm_i32x4_add(
+                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
+                                           wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
+                            wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
+                                           wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), wasm_f32x4_splat(x0d*y0->d)));
+    }
+
+    *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
+         wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
 #elif defined(__AVX2__)
     // Initialize accumulator with zeros
     __m256 acc = _mm256_setzero_ps();
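
Note: unlike q5_0, the q5_1 format carries a per-block minimum m alongside the scale d, so the dot product splits into a SIMD part over the 5-bit codes and the scalar summs correction: since x_j = d * code_j + m, we have sum_j x_j * y_j = d * sum_j code_j * y_j + m * sum_j y_j, with the y sums arriving precomputed as y0->s0 and y0->s1. A sketch of that algebraic split (treating y_sum as the already-scaled sum of the y block, which is an assumption about block_q8_1):

    // per-block contribution, following the code above:
    //   (d_x * d_y) * integer dot over the codes  +  m * y_sum
    // where y_sum (= y0->s0 + y0->s1) is assumed to be pre-scaled by d_y
    static float q5_1_block_dot(float d_x, float d_y, float m, int int_dot, float y_sum) {
        return d_x * d_y * (float) int_dot + m * y_sum;
    }
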
@@ -3575,8 +3712,8 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void *
         for (int j = 0; j < QK8_1/2; j++) {
             const uint8_t v0 = x0[j];
 
-            const int x0_0h = ((qh & (1 << (2*j + 0))) >> (2*j + 0)) << 4;
-            const int x1_0h = ((qh & (1 << (2*j + 1))) >> (2*j + 1)) << 4;
+            const int x0_0h = ((qh & (1u << (2*j + 0))) >> (2*j + 0)) << 4;
+            const int x1_0h = ((qh & (1u << (2*j + 1))) >> (2*j + 1)) << 4;
 
             const int x0_0 = (v0 & 0x0F) | x0_0h;
             const int x1_0 = (v0 >>  4) | x1_0h;
@@ -4052,6 +4189,7 @@ static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
     "DIAG_MASK_INF",
     "SOFT_MAX",
     "ROPE",
+    "ALIBI",
     "CONV_1D_1S",
     "CONV_1D_2S",
@@ -4100,6 +4238,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "diag_mask_inf(x)",
    "soft_max(x)",
     "rope(x)",
+    "alibi(x)",
     "conv_1d_1s(x)",
     "conv_1d_2s(x)",
@@ -4280,6 +4419,28 @@ bool ggml_is_quantized(enum ggml_type type) {
     return GGML_IS_QUANTIZED[type];
 }
 
+enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
+    enum ggml_type wtype = GGML_TYPE_COUNT;
+
+    switch (ftype) {
+        case GGML_FTYPE_ALL_F32:              wtype = GGML_TYPE_F32;   break;
+        case GGML_FTYPE_MOSTLY_F16:           wtype = GGML_TYPE_F16;   break;
+        case GGML_FTYPE_MOSTLY_Q4_0:          wtype = GGML_TYPE_Q4_0;  break;
+        case GGML_FTYPE_MOSTLY_Q4_1:          wtype = GGML_TYPE_Q4_1;  break;
+        case GGML_FTYPE_MOSTLY_Q4_2:          wtype = GGML_TYPE_Q4_2;  break;
+        case GGML_FTYPE_MOSTLY_Q4_3:          wtype = GGML_TYPE_Q4_3;  break;
+        case GGML_FTYPE_MOSTLY_Q5_0:          wtype = GGML_TYPE_Q5_0;  break;
+        case GGML_FTYPE_MOSTLY_Q5_1:          wtype = GGML_TYPE_Q5_1;  break;
+        case GGML_FTYPE_MOSTLY_Q8_0:          wtype = GGML_TYPE_Q8_0;  break;
+        case GGML_FTYPE_UNKNOWN:              wtype = GGML_TYPE_COUNT; break;
+        case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
+    }
+
+    GGML_ASSERT(wtype != GGML_TYPE_COUNT);
+
+    return wtype;
+}
+
 static inline bool ggml_is_transposed(const struct ggml_tensor * tensor) {
     return tensor->nb[0] > tensor->nb[1];
 }
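
Note: this moves the ftype-to-tensor-type mapping into ggml itself; the duplicate in common-ggml.cpp is deleted further down, and the exhaustive switch (no default) lets compilers warn when a new ftype is added. A hedged usage sketch:

    #include "ggml.h"
    #include <cstdio>

    int main() {
        // map a model file type to the tensor type its 2d weights are stored as
        enum ggml_type wtype = ggml_ftype_to_ggml_type(GGML_FTYPE_MOSTLY_Q5_0);
        printf("weights stored as ggml_type %d\n", (int) wtype);   // GGML_TYPE_Q5_0
        // mixed files (GGML_FTYPE_MOSTLY_Q4_1_SOME_F16) map to GGML_TYPE_COUNT and trip GGML_ASSERT
        return 0;
    }
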
@@ -13149,8 +13310,8 @@ size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t *
         memcpy(&qh, &y[i].qh, sizeof(qh));
 
         for (int l = 0; l < QK5_0; l += 2) {
-            const uint8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
-            const uint8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
+            const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
+            const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
 
             // cast to 16 bins
             const uint8_t vi0 = ((y[i].qs[l/2] & 0x0F) | vh0) / 2;
@@ -13179,8 +13340,8 @@ size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t *
         memcpy(&qh, &y[i].qh, sizeof(qh));
 
         for (int l = 0; l < QK5_1; l += 2) {
-            const uint8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
-            const uint8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
+            const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
+            const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
 
             // cast to 16 bins
             const uint8_t vi0 = ((y[i].qs[l/2] & 0x0F) | vh0) / 2;

18  ggml.h

@@ -232,6 +232,21 @@ extern "C" {
         GGML_TYPE_COUNT,
     };
 
+    // model file types
+    enum ggml_ftype {
+        GGML_FTYPE_UNKNOWN     = -1,
+        GGML_FTYPE_ALL_F32     = 0,
+        GGML_FTYPE_MOSTLY_F16  = 1,  // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q4_0 = 2,  // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q4_1 = 3,  // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
+        GGML_FTYPE_MOSTLY_Q4_2 = 5,  // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q4_3 = 6,  // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q8_0 = 7,  // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q5_0 = 8,  // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q5_1 = 9,  // except 1d tensors
+    };
+
     // available tensor operations:
     enum ggml_op {
         GGML_OP_NONE = 0,
@@ -385,6 +400,9 @@ extern "C" {
 
     GGML_API bool ggml_is_quantized(enum ggml_type type);
 
+    // TODO: temporary until model loading of ggml examples is refactored
+    GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);
+
     // main
 
     GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);

common-ggml.cpp

@@ -34,30 +34,6 @@ enum ggml_ftype ggml_parse_ftype(const char * str) {
     return ftype;
 }
 
-enum ggml_type ggml_ftype_to_ggml_type(const enum ggml_ftype ftype) {
-    ggml_type wtype = GGML_TYPE_COUNT;
-
-    switch (ftype) {
-        case GGML_FTYPE_ALL_F32:              wtype = GGML_TYPE_F32;   break;
-        case GGML_FTYPE_MOSTLY_F16:           wtype = GGML_TYPE_F16;   break;
-        case GGML_FTYPE_MOSTLY_Q4_0:          wtype = GGML_TYPE_Q4_0;  break;
-        case GGML_FTYPE_MOSTLY_Q4_1:          wtype = GGML_TYPE_Q4_1;  break;
-        case GGML_FTYPE_MOSTLY_Q4_2:          wtype = GGML_TYPE_Q4_2;  break;
-        case GGML_FTYPE_MOSTLY_Q4_3:          wtype = GGML_TYPE_Q4_3;  break;
-        case GGML_FTYPE_MOSTLY_Q5_0:          wtype = GGML_TYPE_Q5_0;  break;
-        case GGML_FTYPE_MOSTLY_Q5_1:          wtype = GGML_TYPE_Q5_1;  break;
-        case GGML_FTYPE_MOSTLY_Q8_0:          wtype = GGML_TYPE_Q8_0;  break;
-        case GGML_FTYPE_UNKNOWN:              wtype = GGML_TYPE_COUNT; break;
-        case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
-    }
-
-    if (wtype == GGML_TYPE_COUNT) {
-        fprintf(stderr, "%s: invalid model type %d\n", __func__, ftype);
-    }
-
-    return wtype;
-}
-
 bool ggml_common_quantize_0(
     std::ifstream & finp,
     std::ofstream & fout,

common-ggml.h

@@ -7,28 +7,11 @@
 #include <vector>
 #include <string>
 
-// model file types
-enum ggml_ftype {
-    GGML_FTYPE_UNKNOWN     = -1,
-    GGML_FTYPE_ALL_F32     = 0,
-    GGML_FTYPE_MOSTLY_F16  = 1,  // except 1d tensors
-    GGML_FTYPE_MOSTLY_Q4_0 = 2,  // except 1d tensors
-    GGML_FTYPE_MOSTLY_Q4_1 = 3,  // except 1d tensors
-    GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
-    GGML_FTYPE_MOSTLY_Q4_2 = 5,  // except 1d tensors
-    GGML_FTYPE_MOSTLY_Q4_3 = 6,  // except 1d tensors
-    GGML_FTYPE_MOSTLY_Q8_0 = 7,  // except 1d tensors
-    GGML_FTYPE_MOSTLY_Q5_0 = 8,  // except 1d tensors
-    GGML_FTYPE_MOSTLY_Q5_1 = 9,  // except 1d tensors
-};
-
 void ggml_print_ftypes(FILE * fp = stderr);
 
 enum ggml_ftype ggml_parse_ftype(const char * str);
 
-// TODO: temporary
-enum ggml_type ggml_ftype_to_ggml_type(const enum ggml_ftype ftype);
-
 bool ggml_common_quantize_0(
     std::ifstream & finp,
     std::ofstream & fout,