diff --git a/Makefile b/Makefile
index 4611f6881..17fac632e 100644
--- a/Makefile
+++ b/Makefile
@@ -206,7 +206,7 @@ endif
 BLAS_BUILD = 
 ifeq ($(OS),Windows_NT)
-	BLAS_BUILD = $(CXX) $(CXXFLAGS) ggml_blas.o expose.o llama_adapter.o llamaextra.o common.o libopenblas.lib -shared -o llamacpp_blas.dll $(LDFLAGS)
+	BLAS_BUILD = $(CXX) $(CXXFLAGS) ggml_blas.o ggml_v1.o expose.o common.o llama_adapter.o gptj_adapter.o libopenblas.lib -shared -o llamacpp_blas.dll $(LDFLAGS)
 else
 	BLAS_BUILD = @echo 'Your OS is $(OS) and does not appear to be Windows. If you want to use openblas, please link it manually with LLAMA_OPENBLAS=1'
 endif
@@ -247,20 +247,17 @@ llama.o: llama.cpp llama.h
 common.o: examples/common.cpp examples/common.h
 	$(CXX) $(CXXFLAGS) -c examples/common.cpp -o common.o
-llamaextra.o: llamaextra.cpp llamaextra.h
-	$(CXX) $(CXXFLAGS) -c llamaextra.cpp -o llamaextra.o
-
 expose.o: expose.cpp expose.h
 	$(CXX) $(CXXFLAGS) -c expose.cpp -o expose.o
 llama_adapter.o: 
 	$(CXX) $(CXXFLAGS) -c llama_adapter.cpp -o llama_adapter.o
-
-gptj_adapter.o: ggml.o
-	$(CXX) $(CXXFLAGS) otherarch/gptj.cpp otherarch/utils.cpp ggml.o gptj_adapter.cpp -o gptj_adapter.o
+
+gptj_adapter.o: 
+	$(CXX) $(CXXFLAGS) -c gptj_adapter.cpp -o gptj_adapter.o
 clean:
-	rm -vf *.o main quantize perplexity embedding main.exe quantize.exe llamacpp.dll llamacpp_blas.dll gpt2.exe gptj.exe
+	rm -vf *.o main quantize perplexity embedding main.exe quantize.exe llamacpp.dll llamacpp_blas.dll gptj.exe
 main: examples/main/main.cpp ggml.o llama.o common.o
 	$(CXX) $(CXXFLAGS) examples/main/main.cpp ggml.o llama.o common.o -o main $(LDFLAGS)
@@ -268,17 +265,10 @@ main: examples/main/main.cpp ggml.o llama.o common.o
 	@echo '==== Run ./main -h for help. ===='
 	@echo
-gptj: ggml.o
-	$(CXX) $(CXXFLAGS) otherarch/gptj.cpp otherarch/utils.cpp ggml.o -o gptj $(LDFLAGS)
+llamalib: ggml.o ggml_v1.o expose.o common.o llama_adapter.o gptj_adapter.o
+	$(CXX) $(CXXFLAGS) ggml.o ggml_v1.o expose.o common.o llama_adapter.o gptj_adapter.o -shared -o llamacpp.dll $(LDFLAGS)
-gptjold: ggml_v1.o
-	$(CXX) $(CXXFLAGS) otherarch/gptj_old.cpp otherarch/utils.cpp ggml_v1.o -o gptj $(LDFLAGS)
-
-
-llamalib: ggml.o expose.o llama_adapter.o llamaextra.o common.o
-	$(CXX) $(CXXFLAGS) expose.o ggml.o llama_adapter.o llamaextra.o common.o -shared -o llamacpp.dll $(LDFLAGS)
-
-llamalib_blas: ggml_blas.o expose.o llama_adapter.o llamaextra.o common.o
+llamalib_blas: ggml_blas.o ggml_v1.o expose.o common.o llama_adapter.o gptj_adapter.o
 	$(BLAS_BUILD)
 quantize: examples/quantize/quantize.cpp ggml.o llama.o
diff --git a/expose.cpp b/expose.cpp
index b436c9c30..29e06954e 100644
--- a/expose.cpp
+++ b/expose.cpp
@@ -7,11 +7,18 @@
 //No dynamic memory allocation! Setup structs with FIXED (known) shapes and sizes for ALL output fields
 //Python will ALWAYS provide the memory, we just write to it.
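The two comments above are the whole ABI contract for this DLL: every output field has a fixed, known size, and the Python caller owns the allocation. A minimal ctypes sketch of that pattern follows; the field names and buffer size here are placeholders, not the real definitions from expose.h or llamacpp_for_kobold.py.

# Sketch only: placeholder field names and sizes, not the real expose.h layout.
import ctypes

class generation_outputs_sketch(ctypes.Structure):
    # fixed-size output buffer; the native side only ever snprintf's into it
    _fields_ = [("status", ctypes.c_int),
                ("text", ctypes.c_char * 16384)]

def generate_sketch(dll, inputs_struct):
    # Python allocates the output struct, so the DLL never hands heap memory back across the FFI boundary
    outputs = generation_outputs_sketch()
    dll.generate(inputs_struct, ctypes.byref(outputs))
    return outputs.text.decode("utf-8", errors="ignore")

Because the buffer is preallocated on the Python side, the adapters can fill it with snprintf (as gptj_generate and llama_generate do below) and nothing ever needs to be freed by the caller.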
-#include "model_adapter.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "expose.h" -#include "llamaextra.h" - - +#include "model_adapter.cpp" extern "C" { @@ -23,13 +30,28 @@ extern "C" { std::string model = inputs.model_filename; file_format = check_file_format(model.c_str()); - printf("\n---\nIdentified as LLAMA model: (ver %d)\nAttempting to Load...\n---\n", file_format); - - return llama_load_model(inputs, file_format); + + if(file_format==GPTJ1 || file_format==GPTJ2) + { + printf("\n---\nIdentified as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format); + return gptj_load_model(inputs, file_format); + } + else + { + printf("\n---\nIdentified as LLAMA model: (ver %d)\nAttempting to Load...\n---\n", file_format); + return llama_load_model(inputs, file_format); + } } generation_outputs generate(const generation_inputs inputs, generation_outputs &output) { - return llama_generate(inputs, output); + if (file_format == GPTJ1 || file_format == GPTJ2) + { + return gptj_generate(inputs, output); + } + else + { + return llama_generate(inputs, output); + } } } \ No newline at end of file diff --git a/gptj_adapter.cpp b/gptj_adapter.cpp index 1a42d4cbf..2a6ff5b6c 100644 --- a/gptj_adapter.cpp +++ b/gptj_adapter.cpp @@ -10,12 +10,17 @@ #include #include "model_adapter.h" #include "otherarch/otherarch.h" -#include "llamaextra.h" + +//concat source files into one file for compilation purposes +#include "otherarch/utils.cpp" +#include "otherarch/gptj_v1.cpp" +#include "otherarch/gptj_v2.cpp" //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt) static FileFormat file_format = FileFormat::FAIL; static gpt_vocab vocab; -static gptj_model model; +static gptj_model_v1 model_v1; +static gptj_model model_v2; static gpt_params params; static int n_past = 0; static int n_threads = 4; @@ -35,7 +40,7 @@ bool gptj_load_model(const load_model_inputs inputs, FileFormat in_file_format) n_batch = params.n_batch = inputs.batch_size; modelname = params.model = inputs.model_filename; - if (!gptj_model_load(params.model, model, vocab)) { + if (!legacy_gptj_model_load(params.model, model_v1, vocab)) { fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); return false; } @@ -46,7 +51,7 @@ bool gptj_load_model(const load_model_inputs inputs, FileFormat in_file_format) } // determine the required inference memory per token: - legacy_gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); + legacy_gptj_eval(model_v1, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); return true; } @@ -77,9 +82,9 @@ generation_outputs gptj_generate(const generation_inputs inputs, generation_outp std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); //truncate to front of the prompt if its too long - if (embd_inp.size() + params.n_predict > model.hparams.n_ctx) + if (embd_inp.size() + params.n_predict > model_v1.hparams.n_ctx) { - int offset = embd_inp.size() - model.hparams.n_ctx + params.n_predict; + int offset = embd_inp.size() - model_v1.hparams.n_ctx + params.n_predict; embd_inp = std::vector(embd_inp.begin() + offset, embd_inp.end()); } @@ -130,7 +135,7 @@ generation_outputs gptj_generate(const generation_inputs inputs, generation_outp timer_start(); double time1 = 0, time2 = 0; unsigned int embd_inp_size = embd_inp.size(); - const int n_vocab = model.hparams.n_vocab; + const int n_vocab = model_v1.hparams.n_vocab; printf("\n"); @@ -151,7 +156,7 @@ generation_outputs 
gptj_generate(const generation_inputs inputs, generation_outp printf("\rGenerating (%d / %d tokens)", (1 + params.n_predict - remaining_tokens), params.n_predict); } - if (!gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) + if (!legacy_gptj_eval(model_v1, params.n_threads, n_past, embd, logits, mem_per_token)) { fprintf(stderr, "Failed to predict\n"); snprintf(output.text, sizeof(output.text), "%s", ""); diff --git a/llama_adapter.cpp b/llama_adapter.cpp index d9f9dac05..39f79395c 100644 --- a/llama_adapter.cpp +++ b/llama_adapter.cpp @@ -12,6 +12,9 @@ #include "ggml.h" #include "model_adapter.h" +//for easier compilation +#include "llamaextra.cpp" + //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt) static FileFormat file_format = FileFormat::FAIL; static llama_context_params ctx_params; @@ -109,7 +112,6 @@ generation_outputs llama_generate(const generation_inputs inputs, generation_out embd_inp = ::llama_tokenize(ctx, params.prompt, true); } - //params.n_predict = std::min(params.n_predict, params.n_ctx - (int) embd_inp.size()); //truncate to front of the prompt if its too long if (embd_inp.size() + params.n_predict > params.n_ctx) { @@ -124,11 +126,6 @@ generation_outputs llama_generate(const generation_inputs inputs, generation_out int last_n_size = params.repeat_last_n; last_n_tokens.resize(last_n_size); - //display usage - // std::string tst = " "; - // char * tst2 = (char*)tst.c_str(); - // gpt_print_usage(1,&tst2,params); - std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0); n_past = 0; @@ -194,7 +191,7 @@ generation_outputs llama_generate(const generation_inputs inputs, generation_out { printf("\rGenerating (%d / %d tokens)", (1 + params.n_predict - remaining_tokens), params.n_predict); } - //printf("\nnp:%d embd:%d txt:%s",n_past,embd.size(),llama_token_to_str(ctx, embd[0])); + if (llama_eval(ctx, embd.data(), embdsize, n_past, params.n_threads)) { fprintf(stderr, "Failed to predict\n"); diff --git a/llamacpp.dll b/llamacpp.dll index 9936ef2cb..8a514f776 100644 Binary files a/llamacpp.dll and b/llamacpp.dll differ diff --git a/llamacpp_blas.dll b/llamacpp_blas.dll index 8ac9ee21d..f8398003e 100644 Binary files a/llamacpp_blas.dll and b/llamacpp_blas.dll differ diff --git a/llamacpp_for_kobold.py b/llamacpp_for_kobold.py index b1b426095..c64819957 100644 --- a/llamacpp_for_kobold.py +++ b/llamacpp_for_kobold.py @@ -5,6 +5,7 @@ import ctypes import os +import psutil import argparse import json, http.server, threading, socket, sys, time @@ -315,7 +316,7 @@ def main(args): mdl_nparts = sum(1 for n in range(1, 9) if os.path.exists(f"{ggml_selected_file}.{n}")) + 1 modelname = os.path.abspath(ggml_selected_file) - print(f"Loading model: {modelname}, Parts: {mdl_nparts}, Threads: {args.threads}") + print(f"Loading model: {modelname} \n[Parts: {mdl_nparts}, Threads: {args.threads}]") loadok = load_model(modelname,8,maxctx,mdl_nparts,args.threads) print("Load Model OK: " + str(loadok)) @@ -349,7 +350,10 @@ if __name__ == '__main__': portgroup.add_argument("--port", help="Port to listen on", default=5001, type=int) portgroup.add_argument("port", help="Port to listen on", default=5001, nargs="?", type=int) parser.add_argument("--host", help="Host IP to listen on. 
If empty, all routable interfaces are accepted.", default="") - default_threads = (os.cpu_count() if os.cpu_count()<=6 else max(6,os.cpu_count()-2)) + + physical_core_limit = psutil.cpu_count(logical=False) + # logical_core_limit = (os.cpu_count() if os.cpu_count()<=4 else max(4,os.cpu_count()-4)) + default_threads = (physical_core_limit if physical_core_limit<=4 else max(4,physical_core_limit-1)) parser.add_argument("--threads", help="Use a custom number of threads if specified. Otherwise, uses an amount based on CPU cores", type=int, default=default_threads) parser.add_argument("--stream", help="Uses pseudo streaming", action='store_true') parser.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true') diff --git a/llamaextra.cpp b/llamaextra.cpp index 2407bf53e..2dcf4ad80 100644 --- a/llamaextra.cpp +++ b/llamaextra.cpp @@ -18,65 +18,7 @@ #include #endif -static clock_t bench_timer = 0; -void timer_start() -{ - bench_timer = clock(); -} -double timer_check() -{ - double ticks = clock() - bench_timer; - double time_taken = ((double)ticks) / CLOCKS_PER_SEC; - return time_taken; -} - -void print_tok_vec(std::vector &embd) -{ - std::cout << "["; - bool first = true; - for (auto i : embd) - { - if (!first) - { - std::cout << ','; - } - first = false; - std::cout << i; - } - std::cout << "]"; -} - -//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt) - FileFormat check_file_format(const std::string & fname) - { - std::vector f_buf(1024*1024); - - auto fin = std::ifstream(fname, std::ios::binary); - fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size()); - if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); - return FileFormat::FAIL; - } - - FileFormat fileformat = FileFormat::FAIL; - uint32_t magic; - fin.read((char *) &magic, sizeof(magic)); - if (magic == 0x67676d6c) { //v1 format ggml, alpaca - fileformat = FileFormat::GGML; - } - else if(magic == 0x67676d66) //v2 format ggmf - { - fileformat = FileFormat::GGHF; - } - else if(magic == 0x67676a74) //v3 format ggjt - { - fileformat = FileFormat::GGJT; //ggjt by default - } - fin.close(); - - return fileformat; - } //freeze all the configurations for model loading for v1 and v2 formats struct llama_context * legacy_llama_init_from_file(const char * path_model, struct llama_context_params params) diff --git a/llamaextra.h b/llamaextra.h index c31e840b8..5d5829221 100644 --- a/llamaextra.h +++ b/llamaextra.h @@ -15,22 +15,7 @@ #include "llama.h" #include "ggml.h" -//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt) -enum FileFormat -{ - FAIL=0, - GGML=1, - GGHF=2, - GGJT=3, - GPTJ1=100, - GPTJ2=101 -}; - -void timer_start(); -double timer_check(); -void print_tok_vec(std::vector &embd); -FileFormat check_file_format(const std::string & fname); std::vector legacy_llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos); static bool legacy_llama_model_load(const std::string & fname, llama_context & lctx, int n_ctx, int n_parts, ggml_type memory_type, bool vocab_only, llama_progress_callback progress_callback, void *progress_callback_user_data); diff --git a/model_adapter.cpp b/model_adapter.cpp new file mode 100644 index 000000000..e4edff05e --- /dev/null +++ b/model_adapter.cpp @@ -0,0 +1,79 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "model_adapter.h" + +static clock_t bench_timer = 0; + +void timer_start() +{ + bench_timer = clock(); +} 
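The check_file_format() added below is what lets one DLL serve both LLaMA and GPT-J weights: it sniffs the 4-byte magic, and for the old ggml magic it additionally peeks at the vocabulary size to recognise GPT-J files. A rough Python restatement of the same sniffing logic, for illustration only; the magic constants and the 50400 vocab size are taken from the C++ below, and little-endian layout is assumed, which is how these headers are written in practice.

import struct

def sniff_file_format(path):
    # mirrors check_file_format() in model_adapter.cpp (illustrative, not part of the patch)
    with open(path, "rb") as f:
        magic = struct.unpack("<I", f.read(4))[0]
        if magic == 0x67676d6c:              # v1 ggml / alpaca, and old GPT-J models
            vocab_size = struct.unpack("<i", f.read(4))[0]
            return "GPTJ1" if vocab_size == 50400 else "GGML"
        if magic == 0x67676d66:              # v2 ggmf
            return "GGHF"
        if magic == 0x67676a74:              # v3 ggjt
            return "GGJT"
        return "FAIL"

expose.cpp then uses the returned FileFormat to route load_model and generate to either the gptj_* or the llama_* adapter.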
+double timer_check() +{ + double ticks = clock() - bench_timer; + double time_taken = ((double)ticks) / CLOCKS_PER_SEC; + return time_taken; +} + +void print_tok_vec(std::vector &embd) +{ + std::cout << "["; + bool first = true; + for (auto i : embd) + { + if (!first) + { + std::cout << ','; + } + first = false; + std::cout << i; + } + std::cout << "]"; +} + +//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt) + FileFormat check_file_format(const std::string & fname) + { + std::vector f_buf(1024*1024); + + auto fin = std::ifstream(fname, std::ios::binary); + fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size()); + if (!fin) { + fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); + return FileFormat::FAIL; + } + + FileFormat fileformat = FileFormat::FAIL; + uint32_t magic; + fin.read((char *) &magic, sizeof(magic)); + if (magic == 0x67676d6c) { //v1 format ggml, alpaca, old gptj and gpt2 models + fileformat = FileFormat::GGML; + //we need to read more to determine + int32_t vocabsiz = 0; + fin.read((char *) &vocabsiz, sizeof(int32_t)); + if(vocabsiz==50400) //know GPT-J vocab size + { + fileformat = FileFormat::GPTJ1; + } + } + else if(magic == 0x67676d66) //v2 format ggmf + { + fileformat = FileFormat::GGHF; + } + else if(magic == 0x67676a74) //v3 format ggjt + { + fileformat = FileFormat::GGJT; //ggjt by default + } + fin.close(); + + return fileformat; + } \ No newline at end of file diff --git a/model_adapter.h b/model_adapter.h index 803c08563..1540eee52 100644 --- a/model_adapter.h +++ b/model_adapter.h @@ -1,8 +1,39 @@ #pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "expose.h" -#include "llamaextra.h" + +//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt) +enum FileFormat +{ + FAIL=0, + GGML=1, + GGHF=2, + GGJT=3, + + GPTJ1=100, + GPTJ2=101, + + GPT2=200, +}; bool llama_load_model(const load_model_inputs inputs, FileFormat file_format); generation_outputs llama_generate(const generation_inputs inputs, generation_outputs &output); bool gptj_load_model(const load_model_inputs inputs, FileFormat in_file_format); -generation_outputs gptj_generate(const generation_inputs inputs, generation_outputs &output); \ No newline at end of file +generation_outputs gptj_generate(const generation_inputs inputs, generation_outputs &output); + + +void timer_start(); +double timer_check(); +void print_tok_vec(std::vector &embd); +FileFormat check_file_format(const std::string & fname); \ No newline at end of file diff --git a/otherarch/ggml_v1.c b/otherarch/ggml_v1.c index a5bbff566..fd5e1d54f 100644 --- a/otherarch/ggml_v1.c +++ b/otherarch/ggml_v1.c @@ -82,39 +82,39 @@ typedef void* thread_ret_t; #define static_assert(cond, msg) _Static_assert(cond, msg) #endif -/*#define GGML_PERF*/ -#define GGML_DEBUG 0 -#define GGML_GELU_FP16 +/*#define GGML_V1_PERF*/ +#define GGML_V1_DEBUG 0 +#define GGML_V1_GELU_FP16 -#define GGML_SOFT_MAX_UNROLL 4 -#define GGML_VEC_DOT_UNROLL 2 +#define GGML_V1_SOFT_MAX_UNROLL 4 +#define GGML_V1_VEC_DOT_UNROLL 2 -#ifdef GGML_USE_ACCELERATE +#ifdef GGML_V1_USE_ACCELERATE // uncomment to use vDSP for soft max computation // note: not sure if it is actually faster -//#define GGML_SOFT_MAX_ACCELERATE +//#define GGML_V1_SOFT_MAX_ACCELERATE #endif #if UINTPTR_MAX == 0xFFFFFFFF - #define GGML_MEM_ALIGN 4 + #define GGML_V1_MEM_ALIGN 4 #else - #define GGML_MEM_ALIGN 16 + #define GGML_V1_MEM_ALIGN 16 #endif #define UNUSED(x) (void)(x) #define SWAP(x, y, T) do { T 
SWAP = x; x = y; y = SWAP; } while (0) -#define GGML_ASSERT(x) \ +#define GGML_V1_ASSERT(x) \ do { \ if (!(x)) { \ - fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ + fprintf(stderr, "GGML_V1_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ abort(); \ } \ } while (0) -#ifdef GGML_USE_ACCELERATE +#ifdef GGML_V1_USE_ACCELERATE #include -#elif GGML_USE_OPENBLAS +#elif GGML_V1_USE_OPENBLAS #include #endif @@ -124,7 +124,7 @@ typedef void* thread_ret_t; #define MAX(a, b) ((a) > (b) ? (a) : (b)) // floating point type used to accumulate sums -typedef double ggml_float; +typedef double ggml_v1_float; // 16-bit float // on Arm, we use __fp16 @@ -137,11 +137,11 @@ typedef double ggml_float; // #include -#define GGML_COMPUTE_FP16_TO_FP32(x) (x) -#define GGML_COMPUTE_FP32_TO_FP16(x) (x) +#define GGML_V1_COMPUTE_FP16_TO_FP32(x) (x) +#define GGML_V1_COMPUTE_FP32_TO_FP16(x) (x) -#define GGML_FP16_TO_FP32(x) (x) -#define GGML_FP32_TO_FP16(x) (x) +#define GGML_V1_FP16_TO_FP32(x) (x) +#define GGML_V1_FP32_TO_FP16(x) (x) #else @@ -159,8 +159,8 @@ typedef double ggml_float; #ifdef __F16C__ -#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) +#define GGML_V1_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) +#define GGML_V1_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) #else @@ -185,7 +185,7 @@ static inline uint32_t fp32_to_bits(float f) { return fp32.as_bits; } -static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { +static inline float ggml_v1_compute_fp16_to_fp32(ggml_v1_fp16_t h) { const uint32_t w = (uint32_t) h << 16; const uint32_t sign = w & UINT32_C(0x80000000); const uint32_t two_w = w + w; @@ -208,7 +208,7 @@ static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { return fp32_from_bits(result); } -static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { +static inline ggml_v1_fp16_t ggml_v1_compute_fp32_to_fp16(float f) { #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) const float scale_to_inf = 0x1.0p+112f; const float scale_to_zero = 0x1.0p-110f; @@ -234,8 +234,8 @@ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign); } -#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) +#define GGML_V1_COMPUTE_FP16_TO_FP32(x) ggml_v1_compute_fp16_to_fp32(x) +#define GGML_V1_COMPUTE_FP32_TO_FP16(x) ggml_v1_compute_fp32_to_fp16(x) #endif // __F16C__ @@ -246,37 +246,37 @@ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { // // precomputed gelu table for f16 (128 KB) -static ggml_fp16_t table_gelu_f16[1 << 16]; +static ggml_v1_fp16_t table_gelu_f16[1 << 16]; // precomputed exp table for f16 (128 KB) -static ggml_fp16_t table_exp_f16[1 << 16]; +static ggml_v1_fp16_t table_exp_f16[1 << 16]; // precomputed f32 table for f16 (256 KB) static float table_f32_f16[1 << 16]; -// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, -// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON. -#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16) +// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_v1_lookup_fp16_to_fp32, +// so we define GGML_V1_FP16_TO_FP32 and GGML_V1_FP32_TO_FP16 elsewhere for NEON. 
+#if !defined(GGML_V1_FP16_TO_FP32) || !defined(GGML_V1_FP32_TO_FP16) -inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { +inline static float ggml_v1_lookup_fp16_to_fp32(ggml_v1_fp16_t f) { uint16_t s; memcpy(&s, &f, sizeof(uint16_t)); return table_f32_f16[s]; } -#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) -#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) +#define GGML_V1_FP16_TO_FP32(x) ggml_v1_lookup_fp16_to_fp32(x) +#define GGML_V1_FP32_TO_FP16(x) GGML_V1_COMPUTE_FP32_TO_FP16(x) #endif // note: do not use these inside ggml.c // these are meant to be used via the ggml.h API -float ggml_fp16_to_fp32(ggml_fp16_t x) { - return GGML_FP16_TO_FP32(x); +float ggml_v1_fp16_to_fp32(ggml_v1_fp16_t x) { + return GGML_V1_FP16_TO_FP32(x); } -ggml_fp16_t ggml_fp32_to_fp16(float x) { - return GGML_FP32_TO_FP16(x); +ggml_v1_fp16_t ggml_v1_fp32_to_fp16(float x) { + return GGML_V1_FP32_TO_FP16(x); } // @@ -285,54 +285,54 @@ ggml_fp16_t ggml_fp32_to_fp16(float x) { #if defined(_MSC_VER) || defined(__MINGW32__) static int64_t timer_freq; -void ggml_time_init(void) { +void ggml_v1_time_init(void) { LARGE_INTEGER frequency; QueryPerformanceFrequency(&frequency); timer_freq = frequency.QuadPart; } -int64_t ggml_time_ms(void) { +int64_t ggml_v1_time_ms(void) { LARGE_INTEGER t; QueryPerformanceCounter(&t); return (t.QuadPart * 1000) / timer_freq; } -int64_t ggml_time_us(void) { +int64_t ggml_v1_time_us(void) { LARGE_INTEGER t; QueryPerformanceCounter(&t); return (t.QuadPart * 1000000) / timer_freq; } #else -void ggml_time_init(void) {} -int64_t ggml_time_ms(void) { +void ggml_v1_time_init(void) {} +int64_t ggml_v1_time_ms(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000; } -int64_t ggml_time_us(void) { +int64_t ggml_v1_time_us(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000; } #endif -int64_t ggml_cycles(void) { +int64_t ggml_v1_cycles(void) { return clock(); } -int64_t ggml_cycles_per_ms(void) { +int64_t ggml_v1_cycles_per_ms(void) { return CLOCKS_PER_SEC/1000; } -#ifdef GGML_PERF -#define ggml_perf_time_ms() ggml_time_ms() -#define ggml_perf_time_us() ggml_time_us() -#define ggml_perf_cycles() ggml_cycles() -#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms() +#ifdef GGML_V1_PERF +#define ggml_v1_perf_time_ms() ggml_v1_time_ms() +#define ggml_v1_perf_time_us() ggml_v1_time_us() +#define ggml_v1_perf_cycles() ggml_v1_cycles() +#define ggml_v1_perf_cycles_per_ms() ggml_v1_cycles_per_ms() #else -#define ggml_perf_time_ms() 0 -#define ggml_perf_time_us() 0 -#define ggml_perf_cycles() 0 -#define ggml_perf_cycles_per_ms() 0 +#define ggml_v1_perf_time_ms() 0 +#define ggml_v1_perf_time_us() 0 +#define ggml_v1_perf_cycles() 0 +#define ggml_v1_perf_cycles_per_ms() 0 #endif // @@ -359,86 +359,86 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); // we then implement the fundamental computation operations below using only these macros // adding support for new architectures requires to define the corresponding SIMD macros // -// GGML_F32_STEP / GGML_F16_STEP +// GGML_V1_F32_STEP / GGML_V1_F16_STEP // number of elements to process in a single step // -// GGML_F32_EPR / GGML_F16_EPR +// GGML_V1_F32_EPR / GGML_V1_F16_EPR // number of elements to fit in a single register // #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) -#define GGML_SIMD +#define GGML_V1_SIMD // F32 NEON -#define GGML_F32_STEP 16 -#define 
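For reference, the table_f32_f16 lookup above works because there are only 65536 possible half-precision bit patterns, so every conversion can be precomputed once at init and a runtime conversion becomes a single array index. An illustrative sketch of the same idea (not part of the patch; numpy assumed):

import numpy as np

# 65536-entry table: index = raw fp16 bit pattern, value = the corresponding f32
table_f32_f16 = np.arange(1 << 16, dtype=np.uint16).view(np.float16).astype(np.float32)

def fp16_bits_to_fp32(bits: int) -> float:
    # same spirit as ggml_v1_lookup_fp16_to_fp32(): no arithmetic, just a table read
    return float(table_f32_f16[bits])

assert fp16_bits_to_fp32(0x3C00) == 1.0  # 0x3C00 is the fp16 encoding of 1.0

As the comment above notes, ARM NEON is the exception: when the hardware converts fp16 directly, the table lookup is skipped entirely.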
GGML_F32_EPR 4 +#define GGML_V1_F32_STEP 16 +#define GGML_V1_F32_EPR 4 -#define GGML_F32x4 float32x4_t -#define GGML_F32x4_ZERO vdupq_n_f32(0.0f) -#define GGML_F32x4_SET1(x) vdupq_n_f32(x) -#define GGML_F32x4_LOAD vld1q_f32 -#define GGML_F32x4_STORE vst1q_f32 -#define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c) -#define GGML_F32x4_ADD vaddq_f32 -#define GGML_F32x4_MUL vmulq_f32 +#define GGML_V1_F32x4 float32x4_t +#define GGML_V1_F32x4_ZERO vdupq_n_f32(0.0f) +#define GGML_V1_F32x4_SET1(x) vdupq_n_f32(x) +#define GGML_V1_F32x4_LOAD vld1q_f32 +#define GGML_V1_F32x4_STORE vst1q_f32 +#define GGML_V1_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c) +#define GGML_V1_F32x4_ADD vaddq_f32 +#define GGML_V1_F32x4_MUL vmulq_f32 #if defined(__ARM_FEATURE_QRDMX) - #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x) + #define GGML_V1_F32x4_REDUCE_ONE(x) vaddvq_f32(x) #else - #define GGML_F32x4_REDUCE_ONE(x) \ + #define GGML_V1_F32x4_REDUCE_ONE(x) \ (vgetq_lane_f32(x, 0) + \ vgetq_lane_f32(x, 1) + \ vgetq_lane_f32(x, 2) + \ vgetq_lane_f32(x, 3)) #endif -#define GGML_F32x4_REDUCE(res, x) \ +#define GGML_V1_F32x4_REDUCE(res, x) \ { \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/2; ++i) { \ x[2*i] = vaddq_f32(x[2*i], x[2*i+1]); \ } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/4; ++i) { \ x[4*i] = vaddq_f32(x[4*i], x[4*i+2]); \ } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/8; ++i) { \ x[8*i] = vaddq_f32(x[8*i], x[8*i+4]); \ } \ - res = GGML_F32x4_REDUCE_ONE(x[0]); \ + res = GGML_V1_F32x4_REDUCE_ONE(x[0]); \ } -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE +#define GGML_V1_F32_VEC GGML_V1_F32x4 +#define GGML_V1_F32_VEC_ZERO GGML_V1_F32x4_ZERO +#define GGML_V1_F32_VEC_SET1 GGML_V1_F32x4_SET1 +#define GGML_V1_F32_VEC_LOAD GGML_V1_F32x4_LOAD +#define GGML_V1_F32_VEC_STORE GGML_V1_F32x4_STORE +#define GGML_V1_F32_VEC_FMA GGML_V1_F32x4_FMA +#define GGML_V1_F32_VEC_ADD GGML_V1_F32x4_ADD +#define GGML_V1_F32_VEC_MUL GGML_V1_F32x4_MUL +#define GGML_V1_F32_VEC_REDUCE GGML_V1_F32x4_REDUCE // F16 NEON #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) - #define GGML_F16_STEP 32 - #define GGML_F16_EPR 8 + #define GGML_V1_F16_STEP 32 + #define GGML_V1_F16_EPR 8 - #define GGML_F16x8 float16x8_t - #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) - #define GGML_F16x8_SET1(x) vdupq_n_f16(x) - #define GGML_F16x8_LOAD vld1q_f16 - #define GGML_F16x8_STORE vst1q_f16 - #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) - #define GGML_F16x8_ADD vaddq_f16 - #define GGML_F16x8_MUL vmulq_f16 - #define GGML_F16x8_REDUCE(res, x) \ + #define GGML_V1_F16x8 float16x8_t + #define GGML_V1_F16x8_ZERO vdupq_n_f16(0.0f) + #define GGML_V1_F16x8_SET1(x) vdupq_n_f16(x) + #define GGML_V1_F16x8_LOAD vld1q_f16 + #define GGML_V1_F16x8_STORE vst1q_f16 + #define GGML_V1_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) + #define GGML_V1_F16x8_ADD vaddq_f16 + #define GGML_V1_F16x8_MUL vmulq_f16 + #define GGML_V1_F16x8_REDUCE(res, x) \ { \ - for (int i = 0; i < GGML_F16_ARR/2; ++i) { \ + for (int i = 0; i < GGML_V1_F16_ARR/2; ++i) { \ x[2*i] = vaddq_f16(x[2*i], x[2*i+1]); \ } \ - for (int i = 0; i < GGML_F16_ARR/4; ++i) { \ + for (int i = 0; i < 
GGML_V1_F16_ARR/4; ++i) { \ x[4*i] = vaddq_f16(x[4*i], x[4*i+2]); \ } \ - for (int i = 0; i < GGML_F16_ARR/8; ++i) { \ + for (int i = 0; i < GGML_V1_F16_ARR/8; ++i) { \ x[8*i] = vaddq_f16(x[8*i], x[8*i+4]); \ } \ const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \ @@ -446,73 +446,73 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); res = vaddvq_f32(vaddq_f32(t0, t1)); \ } - #define GGML_F16_VEC GGML_F16x8 - #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO - #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 - #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i]) - #define GGML_F16_VEC_FMA GGML_F16x8_FMA - #define GGML_F16_VEC_ADD GGML_F16x8_ADD - #define GGML_F16_VEC_MUL GGML_F16x8_MUL - #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE + #define GGML_V1_F16_VEC GGML_V1_F16x8 + #define GGML_V1_F16_VEC_ZERO GGML_V1_F16x8_ZERO + #define GGML_V1_F16_VEC_SET1 GGML_V1_F16x8_SET1 + #define GGML_V1_F16_VEC_LOAD(p, i) GGML_V1_F16x8_LOAD(p) + #define GGML_V1_F16_VEC_STORE(p, r, i) GGML_V1_F16x8_STORE(p, r[i]) + #define GGML_V1_F16_VEC_FMA GGML_V1_F16x8_FMA + #define GGML_V1_F16_VEC_ADD GGML_V1_F16x8_ADD + #define GGML_V1_F16_VEC_MUL GGML_V1_F16x8_MUL + #define GGML_V1_F16_VEC_REDUCE GGML_V1_F16x8_REDUCE #else // if FP16 vector arithmetic is not supported, we use FP32 instead // and take advantage of the vcvt_ functions to convert to/from FP16 - #define GGML_F16_STEP 16 - #define GGML_F16_EPR 4 + #define GGML_V1_F16_STEP 16 + #define GGML_V1_F16_EPR 4 - #define GGML_F32Cx4 float32x4_t - #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) - #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) - #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x)) - #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) - #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) - #define GGML_F32Cx4_ADD vaddq_f32 - #define GGML_F32Cx4_MUL vmulq_f32 - #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE + #define GGML_V1_F32Cx4 float32x4_t + #define GGML_V1_F32Cx4_ZERO vdupq_n_f32(0.0f) + #define GGML_V1_F32Cx4_SET1(x) vdupq_n_f32(x) + #define GGML_V1_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x)) + #define GGML_V1_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) + #define GGML_V1_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) + #define GGML_V1_F32Cx4_ADD vaddq_f32 + #define GGML_V1_F32Cx4_MUL vmulq_f32 + #define GGML_V1_F32Cx4_REDUCE GGML_V1_F32x4_REDUCE - #define GGML_F16_VEC GGML_F32Cx4 - #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO - #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 - #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) - #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA - #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD - #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL - #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE + #define GGML_V1_F16_VEC GGML_V1_F32Cx4 + #define GGML_V1_F16_VEC_ZERO GGML_V1_F32Cx4_ZERO + #define GGML_V1_F16_VEC_SET1 GGML_V1_F32Cx4_SET1 + #define GGML_V1_F16_VEC_LOAD(p, i) GGML_V1_F32Cx4_LOAD(p) + #define GGML_V1_F16_VEC_STORE(p, r, i) GGML_V1_F32Cx4_STORE(p, r[i]) + #define GGML_V1_F16_VEC_FMA GGML_V1_F32Cx4_FMA + #define GGML_V1_F16_VEC_ADD GGML_V1_F32Cx4_ADD + #define GGML_V1_F16_VEC_MUL GGML_V1_F32Cx4_MUL + #define GGML_V1_F16_VEC_REDUCE GGML_V1_F32Cx4_REDUCE #endif #elif defined(__AVX__) -#define GGML_SIMD +#define GGML_V1_SIMD // F32 AVX -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 8 +#define GGML_V1_F32_STEP 32 +#define GGML_V1_F32_EPR 8 -#define GGML_F32x8 __m256 -#define GGML_F32x8_ZERO 
_mm256_setzero_ps() -#define GGML_F32x8_SET1(x) _mm256_set1_ps(x) -#define GGML_F32x8_LOAD _mm256_loadu_ps -#define GGML_F32x8_STORE _mm256_storeu_ps +#define GGML_V1_F32x8 __m256 +#define GGML_V1_F32x8_ZERO _mm256_setzero_ps() +#define GGML_V1_F32x8_SET1(x) _mm256_set1_ps(x) +#define GGML_V1_F32x8_LOAD _mm256_loadu_ps +#define GGML_V1_F32x8_STORE _mm256_storeu_ps #if defined(__FMA__) - #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a) + #define GGML_V1_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a) #else - #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a) + #define GGML_V1_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a) #endif -#define GGML_F32x8_ADD _mm256_add_ps -#define GGML_F32x8_MUL _mm256_mul_ps -#define GGML_F32x8_REDUCE(res, x) \ +#define GGML_V1_F32x8_ADD _mm256_add_ps +#define GGML_V1_F32x8_MUL _mm256_mul_ps +#define GGML_V1_F32x8_REDUCE(res, x) \ { \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/2; ++i) { \ x[2*i] = _mm256_add_ps(x[2*i], x[2*i+1]); \ } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/4; ++i) { \ x[4*i] = _mm256_add_ps(x[4*i], x[4*i+2]); \ } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/8; ++i) { \ x[8*i] = _mm256_add_ps(x[8*i], x[8*i+4]); \ } \ const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \ @@ -522,70 +522,70 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); } // TODO: is this optimal ? -#define GGML_F32_VEC GGML_F32x8 -#define GGML_F32_VEC_ZERO GGML_F32x8_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x8_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x8_LOAD -#define GGML_F32_VEC_STORE GGML_F32x8_STORE -#define GGML_F32_VEC_FMA GGML_F32x8_FMA -#define GGML_F32_VEC_ADD GGML_F32x8_ADD -#define GGML_F32_VEC_MUL GGML_F32x8_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE +#define GGML_V1_F32_VEC GGML_V1_F32x8 +#define GGML_V1_F32_VEC_ZERO GGML_V1_F32x8_ZERO +#define GGML_V1_F32_VEC_SET1 GGML_V1_F32x8_SET1 +#define GGML_V1_F32_VEC_LOAD GGML_V1_F32x8_LOAD +#define GGML_V1_F32_VEC_STORE GGML_V1_F32x8_STORE +#define GGML_V1_F32_VEC_FMA GGML_V1_F32x8_FMA +#define GGML_V1_F32_VEC_ADD GGML_V1_F32x8_ADD +#define GGML_V1_F32_VEC_MUL GGML_V1_F32x8_MUL +#define GGML_V1_F32_VEC_REDUCE GGML_V1_F32x8_REDUCE // F16 AVX -#define GGML_F16_STEP 32 -#define GGML_F16_EPR 8 +#define GGML_V1_F16_STEP 32 +#define GGML_V1_F16_EPR 8 // F16 arithmetic is not supported by AVX, so we use F32 instead // we take advantage of the _mm256_cvt intrinsics to convert F16 <-> F32 -#define GGML_F32Cx8 __m256 -#define GGML_F32Cx8_ZERO _mm256_setzero_ps() -#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x) -#define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) -#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) -#define GGML_F32Cx8_FMA GGML_F32x8_FMA -#define GGML_F32Cx8_ADD _mm256_add_ps -#define GGML_F32Cx8_MUL _mm256_mul_ps -#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE +#define GGML_V1_F32Cx8 __m256 +#define GGML_V1_F32Cx8_ZERO _mm256_setzero_ps() +#define GGML_V1_F32Cx8_SET1(x) _mm256_set1_ps(x) +#define GGML_V1_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) +#define GGML_V1_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) +#define GGML_V1_F32Cx8_FMA GGML_V1_F32x8_FMA +#define GGML_V1_F32Cx8_ADD _mm256_add_ps +#define GGML_V1_F32Cx8_MUL _mm256_mul_ps +#define GGML_V1_F32Cx8_REDUCE GGML_V1_F32x8_REDUCE -#define GGML_F16_VEC 
GGML_F32Cx8 -#define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO -#define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F32Cx8_FMA -#define GGML_F16_VEC_ADD GGML_F32Cx8_ADD -#define GGML_F16_VEC_MUL GGML_F32Cx8_MUL -#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE +#define GGML_V1_F16_VEC GGML_V1_F32Cx8 +#define GGML_V1_F16_VEC_ZERO GGML_V1_F32Cx8_ZERO +#define GGML_V1_F16_VEC_SET1 GGML_V1_F32Cx8_SET1 +#define GGML_V1_F16_VEC_LOAD(p, i) GGML_V1_F32Cx8_LOAD(p) +#define GGML_V1_F16_VEC_STORE(p, r, i) GGML_V1_F32Cx8_STORE(p, r[i]) +#define GGML_V1_F16_VEC_FMA GGML_V1_F32Cx8_FMA +#define GGML_V1_F16_VEC_ADD GGML_V1_F32Cx8_ADD +#define GGML_V1_F16_VEC_MUL GGML_V1_F32Cx8_MUL +#define GGML_V1_F16_VEC_REDUCE GGML_V1_F32Cx8_REDUCE #elif defined(__POWER9_VECTOR__) -#define GGML_SIMD +#define GGML_V1_SIMD // F32 POWER9 -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 4 +#define GGML_V1_F32_STEP 32 +#define GGML_V1_F32_EPR 4 -#define GGML_F32x4 vector float -#define GGML_F32x4_ZERO 0.0f -#define GGML_F32x4_SET1 vec_splats -#define GGML_F32x4_LOAD(p) vec_xl(0, p) -#define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p) -#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a) -#define GGML_F32x4_ADD vec_add -#define GGML_F32x4_MUL vec_mul -#define GGML_F32x4_REDUCE(res, x) \ +#define GGML_V1_F32x4 vector float +#define GGML_V1_F32x4_ZERO 0.0f +#define GGML_V1_F32x4_SET1 vec_splats +#define GGML_V1_F32x4_LOAD(p) vec_xl(0, p) +#define GGML_V1_F32x4_STORE(p, r) vec_xst(r, 0, p) +#define GGML_V1_F32x4_FMA(a, b, c) vec_madd(b, c, a) +#define GGML_V1_F32x4_ADD vec_add +#define GGML_V1_F32x4_MUL vec_mul +#define GGML_V1_F32x4_REDUCE(res, x) \ { \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/2; ++i) { \ x[2*i] = vec_add(x[2*i], x[2*i+1]); \ } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/4; ++i) { \ x[4*i] = vec_add(x[4*i], x[4*i+2]); \ } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/8; ++i) { \ x[8*i] = vec_add(x[8*i], x[8*i+4]); \ } \ res = vec_extract(x[0], 0) + \ @@ -594,61 +594,61 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); vec_extract(x[0], 3); \ } -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE +#define GGML_V1_F32_VEC GGML_V1_F32x4 +#define GGML_V1_F32_VEC_ZERO GGML_V1_F32x4_ZERO +#define GGML_V1_F32_VEC_SET1 GGML_V1_F32x4_SET1 +#define GGML_V1_F32_VEC_LOAD GGML_V1_F32x4_LOAD +#define GGML_V1_F32_VEC_STORE GGML_V1_F32x4_STORE +#define GGML_V1_F32_VEC_FMA GGML_V1_F32x4_FMA +#define GGML_V1_F32_VEC_ADD GGML_V1_F32x4_ADD +#define GGML_V1_F32_VEC_MUL GGML_V1_F32x4_MUL +#define GGML_V1_F32_VEC_REDUCE GGML_V1_F32x4_REDUCE // F16 POWER9 -#define GGML_F16_STEP GGML_F32_STEP -#define GGML_F16_EPR GGML_F32_EPR -#define GGML_F16_VEC GGML_F32x4 -#define GGML_F16_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F16_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F16_VEC_FMA GGML_F32x4_FMA -#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE +#define GGML_V1_F16_STEP GGML_V1_F32_STEP +#define GGML_V1_F16_EPR GGML_V1_F32_EPR +#define GGML_V1_F16_VEC 
GGML_V1_F32x4 +#define GGML_V1_F16_VEC_ZERO GGML_V1_F32x4_ZERO +#define GGML_V1_F16_VEC_SET1 GGML_V1_F32x4_SET1 +#define GGML_V1_F16_VEC_FMA GGML_V1_F32x4_FMA +#define GGML_V1_F16_VEC_REDUCE GGML_V1_F32x4_REDUCE // Use vec_xl, not vec_ld, in case the load address is not aligned. -#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \ - vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \ +#define GGML_V1_F16_VEC_LOAD(p, i) (i & 0x1) ? \ + vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_V1_F16_EPR)) : \ vec_extract_fp32_from_shortl(vec_xl(0, p)) -#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i] -#define GGML_F16_VEC_STORE(p, r, i) \ +#define GGML_V1_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i] +#define GGML_V1_F16_VEC_STORE(p, r, i) \ if (i & 0x1) \ - vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \ - r[i - GGML_ENDIAN_BYTE(0)]), \ - 0, p - GGML_F16_EPR) + vec_xst(vec_pack_to_short_fp32(r[i - GGML_V1_ENDIAN_BYTE(1)], \ + r[i - GGML_V1_ENDIAN_BYTE(0)]), \ + 0, p - GGML_V1_F16_EPR) #elif defined(__wasm_simd128__) -#define GGML_SIMD +#define GGML_V1_SIMD // F32 WASM -#define GGML_F32_STEP 16 -#define GGML_F32_EPR 4 +#define GGML_V1_F32_STEP 16 +#define GGML_V1_F32_EPR 4 -#define GGML_F32x4 v128_t -#define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f) -#define GGML_F32x4_SET1(x) wasm_f32x4_splat(x) -#define GGML_F32x4_LOAD wasm_v128_load -#define GGML_F32x4_STORE wasm_v128_store -#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a) -#define GGML_F32x4_ADD wasm_f32x4_add -#define GGML_F32x4_MUL wasm_f32x4_mul -#define GGML_F32x4_REDUCE(res, x) \ +#define GGML_V1_F32x4 v128_t +#define GGML_V1_F32x4_ZERO wasm_f32x4_splat(0.0f) +#define GGML_V1_F32x4_SET1(x) wasm_f32x4_splat(x) +#define GGML_V1_F32x4_LOAD wasm_v128_load +#define GGML_V1_F32x4_STORE wasm_v128_store +#define GGML_V1_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a) +#define GGML_V1_F32x4_ADD wasm_f32x4_add +#define GGML_V1_F32x4_MUL wasm_f32x4_mul +#define GGML_V1_F32x4_REDUCE(res, x) \ { \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/2; ++i) { \ x[2*i] = wasm_f32x4_add(x[2*i], x[2*i+1]); \ } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/4; ++i) { \ x[4*i] = wasm_f32x4_add(x[4*i], x[4*i+2]); \ } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/8; ++i) { \ x[8*i] = wasm_f32x4_add(x[8*i], x[8*i+4]); \ } \ res = wasm_f32x4_extract_lane(x[0], 0) + \ @@ -657,60 +657,60 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); wasm_f32x4_extract_lane(x[0], 3); \ } -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE +#define GGML_V1_F32_VEC GGML_V1_F32x4 +#define GGML_V1_F32_VEC_ZERO GGML_V1_F32x4_ZERO +#define GGML_V1_F32_VEC_SET1 GGML_V1_F32x4_SET1 +#define GGML_V1_F32_VEC_LOAD GGML_V1_F32x4_LOAD +#define GGML_V1_F32_VEC_STORE GGML_V1_F32x4_STORE +#define GGML_V1_F32_VEC_FMA GGML_V1_F32x4_FMA +#define GGML_V1_F32_VEC_ADD GGML_V1_F32x4_ADD +#define GGML_V1_F32_VEC_MUL GGML_V1_F32x4_MUL +#define GGML_V1_F32_VEC_REDUCE GGML_V1_F32x4_REDUCE // F16 WASM -#define GGML_F16_STEP 16 -#define GGML_F16_EPR 4 +#define GGML_V1_F16_STEP 16 
+#define GGML_V1_F16_EPR 4 -inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) { +inline static v128_t __wasm_f16x4_load(const ggml_v1_fp16_t * p) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(p[0]); - tmp[1] = GGML_FP16_TO_FP32(p[1]); - tmp[2] = GGML_FP16_TO_FP32(p[2]); - tmp[3] = GGML_FP16_TO_FP32(p[3]); + tmp[0] = GGML_V1_FP16_TO_FP32(p[0]); + tmp[1] = GGML_V1_FP16_TO_FP32(p[1]); + tmp[2] = GGML_V1_FP16_TO_FP32(p[2]); + tmp[3] = GGML_V1_FP16_TO_FP32(p[3]); return wasm_v128_load(tmp); } -inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { +inline static void __wasm_f16x4_store(ggml_v1_fp16_t * p, v128_t x) { float tmp[4]; wasm_v128_store(tmp, x); - p[0] = GGML_FP32_TO_FP16(tmp[0]); - p[1] = GGML_FP32_TO_FP16(tmp[1]); - p[2] = GGML_FP32_TO_FP16(tmp[2]); - p[3] = GGML_FP32_TO_FP16(tmp[3]); + p[0] = GGML_V1_FP32_TO_FP16(tmp[0]); + p[1] = GGML_V1_FP32_TO_FP16(tmp[1]); + p[2] = GGML_V1_FP32_TO_FP16(tmp[2]); + p[3] = GGML_V1_FP32_TO_FP16(tmp[3]); } -#define GGML_F16x4 v128_t -#define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f) -#define GGML_F16x4_SET1(x) wasm_f32x4_splat(x) -#define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x) -#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y) -#define GGML_F16x4_FMA GGML_F32x4_FMA -#define GGML_F16x4_ADD wasm_f32x4_add -#define GGML_F16x4_MUL wasm_f32x4_mul -#define GGML_F16x4_REDUCE(res, x) \ +#define GGML_V1_F16x4 v128_t +#define GGML_V1_F16x4_ZERO wasm_f32x4_splat(0.0f) +#define GGML_V1_F16x4_SET1(x) wasm_f32x4_splat(x) +#define GGML_V1_F16x4_LOAD(x) __wasm_f16x4_load(x) +#define GGML_V1_F16x4_STORE(x, y) __wasm_f16x4_store(x, y) +#define GGML_V1_F16x4_FMA GGML_V1_F32x4_FMA +#define GGML_V1_F16x4_ADD wasm_f32x4_add +#define GGML_V1_F16x4_MUL wasm_f32x4_mul +#define GGML_V1_F16x4_REDUCE(res, x) \ { \ - for (int i = 0; i < GGML_F16_ARR/2; ++i) { \ + for (int i = 0; i < GGML_V1_F16_ARR/2; ++i) { \ x[2*i] = wasm_f32x4_add(x[2*i], x[2*i+1]); \ } \ - for (int i = 0; i < GGML_F16_ARR/4; ++i) { \ + for (int i = 0; i < GGML_V1_F16_ARR/4; ++i) { \ x[4*i] = wasm_f32x4_add(x[4*i], x[4*i+2]); \ } \ - for (int i = 0; i < GGML_F16_ARR/8; ++i) { \ + for (int i = 0; i < GGML_V1_F16_ARR/8; ++i) { \ x[8*i] = wasm_f32x4_add(x[8*i], x[8*i+4]); \ } \ res = wasm_f32x4_extract_lane(x[0], 0) + \ @@ -719,47 +719,47 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { wasm_f32x4_extract_lane(x[0], 3); \ } -#define GGML_F16_VEC GGML_F16x4 -#define GGML_F16_VEC_ZERO GGML_F16x4_ZERO -#define GGML_F16_VEC_SET1 GGML_F16x4_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F16x4_FMA -#define GGML_F16_VEC_ADD GGML_F16x4_ADD -#define GGML_F16_VEC_MUL GGML_F16x4_MUL -#define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE +#define GGML_V1_F16_VEC GGML_V1_F16x4 +#define GGML_V1_F16_VEC_ZERO GGML_V1_F16x4_ZERO +#define GGML_V1_F16_VEC_SET1 GGML_V1_F16x4_SET1 +#define GGML_V1_F16_VEC_LOAD(p, i) GGML_V1_F16x4_LOAD(p) +#define GGML_V1_F16_VEC_STORE(p, r, i) GGML_V1_F16x4_STORE(p, r[i]) +#define GGML_V1_F16_VEC_FMA GGML_V1_F16x4_FMA +#define GGML_V1_F16_VEC_ADD GGML_V1_F16x4_ADD +#define GGML_V1_F16_VEC_MUL GGML_V1_F16x4_MUL +#define GGML_V1_F16_VEC_REDUCE GGML_V1_F16x4_REDUCE #elif defined(__SSE3__) -#define GGML_SIMD +#define GGML_V1_SIMD // F32 SSE -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 4 +#define GGML_V1_F32_STEP 32 +#define GGML_V1_F32_EPR 4 -#define GGML_F32x4 __m128 -#define GGML_F32x4_ZERO _mm_setzero_ps() -#define GGML_F32x4_SET1(x) _mm_set1_ps(x) 
-#define GGML_F32x4_LOAD _mm_loadu_ps -#define GGML_F32x4_STORE _mm_storeu_ps +#define GGML_V1_F32x4 __m128 +#define GGML_V1_F32x4_ZERO _mm_setzero_ps() +#define GGML_V1_F32x4_SET1(x) _mm_set1_ps(x) +#define GGML_V1_F32x4_LOAD _mm_loadu_ps +#define GGML_V1_F32x4_STORE _mm_storeu_ps #if defined(__FMA__) // TODO: Does this work? - #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a) + #define GGML_V1_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a) #else - #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a) + #define GGML_V1_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a) #endif -#define GGML_F32x4_ADD _mm_add_ps -#define GGML_F32x4_MUL _mm_mul_ps -#define GGML_F32x4_REDUCE(res, x) \ +#define GGML_V1_F32x4_ADD _mm_add_ps +#define GGML_V1_F32x4_MUL _mm_mul_ps +#define GGML_V1_F32x4_REDUCE(res, x) \ { \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/2; ++i) { \ x[2*i] = _mm_add_ps(x[2*i], x[2*i+1]); \ } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/4; ++i) { \ x[4*i] = _mm_add_ps(x[4*i], x[4*i+2]); \ } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ + for (int i = 0; i < GGML_V1_F32_ARR/8; ++i) { \ x[8*i] = _mm_add_ps(x[8*i], x[8*i+4]); \ } \ const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \ @@ -767,116 +767,116 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { } // TODO: is this optimal ? -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE +#define GGML_V1_F32_VEC GGML_V1_F32x4 +#define GGML_V1_F32_VEC_ZERO GGML_V1_F32x4_ZERO +#define GGML_V1_F32_VEC_SET1 GGML_V1_F32x4_SET1 +#define GGML_V1_F32_VEC_LOAD GGML_V1_F32x4_LOAD +#define GGML_V1_F32_VEC_STORE GGML_V1_F32x4_STORE +#define GGML_V1_F32_VEC_FMA GGML_V1_F32x4_FMA +#define GGML_V1_F32_VEC_ADD GGML_V1_F32x4_ADD +#define GGML_V1_F32_VEC_MUL GGML_V1_F32x4_MUL +#define GGML_V1_F32_VEC_REDUCE GGML_V1_F32x4_REDUCE // F16 SSE -#define GGML_F16_STEP 32 -#define GGML_F16_EPR 4 +#define GGML_V1_F16_STEP 32 +#define GGML_V1_F16_EPR 4 -static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) { +static inline __m128 __sse_f16x4_load(ggml_v1_fp16_t *x) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(x[0]); - tmp[1] = GGML_FP16_TO_FP32(x[1]); - tmp[2] = GGML_FP16_TO_FP32(x[2]); - tmp[3] = GGML_FP16_TO_FP32(x[3]); + tmp[0] = GGML_V1_FP16_TO_FP32(x[0]); + tmp[1] = GGML_V1_FP16_TO_FP32(x[1]); + tmp[2] = GGML_V1_FP16_TO_FP32(x[2]); + tmp[3] = GGML_V1_FP16_TO_FP32(x[3]); return _mm_loadu_ps(tmp); } -static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) { +static inline void __sse_f16x4_store(ggml_v1_fp16_t *x, __m128 y) { float arr[4]; _mm_storeu_ps(arr, y); - x[0] = GGML_FP32_TO_FP16(arr[0]); - x[1] = GGML_FP32_TO_FP16(arr[1]); - x[2] = GGML_FP32_TO_FP16(arr[2]); - x[3] = GGML_FP32_TO_FP16(arr[3]); + x[0] = GGML_V1_FP32_TO_FP16(arr[0]); + x[1] = GGML_V1_FP32_TO_FP16(arr[1]); + x[2] = GGML_V1_FP32_TO_FP16(arr[2]); + x[3] = GGML_V1_FP32_TO_FP16(arr[3]); } -#define GGML_F32Cx4 __m128 -#define GGML_F32Cx4_ZERO _mm_setzero_ps() -#define GGML_F32Cx4_SET1(x) _mm_set1_ps(x) -#define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x) -#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y) -#define GGML_F32Cx4_FMA GGML_F32x4_FMA -#define GGML_F32Cx4_ADD 
_mm_add_ps -#define GGML_F32Cx4_MUL _mm_mul_ps -#define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE +#define GGML_V1_F32Cx4 __m128 +#define GGML_V1_F32Cx4_ZERO _mm_setzero_ps() +#define GGML_V1_F32Cx4_SET1(x) _mm_set1_ps(x) +#define GGML_V1_F32Cx4_LOAD(x) __sse_f16x4_load(x) +#define GGML_V1_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y) +#define GGML_V1_F32Cx4_FMA GGML_V1_F32x4_FMA +#define GGML_V1_F32Cx4_ADD _mm_add_ps +#define GGML_V1_F32Cx4_MUL _mm_mul_ps +#define GGML_V1_F32Cx4_REDUCE GGML_V1_F32x4_REDUCE -#define GGML_F16_VEC GGML_F32Cx4 -#define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO -#define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F32Cx4_FMA -#define GGML_F16_VEC_ADD GGML_F32Cx4_ADD -#define GGML_F16_VEC_MUL GGML_F32Cx4_MUL -#define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE +#define GGML_V1_F16_VEC GGML_V1_F32Cx4 +#define GGML_V1_F16_VEC_ZERO GGML_V1_F32Cx4_ZERO +#define GGML_V1_F16_VEC_SET1 GGML_V1_F32Cx4_SET1 +#define GGML_V1_F16_VEC_LOAD(p, i) GGML_V1_F32Cx4_LOAD(p) +#define GGML_V1_F16_VEC_STORE(p, r, i) GGML_V1_F32Cx4_STORE(p, r[i]) +#define GGML_V1_F16_VEC_FMA GGML_V1_F32Cx4_FMA +#define GGML_V1_F16_VEC_ADD GGML_V1_F32Cx4_ADD +#define GGML_V1_F16_VEC_MUL GGML_V1_F32Cx4_MUL +#define GGML_V1_F16_VEC_REDUCE GGML_V1_F32Cx4_REDUCE #endif -// GGML_F32_ARR / GGML_F16_ARR +// GGML_V1_F32_ARR / GGML_V1_F16_ARR // number of registers to use per step -#ifdef GGML_SIMD -#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR) -#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) +#ifdef GGML_V1_SIMD +#define GGML_V1_F32_ARR (GGML_V1_F32_STEP/GGML_V1_F32_EPR) +#define GGML_V1_F16_ARR (GGML_V1_F16_STEP/GGML_V1_F16_EPR) #endif // // fundamental operations // -inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void ggml_v1_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void ggml_v1_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void ggml_v1_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void ggml_v1_vec_set_f16(const int n, ggml_v1_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } -inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } -inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; } -inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } -inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < 
n; ++i) y[i] = x[i]; } -inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } -inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } -inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } +inline static void ggml_v1_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } +inline static void ggml_v1_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } +inline static void ggml_v1_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; } +inline static void ggml_v1_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } +inline static void ggml_v1_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void ggml_v1_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } +inline static void ggml_v1_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } +inline static void ggml_v1_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } +inline static void ggml_v1_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } -inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { - ggml_float sumf = 0.0; +inline static void ggml_v1_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { + ggml_v1_float sumf = 0.0; -#ifdef GGML_SIMD - const int np = (n & ~(GGML_F32_STEP - 1)); +#ifdef GGML_V1_SIMD + const int np = (n & ~(GGML_V1_F32_STEP - 1)); - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + GGML_V1_F32_VEC sum[GGML_V1_F32_ARR] = { GGML_V1_F32_VEC_ZERO }; - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + GGML_V1_F32_VEC ax[GGML_V1_F32_ARR]; + GGML_V1_F32_VEC ay[GGML_V1_F32_ARR]; - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + for (int i = 0; i < np; i += GGML_V1_F32_STEP) { + for (int j = 0; j < GGML_V1_F32_ARR; j++) { + ax[j] = GGML_V1_F32_VEC_LOAD(x + i + j*GGML_V1_F32_EPR); + ay[j] = GGML_V1_F32_VEC_LOAD(y + i + j*GGML_V1_F32_EPR); - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); + sum[j] = GGML_V1_F32_VEC_FMA(sum[j], ax[j], ay[j]); } } // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); + GGML_V1_F32_VEC_REDUCE(sumf, sum); // leftovers for (int i = np; i < n; ++i) { @@ -892,113 +892,113 @@ inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float *s = sumf; } -inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) { - ggml_float sumf = 0.0; +inline static void ggml_v1_vec_dot_f16(const int n, float * restrict s, ggml_v1_fp16_t * restrict x, ggml_v1_fp16_t * restrict y) { + ggml_v1_float sumf = 0.0; -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F16_STEP - 1)); +#if 
defined(GGML_V1_SIMD) + const int np = (n & ~(GGML_V1_F16_STEP - 1)); - GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO }; + GGML_V1_F16_VEC sum[GGML_V1_F16_ARR] = { GGML_V1_F16_VEC_ZERO }; - GGML_F16_VEC ax[GGML_F16_ARR]; - GGML_F16_VEC ay[GGML_F16_ARR]; + GGML_V1_F16_VEC ax[GGML_V1_F16_ARR]; + GGML_V1_F16_VEC ay[GGML_V1_F16_ARR]; - for (int i = 0; i < np; i += GGML_F16_STEP) { - for (int j = 0; j < GGML_F16_ARR; j++) { - ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); - ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); + for (int i = 0; i < np; i += GGML_V1_F16_STEP) { + for (int j = 0; j < GGML_V1_F16_ARR; j++) { + ax[j] = GGML_V1_F16_VEC_LOAD(x + i + j*GGML_V1_F16_EPR, j); + ay[j] = GGML_V1_F16_VEC_LOAD(y + i + j*GGML_V1_F16_EPR, j); - sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]); + sum[j] = GGML_V1_F16_VEC_FMA(sum[j], ax[j], ay[j]); } } // reduce sum0..sum3 to sum0 - GGML_F16_VEC_REDUCE(sumf, sum); + GGML_V1_F16_VEC_REDUCE(sumf, sum); // leftovers for (int i = np; i < n; ++i) { - sumf += GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]); + sumf += GGML_V1_FP16_TO_FP32(x[i])*GGML_V1_FP16_TO_FP32(y[i]); } #else for (int i = 0; i < n; ++i) { - sumf += GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]); + sumf += GGML_V1_FP16_TO_FP32(x[i])*GGML_V1_FP16_TO_FP32(y[i]); } #endif *s = sumf; } -// compute GGML_VEC_DOT_UNROLL dot products at once +// compute GGML_V1_VEC_DOT_UNROLL dot products at once // xs - x row stride in bytes -inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) { - ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 }; +inline static void ggml_v1_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_v1_fp16_t * restrict y) { + ggml_v1_float sumf[GGML_V1_VEC_DOT_UNROLL] = { 0.0 }; - ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL]; + ggml_v1_fp16_t * restrict x[GGML_V1_VEC_DOT_UNROLL]; - for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) { - x[i] = (ggml_fp16_t *) ((char *) xv + i*xs); + for (int i = 0; i < GGML_V1_VEC_DOT_UNROLL; ++i) { + x[i] = (ggml_v1_fp16_t *) ((char *) xv + i*xs); } -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F16_STEP - 1)); +#if defined(GGML_V1_SIMD) + const int np = (n & ~(GGML_V1_F16_STEP - 1)); - GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } }; + GGML_V1_F16_VEC sum[GGML_V1_VEC_DOT_UNROLL][GGML_V1_F16_ARR] = { { GGML_V1_F16_VEC_ZERO } }; - GGML_F16_VEC ax[GGML_F16_ARR]; - GGML_F16_VEC ay[GGML_F16_ARR]; + GGML_V1_F16_VEC ax[GGML_V1_F16_ARR]; + GGML_V1_F16_VEC ay[GGML_V1_F16_ARR]; - for (int i = 0; i < np; i += GGML_F16_STEP) { - for (int j = 0; j < GGML_F16_ARR; j++) { - ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); + for (int i = 0; i < np; i += GGML_V1_F16_STEP) { + for (int j = 0; j < GGML_V1_F16_ARR; j++) { + ay[j] = GGML_V1_F16_VEC_LOAD(y + i + j*GGML_V1_F16_EPR, j); - for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) { - ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j); + for (int k = 0; k < GGML_V1_VEC_DOT_UNROLL; ++k) { + ax[j] = GGML_V1_F16_VEC_LOAD(x[k] + i + j*GGML_V1_F16_EPR, j); - sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]); + sum[k][j] = GGML_V1_F16_VEC_FMA(sum[k][j], ax[j], ay[j]); } } } // reduce sum0..sum3 to sum0 - for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) { - GGML_F16_VEC_REDUCE(sumf[k], sum[k]); + for (int k = 0; k < GGML_V1_VEC_DOT_UNROLL; ++k) { + GGML_V1_F16_VEC_REDUCE(sumf[k], sum[k]); } // leftovers for (int i = np; i < n; ++i) { - for (int j 
= 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]); + for (int j = 0; j < GGML_V1_VEC_DOT_UNROLL; ++j) { + sumf[j] += GGML_V1_FP16_TO_FP32(x[j][i])*GGML_V1_FP16_TO_FP32(y[i]); } } #else for (int i = 0; i < n; ++i) { - for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]); + for (int j = 0; j < GGML_V1_VEC_DOT_UNROLL; ++j) { + sumf[j] += GGML_V1_FP16_TO_FP32(x[j][i])*GGML_V1_FP16_TO_FP32(y[i]); } } #endif - for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) { + for (int i = 0; i < GGML_V1_VEC_DOT_UNROLL; ++i) { s[i] = sumf[i]; } } -inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) { -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); +inline static void ggml_v1_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) { +#if defined(GGML_V1_SIMD) + const int np = (n & ~(GGML_V1_F32_STEP - 1)); - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + GGML_V1_F32_VEC vx = GGML_V1_F32_VEC_SET1(v); - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + GGML_V1_F32_VEC ax[GGML_V1_F32_ARR]; + GGML_V1_F32_VEC ay[GGML_V1_F32_ARR]; - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); + for (int i = 0; i < np; i += GGML_V1_F32_STEP) { + for (int j = 0; j < GGML_V1_F32_ARR; j++) { + ax[j] = GGML_V1_F32_VEC_LOAD(x + i + j*GGML_V1_F32_EPR); + ay[j] = GGML_V1_F32_VEC_LOAD(y + i + j*GGML_V1_F32_EPR); + ay[j] = GGML_V1_F32_VEC_FMA(ay[j], ax[j], vx); - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + GGML_V1_F32_VEC_STORE(y + i + j*GGML_V1_F32_EPR, ay[j]); } } @@ -1014,52 +1014,52 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float #endif } -inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_fp16_t * restrict x, const float v) { -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F16_STEP - 1)); +inline static void ggml_v1_vec_mad_f16(const int n, ggml_v1_fp16_t * restrict y, ggml_v1_fp16_t * restrict x, const float v) { +#if defined(GGML_V1_SIMD) + const int np = (n & ~(GGML_V1_F16_STEP - 1)); - GGML_F16_VEC vx = GGML_F16_VEC_SET1(v); + GGML_V1_F16_VEC vx = GGML_V1_F16_VEC_SET1(v); - GGML_F16_VEC ax[GGML_F16_ARR]; - GGML_F16_VEC ay[GGML_F16_ARR]; + GGML_V1_F16_VEC ax[GGML_V1_F16_ARR]; + GGML_V1_F16_VEC ay[GGML_V1_F16_ARR]; - for (int i = 0; i < np; i += GGML_F16_STEP) { - for (int j = 0; j < GGML_F16_ARR; j++) { - ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); - ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); - ay[j] = GGML_F16_VEC_FMA(ay[j], ax[j], vx); + for (int i = 0; i < np; i += GGML_V1_F16_STEP) { + for (int j = 0; j < GGML_V1_F16_ARR; j++) { + ax[j] = GGML_V1_F16_VEC_LOAD(x + i + j*GGML_V1_F16_EPR, j); + ay[j] = GGML_V1_F16_VEC_LOAD(y + i + j*GGML_V1_F16_EPR, j); + ay[j] = GGML_V1_F16_VEC_FMA(ay[j], ax[j], vx); - GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j); + GGML_V1_F16_VEC_STORE(y + i + j*GGML_V1_F16_EPR, ay, j); } } // leftovers for (int i = np; i < n; ++i) { - GGML_ASSERT(false); - y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v); + GGML_V1_ASSERT(false); + y[i] = GGML_V1_FP32_TO_FP16(GGML_V1_FP16_TO_FP32(y[i]) + GGML_V1_FP16_TO_FP32(x[i])*v); } #else for (int i = 0; i < n; ++i) { - y[i] = 
GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v); + y[i] = GGML_V1_FP32_TO_FP16(GGML_V1_FP16_TO_FP32(y[i]) + GGML_V1_FP16_TO_FP32(x[i])*v); } #endif } -//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; } -inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); +//inline static void ggml_v1_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; } +inline static void ggml_v1_vec_scale_f32(const int n, float * y, const float v) { +#if defined(GGML_V1_SIMD) + const int np = (n & ~(GGML_V1_F32_STEP - 1)); - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + GGML_V1_F32_VEC vx = GGML_V1_F32_VEC_SET1(v); - GGML_F32_VEC ay[GGML_F32_ARR]; + GGML_V1_F32_VEC ay[GGML_V1_F32_ARR]; - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_MUL(ay[j], vx); + for (int i = 0; i < np; i += GGML_V1_F32_STEP) { + for (int j = 0; j < GGML_V1_F32_ARR; j++) { + ay[j] = GGML_V1_F32_VEC_LOAD(y + i + j*GGML_V1_F32_EPR); + ay[j] = GGML_V1_F32_VEC_MUL(ay[j], vx); - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + GGML_V1_F32_VEC_STORE(y + i + j*GGML_V1_F32_EPR, ay[j]); } } @@ -1075,48 +1075,48 @@ inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { #endif } -inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrt(*s); } -inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } -inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrt(x[i]); } -inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } -inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } -inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } -inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } +inline static void ggml_v1_vec_norm_f32 (const int n, float * s, const float * x) { ggml_v1_vec_dot_f32(n, s, x, x); *s = sqrt(*s); } +inline static void ggml_v1_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } +inline static void ggml_v1_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrt(x[i]); } +inline static void ggml_v1_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } +inline static void ggml_v1_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } +inline static void ggml_v1_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } +inline static void ggml_v1_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 
x[i] : 0.f; } -static const ggml_float GELU_COEF_A = 0.044715; -static const ggml_float SQRT_2_OVER_PI = 0.79788456080286535587989211986876; +static const ggml_v1_float GELU_COEF_A = 0.044715; +static const ggml_v1_float SQRT_2_OVER_PI = 0.79788456080286535587989211986876; -inline static float ggml_gelu_f32(float x) { +inline static float ggml_v1_gelu_f32(float x) { return 0.5*x*(1.0 + tanh(SQRT_2_OVER_PI*x*(1.0 + GELU_COEF_A*x*x))); } -inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { +inline static void ggml_v1_vec_gelu_f16(const int n, ggml_v1_fp16_t * y, const ggml_v1_fp16_t * x) { const uint16_t * i16 = (const uint16_t *) x; for (int i = 0; i < n; ++i) { y[i] = table_gelu_f16[i16[i]]; } } -#ifdef GGML_GELU_FP16 -inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { +#ifdef GGML_V1_GELU_FP16 +inline static void ggml_v1_vec_gelu_f32(const int n, float * y, const float * x) { uint16_t t; for (int i = 0; i < n; ++i) { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + ggml_v1_fp16_t fp16 = GGML_V1_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]); + y[i] = GGML_V1_FP16_TO_FP32(table_gelu_f16[t]); } } #else -inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { +inline static void ggml_v1_vec_gelu_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { - y[i] = ggml_gelu_f32(x[i]); + y[i] = ggml_v1_gelu_f32(x[i]); } } #endif -inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { -#ifndef GGML_USE_ACCELERATE - ggml_float sum = 0.0; +inline static void ggml_v1_vec_sum_f32(const int n, float * s, const float * x) { +#ifndef GGML_V1_USE_ACCELERATE + ggml_v1_float sum = 0.0; for (int i = 0; i < n; ++i) { sum += x[i]; } @@ -1126,9 +1126,9 @@ inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { #endif } -inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { -#ifndef GGML_USE_ACCELERATE - ggml_float max = -INFINITY; +inline static void ggml_v1_vec_max_f32(const int n, float * s, const float * x) { +#ifndef GGML_V1_USE_ACCELERATE + ggml_v1_float max = -INFINITY; for (int i = 0; i < n; ++i) { max = MAX(max, x[i]); } @@ -1138,45 +1138,45 @@ inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { #endif } -inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) { ggml_vec_norm_f32(n, s, x); *s = 1./(*s); } +inline static void ggml_v1_vec_norm_inv_f32(const int n, float * s, const float * x) { ggml_v1_vec_norm_f32(n, s, x); *s = 1./(*s); } // // logging // -#if (GGML_DEBUG >= 1) -#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__) +#if (GGML_V1_DEBUG >= 1) +#define GGML_V1_PRINT_DEBUG(...) printf(__VA_ARGS__) #else -#define GGML_PRINT_DEBUG(...) +#define GGML_V1_PRINT_DEBUG(...) #endif -#if (GGML_DEBUG >= 5) -#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__) +#if (GGML_V1_DEBUG >= 5) +#define GGML_V1_PRINT_DEBUG_5(...) printf(__VA_ARGS__) #else -#define GGML_PRINT_DEBUG_5(...) +#define GGML_V1_PRINT_DEBUG_5(...) #endif -#if (GGML_DEBUG >= 10) -#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__) +#if (GGML_V1_DEBUG >= 10) +#define GGML_V1_PRINT_DEBUG_10(...) printf(__VA_ARGS__) #else -#define GGML_PRINT_DEBUG_10(...) +#define GGML_V1_PRINT_DEBUG_10(...) #endif -#define GGML_PRINT(...) printf(__VA_ARGS__) +#define GGML_V1_PRINT(...) 
printf(__VA_ARGS__) // // data types // -static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = { +static const size_t GGML_V1_TYPE_SIZE[GGML_V1_TYPE_COUNT] = { sizeof(int8_t ), sizeof(int16_t), sizeof(int32_t), - sizeof(ggml_fp16_t), + sizeof(ggml_v1_fp16_t), sizeof(float ), }; -static const char * GGML_OP_LABEL[GGML_OP_COUNT] = { +static const char * GGML_V1_OP_LABEL[GGML_V1_OP_COUNT] = { "NONE", "DUP", @@ -1216,7 +1216,7 @@ static const char * GGML_OP_LABEL[GGML_OP_COUNT] = { "FLASH_FF", }; -static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { +static const char * GGML_V1_OP_SYMBOL[GGML_V1_OP_COUNT] = { "none", "x", @@ -1260,56 +1260,56 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { // ggml object // -struct ggml_object { +struct ggml_v1_object { size_t offs; size_t size; - struct ggml_object * next; + struct ggml_v1_object * next; char padding[8]; }; -static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object); +static const size_t GGML_V1_OBJECT_SIZE = sizeof(struct ggml_v1_object); -static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN"); -static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN"); +static_assert(sizeof(struct ggml_v1_object)%GGML_V1_MEM_ALIGN == 0, "ggml_v1_object size must be a multiple of GGML_V1_MEM_ALIGN"); +static_assert(sizeof(struct ggml_v1_tensor)%GGML_V1_MEM_ALIGN == 0, "ggml_v1_tensor size must be a multiple of GGML_V1_MEM_ALIGN"); // // ggml context // -struct ggml_context { +struct ggml_v1_context { size_t mem_size; void * mem_buffer; bool mem_buffer_owned; int n_objects; - struct ggml_object * objects_begin; - struct ggml_object * objects_end; + struct ggml_v1_object * objects_begin; + struct ggml_v1_object * objects_end; - struct ggml_scratch scratch; - struct ggml_scratch scratch_save; + struct ggml_v1_scratch scratch; + struct ggml_v1_scratch scratch_save; }; -struct ggml_context_container { +struct ggml_v1_context_container { bool used; - struct ggml_context context; + struct ggml_v1_context context; }; // // compute types // -enum ggml_task_type { - GGML_TASK_INIT = 0, - GGML_TASK_COMPUTE, - GGML_TASK_FINALIZE, +enum ggml_v1_task_type { + GGML_V1_TASK_INIT = 0, + GGML_V1_TASK_COMPUTE, + GGML_V1_TASK_FINALIZE, }; -struct ggml_compute_params { - enum ggml_task_type type; +struct ggml_v1_compute_params { + enum ggml_v1_task_type type; int ith, nth; @@ -1322,16 +1322,16 @@ struct ggml_compute_params { // ggml state // -struct ggml_state { - struct ggml_context_container contexts[GGML_MAX_CONTEXTS]; +struct ggml_v1_state { + struct ggml_v1_context_container contexts[GGML_V1_MAX_CONTEXTS]; }; // global state -static struct ggml_state g_state; +static struct ggml_v1_state g_state; static atomic_int g_state_barrier = 0; // barrier via spin lock -inline static void ggml_critical_section_start(void) { +inline static void ggml_v1_critical_section_start(void) { int processing = atomic_fetch_add(&g_state_barrier, 1); while (processing > 0) { @@ -1344,76 +1344,76 @@ inline static void ggml_critical_section_start(void) { // TODO: make this somehow automatically executed // some sort of "sentry" mechanism -inline static void ggml_critical_section_end(void) { +inline static void ggml_v1_critical_section_end(void) { atomic_fetch_sub(&g_state_barrier, 1); } //////////////////////////////////////////////////////////////////////////////// -void ggml_print_object(const struct ggml_object * obj) { - GGML_PRINT(" - ggml_object: offset 
= %zu, size = %zu, next = %p\n", +void ggml_v1_print_object(const struct ggml_v1_object * obj) { + GGML_V1_PRINT(" - ggml_v1_object: offset = %zu, size = %zu, next = %p\n", obj->offs, obj->size, (const void *) obj->next); } -void ggml_print_objects(const struct ggml_context * ctx) { - struct ggml_object * obj = ctx->objects_begin; +void ggml_v1_print_objects(const struct ggml_v1_context * ctx) { + struct ggml_v1_object * obj = ctx->objects_begin; - GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx); + GGML_V1_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx); while (obj != NULL) { - ggml_print_object(obj); + ggml_v1_print_object(obj); obj = obj->next; } - GGML_PRINT("%s: --- end ---\n", __func__); + GGML_V1_PRINT("%s: --- end ---\n", __func__); } -int ggml_nelements(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +int ggml_v1_nelements(const struct ggml_v1_tensor * tensor) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; } -int ggml_nrows(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +int ggml_v1_nrows(const struct ggml_v1_tensor * tensor) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; } -size_t ggml_nbytes(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +size_t ggml_v1_nbytes(const struct ggml_v1_tensor * tensor) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); - return ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type]; + return ggml_v1_nelements(tensor)*GGML_V1_TYPE_SIZE[tensor->type]; } -size_t ggml_type_size(enum ggml_type type) { - return GGML_TYPE_SIZE[type]; +size_t ggml_v1_type_size(enum ggml_v1_type type) { + return GGML_V1_TYPE_SIZE[type]; } -size_t ggml_element_size(const struct ggml_tensor * tensor) { - return GGML_TYPE_SIZE[tensor->type]; +size_t ggml_v1_element_size(const struct ggml_v1_tensor * tensor) { + return GGML_V1_TYPE_SIZE[tensor->type]; } -static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool ggml_v1_is_scalar(const struct ggml_v1_tensor * tensor) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1; } -static inline bool ggml_is_vector(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool ggml_v1_is_vector(const struct ggml_v1_tensor * tensor) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1; } -static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool ggml_v1_is_matrix(const struct ggml_v1_tensor * tensor) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return tensor->ne[2] == 1 && tensor->ne[3] == 1; } -static inline bool 
ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool ggml_v1_can_mul_mat(const struct ggml_v1_tensor * t0, const struct ggml_v1_tensor * t1) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return (t0->ne[0] == t1->ne[0]) && @@ -1421,27 +1421,27 @@ static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct (t0->ne[3] == t1->ne[3]); } -static inline bool ggml_is_contiguous(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool ggml_v1_is_contiguous(const struct ggml_v1_tensor * tensor) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return - tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] && + tensor->nb[0] == GGML_V1_TYPE_SIZE[tensor->type] && tensor->nb[1] == tensor->nb[0]*tensor->ne[0] && tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; } -static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool ggml_v1_is_padded_1d(const struct ggml_v1_tensor * tensor) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return - tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] && + tensor->nb[0] == GGML_V1_TYPE_SIZE[tensor->type] && tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; } -static inline bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool ggml_v1_are_same_shape(const struct ggml_v1_tensor * t0, const struct ggml_v1_tensor * t1) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return (t0->ne[0] == t1->ne[0] ) && @@ -1451,8 +1451,8 @@ static inline bool ggml_are_same_shape(const struct ggml_tensor * t0, const stru } // check if t1 can be represented as a repeatition of t0 -static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool ggml_v1_can_repeat(const struct ggml_v1_tensor * t0, const struct ggml_v1_tensor * t1) { + static_assert(GGML_V1_MAX_DIMS == 4, "GGML_V1_MAX_DIMS is not 4 - update this function"); return (t1->ne[0]%t0->ne[0] == 0) && @@ -1461,96 +1461,96 @@ static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct g (t1->ne[3]%t0->ne[3] == 0); } -static inline int ggml_up32(int n) { +static inline int ggml_v1_up32(int n) { return (n + 31) & ~31; } -static inline int ggml_up64(int n) { +static inline int ggml_v1_up64(int n) { return (n + 63) & ~63; } -static inline int ggml_up(int n, int m) { +static inline int ggml_v1_up(int n, int m) { // assert m is a power of 2 - GGML_ASSERT((m & (m - 1)) == 0); + GGML_V1_ASSERT((m & (m - 1)) == 0); return (n + m - 1) & ~(m - 1); } -// assert that pointer is aligned to GGML_MEM_ALIGN -#define ggml_assert_aligned(ptr) \ - assert(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0) +// assert that pointer is aligned to GGML_V1_MEM_ALIGN +#define ggml_v1_assert_aligned(ptr) \ + assert(((uintptr_t) (ptr))%GGML_V1_MEM_ALIGN == 0) 
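The renamed ggml_v1_up32/ggml_v1_up64/ggml_v1_up helpers above keep the usual power-of-two round-up trick, (n + m - 1) & ~(m - 1), which the allocator further down relies on to keep object sizes a multiple of GGML_V1_MEM_ALIGN; the trick is only valid when m is a power of two, which the GGML_V1_ASSERT guards. A minimal standalone sketch of the behaviour (not part of the patch; the demo values and main are illustrative only):

// standalone illustration of the power-of-two round-up used by ggml_v1_up()
#include <assert.h>
#include <stdio.h>

static int up(int n, int m) {
    assert((m & (m - 1)) == 0);      // m must be a power of two
    return (n + m - 1) & ~(m - 1);   // round n up to the next multiple of m
}

int main(void) {
    printf("%d\n", up(10, 8));   // prints 16
    printf("%d\n", up(32, 32));  // prints 32: already aligned, unchanged
    printf("%d\n", up(33, 16));  // prints 48
    return 0;
}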
//////////////////////////////////////////////////////////////////////////////// -struct ggml_context * ggml_init(struct ggml_init_params params) { +struct ggml_v1_context * ggml_v1_init(struct ggml_v1_init_params params) { // make this function thread safe - ggml_critical_section_start(); + ggml_v1_critical_section_start(); static bool is_first_call = true; if (is_first_call) { // initialize time system (required on Windows) - ggml_time_init(); + ggml_v1_time_init(); // initialize GELU, EXP and F32 tables { - const uint64_t t_start = ggml_time_us(); UNUSED(t_start); + const uint64_t t_start = ggml_v1_time_us(); UNUSED(t_start); - ggml_fp16_t ii; + ggml_v1_fp16_t ii; for (int i = 0; i < (1 << 16); ++i) { uint16_t ui = i; memcpy(&ii, &ui, sizeof(ii)); - const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii); - table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); - table_exp_f16[i] = GGML_FP32_TO_FP16(exp(f)); + const float f = table_f32_f16[i] = GGML_V1_COMPUTE_FP16_TO_FP32(ii); + table_gelu_f16[i] = GGML_V1_FP32_TO_FP16(ggml_v1_gelu_f32(f)); + table_exp_f16[i] = GGML_V1_FP32_TO_FP16(exp(f)); } - const uint64_t t_end = ggml_time_us(); UNUSED(t_end); + const uint64_t t_end = ggml_v1_time_us(); UNUSED(t_end); - GGML_PRINT_DEBUG("%s: GELU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); + GGML_V1_PRINT_DEBUG("%s: GELU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); } // initialize g_state { - const uint64_t t_start = ggml_time_us(); UNUSED(t_start); + const uint64_t t_start = ggml_v1_time_us(); UNUSED(t_start); - g_state = (struct ggml_state) { + g_state = (struct ggml_v1_state) { /*.contexts =*/ { { 0 } }, }; - for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) { + for (int i = 0; i < GGML_V1_MAX_CONTEXTS; ++i) { g_state.contexts[i].used = false; } - const uint64_t t_end = ggml_time_us(); UNUSED(t_end); + const uint64_t t_end = ggml_v1_time_us(); UNUSED(t_end); - GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); + GGML_V1_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); } is_first_call = false; } // find non-used context in g_state - struct ggml_context * ctx = NULL; + struct ggml_v1_context * ctx = NULL; - for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { + for (int i = 0; i < GGML_V1_MAX_CONTEXTS; i++) { if (!g_state.contexts[i].used) { g_state.contexts[i].used = true; ctx = &g_state.contexts[i].context; - GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i); + GGML_V1_PRINT_DEBUG("%s: found unused context %d\n", __func__, i); break; } } if (ctx == NULL) { - GGML_PRINT_DEBUG("%s: no unused context found\n", __func__); + GGML_V1_PRINT_DEBUG("%s: no unused context found\n", __func__); - ggml_critical_section_end(); + ggml_v1_critical_section_end(); return NULL; } - *ctx = (struct ggml_context) { + *ctx = (struct ggml_v1_context) { /*.mem_size =*/ params.mem_size, /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size), /*.mem_buffer_owned =*/ params.mem_buffer ? 
false : true, @@ -1561,26 +1561,26 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { /*.scratch_save =*/ { 0, 0, NULL, }, }; - ggml_assert_aligned(ctx->mem_buffer); + ggml_v1_assert_aligned(ctx->mem_buffer); - GGML_PRINT_DEBUG("%s: context initialized\n", __func__); + GGML_V1_PRINT_DEBUG("%s: context initialized\n", __func__); - ggml_critical_section_end(); + ggml_v1_critical_section_end(); return ctx; } -void ggml_free(struct ggml_context * ctx) { +void ggml_v1_free(struct ggml_v1_context * ctx) { // make this function thread safe - ggml_critical_section_start(); + ggml_v1_critical_section_start(); bool found = false; - for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { + for (int i = 0; i < GGML_V1_MAX_CONTEXTS; i++) { if (&g_state.contexts[i].context == ctx) { g_state.contexts[i].used = false; - GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n", + GGML_V1_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n", __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size); if (ctx->mem_buffer_owned) { @@ -1593,17 +1593,17 @@ void ggml_free(struct ggml_context * ctx) { } if (!found) { - GGML_PRINT_DEBUG("%s: context not found\n", __func__); + GGML_V1_PRINT_DEBUG("%s: context not found\n", __func__); } - ggml_critical_section_end(); + ggml_v1_critical_section_end(); } -size_t ggml_used_mem(const struct ggml_context * ctx) { +size_t ggml_v1_used_mem(const struct ggml_v1_context * ctx) { return ctx->objects_end->offs + ctx->objects_end->size; } -size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) { +size_t ggml_v1_set_scratch(struct ggml_v1_context * ctx, struct ggml_v1_scratch scratch) { const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0; ctx->scratch = scratch; @@ -1613,14 +1613,14 @@ size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) //////////////////////////////////////////////////////////////////////////////// -struct ggml_tensor * ggml_new_tensor_impl( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_impl( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int n_dims, const int* ne, void* data) { // always insert objects at the end of the context's memory pool - struct ggml_object * obj_cur = ctx->objects_end; + struct ggml_v1_object * obj_cur = ctx->objects_end; const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs; const size_t cur_size = obj_cur == NULL ? 
0 : obj_cur->size; @@ -1629,51 +1629,51 @@ struct ggml_tensor * ggml_new_tensor_impl( size_t size_needed = 0; if (data == NULL) { - size_needed += GGML_TYPE_SIZE[type]; + size_needed += GGML_V1_TYPE_SIZE[type]; for (int i = 0; i < n_dims; i++) { size_needed *= ne[i]; } - // align to GGML_MEM_ALIGN - size_needed = ((size_needed + GGML_MEM_ALIGN - 1)/GGML_MEM_ALIGN)*GGML_MEM_ALIGN; + // align to GGML_V1_MEM_ALIGN + size_needed = ((size_needed + GGML_V1_MEM_ALIGN - 1)/GGML_V1_MEM_ALIGN)*GGML_V1_MEM_ALIGN; } char * const mem_buffer = ctx->mem_buffer; - struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end); + struct ggml_v1_object * const obj_new = (struct ggml_v1_object *)(mem_buffer + cur_end); if (ctx->scratch.data == NULL || data != NULL) { - size_needed += sizeof(struct ggml_tensor); + size_needed += sizeof(struct ggml_v1_tensor); - if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) { - GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", - __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size); + if (cur_end + size_needed + GGML_V1_OBJECT_SIZE > ctx->mem_size) { + GGML_V1_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", + __func__, cur_end + size_needed + GGML_V1_OBJECT_SIZE, ctx->mem_size); assert(false); return NULL; } - *obj_new = (struct ggml_object) { - .offs = cur_end + GGML_OBJECT_SIZE, + *obj_new = (struct ggml_v1_object) { + .offs = cur_end + GGML_V1_OBJECT_SIZE, .size = size_needed, .next = NULL, }; } else { if (ctx->scratch.offs + size_needed > ctx->scratch.size) { - GGML_PRINT("%s: not enough space in the scratch memory\n", __func__); + GGML_V1_PRINT("%s: not enough space in the scratch memory\n", __func__); assert(false); return NULL; } - if (cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE > ctx->mem_size) { - GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", - __func__, cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE, ctx->mem_size); + if (cur_end + sizeof(struct ggml_v1_tensor) + GGML_V1_OBJECT_SIZE > ctx->mem_size) { + GGML_V1_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", + __func__, cur_end + sizeof(struct ggml_v1_tensor) + GGML_V1_OBJECT_SIZE, ctx->mem_size); assert(false); return NULL; } data = (char * const) ctx->scratch.data + ctx->scratch.offs; - *obj_new = (struct ggml_object) { - .offs = cur_end + GGML_OBJECT_SIZE, - .size = sizeof(struct ggml_tensor), + *obj_new = (struct ggml_v1_object) { + .offs = cur_end + GGML_V1_OBJECT_SIZE, + .size = sizeof(struct ggml_v1_tensor), .next = NULL, }; @@ -1693,16 +1693,16 @@ struct ggml_tensor * ggml_new_tensor_impl( //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size); - struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offs); + struct ggml_v1_tensor * const result = (struct ggml_v1_tensor *)(mem_buffer + obj_new->offs); - ggml_assert_aligned(result); + ggml_v1_assert_aligned(result); - *result = (struct ggml_tensor) { + *result = (struct ggml_v1_tensor) { /*.type =*/ type, /*.n_dims =*/ n_dims, /*.ne =*/ { 1, 1, 1, 1 }, /*.nb =*/ { 0, 0, 0, 0 }, - /*.op =*/ GGML_OP_NONE, + /*.op =*/ GGML_V1_OP_NONE, /*.is_param =*/ false, /*.grad =*/ NULL, /*.src0 =*/ NULL, @@ -1716,14 +1716,14 @@ struct ggml_tensor * ggml_new_tensor_impl( /*.pad =*/ { 0 }, }; - ggml_assert_aligned(result->data); + ggml_v1_assert_aligned(result->data); for (int 
i = 0; i < n_dims; i++) { result->ne[i] = ne[i]; } - result->nb[0] = GGML_TYPE_SIZE[type]; - for (int i = 1; i < GGML_MAX_DIMS; i++) { + result->nb[0] = GGML_V1_TYPE_SIZE[type]; + for (int i = 1; i < GGML_V1_MAX_DIMS; i++) { result->nb[i] = result->nb[i - 1]*result->ne[i - 1]; } @@ -1732,130 +1732,130 @@ struct ggml_tensor * ggml_new_tensor_impl( return result; } -struct ggml_tensor * ggml_new_tensor( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int n_dims, const int * ne) { - return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL); + return ggml_v1_new_tensor_impl(ctx, type, n_dims, ne, NULL); } -struct ggml_tensor * ggml_new_tensor_1d( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_1d( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int ne0) { - return ggml_new_tensor(ctx, type, 1, &ne0); + return ggml_v1_new_tensor(ctx, type, 1, &ne0); } -struct ggml_tensor * ggml_new_tensor_2d( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_2d( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int ne0, int ne1) { const int ne[2] = { ne0, ne1 }; - return ggml_new_tensor(ctx, type, 2, ne); + return ggml_v1_new_tensor(ctx, type, 2, ne); } -struct ggml_tensor * ggml_new_tensor_3d( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_3d( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int ne0, int ne1, int ne2) { const int ne[3] = { ne0, ne1, ne2 }; - return ggml_new_tensor(ctx, type, 3, ne); + return ggml_v1_new_tensor(ctx, type, 3, ne); } -struct ggml_tensor * ggml_new_tensor_4d( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_4d( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int ne0, int ne1, int ne2, int ne3) { const int ne[4] = { ne0, ne1, ne2, ne3 }; - return ggml_new_tensor(ctx, type, 4, ne); + return ggml_v1_new_tensor(ctx, type, 4, ne); } -struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) { +struct ggml_v1_tensor * ggml_v1_new_i32(struct ggml_v1_context * ctx, int32_t value) { ctx->scratch_save = ctx->scratch; ctx->scratch.data = NULL; - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_I32, 1); ctx->scratch = ctx->scratch_save; - ggml_set_i32(result, value); + ggml_v1_set_i32(result, value); return result; } -struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) { +struct ggml_v1_tensor * ggml_v1_new_f32(struct ggml_v1_context * ctx, float value) { ctx->scratch_save = ctx->scratch; ctx->scratch.data = NULL; - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, 1); ctx->scratch = ctx->scratch_save; - ggml_set_f32(result, value); + ggml_v1_set_f32(result, value); return result; } -struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) { - return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, NULL); +struct ggml_v1_tensor * ggml_v1_dup_tensor(struct ggml_v1_context * ctx, const struct ggml_v1_tensor * src) { + return ggml_v1_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, NULL); } -struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) { - memset(tensor->data, 
0, ggml_nbytes(tensor)); +struct ggml_v1_tensor * ggml_v1_set_zero(struct ggml_v1_tensor * tensor) { + memset(tensor->data, 0, ggml_v1_nbytes(tensor)); return tensor; } -struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { - const int n = ggml_nrows(tensor); +struct ggml_v1_tensor * ggml_v1_set_i32 (struct ggml_v1_tensor * tensor, int32_t value) { + const int n = ggml_v1_nrows(tensor); const int nc = tensor->ne[0]; const size_t n1 = tensor->nb[1]; char * const data = tensor->data; switch (tensor->type) { - case GGML_TYPE_I8: + case GGML_V1_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); + ggml_v1_vec_set_i8(nc, (int8_t *)(data + i*n1), value); } } break; - case GGML_TYPE_I16: + case GGML_V1_TYPE_I16: { assert(tensor->nb[0] == sizeof(int16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); + ggml_v1_vec_set_i16(nc, (int16_t *)(data + i*n1), value); } } break; - case GGML_TYPE_I32: + case GGML_V1_TYPE_I32: { assert(tensor->nb[0] == sizeof(int32_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); + ggml_v1_vec_set_i32(nc, (int32_t *)(data + i*n1), value); } } break; - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - assert(tensor->nb[0] == sizeof(ggml_fp16_t)); + assert(tensor->nb[0] == sizeof(ggml_v1_fp16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value); + ggml_v1_vec_set_f16(nc, (ggml_v1_fp16_t *)(data + i*n1), value); } } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { assert(tensor->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_set_f32(nc, (float *)(data + i*n1), value); + ggml_v1_vec_set_f32(nc, (float *)(data + i*n1), value); } } break; - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_COUNT: { assert(false); } break; @@ -1864,50 +1864,50 @@ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { return tensor; } -struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { - const int n = ggml_nrows(tensor); +struct ggml_v1_tensor * ggml_v1_set_f32(struct ggml_v1_tensor * tensor, float value) { + const int n = ggml_v1_nrows(tensor); const int nc = tensor->ne[0]; const size_t n1 = tensor->nb[1]; char * const data = tensor->data; switch (tensor->type) { - case GGML_TYPE_I8: + case GGML_V1_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); + ggml_v1_vec_set_i8(nc, (int8_t *)(data + i*n1), value); } } break; - case GGML_TYPE_I16: + case GGML_V1_TYPE_I16: { assert(tensor->nb[0] == sizeof(int16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); + ggml_v1_vec_set_i16(nc, (int16_t *)(data + i*n1), value); } } break; - case GGML_TYPE_I32: + case GGML_V1_TYPE_I32: { assert(tensor->nb[0] == sizeof(int32_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); + ggml_v1_vec_set_i32(nc, (int32_t *)(data + i*n1), value); } } break; - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - assert(tensor->nb[0] == sizeof(ggml_fp16_t)); + assert(tensor->nb[0] == sizeof(ggml_v1_fp16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value); + ggml_v1_vec_set_f16(nc, (ggml_v1_fp16_t *)(data + i*n1), value); } } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { assert(tensor->nb[0] == sizeof(float)); for (int 
i = 0; i < n; i++) { - ggml_vec_set_f32(nc, (float *)(data + i*n1), value); + ggml_v1_vec_set_f32(nc, (float *)(data + i*n1), value); } } break; - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_COUNT: { assert(false); } break; @@ -1916,168 +1916,168 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { return tensor; } -int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { +int32_t ggml_v1_get_i32_1d(const struct ggml_v1_tensor * tensor, int i) { switch (tensor->type) { - case GGML_TYPE_I8: + case GGML_V1_TYPE_I8: { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int8_t)); return ((int8_t *)(tensor->data))[i]; } break; - case GGML_TYPE_I16: + case GGML_V1_TYPE_I16: { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int16_t)); return ((int16_t *)(tensor->data))[i]; } break; - case GGML_TYPE_I32: + case GGML_V1_TYPE_I32: { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int32_t)); return ((int32_t *)(tensor->data))[i]; } break; - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(ggml_v1_fp16_t)); + return GGML_V1_FP16_TO_FP32(((ggml_v1_fp16_t *)(tensor->data))[i]); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(float)); return ((float *)(tensor->data))[i]; } break; - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } return 0.0f; } -void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { +void ggml_v1_set_i32_1d(const struct ggml_v1_tensor * tensor, int i, int32_t value) { switch (tensor->type) { - case GGML_TYPE_I8: + case GGML_V1_TYPE_I8: { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int8_t)); ((int8_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_I16: + case GGML_V1_TYPE_I16: { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int16_t)); ((int16_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_I32: + case GGML_V1_TYPE_I32: { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int32_t)); ((int32_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(ggml_v1_fp16_t)); + ((ggml_v1_fp16_t *)(tensor->data))[i] = GGML_V1_FP32_TO_FP16(value); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(float)); ((float *)(tensor->data))[i] = value; } break; - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } } -float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { +float ggml_v1_get_f32_1d(const struct ggml_v1_tensor * tensor, int i) { switch (tensor->type) { - case GGML_TYPE_I8: + case GGML_V1_TYPE_I8: { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int8_t)); return ((int8_t *)(tensor->data))[i]; } break; - case GGML_TYPE_I16: + case GGML_V1_TYPE_I16: { - GGML_ASSERT(tensor->nb[0] == 
sizeof(int16_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int16_t)); return ((int16_t *)(tensor->data))[i]; } break; - case GGML_TYPE_I32: + case GGML_V1_TYPE_I32: { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int32_t)); return ((int32_t *)(tensor->data))[i]; } break; - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(ggml_v1_fp16_t)); + return GGML_V1_FP16_TO_FP32(((ggml_v1_fp16_t *)(tensor->data))[i]); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(float)); return ((float *)(tensor->data))[i]; } break; - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } return 0.0f; } -void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { +void ggml_v1_set_f32_1d(const struct ggml_v1_tensor * tensor, int i, float value) { switch (tensor->type) { - case GGML_TYPE_I8: + case GGML_V1_TYPE_I8: { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int8_t)); ((int8_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_I16: + case GGML_V1_TYPE_I16: { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int16_t)); ((int16_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_I32: + case GGML_V1_TYPE_I32: { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(int32_t)); ((int32_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(ggml_v1_fp16_t)); + ((ggml_v1_fp16_t *)(tensor->data))[i] = GGML_V1_FP32_TO_FP16(value); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); + GGML_V1_ASSERT(tensor->nb[0] == sizeof(float)); ((float *)(tensor->data))[i] = value; } break; - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } } -void * ggml_get_data(const struct ggml_tensor * tensor) { +void * ggml_v1_get_data(const struct ggml_v1_tensor * tensor) { return tensor->data; } -float * ggml_get_data_f32(const struct ggml_tensor * tensor) { - assert(tensor->type == GGML_TYPE_F32); +float * ggml_v1_get_data_f32(const struct ggml_v1_tensor * tensor) { + assert(tensor->type == GGML_V1_TYPE_F32); return (float *)(tensor->data); } -struct ggml_tensor * ggml_view_tensor( - struct ggml_context * ctx, - const struct ggml_tensor * src) { - return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data); +struct ggml_v1_tensor * ggml_v1_view_tensor( + struct ggml_v1_context * ctx, + const struct ggml_v1_tensor * src) { + return ggml_v1_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data); } //////////////////////////////////////////////////////////////////////////////// -// ggml_dup +// ggml_v1_dup -struct ggml_tensor * ggml_dup_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_dup_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2085,36 +2085,36 @@ struct ggml_tensor * ggml_dup_impl( is_node = true; } - struct 
ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_DUP; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_DUP; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_dup( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_dup_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_dup( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_dup_impl(ctx, a, false); } -struct ggml_tensor * ggml_dup_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_dup_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_dup_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_dup_impl(ctx, a, true); } -// ggml_add +// ggml_v1_add -struct ggml_tensor * ggml_add_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct ggml_v1_tensor * ggml_v1_add_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b, bool inplace) { - assert(ggml_are_same_shape(a, b)); + assert(ggml_v1_are_same_shape(a, b)); bool is_node = false; @@ -2122,38 +2122,38 @@ struct ggml_tensor * ggml_add_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_ADD; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_ADD; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -struct ggml_tensor * ggml_add( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_add_impl(ctx, a, b, false); +struct ggml_v1_tensor * ggml_v1_add( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_add_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_add_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_add_impl(ctx, a, b, true); +struct ggml_v1_tensor * ggml_v1_add_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_add_impl(ctx, a, b, true); } -// ggml_sub +// ggml_v1_sub -struct ggml_tensor * ggml_sub_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct ggml_v1_tensor * ggml_v1_sub_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b, bool inplace) { - assert(ggml_are_same_shape(a, b)); + assert(ggml_v1_are_same_shape(a, b)); bool is_node = false; @@ -2161,38 +2161,38 @@ struct ggml_tensor * ggml_sub_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_SUB; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_SUB; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -struct ggml_tensor * ggml_sub( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_sub_impl(ctx, a, b, false); +struct ggml_v1_tensor * ggml_v1_sub( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_sub_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_sub_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_sub_impl(ctx, a, b, true); +struct ggml_v1_tensor * ggml_v1_sub_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_sub_impl(ctx, a, b, true); } -// ggml_mul +// ggml_v1_mul -struct ggml_tensor * ggml_mul_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct ggml_v1_tensor * ggml_v1_mul_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b, bool inplace) { - assert(ggml_are_same_shape(a, b)); + assert(ggml_v1_are_same_shape(a, b)); bool is_node = false; @@ -2204,38 +2204,38 @@ struct ggml_tensor * ggml_mul_impl( assert(is_node == false); } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_MUL; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_MUL; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -struct ggml_tensor * ggml_mul( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_mul_impl(ctx, a, b, false); +struct ggml_v1_tensor * ggml_v1_mul( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_mul_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_mul_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_mul_impl(ctx, a, b, true); +struct ggml_v1_tensor * ggml_v1_mul_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_mul_impl(ctx, a, b, true); } -// ggml_div +// ggml_v1_div -struct ggml_tensor * ggml_div_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct ggml_v1_tensor * ggml_v1_div_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b, bool inplace) { - assert(ggml_are_same_shape(a, b)); + assert(ggml_v1_are_same_shape(a, b)); bool is_node = false; @@ -2247,35 +2247,35 @@ struct ggml_tensor * ggml_div_impl( assert(is_node == false); } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_DIV; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_DIV; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -struct ggml_tensor * ggml_div( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_div_impl(ctx, a, b, false); +struct ggml_v1_tensor * ggml_v1_div( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_div_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_div_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_div_impl(ctx, a, b, true); +struct ggml_v1_tensor * ggml_v1_div_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_div_impl(ctx, a, b, true); } -// ggml_sqr +// ggml_v1_sqr -struct ggml_tensor * ggml_sqr_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_sqr_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2283,33 +2283,33 @@ struct ggml_tensor * ggml_sqr_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_SQR; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_SQR; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_sqr( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqr_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_sqr( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_sqr_impl(ctx, a, false); } -struct ggml_tensor * ggml_sqr_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqr_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_sqr_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_sqr_impl(ctx, a, true); } -// ggml_sqrt +// ggml_v1_sqrt -struct ggml_tensor * ggml_sqrt_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_sqrt_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2317,54 +2317,54 @@ struct ggml_tensor * ggml_sqrt_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_SQRT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_SQRT; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_sqrt( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqrt_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_sqrt( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_sqrt_impl(ctx, a, false); } -struct ggml_tensor * ggml_sqrt_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqrt_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_sqrt_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_sqrt_impl(ctx, a, true); } -// ggml_sum +// ggml_v1_sum -struct ggml_tensor * ggml_sum( - struct ggml_context * ctx, - struct ggml_tensor * a) { +struct ggml_v1_tensor * ggml_v1_sum( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { bool is_node = false; if (a->grad) { is_node = true; } - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_1d(ctx, a->type, 1); - result->op = GGML_OP_SUM; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_SUM; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -// ggml_mean +// ggml_v1_mean -struct ggml_tensor * ggml_mean( - struct ggml_context * ctx, - struct ggml_tensor * a) { +struct ggml_v1_tensor * ggml_v1_mean( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { bool is_node = false; if (a->grad) { @@ -2372,24 +2372,24 @@ struct ggml_tensor * ggml_mean( is_node = true; } - int ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne); + int ne[GGML_V1_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] }; + struct ggml_v1_tensor * result = ggml_v1_new_tensor(ctx, GGML_V1_TYPE_F32, a->n_dims, ne); - result->op = GGML_OP_MEAN; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_MEAN; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -// ggml_repeat +// ggml_v1_repeat -struct ggml_tensor * ggml_repeat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - assert(ggml_can_repeat(a, b)); +struct ggml_v1_tensor * ggml_v1_repeat( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + assert(ggml_v1_can_repeat(a, b)); bool is_node = false; @@ -2397,25 +2397,25 @@ struct ggml_tensor * ggml_repeat( is_node = true; } - if (ggml_are_same_shape(a, b) && !is_node) { + if (ggml_v1_are_same_shape(a, b) && !is_node) { return a; } - struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne); + struct ggml_v1_tensor * result = ggml_v1_new_tensor(ctx, a->type, b->n_dims, b->ne); - result->op = GGML_OP_REPEAT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_REPEAT; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -// ggml_abs +// ggml_v1_abs -struct ggml_tensor * ggml_abs_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_abs_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2423,34 +2423,34 @@ struct ggml_tensor * ggml_abs_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_ABS; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_ABS; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_abs( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_abs_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_abs( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_abs_impl(ctx, a, false); } -struct ggml_tensor * ggml_abs_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_abs_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_abs_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_abs_impl(ctx, a, true); } -// ggml_sgn +// ggml_v1_sgn -struct ggml_tensor * ggml_sgn_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_sgn_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2458,33 +2458,33 @@ struct ggml_tensor * ggml_sgn_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_SGN; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_SGN; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_sgn( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sgn_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_sgn( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_sgn_impl(ctx, a, false); } -struct ggml_tensor * ggml_sgn_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sgn_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_sgn_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_sgn_impl(ctx, a, true); } -// ggml_neg +// ggml_v1_neg -struct ggml_tensor * ggml_neg_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_neg_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2492,33 +2492,33 @@ struct ggml_tensor * ggml_neg_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_NEG; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_NEG; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_neg( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_neg_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_neg( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_neg_impl(ctx, a, false); } -struct ggml_tensor * ggml_neg_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_neg_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_neg_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_neg_impl(ctx, a, true); } -// ggml_step +// ggml_v1_step -struct ggml_tensor * ggml_step_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_step_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2526,33 +2526,33 @@ struct ggml_tensor * ggml_step_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_STEP; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_STEP; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_step( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_step_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_step( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_step_impl(ctx, a, false); } -struct ggml_tensor * ggml_step_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_step_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_step_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_step_impl(ctx, a, true); } -// ggml_relu +// ggml_v1_relu -struct ggml_tensor * ggml_relu_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_relu_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2560,33 +2560,33 @@ struct ggml_tensor * ggml_relu_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_RELU; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_RELU; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_relu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_relu_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_relu( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_relu_impl(ctx, a, false); } -struct ggml_tensor * ggml_relu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_relu_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_relu_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_relu_impl(ctx, a, true); } -// ggml_gelu +// ggml_v1_gelu -struct ggml_tensor * ggml_gelu_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_gelu_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2594,33 +2594,33 @@ struct ggml_tensor * ggml_gelu_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_GELU; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_GELU; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_gelu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_gelu_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_gelu( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_gelu_impl(ctx, a, false); } -struct ggml_tensor * ggml_gelu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_gelu_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_gelu_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_gelu_impl(ctx, a, true); } -// ggml_norm +// ggml_v1_norm -struct ggml_tensor * ggml_norm_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_norm_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, bool inplace) { bool is_node = false; @@ -2629,35 +2629,35 @@ struct ggml_tensor * ggml_norm_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); - result->op = GGML_OP_NORM; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_NORM; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; // TODO: maybe store epsilon here? 
return result; } -struct ggml_tensor * ggml_norm( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_norm_impl(ctx, a, false); +struct ggml_v1_tensor * ggml_v1_norm( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_norm_impl(ctx, a, false); } -struct ggml_tensor * ggml_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_norm_impl(ctx, a, true); +struct ggml_v1_tensor * ggml_v1_norm_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { + return ggml_v1_norm_impl(ctx, a, true); } -// ggml_mul_mat +// ggml_v1_mul_mat -struct ggml_tensor * ggml_mul_mat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - assert(ggml_can_mul_mat(a, b)); +struct ggml_v1_tensor * ggml_v1_mul_mat( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + assert(ggml_v1_can_mul_mat(a, b)); bool is_node = false; @@ -2666,25 +2666,25 @@ struct ggml_tensor * ggml_mul_mat( } const int ne[4] = { a->ne[1], b->ne[1], a->ne[2], b->ne[3] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne); + struct ggml_v1_tensor * result = ggml_v1_new_tensor(ctx, GGML_V1_TYPE_F32, MIN(a->n_dims, b->n_dims), ne); - result->op = GGML_OP_MUL_MAT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_MUL_MAT; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -// ggml_scale +// ggml_v1_scale -struct ggml_tensor * ggml_scale_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct ggml_v1_tensor * ggml_v1_scale_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b, bool inplace) { - assert(ggml_is_scalar(b)); - assert(ggml_is_padded_1d(a)); + assert(ggml_v1_is_scalar(b)); + assert(ggml_v1_is_padded_1d(a)); bool is_node = false; @@ -2694,39 +2694,39 @@ struct ggml_tensor * ggml_scale_impl( } // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); + //struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = ggml_v1_view_tensor(ctx, a); - result->op = GGML_OP_SCALE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_SCALE; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -struct ggml_tensor * ggml_scale( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_scale_impl(ctx, a, b, false); +struct ggml_v1_tensor * ggml_v1_scale( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_scale_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_scale_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_scale_impl(ctx, a, b, true); +struct ggml_v1_tensor * ggml_v1_scale_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_scale_impl(ctx, a, b, true); } -// ggml_cpy +// ggml_v1_cpy -struct ggml_tensor * ggml_cpy_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct ggml_v1_tensor * ggml_v1_cpy_impl( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b, bool inplace) { - assert(ggml_nelements(a) == ggml_nelements(b)); + assert(ggml_v1_nelements(a) == ggml_v1_nelements(b)); bool is_node = false; @@ -2736,39 +2736,39 @@ struct ggml_tensor * ggml_cpy_impl( } // make a view of the destination - struct ggml_tensor * result = ggml_view_tensor(ctx, b); + struct ggml_v1_tensor * result = ggml_v1_view_tensor(ctx, b); - result->op = GGML_OP_CPY; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_CPY; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -struct ggml_tensor * ggml_cpy( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_cpy_impl(ctx, a, b, false); +struct ggml_v1_tensor * ggml_v1_cpy( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_cpy_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_cpy_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_cpy_impl(ctx, a, b, true); +struct ggml_v1_tensor * ggml_v1_cpy_inplace( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + return ggml_v1_cpy_impl(ctx, a, b, true); } -// ggml_reshape +// ggml_v1_reshape -struct ggml_tensor * ggml_reshape( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - assert(ggml_is_contiguous(a)); - assert(ggml_is_contiguous(b)); - assert(ggml_nelements(a) == ggml_nelements(b)); +struct ggml_v1_tensor * ggml_v1_reshape( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + assert(ggml_v1_is_contiguous(a)); + assert(ggml_v1_is_contiguous(b)); + assert(ggml_v1_nelements(a) == ggml_v1_nelements(b)); bool is_node = false; @@ -2777,23 +2777,23 @@ struct ggml_tensor * ggml_reshape( is_node = true; } - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data); - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_RESHAPE; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_reshape_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_reshape_2d( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int ne0, int ne1) { - assert(ggml_is_contiguous(a)); - assert(ggml_nelements(a) == ne0*ne1); + assert(ggml_v1_is_contiguous(a)); + assert(ggml_v1_nelements(a) == ne0*ne1); bool is_node = false; @@ -2803,24 +2803,24 @@ struct ggml_tensor * ggml_reshape_2d( } const int ne[2] = { ne0, ne1 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_impl(ctx, a->type, 2, ne, a->data); - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_RESHAPE; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -struct ggml_tensor * ggml_reshape_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_reshape_3d( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int ne0, int ne1, int ne2) { - assert(ggml_is_contiguous(a)); - assert(ggml_nelements(a) == ne0*ne1*ne2); + assert(ggml_v1_is_contiguous(a)); + assert(ggml_v1_nelements(a) == ne0*ne1*ne2); bool is_node = false; @@ -2830,30 +2830,30 @@ struct ggml_tensor * ggml_reshape_3d( } const int ne[3] = { ne0, ne1, ne2 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_impl(ctx, a->type, 3, ne, a->data); - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_RESHAPE; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -// ggml_view_1d +// ggml_v1_view_1d -struct ggml_tensor * ggml_view_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_view_1d( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int ne0, size_t offset) { if (a->grad) { assert(false); // gradient propagation is not supported } - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset); - result->op = GGML_OP_VIEW; + result->op = GGML_V1_OP_VIEW; result->grad = NULL; result->src0 = a; result->src1 = NULL; // TODO: maybe store the offset here? 
@@ -2861,11 +2861,11 @@ struct ggml_tensor * ggml_view_1d( return result; } -// ggml_view_2d +// ggml_v1_view_2d -struct ggml_tensor * ggml_view_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_view_2d( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int ne0, int ne1, size_t nb1, @@ -2874,15 +2874,15 @@ struct ggml_tensor * ggml_view_2d( assert(false); // gradient propagation is not supported } - const int ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 }; + const int ne[GGML_V1_MAX_DIMS] = { ne0, ne1, 1, 1 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset); result->nb[1] = nb1; result->nb[2] = result->nb[1]*ne1; result->nb[3] = result->nb[2]; - result->op = GGML_OP_VIEW; + result->op = GGML_V1_OP_VIEW; result->grad = NULL; result->src0 = a; result->src1 = NULL; // TODO: maybe store the offset here? @@ -2890,19 +2890,19 @@ struct ggml_tensor * ggml_view_2d( return result; } -// ggml_permute +// ggml_v1_permute -struct ggml_tensor * ggml_permute( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_permute( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int axis0, int axis1, int axis2, int axis3) { - assert(axis0 >= 0 && axis0 < GGML_MAX_DIMS); - assert(axis1 >= 0 && axis1 < GGML_MAX_DIMS); - assert(axis2 >= 0 && axis2 < GGML_MAX_DIMS); - assert(axis3 >= 0 && axis3 < GGML_MAX_DIMS); + assert(axis0 >= 0 && axis0 < GGML_V1_MAX_DIMS); + assert(axis1 >= 0 && axis1 < GGML_V1_MAX_DIMS); + assert(axis2 >= 0 && axis2 < GGML_V1_MAX_DIMS); + assert(axis3 >= 0 && axis3 < GGML_V1_MAX_DIMS); assert(axis0 != axis1); assert(axis0 != axis2); @@ -2918,10 +2918,10 @@ struct ggml_tensor * ggml_permute( is_node = true; } - struct ggml_tensor * result = ggml_view_tensor(ctx, a); + struct ggml_v1_tensor * result = ggml_v1_view_tensor(ctx, a); - int ne[GGML_MAX_DIMS]; - int nb[GGML_MAX_DIMS]; + int ne[GGML_V1_MAX_DIMS]; + int nb[GGML_V1_MAX_DIMS]; ne[axis0] = a->ne[0]; ne[axis1] = a->ne[1]; @@ -2943,19 +2943,19 @@ struct ggml_tensor * ggml_permute( result->nb[2] = nb[2]; result->nb[3] = nb[3]; - result->op = GGML_OP_PERMUTE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_PERMUTE; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; // TODO: maybe store the permutation here? return result; } -// ggml_transpose +// ggml_v1_transpose -struct ggml_tensor * ggml_transpose( - struct ggml_context * ctx, - struct ggml_tensor * a) { +struct ggml_v1_tensor * ggml_v1_transpose( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { bool is_node = false; if (a->grad) { @@ -2963,7 +2963,7 @@ struct ggml_tensor * ggml_transpose( is_node = true; } - struct ggml_tensor * result = ggml_view_tensor(ctx, a); + struct ggml_v1_tensor * result = ggml_v1_view_tensor(ctx, a); result->ne[0] = a->ne[1]; result->ne[1] = a->ne[0]; @@ -2971,21 +2971,21 @@ struct ggml_tensor * ggml_transpose( result->nb[0] = a->nb[1]; result->nb[1] = a->nb[0]; - result->op = GGML_OP_TRANSPOSE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_TRANSPOSE; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -// ggml_get_rows +// ggml_v1_get_rows -struct ggml_tensor * ggml_get_rows( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - assert(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32); +struct ggml_v1_tensor * ggml_v1_get_rows( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + assert(ggml_v1_is_matrix(a) && ggml_v1_is_vector(b) && b->type == GGML_V1_TYPE_I32); bool is_node = false; @@ -2995,22 +2995,22 @@ struct ggml_tensor * ggml_get_rows( } // TODO: implement non F32 return - //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]); - struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]); + //struct ggml_v1_tensor * result = ggml_v1_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]); + struct ggml_v1_tensor * result = ggml_v1_new_tensor_2d(ctx, GGML_V1_TYPE_F32, a->ne[0], b->ne[0]); - result->op = GGML_OP_GET_ROWS; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_GET_ROWS; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -// ggml_diag_mask_inf +// ggml_v1_diag_mask_inf -struct ggml_tensor * ggml_diag_mask_inf( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_diag_mask_inf( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int n_past) { bool is_node = false; @@ -3020,23 +3020,23 @@ struct ggml_tensor * ggml_diag_mask_inf( } // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - struct ggml_tensor * b = ggml_new_i32(ctx, n_past); + //struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = ggml_v1_view_tensor(ctx, a); + struct ggml_v1_tensor * b = ggml_v1_new_i32(ctx, n_past); - result->op = GGML_OP_DIAG_MASK_INF; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_DIAG_MASK_INF; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -// ggml_soft_max +// ggml_v1_soft_max -struct ggml_tensor * ggml_soft_max( - struct ggml_context * ctx, - struct ggml_tensor * a) { +struct ggml_v1_tensor * ggml_v1_soft_max( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a) { bool is_node = false; if (a->grad) { @@ -3045,22 +3045,22 @@ struct ggml_tensor * ggml_soft_max( } // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); + //struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = ggml_v1_view_tensor(ctx, a); - result->op = GGML_OP_SOFT_MAX; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_SOFT_MAX; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = NULL; return result; } -// ggml_rope +// ggml_v1_rope -struct ggml_tensor * ggml_rope( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_rope( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int n_past, int n_dims, int mode) { @@ -3073,29 +3073,29 @@ struct ggml_tensor * ggml_rope( } // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); + //struct ggml_v1_tensor * result = inplace ? ggml_v1_view_tensor(ctx, a) : ggml_v1_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = ggml_v1_view_tensor(ctx, a); - struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3); + struct ggml_v1_tensor * b = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_I32, 3); ((int32_t *) b->data)[0] = n_past; ((int32_t *) b->data)[1] = n_dims; ((int32_t *) b->data)[2] = mode; - result->op = GGML_OP_ROPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_ROPE; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -// ggml_conv_1d_1s +// ggml_v1_conv_1d_1s -struct ggml_tensor * ggml_conv_1d_1s( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - assert(ggml_is_matrix(b)); +struct ggml_v1_tensor * ggml_v1_conv_1d_1s( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + assert(ggml_v1_is_matrix(b)); assert(a->ne[1] == b->ne[1]); assert(a->ne[3] == 1); bool is_node = false; @@ -3106,23 +3106,23 @@ struct ggml_tensor * ggml_conv_1d_1s( } const int ne[4] = { b->ne[0], a->ne[2], 1, 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); + struct ggml_v1_tensor * result = ggml_v1_new_tensor(ctx, GGML_V1_TYPE_F32, 2, ne); - result->op = GGML_OP_CONV_1D_1S; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_CONV_1D_1S; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -// ggml_conv_1d_2s +// ggml_v1_conv_1d_2s -struct ggml_tensor * ggml_conv_1d_2s( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - assert(ggml_is_matrix(b)); +struct ggml_v1_tensor * ggml_v1_conv_1d_2s( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b) { + assert(ggml_v1_is_matrix(b)); assert(a->ne[1] == b->ne[1]); assert(a->ne[3] == 1); bool is_node = false; @@ -3133,71 +3133,71 @@ struct ggml_tensor * ggml_conv_1d_2s( } const int ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); + struct ggml_v1_tensor * result = ggml_v1_new_tensor(ctx, GGML_V1_TYPE_F32, 2, ne); - result->op = GGML_OP_CONV_1D_2S; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_CONV_1D_2S; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b; return result; } -// ggml_flash_attn +// ggml_v1_flash_attn -struct ggml_tensor * ggml_flash_attn( - struct ggml_context * ctx, - struct ggml_tensor * q, - struct ggml_tensor * k, - struct ggml_tensor * v, +struct ggml_v1_tensor * ggml_v1_flash_attn( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * q, + struct ggml_v1_tensor * k, + struct ggml_v1_tensor * v, bool masked) { - assert(ggml_can_mul_mat(k, q)); + assert(ggml_v1_can_mul_mat(k, q)); // TODO: check if vT can be multiplied by (k*qT) bool is_node = false; if (q->grad || k->grad || v->grad) { - GGML_ASSERT(false); // TODO: implement backward + GGML_V1_ASSERT(false); // TODO: implement backward is_node = true; } - //struct ggml_tensor * result = ggml_dup_tensor(ctx, q); - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, q->ne); + //struct ggml_v1_tensor * result = ggml_v1_dup_tensor(ctx, q); + struct ggml_v1_tensor * result = ggml_v1_new_tensor(ctx, GGML_V1_TYPE_F32, 4, q->ne); - result->op = GGML_OP_FLASH_ATTN; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_FLASH_ATTN; + result->grad = is_node ? ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = q; result->src1 = k; result->opt[0] = v; - result->opt[1] = ggml_new_i32(ctx, masked ? 1 : 0); + result->opt[1] = ggml_v1_new_i32(ctx, masked ? 1 : 0); return result; } -// ggml_flash_ff +// ggml_v1_flash_ff -struct ggml_tensor * ggml_flash_ff( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b0, - struct ggml_tensor * b1, - struct ggml_tensor * c0, - struct ggml_tensor * c1) { - assert(ggml_can_mul_mat(b0, a)); +struct ggml_v1_tensor * ggml_v1_flash_ff( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b0, + struct ggml_v1_tensor * b1, + struct ggml_v1_tensor * c0, + struct ggml_v1_tensor * c1) { + assert(ggml_v1_can_mul_mat(b0, a)); // TODO: more checks bool is_node = false; if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) { - GGML_ASSERT(false); // TODO: implement backward + GGML_V1_ASSERT(false); // TODO: implement backward is_node = true; } - //struct ggml_tensor * result = ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, a->ne); + //struct ggml_v1_tensor * result = ggml_v1_dup_tensor(ctx, a); + struct ggml_v1_tensor * result = ggml_v1_new_tensor(ctx, GGML_V1_TYPE_F32, 4, a->ne); - result->op = GGML_OP_FLASH_FF; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = GGML_V1_OP_FLASH_FF; + result->grad = is_node ? 
ggml_v1_dup_tensor(ctx, result) : NULL; result->src0 = a; result->src1 = b0; result->opt[0] = b1; @@ -3209,26 +3209,26 @@ struct ggml_tensor * ggml_flash_ff( //////////////////////////////////////////////////////////////////////////////// -void ggml_set_param( - struct ggml_context * ctx, - struct ggml_tensor * tensor) { +void ggml_v1_set_param( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * tensor) { tensor->is_param = true; assert(tensor->grad == NULL); - tensor->grad = ggml_dup_tensor(ctx, tensor); + tensor->grad = ggml_v1_dup_tensor(ctx, tensor); } -// ggml_compute_forward_dup +// ggml_v1_compute_forward_dup -static void ggml_compute_forward_dup_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_dup_f16( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_is_contiguous(dst)); - assert(ggml_nelements(dst) == ggml_nelements(src0)); + assert(ggml_v1_is_contiguous(dst)); + assert(ggml_v1_nelements(dst) == ggml_v1_nelements(src0)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -3242,13 +3242,13 @@ static void ggml_compute_forward_dup_f16( const size_t nb02 = src0->nb[2]; const size_t nb03 = src0->nb[3]; - if (ggml_is_contiguous(src0) && src0->type == dst->type) { - memcpy(dst->data, src0->data, ggml_nelements(dst) * GGML_TYPE_SIZE[src0->type]); + if (ggml_v1_is_contiguous(src0) && src0->type == dst->type) { + memcpy(dst->data, src0->data, ggml_v1_nelements(dst) * GGML_V1_TYPE_SIZE[src0->type]); return; } - if (src0->nb[0] == sizeof(ggml_fp16_t)) { - if (dst->type == GGML_TYPE_F16) { + if (src0->nb[0] == sizeof(ggml_v1_fp16_t)) { + if (dst->type == GGML_V1_TYPE_F16) { int id = 0; const size_t rs = ne00*nb00; @@ -3264,7 +3264,7 @@ static void ggml_compute_forward_dup_f16( } } } - } else if (dst->type == GGML_TYPE_F32) { + } else if (dst->type == GGML_V1_TYPE_F32) { int id = 0; float * dst_ptr = (float *) dst->data; @@ -3272,21 +3272,21 @@ static void ggml_compute_forward_dup_f16( for (int i02 = 0; i02 < ne02; i02++) { for (int i01 = 0; i01 < ne01; i01++) { for (int i00 = 0; i00 < ne00; i00++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + const ggml_v1_fp16_t * src0_ptr = (ggml_v1_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr); + dst_ptr[id] = GGML_V1_FP16_TO_FP32(*src0_ptr); id++; } } } } } else { - GGML_ASSERT(false); // TODO: implement + GGML_V1_ASSERT(false); // TODO: implement } } else { //printf("%s: this is not optimal - fix me\n", __func__); - if (dst->type == GGML_TYPE_F32) { + if (dst->type == GGML_V1_TYPE_F32) { int id = 0; float * dst_ptr = (float *) dst->data; @@ -3294,23 +3294,23 @@ static void ggml_compute_forward_dup_f16( for (int i02 = 0; i02 < ne02; i02++) { for (int i01 = 0; i01 < ne01; i01++) { for (int i00 = 0; i00 < ne00; i00++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + const ggml_v1_fp16_t * src0_ptr = (ggml_v1_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr); + dst_ptr[id] = GGML_V1_FP16_TO_FP32(*src0_ptr); id++; } } } } - } else if 
(dst->type == GGML_TYPE_F16) { + } else if (dst->type == GGML_V1_TYPE_F16) { int id = 0; - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; + ggml_v1_fp16_t * dst_ptr = (ggml_v1_fp16_t *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { for (int i01 = 0; i01 < ne01; i01++) { for (int i00 = 0; i00 < ne00; i00++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + const ggml_v1_fp16_t * src0_ptr = (ggml_v1_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); dst_ptr[id] = *src0_ptr; id++; @@ -3319,20 +3319,20 @@ static void ggml_compute_forward_dup_f16( } } } else { - GGML_ASSERT(false); // TODO: implement + GGML_V1_ASSERT(false); // TODO: implement } } } -static void ggml_compute_forward_dup_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); +static void ggml_v1_compute_forward_dup_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(params->ith == 0); + GGML_V1_ASSERT(ggml_v1_is_contiguous(dst)); + GGML_V1_ASSERT(ggml_v1_nelements(dst) == ggml_v1_nelements(src0)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -3346,13 +3346,13 @@ static void ggml_compute_forward_dup_f32( const size_t nb02 = src0->nb[2]; const size_t nb03 = src0->nb[3]; - if (ggml_is_contiguous(src0) && src0->type == dst->type) { - memcpy(dst->data, src0->data, ggml_nelements(dst) * GGML_TYPE_SIZE[src0->type]); + if (ggml_v1_is_contiguous(src0) && src0->type == dst->type) { + memcpy(dst->data, src0->data, ggml_v1_nelements(dst) * GGML_V1_TYPE_SIZE[src0->type]); return; } if (src0->nb[0] == sizeof(float)) { - if (dst->type == GGML_TYPE_F32) { + if (dst->type == GGML_V1_TYPE_F32) { int id = 0; const size_t rs = ne00*nb00; @@ -3368,9 +3368,9 @@ static void ggml_compute_forward_dup_f32( } } } - } else if (dst->type == GGML_TYPE_F16) { + } else if (dst->type == GGML_V1_TYPE_F16) { int id = 0; - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; + ggml_v1_fp16_t * dst_ptr = (ggml_v1_fp16_t *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { @@ -3378,19 +3378,19 @@ static void ggml_compute_forward_dup_f32( for (int i00 = 0; i00 < ne00; i00++) { const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); + dst_ptr[id] = GGML_V1_FP32_TO_FP16(*src0_ptr); id++; } } } } } else { - GGML_ASSERT(false); // TODO: implement + GGML_V1_ASSERT(false); // TODO: implement } } else { //printf("%s: this is not optimal - fix me\n", __func__); - if (dst->type == GGML_TYPE_F32) { + if (dst->type == GGML_V1_TYPE_F32) { int id = 0; float * dst_ptr = (float *) dst->data; @@ -3406,9 +3406,9 @@ static void ggml_compute_forward_dup_f32( } } } - } else if (dst->type == GGML_TYPE_F16) { + } else if (dst->type == GGML_V1_TYPE_F16) { int id = 0; - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; + ggml_v1_fp16_t * dst_ptr = (ggml_v1_fp16_t *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { @@ -3416,58 +3416,58 @@ static void ggml_compute_forward_dup_f32( 
for (int i00 = 0; i00 < ne00; i00++) { const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); + dst_ptr[id] = GGML_V1_FP32_TO_FP16(*src0_ptr); id++; } } } } } else { - GGML_ASSERT(false); // TODO: implement + GGML_V1_ASSERT(false); // TODO: implement } } } -static void ggml_compute_forward_dup( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_dup( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - ggml_compute_forward_dup_f16(params, src0, dst); + ggml_v1_compute_forward_dup_f16(params, src0, dst); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_dup_f32(params, src0, dst); + ggml_v1_compute_forward_dup_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } } -// ggml_compute_forward_add +// ggml_v1_compute_forward_add -static void ggml_compute_forward_add_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); +static void ggml_v1_compute_forward_add_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(ggml_v1_are_same_shape(src0, src1) && ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; const size_t nb00 = src0->nb[0]; @@ -3479,15 +3479,15 @@ static void ggml_compute_forward_add_f32( const size_t nb0 = dst->nb[0]; const size_t nb1 = dst->nb[1]; - GGML_ASSERT( nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); + GGML_V1_ASSERT( nb0 == sizeof(float)); + GGML_V1_ASSERT(nb00 == sizeof(float)); if (nb10 == sizeof(float)) { const int j0 = (n/nth)*ith; const int j1 = ith == nth - 1 ? 
n : (n/nth)*(ith + 1); for (int j = j0; j < j1; j++) { - ggml_vec_add_f32(nc, + ggml_v1_vec_add_f32(nc, (float *) ((char *) dst->data + j*nb1), (float *) ((char *) src0->data + j*nb01), (float *) ((char *) src1->data + j*nb11)); @@ -3506,42 +3506,42 @@ static void ggml_compute_forward_add_f32( } } -static void ggml_compute_forward_add( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_add( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_add_f32(params, src0, src1, dst); + ggml_v1_compute_forward_add_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_sub +// ggml_v1_compute_forward_sub -static void ggml_compute_forward_sub_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sub_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, src1) && ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); @@ -3549,49 +3549,49 @@ static void ggml_compute_forward_sub_f32( assert(src1->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_sub_f32(nc, + ggml_v1_vec_sub_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1])), (float *) ((char *) src1->data + i*(src1->nb[1]))); } } -static void ggml_compute_forward_sub( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sub( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_sub_f32(params, src0, src1, dst); + ggml_v1_compute_forward_sub_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_mul +// ggml_v1_compute_forward_mul -static void ggml_compute_forward_mul_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void 
ggml_v1_compute_forward_mul_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, src1) && ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); @@ -3599,49 +3599,49 @@ static void ggml_compute_forward_mul_f32( assert(src1->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_mul_f32(nc, + ggml_v1_vec_mul_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1])), (float *) ((char *) src1->data + i*(src1->nb[1]))); } } -static void ggml_compute_forward_mul( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_mul( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_mul_f32(params, src0, src1, dst); + ggml_v1_compute_forward_mul_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_div +// ggml_v1_compute_forward_div -static void ggml_compute_forward_div_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_div_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, src1) && ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); @@ -3649,140 +3649,140 @@ static void ggml_compute_forward_div_f32( assert(src1->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_div_f32(nc, + ggml_v1_vec_div_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1])), (float *) ((char *) src1->data + i*(src1->nb[1]))); } } -static void ggml_compute_forward_div( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_div( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch 
(src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_div_f32(params, src0, src1, dst); + ggml_v1_compute_forward_div_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_sqr +// ggml_v1_compute_forward_sqr -static void ggml_compute_forward_sqr_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sqr_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_sqr_f32(nc, + ggml_v1_vec_sqr_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_sqr( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sqr( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_sqr_f32(params, src0, dst); + ggml_v1_compute_forward_sqr_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_sqrt +// ggml_v1_compute_forward_sqrt -static void ggml_compute_forward_sqrt_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sqrt_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_sqrt_f32(nc, + ggml_v1_vec_sqrt_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_sqrt( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sqrt( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + 
struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_sqrt_f32(params, src0, dst); + ggml_v1_compute_forward_sqrt_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_sum +// ggml_v1_compute_forward_sum -static void ggml_compute_forward_sum_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sum_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_is_scalar(dst)); + assert(ggml_v1_is_scalar(dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - assert(ggml_is_scalar(dst)); + assert(ggml_v1_is_scalar(dst)); assert(src0->nb[0] == sizeof(float)); const int ne00 = src0->ne[0]; @@ -3797,7 +3797,7 @@ static void ggml_compute_forward_sum_f32( for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { for (int i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32(ne00, + ggml_v1_vec_sum_f32(ne00, (float *) (dst->data), (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); } @@ -3805,35 +3805,35 @@ static void ggml_compute_forward_sum_f32( } } -static void ggml_compute_forward_sum( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sum( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_sum_f32(params, src0, dst); + ggml_v1_compute_forward_sum_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_mean +// ggml_v1_compute_forward_mean -static void ggml_compute_forward_mean_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_mean_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -3870,7 +3870,7 @@ static void ggml_compute_forward_mean_f32( for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { for (int i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32(ne00, + ggml_v1_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); @@ -3880,36 +3880,36 @@ static void ggml_compute_forward_mean_f32( } } -static void ggml_compute_forward_mean( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, 
- struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_mean( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_mean_f32(params, src0, dst); + ggml_v1_compute_forward_mean_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_repeat +// ggml_v1_compute_forward_repeat -static void ggml_compute_forward_repeat_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_repeat_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_can_repeat(src0, dst)); + assert(ggml_v1_can_repeat(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -3923,8 +3923,8 @@ static void ggml_compute_forward_repeat_f32( const int nr = dst->ne[1]; const int nc0 = src0->ne[0]; const int nr0 = src0->ne[1]; - const int ncr = nc/nc0; // guaranteed to be an integer due to the check in ggml_can_repeat - const int nrr = nr/nr0; // guaranteed to be an integer due to the check in ggml_can_repeat + const int ncr = nc/nc0; // guaranteed to be an integer due to the check in ggml_v1_can_repeat + const int nrr = nr/nr0; // guaranteed to be an integer due to the check in ggml_v1_can_repeat // TODO: support for transposed / permuted tensors assert( dst->nb[0] == sizeof(float)); @@ -3934,7 +3934,7 @@ static void ggml_compute_forward_repeat_f32( for (int i = 0; i < nrr; i++) { for (int j = 0; j < ncr; j++) { for (int k = 0; k < nr0; k++) { - ggml_vec_cpy_f32(nc0, + ggml_v1_vec_cpy_f32(nc0, (float *) ((char *) dst->data + (i*nr0 + k)*( dst->nb[1]) + j*nc0*( dst->nb[0])), (float *) ((char *) src0->data + ( k)*(src0->nb[1]))); } @@ -3942,267 +3942,267 @@ static void ggml_compute_forward_repeat_f32( } } -static void ggml_compute_forward_repeat( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_repeat( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_repeat_f32(params, src0, dst); + ggml_v1_compute_forward_repeat_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_abs +// ggml_v1_compute_forward_abs -static void ggml_compute_forward_abs_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_abs_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - 
assert(ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_abs_f32(nc, + ggml_v1_vec_abs_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_abs( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_abs( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_abs_f32(params, src0, dst); + ggml_v1_compute_forward_abs_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_sgn +// ggml_v1_compute_forward_sgn -static void ggml_compute_forward_sgn_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sgn_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_sgn_f32(nc, + ggml_v1_vec_sgn_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_sgn( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_sgn( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_sgn_f32(params, src0, dst); + ggml_v1_compute_forward_sgn_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_neg +// ggml_v1_compute_forward_neg -static void ggml_compute_forward_neg_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_neg_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { 
assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_neg_f32(nc, + ggml_v1_vec_neg_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_neg( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_neg( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_neg_f32(params, src0, dst); + ggml_v1_compute_forward_neg_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_step +// ggml_v1_compute_forward_step -static void ggml_compute_forward_step_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_step_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_step_f32(nc, + ggml_v1_vec_step_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_step( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_step( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_step_f32(params, src0, dst); + ggml_v1_compute_forward_step_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_relu +// ggml_v1_compute_forward_relu -static void ggml_compute_forward_relu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_relu_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + 
struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_relu_f32(nc, + ggml_v1_vec_relu_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_relu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_relu( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_relu_f32(params, src0, dst); + ggml_v1_compute_forward_relu_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_gelu +// ggml_v1_compute_forward_gelu -static void ggml_compute_forward_gelu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void ggml_v1_compute_forward_gelu_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(ggml_v1_is_contiguous(src0)); + GGML_V1_ASSERT(ggml_v1_is_contiguous(dst)); + GGML_V1_ASSERT(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -4210,7 +4210,7 @@ static void ggml_compute_forward_gelu_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = ggml_v1_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -4220,7 +4220,7 @@ static void ggml_compute_forward_gelu_f32( const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f32(nc, + ggml_v1_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1]))); @@ -4235,39 +4235,39 @@ static void ggml_compute_forward_gelu_f32( } } -static void ggml_compute_forward_gelu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_gelu( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_gelu_f32(params, src0, dst); + ggml_v1_compute_forward_gelu_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case 
GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_norm +// ggml_v1_compute_forward_norm -static void ggml_compute_forward_norm_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void ggml_v1_compute_forward_norm_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_V1_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; @@ -4285,7 +4285,7 @@ static void ggml_compute_forward_norm_f32( const size_t nb2 = dst->nb[2]; const size_t nb3 = dst->nb[3]; - const ggml_float eps = 1e-5f; // TODO: make this a parameter + const ggml_v1_float eps = 1e-5f; // TODO: make this a parameter // TODO: optimize for (int i03 = 0; i03 < ne03; i03++) { @@ -4293,7 +4293,7 @@ static void ggml_compute_forward_norm_f32( for (int i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - ggml_float mean = 0.0; + ggml_v1_float mean = 0.0; for (int i00 = 0; i00 < ne00; i00++) { mean += x[i00]; } @@ -4302,50 +4302,50 @@ static void ggml_compute_forward_norm_f32( float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); - ggml_float sum2 = 0.0; + ggml_v1_float sum2 = 0.0; for (int i00 = 0; i00 < ne00; i00++) { - ggml_float v = x[i00] - mean; + ggml_v1_float v = x[i00] - mean; y[i00] = v; sum2 += v*v; } const float scale = 1.0/sqrt(sum2/ne00 + eps); - ggml_vec_scale_f32(ne00, y, scale); + ggml_v1_vec_scale_f32(ne00, y, scale); } } } } -static void ggml_compute_forward_norm( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_norm( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_norm_f32(params, src0, dst); + ggml_v1_compute_forward_norm_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_mul_mat +// ggml_v1_compute_forward_mul_mat -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) +#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS) // helper function to determine if it is better to use BLAS or not // for large matrices, BLAS is faster -static bool ggml_compute_forward_mul_mat_use_blas( - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static bool ggml_v1_compute_forward_mul_mat_use_blas( + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { UNUSED(src0); const int ne10 = src1->ne[0]; @@ -4354,7 +4354,7 @@ static bool ggml_compute_forward_mul_mat_use_blas( const int ne1 = dst->ne[1]; // TODO: find the optimal values 
for these - if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ( + if (ggml_v1_is_contiguous(src0) && ggml_v1_is_contiguous(src1) && ( (ne0 >= 32 && ne1 >= 32 && ne10 >= 32) )) { //printf("BLAS: %d %d %d\n", ne0, ne1, ne10); @@ -4365,12 +4365,12 @@ static bool ggml_compute_forward_mul_mat_use_blas( } #endif -static void ggml_compute_forward_mul_mat_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); +static void ggml_v1_compute_forward_mul_mat_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int ne00 = src0->ne[0]; @@ -4432,19 +4432,19 @@ static void ggml_compute_forward_mul_mat_f32( // nb00 < nb01 - src0 is transposed // compute by src0 columns -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) - if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - GGML_ASSERT(nb10 == sizeof(float)); +#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS) + if (ggml_v1_compute_forward_mul_mat_use_blas(src0, src1, dst)) { + GGML_V1_ASSERT(nb10 == sizeof(float)); if (params->ith != 0) { return; } - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -4466,13 +4466,13 @@ static void ggml_compute_forward_mul_mat_f32( } } - //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); + //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_v1_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); return; } #endif - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { if (nb01 >= nb00) { return; } @@ -4482,13 +4482,13 @@ static void ggml_compute_forward_mul_mat_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { if (nb01 >= nb00) { return; } // TODO: fix this memset (wsize is overestimated) - //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth); + //assert(params->wsize == (ggml_v1_nbytes(dst) + CACHE_LINE_SIZE)*nth); float * const wdata = params->wdata; @@ -4499,10 +4499,10 @@ static void ggml_compute_forward_mul_mat_f32( const int ic0 = dc*ith; const int ic1 = MIN(ic0 + dc, ne); - ggml_vec_cpy_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + ic0); + ggml_v1_vec_cpy_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + ic0); for (int k = 1; k < nth; k++) { - ggml_vec_acc_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + (ne + CACHE_LINE_SIZE_F32)*k + ic0); + ggml_v1_vec_acc_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + (ne + CACHE_LINE_SIZE_F32)*k + ic0); } return; @@ -4512,7 +4512,7 @@ static void ggml_compute_forward_mul_mat_f32( // TODO: do not support transposed src1 assert(nb10 == sizeof(float)); - // parallelize by src0 rows using ggml_vec_dot_f32 + // parallelize by src0 rows using ggml_v1_vec_dot_f32 // total rows in src0 const int nr = ne01*ne02*ne03; @@ -4542,14 +4542,14 @@ static void ggml_compute_forward_mul_mat_f32( const int i2 = i02; const int i3 = i03; - ggml_vec_dot_f32(ne00, + ggml_v1_vec_dot_f32(ne00, (float *) ((char *) dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)), (float *) ((char *) src1->data + 
(i11*nb11 + i12*nb12 + i13*nb13))); } } } else { - // parallelize by src1 columns using ggml_vec_mad_f32 + // parallelize by src1 columns using ggml_v1_vec_mad_f32 // each thread has its own work data // during FINALIZE we accumulate all work data into dst @@ -4586,7 +4586,7 @@ static void ggml_compute_forward_mul_mat_f32( assert(sizeof(float)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); - ggml_vec_mad_f32(ne01, + ggml_v1_vec_mad_f32(ne01, (float *) (wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0), (float *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)), *(float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13))); @@ -4596,7 +4596,7 @@ static void ggml_compute_forward_mul_mat_f32( } } - //int64_t t1 = ggml_perf_time_us(); + //int64_t t1 = ggml_v1_perf_time_us(); //static int64_t acc = 0; //acc += t1 - t0; //if (t1 - t0 > 10) { @@ -4610,12 +4610,12 @@ static void ggml_compute_forward_mul_mat_f32( //} } -static void ggml_compute_forward_mul_mat_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); +static void ggml_v1_compute_forward_mul_mat_f16_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int ne00 = src0->ne[0]; @@ -4652,24 +4652,24 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int ith = params->ith; const int nth = params->nth; - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); + GGML_V1_ASSERT(ne02 == ne12); + GGML_V1_ASSERT(ne03 == ne13); + GGML_V1_ASSERT(ne2 == ne12); + GGML_V1_ASSERT(ne3 == ne13); // TODO: we don't support permuted src0 - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t) || nb01 == sizeof(ggml_fp16_t)); + GGML_V1_ASSERT(nb00 == sizeof(ggml_v1_fp16_t) || nb01 == sizeof(ggml_v1_fp16_t)); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + GGML_V1_ASSERT(nb0 == sizeof(float)); + GGML_V1_ASSERT(nb0 <= nb1); + GGML_V1_ASSERT(nb1 <= nb2); + GGML_V1_ASSERT(nb2 <= nb3); - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); + GGML_V1_ASSERT(ne0 == ne01); + GGML_V1_ASSERT(ne1 == ne11); + GGML_V1_ASSERT(ne2 == ne02); + GGML_V1_ASSERT(ne3 == ne03); // nb01 >= nb00 - src0 is not transposed // compute by src0 rows @@ -4677,19 +4677,19 @@ static void ggml_compute_forward_mul_mat_f16_f32( // nb00 < nb01 - src0 is transposed // compute by src0 columns -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) - if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - GGML_ASSERT(nb10 == sizeof(float)); +#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS) + if (ggml_v1_compute_forward_mul_mat_use_blas(src0, src1, dst)) { + GGML_V1_ASSERT(nb10 == sizeof(float)); if (params->ith != 0) { return; } - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -4701,7 +4701,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( int id = 0; for (int i01 = 0; i01 < ne01; ++i01) { for (int i00 = 0; i00 < ne00; ++i00) { - wdata[id++] = 
GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00)); + wdata[id++] = GGML_V1_FP16_TO_FP32(*(ggml_v1_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00)); } } } @@ -4749,28 +4749,28 @@ static void ggml_compute_forward_mul_mat_f16_f32( } } - //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); + //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_v1_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); return; } #endif - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { if (nb01 >= nb00) { - ggml_fp16_t * const wdata = params->wdata; + ggml_v1_fp16_t * const wdata = params->wdata; int id = 0; for (int i13 = 0; i13 < ne13; ++i13) { for (int i12 = 0; i12 < ne12; ++i12) { for (int i11 = 0; i11 < ne11; ++i11) { for (int i10 = 0; i10 < ne10; ++i10) { - wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); + wdata[id++] = GGML_V1_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); } } } } - GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize); + GGML_V1_ASSERT(id*sizeof(ggml_v1_fp16_t) <= params->wsize); return; } @@ -4780,15 +4780,15 @@ static void ggml_compute_forward_mul_mat_f16_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { if (nb01 >= nb00) { return; } // TODO: fix this memset (wsize is overestimated) - //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth); + //assert(params->wsize == (ggml_v1_nbytes(dst) + CACHE_LINE_SIZE)*nth); - ggml_fp16_t * const wdata = params->wdata; + ggml_v1_fp16_t * const wdata = params->wdata; // cols per thread const int dc = (ne + nth - 1)/nth; @@ -4798,12 +4798,12 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int ic1 = MIN(ic0 + dc, ne); for (int i = ic0; i < ic1; ++i) { - ((float *) dst->data)[i] = GGML_FP16_TO_FP32(wdata[i]); + ((float *) dst->data)[i] = GGML_V1_FP16_TO_FP32(wdata[i]); } for (int k = 1; k < nth; k++) { for (int i = ic0; i < ic1; ++i) { - ((float *) dst->data)[i] += GGML_FP16_TO_FP32(wdata[(ne + CACHE_LINE_SIZE_F32)*k + i]); + ((float *) dst->data)[i] += GGML_V1_FP16_TO_FP32(wdata[(ne + CACHE_LINE_SIZE_F32)*k + i]); } } @@ -4813,9 +4813,9 @@ static void ggml_compute_forward_mul_mat_f16_f32( if (nb01 >= nb00) { // fp16 -> half the size, so divide by 2 // TODO: do not support transposed src1 - assert(nb10/2 == sizeof(ggml_fp16_t)); + assert(nb10/2 == sizeof(ggml_v1_fp16_t)); - // parallelize by src0 rows using ggml_vec_dot_f16 + // parallelize by src0 rows using ggml_v1_vec_dot_f16 // total rows in src0 const int nr = ne01*ne02*ne03; @@ -4827,7 +4827,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - ggml_fp16_t * wdata = params->wdata; + ggml_v1_fp16_t * wdata = params->wdata; for (int ir = ir0; ir < ir1; ++ir) { // src0 indices @@ -4842,19 +4842,19 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int i2 = i02; const int i3 = i03; - ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); - ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00; + ggml_v1_fp16_t * src0_row = (ggml_v1_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); + ggml_v1_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00; float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 
+ i2*nb2 + i3*nb3)); assert(ne00 % 32 == 0); for (int ic = 0; ic < ne11; ++ic) { - ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00); + ggml_v1_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00); } } } else { - // parallelize by src1 columns using ggml_vec_mad_f16 + // parallelize by src1 columns using ggml_v1_vec_mad_f16 // each thread has its own work data // during FINALIZE we accumulate all work data into dst @@ -4870,7 +4870,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( // work data for thread const int wo = (ne + CACHE_LINE_SIZE_F32)*ith; - ggml_fp16_t * const wdata = params->wdata; + ggml_v1_fp16_t * const wdata = params->wdata; for (int i13 = 0; i13 < ne13; ++i13) { for (int i12 = 0; i12 < ne12; ++i12) { @@ -4880,7 +4880,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int i2 = i12; const int i3 = i13; - ggml_fp16_t * dst_row = wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0; + ggml_v1_fp16_t * dst_row = wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0; for (int ic = ic0; ic < ic1; ++ic) { // src1 indices @@ -4891,19 +4891,19 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int i02 = i12; const int i00 = ic; - assert(sizeof(ggml_fp16_t)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); + assert(sizeof(ggml_v1_fp16_t)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); - ggml_fp16_t * src0_col = (ggml_fp16_t *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)); + ggml_v1_fp16_t * src0_col = (ggml_v1_fp16_t *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)); float src1_val = * (float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); - ggml_vec_mad_f16(ne01, dst_row, src0_col, src1_val); + ggml_v1_vec_mad_f16(ne01, dst_row, src0_col, src1_val); } } } } } - //int64_t t1 = ggml_time_us(); + //int64_t t1 = ggml_v1_time_us(); //static int64_t acc = 0; //acc += t1 - t0; //if (t1 - t0 > 10) { @@ -4916,43 +4916,43 @@ static void ggml_compute_forward_mul_mat_f16_f32( //} } -static void ggml_compute_forward_mul_mat( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_mul_mat( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst); + ggml_v1_compute_forward_mul_mat_f16_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_mul_mat_f32(params, src0, src1, dst); + ggml_v1_compute_forward_mul_mat_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_scale +// ggml_v1_compute_forward_scale -static void ggml_compute_forward_scale_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); +static void ggml_v1_compute_forward_scale_f32( + const struct 
ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(ggml_v1_is_contiguous(src0)); + GGML_V1_ASSERT(ggml_v1_is_contiguous(dst)); + GGML_V1_ASSERT(ggml_v1_are_same_shape(src0, dst)); + GGML_V1_ASSERT(ggml_v1_is_scalar(src1)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -4963,7 +4963,7 @@ static void ggml_compute_forward_scale_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = ggml_v1_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -4973,125 +4973,125 @@ static void ggml_compute_forward_scale_f32( const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), v); + ggml_v1_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), v); } } -static void ggml_compute_forward_scale( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_scale( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_scale_f32(params, src0, src1, dst); + ggml_v1_compute_forward_scale_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_cpy +// ggml_v1_compute_forward_cpy -static void ggml_compute_forward_cpy( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - ggml_compute_forward_dup(params, src0, dst); +static void ggml_v1_compute_forward_cpy( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { + ggml_v1_compute_forward_dup(params, src0, dst); } -// ggml_compute_forward_reshape +// ggml_v1_compute_forward_reshape -static void ggml_compute_forward_reshape( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_reshape( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { // NOP UNUSED(params); UNUSED(src0); UNUSED(dst); } -// ggml_compute_forward_view +// ggml_v1_compute_forward_view -static void ggml_compute_forward_view( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { +static void ggml_v1_compute_forward_view( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0) { // NOP UNUSED(params); UNUSED(src0); } -// ggml_compute_forward_permute +// ggml_v1_compute_forward_permute -static void ggml_compute_forward_permute( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { +static void ggml_v1_compute_forward_permute( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0) { // NOP UNUSED(params); UNUSED(src0); } -// 
ggml_compute_forward_transpose +// ggml_v1_compute_forward_transpose -static void ggml_compute_forward_transpose( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { +static void ggml_v1_compute_forward_transpose( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0) { // NOP UNUSED(params); UNUSED(src0); } -// ggml_compute_forward_get_rows +// ggml_v1_compute_forward_get_rows -static void ggml_compute_forward_get_rows_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_get_rows_f16( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); + const int nr = ggml_v1_nelements(src1); assert( dst->ne[0] == nc); assert( dst->ne[1] == nr); - assert(src0->nb[0] == sizeof(ggml_fp16_t)); + assert(src0->nb[0] == sizeof(ggml_v1_fp16_t)); for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; for (int j = 0; j < nc; ++j) { - ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j]; - ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v); + ggml_v1_fp16_t v = ((ggml_v1_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j]; + ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_V1_FP16_TO_FP32(v); } } } -static void ggml_compute_forward_get_rows_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_get_rows_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); + const int nr = ggml_v1_nelements(src1); assert( dst->ne[0] == nc); assert( dst->ne[1] == nr); @@ -5100,48 +5100,48 @@ static void ggml_compute_forward_get_rows_f32( for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; - ggml_vec_cpy_f32(nc, + ggml_v1_vec_cpy_f32(nc, (float *) ((char *) dst->data + i*dst->nb[1]), (float *) ((char *) src0->data + r*src0->nb[1])); } } -static void ggml_compute_forward_get_rows( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_get_rows( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - ggml_compute_forward_get_rows_f16(params, src0, src1, dst); + ggml_v1_compute_forward_get_rows_f16(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_get_rows_f32(params, src0, src1, dst); + ggml_v1_compute_forward_get_rows_f32(params, src0, 
src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_diag_mask_inf +// ggml_v1_compute_forward_diag_mask_inf -static void ggml_compute_forward_diag_mask_inf_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_diag_mask_inf_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(src1->type == GGML_TYPE_I32); - assert(ggml_nelements(src1) == 1); + assert(src1->type == GGML_V1_TYPE_I32); + assert(ggml_v1_nelements(src1) == 1); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -5149,7 +5149,7 @@ static void ggml_compute_forward_diag_mask_inf_f32( // TODO: handle transposed/permuted matrices - const int n = ggml_nrows(src0); + const int n = ggml_v1_nrows(src0); const int nc = src0->ne[0]; const int nr = src0->ne[1]; const int nz = n/nr; @@ -5168,38 +5168,38 @@ static void ggml_compute_forward_diag_mask_inf_f32( } } -static void ggml_compute_forward_diag_mask_inf( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_diag_mask_inf( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_diag_mask_inf_f32(params, src0, src1, dst); + ggml_v1_compute_forward_diag_mask_inf_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_soft_max +// ggml_v1_compute_forward_soft_max -static void ggml_compute_forward_soft_max_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void ggml_v1_compute_forward_soft_max_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(ggml_v1_is_contiguous(src0)); + GGML_V1_ASSERT(ggml_v1_is_contiguous(dst)); + GGML_V1_ASSERT(ggml_v1_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -5209,7 +5209,7 @@ static void ggml_compute_forward_soft_max_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = ggml_v1_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -5228,9 +5228,9 @@ static void ggml_compute_forward_soft_max_f32( #endif float max = -INFINITY; - ggml_vec_max_f32(nc, &max, p); + 
ggml_v1_vec_max_f32(nc, &max, p); - ggml_float sum = 0.0; + ggml_v1_float sum = 0.0; uint16_t scvt; for (int i = 0; i < nc; i++) { @@ -5238,9 +5238,9 @@ static void ggml_compute_forward_soft_max_f32( p[i] = 0.0f; } else { //const float val = (p[i] == -INFINITY) ? 0.0 : exp(p[i] - max); - ggml_fp16_t s = GGML_FP32_TO_FP16(p[i] - max); + ggml_v1_fp16_t s = GGML_V1_FP32_TO_FP16(p[i] - max); memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); + const float val = GGML_V1_FP16_TO_FP32(table_exp_f16[scvt]); sum += val; p[i] = val; } @@ -5249,7 +5249,7 @@ static void ggml_compute_forward_soft_max_f32( assert(sum > 0.0f); sum = 1.0/sum; - ggml_vec_scale_f32(nc, p, sum); + ggml_v1_vec_scale_f32(nc, p, sum); #ifndef NDEBUG for (int i = 0; i < nc; ++i) { @@ -5260,38 +5260,38 @@ static void ggml_compute_forward_soft_max_f32( } } -static void ggml_compute_forward_soft_max( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_soft_max( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_soft_max_f32(params, src0, dst); + ggml_v1_compute_forward_soft_max_f32(params, src0, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_rope +// ggml_v1_compute_forward_rope -static void ggml_compute_forward_rope_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_rope_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { assert(params->ith == 0); - assert(src1->type == GGML_TYPE_I32); - assert(ggml_nelements(src1) == 3); + assert(src1->type == GGML_V1_TYPE_I32); + assert(ggml_v1_nelements(src1) == 3); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_INIT || params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -5339,39 +5339,39 @@ static void ggml_compute_forward_rope_f32( } } -static void ggml_compute_forward_rope( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_rope( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_rope_f32(params, src0, src1, dst); + ggml_v1_compute_forward_rope_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_F16: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_F16: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_conv_1d_1s +// ggml_v1_compute_forward_conv_1d_1s -static void ggml_compute_forward_conv_1d_1s_f16_f32( - const struct ggml_compute_params * 
params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void ggml_v1_compute_forward_conv_1d_1s_f16_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(src0->type == GGML_V1_TYPE_F16); + GGML_V1_ASSERT(src1->type == GGML_V1_TYPE_F32); + GGML_V1_ASSERT( dst->type == GGML_V1_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int ne00 = src0->ne[0]; @@ -5411,24 +5411,24 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( const int nk = ne00; const int nh = nk/2; - const int ew0 = ggml_up32(ne01); + const int ew0 = ggml_v1_up32(ne01); - GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); + GGML_V1_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes + GGML_V1_ASSERT(nb00 == sizeof(ggml_v1_fp16_t)); + GGML_V1_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { // TODO: fix this memset (wsize is overestimated) memset(params->wdata, 0, params->wsize); // prepare kernel data (src0) { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + ggml_v1_fp16_t * const wdata = (ggml_v1_fp16_t *) params->wdata + 0; for (int i02 = 0; i02 < ne02; i02++) { for (int i01 = 0; i01 < ne01; i01++) { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); - ggml_fp16_t * dst_data = wdata + i02*ew0*ne00; + const ggml_v1_fp16_t * const src = (ggml_v1_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); + ggml_v1_fp16_t * dst_data = wdata + i02*ew0*ne00; for (int i00 = 0; i00 < ne00; i00++) { dst_data[i00*ew0 + i01] = src[i00]; } @@ -5438,13 +5438,13 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( // prepare source data (src1) { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00; + ggml_v1_fp16_t * const wdata = (ggml_v1_fp16_t *) params->wdata + ne02*ew0*ne00; for (int i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); - ggml_fp16_t * dst_data = wdata; + ggml_v1_fp16_t * dst_data = wdata; for (int i10 = 0; i10 < ne10; i10++) { - dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]); + dst_data[(i10 + nh)*ew0 + i11] = GGML_V1_FP32_TO_FP16(src[i10]); } } } @@ -5452,7 +5452,7 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -5472,9 +5472,9 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( dst_data[i0] = 0; for (int k = -nh; k <= nh; k++) { float v = 0.0f; - ggml_vec_dot_f16(ew0, &v, - (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, - (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); + ggml_v1_vec_dot_f16(ew0, &v, + (ggml_v1_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, + (ggml_v1_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); dst_data[i0] += v; } @@ -5482,16 +5482,16 @@ static void ggml_compute_forward_conv_1d_1s_f16_f32( } } -static void ggml_compute_forward_conv_1d_1s_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * 
src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void ggml_v1_compute_forward_conv_1d_1s_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(src0->type == GGML_V1_TYPE_F32); + GGML_V1_ASSERT(src1->type == GGML_V1_TYPE_F32); + GGML_V1_ASSERT( dst->type == GGML_V1_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int ne00 = src0->ne[0]; @@ -5531,13 +5531,13 @@ static void ggml_compute_forward_conv_1d_1s_f32( const int nk = ne00; const int nh = nk/2; - const int ew0 = ggml_up32(ne01); + const int ew0 = ggml_v1_up32(ne01); - GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); + GGML_V1_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes + GGML_V1_ASSERT(nb00 == sizeof(float)); + GGML_V1_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { // TODO: fix this memset (wsize is overestimated) memset(params->wdata, 0, params->wsize); @@ -5572,7 +5572,7 @@ static void ggml_compute_forward_conv_1d_1s_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -5592,7 +5592,7 @@ static void ggml_compute_forward_conv_1d_1s_f32( dst_data[i0] = 0; for (int k = -nh; k <= nh; k++) { float v = 0.0f; - ggml_vec_dot_f32(ew0, &v, + ggml_v1_vec_dot_f32(ew0, &v, (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); @@ -5602,42 +5602,42 @@ static void ggml_compute_forward_conv_1d_1s_f32( } } -static void ggml_compute_forward_conv_1d_1s( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_conv_1d_1s( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - ggml_compute_forward_conv_1d_1s_f16_f32(params, src0, src1, dst); + ggml_v1_compute_forward_conv_1d_1s_f16_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_conv_1d_1s_f32(params, src0, src1, dst); + ggml_v1_compute_forward_conv_1d_1s_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } } -// ggml_compute_forward_conv_1d_2s +// ggml_v1_compute_forward_conv_1d_2s -static void ggml_compute_forward_conv_1d_2s_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void ggml_v1_compute_forward_conv_1d_2s_f16_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { + 
GGML_V1_ASSERT(src0->type == GGML_V1_TYPE_F16); + GGML_V1_ASSERT(src1->type == GGML_V1_TYPE_F32); + GGML_V1_ASSERT( dst->type == GGML_V1_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int ne00 = src0->ne[0]; @@ -5677,24 +5677,24 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( const int nk = ne00; const int nh = nk/2; - const int ew0 = ggml_up32(ne01); + const int ew0 = ggml_v1_up32(ne01); - GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); + GGML_V1_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes + GGML_V1_ASSERT(nb00 == sizeof(ggml_v1_fp16_t)); + GGML_V1_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { // TODO: fix this memset (wsize is overestimated) memset(params->wdata, 0, params->wsize); // prepare kernel data (src0) { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + ggml_v1_fp16_t * const wdata = (ggml_v1_fp16_t *) params->wdata + 0; for (int i02 = 0; i02 < ne02; i02++) { for (int i01 = 0; i01 < ne01; i01++) { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); - ggml_fp16_t * dst_data = wdata + i02*ew0*ne00; + const ggml_v1_fp16_t * const src = (ggml_v1_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); + ggml_v1_fp16_t * dst_data = wdata + i02*ew0*ne00; for (int i00 = 0; i00 < ne00; i00++) { dst_data[i00*ew0 + i01] = src[i00]; } @@ -5704,13 +5704,13 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( // prepare source data (src1) { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00; + ggml_v1_fp16_t * const wdata = (ggml_v1_fp16_t *) params->wdata + ne02*ew0*ne00; for (int i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); - ggml_fp16_t * dst_data = wdata; + ggml_v1_fp16_t * dst_data = wdata; for (int i10 = 0; i10 < ne10; i10++) { - dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]); + dst_data[(i10 + nh)*ew0 + i11] = GGML_V1_FP32_TO_FP16(src[i10]); } } } @@ -5718,7 +5718,7 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -5738,9 +5738,9 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( dst_data[i0/2] = 0; for (int k = -nh; k <= nh; k++) { float v = 0.0f; - ggml_vec_dot_f16(ew0, &v, - (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, - (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); + ggml_v1_vec_dot_f16(ew0, &v, + (ggml_v1_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, + (ggml_v1_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); dst_data[i0/2] += v; } @@ -5748,16 +5748,16 @@ static void ggml_compute_forward_conv_1d_2s_f16_f32( } } -static void ggml_compute_forward_conv_1d_2s_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void ggml_v1_compute_forward_conv_1d_2s_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { + GGML_V1_ASSERT(src0->type == GGML_V1_TYPE_F32); + 
GGML_V1_ASSERT(src1->type == GGML_V1_TYPE_F32); + GGML_V1_ASSERT( dst->type == GGML_V1_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int ne00 = src0->ne[0]; @@ -5797,13 +5797,13 @@ static void ggml_compute_forward_conv_1d_2s_f32( const int nk = ne00; const int nh = nk/2; - const int ew0 = ggml_up32(ne01); + const int ew0 = ggml_v1_up32(ne01); - GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); + GGML_V1_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes + GGML_V1_ASSERT(nb00 == sizeof(float)); + GGML_V1_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { // TODO: fix this memset (wsize is overestimated) memset(params->wdata, 0, params->wsize); @@ -5838,7 +5838,7 @@ static void ggml_compute_forward_conv_1d_2s_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } @@ -5858,7 +5858,7 @@ static void ggml_compute_forward_conv_1d_2s_f32( dst_data[i0/2] = 0; for (int k = -nh; k <= nh; k++) { float v = 0.0f; - ggml_vec_dot_f32(ew0, &v, + ggml_v1_vec_dot_f32(ew0, &v, (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); @@ -5868,40 +5868,40 @@ static void ggml_compute_forward_conv_1d_2s_f32( } } -static void ggml_compute_forward_conv_1d_2s( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_conv_1d_2s( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * src0, + const struct ggml_v1_tensor * src1, + struct ggml_v1_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - ggml_compute_forward_conv_1d_2s_f16_f32(params, src0, src1, dst); + ggml_v1_compute_forward_conv_1d_2s_f16_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_conv_1d_2s_f32(params, src0, src1, dst); + ggml_v1_compute_forward_conv_1d_2s_f32(params, src0, src1, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } } -// ggml_compute_forward_flash_attn +// ggml_v1_compute_forward_flash_attn -static void ggml_compute_forward_flash_attn_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, +static void ggml_v1_compute_forward_flash_attn_f32( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * q, + const struct ggml_v1_tensor * k, + const struct ggml_v1_tensor * v, const bool masked, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); + struct ggml_v1_tensor * dst) { + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int neq0 = q->ne[0]; @@ -5952,39 +5952,39 @@ static void ggml_compute_forward_flash_attn_f32( const int P = nek1 - N; const int M = P + N; - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); + const int Mup = ggml_v1_up(M, GGML_V1_SOFT_MAX_UNROLL); - GGML_ASSERT(ne0 == D); - GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); + GGML_V1_ASSERT(ne0 == D); + GGML_V1_ASSERT(ne1 == N); + GGML_V1_ASSERT(P >= 0); - 
GGML_ASSERT(nbq0 == sizeof(float)); - GGML_ASSERT(nbk0 == sizeof(float)); - GGML_ASSERT(nbv0 == sizeof(float)); + GGML_V1_ASSERT(nbq0 == sizeof(float)); + GGML_V1_ASSERT(nbk0 == sizeof(float)); + GGML_V1_ASSERT(nbv0 == sizeof(float)); - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); + GGML_V1_ASSERT(neq0 == D); + GGML_V1_ASSERT(nek0 == D); + GGML_V1_ASSERT(nev1 == D); - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); + GGML_V1_ASSERT(neq1 == N); + GGML_V1_ASSERT(nek1 == N + P); + GGML_V1_ASSERT(nev1 == D); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + GGML_V1_ASSERT(nb0 == sizeof(float)); + GGML_V1_ASSERT(nb0 <= nb1); + GGML_V1_ASSERT(nb1 <= nb2); + GGML_V1_ASSERT(nb2 <= nb3); - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } - // parallelize by q rows using ggml_vec_dot_f32 + // parallelize by q rows using ggml_v1_vec_dot_f32 // total rows in q const int nr = neq1*neq2*neq3; @@ -6021,14 +6021,14 @@ static void ggml_compute_forward_flash_attn_f32( // S indices const int i1 = ik1; - ggml_vec_dot_f32(neq0, + ggml_v1_vec_dot_f32(neq0, S + i1, (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); } // scale - ggml_vec_scale_f32(nek1, S, scale); + ggml_v1_vec_scale_f32(nek1, S, scale); if (masked) { for (int i = P; i < M; i++) { @@ -6041,36 +6041,36 @@ static void ggml_compute_forward_flash_attn_f32( // softmax { float max = -INFINITY; - ggml_vec_max_f32(M, &max, S); + ggml_v1_vec_max_f32(M, &max, S); float sum = 0.0f; { -#ifdef GGML_SOFT_MAX_ACCELERATE +#ifdef GGML_V1_SOFT_MAX_ACCELERATE max = -max; vDSP_vsadd(S, 1, &max, S, 1, Mup); vvexpf(S, S, &Mup); - ggml_vec_sum_f32(Mup, &sum, S); + ggml_v1_vec_sum_f32(Mup, &sum, S); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; + uint16_t scvt[GGML_V1_SOFT_MAX_UNROLL]; + ggml_v1_float sump[GGML_V1_SOFT_MAX_UNROLL] = { 0.0 }; - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { + for (int i = 0; i < Mup; i += GGML_V1_SOFT_MAX_UNROLL) { float * SS = S + i; - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { + for (int j = 0; j < GGML_V1_SOFT_MAX_UNROLL; ++j) { if (SS[j] == -INFINITY) { SS[j] = 0.0f; } else { - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); + ggml_v1_fp16_t s = GGML_V1_FP32_TO_FP16(SS[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); + const float val = GGML_V1_FP16_TO_FP32(table_exp_f16[scvt[j]]); sump[j] += val; SS[j] = val; } } } - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { + for (int i = 0; i < GGML_V1_SOFT_MAX_UNROLL; i++) { sum += sump[i]; } #endif @@ -6079,7 +6079,7 @@ static void ggml_compute_forward_flash_attn_f32( assert(sum > 0.0f); sum = 1.0/sum; - ggml_vec_scale_f32(M, S, sum); + ggml_v1_vec_scale_f32(M, S, sum); #ifndef NDEBUG for (int i = 0; i < M; ++i) { @@ -6095,7 +6095,7 @@ static void ggml_compute_forward_flash_attn_f32( const int i2 = iq2; const int i3 = iq3; - ggml_vec_dot_f32(nek1, + ggml_v1_vec_dot_f32(nek1, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), (float *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)), S); @@ -6103,14 +6103,14 @@ static void 
ggml_compute_forward_flash_attn_f32( } } -static void ggml_compute_forward_flash_attn_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, +static void ggml_v1_compute_forward_flash_attn_f16( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * q, + const struct ggml_v1_tensor * k, + const struct ggml_v1_tensor * v, const bool masked, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); + struct ggml_v1_tensor * dst) { + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int neq0 = q->ne[0]; @@ -6161,39 +6161,39 @@ static void ggml_compute_forward_flash_attn_f16( const int P = nek1 - N; const int M = P + N; - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); + const int Mup = ggml_v1_up(M, GGML_V1_SOFT_MAX_UNROLL); - GGML_ASSERT(ne0 == D); - GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); + GGML_V1_ASSERT(ne0 == D); + GGML_V1_ASSERT(ne1 == N); + GGML_V1_ASSERT(P >= 0); - GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t)); + GGML_V1_ASSERT(nbq0 == sizeof(ggml_v1_fp16_t)); + GGML_V1_ASSERT(nbk0 == sizeof(ggml_v1_fp16_t)); + GGML_V1_ASSERT(nbv0 == sizeof(ggml_v1_fp16_t)); - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); + GGML_V1_ASSERT(neq0 == D); + GGML_V1_ASSERT(nek0 == D); + GGML_V1_ASSERT(nev1 == D); - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); + GGML_V1_ASSERT(neq1 == N); + GGML_V1_ASSERT(nek1 == N + P); + GGML_V1_ASSERT(nev1 == D); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + GGML_V1_ASSERT(nb0 == sizeof(float)); + GGML_V1_ASSERT(nb0 <= nb1); + GGML_V1_ASSERT(nb1 <= nb2); + GGML_V1_ASSERT(nb2 <= nb3); - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } - // parallelize by q rows using ggml_vec_dot_f32 + // parallelize by q rows using ggml_v1_vec_dot_f32 // total rows in q const int nr = neq1*neq2*neq3; @@ -6221,7 +6221,7 @@ static void ggml_compute_forward_flash_attn_f16( S[i] = -INFINITY; } - if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) { + if (GGML_V1_VEC_DOT_UNROLL > 2 || nek1 % GGML_V1_VEC_DOT_UNROLL != 0) { for (int ic = 0; ic < nek1; ++ic) { // k indices const int ik3 = iq3; @@ -6231,13 +6231,13 @@ static void ggml_compute_forward_flash_attn_f16( // S indices const int i1 = ik1; - ggml_vec_dot_f16(neq0, + ggml_v1_vec_dot_f16(neq0, S + i1, - (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), - (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); + (ggml_v1_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), + (ggml_v1_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); } } else { - for (int ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) { + for (int ic = 0; ic < nek1; ic += GGML_V1_VEC_DOT_UNROLL) { // k indices const int ik3 = iq3; const int ik2 = iq2; @@ -6246,15 +6246,15 @@ static void ggml_compute_forward_flash_attn_f16( // S indices const int i1 = ik1; - ggml_vec_dot_f16_unroll(neq0, nbk1, + ggml_v1_vec_dot_f16_unroll(neq0, nbk1, S + i1, ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), - (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); + 
(ggml_v1_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); } } // scale - ggml_vec_scale_f32(nek1, S, scale); + ggml_v1_vec_scale_f32(nek1, S, scale); if (masked) { for (int i = P; i < M; i++) { @@ -6267,36 +6267,36 @@ static void ggml_compute_forward_flash_attn_f16( // softmax { float max = -INFINITY; - ggml_vec_max_f32(M, &max, S); + ggml_v1_vec_max_f32(M, &max, S); float sum = 0.0f; { -#ifdef GGML_SOFT_MAX_ACCELERATE +#ifdef GGML_V1_SOFT_MAX_ACCELERATE max = -max; vDSP_vsadd(S, 1, &max, S, 1, Mup); vvexpf(S, S, &Mup); - ggml_vec_sum_f32(Mup, &sum, S); + ggml_v1_vec_sum_f32(Mup, &sum, S); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; + uint16_t scvt[GGML_V1_SOFT_MAX_UNROLL]; + ggml_v1_float sump[GGML_V1_SOFT_MAX_UNROLL] = { 0.0 }; - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { + for (int i = 0; i < Mup; i += GGML_V1_SOFT_MAX_UNROLL) { float * SS = S + i; - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { + for (int j = 0; j < GGML_V1_SOFT_MAX_UNROLL; ++j) { if (SS[j] == -INFINITY) { SS[j] = 0.0f; } else { - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); + ggml_v1_fp16_t s = GGML_V1_FP32_TO_FP16(SS[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); + const float val = GGML_V1_FP16_TO_FP32(table_exp_f16[scvt[j]]); sump[j] += val; SS[j] = val; } } } - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { + for (int i = 0; i < GGML_V1_SOFT_MAX_UNROLL; i++) { sum += sump[i]; } #endif @@ -6305,7 +6305,7 @@ static void ggml_compute_forward_flash_attn_f16( assert(sum > 0.0f); sum = 1.0/sum; - ggml_vec_scale_f32(M, S, sum); + ggml_v1_vec_scale_f32(M, S, sum); #ifndef NDEBUG for (int i = 0; i < M; ++i) { @@ -6315,32 +6315,32 @@ static void ggml_compute_forward_flash_attn_f16( #endif } - ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup); + ggml_v1_fp16_t * S16 = (ggml_v1_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup); for (int i = 0; i < M; i++) { - S16[i] = GGML_FP32_TO_FP16(S[i]); + S16[i] = GGML_V1_FP32_TO_FP16(S[i]); } - if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) { + if (GGML_V1_VEC_DOT_UNROLL == 1 || (nev1 % GGML_V1_VEC_DOT_UNROLL != 0)) { for (int ic = 0; ic < nev1; ++ic) { // dst indices const int i1 = iq1; const int i2 = iq2; const int i3 = iq3; - ggml_vec_dot_f16(nek1, + ggml_v1_vec_dot_f16(nek1, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)), + (ggml_v1_fp16_t *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)), S16); } } else { - for (int ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) { + for (int ic = 0; ic < nev1; ic += GGML_V1_VEC_DOT_UNROLL) { // dst indices const int i1 = iq1; const int i2 = iq2; const int i3 = iq3; - ggml_vec_dot_f16_unroll(nek1, nbv1, + ggml_v1_vec_dot_f16_unroll(nek1, nbv1, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)), S16); @@ -6349,43 +6349,43 @@ static void ggml_compute_forward_flash_attn_f16( } } -static void ggml_compute_forward_flash_attn( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, +static void ggml_v1_compute_forward_flash_attn( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * q, + const struct ggml_v1_tensor * k, + const 
struct ggml_v1_tensor * v, const bool masked, - struct ggml_tensor * dst) { + struct ggml_v1_tensor * dst) { switch (q->type) { - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst); + ggml_v1_compute_forward_flash_attn_f16(params, q, k, v, masked, dst); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst); + ggml_v1_compute_forward_flash_attn_f32(params, q, k, v, masked, dst); } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_COUNT: { assert(false); } break; } } -// ggml_compute_forward_flash_ff +// ggml_v1_compute_forward_flash_ff -static void ggml_compute_forward_flash_ff_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, // F16 - const struct ggml_tensor * b0, // F16 fc_w - const struct ggml_tensor * b1, // F32 fc_b - const struct ggml_tensor * c0, // F16 proj_w - const struct ggml_tensor * c1, // F32 proj_b - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); +static void ggml_v1_compute_forward_flash_ff_f16( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * a, // F16 + const struct ggml_v1_tensor * b0, // F16 fc_w + const struct ggml_v1_tensor * b1, // F32 fc_b + const struct ggml_v1_tensor * c0, // F16 proj_w + const struct ggml_v1_tensor * c1, // F32 proj_b + struct ggml_v1_tensor * dst) { + int64_t t0 = ggml_v1_perf_time_us(); UNUSED(t0); const int nea0 = a->ne[0]; @@ -6455,41 +6455,41 @@ static void ggml_compute_forward_flash_ff_f16( //const int N = nea1; const int M = neb01; - GGML_ASSERT(ne0 == nea0); - GGML_ASSERT(ne1 == nea1); - GGML_ASSERT(ne2 == nea2); + GGML_V1_ASSERT(ne0 == nea0); + GGML_V1_ASSERT(ne1 == nea1); + GGML_V1_ASSERT(ne2 == nea2); - GGML_ASSERT(nba0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbb10 == sizeof(float)); - GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbc10 == sizeof(float)); + GGML_V1_ASSERT(nba0 == sizeof(ggml_v1_fp16_t)); + GGML_V1_ASSERT(nbb00 == sizeof(ggml_v1_fp16_t)); + GGML_V1_ASSERT(nbb10 == sizeof(float)); + GGML_V1_ASSERT(nbc00 == sizeof(ggml_v1_fp16_t)); + GGML_V1_ASSERT(nbc10 == sizeof(float)); - GGML_ASSERT(neb00 == D); - GGML_ASSERT(neb01 == M); - GGML_ASSERT(neb10 == M); - GGML_ASSERT(neb11 == 1); + GGML_V1_ASSERT(neb00 == D); + GGML_V1_ASSERT(neb01 == M); + GGML_V1_ASSERT(neb10 == M); + GGML_V1_ASSERT(neb11 == 1); - GGML_ASSERT(nec00 == M); - GGML_ASSERT(nec01 == D); - GGML_ASSERT(nec10 == D); - GGML_ASSERT(nec11 == 1); + GGML_V1_ASSERT(nec00 == M); + GGML_V1_ASSERT(nec01 == D); + GGML_V1_ASSERT(nec10 == D); + GGML_V1_ASSERT(nec11 == 1); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + GGML_V1_ASSERT(nb0 == sizeof(float)); + GGML_V1_ASSERT(nb0 <= nb1); + GGML_V1_ASSERT(nb1 <= nb2); + GGML_V1_ASSERT(nb2 <= nb3); - if (params->type == GGML_TASK_INIT) { + if (params->type == GGML_V1_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == GGML_V1_TASK_FINALIZE) { return; } - // parallelize by a rows using ggml_vec_dot_f32 + // parallelize by a rows using ggml_v1_vec_dot_f32 // total rows in a const int nr = nea1*nea2*nea3; @@ -6518,22 +6518,22 @@ static void ggml_compute_forward_flash_ff_f16( // S 
indices const int i1 = ib01; - ggml_vec_dot_f16(nea0, + ggml_v1_vec_dot_f16(nea0, S + i1, - (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)), - (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3))); + (ggml_v1_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)), + (ggml_v1_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3))); } - ggml_vec_add_f32(neb01, S, S, (float *) b1->data); - //ggml_vec_gelu_f32(neb01, S, S); + ggml_v1_vec_add_f32(neb01, S, S, (float *) b1->data); + //ggml_v1_vec_gelu_f32(neb01, S, S); - ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M); + ggml_v1_fp16_t * S16 = (ggml_v1_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M); for (int i = 0; i < M; i++) { - S16[i] = GGML_FP32_TO_FP16(S[i]); + S16[i] = GGML_V1_FP32_TO_FP16(S[i]); } - ggml_vec_gelu_f16(neb01, S16, S16); + ggml_v1_vec_gelu_f16(neb01, S16, S16); { // dst indices @@ -6543,13 +6543,13 @@ static void ggml_compute_forward_flash_ff_f16( for (int ic = 0; ic < nec01; ++ic) { - ggml_vec_dot_f16(neb01, + ggml_v1_vec_dot_f16(neb01, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)), + (ggml_v1_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)), S16); } - ggml_vec_add_f32(nec01, + ggml_v1_vec_add_f32(nec01, (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)), (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)), (float *) c1->data); @@ -6557,27 +6557,27 @@ static void ggml_compute_forward_flash_ff_f16( } } -static void ggml_compute_forward_flash_ff( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - const struct ggml_tensor * b0, - const struct ggml_tensor * b1, - const struct ggml_tensor * c0, - const struct ggml_tensor * c1, - struct ggml_tensor * dst) { +static void ggml_v1_compute_forward_flash_ff( + const struct ggml_v1_compute_params * params, + const struct ggml_v1_tensor * a, + const struct ggml_v1_tensor * b0, + const struct ggml_v1_tensor * b1, + const struct ggml_v1_tensor * c0, + const struct ggml_v1_tensor * c1, + struct ggml_v1_tensor * dst) { switch (b0->type) { - case GGML_TYPE_F16: + case GGML_V1_TYPE_F16: { - ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst); + ggml_v1_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst); } break; - case GGML_TYPE_F32: + case GGML_V1_TYPE_F32: { - GGML_ASSERT(false); // TODO + GGML_V1_ASSERT(false); // TODO } break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case GGML_V1_TYPE_I8: + case GGML_V1_TYPE_I16: + case GGML_V1_TYPE_I32: + case GGML_V1_TYPE_COUNT: { assert(false); } break; @@ -6586,404 +6586,404 @@ static void ggml_compute_forward_flash_ff( ///////////////////////////////// -static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { +static void ggml_v1_compute_forward(struct ggml_v1_compute_params * params, struct ggml_v1_tensor * tensor) { assert(params); switch (tensor->op) { - case GGML_OP_DUP: + case GGML_V1_OP_DUP: { - ggml_compute_forward_dup(params, tensor->src0, tensor); + ggml_v1_compute_forward_dup(params, tensor->src0, tensor); } break; - case GGML_OP_ADD: + case GGML_V1_OP_ADD: { - ggml_compute_forward_add(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_add(params, tensor->src0, tensor->src1, tensor); } break; - case 
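Aside (not part of the diff): the flash feed-forward path above is effectively a fused two-layer MLP, dst = proj_w * gelu(fc_w * a + fc_b) + proj_b, evaluated per input row with fp16 weights and an fp16 GELU step. A dense single-row sketch, assuming row-major float weights and hypothetical helper names.

/* Minimal dense sketch of what ggml_v1_compute_forward_flash_ff_f16 computes
 * for one input row a[0..d):  dst = proj_w * gelu(fc_w * a + fc_b) + proj_b.
 * Plain float, no fp16 conversion, strides or threading; flash_ff_row() and
 * the row-major layout are assumptions for illustration only. */
#include <math.h>

static void flash_ff_row(const float * a, int d,                        // input row (length d)
                         const float * fc_w,   const float * fc_b, int m, // fc:   d -> m
                         const float * proj_w, const float * proj_b,      // proj: m -> d
                         float * dst, float * scratch) {                  // scratch[m]
    for (int i = 0; i < m; i++) {
        float s = fc_b[i];
        for (int j = 0; j < d; j++) s += fc_w[i*d + j] * a[j];
        // GELU via the tanh approximation commonly used in ggml
        scratch[i] = 0.5f*s*(1.0f + tanhf(0.79788456f*(s + 0.044715f*s*s*s)));
    }
    for (int i = 0; i < d; i++) {
        float s = proj_b[i];
        for (int j = 0; j < m; j++) s += proj_w[i*m + j] * scratch[j];
        dst[i] = s;
    }
}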
GGML_OP_SUB: + case GGML_V1_OP_SUB: { - ggml_compute_forward_sub(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_sub(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_MUL: + case GGML_V1_OP_MUL: { - ggml_compute_forward_mul(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_mul(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_DIV: + case GGML_V1_OP_DIV: { - ggml_compute_forward_div(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_div(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_SQR: + case GGML_V1_OP_SQR: { - ggml_compute_forward_sqr(params, tensor->src0, tensor); + ggml_v1_compute_forward_sqr(params, tensor->src0, tensor); } break; - case GGML_OP_SQRT: + case GGML_V1_OP_SQRT: { - ggml_compute_forward_sqrt(params, tensor->src0, tensor); + ggml_v1_compute_forward_sqrt(params, tensor->src0, tensor); } break; - case GGML_OP_SUM: + case GGML_V1_OP_SUM: { - ggml_compute_forward_sum(params, tensor->src0, tensor); + ggml_v1_compute_forward_sum(params, tensor->src0, tensor); } break; - case GGML_OP_MEAN: + case GGML_V1_OP_MEAN: { - ggml_compute_forward_mean(params, tensor->src0, tensor); + ggml_v1_compute_forward_mean(params, tensor->src0, tensor); } break; - case GGML_OP_REPEAT: + case GGML_V1_OP_REPEAT: { - ggml_compute_forward_repeat(params, tensor->src0, tensor); + ggml_v1_compute_forward_repeat(params, tensor->src0, tensor); } break; - case GGML_OP_ABS: + case GGML_V1_OP_ABS: { - ggml_compute_forward_abs(params, tensor->src0, tensor); + ggml_v1_compute_forward_abs(params, tensor->src0, tensor); } break; - case GGML_OP_SGN: + case GGML_V1_OP_SGN: { - ggml_compute_forward_sgn(params, tensor->src0, tensor); + ggml_v1_compute_forward_sgn(params, tensor->src0, tensor); } break; - case GGML_OP_NEG: + case GGML_V1_OP_NEG: { - ggml_compute_forward_neg(params, tensor->src0, tensor); + ggml_v1_compute_forward_neg(params, tensor->src0, tensor); } break; - case GGML_OP_STEP: + case GGML_V1_OP_STEP: { - ggml_compute_forward_step(params, tensor->src0, tensor); + ggml_v1_compute_forward_step(params, tensor->src0, tensor); } break; - case GGML_OP_RELU: + case GGML_V1_OP_RELU: { - ggml_compute_forward_relu(params, tensor->src0, tensor); + ggml_v1_compute_forward_relu(params, tensor->src0, tensor); } break; - case GGML_OP_GELU: + case GGML_V1_OP_GELU: { - ggml_compute_forward_gelu(params, tensor->src0, tensor); + ggml_v1_compute_forward_gelu(params, tensor->src0, tensor); } break; - case GGML_OP_NORM: + case GGML_V1_OP_NORM: { - ggml_compute_forward_norm(params, tensor->src0, tensor); + ggml_v1_compute_forward_norm(params, tensor->src0, tensor); } break; - case GGML_OP_MUL_MAT: + case GGML_V1_OP_MUL_MAT: { - ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_SCALE: + case GGML_V1_OP_SCALE: { - ggml_compute_forward_scale(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_scale(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_CPY: + case GGML_V1_OP_CPY: { - ggml_compute_forward_cpy(params, tensor->src0, tensor); + ggml_v1_compute_forward_cpy(params, tensor->src0, tensor); } break; - case GGML_OP_RESHAPE: + case GGML_V1_OP_RESHAPE: { - ggml_compute_forward_reshape(params, tensor->src0, tensor); + ggml_v1_compute_forward_reshape(params, tensor->src0, tensor); } break; - case GGML_OP_VIEW: + case GGML_V1_OP_VIEW: { - 
ggml_compute_forward_view(params, tensor->src0); + ggml_v1_compute_forward_view(params, tensor->src0); } break; - case GGML_OP_PERMUTE: + case GGML_V1_OP_PERMUTE: { - ggml_compute_forward_permute(params, tensor->src0); + ggml_v1_compute_forward_permute(params, tensor->src0); } break; - case GGML_OP_TRANSPOSE: + case GGML_V1_OP_TRANSPOSE: { - ggml_compute_forward_transpose(params, tensor->src0); + ggml_v1_compute_forward_transpose(params, tensor->src0); } break; - case GGML_OP_GET_ROWS: + case GGML_V1_OP_GET_ROWS: { - ggml_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_DIAG_MASK_INF: + case GGML_V1_OP_DIAG_MASK_INF: { - ggml_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_SOFT_MAX: + case GGML_V1_OP_SOFT_MAX: { - ggml_compute_forward_soft_max(params, tensor->src0, tensor); + ggml_v1_compute_forward_soft_max(params, tensor->src0, tensor); } break; - case GGML_OP_ROPE: + case GGML_V1_OP_ROPE: { - ggml_compute_forward_rope(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_rope(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_CONV_1D_1S: + case GGML_V1_OP_CONV_1D_1S: { - ggml_compute_forward_conv_1d_1s(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_conv_1d_1s(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_CONV_1D_2S: + case GGML_V1_OP_CONV_1D_2S: { - ggml_compute_forward_conv_1d_2s(params, tensor->src0, tensor->src1, tensor); + ggml_v1_compute_forward_conv_1d_2s(params, tensor->src0, tensor->src1, tensor); } break; - case GGML_OP_FLASH_ATTN: + case GGML_V1_OP_FLASH_ATTN: { - int32_t t = ggml_get_i32_1d(tensor->opt[1], 0); - GGML_ASSERT(t == 0 || t == 1); + int32_t t = ggml_v1_get_i32_1d(tensor->opt[1], 0); + GGML_V1_ASSERT(t == 0 || t == 1); bool masked = t != 0; - ggml_compute_forward_flash_attn(params, tensor->src0, tensor->src1, tensor->opt[0], masked, tensor); + ggml_v1_compute_forward_flash_attn(params, tensor->src0, tensor->src1, tensor->opt[0], masked, tensor); } break; - case GGML_OP_FLASH_FF: + case GGML_V1_OP_FLASH_FF: { - ggml_compute_forward_flash_ff(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], tensor->opt[2], tensor); + ggml_v1_compute_forward_flash_ff(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], tensor->opt[2], tensor); } break; - case GGML_OP_NONE: + case GGML_V1_OP_NONE: { // nop } break; - case GGML_OP_COUNT: + case GGML_V1_OP_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } } //////////////////////////////////////////////////////////////////////////////// -static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) { - struct ggml_tensor * src0 = tensor->src0; - struct ggml_tensor * src1 = tensor->src1; +static void ggml_v1_compute_backward(struct ggml_v1_context * ctx, struct ggml_v1_tensor * tensor, bool inplace) { + struct ggml_v1_tensor * src0 = tensor->src0; + struct ggml_v1_tensor * src1 = tensor->src1; switch (tensor->op) { - case GGML_OP_DUP: + case GGML_V1_OP_DUP: { if (src0->grad) { - src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); + src0->grad = ggml_v1_add_impl(ctx, src0->grad, tensor->grad, inplace); } } break; - case GGML_OP_ADD: + case GGML_V1_OP_ADD: { if (src0->grad) { - src0->grad = 
ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); + src0->grad = ggml_v1_add_impl(ctx, src0->grad, tensor->grad, inplace); } if (src1->grad) { - src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace); + src1->grad = ggml_v1_add_impl(ctx, src1->grad, tensor->grad, inplace); } } break; - case GGML_OP_SUB: + case GGML_V1_OP_SUB: { if (src0->grad) { - src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); + src0->grad = ggml_v1_add_impl(ctx, src0->grad, tensor->grad, inplace); } if (src1->grad) { - src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace); + src1->grad = ggml_v1_sub_impl(ctx, src1->grad, tensor->grad, inplace); } } break; - case GGML_OP_MUL: + case GGML_V1_OP_MUL: { if (src0->grad) { src0->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src0->grad, - ggml_mul(ctx, src1, tensor->grad), + ggml_v1_mul(ctx, src1, tensor->grad), inplace); } if (src1->grad) { src1->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src1->grad, - ggml_mul(ctx, src0, tensor->grad), + ggml_v1_mul(ctx, src0, tensor->grad), inplace); } } break; - case GGML_OP_DIV: + case GGML_V1_OP_DIV: { if (src0->grad) { src0->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src0->grad, - ggml_div(ctx, tensor->grad, src1), + ggml_v1_div(ctx, tensor->grad, src1), inplace); } if (src1->grad) { src1->grad = - ggml_sub_impl(ctx, + ggml_v1_sub_impl(ctx, src1->grad, - ggml_mul(ctx, + ggml_v1_mul(ctx, tensor->grad, - ggml_div(ctx, tensor, src1)), + ggml_v1_div(ctx, tensor, src1)), inplace); } } break; - case GGML_OP_SQR: + case GGML_V1_OP_SQR: { if (src0->grad) { src0->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src0->grad, - ggml_mul(ctx, - ggml_mul(ctx, src0, tensor->grad), - ggml_repeat(ctx, ggml_new_f32(ctx, 2.0f), src0)), + ggml_v1_mul(ctx, + ggml_v1_mul(ctx, src0, tensor->grad), + ggml_v1_repeat(ctx, ggml_v1_new_f32(ctx, 2.0f), src0)), inplace); } } break; - case GGML_OP_SQRT: + case GGML_V1_OP_SQRT: { if (src0->grad) { src0->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src0->grad, - ggml_div(ctx, - ggml_repeat(ctx, ggml_new_f32(ctx, 0.5f), tensor), + ggml_v1_div(ctx, + ggml_v1_repeat(ctx, ggml_v1_new_f32(ctx, 0.5f), tensor), tensor), inplace); } } break; - case GGML_OP_SUM: + case GGML_V1_OP_SUM: { if (src0->grad) { src0->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src0->grad, - ggml_repeat(ctx, tensor->grad, src0->grad), + ggml_v1_repeat(ctx, tensor->grad, src0->grad), inplace); } } break; - case GGML_OP_MEAN: + case GGML_V1_OP_MEAN: { assert(false); // TODO: implement } break; - case GGML_OP_REPEAT: + case GGML_V1_OP_REPEAT: { if (src0->grad) { src0->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src0->grad, - ggml_sum(ctx, tensor->grad), + ggml_v1_sum(ctx, tensor->grad), inplace); } } break; - case GGML_OP_ABS: + case GGML_V1_OP_ABS: { if (src0->grad) { src0->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src0->grad, - ggml_mul(ctx, - ggml_sgn(ctx, src0), + ggml_v1_mul(ctx, + ggml_v1_sgn(ctx, src0), tensor->grad), inplace); } } break; - case GGML_OP_SGN: + case GGML_V1_OP_SGN: { if (src0->grad) { // noop } } break; - case GGML_OP_NEG: + case GGML_V1_OP_NEG: { if (src0->grad) { - src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace); + src0->grad = ggml_v1_sub_impl(ctx, src0->grad, tensor->grad, inplace); } } break; - case GGML_OP_STEP: + case GGML_V1_OP_STEP: { if (src0->grad) { // noop } } break; - case GGML_OP_RELU: + case GGML_V1_OP_RELU: { if (src0->grad) { - src0->grad = ggml_sub_impl(ctx, + src0->grad = 
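Aside (not part of the diff): the backward cases above are plain reverse-mode accumulation into src0->grad / src1->grad. A scalar sanity check of the MUL and DIV rules used here; the program below is illustration only.

/* Scalar illustration of the reverse-mode rules in ggml_v1_compute_backward:
 * for t = x*y:  dx += y*dt,  dy += x*dt
 * for t = x/y:  dx += dt/y,  dy -= dt*(t/y)   (i.e. dy -= dt*x/y^2)
 * Not part of the diff; just a check of the formulas. */
#include <stdio.h>

int main(void) {
    const float x = 3.0f, y = 2.0f, dt = 1.0f;  // dt = upstream gradient dL/dt

    // t = x*y
    const float dx_mul = y*dt, dy_mul = x*dt;

    // t = x/y
    const float t = x/y;
    const float dx_div = dt/y, dy_div = -dt*(t/y);

    printf("mul: dx=%.3f dy=%.3f\n", dx_mul, dy_mul);   // 2.000 3.000
    printf("div: dx=%.3f dy=%.3f\n", dx_div, dy_div);   // 0.500 -0.750
    return 0;
}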
ggml_v1_sub_impl(ctx, src0->grad, - ggml_mul(ctx, - ggml_step(ctx, src0), + ggml_v1_mul(ctx, + ggml_v1_step(ctx, src0), tensor->grad), inplace); } } break; - case GGML_OP_GELU: + case GGML_V1_OP_GELU: { assert(false); // TODO: not implemented } break; - case GGML_OP_NORM: + case GGML_V1_OP_NORM: { assert(false); // TODO: not implemented } break; - case GGML_OP_MUL_MAT: + case GGML_V1_OP_MUL_MAT: { if (src0->grad) { - // TODO: this requires outer product - ggml_out_prod(ctx, src1, tensor->grad); + // TODO: this requires outer product - ggml_v1_out_prod(ctx, src1, tensor->grad); assert(false); } if (src1->grad) { src1->grad = - ggml_add_impl(ctx, + ggml_v1_add_impl(ctx, src1->grad, // TODO: fix transpose, the node will break the graph connections - ggml_mul_mat(ctx, ggml_transpose(ctx, src0), tensor->grad), + ggml_v1_mul_mat(ctx, ggml_v1_transpose(ctx, src0), tensor->grad), inplace); } } break; - case GGML_OP_SCALE: + case GGML_V1_OP_SCALE: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CPY: + case GGML_V1_OP_CPY: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_RESHAPE: + case GGML_V1_OP_RESHAPE: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_VIEW: + case GGML_V1_OP_VIEW: { - GGML_ASSERT(false); // not supported + GGML_V1_ASSERT(false); // not supported } break; - case GGML_OP_PERMUTE: + case GGML_V1_OP_PERMUTE: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_TRANSPOSE: + case GGML_V1_OP_TRANSPOSE: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_GET_ROWS: + case GGML_V1_OP_GET_ROWS: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_DIAG_MASK_INF: + case GGML_V1_OP_DIAG_MASK_INF: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_SOFT_MAX: + case GGML_V1_OP_SOFT_MAX: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_ROPE: + case GGML_V1_OP_ROPE: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_1D_1S: + case GGML_V1_OP_CONV_1D_1S: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_1D_2S: + case GGML_V1_OP_CONV_1D_2S: { - GGML_ASSERT(false); // TODO: not implemented + GGML_V1_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_FLASH_ATTN: + case GGML_V1_OP_FLASH_ATTN: { - GGML_ASSERT(false); // not supported + GGML_V1_ASSERT(false); // not supported } break; - case GGML_OP_FLASH_FF: + case GGML_V1_OP_FLASH_FF: { - GGML_ASSERT(false); // not supported + GGML_V1_ASSERT(false); // not supported } break; - case GGML_OP_NONE: + case GGML_V1_OP_NONE: { // nop } break; - case GGML_OP_COUNT: + case GGML_V1_OP_COUNT: { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } break; } } -static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) { +static void ggml_v1_visit_parents(struct ggml_v1_cgraph * cgraph, struct ggml_v1_tensor * node) { if (node->grad == NULL) { // this usually happens when we generate intermediate nodes from 
constants in the backward pass // it can also happen during forward pass, if the user performs computations with constants - if (node->op != GGML_OP_NONE) { - //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op); + if (node->op != GGML_V1_OP_NONE) { + //GGML_V1_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op); } } @@ -7001,27 +7001,27 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * } if (node->src0) { - ggml_visit_parents(cgraph, node->src0); + ggml_v1_visit_parents(cgraph, node->src0); } if (node->src1) { - ggml_visit_parents(cgraph, node->src1); + ggml_v1_visit_parents(cgraph, node->src1); } - for (int i = 0; i < GGML_MAX_OPT; ++i) { + for (int i = 0; i < GGML_V1_MAX_OPT; ++i) { if (node->opt[i]) { - ggml_visit_parents(cgraph, node->opt[i]); + ggml_v1_visit_parents(cgraph, node->opt[i]); } } - if (node->op == GGML_OP_NONE && node->grad == NULL) { + if (node->op == GGML_V1_OP_NONE && node->grad == NULL) { // reached a leaf node, not part of the gradient graph (e.g. a constant) - assert(cgraph->n_leafs < GGML_MAX_NODES); + assert(cgraph->n_leafs < GGML_V1_MAX_NODES); cgraph->leafs[cgraph->n_leafs] = node; cgraph->n_leafs++; } else { - assert(cgraph->n_nodes < GGML_MAX_NODES); + assert(cgraph->n_nodes < GGML_V1_MAX_NODES); cgraph->nodes[cgraph->n_nodes] = node; cgraph->grads[cgraph->n_nodes] = node->grad; @@ -7029,7 +7029,7 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * } } -static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) { +static void ggml_v1_build_forward_impl(struct ggml_v1_cgraph * cgraph, struct ggml_v1_tensor * tensor, bool expand) { if (!expand) { cgraph->n_nodes = 0; cgraph->n_leafs = 0; @@ -7038,10 +7038,10 @@ static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_ten const int n0 = cgraph->n_nodes; UNUSED(n0); - ggml_visit_parents(cgraph, tensor); + ggml_v1_visit_parents(cgraph, tensor); const int n_new = cgraph->n_nodes - n0; - GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new); + GGML_V1_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new); if (n_new > 0) { // the last added node should always be starting point @@ -7049,12 +7049,12 @@ static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_ten } } -void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) { - ggml_build_forward_impl(cgraph, tensor, true); +void ggml_v1_build_forward_expand(struct ggml_v1_cgraph * cgraph, struct ggml_v1_tensor * tensor) { + ggml_v1_build_forward_impl(cgraph, tensor, true); } -struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) { - struct ggml_cgraph result = { +struct ggml_v1_cgraph ggml_v1_build_forward(struct ggml_v1_tensor * tensor) { + struct ggml_v1_cgraph result = { /*.n_nodes =*/ 0, /*.n_leafs =*/ 0, /*.n_threads =*/ 0, @@ -7068,43 +7068,43 @@ struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) { /*.perf_time_us =*/ 0, }; - ggml_build_forward_impl(&result, tensor, false); + ggml_v1_build_forward_impl(&result, tensor, false); return result; } -struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) { - struct ggml_cgraph result = *gf; +struct ggml_v1_cgraph ggml_v1_build_backward(struct ggml_v1_context * ctx, struct ggml_v1_cgraph * gf, bool keep) { + struct ggml_v1_cgraph result = *gf; 
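Aside (not part of the diff): ggml_v1_visit_parents appends a node only after recursively visiting src0, src1 and opt[], so cgraph->nodes ends up in an order that can be executed front-to-back. A minimal sketch of that post-order traversal; node_t, MAX_NODES and build_order() are hypothetical names.

/* Minimal sketch of the post-order traversal behind ggml_v1_visit_parents:
 * parents are pushed before the node itself, so order[] is a valid
 * evaluation order.  Uses a visited flag instead of scanning the node list. */
#include <stddef.h>

#define MAX_NODES 64

typedef struct node_t {
    struct node_t * src0;
    struct node_t * src1;
    int visited;
} node_t;

static void build_order(node_t * n, node_t ** order, int * count) {
    if (n == NULL || n->visited) {
        return;
    }
    n->visited = 1;
    build_order(n->src0, order, count);   // inputs first
    build_order(n->src1, order, count);
    if (*count < MAX_NODES) {
        order[(*count)++] = n;            // append after all parents
    }
}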
assert(gf->n_nodes > 0); // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph if (keep) { for (int i = 0; i < gf->n_nodes; i++) { - struct ggml_tensor * node = gf->nodes[i]; + struct ggml_v1_tensor * node = gf->nodes[i]; if (node->grad) { - node->grad = ggml_dup_tensor(ctx, node); + node->grad = ggml_v1_dup_tensor(ctx, node); gf->grads[i] = node->grad; } } } for (int i = gf->n_nodes - 1; i >= 0; i--) { - struct ggml_tensor * node = gf->nodes[i]; + struct ggml_v1_tensor * node = gf->nodes[i]; // because we detached the grad nodes from the original graph, we can afford inplace operations if (node->grad) { - ggml_compute_backward(ctx, node, keep); + ggml_v1_compute_backward(ctx, node, keep); } } for (int i = gf->n_nodes - 1; i >= 0; i--) { - struct ggml_tensor * node = gf->nodes[i]; + struct ggml_v1_tensor * node = gf->nodes[i]; if (node->is_param) { - GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node); - ggml_build_forward_impl(&result, node->grad, true); + GGML_V1_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node); + ggml_v1_build_forward_impl(&result, node->grad, true); } } @@ -7122,56 +7122,56 @@ struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cg //#include // -//typedef os_unfair_lock ggml_lock_t; +//typedef os_unfair_lock ggml_v1_lock_t; // -//#define ggml_lock_init(x) UNUSED(x) -//#define ggml_lock_destroy(x) UNUSED(x) -//#define ggml_lock_lock os_unfair_lock_lock -//#define ggml_lock_unlock os_unfair_lock_unlock +//#define ggml_v1_lock_init(x) UNUSED(x) +//#define ggml_v1_lock_destroy(x) UNUSED(x) +//#define ggml_v1_lock_lock os_unfair_lock_lock +//#define ggml_v1_lock_unlock os_unfair_lock_unlock // -//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT +//#define GGML_V1_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT -typedef int ggml_lock_t; +typedef int ggml_v1_lock_t; -#define ggml_lock_init(x) UNUSED(x) -#define ggml_lock_destroy(x) UNUSED(x) -#define ggml_lock_lock(x) UNUSED(x) -#define ggml_lock_unlock(x) UNUSED(x) +#define ggml_v1_lock_init(x) UNUSED(x) +#define ggml_v1_lock_destroy(x) UNUSED(x) +#define ggml_v1_lock_lock(x) UNUSED(x) +#define ggml_v1_lock_unlock(x) UNUSED(x) -#define GGML_LOCK_INITIALIZER 0 +#define GGML_V1_LOCK_INITIALIZER 0 -typedef pthread_t ggml_thread_t; +typedef pthread_t ggml_v1_thread_t; -#define ggml_thread_create pthread_create -#define ggml_thread_join pthread_join +#define ggml_v1_thread_create pthread_create +#define ggml_v1_thread_join pthread_join #else -//typedef pthread_spinlock_t ggml_lock_t; +//typedef pthread_spinlock_t ggml_v1_lock_t; -//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE) -//#define ggml_lock_destroy pthread_spin_destroy -//#define ggml_lock_lock pthread_spin_lock -//#define ggml_lock_unlock pthread_spin_unlock +//#define ggml_v1_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE) +//#define ggml_v1_lock_destroy pthread_spin_destroy +//#define ggml_v1_lock_lock pthread_spin_lock +//#define ggml_v1_lock_unlock pthread_spin_unlock -typedef int ggml_lock_t; +typedef int ggml_v1_lock_t; -#define ggml_lock_init(x) UNUSED(x) -#define ggml_lock_destroy(x) UNUSED(x) -#define ggml_lock_lock(x) UNUSED(x) -#define ggml_lock_unlock(x) UNUSED(x) +#define ggml_v1_lock_init(x) UNUSED(x) +#define ggml_v1_lock_destroy(x) UNUSED(x) +#define ggml_v1_lock_lock(x) UNUSED(x) +#define ggml_v1_lock_unlock(x) UNUSED(x) -#define GGML_LOCK_INITIALIZER 0 +#define GGML_V1_LOCK_INITIALIZER 0 -typedef pthread_t ggml_thread_t; 
+typedef pthread_t ggml_v1_thread_t; -#define ggml_thread_create pthread_create -#define ggml_thread_join pthread_join +#define ggml_v1_thread_create pthread_create +#define ggml_v1_thread_join pthread_join #endif -struct ggml_compute_state_shared { - ggml_lock_t spin; +struct ggml_v1_compute_state_shared { + ggml_v1_lock_t spin; int n_threads; @@ -7181,17 +7181,17 @@ struct ggml_compute_state_shared { atomic_bool stop; // stop all threads }; -struct ggml_compute_state { - ggml_thread_t thrd; +struct ggml_v1_compute_state { + ggml_v1_thread_t thrd; - struct ggml_compute_params params; - struct ggml_tensor * node; + struct ggml_v1_compute_params params; + struct ggml_v1_tensor * node; - struct ggml_compute_state_shared * shared; + struct ggml_v1_compute_state_shared * shared; }; -static thread_ret_t ggml_graph_compute_thread(void * data) { - struct ggml_compute_state * state = (struct ggml_compute_state *) data; +static thread_ret_t ggml_v1_graph_compute_thread(void * data) { + struct ggml_v1_compute_state * state = (struct ggml_v1_compute_state *) data; const int n_threads = state->shared->n_threads; @@ -7203,8 +7203,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (atomic_load(&state->shared->stop)) { return 0; } - ggml_lock_lock (&state->shared->spin); - ggml_lock_unlock(&state->shared->spin); + ggml_v1_lock_lock (&state->shared->spin); + ggml_v1_lock_unlock(&state->shared->spin); } } @@ -7215,8 +7215,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (atomic_load(&state->shared->stop)) { return 0; } - ggml_lock_lock (&state->shared->spin); - ggml_lock_unlock(&state->shared->spin); + ggml_v1_lock_lock (&state->shared->spin); + ggml_v1_lock_unlock(&state->shared->spin); } // check if we should stop @@ -7226,7 +7226,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (state->node) { if (state->params.ith < state->params.nth) { - ggml_compute_forward(&state->params, state->node); + ggml_v1_compute_forward(&state->params, state->node); } state->node = NULL; @@ -7238,43 +7238,43 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { return 0; } -void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) { +void ggml_v1_graph_compute(struct ggml_v1_context * ctx, struct ggml_v1_cgraph * cgraph) { if (cgraph->n_threads <= 0) { cgraph->n_threads = 8; } const int n_threads = cgraph->n_threads; - struct ggml_compute_state_shared state_shared = { - /*.spin =*/ GGML_LOCK_INITIALIZER, + struct ggml_v1_compute_state_shared state_shared = { + /*.spin =*/ GGML_V1_LOCK_INITIALIZER, /*.n_threads =*/ n_threads, /*.n_ready =*/ 0, /*.has_work =*/ false, /*.stop =*/ false, }; - struct ggml_compute_state * workers = n_threads > 1 ? alloca(sizeof(struct ggml_compute_state)*(n_threads - 1)) : NULL; + struct ggml_v1_compute_state * workers = n_threads > 1 ? alloca(sizeof(struct ggml_v1_compute_state)*(n_threads - 1)) : NULL; // create thread pool if (n_threads > 1) { - ggml_lock_init(&state_shared.spin); + ggml_v1_lock_init(&state_shared.spin); atomic_store(&state_shared.has_work, true); for (int j = 0; j < n_threads - 1; j++) { - workers[j] = (struct ggml_compute_state) { + workers[j] = (struct ggml_v1_compute_state) { .thrd = 0, .params = { - .type = GGML_TASK_COMPUTE, + .type = GGML_V1_TASK_COMPUTE, .ith = j + 1, .nth = n_threads, - .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, + .wsize = cgraph->work ? ggml_v1_nbytes(cgraph->work) : 0, .wdata = cgraph->work ? 
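Aside (not part of the diff): with the lock macros reduced to no-ops, the worker handshake in ggml_v1_graph_compute is pure busy-waiting on the atomics in ggml_v1_compute_state_shared. A stripped-down sketch of that pattern; shared_t, post_work() and worker_main() are hypothetical names and omit the per-node parameter hand-off.

/* Stripped-down sketch of the busy-wait handshake used by the graph compute
 * thread pool: the main thread posts work via an atomic flag, workers spin on
 * it, run their slice, report completion, and spin again for the next phase. */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    atomic_bool has_work;   // set by the main thread when a node is ready
    atomic_bool stop;       // set once the whole graph is done
    atomic_int  n_ready;    // workers that finished the current node
} shared_t;

static void post_work(shared_t * s) {
    atomic_store(&s->n_ready, 0);
    atomic_store(&s->has_work, true);
}

static int worker_main(shared_t * s) {
    for (;;) {
        while (!atomic_load(&s->has_work)) {      // spin until work is posted
            if (atomic_load(&s->stop)) return 0;  // or until shutdown
        }
        /* ... run ggml_v1_compute_forward() for this thread's slice ... */
        atomic_fetch_add(&s->n_ready, 1);         // report completion
        while (atomic_load(&s->has_work)) {       // wait for the next phase
            if (atomic_load(&s->stop)) return 0;
        }
    }
}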
cgraph->work->data : NULL, }, .node = NULL, .shared = &state_shared, }; - int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]); + int rc = ggml_v1_thread_create(&workers[j].thrd, NULL, ggml_v1_graph_compute_thread, &workers[j]); assert(rc == 0); UNUSED(rc); } @@ -7286,48 +7286,48 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) // thread scheduling for the different operations for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; + struct ggml_v1_tensor * node = cgraph->nodes[i]; switch (node->op) { - case GGML_OP_DUP: + case GGML_V1_OP_DUP: { node->n_tasks = 1; } break; - case GGML_OP_ADD: + case GGML_V1_OP_ADD: { node->n_tasks = n_threads; } break; - case GGML_OP_SUB: - case GGML_OP_MUL: - case GGML_OP_DIV: - case GGML_OP_SQR: - case GGML_OP_SQRT: - case GGML_OP_SUM: - case GGML_OP_MEAN: - case GGML_OP_REPEAT: - case GGML_OP_ABS: - case GGML_OP_SGN: - case GGML_OP_NEG: - case GGML_OP_STEP: - case GGML_OP_RELU: + case GGML_V1_OP_SUB: + case GGML_V1_OP_MUL: + case GGML_V1_OP_DIV: + case GGML_V1_OP_SQR: + case GGML_V1_OP_SQRT: + case GGML_V1_OP_SUM: + case GGML_V1_OP_MEAN: + case GGML_V1_OP_REPEAT: + case GGML_V1_OP_ABS: + case GGML_V1_OP_SGN: + case GGML_V1_OP_NEG: + case GGML_V1_OP_STEP: + case GGML_V1_OP_RELU: { node->n_tasks = 1; } break; - case GGML_OP_GELU: + case GGML_V1_OP_GELU: { node->n_tasks = n_threads; } break; - case GGML_OP_NORM: + case GGML_V1_OP_NORM: { node->n_tasks = n_threads; } break; - case GGML_OP_MUL_MAT: + case GGML_V1_OP_MUL_MAT: { node->n_tasks = n_threads; // TODO: use different scheduling for different matrix sizes - //const int nr0 = ggml_nrows(node->src0); - //const int nr1 = ggml_nrows(node->src1); + //const int nr0 = ggml_v1_nrows(node->src0); + //const int nr1 = ggml_v1_nrows(node->src1); //node->n_tasks = MIN(n_threads, MAX(1, nr0/128)); //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks = %d\n", nr0, nr1, nr0*nr1, node->n_tasks); @@ -7336,12 +7336,12 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) // TODO: better way to determine if the matrix is transposed if (node->src0->nb[1] < node->src0->nb[0]) { - cur = ggml_nbytes(node)*node->n_tasks; // TODO: this can become (n_tasks-1) + cur = ggml_v1_nbytes(node)*node->n_tasks; // TODO: this can become (n_tasks-1) } else { - if (node->src0->type == GGML_TYPE_F16 && - node->src1->type == GGML_TYPE_F32) { -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) - if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { + if (node->src0->type == GGML_V1_TYPE_F16 && + node->src1->type == GGML_V1_TYPE_F32) { +#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS) + if (ggml_v1_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { node->n_tasks = 1; // TODO: this actually is doing nothing // the threads are still spinning cur = sizeof(float)*(node->src0->ne[0]*node->src0->ne[1]); @@ -7349,116 +7349,116 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) //printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]); //printf("cur = %zu\n", cur); } else { - cur = sizeof(ggml_fp16_t)*ggml_nelements(node->src1); + cur = sizeof(ggml_v1_fp16_t)*ggml_v1_nelements(node->src1); } #else - cur = sizeof(ggml_fp16_t)*ggml_nelements(node->src1); + cur = sizeof(ggml_v1_fp16_t)*ggml_v1_nelements(node->src1); #endif - } else if (node->src0->type 
== GGML_TYPE_F32 && - node->src1->type == GGML_TYPE_F32) { + } else if (node->src0->type == GGML_V1_TYPE_F32 && + node->src1->type == GGML_V1_TYPE_F32) { cur = 0; } else { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } } work_size = MAX(work_size, cur); } break; - case GGML_OP_SCALE: + case GGML_V1_OP_SCALE: { node->n_tasks = n_threads; } break; - case GGML_OP_CPY: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - case GGML_OP_GET_ROWS: - case GGML_OP_DIAG_MASK_INF: + case GGML_V1_OP_CPY: + case GGML_V1_OP_RESHAPE: + case GGML_V1_OP_VIEW: + case GGML_V1_OP_PERMUTE: + case GGML_V1_OP_TRANSPOSE: + case GGML_V1_OP_GET_ROWS: + case GGML_V1_OP_DIAG_MASK_INF: { node->n_tasks = 1; } break; - case GGML_OP_SOFT_MAX: + case GGML_V1_OP_SOFT_MAX: { node->n_tasks = n_threads; } break; - case GGML_OP_ROPE: + case GGML_V1_OP_ROPE: { node->n_tasks = 1; } break; - case GGML_OP_CONV_1D_1S: - case GGML_OP_CONV_1D_2S: + case GGML_V1_OP_CONV_1D_1S: + case GGML_V1_OP_CONV_1D_2S: { node->n_tasks = n_threads; - GGML_ASSERT(node->src0->ne[3] == 1); - GGML_ASSERT(node->src1->ne[2] == 1); - GGML_ASSERT(node->src1->ne[3] == 1); + GGML_V1_ASSERT(node->src0->ne[3] == 1); + GGML_V1_ASSERT(node->src1->ne[2] == 1); + GGML_V1_ASSERT(node->src1->ne[3] == 1); size_t cur = 0; const int nk = node->src0->ne[0]; - if (node->src0->type == GGML_TYPE_F16 && - node->src1->type == GGML_TYPE_F32) { - cur = sizeof(ggml_fp16_t)*( - nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] + + if (node->src0->type == GGML_V1_TYPE_F16 && + node->src1->type == GGML_V1_TYPE_F32) { + cur = sizeof(ggml_v1_fp16_t)*( + nk*ggml_v1_up32(node->src0->ne[1])*node->src0->ne[2] + ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1] ); - } else if (node->src0->type == GGML_TYPE_F32 && - node->src1->type == GGML_TYPE_F32) { + } else if (node->src0->type == GGML_V1_TYPE_F32 && + node->src1->type == GGML_V1_TYPE_F32) { cur = sizeof(float)*( - nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] + + nk*ggml_v1_up32(node->src0->ne[1])*node->src0->ne[2] + ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1] ); } else { - GGML_ASSERT(false); + GGML_V1_ASSERT(false); } work_size = MAX(work_size, cur); } break; - case GGML_OP_FLASH_ATTN: + case GGML_V1_OP_FLASH_ATTN: { node->n_tasks = n_threads; size_t cur = 0; - const int ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL); + const int ne11 = ggml_v1_up(node->src1->ne[1], GGML_V1_SOFT_MAX_UNROLL); - if (node->src1->type == GGML_TYPE_F32) { + if (node->src1->type == GGML_V1_TYPE_F32) { cur = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2 } - if (node->src1->type == GGML_TYPE_F16) { + if (node->src1->type == GGML_V1_TYPE_F16) { cur = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2 } work_size = MAX(work_size, cur); } break; - case GGML_OP_FLASH_FF: + case GGML_V1_OP_FLASH_FF: { node->n_tasks = n_threads; size_t cur = 0; - if (node->src1->type == GGML_TYPE_F32) { + if (node->src1->type == GGML_V1_TYPE_F32) { cur = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2 } - if (node->src1->type == GGML_TYPE_F16) { + if (node->src1->type == GGML_V1_TYPE_F16) { cur = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1) cur += 
sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2 } work_size = MAX(work_size, cur); } break; - case GGML_OP_NONE: + case GGML_V1_OP_NONE: { node->n_tasks = 1; } break; - case GGML_OP_COUNT: + case GGML_V1_OP_COUNT: { assert(false); } break; @@ -7472,37 +7472,37 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) if (work_size > 0 && cgraph->work == NULL) { cgraph->work_size = work_size + CACHE_LINE_SIZE*(n_threads - 1); - GGML_PRINT_DEBUG("%s: allocating work buffer for graph (%zu bytes)\n", __func__, cgraph->work_size); - cgraph->work = ggml_new_tensor_1d(ctx, GGML_TYPE_I8, cgraph->work_size); + GGML_V1_PRINT_DEBUG("%s: allocating work buffer for graph (%zu bytes)\n", __func__, cgraph->work_size); + cgraph->work = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_I8, cgraph->work_size); } } - const int64_t perf_start_cycles = ggml_perf_cycles(); - const int64_t perf_start_time_us = ggml_perf_time_us(); + const int64_t perf_start_cycles = ggml_v1_perf_cycles(); + const int64_t perf_start_time_us = ggml_v1_perf_time_us(); for (int i = 0; i < cgraph->n_nodes; i++) { - GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, i, cgraph->n_nodes); + GGML_V1_PRINT_DEBUG_5("%s: %d/%d\n", __func__, i, cgraph->n_nodes); - struct ggml_tensor * node = cgraph->nodes[i]; + struct ggml_v1_tensor * node = cgraph->nodes[i]; // TODO: this could be used to avoid unnecessary computations, but it needs to be improved //if (node->grad == NULL && node->perf_runs > 0) { // continue; //} - const int64_t perf_node_start_cycles = ggml_perf_cycles(); - const int64_t perf_node_start_time_us = ggml_perf_time_us(); + const int64_t perf_node_start_cycles = ggml_v1_perf_cycles(); + const int64_t perf_node_start_time_us = ggml_v1_perf_time_us(); // INIT - struct ggml_compute_params params = { - /*.type =*/ GGML_TASK_INIT, + struct ggml_v1_compute_params params = { + /*.type =*/ GGML_V1_TASK_INIT, /*.ith =*/ 0, /*.nth =*/ node->n_tasks, - /*.wsize =*/ cgraph->work ? ggml_nbytes(cgraph->work) : 0, + /*.wsize =*/ cgraph->work ? ggml_v1_nbytes(cgraph->work) : 0, /*.wdata =*/ cgraph->work ? cgraph->work->data : NULL, }; - ggml_compute_forward(¶ms, node); + ggml_v1_compute_forward(¶ms, node); // COMPUTE if (node->n_tasks > 1) { @@ -7511,17 +7511,17 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) } while (atomic_load(&state_shared.has_work)) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); + ggml_v1_lock_lock (&state_shared.spin); + ggml_v1_lock_unlock(&state_shared.spin); } // launch thread pool for (int j = 0; j < n_threads - 1; j++) { - workers[j].params = (struct ggml_compute_params) { - .type = GGML_TASK_COMPUTE, + workers[j].params = (struct ggml_v1_compute_params) { + .type = GGML_V1_TASK_COMPUTE, .ith = j + 1, .nth = node->n_tasks, - .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, + .wsize = cgraph->work ? ggml_v1_nbytes(cgraph->work) : 0, .wdata = cgraph->work ? 
cgraph->work->data : NULL, }; workers[j].node = node; @@ -7530,15 +7530,15 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) atomic_fetch_sub(&state_shared.n_ready, 1); while (atomic_load(&state_shared.n_ready) > 0) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); + ggml_v1_lock_lock (&state_shared.spin); + ggml_v1_lock_unlock(&state_shared.spin); } atomic_store(&state_shared.has_work, true); } - params.type = GGML_TASK_COMPUTE; - ggml_compute_forward(¶ms, node); + params.type = GGML_V1_TASK_COMPUTE; + ggml_v1_compute_forward(¶ms, node); // wait for thread pool if (node->n_tasks > 1) { @@ -7547,15 +7547,15 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) } while (atomic_load(&state_shared.has_work)) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); + ggml_v1_lock_lock (&state_shared.spin); + ggml_v1_lock_unlock(&state_shared.spin); } atomic_fetch_sub(&state_shared.n_ready, 1); while (atomic_load(&state_shared.n_ready) != 0) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); + ggml_v1_lock_lock (&state_shared.spin); + ggml_v1_lock_unlock(&state_shared.spin); } } @@ -7566,17 +7566,17 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) } while (atomic_load(&state_shared.has_work)) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); + ggml_v1_lock_lock (&state_shared.spin); + ggml_v1_lock_unlock(&state_shared.spin); } // launch thread pool for (int j = 0; j < n_threads - 1; j++) { - workers[j].params = (struct ggml_compute_params) { - .type = GGML_TASK_FINALIZE, + workers[j].params = (struct ggml_v1_compute_params) { + .type = GGML_V1_TASK_FINALIZE, .ith = j + 1, .nth = node->n_tasks, - .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, + .wsize = cgraph->work ? ggml_v1_nbytes(cgraph->work) : 0, .wdata = cgraph->work ? 
cgraph->work->data : NULL, }; workers[j].node = node; @@ -7585,15 +7585,15 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) atomic_fetch_sub(&state_shared.n_ready, 1); while (atomic_load(&state_shared.n_ready) > 0) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); + ggml_v1_lock_lock (&state_shared.spin); + ggml_v1_lock_unlock(&state_shared.spin); } atomic_store(&state_shared.has_work, true); } - params.type = GGML_TASK_FINALIZE; - ggml_compute_forward(¶ms, node); + params.type = GGML_V1_TASK_FINALIZE; + ggml_v1_compute_forward(¶ms, node); // wait for thread pool if (node->n_tasks > 1) { @@ -7602,22 +7602,22 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) } while (atomic_load(&state_shared.has_work)) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); + ggml_v1_lock_lock (&state_shared.spin); + ggml_v1_lock_unlock(&state_shared.spin); } atomic_fetch_sub(&state_shared.n_ready, 1); while (atomic_load(&state_shared.n_ready) != 0) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); + ggml_v1_lock_lock (&state_shared.spin); + ggml_v1_lock_unlock(&state_shared.spin); } } // performance stats (node) { - int64_t perf_cycles_cur = ggml_perf_cycles() - perf_node_start_cycles; - int64_t perf_time_us_cur = ggml_perf_time_us() - perf_node_start_time_us; + int64_t perf_cycles_cur = ggml_v1_perf_cycles() - perf_node_start_cycles; + int64_t perf_time_us_cur = ggml_v1_perf_time_us() - perf_node_start_time_us; node->perf_runs++; node->perf_cycles += perf_cycles_cur; @@ -7631,85 +7631,85 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) atomic_store(&state_shared.has_work, true); for (int j = 0; j < n_threads - 1; j++) { - int rc = ggml_thread_join(workers[j].thrd, NULL); + int rc = ggml_v1_thread_join(workers[j].thrd, NULL); assert(rc == 0); UNUSED(rc); } - ggml_lock_destroy(&state_shared.spin); + ggml_v1_lock_destroy(&state_shared.spin); } // performance stats (graph) { - int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles; - int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us; + int64_t perf_cycles_cur = ggml_v1_perf_cycles() - perf_start_cycles; + int64_t perf_time_us_cur = ggml_v1_perf_time_us() - perf_start_time_us; cgraph->perf_runs++; cgraph->perf_cycles += perf_cycles_cur; cgraph->perf_time_us += perf_time_us_cur; - GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n", + GGML_V1_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n", __func__, cgraph->perf_runs, - (double) perf_cycles_cur / (double) ggml_cycles_per_ms(), - (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs, + (double) perf_cycles_cur / (double) ggml_v1_cycles_per_ms(), + (double) cgraph->perf_cycles / (double) ggml_v1_cycles_per_ms() / (double) cgraph->perf_runs, (double) perf_time_us_cur / 1000.0, (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs); } } -void ggml_graph_reset(struct ggml_cgraph * cgraph) { +void ggml_v1_graph_reset(struct ggml_v1_cgraph * cgraph) { for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * grad = cgraph->grads[i]; + struct ggml_v1_tensor * grad = cgraph->grads[i]; if (grad) { - ggml_set_zero(grad); + ggml_v1_set_zero(grad); } } } -void ggml_graph_print(const struct ggml_cgraph * cgraph) { - int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0}; +void ggml_v1_graph_print(const struct 
ggml_v1_cgraph * cgraph) { + int64_t perf_total_per_op_us[GGML_V1_OP_COUNT] = {0}; - GGML_PRINT("=== GRAPH ===\n"); + GGML_V1_PRINT("=== GRAPH ===\n"); - GGML_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads); - GGML_PRINT_DEBUG("total work size = %zu bytes\n",cgraph->work_size); + GGML_V1_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads); + GGML_V1_PRINT_DEBUG("total work size = %zu bytes\n",cgraph->work_size); - GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes); + GGML_V1_PRINT("n_nodes = %d\n", cgraph->n_nodes); for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; + struct ggml_v1_tensor * node = cgraph->nodes[i]; perf_total_per_op_us[node->op] += node->perf_time_us; - GGML_PRINT(" - %3d: [ %6d, %6d, %6d] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n", + GGML_V1_PRINT(" - %3d: [ %6d, %6d, %6d] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n", i, node->ne[0], node->ne[1], node->ne[2], - GGML_OP_LABEL[node->op], node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs, - (double) node->perf_cycles / (double) ggml_cycles_per_ms(), - (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs, + GGML_V1_OP_LABEL[node->op], node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs, + (double) node->perf_cycles / (double) ggml_v1_cycles_per_ms(), + (double) node->perf_cycles / (double) ggml_v1_cycles_per_ms() / (double) node->perf_runs, (double) node->perf_time_us / 1000.0, (double) node->perf_time_us / 1000.0 / node->perf_runs); } - GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs); + GGML_V1_PRINT("n_leafs = %d\n", cgraph->n_leafs); for (int i = 0; i < cgraph->n_leafs; i++) { - struct ggml_tensor * node = cgraph->leafs[i]; + struct ggml_v1_tensor * node = cgraph->leafs[i]; - GGML_PRINT(" - %3d: [ %6d, %6d] %8s\n", + GGML_V1_PRINT(" - %3d: [ %6d, %6d] %8s\n", i, node->ne[0], node->ne[1], - GGML_OP_LABEL[node->op]); + GGML_V1_OP_LABEL[node->op]); } - for (int i = 0; i < GGML_OP_COUNT; i++) { - GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", GGML_OP_LABEL[i], (double) perf_total_per_op_us[i] / 1000.0); + for (int i = 0; i < GGML_V1_OP_COUNT; i++) { + GGML_V1_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", GGML_V1_OP_LABEL[i], (double) perf_total_per_op_us[i] / 1000.0); } - GGML_PRINT("========================================\n"); + GGML_V1_PRINT("========================================\n"); } // check if node is part of the graph -static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { +static bool ggml_v1_graph_find(const struct ggml_v1_cgraph * cgraph, const struct ggml_v1_tensor * node) { if (cgraph == NULL) { return true; } @@ -7723,9 +7723,9 @@ static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml return false; } -static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { +static struct ggml_v1_tensor * ggml_v1_graph_get_parent(const struct ggml_v1_cgraph * cgraph, const struct ggml_v1_tensor * node) { for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * parent = cgraph->nodes[i]; + struct ggml_v1_tensor * parent = cgraph->nodes[i]; if (parent->grad == node) { return parent; @@ -7735,7 +7735,7 @@ static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgr return NULL; } -void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) { +void ggml_v1_graph_dump_dot(const 
struct ggml_v1_cgraph * gb, const struct ggml_v1_cgraph * gf, const char * filename) { char color[16]; FILE * fp = fopen(filename, "w"); @@ -7746,16 +7746,16 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph fprintf(fp, " rankdir = LR;\n"); for (int i = 0; i < gb->n_nodes; i++) { - struct ggml_tensor * node = gb->nodes[i]; + struct ggml_v1_tensor * node = gb->nodes[i]; - if (ggml_graph_get_parent(gb, node) != NULL) { + if (ggml_v1_graph_get_parent(gb, node) != NULL) { continue; } if (node->is_param) { snprintf(color, sizeof(color), "yellow"); } else if (node->grad) { - if (ggml_graph_find(gf, node)) { + if (ggml_v1_graph_find(gf, node)) { snprintf(color, sizeof(color), "green"); } else { snprintf(color, sizeof(color), "lightblue"); @@ -7769,25 +7769,25 @@ style = filled; fillcolor = %s; shape = record; \ label=\"%d [%d, %d] | %s", (void *) node, color, i, node->ne[0], node->ne[1], - GGML_OP_SYMBOL[node->op]); + GGML_V1_OP_SYMBOL[node->op]); if (node->grad) { - fprintf(fp, " | %s\"; ]\n", GGML_OP_SYMBOL[node->grad->op]); + fprintf(fp, " | %s\"; ]\n", GGML_V1_OP_SYMBOL[node->grad->op]); } else { fprintf(fp, "\"; ]\n"); } } for (int i = 0; i < gb->n_leafs; i++) { - struct ggml_tensor * node = gb->leafs[i]; + struct ggml_v1_tensor * node = gb->leafs[i]; snprintf(color, sizeof(color), "pink"); - if (ggml_nelements(node) == 1) { + if (ggml_v1_nelements(node) == 1) { fprintf(fp, " \"%p\" [ \ style = filled; fillcolor = %s; shape = record; \ label=\"%.1e\"; ]\n", - (void *) node, color, ggml_get_f32_1d(node, 0)); + (void *) node, color, ggml_v1_get_f32_1d(node, 0)); } else { fprintf(fp, " \"%p\" [ \ style = filled; fillcolor = %s; shape = record; \ @@ -7798,12 +7798,12 @@ label=\"CONST %d [%d, %d]\"; ]\n", } for (int i = 0; i < gb->n_nodes; i++) { - struct ggml_tensor * node = gb->nodes[i]; + struct ggml_v1_tensor * node = gb->nodes[i]; - struct ggml_tensor * parent = ggml_graph_get_parent(gb, node); + struct ggml_v1_tensor * parent = ggml_v1_graph_get_parent(gb, node); if (node->src0) { - struct ggml_tensor * parent0 = ggml_graph_get_parent(gb, node->src0); + struct ggml_v1_tensor * parent0 = ggml_v1_graph_get_parent(gb, node->src0); fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"x\"; ]\n", parent0 ? (void *) parent0 : (void *) node->src0, @@ -7815,7 +7815,7 @@ label=\"CONST %d [%d, %d]\"; ]\n", } if (node->src1) { - struct ggml_tensor * parent1 = ggml_graph_get_parent(gb, node->src1); + struct ggml_v1_tensor * parent1 = ggml_v1_graph_get_parent(gb, node->src1); fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"y\"; ]\n", parent1 ? 
(void *) parent1 : (void *) node->src1, @@ -7828,7 +7828,7 @@ label=\"CONST %d [%d, %d]\"; ]\n", } for (int i = 0; i < gb->n_leafs; i++) { - struct ggml_tensor * node = gb->leafs[i]; + struct ggml_v1_tensor * node = gb->leafs[i]; if (node->src0) { fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"x\"; ]\n", @@ -7847,40 +7847,40 @@ label=\"CONST %d [%d, %d]\"; ]\n", fclose(fp); - GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename); + GGML_V1_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename); } //////////////////////////////////////////////////////////////////////////////// -static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) { +static void ggml_v1_opt_set_params(int np, struct ggml_v1_tensor * const ps[], const float * x) { int i = 0; for (int p = 0; p < np; ++p) { - const int ne = ggml_nelements(ps[p]) ; + const int ne = ggml_v1_nelements(ps[p]) ; // TODO: add function to set tensor from array for (int j = 0; j < ne; ++j) { - ggml_set_f32_1d(ps[p], j, x[i++]); + ggml_v1_set_f32_1d(ps[p], j, x[i++]); } } } -static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) { +static void ggml_v1_opt_get_params(int np, struct ggml_v1_tensor * const ps[], float * x) { int i = 0; for (int p = 0; p < np; ++p) { - const int ne = ggml_nelements(ps[p]) ; + const int ne = ggml_v1_nelements(ps[p]) ; // TODO: add function to get all elements at once for (int j = 0; j < ne; ++j) { - x[i++] = ggml_get_f32_1d(ps[p], j); + x[i++] = ggml_v1_get_f32_1d(ps[p], j); } } } -static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) { +static void ggml_v1_opt_get_grad(int np, struct ggml_v1_tensor * const ps[], float * g) { int i = 0; for (int p = 0; p < np; ++p) { - const int ne = ggml_nelements(ps[p]) ; + const int ne = ggml_v1_nelements(ps[p]) ; // TODO: add function to get all elements at once for (int j = 0; j < ne; ++j) { - g[i++] = ggml_get_f32_1d(ps[p]->grad, j); + g[i++] = ggml_v1_get_f32_1d(ps[p]->grad, j); } } } @@ -7891,30 +7891,30 @@ static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g // ref: https://arxiv.org/pdf/1412.6980.pdf // -static enum ggml_opt_result ggml_opt_adam( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb) { - assert(ggml_is_scalar(f)); +static enum ggml_v1_opt_result ggml_v1_opt_adam( + struct ggml_v1_context * ctx, + struct ggml_v1_opt_params params, + struct ggml_v1_tensor * f, + struct ggml_v1_cgraph * gf, + struct ggml_v1_cgraph * gb) { + assert(ggml_v1_is_scalar(f)); gf->n_threads = params.n_threads; gb->n_threads = params.n_threads; // these will store the parameters we want to optimize - struct ggml_tensor * ps[GGML_MAX_PARAMS]; + struct ggml_v1_tensor * ps[GGML_V1_MAX_PARAMS]; int np = 0; int nx = 0; for (int i = 0; i < gf->n_nodes; ++i) { if (gf->nodes[i]->is_param) { - GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); + GGML_V1_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); - assert(np < GGML_MAX_PARAMS); + assert(np < GGML_V1_MAX_PARAMS); ps[np++] = gf->nodes[i]; - nx += ggml_nelements(gf->nodes[i]); + nx += ggml_v1_nelements(gf->nodes[i]); } } @@ -7924,29 +7924,29 @@ static enum ggml_opt_result ggml_opt_adam( const float beta2 = params.adam.beta2; const float eps = params.adam.eps; - float * x = ggml_new_tensor_1d(ctx, 
GGML_TYPE_F32, nx)->data; // view of the parameters - float * g1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient - float * g2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient squared - float * m = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment - float * v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment - float * mh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment hat - float * vh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment hat + float * x = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // view of the parameters + float * g1 = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // gradient + float * g2 = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // gradient squared + float * m = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // first moment + float * v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // second moment + float * mh = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // first moment hat + float * vh = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // second moment hat - float * pf = params.past > 0 ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values + float * pf = params.past > 0 ? ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, params.past)->data : NULL; // past function values // initialize - ggml_vec_set_f32(nx, m, 0.0f); - ggml_vec_set_f32(nx, v, 0.0f); + ggml_v1_vec_set_f32(nx, m, 0.0f); + ggml_v1_vec_set_f32(nx, v, 0.0f); // update view - ggml_opt_get_params(np, ps, x); + ggml_v1_opt_get_params(np, ps, x); // compute the function value - ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(ctx, gb); + ggml_v1_graph_reset (gf); + ggml_v1_set_f32 (f->grad, 1.0f); + ggml_v1_graph_compute(ctx, gb); - float fx_prev = ggml_get_f32_1d(f, 0); + float fx_prev = ggml_v1_get_f32_1d(f, 0); if (pf) { pf[0] = fx_prev; } @@ -7956,67 +7956,67 @@ static enum ggml_opt_result ggml_opt_adam( // run the optimizer for (int t = 0; t < params.adam.n_iter; ++t) { - GGML_PRINT_DEBUG ("=== iter %d ===\n", t); + GGML_V1_PRINT_DEBUG ("=== iter %d ===\n", t); - GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0)); - GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0)); - GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0)); + GGML_V1_PRINT_DEBUG ("f = %10.6f\n", ggml_v1_get_f32_1d(f, 0)); + GGML_V1_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_v1_get_f32_1d(ps[0]->grad, 0)); + GGML_V1_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_v1_get_f32_1d(ps[1]->grad, 0)); for (int i = 0; i < np; ++i) { - GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i, - ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0)); + GGML_V1_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i, + ggml_v1_get_f32_1d(ps[i], 0), ggml_v1_get_f32_1d(ps[i]->grad, 0)); } - const int64_t t_start_wall = ggml_time_us(); - const int64_t t_start_cpu = ggml_cycles(); + const int64_t t_start_wall = ggml_v1_time_us(); + const int64_t t_start_cpu = ggml_v1_cycles(); UNUSED(t_start_wall); UNUSED(t_start_cpu); { // update the gradient - ggml_opt_get_grad(np, ps, g1); + ggml_v1_opt_get_grad(np, ps, g1); // m_t = beta1*m_t-1 + (1 - beta1)*g_t - ggml_vec_scale_f32(nx, m, beta1); - ggml_vec_mad_f32 (nx, m, g1, 1.0f - beta1); + ggml_v1_vec_scale_f32(nx, m, beta1); + ggml_v1_vec_mad_f32 (nx, m, g1, 1.0f - beta1); // g2 = g1^2 - ggml_vec_sqr_f32 (nx, g2, g1); + ggml_v1_vec_sqr_f32 (nx, g2, 
g1); // v_t = beta2*v_t-1 + (1 - beta2)*g_t^2 - ggml_vec_scale_f32(nx, v, beta2); - ggml_vec_mad_f32 (nx, v, g2, 1.0f - beta2); + ggml_v1_vec_scale_f32(nx, v, beta2); + ggml_v1_vec_mad_f32 (nx, v, g2, 1.0f - beta2); // m^hat = m_t / (1 - beta1^t) // v^hat = v_t / (1 - beta2^t) // x_t = x_t-1 - alpha*m^hat/(sqrt(v^hat) + eps) - ggml_vec_cpy_f32 (nx, mh, m); - ggml_vec_cpy_f32 (nx, vh, v); + ggml_v1_vec_cpy_f32 (nx, mh, m); + ggml_v1_vec_cpy_f32 (nx, vh, v); - ggml_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, t + 1))); - ggml_vec_scale_f32(nx, vh, 1.0f/(1.0f - powf(beta2, t + 1))); + ggml_v1_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, t + 1))); + ggml_v1_vec_scale_f32(nx, vh, 1.0f/(1.0f - powf(beta2, t + 1))); - ggml_vec_sqrt_f32 (nx, vh, vh); - ggml_vec_acc1_f32 (nx, vh, eps); + ggml_v1_vec_sqrt_f32 (nx, vh, vh); + ggml_v1_vec_acc1_f32 (nx, vh, eps); - ggml_vec_div_f32 (nx, mh, mh, vh); - ggml_vec_sub_f32 (nx, x, x, mh); + ggml_v1_vec_div_f32 (nx, mh, mh, vh); + ggml_v1_vec_sub_f32 (nx, x, x, mh); // update the parameters - ggml_opt_set_params(np, ps, x); + ggml_v1_opt_set_params(np, ps, x); } - ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(ctx, gb); + ggml_v1_graph_reset (gf); + ggml_v1_set_f32 (f->grad, 1.0f); + ggml_v1_graph_compute(ctx, gb); - const float fx = ggml_get_f32_1d(f, 0); + const float fx = ggml_v1_get_f32_1d(f, 0); // check convergence if (fabsf(fx - fx_prev)/fx < params.adam.eps_f) { - GGML_PRINT_DEBUG("converged\n"); + GGML_V1_PRINT_DEBUG("converged\n"); - return GGML_OPT_OK; + return GGML_V1_OPT_OK; } // delta-based convergence test @@ -8026,7 +8026,7 @@ static enum ggml_opt_result ggml_opt_adam( const float rate = (pf[t%params.past] - fx)/fx; if (fabs(rate) < params.delta) { - return GGML_OPT_OK; + return GGML_V1_OPT_OK; } } @@ -8042,7 +8042,7 @@ static enum ggml_opt_result ggml_opt_adam( ++n_no_improvement; if (n_no_improvement >= params.max_no_improvement) { - return GGML_OPT_OK; + return GGML_V1_OPT_OK; } } } @@ -8050,17 +8050,17 @@ static enum ggml_opt_result ggml_opt_adam( fx_prev = fx; { - const int64_t t_end_cpu = ggml_cycles(); - GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC); + const int64_t t_end_cpu = ggml_v1_cycles(); + GGML_V1_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC); UNUSED(t_end_cpu); - const int64_t t_end_wall = ggml_time_us(); - GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6); + const int64_t t_end_wall = ggml_v1_time_us(); + GGML_V1_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6); UNUSED(t_end_wall); } } - return GGML_OPT_DID_NOT_CONVERGE; + return GGML_V1_OPT_DID_NOT_CONVERGE; } // @@ -8071,16 +8071,16 @@ static enum ggml_opt_result ggml_opt_adam( // https://github.com/chokkan/liblbfgs // -struct ggml_lbfgs_iteration_data { +struct ggml_v1_lbfgs_iteration_data { float alpha; float ys; float * s; float * y; }; -static enum ggml_opt_result linesearch_backtracking( - struct ggml_context * ctx, - const struct ggml_opt_params * params, +static enum ggml_v1_opt_result linesearch_backtracking( + struct ggml_v1_context * ctx, + const struct ggml_v1_opt_params * params, int nx, float * x, float * fx, @@ -8088,11 +8088,11 @@ static enum ggml_opt_result linesearch_backtracking( float * d, float * step, const float * xp, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb, + struct ggml_v1_tensor * f, + struct ggml_v1_cgraph * gf, + struct ggml_v1_cgraph * 
gb, const int np, - struct ggml_tensor * ps[]) { + struct ggml_v1_tensor * ps[]) { int count = 0; float width = 0.0f; @@ -8105,15 +8105,15 @@ static enum ggml_opt_result linesearch_backtracking( const float inc = 2.1f; if (*step <= 0.) { - return GGML_LINESEARCH_INVALID_PARAMETERS; + return GGML_V1_LINESEARCH_INVALID_PARAMETERS; } // compute the initial gradient in the search direction - ggml_vec_dot_f32(nx, &dginit, g, d); + ggml_v1_vec_dot_f32(nx, &dginit, g, d); // make sure that d points to a descent direction if (0 < dginit) { - return GGML_LINESEARCH_FAIL; + return GGML_V1_LINESEARCH_FAIL; } // initialize local variables @@ -8121,20 +8121,20 @@ static enum ggml_opt_result linesearch_backtracking( dgtest = params->lbfgs.ftol*dginit; while (true) { - ggml_vec_cpy_f32(nx, x, xp); - ggml_vec_mad_f32(nx, x, d, *step); + ggml_v1_vec_cpy_f32(nx, x, xp); + ggml_v1_vec_mad_f32(nx, x, d, *step); // evaluate the function and gradient values { - ggml_opt_set_params(np, ps, x); + ggml_v1_opt_set_params(np, ps, x); - ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(ctx, gb); + ggml_v1_graph_reset (gf); + ggml_v1_set_f32 (f->grad, 1.0f); + ggml_v1_graph_compute(ctx, gb); - ggml_opt_get_grad(np, ps, g); + ggml_v1_opt_get_grad(np, ps, g); - *fx = ggml_get_f32_1d(f, 0); + *fx = ggml_v1_get_f32_1d(f, 0); } ++count; @@ -8143,17 +8143,17 @@ static enum ggml_opt_result linesearch_backtracking( width = dec; } else { // Armijo condition is satisfied - if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) { + if (params->lbfgs.linesearch == GGML_V1_LINESEARCH_BACKTRACKING_ARMIJO) { return count; } - ggml_vec_dot_f32(nx, &dg, g, d); + ggml_v1_vec_dot_f32(nx, &dg, g, d); // check the Wolfe condition if (dg < params->lbfgs.wolfe * dginit) { width = inc; } else { - if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) { + if(params->lbfgs.linesearch == GGML_V1_LINESEARCH_BACKTRACKING_WOLFE) { // regular Wolfe conditions return count; } @@ -8161,7 +8161,7 @@ static enum ggml_opt_result linesearch_backtracking( if(dg > -params->lbfgs.wolfe*dginit) { width = dec; } else { - // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) + // strong Wolfe condition (GGML_V1_LINESEARCH_BACKTRACKING_STRONG_WOLFE) return count; } return count; @@ -8169,31 +8169,31 @@ static enum ggml_opt_result linesearch_backtracking( } if (*step < params->lbfgs.min_step) { - return GGML_LINESEARCH_MINIMUM_STEP; + return GGML_V1_LINESEARCH_MINIMUM_STEP; } if (*step > params->lbfgs.max_step) { - return GGML_LINESEARCH_MAXIMUM_STEP; + return GGML_V1_LINESEARCH_MAXIMUM_STEP; } if (params->lbfgs.max_linesearch <= count) { - return GGML_LINESEARCH_MAXIMUM_ITERATIONS; + return GGML_V1_LINESEARCH_MAXIMUM_ITERATIONS; } (*step) *= width; } - return GGML_LINESEARCH_FAIL; + return GGML_V1_LINESEARCH_FAIL; } -static enum ggml_opt_result ggml_opt_lbfgs( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb) { - if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE || - params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) { +static enum ggml_v1_opt_result ggml_v1_opt_lbfgs( + struct ggml_v1_context * ctx, + struct ggml_v1_opt_params params, + struct ggml_v1_tensor * f, + struct ggml_v1_cgraph * gf, + struct ggml_v1_cgraph * gb) { + if (params.lbfgs.linesearch == GGML_V1_LINESEARCH_BACKTRACKING_WOLFE || + params.lbfgs.linesearch == 
GGML_V1_LINESEARCH_BACKTRACKING_STRONG_WOLFE) { if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1. <= params.lbfgs.wolfe) { - return GGML_OPT_INVALID_WOLFE; + return GGML_V1_OPT_INVALID_WOLFE; } } @@ -8203,28 +8203,28 @@ static enum ggml_opt_result ggml_opt_lbfgs( const int m = params.lbfgs.m; // these will store the parameters we want to optimize - struct ggml_tensor * ps[GGML_MAX_PARAMS]; + struct ggml_v1_tensor * ps[GGML_V1_MAX_PARAMS]; int np = 0; int nx = 0; for (int i = 0; i < gf->n_nodes; ++i) { if (gf->nodes[i]->is_param) { - GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); + GGML_V1_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); - assert(np < GGML_MAX_PARAMS); + assert(np < GGML_V1_MAX_PARAMS); ps[np++] = gf->nodes[i]; - nx += ggml_nelements(gf->nodes[i]); + nx += ggml_v1_nelements(gf->nodes[i]); } } - float * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current parameters - float * xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous parameters - float * g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current gradient - float * gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous gradient - float * d = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // search direction + float * x = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // current parameters + float * xp = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // previous parameters + float * g = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // current gradient + float * gp = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // previous gradient + float * d = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; // search direction - float * pf = params.past > 0 ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values + float * pf = params.past > 0 ? 
ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, params.past)->data : NULL; // past function values float fx = 0.0f; // cost function value float xnorm = 0.0f; // ||x|| @@ -8232,29 +8232,29 @@ static enum ggml_opt_result ggml_opt_lbfgs( float step = 0.0f; // initialize x from the graph nodes - ggml_opt_get_params(np, ps, x); + ggml_v1_opt_get_params(np, ps, x); // the L-BFGS memory - struct ggml_lbfgs_iteration_data * lm = alloca(sizeof(struct ggml_lbfgs_iteration_data)*m); + struct ggml_v1_lbfgs_iteration_data * lm = alloca(sizeof(struct ggml_v1_lbfgs_iteration_data)*m); for (int i = 0; i < m; ++i) { lm[i].alpha = 0.0f; lm[i].ys = 0.0f; - lm[i].s = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; - lm[i].y = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; + lm[i].s = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; + lm[i].y = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, nx)->data; } // evaluate the function value and its gradient { - ggml_opt_set_params(np, ps, x); + ggml_v1_opt_set_params(np, ps, x); - ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(ctx, gb); + ggml_v1_graph_reset (gf); + ggml_v1_set_f32 (f->grad, 1.0f); + ggml_v1_graph_compute(ctx, gb); - ggml_opt_get_grad(np, ps, g); + ggml_v1_opt_get_grad(np, ps, g); - fx = ggml_get_f32_1d(f, 0); + fx = ggml_v1_get_f32_1d(f, 0); } if (pf) { @@ -8264,11 +8264,11 @@ static enum ggml_opt_result ggml_opt_lbfgs( float fx_best = fx; // search direction = -gradient - ggml_vec_neg_f32(nx, d, g); + ggml_v1_vec_neg_f32(nx, d, g); // ||x||, ||g|| - ggml_vec_norm_f32(nx, &xnorm, x); - ggml_vec_norm_f32(nx, &gnorm, g); + ggml_v1_vec_norm_f32(nx, &xnorm, x); + ggml_v1_vec_norm_f32(nx, &gnorm, g); if (xnorm < 1.0f) { xnorm = 1.0f; @@ -8276,11 +8276,11 @@ // already optimized if (gnorm/xnorm <= params.lbfgs.eps) { - return GGML_OPT_OK; + return GGML_V1_OPT_OK; } // initial step - ggml_vec_norm_inv_f32(nx, &step, d); + ggml_v1_vec_norm_inv_f32(nx, &step, d); int j = 0; int k = 1; @@ -8295,30 +8295,30 @@ static enum ggml_opt_result ggml_opt_lbfgs( while (true) { // store the current position and gradient vectors - ggml_vec_cpy_f32(nx, xp, x); - ggml_vec_cpy_f32(nx, gp, g); + ggml_v1_vec_cpy_f32(nx, xp, x); + ggml_v1_vec_cpy_f32(nx, gp, g); ls = linesearch_backtracking(ctx, &params, nx, x, &fx, g, d, &step, xp, f, gf, gb, np, ps); if (ls < 0) { // linesearch failed - go back to the previous point and return - ggml_vec_cpy_f32(nx, x, xp); - ggml_vec_cpy_f32(nx, g, gp); + ggml_v1_vec_cpy_f32(nx, x, xp); + ggml_v1_vec_cpy_f32(nx, g, gp); return ls; } - ggml_vec_norm_f32(nx, &xnorm, x); - ggml_vec_norm_f32(nx, &gnorm, g); + ggml_v1_vec_norm_f32(nx, &xnorm, x); + ggml_v1_vec_norm_f32(nx, &gnorm, g); - GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0)); + GGML_V1_PRINT_DEBUG("f = %10.6f\n", ggml_v1_get_f32_1d(f, 0)); if (xnorm < 1.0) { xnorm = 1.0; } if (gnorm/xnorm <= params.lbfgs.eps) { // converged - return GGML_OPT_OK; + return GGML_V1_OPT_OK; } // delta-based convergence test @@ -8328,7 +8328,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( const float rate = (pf[k%params.past] - fx)/fx; if (fabs(rate) < params.delta) { - return GGML_OPT_OK; + return GGML_V1_OPT_OK; } } @@ -8344,29 +8344,29 @@ static enum ggml_opt_result ggml_opt_lbfgs( n_no_improvement++; if (n_no_improvement >= params.max_no_improvement) { - return GGML_OPT_OK; + return GGML_V1_OPT_OK; } } } if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < k + 1) { // reached the maximum number of iterations - return 
GGML_OPT_DID_NOT_CONVERGE; + return GGML_V1_OPT_DID_NOT_CONVERGE; } // update vectors s and y: // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}. // y_{k+1} = g_{k+1} - g_{k}. // - ggml_vec_sub_f32(nx, lm[end].s, x, xp); - ggml_vec_sub_f32(nx, lm[end].y, g, gp); + ggml_v1_vec_sub_f32(nx, lm[end].s, x, xp); + ggml_v1_vec_sub_f32(nx, lm[end].y, g, gp); // compute scalars ys and yy: // ys = y^t \cdot s -> 1 / \rho. // yy = y^t \cdot y. // - ggml_vec_dot_f32(nx, &ys, lm[end].y, lm[end].s); - ggml_vec_dot_f32(nx, &yy, lm[end].y, lm[end].y); + ggml_v1_vec_dot_f32(nx, &ys, lm[end].y, lm[end].s); + ggml_v1_vec_dot_f32(nx, &yy, lm[end].y, lm[end].y); lm[end].ys = ys; @@ -8378,43 +8378,43 @@ static enum ggml_opt_result ggml_opt_lbfgs( end = (end + 1)%m; // initialize search direction with -g - ggml_vec_neg_f32(nx, d, g); + ggml_v1_vec_neg_f32(nx, d, g); j = end; for (int i = 0; i < bound; ++i) { j = (j + m - 1) % m; // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1} - ggml_vec_dot_f32(nx, &lm[j].alpha, lm[j].s, d); + ggml_v1_vec_dot_f32(nx, &lm[j].alpha, lm[j].s, d); lm[j].alpha /= lm[j].ys; // q_{i} = q_{i+1} - \alpha_{i} y_{i} - ggml_vec_mad_f32(nx, d, lm[j].y, -lm[j].alpha); + ggml_v1_vec_mad_f32(nx, d, lm[j].y, -lm[j].alpha); } - ggml_vec_scale_f32(nx, d, ys/yy); + ggml_v1_vec_scale_f32(nx, d, ys/yy); for (int i = 0; i < bound; ++i) { // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i} - ggml_vec_dot_f32(nx, &beta, lm[j].y, d); + ggml_v1_vec_dot_f32(nx, &beta, lm[j].y, d); beta /= lm[j].ys; // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j} - ggml_vec_mad_f32(nx, d, lm[j].s, lm[j].alpha - beta); + ggml_v1_vec_mad_f32(nx, d, lm[j].s, lm[j].alpha - beta); j = (j + 1)%m; } step = 1.0; } - return GGML_OPT_DID_NOT_CONVERGE; + return GGML_V1_OPT_DID_NOT_CONVERGE; } -struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { - struct ggml_opt_params result; +struct ggml_v1_opt_params ggml_v1_opt_default_params(enum ggml_v1_opt_type type) { + struct ggml_v1_opt_params result; switch (type) { - case GGML_OPT_ADAM: + case GGML_V1_OPT_ADAM: { - result = (struct ggml_opt_params) { - .type = GGML_OPT_ADAM, + result = (struct ggml_v1_opt_params) { + .type = GGML_V1_OPT_ADAM, .n_threads = 1, .past = 0, .delta = 1e-5f, @@ -8435,10 +8435,10 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { }, }; } break; - case GGML_OPT_LBFGS: + case GGML_V1_OPT_LBFGS: { - result = (struct ggml_opt_params) { - .type = GGML_OPT_LBFGS, + result = (struct ggml_v1_opt_params) { + .type = GGML_V1_OPT_LBFGS, .n_threads = 1, .past = 0, .delta = 1e-5f, @@ -8459,7 +8459,7 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { .min_step = 1e-20f, .max_step = 1e+20f, - .linesearch = GGML_LINESEARCH_DEFAULT, + .linesearch = GGML_V1_LINESEARCH_DEFAULT, }, }; } break; @@ -8468,54 +8468,54 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { return result; } -enum ggml_opt_result ggml_opt( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f) { +enum ggml_v1_opt_result ggml_v1_opt( + struct ggml_v1_context * ctx, + struct ggml_v1_opt_params params, + struct ggml_v1_tensor * f) { bool free_ctx = false; if (ctx == NULL) { - struct ggml_init_params params_ctx = { + struct ggml_v1_init_params params_ctx = { .mem_size = 16*1024*1024, .mem_buffer = NULL, }; - ctx = ggml_init(params_ctx); + ctx = ggml_v1_init(params_ctx); if (ctx == NULL) { - return GGML_OPT_NO_CONTEXT; + return GGML_V1_OPT_NO_CONTEXT; } free_ctx = true; } - enum 
ggml_opt_result result = GGML_OPT_OK; + enum ggml_v1_opt_result result = GGML_V1_OPT_OK; // build forward + backward compute graphs - struct ggml_cgraph gf = ggml_build_forward (f); - struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false); + struct ggml_v1_cgraph gf = ggml_v1_build_forward (f); + struct ggml_v1_cgraph gb = ggml_v1_build_backward(ctx, &gf, false); switch (params.type) { - case GGML_OPT_ADAM: + case GGML_V1_OPT_ADAM: { - result = ggml_opt_adam(ctx, params, f, &gf, &gb); + result = ggml_v1_opt_adam(ctx, params, f, &gf, &gb); } break; - case GGML_OPT_LBFGS: + case GGML_V1_OPT_LBFGS: { - result = ggml_opt_lbfgs(ctx, params, f, &gf, &gb); + result = ggml_v1_opt_lbfgs(ctx, params, f, &gf, &gb); } break; } if (params.print_forward_graph) { - ggml_graph_print (&gf); - ggml_graph_dump_dot(&gf, NULL, "opt-forward.dot"); + ggml_v1_graph_print (&gf); + ggml_v1_graph_dump_dot(&gf, NULL, "opt-forward.dot"); } if (params.print_backward_graph) { - ggml_graph_print (&gb); - ggml_graph_dump_dot(&gb, &gf, "opt-backward.dot"); + ggml_v1_graph_print (&gb); + ggml_v1_graph_dump_dot(&gb, &gf, "opt-backward.dot"); } if (free_ctx) { - ggml_free(ctx); + ggml_v1_free(ctx); } return result; @@ -8523,7 +8523,7 @@ enum ggml_opt_result ggml_opt( //////////////////////////////////////////////////////////////////////////////// -int ggml_cpu_has_avx(void) { +int ggml_v1_cpu_has_avx(void) { #if defined(__AVX__) return 1; #else @@ -8531,7 +8531,7 @@ int ggml_cpu_has_avx(void) { #endif } -int ggml_cpu_has_avx2(void) { +int ggml_v1_cpu_has_avx2(void) { #if defined(__AVX2__) return 1; #else @@ -8539,7 +8539,7 @@ int ggml_cpu_has_avx2(void) { #endif } -int ggml_cpu_has_avx512(void) { +int ggml_v1_cpu_has_avx512(void) { #if defined(__AVX512F__) return 1; #else @@ -8547,7 +8547,7 @@ int ggml_cpu_has_avx512(void) { #endif } -int ggml_cpu_has_fma(void) { +int ggml_v1_cpu_has_fma(void) { #if defined(__FMA__) return 1; #else @@ -8555,7 +8555,7 @@ int ggml_cpu_has_fma(void) { #endif } -int ggml_cpu_has_neon(void) { +int ggml_v1_cpu_has_neon(void) { #if defined(__ARM_NEON) return 1; #else @@ -8563,7 +8563,7 @@ int ggml_cpu_has_neon(void) { #endif } -int ggml_cpu_has_arm_fma(void) { +int ggml_v1_cpu_has_arm_fma(void) { #if defined(__ARM_FEATURE_FMA) return 1; #else @@ -8571,7 +8571,7 @@ int ggml_cpu_has_arm_fma(void) { #endif } -int ggml_cpu_has_f16c(void) { +int ggml_v1_cpu_has_f16c(void) { #if defined(__F16C__) return 1; #else @@ -8579,7 +8579,7 @@ int ggml_cpu_has_f16c(void) { #endif } -int ggml_cpu_has_fp16_va(void) { +int ggml_v1_cpu_has_fp16_va(void) { #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) return 1; #else @@ -8587,7 +8587,7 @@ int ggml_cpu_has_fp16_va(void) { #endif } -int ggml_cpu_has_wasm_simd(void) { +int ggml_v1_cpu_has_wasm_simd(void) { #if defined(__wasm_simd128__) return 1; #else @@ -8595,15 +8595,15 @@ int ggml_cpu_has_wasm_simd(void) { #endif } -int ggml_cpu_has_blas(void) { -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) +int ggml_v1_cpu_has_blas(void) { +#if defined(GGML_V1_USE_ACCELERATE) || defined(GGML_V1_USE_OPENBLAS) return 1; #else return 0; #endif } -int ggml_cpu_has_sse3(void) { +int ggml_v1_cpu_has_sse3(void) { #if defined(__SSE3__) return 1; #else @@ -8611,7 +8611,7 @@ int ggml_cpu_has_sse3(void) { #endif } -int ggml_cpu_has_vsx(void) { +int ggml_v1_cpu_has_vsx(void) { #if defined(__POWER9_VECTOR__) return 1; #else diff --git a/otherarch/ggml_v1.h b/otherarch/ggml_v1.h index 18f317bec..0debe0925 100644 --- a/otherarch/ggml_v1.h +++ b/otherarch/ggml_v1.h @@ 
-32,22 +32,22 @@ // For example, here we define the function: f(x) = a*x^2 + b // // { -// struct ggml_init_params params = { +// struct ggml_v1_init_params params = { // .mem_size = 16*1024*1024, // .mem_buffer = NULL, // }; // // // memory allocation happens here -// struct ggml_context * ctx = ggml_init(params); +// struct ggml_v1_context * ctx = ggml_v1_init(params); // -// struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); +// struct ggml_v1_tensor * x = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, 1); // -// ggml_set_param(ctx, x); // x is an input variable +// ggml_v1_set_param(ctx, x); // x is an input variable // -// struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); -// struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); -// struct ggml_tensor * x2 = ggml_mul(ctx, x, x); -// struct ggml_tensor * f = ggml_add(ctx, ggml_mul(ctx, a, x2), b); +// struct ggml_v1_tensor * a = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, 1); +// struct ggml_v1_tensor * b = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, 1); +// struct ggml_v1_tensor * x2 = ggml_v1_mul(ctx, x, x); +// struct ggml_v1_tensor * f = ggml_v1_add(ctx, ggml_v1_mul(ctx, a, x2), b); // // ... // } @@ -58,33 +58,33 @@ // { // ... // -// struct ggml_cgraph gf = ggml_build_forward(f); +// struct ggml_v1_cgraph gf = ggml_v1_build_forward(f); // // // set the input variable and parameter values -// ggml_set_f32(x, 2.0f); -// ggml_set_f32(a, 3.0f); -// ggml_set_f32(b, 4.0f); +// ggml_v1_set_f32(x, 2.0f); +// ggml_v1_set_f32(a, 3.0f); +// ggml_v1_set_f32(b, 4.0f); // -// ggml_graph_compute(ctx0, &gf); +// ggml_v1_graph_compute(ctx0, &gf); // -// printf("f = %f\n", ggml_get_f32_1d(f, 0)); +// printf("f = %f\n", ggml_v1_get_f32_1d(f, 0)); // // ... // } // -// The actual computation is performed in the ggml_graph_compute() function. +// The actual computation is performed in the ggml_v1_graph_compute() function. // -// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the -// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know +// The ggml_v1_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the +// ggml_v1_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know // in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory -// and after defining the computation graph, call the ggml_used_mem() function to find out how much memory was +// and after defining the computation graph, call the ggml_v1_used_mem() function to find out how much memory was // actually needed. // -// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic +// The ggml_v1_set_param() function marks a tensor as an input variable. This is used by the automatic // differentiation and optimization algorithms. // // The described approach allows to define the function graph once and then compute its forward or backward graphs -// multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way +// multiple times. All computations will use the same memory buffer allocated in the ggml_v1_init() function. This way // the user can avoid the memory allocation overhead at runtime. // // The library supports multi-dimensional tensors - up to 4 dimensions. 
The FP16 and FP32 data types are first class @@ -95,9 +95,9 @@ // clear that the library needs to support more complex operations. The way to support these operations is not clear // yet, but a few examples are demonstrated in the following operations: // -// - ggml_permute() -// - ggml_conv_1d_1s() -// - ggml_conv_1d_2s() +// - ggml_v1_permute() +// - ggml_v1_conv_1d_1s() +// - ggml_v1_conv_1d_2s() // // For each tensor operator, the library implements a forward and backward computation function. The forward function // computes the output tensor value given the input tensor values. The backward function computes the adjoint of the @@ -108,20 +108,20 @@ // https://www.youtube.com/watch?v=wG_nF1awSSY // // -// ## Tensor data (struct ggml_tensor) +// ## Tensor data (struct ggml_v1_tensor) // -// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of +// The tensors are stored in memory via the ggml_v1_tensor struct. The structure provides information about the size of // the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains // pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example: // // { -// struct ggml_tensor * c = ggml_add(ctx, a, b); +// struct ggml_v1_tensor * c = ggml_v1_add(ctx, a, b); // // assert(c->src[0] == a); // assert(c->src[1] == b); // } // -// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the +// The multi-dimensional tensors are stored in row-major order. The ggml_v1_tensor struct contains fields for the // number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This allows // to store tensors that are not contiguous in memory, which is useful for operations such as transposition and // permutation. All tensor operations have to take the stride into account and not assume that the tensor is @@ -130,7 +130,7 @@ // The data of the tensor is accessed via the "data" pointer. For example: // // { -// struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3); +// struct ggml_v1_tensor * a = ggml_v1_new_tensor_2d(ctx, GGML_V1_TYPE_F32, 2, 3); // // // a[1, 2] = 1.0f; // *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f; @@ -141,9 +141,9 @@ // ... // } // -// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d() that can be used. +// Alternatively, there are helper functions, such as ggml_v1_get_f32_1d() and ggml_v1_set_f32_1d() that can be used. 
// -// ## The matrix multiplication operator (ggml_mul_mat) +// ## The matrix multiplication operator (ggml_v1_mul_mat) // // TODO // @@ -177,98 +177,98 @@ extern "C" { #include #include -#define GGML_MAX_DIMS 4 -#define GGML_MAX_NODES 4096 -#define GGML_MAX_PARAMS 16 -#define GGML_MAX_CONTEXTS 64 -#define GGML_MAX_OPT 4 +#define GGML_V1_MAX_DIMS 4 +#define GGML_V1_MAX_NODES 4096 +#define GGML_V1_MAX_PARAMS 16 +#define GGML_V1_MAX_CONTEXTS 64 +#define GGML_V1_MAX_OPT 4 #ifdef __ARM_NEON // we use the built-in 16-bit float type -typedef __fp16 ggml_fp16_t; +typedef __fp16 ggml_v1_fp16_t; #else -typedef uint16_t ggml_fp16_t; +typedef uint16_t ggml_v1_fp16_t; #endif // convert FP16 <-> FP32 -float ggml_fp16_to_fp32(ggml_fp16_t x); -ggml_fp16_t ggml_fp32_to_fp16(float x); +float ggml_v1_fp16_to_fp32(ggml_v1_fp16_t x); +ggml_v1_fp16_t ggml_v1_fp32_to_fp16(float x); -struct ggml_object; -struct ggml_context; +struct ggml_v1_object; +struct ggml_v1_context; -enum ggml_type { - GGML_TYPE_I8, - GGML_TYPE_I16, - GGML_TYPE_I32, - GGML_TYPE_F16, - GGML_TYPE_F32, - GGML_TYPE_COUNT, +enum ggml_v1_type { + GGML_V1_TYPE_I8, + GGML_V1_TYPE_I16, + GGML_V1_TYPE_I32, + GGML_V1_TYPE_F16, + GGML_V1_TYPE_F32, + GGML_V1_TYPE_COUNT, }; // available tensor operations: -enum ggml_op { - GGML_OP_NONE = 0, +enum ggml_v1_op { + GGML_V1_OP_NONE = 0, - GGML_OP_DUP, - GGML_OP_ADD, - GGML_OP_SUB, - GGML_OP_MUL, - GGML_OP_DIV, - GGML_OP_SQR, - GGML_OP_SQRT, - GGML_OP_SUM, - GGML_OP_MEAN, - GGML_OP_REPEAT, - GGML_OP_ABS, - GGML_OP_SGN, - GGML_OP_NEG, - GGML_OP_STEP, - GGML_OP_RELU, - GGML_OP_GELU, - GGML_OP_NORM, // normalize + GGML_V1_OP_DUP, + GGML_V1_OP_ADD, + GGML_V1_OP_SUB, + GGML_V1_OP_MUL, + GGML_V1_OP_DIV, + GGML_V1_OP_SQR, + GGML_V1_OP_SQRT, + GGML_V1_OP_SUM, + GGML_V1_OP_MEAN, + GGML_V1_OP_REPEAT, + GGML_V1_OP_ABS, + GGML_V1_OP_SGN, + GGML_V1_OP_NEG, + GGML_V1_OP_STEP, + GGML_V1_OP_RELU, + GGML_V1_OP_GELU, + GGML_V1_OP_NORM, // normalize - GGML_OP_MUL_MAT, + GGML_V1_OP_MUL_MAT, - GGML_OP_SCALE, - GGML_OP_CPY, - GGML_OP_RESHAPE, - GGML_OP_VIEW, - GGML_OP_PERMUTE, - GGML_OP_TRANSPOSE, - GGML_OP_GET_ROWS, - GGML_OP_DIAG_MASK_INF, - GGML_OP_SOFT_MAX, - GGML_OP_ROPE, - GGML_OP_CONV_1D_1S, - GGML_OP_CONV_1D_2S, + GGML_V1_OP_SCALE, + GGML_V1_OP_CPY, + GGML_V1_OP_RESHAPE, + GGML_V1_OP_VIEW, + GGML_V1_OP_PERMUTE, + GGML_V1_OP_TRANSPOSE, + GGML_V1_OP_GET_ROWS, + GGML_V1_OP_DIAG_MASK_INF, + GGML_V1_OP_SOFT_MAX, + GGML_V1_OP_ROPE, + GGML_V1_OP_CONV_1D_1S, + GGML_V1_OP_CONV_1D_2S, - GGML_OP_FLASH_ATTN, - GGML_OP_FLASH_FF, + GGML_V1_OP_FLASH_ATTN, + GGML_V1_OP_FLASH_FF, - GGML_OP_COUNT, + GGML_V1_OP_COUNT, }; // n-dimensional tensor -struct ggml_tensor { - enum ggml_type type; +struct ggml_v1_tensor { + enum ggml_v1_type type; int n_dims; - int ne[GGML_MAX_DIMS]; // number of elements - size_t nb[GGML_MAX_DIMS]; // stride in bytes: + int ne[GGML_V1_MAX_DIMS]; // number of elements + size_t nb[GGML_V1_MAX_DIMS]; // stride in bytes: // nb[0] = sizeof(type) // nb[1] = nb[0] * ne[0] + padding // nb[i] = nb[i-1] * ne[i-1] // compute data - enum ggml_op op; + enum ggml_v1_op op; bool is_param; - struct ggml_tensor * grad; - struct ggml_tensor * src0; - struct ggml_tensor * src1; - struct ggml_tensor * opt[GGML_MAX_OPT]; + struct ggml_v1_tensor * grad; + struct ggml_v1_tensor * src0; + struct ggml_v1_tensor * src1; + struct ggml_v1_tensor * opt[GGML_V1_MAX_OPT]; // thread scheduling int n_tasks; @@ -283,17 +283,17 @@ struct ggml_tensor { }; // computation graph -struct ggml_cgraph { +struct ggml_v1_cgraph { int n_nodes; int n_leafs; 
int n_threads; size_t work_size; - struct ggml_tensor * work; + struct ggml_v1_tensor * work; - struct ggml_tensor * nodes[GGML_MAX_NODES]; - struct ggml_tensor * grads[GGML_MAX_NODES]; - struct ggml_tensor * leafs[GGML_MAX_NODES]; + struct ggml_v1_tensor * nodes[GGML_V1_MAX_NODES]; + struct ggml_v1_tensor * grads[GGML_V1_MAX_NODES]; + struct ggml_v1_tensor * leafs[GGML_V1_MAX_NODES]; // performance int perf_runs; @@ -302,276 +302,276 @@ struct ggml_cgraph { }; // scratch buffer -struct ggml_scratch { +struct ggml_v1_scratch { size_t offs; size_t size; void * data; }; -struct ggml_init_params { +struct ggml_v1_init_params { // memory pool size_t mem_size; // bytes void * mem_buffer; // if NULL, memory will be allocated internally }; -void ggml_time_init(void); // call this once at the beginning of the program -int64_t ggml_time_ms(void); -int64_t ggml_time_us(void); -int64_t ggml_cycles(void); -int64_t ggml_cycles_per_ms(void); +void ggml_v1_time_init(void); // call this once at the beginning of the program +int64_t ggml_v1_time_ms(void); +int64_t ggml_v1_time_us(void); +int64_t ggml_v1_cycles(void); +int64_t ggml_v1_cycles_per_ms(void); -void ggml_print_object (const struct ggml_object * obj); -void ggml_print_objects(const struct ggml_context * ctx); +void ggml_v1_print_object (const struct ggml_v1_object * obj); +void ggml_v1_print_objects(const struct ggml_v1_context * ctx); -int ggml_nelements(const struct ggml_tensor * tensor); -size_t ggml_nbytes (const struct ggml_tensor * tensor); +int ggml_v1_nelements(const struct ggml_v1_tensor * tensor); +size_t ggml_v1_nbytes (const struct ggml_v1_tensor * tensor); -size_t ggml_type_size (enum ggml_type type); -size_t ggml_element_size(const struct ggml_tensor * tensor); +size_t ggml_v1_type_size (enum ggml_v1_type type); +size_t ggml_v1_element_size(const struct ggml_v1_tensor * tensor); -struct ggml_context * ggml_init(struct ggml_init_params params); -void ggml_free(struct ggml_context * ctx); +struct ggml_v1_context * ggml_v1_init(struct ggml_v1_init_params params); +void ggml_v1_free(struct ggml_v1_context * ctx); -size_t ggml_used_mem(const struct ggml_context * ctx); +size_t ggml_v1_used_mem(const struct ggml_v1_context * ctx); -size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch); +size_t ggml_v1_set_scratch(struct ggml_v1_context * ctx, struct ggml_v1_scratch scratch); -struct ggml_tensor * ggml_new_tensor( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int n_dims, const int *ne); -struct ggml_tensor * ggml_new_tensor_1d( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_1d( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int ne0); -struct ggml_tensor * ggml_new_tensor_2d( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_2d( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int ne0, int ne1); -struct ggml_tensor * ggml_new_tensor_3d( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_3d( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int ne0, int ne1, int ne2); -struct ggml_tensor * ggml_new_tensor_4d( - struct ggml_context * ctx, - enum ggml_type type, +struct ggml_v1_tensor * ggml_v1_new_tensor_4d( + struct ggml_v1_context * ctx, + enum ggml_v1_type type, int ne0, int ne1, int ne2, int ne3); -struct ggml_tensor * ggml_new_i32(struct 
ggml_context * ctx, int32_t value); -struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value); +struct ggml_v1_tensor * ggml_v1_new_i32(struct ggml_v1_context * ctx, int32_t value); +struct ggml_v1_tensor * ggml_v1_new_f32(struct ggml_v1_context * ctx, float value); -struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src); -struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src); +struct ggml_v1_tensor * ggml_v1_dup_tensor (struct ggml_v1_context * ctx, const struct ggml_v1_tensor * src); +struct ggml_v1_tensor * ggml_v1_view_tensor(struct ggml_v1_context * ctx, const struct ggml_v1_tensor * src); -struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); -struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value); -struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value); +struct ggml_v1_tensor * ggml_v1_set_zero(struct ggml_v1_tensor * tensor); +struct ggml_v1_tensor * ggml_v1_set_i32 (struct ggml_v1_tensor * tensor, int32_t value); +struct ggml_v1_tensor * ggml_v1_set_f32 (struct ggml_v1_tensor * tensor, float value); -int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i); -void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value); +int32_t ggml_v1_get_i32_1d(const struct ggml_v1_tensor * tensor, int i); +void ggml_v1_set_i32_1d(const struct ggml_v1_tensor * tensor, int i, int32_t value); -float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i); -void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value); +float ggml_v1_get_f32_1d(const struct ggml_v1_tensor * tensor, int i); +void ggml_v1_set_f32_1d(const struct ggml_v1_tensor * tensor, int i, float value); - void * ggml_get_data (const struct ggml_tensor * tensor); -float * ggml_get_data_f32(const struct ggml_tensor * tensor); + void * ggml_v1_get_data (const struct ggml_v1_tensor * tensor); +float * ggml_v1_get_data_f32(const struct ggml_v1_tensor * tensor); // // operations on tensors with backpropagation // -struct ggml_tensor * ggml_dup( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_dup( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); -struct ggml_tensor * ggml_add( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_add( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); -struct ggml_tensor * ggml_sub( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_sub( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); -struct ggml_tensor * ggml_mul( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_mul( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); -struct ggml_tensor * ggml_div( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_div( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); -struct ggml_tensor * ggml_sqr( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_sqr( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); -struct ggml_tensor * ggml_sqrt( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct 
ggml_v1_tensor * ggml_v1_sqrt( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); // return scalar // TODO: compute sum along rows -struct ggml_tensor * ggml_sum( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_sum( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); // mean along rows -struct ggml_tensor * ggml_mean( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_mean( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); // if a is the same shape as b, and a is not parameter, return a // otherwise, return a new tensor: repeat(a) to fit in b -struct ggml_tensor * ggml_repeat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_repeat( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); -struct ggml_tensor * ggml_abs( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_abs( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); -struct ggml_tensor * ggml_sgn( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_sgn( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); -struct ggml_tensor * ggml_neg( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_neg( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); -struct ggml_tensor * ggml_step( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_step( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); -struct ggml_tensor * ggml_relu( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_relu( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); // TODO: double-check this computation is correct -struct ggml_tensor * ggml_gelu( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_gelu( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); // normalize along rows // TODO: eps is hardcoded to 1e-5 for now -struct ggml_tensor * ggml_norm( - struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_norm( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); // A: m rows, n columns // B: p rows, n columns (i.e. 
we transpose it internally) // result is m columns, p rows -struct ggml_tensor * ggml_mul_mat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_mul_mat( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); // // operations on tensors without backpropagation // // in-place, returns view(a) -struct ggml_tensor * ggml_scale( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_scale( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); // a -> b, return view(b) -struct ggml_tensor * ggml_cpy( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_cpy( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); // return view(a), b specifies the new shape // TODO: when we start computing gradient, make a copy instead of view -struct ggml_tensor * ggml_reshape( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_reshape( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); // return view(a) // TODO: when we start computing gradient, make a copy instead of view -struct ggml_tensor * ggml_reshape_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_reshape_2d( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int ne0, int ne1); // return view(a) // TODO: when we start computing gradient, make a copy instead of view -struct ggml_tensor * ggml_reshape_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_reshape_3d( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int ne0, int ne1, int ne2); // offset in bytes -struct ggml_tensor * ggml_view_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_view_1d( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int ne0, size_t offset); -struct ggml_tensor * ggml_view_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_view_2d( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int ne0, int ne1, size_t nb1, // row stride in bytes size_t offset); -struct ggml_tensor * ggml_permute( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_permute( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int axis0, int axis1, int axis2, int axis3); -// alias for ggml_permute(ctx, a, 1, 0, 2, 3) -struct ggml_tensor * ggml_transpose( - struct ggml_context * ctx, - struct ggml_tensor * a); +// alias for ggml_v1_permute(ctx, a, 1, 0, 2, 3) +struct ggml_v1_tensor * ggml_v1_transpose( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); -struct ggml_tensor * ggml_get_rows( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_get_rows( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); // set elements above the diagonal to -INF // in-place, returns view(a) -struct ggml_tensor * ggml_diag_mask_inf( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_diag_mask_inf( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int n_past); // in-place, returns view(a) -struct ggml_tensor * ggml_soft_max( - 
struct ggml_context * ctx, - struct ggml_tensor * a); +struct ggml_v1_tensor * ggml_v1_soft_max( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a); // rotary position embedding // in-place, returns view(a) // if mode == 1, skip n_past elements // TODO: avoid creating a new tensor every time -struct ggml_tensor * ggml_rope( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct ggml_v1_tensor * ggml_v1_rope( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, int n_past, int n_dims, int mode); @@ -580,93 +580,93 @@ struct ggml_tensor * ggml_rope( // TODO: we don't support extra parameters for now // that's why we are hard-coding the stride, padding, and dilation // not great .. -struct ggml_tensor * ggml_conv_1d_1s( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_conv_1d_1s( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); -struct ggml_tensor * ggml_conv_1d_2s( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); +struct ggml_v1_tensor * ggml_v1_conv_1d_2s( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b); -struct ggml_tensor * ggml_flash_attn( - struct ggml_context * ctx, - struct ggml_tensor * q, - struct ggml_tensor * k, - struct ggml_tensor * v, +struct ggml_v1_tensor * ggml_v1_flash_attn( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * q, + struct ggml_v1_tensor * k, + struct ggml_v1_tensor * v, bool masked); -struct ggml_tensor * ggml_flash_ff( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b0, - struct ggml_tensor * b1, - struct ggml_tensor * c0, - struct ggml_tensor * c1); +struct ggml_v1_tensor * ggml_v1_flash_ff( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * a, + struct ggml_v1_tensor * b0, + struct ggml_v1_tensor * b1, + struct ggml_v1_tensor * c0, + struct ggml_v1_tensor * c1); // // automatic differentiation // -void ggml_set_param( - struct ggml_context * ctx, - struct ggml_tensor * tensor); +void ggml_v1_set_param( + struct ggml_v1_context * ctx, + struct ggml_v1_tensor * tensor); -void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); +void ggml_v1_build_forward_expand(struct ggml_v1_cgraph * cgraph, struct ggml_v1_tensor * tensor); -struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); -struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep); +struct ggml_v1_cgraph ggml_v1_build_forward (struct ggml_v1_tensor * tensor); +struct ggml_v1_cgraph ggml_v1_build_backward(struct ggml_v1_context * ctx, struct ggml_v1_cgraph * gf, bool keep); -void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph); -void ggml_graph_reset (struct ggml_cgraph * cgraph); +void ggml_v1_graph_compute(struct ggml_v1_context * ctx, struct ggml_v1_cgraph * cgraph); +void ggml_v1_graph_reset (struct ggml_v1_cgraph * cgraph); // print info and performance information for the graph -void ggml_graph_print(const struct ggml_cgraph * cgraph); +void ggml_v1_graph_print(const struct ggml_v1_cgraph * cgraph); // dump the graph into a file using the dot format -void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename); +void ggml_v1_graph_dump_dot(const struct ggml_v1_cgraph * gb, const struct ggml_v1_cgraph * gf, const char * filename); // // optimization // // optimization methods -enum 
ggml_opt_type { - GGML_OPT_ADAM, - GGML_OPT_LBFGS, +enum ggml_v1_opt_type { + GGML_V1_OPT_ADAM, + GGML_V1_OPT_LBFGS, }; // linesearch methods -enum ggml_linesearch { - GGML_LINESEARCH_DEFAULT = 1, +enum ggml_v1_linesearch { + GGML_V1_LINESEARCH_DEFAULT = 1, - GGML_LINESEARCH_BACKTRACKING_ARMIJO = 0, - GGML_LINESEARCH_BACKTRACKING_WOLFE = 1, - GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2, + GGML_V1_LINESEARCH_BACKTRACKING_ARMIJO = 0, + GGML_V1_LINESEARCH_BACKTRACKING_WOLFE = 1, + GGML_V1_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2, }; // optimization return values -enum ggml_opt_result { - GGML_OPT_OK = 0, - GGML_OPT_DID_NOT_CONVERGE, - GGML_OPT_NO_CONTEXT, - GGML_OPT_INVALID_WOLFE, - GGML_OPT_FAIL, +enum ggml_v1_opt_result { + GGML_V1_OPT_OK = 0, + GGML_V1_OPT_DID_NOT_CONVERGE, + GGML_V1_OPT_NO_CONTEXT, + GGML_V1_OPT_INVALID_WOLFE, + GGML_V1_OPT_FAIL, - GGML_LINESEARCH_FAIL = -128, - GGML_LINESEARCH_MINIMUM_STEP, - GGML_LINESEARCH_MAXIMUM_STEP, - GGML_LINESEARCH_MAXIMUM_ITERATIONS, - GGML_LINESEARCH_INVALID_PARAMETERS, + GGML_V1_LINESEARCH_FAIL = -128, + GGML_V1_LINESEARCH_MINIMUM_STEP, + GGML_V1_LINESEARCH_MAXIMUM_STEP, + GGML_V1_LINESEARCH_MAXIMUM_ITERATIONS, + GGML_V1_LINESEARCH_INVALID_PARAMETERS, }; // optimization parameters // -// see ggml.c (ggml_opt_default_params) for default values +// see ggml.c (ggml_v1_opt_default_params) for default values // -struct ggml_opt_params { - enum ggml_opt_type type; +struct ggml_v1_opt_params { + enum ggml_v1_opt_type type; int n_threads; @@ -714,34 +714,34 @@ struct ggml_opt_params { float min_step; float max_step; - enum ggml_linesearch linesearch; + enum ggml_v1_linesearch linesearch; } lbfgs; }; -struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type); +struct ggml_v1_opt_params ggml_v1_opt_default_params(enum ggml_v1_opt_type type); // optimize the function defined by the tensor f -enum ggml_opt_result ggml_opt( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f); +enum ggml_v1_opt_result ggml_v1_opt( + struct ggml_v1_context * ctx, + struct ggml_v1_opt_params params, + struct ggml_v1_tensor * f); // // system info // -int ggml_cpu_has_avx(void); -int ggml_cpu_has_avx2(void); -int ggml_cpu_has_avx512(void); -int ggml_cpu_has_fma(void); -int ggml_cpu_has_neon(void); -int ggml_cpu_has_arm_fma(void); -int ggml_cpu_has_f16c(void); -int ggml_cpu_has_fp16_va(void); -int ggml_cpu_has_wasm_simd(void); -int ggml_cpu_has_blas(void); -int ggml_cpu_has_sse3(void); -int ggml_cpu_has_vsx(void); +int ggml_v1_cpu_has_avx(void); +int ggml_v1_cpu_has_avx2(void); +int ggml_v1_cpu_has_avx512(void); +int ggml_v1_cpu_has_fma(void); +int ggml_v1_cpu_has_neon(void); +int ggml_v1_cpu_has_arm_fma(void); +int ggml_v1_cpu_has_f16c(void); +int ggml_v1_cpu_has_fp16_va(void); +int ggml_v1_cpu_has_wasm_simd(void); +int ggml_v1_cpu_has_blas(void); +int ggml_v1_cpu_has_sse3(void); +int ggml_v1_cpu_has_vsx(void); #ifdef __cplusplus } diff --git a/otherarch/gpt2.cpp b/otherarch/gpt2.cpp deleted file mode 100644 index 5f8db6492..000000000 --- a/otherarch/gpt2.cpp +++ /dev/null @@ -1,854 +0,0 @@ -#include "ggml.h" - -#include "utils.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// default hparams (GPT-2 117M) -struct gpt2_hparams { - int32_t n_vocab = 50257; - int32_t n_ctx = 1024; - int32_t n_embd = 768; - int32_t n_head = 12; - int32_t n_layer = 12; - int32_t f16 = 1; -}; - -struct gpt2_layer { - // normalization - struct ggml_tensor * ln_1_g; - struct ggml_tensor * 
ln_1_b; - - struct ggml_tensor * ln_2_g; - struct ggml_tensor * ln_2_b; - - // attention - struct ggml_tensor * c_attn_attn_w; - struct ggml_tensor * c_attn_attn_b; - - struct ggml_tensor * c_attn_proj_w; - struct ggml_tensor * c_attn_proj_b; - - // mlp - struct ggml_tensor * c_mlp_fc_w; - struct ggml_tensor * c_mlp_fc_b; - - struct ggml_tensor * c_mlp_proj_w; - struct ggml_tensor * c_mlp_proj_b; -}; - -struct gpt2_model { - gpt2_hparams hparams; - - // normalization - struct ggml_tensor * ln_f_g; - struct ggml_tensor * ln_f_b; - - struct ggml_tensor * wte; // position embedding - struct ggml_tensor * wpe; // token embedding - struct ggml_tensor * lm_head; // language model head - - std::vector layers; - - // key + value memory - struct ggml_tensor * memory_k; - struct ggml_tensor * memory_v; - - // - struct ggml_context * ctx; - std::map tensors; -}; - -// load the model's weights from a file -bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & vocab) { - printf("%s: loading model from '%s'\n", __func__, fname.c_str()); - - auto fin = std::ifstream(fname, std::ios::binary); - if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); - return false; - } - - // verify magic - { - uint32_t magic; - fin.read((char *) &magic, sizeof(magic)); - if (magic != 0x67676d6c) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); - return false; - } - } - - // load hparams - { - auto & hparams = model.hparams; - - fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); - fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); - fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); - fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); - fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); - fin.read((char *) &hparams.f16, sizeof(hparams.f16)); - - printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); - printf("%s: n_embd = %d\n", __func__, hparams.n_embd); - printf("%s: n_head = %d\n", __func__, hparams.n_head); - printf("%s: n_layer = %d\n", __func__, hparams.n_layer); - printf("%s: f16 = %d\n", __func__, hparams.f16); - } - - // load vocab - { - int32_t n_vocab = 0; - fin.read((char *) &n_vocab, sizeof(n_vocab)); - - if (n_vocab != model.hparams.n_vocab) { - fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", - __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); - return false; - } - - std::string word; - for (int i = 0; i < n_vocab; i++) { - uint32_t len; - fin.read((char *) &len, sizeof(len)); - - word.resize(len); - fin.read((char *) word.data(), len); - - vocab.token_to_id[word] = i; - vocab.id_to_token[i] = word; - } - } - - // for the big tensors, we have the option to store the data in 16-bit floats or quantized - // in order to save memory and also to speed up the computation - ggml_type wtype = GGML_TYPE_COUNT; - switch (model.hparams.f16) { - case 0: wtype = GGML_TYPE_F32; break; - case 1: wtype = GGML_TYPE_F16; break; - case 2: wtype = GGML_TYPE_Q4_0; break; - case 3: wtype = GGML_TYPE_Q4_1; break; - default: - { - fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n", - __func__, fname.c_str(), model.hparams.f16); - return false; - } - } - - const ggml_type wtype2 = GGML_TYPE_F32; - - auto & ctx = model.ctx; - - size_t ctx_size = 0; - - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int 
n_ctx = hparams.n_ctx; - const int n_vocab = hparams.n_vocab; - - ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g - ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b - - ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // wte - ctx_size += n_ctx*n_embd*ggml_type_sizef(GGML_TYPE_F32); // wpe - ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // lm_head - - ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g - ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b - - ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_g - ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_b - - ctx_size += n_layer*(3*n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_attn_w - ctx_size += n_layer*( 3*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_attn_b - - ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w - ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_proj_b - - ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w - ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b - - ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w - ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b - - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k - ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v - - ctx_size += (6 + 12*n_layer)*256; // object overhead - - printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); - } - - // create the ggml context - { - struct ggml_init_params params = { - .mem_size = ctx_size, - .mem_buffer = NULL, - }; - - model.ctx = ggml_init(params); - if (!model.ctx) { - fprintf(stderr, "%s: ggml_init() failed\n", __func__); - return false; - } - } - - // prepare memory for the weights - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_vocab = hparams.n_vocab; - - model.layers.resize(n_layer); - - model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx); - model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - - // map by name - model.tensors["model/ln_f/g"] = model.ln_f_g; - model.tensors["model/ln_f/b"] = model.ln_f_b; - - model.tensors["model/wte"] = model.wte; - model.tensors["model/wpe"] = model.wpe; - model.tensors["model/lm_head"] = model.lm_head; - - for (int i = 0; i < n_layer; ++i) { - auto & layer = model.layers[i]; - - layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); - layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); - - layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); - layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); - - layer.c_mlp_proj_w = 
ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); - layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - // map by name - model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g; - model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b; - - model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g; - model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b; - - model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w; - model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b; - - model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w; - model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b; - - model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w; - model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b; - - model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w; - model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b; - } - } - - // key + value memory - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - - const int n_mem = n_layer*n_ctx; - const int n_elements = n_embd*n_mem; - - model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - - const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); - - printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); - } - - // load weights - { - size_t total_size = 0; - - bool has_lm_head = false; - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ftype; - - fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - fin.read(reinterpret_cast(&length), sizeof(length)); - fin.read(reinterpret_cast(&ftype), sizeof(ftype)); - - if (fin.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - fin.read(&name[0], length); - - if (model.tensors.find(name.data()) == model.tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); - return false; - } - - auto tensor = model.tensors[name.data()]; - if (ggml_nelements(tensor) != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); - return false; - } - - if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", - __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); - return false; - } - - if (0) { - static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; - printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); - } - - size_t bpe = 0; - - switch (ftype) { - case 0: bpe = ggml_type_size(GGML_TYPE_F32); break; - case 1: bpe = ggml_type_size(GGML_TYPE_F16); break; - case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break; - case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break; - default: - { - 
fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype); - return false; - } - }; - - if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); - return false; - } - - fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); - - // GPT-2 models share the WTE tensor as the LM head - if (name == "model/wte" && has_lm_head == false) { - memcpy(model.lm_head->data, tensor->data, ggml_nbytes(tensor)); - } - - if (name == "model/lm_head") { - has_lm_head = true; - } - - total_size += ggml_nbytes(tensor); - } - - printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0); - } - - fin.close(); - - return true; -} - -// evaluate the transformer -// -// - model: the model -// - n_threads: number of threads to use -// - n_past: the context size so far -// - embd_inp: the embeddings of the tokens in the context -// - embd_w: the predicted logits for the next token -// -bool gpt2_eval( - const gpt2_model & model, - const int n_threads, - const int n_past, - const std::vector & embd_inp, - std::vector & embd_w, - size_t & mem_per_token) { - const int N = embd_inp.size(); - - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_head = hparams.n_head; - const int n_vocab = hparams.n_vocab; - - static size_t buf_size = 256u*1024*1024; - static void * buf = malloc(buf_size); - - if (mem_per_token > 0 && mem_per_token*N > buf_size) { - const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead - //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new); - - // reallocate - buf_size = buf_size_new; - buf = realloc(buf, buf_size); - if (buf == nullptr) { - fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size); - return false; - } - } - - struct ggml_init_params params = { - .mem_size = buf_size, - .mem_buffer = buf, - }; - - struct ggml_context * ctx0 = ggml_init(params); - struct ggml_cgraph gf = { .n_threads = n_threads }; - - struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd)); - - struct ggml_tensor * position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - for (int i = 0; i < N; ++i) { - ((int32_t *) position->data)[i] = n_past + i; - } - - // wte + wpe - struct ggml_tensor * inpL = - ggml_add(ctx0, - ggml_get_rows(ctx0, model.wte, embd), - ggml_get_rows(ctx0, model.wpe, position)); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * cur; - - // norm - { - // [ 768, N] - cur = ggml_norm(ctx0, inpL); - - // cur = ln_1_g*cur + ln_1_b - // [ 768, N] - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_1_g, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_1_b, cur)); - } - - // attn - // [2304, 768] - model.layers[il].c_attn_attn_w - // [2304, 1] - model.layers[il].c_attn_attn_b - // [ 768, N] - cur (in) - // [2304, N] - cur (out) - // - // cur = attn_w*cur + attn_b - // [2304, N] - { - cur = ggml_mul_mat(ctx0, - model.layers[il].c_attn_attn_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur), - cur); - } - - // self-attention - { - struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd); 
- struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd); - struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd); - - // store key and value to memory - if (N >= 1) { - struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); - struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); - - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); - } - - // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) - // [64, N, 12] - struct ggml_tensor * Q = - ggml_permute(ctx0, - ggml_cpy(ctx0, - Qcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)), - 0, 2, 1, 3); - - // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) - // [64, n_past + N, 12] - struct ggml_tensor * K = - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), - n_embd/n_head, n_head, n_past + N), - 0, 2, 1, 3); - - // GG: flash attention - //struct ggml_tensor * V = - // ggml_cpy(ctx0, - // ggml_permute(ctx0, - // ggml_reshape_3d(ctx0, - // ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), - // n_embd/n_head, n_head, n_past + N), - // 1, 2, 0, 3), - // ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head)); - - //struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true); - - // K * Q - // [n_past + N, N, 12] - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - - // KQ_scaled = KQ / sqrt(n_embd/n_head) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_scaled = - ggml_scale(ctx0, - KQ, - ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)) - ); - - // KQ_masked = mask_past(KQ_scaled) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); - - // KQ = soft_max(KQ_masked) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - - // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() - // [n_past + N, 64, 12] - struct ggml_tensor * V_trans = - ggml_cpy(ctx0, - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), - n_embd/n_head, n_head, n_past + N), - 1, 2, 0, 3), - ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head)); - - // KQV = transpose(V) * KQ_soft_max - // [64, N, 12] - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); - - // KQV_merged = KQV.permute(0, 2, 1, 3) - // [64, 12, N] - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - - // cur = KQV_merged.contiguous().view(n_embd, N) - // [768, N] - cur = ggml_cpy(ctx0, - KQV_merged, - ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); - } - - // projection - // [ 768, 768] - model.layers[il].c_attn_proj_w - // [ 768, 1] - model.layers[il].c_attn_proj_b - // [ 768, N] - cur (in) - // [ 768, N] - cur (out) - // - // cur = proj_w*cur + proj_b - // [768, N] - { - cur = ggml_mul_mat(ctx0, - model.layers[il].c_attn_proj_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), - cur); - } - - // add the input - cur 
= ggml_add(ctx0, cur, inpL); - - struct ggml_tensor * inpFF = cur; - - // feed-forward network - { - // norm - { - cur = ggml_norm(ctx0, inpFF); - - // cur = ln_2_g*cur + ln_2_b - // [ 768, N] - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_2_g, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_2_b, cur)); - } - - // fully connected - // [3072, 768] - model.layers[il].c_mlp_fc_w - // [3072, 1] - model.layers[il].c_mlp_fc_b - // [ 768, N] - cur (in) - // [3072, N] - cur (out) - // - // cur = fc_w*cur + fc_b - // [3072, N] - cur = ggml_mul_mat(ctx0, - model.layers[il].c_mlp_fc_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), - cur); - - // GELU activation - // [3072, N] - cur = ggml_gelu(ctx0, cur); - - // projection - // [ 768, 3072] - model.layers[il].c_mlp_proj_w - // [ 768, 1] - model.layers[il].c_mlp_proj_b - // [3072, N] - cur (in) - // [ 768, N] - cur (out) - // - // cur = proj_w*cur + proj_b - // [768, N] - cur = ggml_mul_mat(ctx0, - model.layers[il].c_mlp_proj_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), - cur); - } - - // input for next layer - inpL = ggml_add(ctx0, cur, inpFF); - } - - // norm - { - // [ 768, N] - inpL = ggml_norm(ctx0, inpL); - - // inpL = ln_f_g*inpL + ln_f_b - // [ 768, N] - inpL = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.ln_f_g, inpL), - inpL), - ggml_repeat(ctx0, model.ln_f_b, inpL)); - } - - // inpL = WTE * inpL - // [ 768, 50257] - model.lm_head - // [ 768, N] - inpL - inpL = ggml_mul_mat(ctx0, model.lm_head, inpL); - - // logits -> probs - //inpL = ggml_soft_max(ctx0, inpL); - - // run the computation - ggml_build_forward_expand(&gf, inpL); - ggml_graph_compute (ctx0, &gf); - - //if (n_past%100 == 0) { - // ggml_graph_print (&gf); - // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); - //} - - //embd_w.resize(n_vocab*N); - //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N); - - // return result just for the last token - embd_w.resize(n_vocab); - memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab); - - if (mem_per_token == 0) { - mem_per_token = ggml_used_mem(ctx0)/N; - } - //printf("used_mem = %zu\n", ggml_used_mem(ctx0)); - - ggml_free(ctx0); - - return true; -} - -int main(int argc, char ** argv) { - ggml_time_init(); - const int64_t t_main_start_us = ggml_time_us(); - - gpt_params params; - params.model = "models/gpt-2-117M/ggml-model.bin"; - - if (gpt_params_parse(argc, argv, params) == false) { - return 1; - } - - if (params.seed < 0) { - params.seed = time(NULL); - } - - printf("%s: seed = %d\n", __func__, params.seed); - - std::mt19937 rng(params.seed); - if (params.prompt.empty()) { - if( !isatty(STDIN_FILENO) ){ - std::string line; - while( std::getline(std::cin, line) ){ - params.prompt = params.prompt + "\n" + line; - } - } else { - params.prompt = gpt_random_prompt(rng); - } - } - - int64_t t_load_us = 0; - - gpt_vocab vocab; - gpt2_model model; - - // load the model - { - const int64_t t_start_us = ggml_time_us(); - - if (!gpt2_model_load(params.model, model, vocab)) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); - return 1; - } - - t_load_us = ggml_time_us() - t_start_us; - } - - int n_past = 0; - - int64_t t_sample_us = 0; - int64_t t_predict_us = 0; - - std::vector logits; - - // tokenize the prompt - std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); - - params.n_predict = 
std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); - - printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str()); - printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, embd_inp.size()); - for (int i = 0; i < std::min(8, (int) embd_inp.size()); i++) { - printf("%d ", embd_inp[i]); - } - printf("\n\n"); - - // submit the input prompt token-by-token - // this reduces the memory usage during inference, at the cost of a bit of speed at the beginning - std::vector embd; - - // determine the required inference memory per token: - size_t mem_per_token = 0; - gpt2_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); - - for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { - // predict - if (embd.size() > 0) { - const int64_t t_start_us = ggml_time_us(); - - if (!gpt2_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) { - printf("Failed to predict\n"); - return 1; - } - - t_predict_us += ggml_time_us() - t_start_us; - } - - n_past += embd.size(); - embd.clear(); - - if (i >= embd_inp.size()) { - // sample next token - const int top_k = params.top_k; - const float top_p = params.top_p; - const float temp = params.temp; - - const int n_vocab = model.hparams.n_vocab; - - gpt_vocab::id id = 0; - - { - const int64_t t_start_sample_us = ggml_time_us(); - - id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); - - t_sample_us += ggml_time_us() - t_start_sample_us; - } - - // add it to the context - embd.push_back(id); - } else { - // if here, it means we are still processing the input prompt - for (int k = i; k < embd_inp.size(); k++) { - embd.push_back(embd_inp[k]); - if (embd.size() >= params.n_batch) { - break; - } - } - i += embd.size() - 1; - } - - // display text - for (auto id : embd) { - printf("%s", vocab.id_to_token[id].c_str()); - } - fflush(stdout); - - // end of text token - if (embd.back() == 50256) { - break; - } - } - - // report timing - { - const int64_t t_main_end_us = ggml_time_us(); - - printf("\n\n"); - printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token); - printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); - printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); - printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); - printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); - } - - ggml_free(model.ctx); - - return 0; -} \ No newline at end of file diff --git a/otherarch/gpt2_quantize.cpp b/otherarch/gpt2_quantize.cpp deleted file mode 100644 index 9a7916d49..000000000 --- a/otherarch/gpt2_quantize.cpp +++ /dev/null @@ -1,323 +0,0 @@ -#include "ggml.h" - -#include "utils.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// default hparams (GPT-2 117M) -struct gpt2_hparams { - int32_t n_vocab = 50257; - int32_t n_ctx = 1024; - int32_t n_embd = 768; - int32_t n_head = 12; - int32_t n_layer = 12; - int32_t f16 = 1; -}; - -// quantize a model -bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) { - ggml_type type = GGML_TYPE_Q4_1; - - switch (itype) { - case 2: type = GGML_TYPE_Q4_0; break; - case 3: type = GGML_TYPE_Q4_1; break; - default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1; - }; - - if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) { - 
fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type); - return false; - } - - gpt_vocab vocab; - - printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); - - auto finp = std::ifstream(fname_inp, std::ios::binary); - if (!finp) { - fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str()); - return false; - } - - auto fout = std::ofstream(fname_out, std::ios::binary); - if (!fout) { - fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str()); - return false; - } - - // verify magic - { - uint32_t magic; - finp.read((char *) &magic, sizeof(magic)); - if (magic != 0x67676d6c) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str()); - return false; - } - - fout.write((char *) &magic, sizeof(magic)); - } - - gpt2_hparams hparams; - - // load hparams - { - finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); - finp.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); - finp.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); - finp.read((char *) &hparams.n_head, sizeof(hparams.n_head)); - finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); - finp.read((char *) &hparams.f16, sizeof(hparams.f16)); - - printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); - printf("%s: n_embd = %d\n", __func__, hparams.n_embd); - printf("%s: n_head = %d\n", __func__, hparams.n_head); - printf("%s: n_layer = %d\n", __func__, hparams.n_layer); - printf("%s: f16 = %d\n", __func__, hparams.f16); - - fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); - fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); - fout.write((char *) &hparams.n_embd, sizeof(hparams.n_embd)); - fout.write((char *) &hparams.n_head, sizeof(hparams.n_head)); - fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer)); - fout.write((char *) &itype, sizeof(hparams.f16)); - } - - // load vocab - { - int32_t n_vocab = 0; - finp.read ((char *) &n_vocab, sizeof(n_vocab)); - fout.write((char *) &n_vocab, sizeof(n_vocab)); - - if (n_vocab != hparams.n_vocab) { - fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", - __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab); - return false; - } - - std::string word; - for (int i = 0; i < n_vocab; i++) { - uint32_t len; - finp.read ((char *) &len, sizeof(len)); - fout.write((char *) &len, sizeof(len)); - - word.resize(len); - finp.read ((char *) word.data(), len); - fout.write((char *) word.data(), len); - - vocab.token_to_id[word] = i; - vocab.id_to_token[i] = word; - } - } - - // load weights - { - size_t total_size_org = 0; - size_t total_size_new = 0; - - std::vector work; - - std::vector data_u8; - std::vector data_f16; - std::vector data_f32; - - std::vector hist_all(1 << 4, 0); - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ftype; - - finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - finp.read(reinterpret_cast(&length), sizeof(length)); - finp.read(reinterpret_cast(&ftype), sizeof(ftype)); - - if (finp.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - finp.read (reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - finp.read (&name[0], length); - - { - static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; - printf("%24s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]); - } - - 
// regexes of tensor names to be quantized - const std::vector k_names = { - "model/wte", - "model/lm_head", - "model/h.*/attn/c_attn/w", - "model/h.*/attn/c_proj/w", - "model/h.*/mlp/c_fc/w", - "model/h.*/mlp/c_proj/w", - }; - - bool quantize = false; - for (const auto & s : k_names) { - if (std::regex_match(name, std::regex(s))) { - quantize = true; - break; - } - } - - if (quantize) { - if (ftype != 0 && ftype != 1) { - fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype); - return false; - } - - if (ftype == 1) { - data_f16.resize(nelements); - finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); - data_f32.resize(nelements); - for (int i = 0; i < nelements; ++i) { - data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); - } - } else { - data_f32.resize(nelements); - finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); - } - - ftype = itype; - } else { - const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t); - - data_u8.resize(nelements*bpe); - finp.read(reinterpret_cast(data_u8.data()), nelements * bpe); - } - - fout.write(reinterpret_cast(&n_dims), sizeof(n_dims)); - fout.write(reinterpret_cast(&length), sizeof(length)); - fout.write(reinterpret_cast(&ftype), sizeof(ftype)); - for (int i = 0; i < n_dims; ++i) { - fout.write(reinterpret_cast(&ne[i]), sizeof(ne[i])); - } - fout.write(&name[0], length); - - if (quantize) { - printf("quantizing .. "); - work.resize(nelements); // for quantization - - size_t cur_size = 0; - std::vector hist_cur(1 << 4, 0); - - switch (type) { - case GGML_TYPE_Q4_0: - { - cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); - } break; - case GGML_TYPE_Q4_1: - { - cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); - } break; - default: - { - fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type); - return false; - } - } - - fout.write(reinterpret_cast(work.data()), cur_size); - total_size_new += cur_size; - - printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0); - for (int i = 0; i < hist_cur.size(); ++i) { - hist_all[i] += hist_cur[i]; - } - - for (int i = 0; i < hist_cur.size(); ++i) { - printf("%5.3f ", hist_cur[i] / (float)nelements); - } - printf("\n"); - } else { - printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0); - fout.write(reinterpret_cast(data_u8.data()), data_u8.size()); - total_size_new += data_u8.size(); - } - - total_size_org += nelements * sizeof(float); - } - - printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); - printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); - - { - int64_t sum_all = 0; - for (int i = 0; i < hist_all.size(); ++i) { - sum_all += hist_all[i]; - } - - printf("%s: hist: ", __func__); - for (int i = 0; i < hist_all.size(); ++i) { - printf("%5.3f ", hist_all[i] / (float)sum_all); - } - printf("\n"); - } - } - - finp.close(); - fout.close(); - - return true; -} - -// usage: -// ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-quant.bin type -// -int main(int argc, char ** argv) { - if (argc != 4) { - fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]); - fprintf(stderr, " type = 2 - q4_0\n"); - fprintf(stderr, " type = 3 - q4_1\n"); - return 1; - } - - const std::string fname_inp = argv[1]; - const std::string fname_out = argv[2]; - - const int itype = atoi(argv[3]); 
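// itype matches the usage string above: 2 -> GGML_TYPE_Q4_0, 3 -> GGML_TYPE_Q4_1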
- - const int64_t t_main_start_us = ggml_time_us(); - - int64_t t_quantize_us = 0; - - // load the model - { - const int64_t t_start_us = ggml_time_us(); - - if (!gpt2_model_quantize(fname_inp, fname_out, itype)) { - fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); - return 1; - } - - t_quantize_us = ggml_time_us() - t_start_us; - } - - // report timing - { - const int64_t t_main_end_us = ggml_time_us(); - - printf("\n"); - printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f); - printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); - } - - return 0; -} \ No newline at end of file diff --git a/otherarch/gptj_old.cpp b/otherarch/gptj_old.cpp deleted file mode 100644 index ef4e5f9c6..000000000 --- a/otherarch/gptj_old.cpp +++ /dev/null @@ -1,682 +0,0 @@ -#include "ggml_v1.h" -#include "otherarch.h" - -#include "utils.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - - -// load the model's weights from a file -bool legacy_gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab & vocab) { - printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str()); - - auto fin = std::ifstream(fname, std::ios::binary); - if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); - return false; - } - - // verify magic - { - uint32_t magic; - fin.read((char *) &magic, sizeof(magic)); - if (magic != 0x67676d6c) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); - return false; - } - } - - // load hparams - { - auto & hparams = model.hparams; - - fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); - fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); - fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); - fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); - fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); - fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot)); - fin.read((char *) &hparams.f16, sizeof(hparams.f16)); - - printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); - printf("%s: n_embd = %d\n", __func__, hparams.n_embd); - printf("%s: n_head = %d\n", __func__, hparams.n_head); - printf("%s: n_layer = %d\n", __func__, hparams.n_layer); - printf("%s: n_rot = %d\n", __func__, hparams.n_rot); - printf("%s: f16 = %d\n", __func__, hparams.f16); - } - - // load vocab - { - int32_t n_vocab = 0; - fin.read((char *) &n_vocab, sizeof(n_vocab)); - - if (n_vocab != model.hparams.n_vocab) { - fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", - __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); - return false; - } - - std::string word; - for (int i = 0; i < n_vocab; i++) { - uint32_t len; - fin.read((char *) &len, sizeof(len)); - - word.resize(len); - fin.read((char *) word.data(), len); - - vocab.token_to_id[word] = i; - vocab.id_to_token[i] = word; - } - } - - // for the big tensors, we have the option to store the data in 16-bit floats - // in order to save memory and also to speed up the computation - const ggml_type wtype = model.hparams.f16 ? 
GGML_TYPE_F16 : GGML_TYPE_F32; - - auto & ctx = model.ctx; - - size_t ctx_size = 0; - - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_vocab = hparams.n_vocab; - - ctx_size += n_embd*ggml_type_size(GGML_TYPE_F32); // ln_f_g - ctx_size += n_embd*ggml_type_size(GGML_TYPE_F32); // ln_f_b - - ctx_size += n_embd*n_vocab*ggml_type_size(wtype); // wte - - ctx_size += n_embd*n_vocab*ggml_type_size(wtype); // lmh_g - ctx_size += n_vocab*ggml_type_size(GGML_TYPE_F32); // lmh_b - - ctx_size += n_layer*(n_embd*ggml_type_size(GGML_TYPE_F32)); // ln_1_g - ctx_size += n_layer*(n_embd*ggml_type_size(GGML_TYPE_F32)); // ln_1_b - - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_q_proj_w - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_k_proj_w - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_v_proj_w - - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_proj_w - - ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_size(wtype)); // c_mlp_fc_w - ctx_size += n_layer*( 4*n_embd*ggml_type_size(GGML_TYPE_F32)); // c_mlp_fc_b - - ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_size(wtype)); // c_mlp_proj_w_trans - ctx_size += n_layer*( n_embd*ggml_type_size(GGML_TYPE_F32)); // c_mlp_proj_b - - ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_k - ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_v - - ctx_size += (5 + 10*n_layer)*256; // object overhead - - printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); - } - - // create the ggml context - { - struct ggml_init_params params = { - .mem_size = ctx_size, - .mem_buffer = NULL, - }; - - model.ctx = ggml_init(params); - if (!model.ctx) { - fprintf(stderr, "%s: ggml_init() failed\n", __func__); - return false; - } - } - - // prepare memory for the weights - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_vocab = hparams.n_vocab; - - model.layers.resize(n_layer); - - model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - - model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - model.lmh_g = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - model.lmh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_vocab); - - // map by name - model.tensors["transformer.wte.weight"] = model.wte; - - model.tensors["transformer.ln_f.weight"] = model.ln_f_g; - model.tensors["transformer.ln_f.bias"] = model.ln_f_b; - - model.tensors["lm_head.weight"] = model.lmh_g; - model.tensors["lm_head.bias"] = model.lmh_b; - - for (int i = 0; i < n_layer; ++i) { - auto & layer = model.layers[i]; - - layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.c_attn_q_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - layer.c_attn_k_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - layer.c_attn_v_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - - layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - - layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); - layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); - - layer.c_mlp_proj_w_trans = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, 
n_embd); - layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - // map by name - model.tensors["transformer.h." + std::to_string(i) + ".ln_1.weight"] = layer.ln_1_g; - model.tensors["transformer.h." + std::to_string(i) + ".ln_1.bias"] = layer.ln_1_b; - - model.tensors["transformer.h." + std::to_string(i) + ".attn.q_proj.weight"] = layer.c_attn_q_proj_w; - model.tensors["transformer.h." + std::to_string(i) + ".attn.k_proj.weight"] = layer.c_attn_k_proj_w; - model.tensors["transformer.h." + std::to_string(i) + ".attn.v_proj.weight"] = layer.c_attn_v_proj_w; - - model.tensors["transformer.h." + std::to_string(i) + ".attn.out_proj.weight"] = layer.c_attn_proj_w; - - model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_in.weight"] = layer.c_mlp_fc_w; - model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_in.bias"] = layer.c_mlp_fc_b; - - model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_out.weight"] = layer.c_mlp_proj_w_trans; - model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_out.bias"] = layer.c_mlp_proj_b; - } - } - - // key + value memory - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - - const int n_mem = n_layer*n_ctx; - const int n_elements = n_embd*n_mem; - - model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - - const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); - - printf("%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); - } - - // load weights - { - int n_tensors = 0; - size_t total_size = 0; - - printf("%s: ", __func__); - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ftype; - - fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - fin.read(reinterpret_cast(&length), sizeof(length)); - fin.read(reinterpret_cast(&ftype), sizeof(ftype)); - - if (fin.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - fin.read(&name[0], length); - - if (model.tensors.find(name.data()) == model.tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); - return false; - } - - auto tensor = model.tensors[name.data()]; - if (ggml_nelements(tensor) != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); - return false; - } - - if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", - __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); - return false; - } - - const size_t bpe = tensor->type == GGML_TYPE_I8 ? 1 : (ftype == 0) ? sizeof(float) : sizeof(ggml_fp16_t); - - if (nelements*bpe != ggml_nbytes(tensor)) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); - return false; - } - - fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); - - //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? 
"float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); - total_size += ggml_nbytes(tensor); - if (++n_tensors % 8 == 0) { - printf("."); - fflush(stdout); - } - } - - printf(" done\n"); - - printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors); - } - - fin.close(); - - return true; -} - -// evaluate the transformer -// -// - model: the model -// - n_threads: number of threads to use -// - n_past: the context size so far -// - embd_inp: the embeddings of the tokens in the context -// - embd_w: the predicted logits for the next token -// -// The GPT-J model requires about 16MB of memory per input token. -// -bool legacy_gptj_eval( - const gptj_model & model, - const int n_threads, - const int n_past, - const std::vector & embd_inp, - std::vector & embd_w, - size_t & mem_per_token) { - const int N = embd_inp.size(); - - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_head = hparams.n_head; - const int n_vocab = hparams.n_vocab; - const int n_rot = hparams.n_rot; - - const int d_key = n_embd/n_head; - - static size_t buf_size = 256u*1024*1024; - static void * buf = malloc(buf_size); - - if (mem_per_token > 0 && mem_per_token*N > buf_size) { - const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead - //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new); - - // reallocate - buf_size = buf_size_new; - buf = realloc(buf, buf_size); - if (buf == nullptr) { - fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size); - return false; - } - } - - struct ggml_init_params params = { - .mem_size = buf_size, - .mem_buffer = buf, - }; - - struct ggml_context * ctx0 = ggml_init(params); - struct ggml_cgraph gf = { .n_threads = n_threads }; - - struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd)); - - // wte - struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * cur; - - // norm - { - cur = ggml_norm(ctx0, inpL); - - // cur = ln_1_g*cur + ln_1_b - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_1_g, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_1_b, cur)); - } - - struct ggml_tensor * inpSA = cur; - - // self-attention - { - struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, ggml_transpose(ctx0, model.layers[il].c_attn_q_proj_w), cur); - struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, ggml_transpose(ctx0, model.layers[il].c_attn_k_proj_w), cur); - struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, ggml_transpose(ctx0, model.layers[il].c_attn_v_proj_w), cur); - - // store key and value to memory - if (N >= 1) { - struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); - struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); - - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); - } - - // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) - struct ggml_tensor * Q = - ggml_permute(ctx0, - ggml_rope(ctx0, - ggml_cpy(ctx0, - Qcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)), - n_past, n_rot, 0), - 
0, 2, 1, 3); - - // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) - struct ggml_tensor * K = - ggml_permute(ctx0, - ggml_rope(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), - n_embd/n_head, n_head, n_past + N), - n_past, n_rot, 1), - 0, 2, 1, 3); - - // K * Q - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - - // KQ_scaled = KQ / sqrt(n_embd/n_head) - struct ggml_tensor * KQ_scaled = - ggml_scale(ctx0, - KQ, - ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)) - ); - - // KQ_masked = mask_past(KQ_scaled) - struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); - - // KQ = soft_max(KQ_masked) - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - - // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() - struct ggml_tensor * V_trans = - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), - n_embd/n_head, n_head, n_past + N), - 1, 2, 0, 3); - - // KQV = transpose(V) * KQ_soft_max - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); - - // KQV_merged = KQV.permute(0, 2, 1, 3) - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - - // cur = KQV_merged.contiguous().view(n_embd, N) - cur = ggml_cpy(ctx0, - KQV_merged, - ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); - - // projection (no bias) - cur = ggml_mul_mat(ctx0, - ggml_transpose(ctx0, model.layers[il].c_attn_proj_w), - cur); - } - - struct ggml_tensor * inpFF = cur; - - // feed-forward network - // this is independent of the self-attention result, so it could be done in parallel to the self-attention - { - // note here we pass inpSA instead of cur - cur = ggml_mul_mat(ctx0, - ggml_transpose(ctx0, model.layers[il].c_mlp_fc_w), - inpSA); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), - cur); - - // GELU activation - cur = ggml_gelu(ctx0, cur); - - // projection - // cur = proj_w*cur + proj_b - cur = ggml_mul_mat(ctx0, - model.layers[il].c_mlp_proj_w_trans, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), - cur); - } - - // self-attention + FF - cur = ggml_add(ctx0, cur, inpFF); - - // input for next layer - inpL = ggml_add(ctx0, cur, inpL); - } - - // norm - { - inpL = ggml_norm(ctx0, inpL); - - // inpL = ln_f_g*inpL + ln_f_b - inpL = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.ln_f_g, inpL), - inpL), - ggml_repeat(ctx0, model.ln_f_b, inpL)); - } - - // lm_head - { - inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL); - - inpL = ggml_add(ctx0, - ggml_repeat(ctx0, model.lmh_b, inpL), - inpL); - } - - // logits -> probs - //inpL = ggml_soft_max(ctx0, inpL); - - // run the computation - ggml_build_forward_expand(&gf, inpL); - ggml_graph_compute (ctx0, &gf); - - //if (n_past%100 == 0) { - // ggml_graph_print (&gf); - // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); - //} - - //embd_w.resize(n_vocab*N); - //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N); - - // return result for just the last token - embd_w.resize(n_vocab); - memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab); - - if (mem_per_token == 0) { - mem_per_token = ggml_used_mem(ctx0)/N; - } - //printf("used_mem = %zu\n", ggml_used_mem(ctx0)); - - ggml_free(ctx0); - - return true; -} - -int main(int argc, 
char ** argv) { - ggml_time_init(); - const int64_t t_main_start_us = ggml_time_us(); - - gpt_params params; - params.model = "models/gpt-j-6B/ggml-model.bin"; - - if (gpt_params_parse(argc, argv, params) == false) { - return 1; - } - - if (params.seed < 0) { - params.seed = time(NULL); - } - - printf("%s: seed = %d\n", __func__, params.seed); - - std::mt19937 rng(params.seed); - if (params.prompt.empty()) { - if( !isatty(STDIN_FILENO) ){ - std::string line; - while( std::getline(std::cin, line) ){ - params.prompt = params.prompt + "\n" + line; - } - } else { - params.prompt = gpt_random_prompt(rng); - } - } - - int64_t t_load_us = 0; - - gpt_vocab vocab; - gptj_model model; - - // load the model - { - const int64_t t_start_us = ggml_time_us(); - - if (!legacy_gptj_model_load(params.model, model, vocab)) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); - return 1; - } - - t_load_us = ggml_time_us() - t_start_us; - } - - int n_past = 0; - - int64_t t_sample_us = 0; - int64_t t_predict_us = 0; - - std::vector logits; - - // tokenize the prompt - std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); - - params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); - - printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); - printf("\n"); - - std::vector embd; - - // determine the required inference memory per token: - size_t mem_per_token = 0; - legacy_gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); - - for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { - // predict - if (embd.size() > 0) { - const int64_t t_start_us = ggml_time_us(); - - if (!legacy_gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) { - printf("Failed to predict\n"); - return 1; - } - - t_predict_us += ggml_time_us() - t_start_us; - } - - n_past += embd.size(); - embd.clear(); - - if (i >= embd_inp.size()) { - // sample next token - const int top_k = params.top_k; - const float top_p = params.top_p; - const float temp = params.temp; - - const int n_vocab = model.hparams.n_vocab; - - gpt_vocab::id id = 0; - - { - const int64_t t_start_sample_us = ggml_time_us(); - - id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); - - t_sample_us += ggml_time_us() - t_start_sample_us; - } - - // add it to the context - embd.push_back(id); - } else { - // if here, it means we are still processing the input prompt - for (int k = i; k < embd_inp.size(); k++) { - embd.push_back(embd_inp[k]); - if (embd.size() > params.n_batch) { - break; - } - } - i += embd.size() - 1; - } - - // display text - for (auto id : embd) { - printf("%s", vocab.id_to_token[id].c_str()); - } - fflush(stdout); - - // end of text token - if (embd.back() == 50256) { - break; - } - } - - // report timing - { - const int64_t t_main_end_us = ggml_time_us(); - - printf("\n\n"); - printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token); - printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); - printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); - printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); - printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); - } - - ggml_free(model.ctx); - - return 0; -} diff --git a/otherarch/gptj_v1.cpp b/otherarch/gptj_v1.cpp new file mode 100644 index 
000000000..93e7d0684 --- /dev/null +++ b/otherarch/gptj_v1.cpp @@ -0,0 +1,682 @@ +#include "ggml_v1.h" +#include "otherarch.h" + +#include "utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +// load the model's weights from a file +bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gpt_vocab & vocab) { + printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str()); + + auto fin = std::ifstream(fname, std::ios::binary); + if (!fin) { + fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); + return false; + } + + // verify magic + { + uint32_t magic; + fin.read((char *) &magic, sizeof(magic)); + if (magic != 0x67676d6c) { + fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); + return false; + } + } + + // load hparams + { + auto & hparams = model.hparams; + + fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); + fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); + fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); + fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); + fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); + fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot)); + fin.read((char *) &hparams.f16, sizeof(hparams.f16)); + + printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); + printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); + printf("%s: n_embd = %d\n", __func__, hparams.n_embd); + printf("%s: n_head = %d\n", __func__, hparams.n_head); + printf("%s: n_layer = %d\n", __func__, hparams.n_layer); + printf("%s: n_rot = %d\n", __func__, hparams.n_rot); + printf("%s: f16 = %d\n", __func__, hparams.f16); + } + + // load vocab + { + int32_t n_vocab = 0; + fin.read((char *) &n_vocab, sizeof(n_vocab)); + + if (n_vocab != model.hparams.n_vocab) { + fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", + __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); + return false; + } + + std::string word; + for (int i = 0; i < n_vocab; i++) { + uint32_t len; + fin.read((char *) &len, sizeof(len)); + + word.resize(len); + fin.read((char *) word.data(), len); + + vocab.token_to_id[word] = i; + vocab.id_to_token[i] = word; + } + } + + // for the big tensors, we have the option to store the data in 16-bit floats + // in order to save memory and also to speed up the computation + const ggml_v1_type wtype = model.hparams.f16 ? 
GGML_V1_TYPE_F16 : GGML_V1_TYPE_F32; + + auto & ctx = model.ctx; + + size_t ctx_size = 0; + + { + const auto & hparams = model.hparams; + + const int n_embd = hparams.n_embd; + const int n_layer = hparams.n_layer; + const int n_ctx = hparams.n_ctx; + const int n_vocab = hparams.n_vocab; + + ctx_size += n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // ln_f_g + ctx_size += n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // ln_f_b + + ctx_size += n_embd*n_vocab*ggml_v1_type_size(wtype); // wte + + ctx_size += n_embd*n_vocab*ggml_v1_type_size(wtype); // lmh_g + ctx_size += n_vocab*ggml_v1_type_size(GGML_V1_TYPE_F32); // lmh_b + + ctx_size += n_layer*(n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // ln_1_g + ctx_size += n_layer*(n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // ln_1_b + + ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_size(wtype)); // c_attn_q_proj_w + ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_size(wtype)); // c_attn_k_proj_w + ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_size(wtype)); // c_attn_v_proj_w + + ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_size(wtype)); // c_attn_proj_w + + ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_size(wtype)); // c_mlp_fc_w + ctx_size += n_layer*( 4*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // c_mlp_fc_b + + ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_size(wtype)); // c_mlp_proj_w_trans + ctx_size += n_layer*( n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // c_mlp_proj_b + + ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_k + ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_v + + ctx_size += (5 + 10*n_layer)*256; // object overhead + + printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); + } + + // create the ggml context + { + struct ggml_v1_init_params params = { + .mem_size = ctx_size, + .mem_buffer = NULL, + }; + + model.ctx = ggml_v1_init(params); + if (!model.ctx) { + fprintf(stderr, "%s: ggml_v1_init() failed\n", __func__); + return false; + } + } + + // prepare memory for the weights + { + const auto & hparams = model.hparams; + + const int n_embd = hparams.n_embd; + const int n_layer = hparams.n_layer; + const int n_ctx = hparams.n_ctx; + const int n_vocab = hparams.n_vocab; + + model.layers.resize(n_layer); + + model.wte = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, n_vocab); + + model.ln_f_g = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_embd); + model.ln_f_b = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_embd); + + model.lmh_g = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, n_vocab); + model.lmh_b = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_vocab); + + // map by name + model.tensors["transformer.wte.weight"] = model.wte; + + model.tensors["transformer.ln_f.weight"] = model.ln_f_g; + model.tensors["transformer.ln_f.bias"] = model.ln_f_b; + + model.tensors["lm_head.weight"] = model.lmh_g; + model.tensors["lm_head.bias"] = model.lmh_b; + + for (int i = 0; i < n_layer; ++i) { + auto & layer = model.layers[i]; + + layer.ln_1_g = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_embd); + layer.ln_1_b = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_embd); + + layer.c_attn_q_proj_w = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, n_embd); + layer.c_attn_k_proj_w = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, n_embd); + layer.c_attn_v_proj_w = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, n_embd); + + layer.c_attn_proj_w = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, n_embd); + + layer.c_mlp_fc_w = ggml_v1_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); 
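// MLP weights: fc_in (c_mlp_fc_w) expands n_embd -> 4*n_embd; fc_out (c_mlp_proj_w_trans) is stored transposed, as the _trans suffix suggests, and projects back down to n_embd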
+            layer.c_mlp_fc_b = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, 4*n_embd);
+
+            layer.c_mlp_proj_w_trans = ggml_v1_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
+            layer.c_mlp_proj_b       = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_embd);
+
+            // map by name
+            model.tensors["transformer.h." + std::to_string(i) + ".ln_1.weight"] = layer.ln_1_g;
+            model.tensors["transformer.h." + std::to_string(i) + ".ln_1.bias"]   = layer.ln_1_b;
+
+            model.tensors["transformer.h." + std::to_string(i) + ".attn.q_proj.weight"] = layer.c_attn_q_proj_w;
+            model.tensors["transformer.h." + std::to_string(i) + ".attn.k_proj.weight"] = layer.c_attn_k_proj_w;
+            model.tensors["transformer.h." + std::to_string(i) + ".attn.v_proj.weight"] = layer.c_attn_v_proj_w;
+
+            model.tensors["transformer.h." + std::to_string(i) + ".attn.out_proj.weight"] = layer.c_attn_proj_w;
+
+            model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_in.weight"] = layer.c_mlp_fc_w;
+            model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_in.bias"]   = layer.c_mlp_fc_b;
+
+            model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_out.weight"] = layer.c_mlp_proj_w_trans;
+            model.tensors["transformer.h." + std::to_string(i) + ".mlp.fc_out.bias"]   = layer.c_mlp_proj_b;
+        }
+    }
+
+    // key + value memory
+    {
+        const auto & hparams = model.hparams;
+
+        const int n_embd  = hparams.n_embd;
+        const int n_layer = hparams.n_layer;
+        const int n_ctx   = hparams.n_ctx;
+
+        const int n_mem      = n_layer*n_ctx;
+        const int n_elements = n_embd*n_mem;
+
+        model.memory_k = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+        model.memory_v = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, n_elements);
+
+        const size_t memory_size = ggml_v1_nbytes(model.memory_k) + ggml_v1_nbytes(model.memory_v);
+
+        printf("%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
+    }
+
+    // load weights
+    {
+        int n_tensors = 0;
+        size_t total_size = 0;
+
+        printf("%s: ", __func__);
+
+        while (true) {
+            int32_t n_dims;
+            int32_t length;
+            int32_t ftype;
+
+            fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
+            fin.read(reinterpret_cast<char *>(&length), sizeof(length));
+            fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));
+
+            if (fin.eof()) {
+                break;
+            }
+
+            int32_t nelements = 1;
+            int32_t ne[2] = { 1, 1 };
+            for (int i = 0; i < n_dims; ++i) {
+                fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+                nelements *= ne[i];
+            }
+
+            std::string name(length, 0);
+            fin.read(&name[0], length);
+
+            if (model.tensors.find(name.data()) == model.tensors.end()) {
+                fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
+                return false;
+            }
+
+            auto tensor = model.tensors[name.data()];
+            if (ggml_v1_nelements(tensor) != nelements) {
+                fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
+                return false;
+            }
+
+            if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
+                fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+                        __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
+                return false;
+            }
+
+            const size_t bpe = tensor->type == GGML_V1_TYPE_I8 ? 1 : (ftype == 0) ? sizeof(float) : sizeof(ggml_v1_fp16_t);
+
+            if (nelements*bpe != ggml_v1_nbytes(tensor)) {
+                fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
+                        __func__, name.data(), ggml_v1_nbytes(tensor), nelements*bpe);
+                return false;
+            }
+
+            fin.read(reinterpret_cast<char *>(tensor->data), ggml_v1_nbytes(tensor));
+
+            //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_v1_nbytes(tensor)/1024.0/1024.0);
+            total_size += ggml_v1_nbytes(tensor);
+            if (++n_tensors % 8 == 0) {
+                printf(".");
+                fflush(stdout);
+            }
+        }
+
+        printf(" done\n");
+
+        printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
+    }
+
+    fin.close();
+
+    return true;
+}
+
+// evaluate the transformer
+//
+//   - model:     the model
+//   - n_threads: number of threads to use
+//   - n_past:    the context size so far
+//   - embd_inp:  the embeddings of the tokens in the context
+//   - embd_w:    the predicted logits for the next token
+//
+// The GPT-J model requires about 16MB of memory per input token.
+//
+bool legacy_gptj_eval(
+        const gptj_model_v1 & model,
+        const int n_threads,
+        const int n_past,
+        const std::vector<gpt_vocab::id> & embd_inp,
+              std::vector<float>         & embd_w,
+              size_t                     & mem_per_token) {
+    const int N = embd_inp.size();
+
+    const auto & hparams = model.hparams;
+
+    const int n_embd  = hparams.n_embd;
+    const int n_layer = hparams.n_layer;
+    const int n_ctx   = hparams.n_ctx;
+    const int n_head  = hparams.n_head;
+    const int n_vocab = hparams.n_vocab;
+    const int n_rot   = hparams.n_rot;
+
+    const int d_key = n_embd/n_head;
+
+    static size_t buf_size = 256u*1024*1024;
+    static void * buf = malloc(buf_size);
+
+    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
+        const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
+        //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
+
+        // reallocate
+        buf_size = buf_size_new;
+        buf = realloc(buf, buf_size);
+        if (buf == nullptr) {
+            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
+            return false;
+        }
+    }
+
+    struct ggml_v1_init_params params = {
+        .mem_size   = buf_size,
+        .mem_buffer = buf,
+    };
+
+    struct ggml_v1_context * ctx0 = ggml_v1_init(params);
+    struct ggml_v1_cgraph gf = { .n_threads = n_threads };
+
+    struct ggml_v1_tensor * embd = ggml_v1_new_tensor_1d(ctx0, GGML_V1_TYPE_I32, N);
+    memcpy(embd->data, embd_inp.data(), N*ggml_v1_element_size(embd));
+
+    // wte
+    struct ggml_v1_tensor * inpL = ggml_v1_get_rows(ctx0, model.wte, embd);
+
+    for (int il = 0; il < n_layer; ++il) {
+        struct ggml_v1_tensor * cur;
+
+        // norm
+        {
+            cur = ggml_v1_norm(ctx0, inpL);
+
+            // cur = ln_1_g*cur + ln_1_b
+            cur = ggml_v1_add(ctx0,
+                    ggml_v1_mul(ctx0,
+                        ggml_v1_repeat(ctx0, model.layers[il].ln_1_g, cur),
+                        cur),
+                    ggml_v1_repeat(ctx0, model.layers[il].ln_1_b, cur));
+        }
+
+        struct ggml_v1_tensor * inpSA = cur;
+
+        // self-attention
+        {
+            struct ggml_v1_tensor * Qcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_q_proj_w), cur);
+            struct ggml_v1_tensor * Kcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_k_proj_w), cur);
+            struct ggml_v1_tensor * Vcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_v_proj_w), cur);
+
+            // store key and value to memory
+            if (N >= 1) {
+                struct ggml_v1_tensor * k = ggml_v1_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_v1_element_size(model.memory_k)*n_embd)*(il*n_ctx +
n_past)); + struct ggml_v1_tensor * v = ggml_v1_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_v1_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); + + ggml_v1_build_forward_expand(&gf, ggml_v1_cpy(ctx0, Kcur, k)); + ggml_v1_build_forward_expand(&gf, ggml_v1_cpy(ctx0, Vcur, v)); + } + + // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) + struct ggml_v1_tensor * Q = + ggml_v1_permute(ctx0, + ggml_v1_rope(ctx0, + ggml_v1_cpy(ctx0, + Qcur, + ggml_v1_new_tensor_3d(ctx0, GGML_V1_TYPE_F32, n_embd/n_head, n_head, N)), + n_past, n_rot, 0), + 0, 2, 1, 3); + + // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) + struct ggml_v1_tensor * K = + ggml_v1_permute(ctx0, + ggml_v1_rope(ctx0, + ggml_v1_reshape_3d(ctx0, + ggml_v1_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_v1_element_size(model.memory_k)*n_embd), + n_embd/n_head, n_head, n_past + N), + n_past, n_rot, 1), + 0, 2, 1, 3); + + // K * Q + struct ggml_v1_tensor * KQ = ggml_v1_mul_mat(ctx0, K, Q); + + // KQ_scaled = KQ / sqrt(n_embd/n_head) + struct ggml_v1_tensor * KQ_scaled = + ggml_v1_scale(ctx0, + KQ, + ggml_v1_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)) + ); + + // KQ_masked = mask_past(KQ_scaled) + struct ggml_v1_tensor * KQ_masked = ggml_v1_diag_mask_inf(ctx0, KQ_scaled, n_past); + + // KQ = soft_max(KQ_masked) + struct ggml_v1_tensor * KQ_soft_max = ggml_v1_soft_max(ctx0, KQ_masked); + + // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() + struct ggml_v1_tensor * V_trans = + ggml_v1_permute(ctx0, + ggml_v1_reshape_3d(ctx0, + ggml_v1_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_v1_element_size(model.memory_v)*n_embd), + n_embd/n_head, n_head, n_past + N), + 1, 2, 0, 3); + + // KQV = transpose(V) * KQ_soft_max + struct ggml_v1_tensor * KQV = ggml_v1_mul_mat(ctx0, V_trans, KQ_soft_max); + + // KQV_merged = KQV.permute(0, 2, 1, 3) + struct ggml_v1_tensor * KQV_merged = ggml_v1_permute(ctx0, KQV, 0, 2, 1, 3); + + // cur = KQV_merged.contiguous().view(n_embd, N) + cur = ggml_v1_cpy(ctx0, + KQV_merged, + ggml_v1_new_tensor_2d(ctx0, GGML_V1_TYPE_F32, n_embd, N)); + + // projection (no bias) + cur = ggml_v1_mul_mat(ctx0, + ggml_v1_transpose(ctx0, model.layers[il].c_attn_proj_w), + cur); + } + + struct ggml_v1_tensor * inpFF = cur; + + // feed-forward network + // this is independent of the self-attention result, so it could be done in parallel to the self-attention + { + // note here we pass inpSA instead of cur + cur = ggml_v1_mul_mat(ctx0, + ggml_v1_transpose(ctx0, model.layers[il].c_mlp_fc_w), + inpSA); + + cur = ggml_v1_add(ctx0, + ggml_v1_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), + cur); + + // GELU activation + cur = ggml_v1_gelu(ctx0, cur); + + // projection + // cur = proj_w*cur + proj_b + cur = ggml_v1_mul_mat(ctx0, + model.layers[il].c_mlp_proj_w_trans, + cur); + + cur = ggml_v1_add(ctx0, + ggml_v1_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), + cur); + } + + // self-attention + FF + cur = ggml_v1_add(ctx0, cur, inpFF); + + // input for next layer + inpL = ggml_v1_add(ctx0, cur, inpL); + } + + // norm + { + inpL = ggml_v1_norm(ctx0, inpL); + + // inpL = ln_f_g*inpL + ln_f_b + inpL = ggml_v1_add(ctx0, + ggml_v1_mul(ctx0, + ggml_v1_repeat(ctx0, model.ln_f_g, inpL), + inpL), + ggml_v1_repeat(ctx0, model.ln_f_b, inpL)); + } + + // lm_head + { + inpL = ggml_v1_mul_mat(ctx0, model.lmh_g, inpL); + + inpL = ggml_v1_add(ctx0, + ggml_v1_repeat(ctx0, model.lmh_b, inpL), + inpL); + } + + // logits -> probs 
+ //inpL = ggml_v1_soft_max(ctx0, inpL); + + // run the computation + ggml_v1_build_forward_expand(&gf, inpL); + ggml_v1_graph_compute (ctx0, &gf); + + //if (n_past%100 == 0) { + // ggml_v1_graph_print (&gf); + // ggml_v1_graph_dump_dot(&gf, NULL, "gpt-2.dot"); + //} + + //embd_w.resize(n_vocab*N); + //memcpy(embd_w.data(), ggml_v1_get_data(inpL), sizeof(float)*n_vocab*N); + + // return result for just the last token + embd_w.resize(n_vocab); + memcpy(embd_w.data(), (float *) ggml_v1_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab); + + if (mem_per_token == 0) { + mem_per_token = ggml_v1_used_mem(ctx0)/N; + } + //printf("used_mem = %zu\n", ggml_v1_used_mem(ctx0)); + + ggml_v1_free(ctx0); + + return true; +} + +// int main(int argc, char ** argv) { +// ggml_v1_time_init(); +// const int64_t t_main_start_us = ggml_v1_time_us(); + +// gpt_params params; +// params.model = "models/gpt-j-6B/ggml-model.bin"; + +// if (utils_gpt_params_parse(argc, argv, params) == false) { +// return 1; +// } + +// if (params.seed < 0) { +// params.seed = time(NULL); +// } + +// printf("%s: seed = %d\n", __func__, params.seed); + +// std::mt19937 rng(params.seed); +// if (params.prompt.empty()) { +// if( !isatty(STDIN_FILENO) ){ +// std::string line; +// while( std::getline(std::cin, line) ){ +// params.prompt = params.prompt + "\n" + line; +// } +// } else { +// params.prompt = utils_gpt_random_prompt(rng); +// } +// } + +// int64_t t_load_us = 0; + +// gpt_vocab vocab; +// gptj_model_v1 model; + +// // load the model +// { +// const int64_t t_start_us = ggml_v1_time_us(); + +// if (!legacy_gptj_model_load(params.model, model, vocab)) { +// fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); +// return 1; +// } + +// t_load_us = ggml_v1_time_us() - t_start_us; +// } + +// int n_past = 0; + +// int64_t t_sample_us = 0; +// int64_t t_predict_us = 0; + +// std::vector logits; + +// // tokenize the prompt +// std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); + +// params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); + +// printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); +// printf("\n"); + +// std::vector embd; + +// // determine the required inference memory per token: +// size_t mem_per_token = 0; +// legacy_gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); + +// for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { +// // predict +// if (embd.size() > 0) { +// const int64_t t_start_us = ggml_v1_time_us(); + +// if (!legacy_gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) { +// printf("Failed to predict\n"); +// return 1; +// } + +// t_predict_us += ggml_v1_time_us() - t_start_us; +// } + +// n_past += embd.size(); +// embd.clear(); + +// if (i >= embd_inp.size()) { +// // sample next token +// const int top_k = params.top_k; +// const float top_p = params.top_p; +// const float temp = params.temp; + +// const int n_vocab = model.hparams.n_vocab; + +// gpt_vocab::id id = 0; + +// { +// const int64_t t_start_sample_us = ggml_v1_time_us(); + +// id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); + +// t_sample_us += ggml_v1_time_us() - t_start_sample_us; +// } + +// // add it to the context +// embd.push_back(id); +// } else { +// // if here, it means we are still processing the input prompt +// for (int k = i; k < embd_inp.size(); k++) { +// embd.push_back(embd_inp[k]); 
+// if (embd.size() > params.n_batch) { +// break; +// } +// } +// i += embd.size() - 1; +// } + +// // display text +// for (auto id : embd) { +// printf("%s", vocab.id_to_token[id].c_str()); +// } +// fflush(stdout); + +// // end of text token +// if (embd.back() == 50256) { +// break; +// } +// } + +// // report timing +// { +// const int64_t t_main_end_us = ggml_v1_time_us(); + +// printf("\n\n"); +// printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token); +// printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); +// printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); +// printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); +// printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); +// } + +// ggml_v1_free(model.ctx); + +// return 0; +// } diff --git a/otherarch/gptj.cpp b/otherarch/gptj_v2.cpp similarity index 83% rename from otherarch/gptj.cpp rename to otherarch/gptj_v2.cpp index ffb68153d..86a03106c 100644 --- a/otherarch/gptj.cpp +++ b/otherarch/gptj_v2.cpp @@ -585,145 +585,145 @@ bool gptj_eval( return true; } -int main(int argc, char ** argv) { - ggml_time_init(); - const int64_t t_main_start_us = ggml_time_us(); +// int main(int argc, char ** argv) { +// ggml_time_init(); +// const int64_t t_main_start_us = ggml_time_us(); - gpt_params params; - params.model = "models/gpt-j-6B/ggml-model.bin"; +// gpt_params params; +// params.model = "models/gpt-j-6B/ggml-model.bin"; - if (gpt_params_parse(argc, argv, params) == false) { - return 1; - } +// if (utils_gpt_params_parse(argc, argv, params) == false) { +// return 1; +// } - if (params.seed < 0) { - params.seed = time(NULL); - } +// if (params.seed < 0) { +// params.seed = time(NULL); +// } - printf("%s: seed = %d\n", __func__, params.seed); +// printf("%s: seed = %d\n", __func__, params.seed); - std::mt19937 rng(params.seed); - if (params.prompt.empty()) { - if( !isatty(STDIN_FILENO) ){ - std::string line; - while( std::getline(std::cin, line) ){ - params.prompt = params.prompt + "\n" + line; - } - } else { - params.prompt = gpt_random_prompt(rng); - } - } +// std::mt19937 rng(params.seed); +// if (params.prompt.empty()) { +// if( !isatty(STDIN_FILENO) ){ +// std::string line; +// while( std::getline(std::cin, line) ){ +// params.prompt = params.prompt + "\n" + line; +// } +// } else { +// params.prompt = utils_gpt_random_prompt(rng); +// } +// } - int64_t t_load_us = 0; +// int64_t t_load_us = 0; - gpt_vocab vocab; - gptj_model model; +// gpt_vocab vocab; +// gptj_model model; - // load the model - { - const int64_t t_start_us = ggml_time_us(); +// // load the model +// { +// const int64_t t_start_us = ggml_time_us(); - if (!gptj_model_load(params.model, model, vocab)) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); - return 1; - } +// if (!gptj_model_load(params.model, model, vocab)) { +// fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str()); +// return 1; +// } - t_load_us = ggml_time_us() - t_start_us; - } +// t_load_us = ggml_time_us() - t_start_us; +// } - int n_past = 0; +// int n_past = 0; - int64_t t_sample_us = 0; - int64_t t_predict_us = 0; +// int64_t t_sample_us = 0; +// int64_t t_predict_us = 0; - std::vector logits; +// std::vector logits; - // tokenize the prompt - std::vector embd_inp = ::gpt_tokenize(vocab, params.prompt); +// // tokenize the prompt +// std::vector embd_inp = 
::gpt_tokenize(vocab, params.prompt); - params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); +// params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); - printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); - printf("\n"); +// printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); +// printf("\n"); - std::vector embd; +// std::vector embd; - // determine the required inference memory per token: - size_t mem_per_token = 0; - gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); +// // determine the required inference memory per token: +// size_t mem_per_token = 0; +// gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token); - for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { - // predict - if (embd.size() > 0) { - const int64_t t_start_us = ggml_time_us(); +// for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) { +// // predict +// if (embd.size() > 0) { +// const int64_t t_start_us = ggml_time_us(); - if (!gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) { - printf("Failed to predict\n"); - return 1; - } +// if (!gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) { +// printf("Failed to predict\n"); +// return 1; +// } - t_predict_us += ggml_time_us() - t_start_us; - } +// t_predict_us += ggml_time_us() - t_start_us; +// } - n_past += embd.size(); - embd.clear(); +// n_past += embd.size(); +// embd.clear(); - if (i >= embd_inp.size()) { - // sample next token - const int top_k = params.top_k; - const float top_p = params.top_p; - const float temp = params.temp; +// if (i >= embd_inp.size()) { +// // sample next token +// const int top_k = params.top_k; +// const float top_p = params.top_p; +// const float temp = params.temp; - const int n_vocab = model.hparams.n_vocab; +// const int n_vocab = model.hparams.n_vocab; - gpt_vocab::id id = 0; +// gpt_vocab::id id = 0; - { - const int64_t t_start_sample_us = ggml_time_us(); +// { +// const int64_t t_start_sample_us = ggml_time_us(); - id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); +// id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng); - t_sample_us += ggml_time_us() - t_start_sample_us; - } +// t_sample_us += ggml_time_us() - t_start_sample_us; +// } - // add it to the context - embd.push_back(id); - } else { - // if here, it means we are still processing the input prompt - for (int k = i; k < embd_inp.size(); k++) { - embd.push_back(embd_inp[k]); - if (embd.size() > params.n_batch) { - break; - } - } - i += embd.size() - 1; - } +// // add it to the context +// embd.push_back(id); +// } else { +// // if here, it means we are still processing the input prompt +// for (int k = i; k < embd_inp.size(); k++) { +// embd.push_back(embd_inp[k]); +// if (embd.size() > params.n_batch) { +// break; +// } +// } +// i += embd.size() - 1; +// } - // display text - for (auto id : embd) { - printf("%s", vocab.id_to_token[id].c_str()); - } - fflush(stdout); +// // display text +// for (auto id : embd) { +// printf("%s", vocab.id_to_token[id].c_str()); +// } +// fflush(stdout); - // end of text token - if (embd.back() == 50256) { - break; - } - } +// // end of text token +// if (embd.back() == 50256) { +// break; +// } +// } - // report timing - { - const int64_t t_main_end_us = ggml_time_us(); +// // 
report timing
+//     {
+//         const int64_t t_main_end_us = ggml_time_us();

-        printf("\n\n");
-        printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
-        printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
-        printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
-        printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
-        printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
-    }
+//         printf("\n\n");
+//         printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
+//         printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
+//         printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
+//         printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
+//         printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
+//     }

-    ggml_free(model.ctx);
+//     ggml_free(model.ctx);

-    return 0;
-}
\ No newline at end of file
+//     return 0;
+// }
\ No newline at end of file
diff --git a/otherarch/otherarch.h b/otherarch/otherarch.h
index 71ad4269a..21e7103bb 100644
--- a/otherarch/otherarch.h
+++ b/otherarch/otherarch.h
@@ -46,6 +46,49 @@ struct gptj_layer {
     struct ggml_tensor * c_mlp_proj_w_trans; //for backwards compatibility
     struct ggml_tensor * c_mlp_proj_b;
 };
+struct gptj_layer_v1 {
+    // normalization
+    struct ggml_v1_tensor * ln_1_g;
+    struct ggml_v1_tensor * ln_1_b;
+
+    // attention
+    struct ggml_v1_tensor * c_attn_q_proj_w;
+    struct ggml_v1_tensor * c_attn_k_proj_w;
+    struct ggml_v1_tensor * c_attn_v_proj_w;
+
+    struct ggml_v1_tensor * c_attn_proj_w;
+
+    // ff
+    struct ggml_v1_tensor * c_mlp_fc_w;
+    struct ggml_v1_tensor * c_mlp_fc_b;
+
+    struct ggml_v1_tensor * c_mlp_proj_w;
+    struct ggml_v1_tensor * c_mlp_proj_w_trans; //for backwards compatibility
+    struct ggml_v1_tensor * c_mlp_proj_b;
+};
+
+struct gptj_model_v1 {
+    gptj_hparams hparams;
+
+    // normalization
+    struct ggml_v1_tensor * ln_f_g;
+    struct ggml_v1_tensor * ln_f_b;
+
+    struct ggml_v1_tensor * wte; // position embedding
+
+    struct ggml_v1_tensor * lmh_g; // language model head
+    struct ggml_v1_tensor * lmh_b; // language model bias
+
+    std::vector<gptj_layer_v1> layers;
+
+    // key + value memory
+    struct ggml_v1_tensor * memory_k;
+    struct ggml_v1_tensor * memory_v;
+
+    //
+    struct ggml_v1_context * ctx;
+    std::map<std::string, struct ggml_v1_tensor *> tensors;
+};
 struct gptj_model {
     gptj_hparams hparams;
@@ -70,7 +113,7 @@ struct gptj_model {
     std::map<std::string, struct ggml_tensor *> tensors;
 };
-bool legacy_gptj_model_load(const std::string &fname, gptj_model &model, gpt_vocab &vocab);
-bool legacy_gptj_eval(const gptj_model &model, const int n_threads, const int n_past, const std::vector<gpt_vocab::id> &embd_inp, std::vector<float> &embd_w, size_t &mem_per_token);
+bool legacy_gptj_model_load(const std::string &fname, gptj_model_v1 &model, gpt_vocab &vocab);
+bool legacy_gptj_eval(const gptj_model_v1 &model, const int n_threads, const int n_past, const std::vector<gpt_vocab::id> &embd_inp, std::vector<float> &embd_w, size_t &mem_per_token);
 bool gptj_model_load(const std::string &fname, gptj_model &model, gpt_vocab &vocab);
 bool gptj_eval(const gptj_model &model, const int n_threads, const int n_past, const std::vector<gpt_vocab::id> &embd_inp, std::vector<float> &embd_w, size_t &mem_per_token);
diff --git a/otherarch/utils.cpp b/otherarch/utils.cpp
index 7c2bed00f..9afdaaf86 100644
--- a/otherarch/utils.cpp
+++ b/otherarch/utils.cpp
@@ -3,7 +3,7 @@
 #include
 #include
-bool gpt_params_parse(int argc, char ** argv,
gpt_params & params) { +bool utils_gpt_params_parse(int argc, char ** argv, gpt_params & params) { for (int i = 1; i < argc; i++) { std::string arg = argv[i]; @@ -26,11 +26,11 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { } else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; } else if (arg == "-h" || arg == "--help") { - gpt_print_usage(argc, argv, params); + utils_gpt_print_usage(argc, argv, params); exit(0); } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); - gpt_print_usage(argc, argv, params); + utils_gpt_print_usage(argc, argv, params); exit(0); } } @@ -38,7 +38,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { return true; } -void gpt_print_usage(int argc, char ** argv, const gpt_params & params) { +void utils_gpt_print_usage(int argc, char ** argv, const gpt_params & params) { fprintf(stderr, "usage: %s [options]\n", argv[0]); fprintf(stderr, "\n"); fprintf(stderr, "options:\n"); @@ -57,7 +57,7 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params) { fprintf(stderr, "\n"); } -std::string gpt_random_prompt(std::mt19937 & rng) { +std::string utils_gpt_random_prompt(std::mt19937 & rng) { const int r = rng() % 10; switch (r) { case 0: return "So"; diff --git a/otherarch/utils.h b/otherarch/utils.h index 92bc29027..9248868bf 100644 --- a/otherarch/utils.h +++ b/otherarch/utils.h @@ -60,4 +60,8 @@ gpt_vocab::id gpt_sample_top_k_top_p( int top_k, double top_p, double temp, - std::mt19937 & rng); \ No newline at end of file + std::mt19937 & rng); + +bool utils_gpt_params_parse(int argc, char ** argv, gpt_params & params); +void utils_gpt_print_usage(int argc, char ** argv, const gpt_params & params); +std::string utils_gpt_random_prompt(std::mt19937 & rng);
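
Usage sketch (not part of the patch): the fragment below shows how the legacy GPT-J entry points declared in otherarch/otherarch.h above, together with the helpers from otherarch/utils.h, are expected to fit together. The include paths, the run_legacy_gptj helper name, the hard-coded thread count, and the sampling parameters are assumptions for illustration only; it presumes the ggml_v1 headers are on the include path.

    // Minimal driver for the v1 (legacy) GPT-J path: load, tokenize, evaluate, sample one token.
    #include <cstdio>
    #include <random>
    #include <string>
    #include <vector>

    #include "otherarch/otherarch.h"   // legacy_gptj_model_load / legacy_gptj_eval, gptj_model_v1
    #include "otherarch/utils.h"       // gpt_vocab, gpt_tokenize, gpt_sample_top_k_top_p

    // Hypothetical helper, not present in the patch.
    int run_legacy_gptj(const std::string & model_path, const std::string & prompt) {
        gpt_vocab vocab;
        gptj_model_v1 model;

        if (!legacy_gptj_model_load(model_path, model, vocab)) {
            fprintf(stderr, "failed to load model from '%s'\n", model_path.c_str());
            return 1;
        }

        std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize(vocab, prompt);
        std::vector<float> logits;
        size_t mem_per_token = 0;

        // Warm-up call to measure per-token memory, then evaluate the whole prompt at n_past = 0.
        legacy_gptj_eval(model, 4, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
        if (!legacy_gptj_eval(model, 4, 0, embd_inp, logits, mem_per_token)) {
            fprintf(stderr, "eval failed\n");
            return 1;
        }

        // Sample a single next token from the logits of the last position.
        std::mt19937 rng(1234);
        gpt_vocab::id id = gpt_sample_top_k_top_p(vocab,
                logits.data() + (logits.size() - model.hparams.n_vocab),
                /*top_k=*/40, /*top_p=*/0.9, /*temp=*/0.8, rng);
        printf("%s\n", vocab.id_to_token[id].c_str());
        return 0;
    }

The same call sequence applies to the non-legacy pair (gptj_model_load / gptj_eval with gptj_model), which differs only in the ggml context version it runs on.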