diff --git a/third_party/radpajama/README.cosmo b/third_party/radpajama/README.cosmo
index 8afbd578e..57d66694b 100644
--- a/third_party/radpajama/README.cosmo
+++ b/third_party/radpajama/README.cosmo
@@ -1,6 +1,6 @@
 DESCRIPTION

-  ggml is a machine learning library useful for LLM inference on CPUs
+  radpajama is a port of ggml for the open source Red Pajama LLM. It started as a fork of redpajama.cpp from Together Computer.

 LICENSE
@@ -8,22 +8,12 @@ LICENSE
 ORIGIN

-  https://github.com/ggerganov/llama.cpp
-  commit 0b2da20538d01926b77ea237dd1c930c4d20b686
-  Author: Stephan Walter
-  Date: Wed Apr 26 20:26:42 2023 +0000
-  ggml : slightly faster AVX2 implementation for Q5 (#1197)
+  github.com/togethercomputer/redpajama.cpp/
+  commit bfa6466199b8ef92185ecb72e2a550e12baf6602
+  Author: Szhangce
+  Date: Tue May 9 00:50:22 2023 +0200
+  radpajama : Update README.md

 LOCAL CHANGES

-  - Make it possible for loaded prompts to be cached to disk
-  - Introduce -v and --verbose flags
-  - Reduce batch size from 512 to 32
-  - Allow --n_keep to specify a substring of prompt
-  - Don't print stats / diagnostics unless -v is passed
-  - Reduce --top_p default from 0.95 to 0.70
-  - Change --reverse-prompt to no longer imply --interactive
-  - Permit --reverse-prompt specifying custom EOS if non-interactive
-  - Refactor headers per cosmo convention
-  - Replace code like 'ggjt' with READ32BE("ggjt")
-  - Remove C++ exceptions; use Die() function instead
+  - Updated headers for the cosmo build.
diff --git a/third_party/radpajama/README.md b/third_party/radpajama/README.md
new file mode 100644
index 000000000..68054411f
--- /dev/null
+++ b/third_party/radpajama/README.md
@@ -0,0 +1,143 @@
+# ggml Support for RedPajama Model
+
+## Acknowledgement
+
+We highly appreciate the great effort behind the fork of [gptneox.cpp](https://github.com/byroneverson/gptneox.cpp). Our support for the RedPajama model is mainly based on that implementation. We extended the model configuration and fixed a bug that occurred when the use_parallel_residual flag was set to False in the original implementation. We also extended the chat model for RedPajama.
+
+## Usage:
+
+### RedPajama Chat model:
+
+- Make the code:
+
+      make redpajama-chat quantize-gptneox
+
+
+- Prepare the RedPajama model (f16 and q4_0) for ggml:
+
+      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Chat-3B-v1.sh
+
+- Run the RedPajama chat model (fp16):
+
+      ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin \
+             -c 2048 \
+             -b 128 \
+             -n 1 \
+             -t 8 \
+             --instruct \
+             --color \
+             --top_k 30 \
+             --top_p 0.95 \
+             --temp 0.8 \
+             --repeat_last_n 3 \
+             --repeat_penalty 1.1 \
+             --seed 0
+
+  Note that you may need to install torch and transformers to run the above scripts, e.g.:
+
+      pip install torch==2.0.0
+      pip install transformers==4.28.1
+
+
+- Run the RedPajama chat model (q4_0):
+
+      ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-q4_0.bin \
+             -c 2048 \
+             -b 128 \
+             -n 1 \
+             -t 8 \
+             --instruct \
+             --color \
+             --top_k 30 \
+             --top_p 0.95 \
+             --temp 0.8 \
+             --repeat_last_n 3 \
+             --repeat_penalty 1.1 \
+             --seed 0
+
+- Run other quantized versions of the RedPajama Chat model (make sure you have prepared the f16 model before running this):
+
+    - Make the code to quantize the model if you have not already:
+
+          make quantize-gptneox
+
+    - Generate the quantized model; the supported types include q4_0, q4_1, q4_2, q5_0, q5_1, and q8_0.
+      For example, to run q4_1, you need to do the following conversion:
+
+          python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin --quantize-output-type q4_1
+
+    - Then you can chat with the quantized model:
+
+          ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-q4_1.bin \
+                 -c 2048 \
+                 -b 128 \
+                 -n 1 \
+                 -t 8 \
+                 --instruct \
+                 --color \
+                 --top_k 30 \
+                 --top_p 0.95 \
+                 --temp 0.8 \
+                 --repeat_last_n 3 \
+                 --repeat_penalty 1.1 \
+                 --seed 0
+
+
+
+
+### RedPajama Base/Instruct model:
+
+- Make the code:
+
+      make redpajama quantize-gptneox
+
+
+- Prepare the RedPajama Base/Instruct model (f16 and q4_0) for ggml:
+
+      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Base-3B-v1.sh
+
+      # Or
+
+      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Instruct-3B-v1.sh
+
+- Run other quantized versions of the RedPajama Base/Instruct model (make sure you have prepared the f16 model before running this). You can then generate the quantized model; the supported types include q4_0, q4_1, q4_2, q5_0, q5_1, and q8_0. For example, to produce the q8_0 version of RedPajama-Base, run the following conversion:
+
+      python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Base-3B-v1-f16.bin --quantize-output-type q8_0
+
+- Run the RedPajama Base/Instruct model (e.g., RedPajama-Instruct q8_0):
+
+      ./redpajama -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-q8_0.bin \
+             -c 2048 \
+             -b 128 \
+             -n 1 \
+             -t 8 \
+             --color \
+             --top_k 30 \
+             --top_p 0.95 \
+             --temp 0.8 \
+             --repeat_last_n 3 \
+             --repeat_penalty 1.1 \
+             --seed 0 \
+             --n_predict 256 \
+             --verbose-prompt \
+             -p "How to schedule a tour to Anfield:"
+
+
+## Attribution
+
+The following files are covered by an MIT license and were taken from:
+
+https://github.com/byroneverson/gptneox.cpp
+
+Thank you, Byron.
+
+```
+common-gptneox.cpp
+copy-gptneox.cpp
+gptneox.cpp
+quantize-gptneox.cpp
+common-gptneox.h
+gptneox-util.h
+gptneox.h
+convert_gptneox_to_ggml.py
+quantize-gptneox.py
+```
\ No newline at end of file
diff --git a/third_party/radpajama/common-gptneox.cpp b/third_party/radpajama/common-gptneox.cpp
new file mode 100644
index 000000000..a522f0837
--- /dev/null
+++ b/third_party/radpajama/common-gptneox.cpp
@@ -0,0 +1,429 @@
+#include "common-gptneox.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iterator>
+#include <sstream>
+#include <string>
+
+#if defined (_WIN32)
+#include <fcntl.h>
+#include <io.h>
+#pragma comment(lib,"kernel32.lib")
+extern "C" __declspec(dllimport) void* __stdcall GetStdHandle(unsigned long nStdHandle);
+extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHandle, unsigned long* lpMode);
+extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode);
+extern "C" __declspec(dllimport) int __stdcall SetConsoleCP(unsigned int wCodePageID);
+extern "C" __declspec(dllimport) int __stdcall SetConsoleOutputCP(unsigned int wCodePageID);
+extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int CodePage, unsigned long dwFlags,
+                                                                   const wchar_t * lpWideCharStr, int cchWideChar,
+                                                                   char * lpMultiByteStr, int cbMultiByte,
+                                                                   const char * lpDefaultChar, bool * lpUsedDefaultChar);
+#define CP_UTF8 65001
+#endif
+
+bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
+    // determine sensible default number of threads.
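+    // On Linux the number of logical CPUs is taken from /proc/cpuinfo by counting
+    // "processor" entries; whenever that count comes out as zero, the fallback below
+    // uses hardware_concurrency() clamped to at least one thread.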
+ // std::thread::hardware_concurrency may not be equal to the number of cores, or may return 0. +#ifdef __linux__ + std::ifstream cpuinfo("/proc/cpuinfo"); + params.n_threads = std::count(std::istream_iterator(cpuinfo), + std::istream_iterator(), + std::string("processor")); +#endif + if (params.n_threads == 0) { + params.n_threads = std::max(1, (int32_t) std::thread::hardware_concurrency()); + } + + bool invalid_param = false; + std::string arg; + gpt_params default_params; + + for (int i = 1; i < argc; i++) { + arg = argv[i]; + + if (arg == "-s" || arg == "--seed") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.seed = std::stoi(argv[i]); + } else if (arg == "-t" || arg == "--threads") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.n_threads = std::stoi(argv[i]); + } else if (arg == "-p" || arg == "--prompt") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.prompt = argv[i]; + } else if (arg == "--session") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.path_session = argv[i]; + } else if (arg == "-f" || arg == "--file") { + if (++i >= argc) { + invalid_param = true; + break; + } + std::ifstream file(argv[i]); + if (!file) { + fprintf(stderr, "error: failed to open file '%s'\n", argv[i]); + invalid_param = true; + break; + } + std::copy(std::istreambuf_iterator(file), std::istreambuf_iterator(), back_inserter(params.prompt)); + if (params.prompt.back() == '\n') { + params.prompt.pop_back(); + } + } else if (arg == "-n" || arg == "--n_predict") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.n_predict = std::stoi(argv[i]); + } else if (arg == "--top_k") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.top_k = std::stoi(argv[i]); + } else if (arg == "-c" || arg == "--ctx_size") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.n_ctx = std::stoi(argv[i]); + } else if (arg == "--memory_f32") { + params.memory_f16 = false; + } else if (arg == "--top_p") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.top_p = std::stof(argv[i]); + } else if (arg == "--temp") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.temp = std::stof(argv[i]); + } else if (arg == "--tfs") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.tfs_z = std::stof(argv[i]); + } else if (arg == "--typical") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.typical_p = std::stof(argv[i]); + } else if (arg == "--repeat_last_n") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.repeat_last_n = std::stoi(argv[i]); + } else if (arg == "--repeat_penalty") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.repeat_penalty = std::stof(argv[i]); + } else if (arg == "--frequency_penalty") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.frequency_penalty = std::stof(argv[i]); + } else if (arg == "--presence_penalty") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.presence_penalty = std::stof(argv[i]); + } else if (arg == "--mirostat") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.mirostat = std::stoi(argv[i]); + } else if (arg == "--mirostat_lr") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.mirostat_eta = std::stof(argv[i]); + } else if (arg == "--mirostat_ent") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.mirostat_tau = std::stof(argv[i]); + } else if (arg == "-b" || arg 
== "--batch_size") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.n_batch = std::stoi(argv[i]); + params.n_batch = std::min(512, params.n_batch); + } else if (arg == "--keep") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.n_keep = std::stoi(argv[i]); + } else if (arg == "-m" || arg == "--model") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.model = argv[i]; + } else if (arg == "--lora") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.lora_adapter = argv[i]; + params.use_mmap = false; + } else if (arg == "--lora-base") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.lora_base = argv[i]; + } else if (arg == "-i" || arg == "--interactive") { + params.interactive = true; + } else if (arg == "--embedding") { + params.embedding = true; + } else if (arg == "--interactive-first") { + params.interactive_first = true; + } else if (arg == "-ins" || arg == "--instruct") { + params.instruct = true; + } else if (arg == "--color") { + params.use_color = true; + } else if (arg == "--mlock") { + params.use_mlock = true; + } else if (arg == "--no-mmap") { + params.use_mmap = false; + } else if (arg == "--mtest") { + params.mem_test = true; + } else if (arg == "--verbose-prompt") { + params.verbose_prompt = true; + } else if (arg == "-r" || arg == "--reverse-prompt") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.antiprompt.push_back(argv[i]); + } else if (arg == "--perplexity") { + params.perplexity = true; + } else if (arg == "--ignore-eos") { + params.logit_bias[gptneox_token_eos()] = -INFINITY; + } else if (arg == "--no-penalize-nl") { + params.penalize_nl = false; + } else if (arg == "-l" || arg == "--logit-bias") { + if (++i >= argc) { + invalid_param = true; + break; + } + std::stringstream ss(argv[i]); + gptneox_token key; + char sign; + std::string value_str; + try { + if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) { + params.logit_bias[key] = std::stof(value_str) * ((sign == '-') ? 
-1.0f : 1.0f); + } else { + throw std::exception(); + } + } catch (const std::exception &e) { + invalid_param = true; + break; + } + } else if (arg == "--n_parts") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.n_parts = std::stoi(argv[i]); + } else if (arg == "-h" || arg == "--help") { + gpt_print_usage(argc, argv, default_params); + exit(0); + } else if (arg == "--random-prompt") { + params.random_prompt = true; + } else if (arg == "--in-prefix") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.input_prefix = argv[i]; + } else { + fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); + gpt_print_usage(argc, argv, default_params); + exit(1); + } + } + if (invalid_param) { + fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str()); + gpt_print_usage(argc, argv, default_params); + exit(1); + } + + return true; +} + +void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { + fprintf(stderr, "usage: %s [options]\n", argv[0]); + fprintf(stderr, "\n"); + fprintf(stderr, "options:\n"); + fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -i, --interactive run in interactive mode\n"); + fprintf(stderr, " --interactive-first run in interactive mode and wait for input right away\n"); + fprintf(stderr, " -ins, --instruct run in instruction mode\n"); + fprintf(stderr, " -r PROMPT, --reverse-prompt PROMPT\n"); + fprintf(stderr, " run in interactive mode and poll user input upon seeing PROMPT (can be\n"); + fprintf(stderr, " specified more than once for multiple prompts).\n"); + fprintf(stderr, " --color colorise output to distinguish prompt and user input from generations\n"); + fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1, use random seed for <= 0)\n"); + fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + fprintf(stderr, " -p PROMPT, --prompt PROMPT\n"); + fprintf(stderr, " prompt to start generation with (default: empty)\n"); + fprintf(stderr, " --session FNAME file to cache model state in (may be large!) 
(default: none)\n"); + fprintf(stderr, " --random-prompt start with a randomized prompt.\n"); + fprintf(stderr, " --in-prefix STRING string to prefix user inputs with (default: empty)\n"); + fprintf(stderr, " -f FNAME, --file FNAME\n"); + fprintf(stderr, " prompt file to start generation.\n"); + fprintf(stderr, " -n N, --n_predict N number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict); + fprintf(stderr, " --top_k N top-k sampling (default: %d, 0 = disabled)\n", params.top_k); + fprintf(stderr, " --top_p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p); + fprintf(stderr, " --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z); + fprintf(stderr, " --typical N locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)params.typical_p); + fprintf(stderr, " --repeat_last_n N last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", params.repeat_last_n); + fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)params.repeat_penalty); + fprintf(stderr, " --presence_penalty N repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)params.presence_penalty); + fprintf(stderr, " --frequency_penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)params.frequency_penalty); + fprintf(stderr, " --mirostat N use Mirostat sampling.\n"); + fprintf(stderr, " Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n"); + fprintf(stderr, " (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", params.mirostat); + fprintf(stderr, " --mirostat_lr N Mirostat learning rate, parameter eta (default: %.1f)\n", (double)params.mirostat_eta); + fprintf(stderr, " --mirostat_ent N Mirostat target entropy, parameter tau (default: %.1f)\n", (double)params.mirostat_tau); + fprintf(stderr, " -l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS\n"); + fprintf(stderr, " modifies the likelihood of token appearing in the completion,\n"); + fprintf(stderr, " i.e. 
`--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"); + fprintf(stderr, " or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n"); + fprintf(stderr, " -c N, --ctx_size N size of the prompt context (default: %d)\n", params.n_ctx); + fprintf(stderr, " --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n"); + fprintf(stderr, " --no-penalize-nl do not penalize newline token\n"); + fprintf(stderr, " --memory_f32 use f32 instead of f16 for memory key+value\n"); + fprintf(stderr, " --temp N temperature (default: %.1f)\n", (double)params.temp); + fprintf(stderr, " --n_parts N number of model parts (default: -1 = determine from dimensions)\n"); + fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch); + fprintf(stderr, " --perplexity compute perplexity over the prompt\n"); + fprintf(stderr, " --keep number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep); + if (gptneox_mlock_supported()) { + fprintf(stderr, " --mlock force system to keep model in RAM rather than swapping or compressing\n"); + } + if (gptneox_mmap_supported()) { + fprintf(stderr, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); + } + fprintf(stderr, " --mtest compute maximum memory usage\n"); + fprintf(stderr, " --verbose-prompt print prompt before generation\n"); + fprintf(stderr, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n"); + fprintf(stderr, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n"); + fprintf(stderr, " -m FNAME, --model FNAME\n"); + fprintf(stderr, " model path (default: %s)\n", params.model.c_str()); + fprintf(stderr, "\n"); +} + +std::string gpt_random_prompt(std::mt19937 & rng) { + const int r = rng() % 10; + switch (r) { + case 0: return "So"; + case 1: return "Once upon a time"; + case 2: return "When"; + case 3: return "The"; + case 4: return "After"; + case 5: return "If"; + case 6: return "import"; + case 7: return "He"; + case 8: return "She"; + case 9: return "They"; + default: return "To"; + } + + return "The"; +} + +// TODO: not great allocating this every time +std::vector gptneox_tokenize(struct gptneox_context * ctx, const std::string & text, bool add_bos) { + // initialize to prompt numer of chars, since n_tokens <= n_prompt_chars + std::vector res(text.size() + (int)add_bos); + int n = gptneox_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos); + assert(n >= 0); + res.resize(n); + + return res; +} + +/* Keep track of current color of output, and emit ANSI code if it changes. 
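   Only three states are tracked: CONSOLE_COLOR_DEFAULT resets the terminal,
   CONSOLE_COLOR_PROMPT switches to yellow, and CONSOLE_COLOR_USER_INPUT to bold green;
   nothing is printed unless con_st.use_color is set and the color actually changes.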
*/ +void set_console_color(console_state & con_st, console_color_t color) { + if (con_st.use_color && con_st.color != color) { + switch(color) { + case CONSOLE_COLOR_DEFAULT: + printf(ANSI_COLOR_RESET); + break; + case CONSOLE_COLOR_PROMPT: + printf(ANSI_COLOR_YELLOW); + break; + case CONSOLE_COLOR_USER_INPUT: + printf(ANSI_BOLD ANSI_COLOR_GREEN); + break; + } + con_st.color = color; + } +} + +#if defined (_WIN32) +void win32_console_init(bool enable_color) { + unsigned long dwMode = 0; + void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11) + if (!hConOut || hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode)) { + hConOut = GetStdHandle((unsigned long)-12); // STD_ERROR_HANDLE (-12) + if (hConOut && (hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode))) { + hConOut = 0; + } + } + if (hConOut) { + // Enable ANSI colors on Windows 10+ + if (enable_color && !(dwMode & 0x4)) { + SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4) + } + // Set console output codepage to UTF8 + SetConsoleOutputCP(CP_UTF8); + } + void* hConIn = GetStdHandle((unsigned long)-10); // STD_INPUT_HANDLE (-10) + if (hConIn && hConIn != (void*)-1 && GetConsoleMode(hConIn, &dwMode)) { + // Set console input codepage to UTF16 + _setmode(_fileno(stdin), _O_WTEXT); + } +} + +// Convert a wide Unicode string to an UTF8 string +void win32_utf8_encode(const std::wstring & wstr, std::string & str) { + int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL); + std::string strTo(size_needed, 0); + WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL); + str = strTo; +} +#endif diff --git a/third_party/radpajama/common-gptneox.h b/third_party/radpajama/common-gptneox.h new file mode 100644 index 000000000..81c25494e --- /dev/null +++ b/third_party/radpajama/common-gptneox.h @@ -0,0 +1,108 @@ +// Various helper functions and utilities + +#pragma once + +#include "gptneox.h" + +#include +#include +#include +#include +#include + +// +// CLI argument parsing +// + +struct gpt_params { + int32_t seed = -1; // RNG seed + int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); + int32_t n_predict = 128; // new tokens to predict + int32_t n_parts = -1; // amount of model parts (-1 = determine from model dimensions) + int32_t n_ctx = 512; // context size + int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) + int32_t n_keep = 0; // number of tokens to keep from initial prompt + + // sampling parameters + std::unordered_map logit_bias; // logit bias for specific tokens + int32_t top_k = 40; // <= 0 to use vocab size + float top_p = 0.95f; // 1.0 = disabled + float tfs_z = 1.00f; // 1.0 = disabled + float typical_p = 1.00f; // 1.0 = disabled + float temp = 0.80f; // 1.0 = disabled + float repeat_penalty = 1.10f; // 1.0 = disabled + int32_t repeat_last_n = 64; // last n tokens to penalize (0 = disable penalty, -1 = context size) + float frequency_penalty = 0.00f; // 0.0 = disabled + float presence_penalty = 0.00f; // 0.0 = disabled + int mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0 + float mirostat_tau = 5.00f; // target entropy + float mirostat_eta = 0.10f; // learning rate + + std::string model = "./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat/Instruct-3B-v1-f16.bin"; // model path + std::string prompt = ""; + std::string path_session = ""; // path to file for saving/loading model eval state + 
std::string input_prefix = ""; // string to prefix user inputs with + std::vector antiprompt; // string upon seeing which more user input is prompted + + std::string lora_adapter = ""; // lora adapter path + std::string lora_base = ""; // base model path for the lora adapter + + bool memory_f16 = true; // use f16 instead of f32 for memory kv + bool random_prompt = false; // do not randomize prompt if none provided + bool use_color = false; // use color to distinguish generations and inputs + bool interactive = false; // interactive mode + + bool embedding = false; // get only sentence embedding + bool interactive_first = false; // wait for user input immediately + + bool instruct = false; // instruction mode + bool penalize_nl = true; // consider newlines as a repeatable token + bool perplexity = false; // compute perplexity over the prompt + bool use_mmap = true; // use mmap for faster loads + bool use_mlock = false; // use mlock to keep model in memory + bool mem_test = false; // compute maximum memory usage + bool verbose_prompt = false; // print prompt tokens before generation +}; + +bool gpt_params_parse(int argc, char ** argv, gpt_params & params); + +void gpt_print_usage(int argc, char ** argv, const gpt_params & params); + +std::string gpt_random_prompt(std::mt19937 & rng); + +// +// Vocab utils +// + +std::vector gptneox_tokenize(struct gptneox_context * ctx, const std::string & text, bool add_bos); + +// +// Console utils +// + +#define ANSI_COLOR_RED "\x1b[31m" +#define ANSI_COLOR_GREEN "\x1b[32m" +#define ANSI_COLOR_YELLOW "\x1b[33m" +#define ANSI_COLOR_BLUE "\x1b[34m" +#define ANSI_COLOR_MAGENTA "\x1b[35m" +#define ANSI_COLOR_CYAN "\x1b[36m" +#define ANSI_COLOR_RESET "\x1b[0m" +#define ANSI_BOLD "\x1b[1m" + +enum console_color_t { + CONSOLE_COLOR_DEFAULT=0, + CONSOLE_COLOR_PROMPT, + CONSOLE_COLOR_USER_INPUT +}; + +struct console_state { + bool use_color = false; + console_color_t color = CONSOLE_COLOR_DEFAULT; +}; + +void set_console_color(console_state & con_st, console_color_t color); + +#if defined (_WIN32) +void win32_console_init(bool enable_color); +void win32_utf8_encode(const std::wstring & wstr, std::string & str); +#endif diff --git a/third_party/radpajama/copy-gptneox.cpp b/third_party/radpajama/copy-gptneox.cpp new file mode 100644 index 000000000..160000e19 --- /dev/null +++ b/third_party/radpajama/copy-gptneox.cpp @@ -0,0 +1,57 @@ +#include "ggml.h" +#include "gptneox.h" + +#include +#include +#include + +static const std::map GPTNEOX_FTYPE_MAP = { + {"q4_0", GPTNEOX_FTYPE_MOSTLY_Q4_0}, + {"q4_1", GPTNEOX_FTYPE_MOSTLY_Q4_1}, + {"q4_2", GPTNEOX_FTYPE_MOSTLY_Q4_2}, + //{"q4_3", GPTNEOX_FTYPE_MOSTLY_Q4_3}, + {"q5_0", GPTNEOX_FTYPE_MOSTLY_Q5_0}, + {"q5_1", GPTNEOX_FTYPE_MOSTLY_Q5_1}, + {"q8_0", GPTNEOX_FTYPE_MOSTLY_Q8_0}, +}; + +// usage: +// ./quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type +// +int main(int argc, char ** argv) { + ggml_time_init(); + + if (argc < 4) { + fprintf(stderr, "usage: %s model-f32.bin model-quant.bin ftype\n", argv[0]); + for (auto it = GPTNEOX_FTYPE_MAP.begin(); it != GPTNEOX_FTYPE_MAP.end(); it++) { + fprintf(stderr, " type = \"%s\" or %d\n", it->first.c_str(), it->second); + } + return 1; + } + + // needed to initialize f16 tables + { + struct ggml_init_params params = { 0, NULL, false }; + struct ggml_context * ctx = ggml_init(params); + ggml_free(ctx); + } + + const std::string fname_inp = argv[1]; + const std::string fname_out = argv[2]; + + enum gptneox_ftype ftype; + if (argv[3][0] == 'q') { + auto it = 
GPTNEOX_FTYPE_MAP.find(argv[3]); + if (it == GPTNEOX_FTYPE_MAP.end()) { + fprintf(stderr, "%s: unknown ftype '%s'\n", __func__, argv[3]); + return 1; + } + ftype = it->second; + } else { + ftype = (enum gptneox_ftype)atoi(argv[3]); + } + + gptneox_model_copy(fname_inp.c_str(), fname_out.c_str(), ftype); + + return 0; +} diff --git a/third_party/radpajama/gptneox-util.h b/third_party/radpajama/gptneox-util.h new file mode 100644 index 000000000..52d7bf80a --- /dev/null +++ b/third_party/radpajama/gptneox-util.h @@ -0,0 +1,433 @@ +// Internal header to be included only by llama.cpp. +// Contains wrappers around OS interfaces. + +#ifndef GPTNEOX_UTIL_H +#define GPTNEOX_UTIL_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef __has_include + #if __has_include() + #include + #if defined(_POSIX_MAPPED_FILES) + #include + #endif + #if defined(_POSIX_MEMLOCK_RANGE) + #include + #endif + #endif +#endif + +#if defined(_WIN32) + #define WIN32_LEAN_AND_MEAN + #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #include + #include // for _fseeki64 +#endif + +#define GPTNEOX_ASSERT(x) \ + do { \ + if (!(x)) { \ + fprintf(stderr, "GPTNEOX_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ + abort(); \ + } \ + } while (0) + +#ifdef __GNUC__ +#ifdef __MINGW32__ +__attribute__((format(gnu_printf, 1, 2))) +#else +__attribute__((format(printf, 1, 2))) +#endif +#endif +static std::string format(const char * fmt, ...) { + va_list ap, ap2; + va_start(ap, fmt); + va_copy(ap2, ap); + int size = vsnprintf(NULL, 0, fmt, ap); + GPTNEOX_ASSERT(size >= 0 && size < INT_MAX); + std::vector buf(size + 1); + int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); + GPTNEOX_ASSERT(size2 == size); + va_end(ap2); + va_end(ap); + return std::string(buf.data(), size); +} + +struct gptneox_file { + // use FILE * so we don't have to re-open the file to mmap + FILE * fp; + size_t size; + + gptneox_file(const char * fname, const char * mode) { + fp = std::fopen(fname, mode); + if (fp == NULL) { + throw format("failed to open %s: %s", fname, std::strerror(errno)); + } + seek(0, SEEK_END); + size = tell(); + seek(0, SEEK_SET); + } + + size_t tell() const { +#ifdef _WIN32 + __int64 ret = _ftelli64(fp); +#else + long ret = std::ftell(fp); +#endif + GPTNEOX_ASSERT(ret != -1); // this really shouldn't fail + return (size_t) ret; + } + + void seek(size_t offset, int whence) { +#ifdef _WIN32 + int ret = _fseeki64(fp, (__int64) offset, whence); +#else + int ret = std::fseek(fp, (long) offset, whence); +#endif + GPTNEOX_ASSERT(ret == 0); // same + } + + void read_raw(void * ptr, size_t size) { + if (size == 0) { + return; + } + errno = 0; + std::size_t ret = std::fread(ptr, size, 1, fp); + if (ferror(fp)) { + throw format("read error: %s", strerror(errno)); + } + if (ret != 1) { + throw std::string("unexpectedly reached end of file"); + } + } + + std::uint32_t read_u32() { + std::uint32_t ret; + read_raw(&ret, sizeof(ret)); + return ret; + } + + std::string read_string(std::uint32_t len) { + std::vector chars(len); + read_raw(chars.data(), len); + return std::string(chars.data(), len); + } + + void write_raw(const void * ptr, size_t size) { + if (size == 0) { + return; + } + errno = 0; + size_t ret = std::fwrite(ptr, size, 1, fp); + if (ret != 1) { + throw format("write error: %s", strerror(errno)); + } + } + + void write_u32(std::uint32_t val) { + write_raw(&val, sizeof(val)); + } + + ~gptneox_file() { + if (fp) { + std::fclose(fp); + } + } +}; + +#if defined(_WIN32) +static 
std::string gptneox_format_win_err(DWORD err) { + LPSTR buf; + size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); + if (!size) { + return "FormatMessageA failed"; + } + std::string ret(buf, size); + LocalFree(buf); + return ret; +} +#endif + +struct gptneox_mmap { + void * addr; + size_t size; + + gptneox_mmap(const gptneox_mmap &) = delete; + +#ifdef _POSIX_MAPPED_FILES + static constexpr bool SUPPORTED = true; + + gptneox_mmap(struct gptneox_file * file, bool prefetch = true) { + size = file->size; + int fd = fileno(file->fp); + int flags = MAP_SHARED; +#ifdef __linux__ + flags |= MAP_POPULATE; +#endif + addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0); + if (addr == MAP_FAILED) { + throw format("mmap failed: %s", strerror(errno)); + } + + if (prefetch) { + // Advise the kernel to preload the mapped memory + if (madvise(addr, file->size, MADV_WILLNEED)) { + fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n", + strerror(errno)); + } + } + } + + ~gptneox_mmap() { + munmap(addr, size); + } +#elif defined(_WIN32) + static constexpr bool SUPPORTED = true; + + gptneox_mmap(struct gptneox_file * file, bool prefetch = true) { + size = file->size; + + HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp)); + + HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); + DWORD error = GetLastError(); + + if (hMapping == NULL) { + throw format("CreateFileMappingA failed: %s", gptneox_format_win_err(error).c_str()); + } + + addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); + error = GetLastError(); + CloseHandle(hMapping); + + if (addr == NULL) { + throw format("MapViewOfFile failed: %s", gptneox_format_win_err(error).c_str()); + } + + #if _WIN32_WINNT >= _WIN32_WINNT_WIN8 + if (prefetch) { + // Advise the kernel to preload the mapped memory + WIN32_MEMORY_RANGE_ENTRY range; + range.VirtualAddress = addr; + range.NumberOfBytes = (SIZE_T)size; + if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { + fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n", + gptneox_format_win_err(GetLastError()).c_str()); + } + } + #else + #pragma message("warning: You are building for pre-Windows 8; prefetch not supported") + #endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8 + } + + ~gptneox_mmap() { + if (!UnmapViewOfFile(addr)) { + fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n", + gptneox_format_win_err(GetLastError()).c_str()); + } + } +#else + static constexpr bool SUPPORTED = false; + + gptneox_mmap(struct gptneox_file *) { + throw std::string("mmap not supported"); + } +#endif +}; + +// Represents some region of memory being locked using mlock or VirtualLock; +// will automatically unlock on destruction. 
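+// Typical usage (as in the model loader in gptneox.cpp): call init() once with the
+// base address of the mapping, then grow_to(bytes_loaded) as more of the file is read.
+// grow_to() rounds the target up to the lock granularity (the page size), locks only
+// the newly added tail, and remembers a failed lock so it is not retried.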
+struct gptneox_mlock { + void * addr = NULL; + size_t size = 0; + bool failed_already = false; + + gptneox_mlock() {} + gptneox_mlock(const gptneox_mlock &) = delete; + + ~gptneox_mlock() { + if (size) { + raw_unlock(addr, size); + } + } + + void init(void * addr) { + GPTNEOX_ASSERT(this->addr == NULL && this->size == 0); + this->addr = addr; + } + + void grow_to(size_t target_size) { + GPTNEOX_ASSERT(addr); + if (failed_already) { + return; + } + size_t granularity = lock_granularity(); + target_size = (target_size + granularity - 1) & ~(granularity - 1); + if (target_size > size) { + if (raw_lock((uint8_t *) addr + size, target_size - size)) { + size = target_size; + } else { + failed_already = true; + } + } + } + +#ifdef _POSIX_MEMLOCK_RANGE + static constexpr bool SUPPORTED = true; + + size_t lock_granularity() { + return (size_t) sysconf(_SC_PAGESIZE); + } + + #ifdef __APPLE__ + #define MLOCK_SUGGESTION \ + "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ + "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n" + #else + #define MLOCK_SUGGESTION \ + "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n" + #endif + + bool raw_lock(const void * addr, size_t size) { + if (!mlock(addr, size)) { + return true; + } else { + char* errmsg = std::strerror(errno); + bool suggest = (errno == ENOMEM); + + // Check if the resource limit is fine after all + struct rlimit lock_limit; + if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) + suggest = false; + if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) + suggest = false; + + fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", + size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); + return false; + } + } + + #undef MLOCK_SUGGESTION + + void raw_unlock(void * addr, size_t size) { + if (munlock(addr, size)) { + fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno)); + } + } +#elif defined(_WIN32) + static constexpr bool SUPPORTED = true; + + size_t lock_granularity() { + SYSTEM_INFO si; + GetSystemInfo(&si); + return (size_t) si.dwPageSize; + } + + bool raw_lock(void * addr, size_t size) { + for (int tries = 1; ; tries++) { + if (VirtualLock(addr, size)) { + return true; + } + if (tries == 2) { + fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n", + size, this->size, gptneox_format_win_err(GetLastError()).c_str()); + return false; + } + + // It failed but this was only the first try; increase the working + // set size and try again. + SIZE_T min_ws_size, max_ws_size; + if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { + fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n", + gptneox_format_win_err(GetLastError()).c_str()); + return false; + } + // Per MSDN: "The maximum number of pages that a process can lock + // is equal to the number of pages in its minimum working set minus + // a small overhead." 
+ // Hopefully a megabyte is enough overhead: + size_t increment = size + 1048576; + // The minimum must be <= the maximum, so we need to increase both: + min_ws_size += increment; + max_ws_size += increment; + if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { + fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n", + gptneox_format_win_err(GetLastError()).c_str()); + return false; + } + } + } + + void raw_unlock(void * addr, size_t size) { + if (!VirtualUnlock(addr, size)) { + fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n", + gptneox_format_win_err(GetLastError()).c_str()); + } + } +#else + static constexpr bool SUPPORTED = false; + + void raw_lock(const void * addr, size_t size) { + fprintf(stderr, "warning: mlock not supported on this system\n"); + } + + void raw_unlock(const void * addr, size_t size) {} +#endif +}; + +// Replacement for std::vector that doesn't require zero-initialization. +struct gptneox_buffer { + uint8_t * addr = NULL; + size_t size = 0; + + void resize(size_t size) { + delete[] addr; + addr = new uint8_t[size]; + this->size = size; + } + + ~gptneox_buffer() { + delete[] addr; + } +}; + +#ifdef GGML_USE_CUBLAS +#include "ggml-cuda.h" +struct gptneox_ctx_buffer { + uint8_t * addr = NULL; + size_t size = 0; + + void resize(size_t size) { + if (addr) { + ggml_cuda_host_free(addr); + } + addr = (uint8_t *) ggml_cuda_host_malloc(size); + this->size = size; + } + + ~gptneox_ctx_buffer() { + if (addr) { + ggml_cuda_host_free(addr); + } + } +}; +#else +typedef gptneox_buffer gptneox_ctx_buffer; +#endif + +#endif diff --git a/third_party/radpajama/gptneox.cpp b/third_party/radpajama/gptneox.cpp new file mode 100644 index 000000000..f7fcfc292 --- /dev/null +++ b/third_party/radpajama/gptneox.cpp @@ -0,0 +1,2942 @@ +// Defines fileno on msys: +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#include +#include +#endif + +#include "gptneox-util.h" +#include "gptneox.h" + +#include "../ggml.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// TODO: Add back in n_ctx (max_position_embeddings) to ggml model, it is currently hard-coded to 2048 max for llama + +#define GPTNEOX_USE_SCRATCH +#define GPTNEOX_MAX_SCRATCH_BUFFERS 16 + + +enum e_model { + MODEL_UNKNOWN, + MODEL_3B, // StabilityAI Base Alpha 3B + MODEL_7B, + MODEL_12B, + MODEL_20B, +}; + +static const size_t MiB = 1024*1024; + +// computed for n_ctx == 2048 +// TODO: dynamically determine these sizes +// TODO: To load the stablelm 3B model on my test XR will require some tricks, small ggml context size, mmap support, among others, but is maybe feasible, is a smaller n_ctx required? 512 instead of 2048/4096? Does mmap work as desired on iOS? +// needs modifications in ggml + +// TODO: Modify for gptneox, how are these values actually determined? +// TODO: This is now priority, +static const std::map & MEM_REQ_SCRATCH0() +{ + static std::map _MEM_REQ_SCRATCH0 = { + { MODEL_3B, 128ull * MiB }, + { MODEL_7B, 512ull * MiB }, + { MODEL_12B, 512ull * MiB }, + { MODEL_20B, 512ull * MiB }, + }; + return _MEM_REQ_SCRATCH0; +} + +// TODO: Modify for gptneox, how are these values actually determined? 
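+// (Like MEM_REQ_SCRATCH0 above, these are fixed per-model upper bounds rather than
+// sizes computed from the compute graph; they back the scratch buffers switched via
+// gptneox_context::use_buf() during evaluation.)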
+static const std::map & MEM_REQ_SCRATCH1() +{ + static std::map _MEM_REQ_SCRATCH1 = { + { MODEL_3B, 128ull * MiB }, + { MODEL_7B, 512ull * MiB }, + { MODEL_12B, 512ull * MiB }, + { MODEL_20B, 512ull * MiB }, + }; + return _MEM_REQ_SCRATCH1; +} + +// TODO: Modify for gptneox, how are these values actually determined? +// 2*n_embd*n_ctx*n_layer*sizeof(float16) +// llama 7B: 2 * 768 * 32 * 2 = 98304 +static const std::map & MEM_REQ_KV_SELF() +{ + static std::map _MEM_REQ_KV_SELF = { + { MODEL_3B, 512ull * MiB }, + { MODEL_7B, 1026ull * MiB }, + { MODEL_12B, 1608ull * MiB }, + { MODEL_20B, 1608ull * MiB }, + }; + return _MEM_REQ_KV_SELF; +} + +// TODO: Modify for gptneox, how are these values actually determined? +// this is mostly needed for temporary mul_mat buffers to dequantize the data +// not actually needed if BLAS is disabled +static const std::map & MEM_REQ_EVAL() +{ + static std::map _MEM_REQ_EVAL = { + { MODEL_3B, 512ull * MiB }, + { MODEL_7B, 768ull * MiB }, + { MODEL_12B, 1024ull * MiB }, + { MODEL_20B, 1024ull * MiB }, + }; + return _MEM_REQ_EVAL; +} + +// default hparams (GPT-NeoX oasst 12B) +struct gptneox_hparams { + uint32_t n_vocab = 50288; + uint32_t n_ctx = 4096; // this is provided as user input? + uint32_t n_embd = 5120; + uint32_t n_head = 40; + uint32_t n_layer = 36; + uint32_t n_rot = 32; + uint32_t use_parallel_residual = 1; // 1 = true, 0 = false + enum gptneox_ftype ftype = GPTNEOX_FTYPE_MOSTLY_F16; + + bool operator!=(const gptneox_hparams & other) const { + return memcmp(this, &other, sizeof(gptneox_hparams)); + } +}; + +struct gptneox_layer { + // input_layernorm + struct ggml_tensor * ln_attn_g; + struct ggml_tensor * ln_attn_b; + + // post_attention_layernorm + struct ggml_tensor * ln_ff_g; + struct ggml_tensor * ln_ff_b; + + // attention + struct ggml_tensor * c_attn_attn_w; + + struct ggml_tensor * c_attn_attn_b; + + struct ggml_tensor * c_attn_proj_w; + struct ggml_tensor * c_attn_proj_b; + + // ff + struct ggml_tensor * c_mlp_fc_w; + struct ggml_tensor * c_mlp_fc_b; + + struct ggml_tensor * c_mlp_proj_w; + struct ggml_tensor * c_mlp_proj_b; +}; + +struct gptneox_kv_cache { + struct ggml_tensor * k; + struct ggml_tensor * v; + + struct ggml_context * ctx = NULL; + + gptneox_buffer buf; + + int n; // number of tokens currently in the cache + + ~gptneox_kv_cache() { + if (ctx) { + ggml_free(ctx); + } + } +}; + +struct gptneox_model { + e_model type = MODEL_UNKNOWN; + + gptneox_hparams hparams; + + // final normalization + struct ggml_tensor * ln_f_g; + struct ggml_tensor * ln_f_b; + + // word embedding + struct ggml_tensor * wte; + + // language model head + struct ggml_tensor * lmh_g; + + std::vector layers; + + // context + struct ggml_context * ctx = NULL; + + // key + value cache for the self attention + // TODO: move to gptneox_state + struct gptneox_kv_cache kv_self; + + // the model memory buffer + gptneox_buffer buf; + + // model memory mapped file + std::unique_ptr mapping; + + // objects representing data potentially being locked in memory + gptneox_mlock mlock_buf; + gptneox_mlock mlock_mmap; + + // for quantize-stats only + std::vector> tensors_by_name; + + ~gptneox_model() { + if (ctx) { + ggml_free(ctx); + } + } +}; + +struct gptneox_vocab { + using id = int32_t; + using token = std::string; + + struct token_score { + token tok; + float score; + }; + + std::unordered_map token_to_id; + std::vector id_to_token; +}; + +struct gptneox_context { + std::mt19937 rng; + + int64_t t_load_us = 0; + int64_t t_start_us = 0; + bool has_evaluated_once = 
false; + + int64_t t_sample_us = 0; + int64_t t_eval_us = 0; + int64_t t_p_eval_us = 0; + + int32_t n_sample = 0; // number of tokens sampled + int32_t n_eval = 0; // number of eval calls + int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1) + + gptneox_model model; + gptneox_vocab vocab; + + size_t mem_per_token = 0; + + // decode output (2-dimensional array: [n_tokens][n_vocab]) + std::vector logits; + bool logits_all = false; + + // input embedding (1-dimensional array: [n_embd]) + std::vector embedding; + + // memory buffers used to evaluate the model + // TODO: move in gptneox_state + gptneox_buffer buf_compute; + gptneox_buffer buf_scratch[GPTNEOX_MAX_SCRATCH_BUFFERS]; + + int buf_last = 0; + size_t buf_max_size[GPTNEOX_MAX_SCRATCH_BUFFERS] = { 0 }; + + void use_buf(struct ggml_context * ctx, int i) { +#if defined(GPTNEOX_USE_SCRATCH) + size_t last_size = 0; + + if (i == -1) { + last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, }); + } else { + auto & buf = buf_scratch[i]; + last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, }); + } + + if (buf_last >= 0) { + buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size); + } + + buf_last = i; +#else + (void) i; + (void) ctx; +#endif + } + + size_t get_buf_max_mem(int i) const { +#if defined(GPTNEOX_USE_SCRATCH) + return buf_max_size[i]; +#else + (void) i; + return 0; +#endif + } +}; + +template +static T checked_mul(T a, T b) { + T ret = a * b; + if (a != 0 && ret / a != b) { + throw format("overflow multiplying %llu * %llu", + (unsigned long long) a, (unsigned long long) b); + } + return ret; +} + +static size_t checked_div(size_t a, size_t b) { + if (b == 0 || a % b != 0) { + throw format("error dividing %zu / %zu", a, b); + } + return a / b; +} + +static std::string gptneox_format_tensor_shape(const std::vector & ne) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5u", ne.at(0)); + for (size_t i = 1; i < ne.size(); i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i)); + } + return buf; +} + +static size_t gptneox_calc_tensor_size(const std::vector & ne, enum ggml_type type) { + size_t size = ggml_type_size(type); + for (uint32_t dim : ne) { + size = checked_mul(size, dim); + } + return size / ggml_blck_size(type); +} + +struct gptneox_load_tensor_shard { + std::vector ne; + size_t size; + enum ggml_type type; + size_t file_idx; + size_t file_off; + + void calc_size() { + size = gptneox_calc_tensor_size(ne, type); + } +}; + +enum gptneox_split_type { + SPLIT_NONE, + SPLIT_BY_COLUMNS, + SPLIT_BY_ROWS +}; + +struct gptneox_load_tensor { + std::vector shards; + + std::string name; + enum ggml_type type = GGML_TYPE_F32; + gptneox_split_type split_type = SPLIT_NONE; + std::vector ne; + size_t size; + struct ggml_tensor * ggml_tensor = NULL; + uint8_t * data; + + gptneox_load_tensor(const std::string & name) : name(name) {} + + void calc_all() { + calc_type(); + calc_split_type(); + calc_ne(); + calc_size(); + } + + void calc_type() { + const auto & first_shard = shards.at(0); + for (const auto & shard : shards) { + if (shard.type != first_shard.type) { + throw format("inconsistent tensor shard type in '%s'", name.c_str()); + } + } + type = first_shard.type; + } + + void calc_split_type() { + if (shards.at(0).ne.size() == 1 || // 1D tensors are just duplicated in every file + shards.size() == 1) { // only one file? 
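+            // No splitting is needed in either case; otherwise the tensor name below
+            // decides between column-wise splits (token embeddings, attention output,
+            // feed-forward w2) and row-wise splits for everything else.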
+ split_type = SPLIT_NONE; + } else if (name.find("tok_embeddings.") == 0 || + name.find(".attention.wo.weight") != std::string::npos || + name.find(".feed_forward.w2.weight") != std::string::npos) { + split_type = SPLIT_BY_COLUMNS; + } else { + split_type = SPLIT_BY_ROWS; + } + } + + void calc_ne() { + const auto & first_shard = shards.at(0); + for (const auto & shard : shards) { + if (shard.ne != first_shard.ne) { + throw format("inconsistent tensor shard shape in '%s': first was %s, other was %s", + name.c_str(), gptneox_format_tensor_shape(first_shard.ne).c_str(), gptneox_format_tensor_shape(shard.ne).c_str()); + } + } + ne = first_shard.ne; + GPTNEOX_ASSERT(shards.size() <= UINT32_MAX); + uint32_t n_shards = (uint32_t) shards.size(); + switch (split_type) { + case SPLIT_NONE: + ne = first_shard.ne; + break; + case SPLIT_BY_COLUMNS: + ne = {checked_mul(first_shard.ne[0], n_shards), + first_shard.ne[1]}; + break; + case SPLIT_BY_ROWS: + ne = {first_shard.ne[0], + checked_mul(first_shard.ne[1], n_shards)}; + break; + } + } + + void calc_size() { + size = gptneox_calc_tensor_size(ne, type); + } +}; + +struct gptneox_load_tensors_map { + // tensors is kept in a separate vector to preserve file order + std::vector tensors; + std::unordered_map name_to_idx; +}; + +enum gptneox_file_version { + GPTNEOX_FILE_VERSION_GGML, + GPTNEOX_FILE_VERSION_GGMF_V1, // added version field and scores in vocab + GPTNEOX_FILE_VERSION_GGJT_V1, // added padding +}; + +struct gptneox_file_loader { + gptneox_file file; + gptneox_file_version file_version; + gptneox_hparams hparams; + gptneox_vocab vocab; + + gptneox_file_loader(const char * fname, size_t file_idx, gptneox_load_tensors_map & tensors_map) + : file(fname, "rb") { + fprintf(stderr, "gptneox.cpp: loading model from %s\n", fname); + read_magic(); + read_hparams(); + read_vocab(); + read_tensor_metadata(file_idx, tensors_map); + } + void read_magic() { + uint32_t magic = file.read_u32(); + uint32_t version = 0; + + if (magic != 'ggml') { + version = file.read_u32(); + } + + if (magic == 'ggml' && version == 0) { + file_version = GPTNEOX_FILE_VERSION_GGML; + } else if (magic == 'ggmf' && version == 1) { + file_version = GPTNEOX_FILE_VERSION_GGMF_V1; + } else if (magic == 'ggjt' && version == 1) { + file_version = GPTNEOX_FILE_VERSION_GGJT_V1; + } else { + throw format("unknown (magic, version) combination: %08x, %08x; is this really a GGML file?", + magic, version); + } + } + void read_hparams() { + hparams.n_vocab = file.read_u32(); + hparams.n_ctx = file.read_u32(); + hparams.n_embd = file.read_u32(); + hparams.n_head = file.read_u32(); + hparams.n_layer = file.read_u32(); + hparams.n_rot = file.read_u32(); + hparams.use_parallel_residual = file.read_u32(); + hparams.ftype = (enum gptneox_ftype) file.read_u32(); + } + void read_vocab() { + vocab.id_to_token.resize(hparams.n_vocab); + + for (uint32_t i = 0; i < hparams.n_vocab; i++) { + uint32_t len = file.read_u32(); + std::string word = file.read_string(len); + + float score = 0.0f; + // TODO: Implement scores in gptneox + /*if (file_version >= GPTNEOX_FILE_VERSION_GGMF_V1) { + file.read_raw(&score, sizeof(score)); + }*/ + + vocab.token_to_id[word] = i; + + auto & tok_score = vocab.id_to_token[i]; + tok_score.tok = std::move(word); + tok_score.score = score; + } + } + void read_tensor_metadata(size_t file_idx, gptneox_load_tensors_map & tensors_map) { + while (file.tell() < file.size) { + gptneox_load_tensor_shard shard; + uint32_t n_dims = file.read_u32(); + uint32_t name_len = file.read_u32(); + 
shard.type = (enum ggml_type) file.read_u32(); + shard.ne.resize(n_dims); + file.read_raw(shard.ne.data(), sizeof(shard.ne[0]) * n_dims); + std::string name = file.read_string(name_len); + if (n_dims < 1 || n_dims > 2) { + throw format("gptneox.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims); + } + switch (shard.type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + break; + default: { + throw format("unrecognized tensor type %u\n", shard.type); + } + } + + if (file_version >= GPTNEOX_FILE_VERSION_GGJT_V1) { + // skip to the next multiple of 32 bytes + file.seek(-file.tell() & 31, SEEK_CUR); + } + shard.file_idx = file_idx; + shard.file_off = file.tell(); + + shard.calc_size(); + file.seek(shard.size, SEEK_CUR); + + auto it = tensors_map.name_to_idx.find(name); + size_t idx; + if (it != tensors_map.name_to_idx.end()) { + idx = it->second; + } else { + tensors_map.tensors.emplace_back(name); + idx = tensors_map.tensors.size() - 1; + tensors_map.name_to_idx.emplace(name, idx); + } + tensors_map.tensors.at(idx).shards.push_back(shard); + } + } +}; + +struct gptneox_file_saver { + gptneox_file file; + gptneox_file_loader * any_file_loader; + gptneox_file_saver(const char * fname, gptneox_file_loader * any_file_loader, enum gptneox_ftype new_ftype) + : file(fname, "wb"), any_file_loader(any_file_loader) { + fprintf(stderr, "gptneox.cpp: saving model to %s\n", fname); + write_magic(); + write_hparams(new_ftype); + write_vocab(); + } + void write_magic() { + file.write_u32('ggjt'); // magic + file.write_u32(1); // version + } + void write_hparams(enum gptneox_ftype new_ftype) { + const gptneox_hparams & hparams = any_file_loader->hparams; + file.write_u32(hparams.n_vocab); + file.write_u32(hparams.n_ctx); + file.write_u32(hparams.n_embd); + file.write_u32(hparams.n_head); + file.write_u32(hparams.n_layer); + file.write_u32(hparams.n_rot); + file.write_u32(hparams.use_parallel_residual); + file.write_u32(new_ftype); + } + void write_vocab() { + if (any_file_loader->file_version == GPTNEOX_FILE_VERSION_GGML) { + fprintf(stderr, "gptneox.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n"); + } + uint32_t n_vocab = any_file_loader->hparams.n_vocab; + for (uint32_t i = 0; i < n_vocab; i++) { + const auto & token_score = any_file_loader->vocab.id_to_token.at(i); + file.write_u32((uint32_t) token_score.tok.size()); + file.write_raw(token_score.tok.data(), token_score.tok.size()); + // TODO: Implement scores in gptneox? 
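+            // For now each vocab entry is written as a u32 length followed by the raw
+            // token bytes only, which is exactly what read_vocab() above expects.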
+ //file.write_raw(&token_score.score, sizeof(token_score.score)); + } + } + void write_tensor(gptneox_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) { + switch (new_type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q4_2: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + break; + default: GPTNEOX_ASSERT(false); + } + file.write_u32((uint32_t) tensor.ne.size()); + file.write_u32((uint32_t) tensor.name.size()); + file.write_u32(new_type); + file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size()); + file.write_raw(tensor.name.data(), tensor.name.size()); + file.seek(-file.tell() & 31, SEEK_CUR); + GPTNEOX_ASSERT(new_size == gptneox_calc_tensor_size(tensor.ne, new_type)); + file.write_raw(new_data, new_size); + } +}; + +struct gptneox_model_loader { + std::vector> file_loaders; + gptneox_load_tensors_map tensors_map; + bool use_mmap; + size_t num_ggml_tensors_created = 0; + struct ggml_context * ggml_ctx = NULL; + std::unique_ptr mapping; + + gptneox_model_loader(const std::string & fname_base, bool use_mmap, bool vocab_only) { + auto first_file = new gptneox_file_loader(fname_base.c_str(), 0, tensors_map); + file_loaders.emplace_back(first_file); + uint32_t n_parts = vocab_only ? 1 : guess_n_parts(); + for (uint32_t i = 1; i < n_parts; i++) { + std::string fname = fname_base + "." + std::to_string(i); + auto ith_file = new gptneox_file_loader(fname.c_str(), i, tensors_map); + file_loaders.emplace_back(ith_file); + if (ith_file->hparams != first_file->hparams) { + throw format("gptneox.cpp: hparams inconsistent between files"); + } + } + if (!gptneox_mmap::SUPPORTED) { + use_mmap = false; + } + if (use_mmap && alignment_prevents_mmap()) { + fprintf(stderr, "gptneox.cpp: can't use mmap because tensors are not aligned; convert to new format to avoid this\n"); + use_mmap = false; + } + this->use_mmap = use_mmap; + for (gptneox_load_tensor & lt : tensors_map.tensors) { + lt.calc_all(); + } + } + + bool alignment_prevents_mmap() { + for (const gptneox_load_tensor & lt : tensors_map.tensors) { + for (const gptneox_load_tensor_shard & shard : lt.shards) { + if (shard.file_off & 3) { + return true; + } + } + } + return false; + } + + uint32_t guess_n_parts() const { + auto it = tensors_map.name_to_idx.find("gpt_neox.embed_in.weight"); + if (it == tensors_map.name_to_idx.end()) { + throw std::string("missing gpt_neox.embed_in.weight"); + } + const gptneox_load_tensor & lt = tensors_map.tensors.at(it->second); + return file_loaders.at(0)->hparams.n_embd / lt.shards.at(0).ne.at(0); + } + + void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const { + *ctx_size_p = *mmapped_size_p = 0; + for (const gptneox_load_tensor & lt : tensors_map.tensors) { + *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; + *(use_mmap ? 
mmapped_size_p : ctx_size_p) += lt.size; + } + } + + struct ggml_tensor * get_tensor(const std::string & name, std::vector ne) { + auto it = tensors_map.name_to_idx.find(name); + if (it == tensors_map.name_to_idx.end()) { + throw format("gptneox.cpp: tensor '%s' is missing from model", name.c_str()); + } + gptneox_load_tensor & lt = tensors_map.tensors.at(it->second); + if (lt.ne != ne) { + throw format("gptneox.cpp: tensor '%s' has wrong shape; expected %s, got %s", + name.c_str(), gptneox_format_tensor_shape(ne).c_str(), gptneox_format_tensor_shape(lt.ne).c_str()); + } + + return get_tensor_for(lt); + } + + struct ggml_tensor * get_tensor_for(gptneox_load_tensor & lt) { + struct ggml_tensor * tensor; + if (lt.ne.size() == 2) { + tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1)); + } else { + GPTNEOX_ASSERT(lt.ne.size() == 1); + tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0)); + } + GPTNEOX_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor + lt.ggml_tensor = tensor; + num_ggml_tensors_created++; + return tensor; + } + + void done_getting_tensors() { + if (num_ggml_tensors_created != tensors_map.tensors.size()) { + throw std::string("gptneox.cpp: file contained more tensors than expected"); + } + } + + void load_all_data(gptneox_progress_callback progress_callback, void * progress_callback_user_data, gptneox_mlock * lmlock) { + size_t data_size = 0; + for (const gptneox_load_tensor & lt : tensors_map.tensors) { + data_size += lt.size; + } + + if (use_mmap) { + mapping.reset(new gptneox_mmap(&file_loaders.at(0)->file)); + if (!lmlock) { + // Don't call the callback since the actual loading will be lazy + // and we can't measure it. + progress_callback = NULL; + } + if (lmlock) { + lmlock->init(mapping->addr); + } + } + + size_t done_size = 0; + for (gptneox_load_tensor & lt : tensors_map.tensors) { + if (progress_callback) { + progress_callback((float) done_size / data_size, progress_callback_user_data); + } + GPTNEOX_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already + lt.data = (uint8_t *) lt.ggml_tensor->data; + load_data_for(lt); + lt.ggml_tensor->data = lt.data; + done_size += lt.size; + if (use_mmap && lmlock) { + lmlock->grow_to(done_size); + } + } + if (progress_callback) { + progress_callback(1.0f, progress_callback_user_data); + } + } + + void load_data_for(gptneox_load_tensor & lt) { + if (use_mmap) { + GPTNEOX_ASSERT(lt.shards.size() == 1); + lt.data = (uint8_t *) mapping->addr + lt.shards.at(0).file_off; + } else if (lt.split_type == SPLIT_NONE) { + gptneox_file & file = file_loaders.at(lt.shards.at(0).file_idx)->file; + file.seek(lt.shards.at(0).file_off, SEEK_SET); + file.read_raw(lt.data, lt.size); + } else if (lt.split_type == SPLIT_BY_ROWS) { + size_t offset = 0; + for (gptneox_load_tensor_shard & shard : lt.shards) { + gptneox_file & file = file_loaders.at(shard.file_idx)->file; + file.seek(shard.file_off, SEEK_SET); + file.read_raw(lt.data + offset, shard.size); + offset += shard.size; + } + GPTNEOX_ASSERT(offset == lt.size); + } else if (lt.split_type == SPLIT_BY_COLUMNS) { + // Let's load the data into temporary buffers to ensure the OS performs large loads. 
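+            // Each column-split shard holds a slice of every row, so after reading the
+            // shards whole we interleave them: for every output row, copy
+            // per_shard_row_size bytes from each shard in file order.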
+ std::vector tmp_bufs; + tmp_bufs.resize(lt.shards.size()); + for (size_t i = 0; i < lt.shards.size(); i++) { + gptneox_load_tensor_shard & shard = lt.shards.at(i); + gptneox_file & file = file_loaders.at(shard.file_idx)->file; + file.seek(shard.file_off, SEEK_SET); + tmp_bufs.at(i).resize(shard.size); + file.read_raw(tmp_bufs.at(i).addr, shard.size); + } + // Then reshape. + size_t num_rows = lt.ne.at(1); + size_t per_shard_row_size = lt.shards.at(0).size / num_rows; + size_t out_offset = 0; + for (size_t row = 0; row < num_rows; row++) { + for (gptneox_buffer & tmp_buf : tmp_bufs) { + memcpy(lt.data + out_offset, + tmp_buf.addr + row * per_shard_row_size, + per_shard_row_size); + out_offset += per_shard_row_size; + } + } + GPTNEOX_ASSERT(out_offset == lt.size); + } + if (0) { + print_checksum(lt); + } + } + + static void print_checksum(gptneox_load_tensor & lt) { + uint32_t sum = 0; + for (size_t i = 0; i < lt.size; i++) { + uint8_t byte = lt.data[i]; + sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash + } + fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum, + gptneox_format_tensor_shape(lt.ne).c_str(), lt.size); + } + +}; + + +// +// kv cache +// + +static bool kv_cache_init( + const struct gptneox_hparams & hparams, + struct gptneox_kv_cache & cache, + ggml_type wtype, + int n_ctx) { + const int n_embd = hparams.n_embd; + const int n_layer = hparams.n_layer; + + const int64_t n_mem = (int64_t)n_layer*n_ctx; + const int64_t n_elements = n_embd*n_mem; + + cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MiB); + + struct ggml_init_params params; + params.mem_size = cache.buf.size; + params.mem_buffer = cache.buf.addr; + params.no_alloc = false; + + cache.ctx = ggml_init(params); + + if (!cache.ctx) { + fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__); + return false; + } + + cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); + cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); + + return true; +} + +struct gptneox_context_params gptneox_context_default_params() { + struct gptneox_context_params result = { + /*.n_ctx =*/ 512, + /*.n_parts =*/ -1, + /*.seed =*/ 0, + /*.f16_kv =*/ false, + /*.logits_all =*/ false, + /*.vocab_only =*/ false, + /*.use_mmap =*/ true, + /*.use_mlock =*/ false, + /*.embedding =*/ false, + /*.progress_callback =*/ nullptr, + /*.progress_callback_user_data =*/ nullptr, + }; + + return result; +} + +bool gptneox_mmap_supported() { + return gptneox_mmap::SUPPORTED; +} + +bool gptneox_mlock_supported() { + return gptneox_mlock::SUPPORTED; +} + +// +// model loading +// + +static const char *gptneox_file_version_name(gptneox_file_version version) { + switch (version) { + case GPTNEOX_FILE_VERSION_GGML: return "'ggml' (old version with low tokenizer quality and no mmap support)"; + case GPTNEOX_FILE_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)"; + case GPTNEOX_FILE_VERSION_GGJT_V1: return "ggjt v1 (latest)"; + default: GPTNEOX_ASSERT(false); + } +} + +static const char *gptneox_ftype_name(enum gptneox_ftype ftype) { + switch (ftype) { + case GPTNEOX_FTYPE_ALL_F32: return "all F32"; + case GPTNEOX_FTYPE_MOSTLY_F16: return "mostly F16"; + case GPTNEOX_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0"; + case GPTNEOX_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1"; + case GPTNEOX_FTYPE_MOSTLY_Q4_1_SOME_F16: + return "mostly Q4_1, some F16"; + case GPTNEOX_FTYPE_MOSTLY_Q4_2: return "mostly Q4_2"; + //case GPTNEOX_FTYPE_MOSTLY_Q4_3: return "mostly Q4_3"; + case 
GPTNEOX_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0"; + case GPTNEOX_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1"; + case GPTNEOX_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0"; + default: return "unknown, may not work"; + } +} + +static const char *gptneox_model_type_name(e_model type) { + switch (type) { + case MODEL_3B: return "3B"; + case MODEL_7B: return "7B"; + case MODEL_12B: return "12B"; + case MODEL_20B: return "20B"; + case MODEL_UNKNOWN: return "UNKNOWN"; + default: GPTNEOX_ASSERT(false); + } +} + +static void gptneox_model_load_internal( + const std::string & fname, + gptneox_context & lctx, + int n_ctx, + ggml_type memory_type, + bool use_mmap, + bool use_mlock, + bool vocab_only, + gptneox_progress_callback progress_callback, + void * progress_callback_user_data) { + + lctx.t_start_us = ggml_time_us(); + + std::unique_ptr ml(new gptneox_model_loader(fname, use_mmap, vocab_only)); + + lctx.vocab = std::move(ml->file_loaders.at(0)->vocab); + auto & model = lctx.model; + model.hparams = ml->file_loaders.at(0)->hparams; + gptneox_file_version file_version = ml->file_loaders.at(0)->file_version; + auto & hparams = model.hparams; + + { + switch (hparams.n_layer) { + case 16: { + if (hparams.n_embd < 6144) { + model.type = e_model::MODEL_3B; + } else { + model.type = e_model::MODEL_7B; + } + break; + } + // # : we extend the model type settings for RedPajama models. + case 32:{ + if (hparams.n_embd == 2560) { + model.type = e_model::MODEL_3B; + } else if (hparams.n_embd == 4096) { + model.type = e_model::MODEL_7B; + } + else { + model.type = e_model::MODEL_UNKNOWN; + } + break; + } + case 36: model.type = e_model::MODEL_12B; break; + case 44: model.type = e_model::MODEL_20B; break; + } + + hparams.n_ctx = n_ctx; + } + + { + fprintf(stderr, "%s: format = %s\n", __func__, gptneox_file_version_name(file_version)); + fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab); + fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx); + fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd); + fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head); + fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer); + fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot); + fprintf(stderr, "%s: use_parallel_residual = %d\n", __func__, hparams.use_parallel_residual); + fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, gptneox_ftype_name(hparams.ftype)); + fprintf(stderr, "%s: n_parts = %zu\n", __func__, ml->file_loaders.size()); + fprintf(stderr, "%s: model size = %s\n", __func__, gptneox_model_type_name(model.type)); + } + + if (vocab_only) { + return; + } + + auto & ctx = model.ctx; + + size_t ctx_size, mmapped_size; + ml->calc_sizes(&ctx_size, &mmapped_size); + fprintf(stderr, "%s: ggml ctx size = %6.2f KiB\n", __func__, ctx_size/1024.0); + + // print memory requirements + { + const size_t scale = memory_type == GGML_TYPE_F32 ? 
2 : 1; + + // this is the total memory required to run the inference + const size_t mem_required = + ctx_size + + mmapped_size + + MEM_REQ_SCRATCH0().at(model.type) + + MEM_REQ_SCRATCH1().at(model.type) + + MEM_REQ_EVAL().at(model.type); + + // this is the memory required by one gptneox_state + const size_t mem_required_state = + scale*MEM_REQ_KV_SELF().at(model.type); + + fprintf(stderr, "%s: mem required = %7.2f MiB (+ %7.2f MiB per state)\n", __func__, + mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0); + } + + // create the ggml context + { + lctx.model.buf.resize(ctx_size); + if (use_mlock) { + lctx.model.mlock_buf.init(lctx.model.buf.addr); + lctx.model.mlock_buf.grow_to(lctx.model.buf.size); + } + + struct ggml_init_params params = { + /*.mem_size =*/ lctx.model.buf.size, + /*.mem_buffer =*/ lctx.model.buf.addr, + /*.no_alloc =*/ ml->use_mmap, + }; + + model.ctx = ggml_init(params); + if (!model.ctx) { + throw format("ggml_init() failed"); + } + } + + // prepare memory for the weights + { + const auto & hparams = model.hparams; + + const uint32_t n_embd = hparams.n_embd; + const uint32_t n_layer = hparams.n_layer; + const uint32_t n_vocab = hparams.n_vocab; + + ml->ggml_ctx = ctx; + + model.wte = ml->get_tensor("gpt_neox.embed_in.weight", {n_embd, n_vocab}); + model.ln_f_g = ml->get_tensor("gpt_neox.final_layer_norm.weight", {n_embd}); + model.ln_f_b = ml->get_tensor("gpt_neox.final_layer_norm.bias", {n_embd}); + model.lmh_g = ml->get_tensor("embed_out.weight", {n_embd, n_vocab}); + + model.layers.resize(n_layer); + for (uint32_t i = 0; i < n_layer; ++i) { + auto & layer = model.layers[i]; + + std::string layers_i = "gpt_neox.layers." + std::to_string(i); + + layer.ln_attn_g = ml->get_tensor(layers_i + ".input_layernorm.weight", {n_embd}); + layer.ln_attn_b = ml->get_tensor(layers_i + ".input_layernorm.bias", {n_embd}); + + layer.c_attn_attn_w = ml->get_tensor(layers_i + ".attention.query_key_value.weight", {n_embd, n_embd * 3}); + layer.c_attn_attn_b = ml->get_tensor(layers_i + ".attention.query_key_value.bias", {n_embd * 3}); + layer.c_attn_proj_w = ml->get_tensor(layers_i + ".attention.dense.weight", {n_embd, n_embd}); + layer.c_attn_proj_b = ml->get_tensor(layers_i + ".attention.dense.bias", {n_embd}); + + layer.ln_ff_g = ml->get_tensor(layers_i + ".post_attention_layernorm.weight", {n_embd}); + layer.ln_ff_b = ml->get_tensor(layers_i + ".post_attention_layernorm.bias", {n_embd}); + + layer.c_mlp_fc_w = ml->get_tensor(layers_i + ".mlp.dense_h_to_4h.weight", {n_embd, n_embd * 4}); + layer.c_mlp_fc_b = ml->get_tensor(layers_i + ".mlp.dense_h_to_4h.bias", {n_embd * 4}); + layer.c_mlp_proj_w = ml->get_tensor(layers_i + ".mlp.dense_4h_to_h.weight", {n_embd * 4, n_embd}); + layer.c_mlp_proj_b = ml->get_tensor(layers_i + ".mlp.dense_4h_to_h.bias", {n_embd}); + } + } + + ml->done_getting_tensors(); + + // populate `tensors_by_name` + for (gptneox_load_tensor & lt : ml->tensors_map.tensors) { + model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor); + } + + ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? 
&lctx.model.mlock_mmap : NULL); + + model.mapping = std::move(ml->mapping); + + // loading time will be recalculate after the first eval, so + // we take page faults deferred by mmap() into consideration + lctx.t_load_us = ggml_time_us() - lctx.t_start_us; +} + +static bool gptneox_model_load( + const std::string & fname, + gptneox_context & lctx, + int n_ctx, + ggml_type memory_type, + bool use_mmap, + bool use_mlock, + bool vocab_only, + gptneox_progress_callback progress_callback, + void *progress_callback_user_data) { + try { + gptneox_model_load_internal(fname, lctx, n_ctx, memory_type, use_mmap, use_mlock, + vocab_only, progress_callback, progress_callback_user_data); + return true; + } catch (const std::string & err) { + fprintf(stderr, "error loading model: %s\n", err.c_str()); + return false; + } +} + +// evaluate the transformer +// +// - lctx: llama context +// - tokens: new batch of tokens to process +// - n_past: the context size so far +// - n_threads: number of threads to use +// +static bool gptneox_eval_internal( + gptneox_context & lctx, + const gptneox_token * tokens, + const int n_tokens, + const int n_past, + const int n_threads) { + const int64_t t_start_us = ggml_time_us(); + + const int N = n_tokens; + + const auto & model = lctx.model; + const auto & hparams = model.hparams; + + auto & kv_self = model.kv_self; + + GPTNEOX_ASSERT(!!kv_self.ctx); + + const int n_embd = hparams.n_embd; + const int n_layer = hparams.n_layer; + const int n_ctx = hparams.n_ctx; + const int n_head = hparams.n_head; + const int n_vocab = hparams.n_vocab; + const int n_rot = hparams.n_rot; + + auto & mem_per_token = lctx.mem_per_token; + auto & buf_compute = lctx.buf_compute; + + struct ggml_init_params params = { + /*.mem_size =*/ buf_compute.size, + /*.mem_buffer =*/ buf_compute.addr, + /*.no_alloc =*/ false, + }; + + struct ggml_context * ctx0 = ggml_init(params); + + // for big prompts, if BLAS is enabled, it is better to use only one thread + // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance + ggml_cgraph gf = {}; + gf.n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_cublas() ? 
1 : n_threads; + + struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + memcpy(embd->data, tokens, N*ggml_element_size(embd)); + + struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd); + + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * cur; + + lctx.use_buf(ctx0, 0); + + // input norm + { + cur = ggml_norm(ctx0, inpL); + + // cur = ln_attn_g*cur + ln_attn_b + cur = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.layers[il].ln_attn_g, cur), + cur), + ggml_repeat(ctx0, model.layers[il].ln_attn_b, cur)); + } + + // self-attention + { + // attn + // [3*n_embd, n_embd] - model.layers[il].c_attn_attn_w + // [3*n_embd, 1] - model.layers[il].c_attn_attn_b + // [ n_embd, N] - cur (in) + // [3*n_embd, N] - cur (out) + // + // cur = attn_w*cur + attn_b + // [3*n_embd, N] + { + cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_attn_w, cur); + cur = ggml_add(ctx0, + ggml_repeat(ctx0, + model.layers[il].c_attn_attn_b, cur), + cur); + } + + // Split QKV and make contiguous + struct ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, + n_embd/n_head, + n_head, + N, + ggml_element_size(cur) * 3 * n_embd/n_head, + ggml_element_size(cur) * 3 * n_embd, + ggml_element_size(cur) * n_embd/n_head * 0); + struct ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, + n_embd/n_head, + n_head, + N, + ggml_element_size(cur) * 3 * n_embd/n_head, + ggml_element_size(cur) * 3 * n_embd, + ggml_element_size(cur) * n_embd/n_head * 1); + struct ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, + n_embd/n_head, + n_head, + N, + ggml_element_size(cur) * 3 * n_embd/n_head, + ggml_element_size(cur) * 3 * n_embd, + ggml_element_size(cur) * n_embd/n_head * 2); + // TODO: Flatten without copying, or see if non-contiguous can be used for any of QKV. + Qcur = ggml_cpy(ctx0, Qcur, + ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)); + Kcur = ggml_cpy(ctx0, Kcur, + ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)); + Vcur = ggml_cpy(ctx0, Vcur, + ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)); + + // MARK: gptneox RoPE Q and K, before cache + // Bit 2 for gptneox style (2) + // Bit 1 is zero for dont skip n_past +(0), use (2+1) = (3) if rope is applied to cache of k (after cache only) + Qcur = ggml_rope(ctx0, Qcur, n_past, n_rot, 2); + Kcur = ggml_rope(ctx0, Kcur, n_past, n_rot, 2); //3); + + // store key and value to memory, not required if prompt if only a single token (not practical or likely) + //if (N >= 1) { + // Each entry in kv_self has byte size of (ggml_element_size * n_embd * n_ctx * n_layer) + Vcur = ggml_view_2d(ctx0, Vcur, + n_embd, + N, + ggml_element_size(Vcur) * n_embd, + 0); + Vcur = ggml_transpose(ctx0, Vcur); + + struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, + n_embd * N, // num elements in current context (up to n_embd*n_ctx but usually less) + ggml_element_size(kv_self.k) * n_embd * (il * n_ctx + n_past)); + struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, + N, + n_embd, + ggml_element_size(kv_self.v) * n_ctx, + ggml_element_size(kv_self.v) * ((il * n_ctx * n_embd) + n_past)); + + // important: storing RoPE-ed version of K in the KV cache! 
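+            // Cache layout, per layer slot of n_embd*n_ctx elements:
+            //   k is token-major (k[il][token][embd]), so the N new K rows land as one
+            //   contiguous block at element offset (il*n_ctx + n_past)*n_embd.
+            //   v is stored transposed, embd-major (v[il][embd][token]), so each of the
+            //   n_embd rows gets N new values at column n_past (row stride n_ctx).
+            //   Storing V transposed lets the KQV = mul_mat(V_trans, softmax(KQ)) step
+            //   below read each V row contiguously without another permute/copy.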
+ ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); + ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); + //} + + // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) + struct ggml_tensor * Q = + ggml_permute(ctx0, + Qcur, + 0, 2, 1, 3); + + // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) + struct ggml_tensor * K = + ggml_permute(ctx0, + ggml_reshape_3d(ctx0, + ggml_view_1d(ctx0, kv_self.k, + (n_past + N) * n_embd, + ggml_element_size(kv_self.k) * il * n_ctx * n_embd), + n_embd/n_head, n_head, n_past + N), + 0, 2, 1, 3); + + // K * Q + // Will use internally ggml_compute_forward_mul_mat_f16_f32 because K is f16 (cache) and Q is f32 (from q4_0) + // Outputs [N, N, H, B], so it seems like this is correct for "scores" + // K is internally transposed by ggml_mul_mat + struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + // KQ_scaled = KQ / sqrt(n_embd/n_head) + struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, + ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))); + // KQ_masked = mask_past(KQ_scaled) + struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); + // KQ = soft_max(KQ_masked) + struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); + + // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() + struct ggml_tensor * V_trans = ggml_view_3d(ctx0, kv_self.v, + n_past + N, + n_embd/n_head, + n_head, + ggml_element_size(kv_self.v) * n_ctx, + ggml_element_size(kv_self.v) * n_ctx * n_embd/n_head, + ggml_element_size(kv_self.v) * il * n_ctx * n_embd); + + // KQV = transpose(V) * KQ_soft_max + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); + + // KQV_merged = KQV.permute(0, 2, 1, 3) + struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + + // cur = KQV_merged.contiguous().view(n_embd, N) + cur = ggml_cpy(ctx0, KQV_merged, + ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + + // projection (first weight) + cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_proj_w, cur); + + // projection (then bias) + cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), cur); + } + + lctx.use_buf(ctx0, 1); + + if (hparams.use_parallel_residual == 1) { + //printf("use_parallel_residual == 1\n"); + + // This is independent of the self-attention result, so it could be done in parallel to the self-attention + struct ggml_tensor * outAttn = cur; + + // post attention layer norm + { + cur = ggml_norm(ctx0, inpL); + + // cur = ln_attn_g*inpFF + ln_attn_b + cur = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.layers[il].ln_ff_g, cur), + cur), + ggml_repeat(ctx0, model.layers[il].ln_ff_b, cur)); + } + + + // feed-forward network + { + // note here we pass inpFF instead of cur + cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_fc_w, cur); + + cur = ggml_add(ctx0, + ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), + cur); + + // GELU activation + cur = ggml_gelu(ctx0, cur); + + // projection + // cur = proj_w*inpFF + proj_b + cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_proj_w, cur); + + cur = ggml_add(ctx0, + ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), + cur); + } + //# pseudocode: + //# x = x + attn(ln1(x)) + mlp(ln2(x)) + // inpL = inpL + outAttn + cur + cur = ggml_add(ctx0, outAttn, cur); + inpL = ggml_add(ctx0, inpL, cur); + } else if (hparams.use_parallel_residual == 0) { + //printf("use_parallel_residual == 0\n"); + + // This takes the self-attention residual output as input to 
Feedforward + struct ggml_tensor * outAttn = cur; + struct ggml_tensor * inpFF = ggml_add(ctx0, outAttn, inpL); + + // post attention layer norm + { + cur = ggml_norm(ctx0, inpFF); + + // inpFF = ln_attn_g*inpFF + ln_attn_b + cur = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.layers[il].ln_ff_g, cur), + cur), + ggml_repeat(ctx0, model.layers[il].ln_ff_b, cur)); + } + + // feed-forward network + { + // note here we pass inpFF instead of cur + cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_fc_w, cur); + + cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), cur); + + cur = ggml_gelu(ctx0, cur); + + cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_proj_w, cur); + + cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), cur); + } + + //# pseudocode: + //# x = x + attn(ln1(x)) (residual above as input to mlp) + //# x = x + mlp(ln2(x)) (residual after mlp aka inpL + cur) + //# : we fixed a small issue in the gptneox.cpp fork when setting use_parallel_residual to False; + inpL = ggml_add(ctx0, inpFF, cur); + } else { + printf("use_parallel_residual == %d\n", hparams.use_parallel_residual); + assert(0); + } + } + + lctx.use_buf(ctx0, 0); + + // used at the end to optionally extract the embeddings + struct ggml_tensor * embeddings = NULL; + + // norm + { + inpL = ggml_norm(ctx0, inpL); + + // inpL = ln_f_g*inpL + ln_f_b + inpL = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.ln_f_g, inpL), + inpL), + ggml_repeat(ctx0, model.ln_f_b, inpL)); + + embeddings = inpL; + } + + // lm_head + inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL); + + lctx.use_buf(ctx0, -1); + + // logits -> probs + //inpL = ggml_soft_max(ctx0, inpL); + + // run the computation + ggml_build_forward_expand(&gf, inpL); + ggml_graph_compute (ctx0, &gf); + +#ifdef GGML_PERF + // print timing information per ggml operation (for debugging purposes) + // requires GGML_PERF to be defined + ggml_graph_print(&gf); +#endif + + // plot the computation graph in dot format (for debugging purposes) + //if (n_past%100 == 0) { + // ggml_graph_dump_dot(&gf, NULL, "llama.dot"); + //} + + //embd_w.resize(n_vocab*N); + //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N); + + // extract logits + { + auto & logits_out = lctx.logits; + + if (lctx.logits_all) { + logits_out.resize(n_vocab * N); + memcpy(logits_out.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N); + } else { + // return result for just the last token + logits_out.resize(n_vocab); + memcpy(logits_out.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab); + } + } + + // extract embeddings + if (lctx.embedding.size()) { + auto & embedding_out = lctx.embedding; + + embedding_out.resize(n_embd); + memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd); + } + + if (mem_per_token == 0) { + mem_per_token = ggml_used_mem(ctx0)/N; + } + +#if 0 + printf("\n%s: used_mem = %.3f MiB, scratch -- %.3f MiB %.3f MiB\n", __func__, + ggml_used_mem(ctx0)/1024.0/1024.0, + lctx.get_buf_max_mem(0)/1024.0/1024.0, + lctx.get_buf_max_mem(1)/1024.0/1024.0); +#endif + + ggml_free(ctx0); + + // measure the performance only for the single-token evals + if (N == 1) { + lctx.t_eval_us += ggml_time_us() - t_start_us; + lctx.n_eval++; + } + else if (N > 1) { + lctx.t_p_eval_us += ggml_time_us() - t_start_us; + lctx.n_p_eval += N; + } + + return true; +} + +// +// tokenizer +// + +static size_t utf8_len(char src) { + const size_t lookup[] = { 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 }; + uint8_t highbits = static_cast(src) >> 4; + return lookup[highbits]; +} + +struct gptneox_sp_symbol { + using index = int; + index prev; + index next; + const char * text; + size_t n; +}; + +struct gptneox_sp_bigram { + struct comparator { + bool operator()(gptneox_sp_bigram & l, gptneox_sp_bigram & r) { + return (l.score < r.score) || (l.score == r.score && l.left > r.left); + } + }; + using queue_storage = std::vector; + using queue = std::priority_queue; + gptneox_sp_symbol::index left; + gptneox_sp_symbol::index right; + float score; + size_t size; +}; + +// original implementation: +// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4 +struct gptneox_tokenizer { + gptneox_tokenizer(const gptneox_vocab & vocab): vocab_(vocab) {} + + void tokenize(const std::string & text, std::vector & output) { + // split string into utf8 chars + int index = 0; + size_t offs = 0; + while (offs < text.size()) { + gptneox_sp_symbol sym; + size_t char_len = std::min(text.size() - offs, utf8_len(text[offs])); + sym.text = text.c_str() + offs; + sym.n = char_len; + offs += char_len; + sym.prev = index - 1; + sym.next = offs == text.size() ? -1 : index + 1; + index++; + symbols_.emplace_back(std::move(sym)); + } + + // seed the work queue with all possible 2-character tokens. + for (size_t i = 1; i < symbols_.size(); ++i) { + try_add_bigram(i - 1, i); + } + + // keep substituting the highest frequency pairs for as long as we can. + while (!work_queue_.empty()) { + auto bigram = work_queue_.top(); + work_queue_.pop(); + + auto & left_sym = symbols_[bigram.left]; + auto & right_sym = symbols_[bigram.right]; + + // if one of the symbols already got merged, skip it. + if (left_sym.n == 0 || right_sym.n == 0 || + left_sym.n + right_sym.n != bigram.size) { + continue; + } + + // merge the right sym into the left one + left_sym.n += right_sym.n; + right_sym.n = 0; + + //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size); + + // remove the right sym from the chain + left_sym.next = right_sym.next; + if (right_sym.next >= 0) { + symbols_[right_sym.next].prev = bigram.left; + } + + // find more substitutions + try_add_bigram(left_sym.prev, bigram.left); + try_add_bigram(bigram.left, left_sym.next); + } + + for (int i = 0; i != -1; i = symbols_[i].next) { + auto & symbol = symbols_[i]; + auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n)); + + if (token == vocab_.token_to_id.end()) { + // output any symbols that did not form tokens as bytes. 
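+                // Byte fallback carried over from the referenced llama.cpp implementation:
+                // emit one token per raw byte, shifted by 3 because that vocab reserves
+                // ids 0..2 for special tokens.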
+ for (int j = 0; j < (int) symbol.n; ++j) { + gptneox_vocab::id token_id = static_cast(symbol.text[j]) + 3; + output.push_back(token_id); + } + } else { + output.push_back((*token).second); + } + } + } + +private: + void try_add_bigram(int left, int right) { + if (left == -1 || right == -1) { + return; + } + + const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n); + auto token = vocab_.token_to_id.find(text); + + if (token == vocab_.token_to_id.end()) { + return; + } + + if (static_cast((*token).second) >= vocab_.id_to_token.size()) { + return; + } + + const auto &tok_score = vocab_.id_to_token[(*token).second]; + + gptneox_sp_bigram bigram; + bigram.left = left; + bigram.right = right; + bigram.score = tok_score.score; + bigram.size = text.size(); + work_queue_.push(bigram); + } + + const gptneox_vocab & vocab_; + std::vector symbols_; + gptneox_sp_bigram::queue work_queue_; +}; + +static std::vector gptneox_tokenize(const gptneox_vocab & vocab, const std::string & text, bool bos) { + gptneox_tokenizer tokenizer(vocab); + std::vector output; + + if (text.size() == 0) { + return output; + } + + if (bos) { + output.push_back(gptneox_token_bos()); + } + + tokenizer.tokenize(text, output); + return output; +} + +// +// sampling +// + +void gptneox_sample_softmax(struct gptneox_context * ctx, gptneox_token_data_array * candidates) { + assert(candidates->size > 0); + + const int64_t t_start_sample_us = ggml_time_us(); + + // Sort the logits in descending order + if (!candidates->sorted) { + std::sort(candidates->data, candidates->data + candidates->size, [](const gptneox_token_data & a, const gptneox_token_data & b) { + return a.logit > b.logit; + }); + candidates->sorted = true; + } + + float max_l = candidates->data[0].logit; + float cum_sum = 0.0f; + for (size_t i = 0; i < candidates->size; ++i) { + float p = expf(candidates->data[i].logit - max_l); + candidates->data[i].p = p; + cum_sum += p; + } + for (size_t i = 0; i < candidates->size; ++i) { + candidates->data[i].p /= cum_sum; + } + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void gptneox_sample_top_k(struct gptneox_context * ctx, gptneox_token_data_array * candidates, int k, size_t min_keep) { + const int64_t t_start_sample_us = ggml_time_us(); + + k = std::max(k, (int) min_keep); + k = std::min(k, (int) candidates->size); + + // Sort scores in descending order + if (!candidates->sorted) { + auto comp = [](const gptneox_token_data & a, const gptneox_token_data & b) { + return a.logit > b.logit; + }; + if (k == (int) candidates->size) { + std::sort(candidates->data, candidates->data + candidates->size, comp); + } else { + std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp); + } + candidates->sorted = true; + } + candidates->size = k; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void gptneox_sample_top_p(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep) { + if (p >= 1.0f) { + return; + } + + const int64_t t_start_sample_us = ggml_time_us(); + + gptneox_sample_softmax(ctx, candidates); + + // Compute the cumulative probabilities + float cum_sum = 0.0f; + size_t last_idx = candidates->size; + + for (size_t i = 0; i < candidates->size; ++i) { + cum_sum += candidates->data[i].p; + + // Check if the running sum is greater than p or if we have kept at least min_keep tokens + if (cum_sum > p && i >= min_keep) { + last_idx = i; + break; 
+ } + } + + // Resize the output vector to keep only the top-p tokens + candidates->size = last_idx; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void gptneox_sample_tail_free(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float z, size_t min_keep) { + if (z >= 1.0f || candidates->size <= 2) { + return; + } + + const int64_t t_start_sample_us = ggml_time_us(); + + gptneox_sample_softmax(nullptr, candidates); + + // Compute the first and second derivatives + std::vector first_derivatives(candidates->size - 1); + std::vector second_derivatives(candidates->size - 2); + + for (size_t i = 0; i < first_derivatives.size(); ++i) { + first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p; + } + for (size_t i = 0; i < second_derivatives.size(); ++i) { + second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1]; + } + + // Calculate absolute value of second derivatives + for (size_t i = 0; i < second_derivatives.size(); ++i) { + second_derivatives[i] = abs(second_derivatives[i]); + } + + // Normalize the second derivatives + float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f); + for (float & value : second_derivatives) { + value /= second_derivatives_sum; + } + + float cum_sum = 0.0f; + size_t last_idx = candidates->size; + for (size_t i = 0; i < second_derivatives.size(); ++i) { + cum_sum += second_derivatives[i]; + + // Check if the running sum is greater than z or if we have kept at least min_keep tokens + if (cum_sum > z && i >= min_keep) { + last_idx = i; + break; + } + } + + // Resize the output vector to keep only the tokens above the tail location + candidates->size = last_idx; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + + +void gptneox_sample_typical(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep) { + // Reference implementation: + // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr + if (p >= 1.0f) { + return; + } + + const int64_t t_start_sample_us = ggml_time_us(); + + // Compute the softmax of logits and calculate entropy + gptneox_sample_softmax(nullptr, candidates); + + float entropy = 0.0f; + for (size_t i = 0; i < candidates->size; ++i) { + entropy += -candidates->data[i].p * logf(candidates->data[i].p); + } + + // Compute the absolute difference between negative log probability and entropy for each candidate + std::vector shifted_scores; + for (size_t i = 0; i < candidates->size; ++i) { + float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy); + shifted_scores.push_back(shifted_score); + } + + // Sort tokens based on the shifted_scores and their corresponding indices + std::vector indices(candidates->size); + std::iota(indices.begin(), indices.end(), 0); + + std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) { + return shifted_scores[a] < shifted_scores[b]; + }); + + // Compute the cumulative probabilities + float cum_sum = 0.0f; + size_t last_idx = indices.size(); + + for (size_t i = 0; i < indices.size(); ++i) { + size_t idx = indices[i]; + cum_sum += candidates->data[idx].p; + + // Check if the running sum is greater than typical or if we have kept at least min_keep tokens + if (cum_sum > p && i >= min_keep - 1) { + last_idx = i + 1; + break; + } + } + + // Resize the output vector to keep only the locally typical tokens + std::vector new_candidates; + for (size_t i = 0; i < 
last_idx; ++i) { + size_t idx = indices[i]; + new_candidates.push_back(candidates->data[idx]); + } + + // Replace the data in candidates with the new_candidates data + std::copy(new_candidates.begin(), new_candidates.end(), candidates->data); + candidates->size = new_candidates.size(); + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void gptneox_sample_temperature(struct gptneox_context * ctx, gptneox_token_data_array * candidates_p, float temp) { + const int64_t t_start_sample_us = ggml_time_us(); + + for (size_t i = 0; i < candidates_p->size; ++i) { + candidates_p->data[i].logit /= temp; + } + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void gptneox_sample_repetition_penalty(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens, size_t last_tokens_size, float penalty) { + if (last_tokens_size == 0 || penalty == 1.0f) { + return; + } + + const int64_t t_start_sample_us = ggml_time_us(); + + for (size_t i = 0; i < candidates->size; ++i) { + auto token_iter = std::find(last_tokens, last_tokens + last_tokens_size, candidates->data[i].id); + if (token_iter == last_tokens + last_tokens_size) { + continue; + } + + // The academic publication that described this technique actually just only divided, but that would cause tokens with negative logits to become more likely, which is obviously wrong. + // This is common fix for this problem, which is to multiply by the penalty instead of dividing. + if (candidates->data[i].logit <= 0) { + candidates->data[i].logit *= penalty; + } else { + candidates->data[i].logit /= penalty; + } + } + + candidates->sorted = false; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + +void gptneox_sample_frequency_and_presence_penalties(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) { + if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) { + return; + } + + const int64_t t_start_sample_us = ggml_time_us(); + + // Create a frequency map to count occurrences of each token in last_tokens + std::unordered_map token_count; + for (size_t i = 0; i < last_tokens_size; ++i) { + token_count[last_tokens_p[i]]++; + } + + // Apply frequency and presence penalties to the candidates + for (size_t i = 0; i < candidates->size; ++i) { + auto token_iter = token_count.find(candidates->data[i].id); + if (token_iter == token_count.end()) { + continue; + } + + int count = token_iter->second; + candidates->data[i].logit -= float(count) * alpha_frequency + float(count > 0) * alpha_presence; + } + + candidates->sorted = false; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + + +gptneox_token gptneox_sample_token_mirostat(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, int m, float * mu) { + assert(ctx); + auto N = float(gptneox_n_vocab(ctx)); + int64_t t_start_sample_us; + t_start_sample_us = ggml_time_us(); + + gptneox_sample_softmax(nullptr, candidates); + + // Estimate s_hat using the most probable m tokens + float s_hat = 0.0; + float sum_ti_bi = 0.0; + float sum_ti_sq = 0.0; + for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) { + float t_i = logf(float(i + 2) / float(i + 1)); + float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p); + sum_ti_bi += t_i * b_i; + sum_ti_sq += t_i * t_i; + } 
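+    // Least-squares estimate of the Zipf exponent s_hat over the top m tokens:
+    //   t_i = log((i+2)/(i+1)),  b_i = log(p_i / p_{i+1}),
+    //   s_hat = sum(t_i * b_i) / sum(t_i^2).
+    // s_hat feeds the Mirostat formula below, which picks the top-k cutoff whose
+    // expected surprise tracks the current target *mu.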
+ s_hat = sum_ti_bi / sum_ti_sq; + + // Compute k from the estimated s_hat and target surprise value + float epsilon_hat = s_hat - 1; + float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat); + + // Sample the next word X using top-k sampling + gptneox_sample_top_k(nullptr, candidates, int(k), 1); + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } + gptneox_token X = gptneox_sample_token(ctx, candidates); + t_start_sample_us = ggml_time_us(); + + // Compute error as the difference between observed surprise and target surprise value + size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const gptneox_token_data & candidate) { + return candidate.id == X; + })); + float observed_surprise = -log2f(candidates->data[X_idx].p); + float e = observed_surprise - tau; + + // Update mu using the learning rate and error + *mu = *mu - eta * e; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + ctx->n_sample++; + } + return X; +} + +gptneox_token gptneox_sample_token_mirostat_v2(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, float * mu) { + assert(ctx); + int64_t t_start_sample_us; + t_start_sample_us = ggml_time_us(); + + gptneox_sample_softmax(ctx, candidates); + + // Truncate the words with surprise values greater than mu + candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const gptneox_token_data & candidate) { + return -log2f(candidate.p) > *mu; + })); + + // Normalize the probabilities of the remaining words + gptneox_sample_softmax(ctx, candidates); + + // Sample the next word X from the remaining words + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } + gptneox_token X = gptneox_sample_token(ctx, candidates); + t_start_sample_us = ggml_time_us(); + + // Compute error as the difference between observed surprise and target surprise value + size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const gptneox_token_data & candidate) { + return candidate.id == X; + })); + float observed_surprise = -log2f(candidates->data[X_idx].p); + float e = observed_surprise - tau; + + // Update mu using the learning rate and error + *mu = *mu - eta * e; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } + return X; +} + +gptneox_token gptneox_sample_token_greedy(struct gptneox_context * ctx, gptneox_token_data_array * candidates) { + const int64_t t_start_sample_us = ggml_time_us(); + + // Find max element + auto max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const gptneox_token_data & a, const gptneox_token_data & b) { + return a.logit < b.logit; + }); + + gptneox_token result = max_iter->id; + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + ctx->n_sample++; + } + return result; +} + +gptneox_token gptneox_sample_token(struct gptneox_context * ctx, gptneox_token_data_array * candidates) { + assert(ctx); + const int64_t t_start_sample_us = ggml_time_us(); + gptneox_sample_softmax(nullptr, candidates); + + std::vector probs; + probs.reserve(candidates->size); + for (size_t i = 0; i < candidates->size; ++i) { + probs.push_back(candidates->data[i].p); + } + + std::discrete_distribution<> dist(probs.begin(), probs.end()); + auto & rng = ctx->rng; + int idx = dist(rng); + + gptneox_token 
result = candidates->data[idx].id; + + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + ctx->n_sample++; + return result; +} + +// +// quantization +// + +// temp - load then save model, allows for load and save to be different +static void gptneox_model_copy_internal(const std::string & fname_inp, const std::string & fname_out, enum gptneox_ftype ftype) { + std::unique_ptr model_loader(new gptneox_model_loader(fname_inp.c_str(), + /*use_mmap*/ false, + /*vocab_only*/ false)); + gptneox_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), ftype); + + size_t idx = 0; + for (gptneox_load_tensor & tensor : model_loader->tensors_map.tensors) { + gptneox_buffer read_data; + read_data.resize(tensor.size); + tensor.data = read_data.addr; + model_loader->load_data_for(tensor); + + printf("[%4zu/%4zu] %36s - %16s, type = %6s, ", + ++idx, model_loader->tensors_map.tensors.size(), + tensor.name.c_str(), gptneox_format_tensor_shape(tensor.ne).c_str(), + ggml_type_name(tensor.type)); + + file_saver.write_tensor(tensor, tensor.type, tensor.data, tensor.size); + } +} + +int gptneox_model_copy( + const char * fname_inp, + const char * fname_out, + enum gptneox_ftype ftype) { + try { + gptneox_model_copy_internal(fname_inp, fname_out, ftype); + return 0; + } catch (const std::string & err) { + fprintf(stderr, "%s: failed to copy: %s\n", __func__, err.c_str()); + return 1; + } +} + + +static void gptneox_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, enum gptneox_ftype ftype, int nthread) { + ggml_type quantized_type; + switch (ftype) { + case GPTNEOX_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break; + case GPTNEOX_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break; + case GPTNEOX_FTYPE_MOSTLY_Q4_2: quantized_type = GGML_TYPE_Q4_2; break; + case GPTNEOX_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break; + case GPTNEOX_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break; + case GPTNEOX_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break; + default: throw format("invalid output file type %d\n", ftype); + }; + + if (nthread <= 0) { + nthread = std::thread::hardware_concurrency(); + } + + std::unique_ptr model_loader(new gptneox_model_loader(fname_inp.c_str(), /*use_mmap*/ false, + /*vocab_only*/ false)); + gptneox_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), ftype); + + size_t total_size_org = 0; + size_t total_size_new = 0; + std::vector hist_all(1 << 4, 0); + + std::vector workers; + std::mutex mutex; + + size_t idx = 0; + for (gptneox_load_tensor & tensor : model_loader->tensors_map.tensors) { + gptneox_buffer read_data; + read_data.resize(tensor.size); + tensor.data = read_data.addr; + model_loader->load_data_for(tensor); + + printf("[%4zu/%4zu] %36s - %16s, type = %6s, ", + ++idx, model_loader->tensors_map.tensors.size(), + tensor.name.c_str(), gptneox_format_tensor_shape(tensor.ne).c_str(), + ggml_type_name(tensor.type)); + + // This used to be a regex, but has an extreme cost to compile times. + bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'? 
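+        // tensor.name.rfind("weight") == size - 6 is a cheap ends-with-"weight" test;
+        // together with the 2-D check below it restricts quantization to the large
+        // matmul weights, while biases and layer-norm parameters keep their type.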
+
+        // quantize only 2D tensors
+        quantize &= (tensor.ne.size() == 2);
+
+        // uncomment this to keep the output layer in FP16
+        //if (tensor.name == "output.weight") {
+        //    quantize = false;
+        //}
+
+        enum ggml_type new_type;
+        void * new_data;
+        size_t new_size;
+        gptneox_buffer work;
+
+        if (!quantize) {
+            new_type = tensor.type;
+            new_data = tensor.data;
+            new_size = tensor.size;
+            printf("size = %8.3f MiB\n", tensor.size/1024.0/1024.0);
+        } else {
+            new_type = quantized_type;
+            float * f32_data;
+            size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
+            gptneox_buffer f32_conv_buf;
+            if (tensor.type == GGML_TYPE_F32) {
+                f32_data = (float *) tensor.data;
+            } else if (tensor.type == GGML_TYPE_F16) {
+                f32_conv_buf.resize(nelements * sizeof(float));
+                f32_data = (float *) f32_conv_buf.addr;
+                auto f16_data = (const ggml_fp16_t *) tensor.data;
+                for (size_t i = 0; i < nelements; i++) {
+                    f32_data[i] = ggml_fp16_to_fp32(f16_data[i]);
+                }
+            } else {
+                throw format("type %s unsupported for integer quantization", ggml_type_name(tensor.type));
+            }
+
+            printf("quantizing .. ");
+            fflush(stdout);
+
+            work.resize(nelements * 4); // upper bound on size
+            new_data = work.addr;
+            std::vector<int64_t> hist_cur(1 << 4, 0);
+
+            int chunk_size = 32 * 512;
+            const int nchunk = (nelements + chunk_size - 1)/chunk_size;
+            const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
+            if (nthread_use < 2) {
+                new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
+            } else {
+                size_t counter = 0;
+                new_size = 0;
+                // Worker threads pull fixed-size chunks off a shared counter under the
+                // mutex, quantize them without holding the lock, and fold their local
+                // histogram and byte count back into hist_cur / new_size on exit.
+                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () {
+                    std::vector<int64_t> local_hist;
+                    size_t local_size = 0;
+                    while (true) {
+                        std::unique_lock<std::mutex> lock(mutex);
+                        size_t first = counter; counter += chunk_size;
+                        if (first >= nelements) {
+                            if (!local_hist.empty()) {
+                                for (int j = 0; j < int(local_hist.size()); ++j) {
+                                    hist_cur[j] += local_hist[j];
+                                }
+                                new_size += local_size;
+                            }
+                            break;
+                        }
+                        lock.unlock();
+                        size_t last = std::min(nelements, first + chunk_size);
+                        if (local_hist.empty()) {
+                            local_hist.resize(hist_cur.size(), 0);
+                        }
+                        local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
+                    }
+                };
+                if ((int) workers.size() < nthread_use - 1) {
+                    workers.resize(nthread_use - 1);
+                }
+                for (int it = 0; it < nthread_use - 1; ++it) {
+                    workers[it] = std::thread(compute);
+                }
+                compute();
+                for (int it = 0; it < nthread_use - 1; ++it) {
+                    workers[it].join();
+                }
+            }
+
+            printf("size = %8.2f MiB -> %8.2f MiB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
+            for (size_t i = 0; i < hist_cur.size(); i++) {
+                hist_all[i] += hist_cur[i];
+            }
+
+            for (size_t i = 0; i < hist_cur.size(); i++) {
+                printf("%5.3f ", hist_cur[i] / float(nelements));
+            }
+            printf("\n");
+        }
+        total_size_org += tensor.size;
+        total_size_new += new_size;
+        file_saver.write_tensor(tensor, new_type, new_data, new_size);
+    }
+
+    printf("%s: model size = %8.2f MiB\n", __func__, total_size_org/1024.0/1024.0);
+    printf("%s: quant size = %8.2f MiB\n", __func__, total_size_new/1024.0/1024.0);
+
+    {
+        int64_t sum_all = 0;
+        for (size_t i = 0; i < hist_all.size(); i++) {
+            sum_all += hist_all[i];
+        }
+
+        printf("%s: hist: ", __func__);
+        for (size_t i = 0; i < hist_all.size(); i++) {
+            printf("%5.3f ", hist_all[i] / float(sum_all));
+        }
+        printf("\n");
+    }
+}
+
+//
+// interface implementation
+//
+
+struct gptneox_context * gptneox_init_from_file(
+        const char * path_model,
+        struct gptneox_context_params params) {
+    ggml_time_init();
+
+    gptneox_context * ctx = new gptneox_context;
+
+    if (params.seed <= 0) {
+        params.seed = time(NULL);
+    }
+
+    unsigned cur_percentage = 0;
+    if (params.progress_callback == NULL) {
+        params.progress_callback_user_data = &cur_percentage;
+        params.progress_callback = [](float progress, void * ctx) {
+            unsigned * cur_percentage_p = (unsigned *) ctx;
+            unsigned percentage = (unsigned) (100 * progress);
+            while (percentage > *cur_percentage_p) {
+                ++*cur_percentage_p;
+                fprintf(stderr, ".");
+                fflush(stderr);
+                if (percentage >= 100) {
+                    fprintf(stderr, "\n");
+                }
+            }
+        };
+    }
+
+    ctx->rng =
std::mt19937(params.seed); + ctx->logits_all = params.logits_all; + + ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32; + + if (!gptneox_model_load(path_model, *ctx, params.n_ctx, memory_type, + params.use_mmap, params.use_mlock, params.vocab_only, + params.progress_callback, params.progress_callback_user_data)) { + fprintf(stderr, "%s: failed to load model\n", __func__); + gptneox_free(ctx); + return nullptr; + } + + // reserve memory for context buffers + if (!params.vocab_only) { + if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx)) { + fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__); + gptneox_free(ctx); + return nullptr; + } + + { + const size_t memory_size = ggml_nbytes(ctx->model.kv_self.k) + ggml_nbytes(ctx->model.kv_self.v); + fprintf(stderr, "%s: kv self size = %7.2f MiB\n", __func__, memory_size / 1024.0 / 1024.0); + } + + const auto & hparams = ctx->model.hparams; + + // resized during inference + if (params.logits_all) { + ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab); + } else { + ctx->logits.reserve(hparams.n_vocab); + } + + if (params.embedding){ + ctx->embedding.resize(hparams.n_embd); + } + + ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type)); + + ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0().at(ctx->model.type)); + ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type)); + } + + return ctx; +} + +void gptneox_free(struct gptneox_context * ctx) { + delete ctx; +} + +int gptneox_model_quantize( + const char * fname_inp, + const char * fname_out, + enum gptneox_ftype ftype, + int nthread) { + try { + gptneox_model_quantize_internal(fname_inp, fname_out, ftype, nthread); + return 0; + } catch (const std::string & err) { + fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.c_str()); + return 1; + } +} + +int gptneox_apply_lora_from_file_internal(struct gptneox_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) { + fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); + + auto & model = ctx->model; + + const int64_t t_start_lora_us = ggml_time_us(); + + auto fin = std::ifstream(path_lora, std::ios::binary); + if (!fin) { + fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora); + return 1; + } + + // verify magic and version + { + uint32_t magic; + fin.read((char *) &magic, sizeof(magic)); + if (magic != 'ggla') { + fprintf(stderr, "%s: bad file magic\n", __func__); + return 1; + } + uint32_t format_version; + fin.read((char *) &format_version, sizeof(format_version)); + + if (format_version != 1) { + fprintf(stderr, "%s: unsupported file version\n", __func__ ); + return 1; + } + } + + int32_t lora_r; + int32_t lora_alpha; + fin.read((char *) &lora_r, sizeof(lora_r)); + fin.read((char *) &lora_alpha, sizeof(lora_alpha)); + float scaling = (float)lora_alpha / (float)lora_r; + + fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling); + + + // create a temporary ggml context to store the lora tensors + // todo: calculate size from biggest possible tensor + std::vector lora_buf(1024ull * 1024ull * 1024ull); + struct ggml_init_params params; + params.mem_size = lora_buf.size(); + params.mem_buffer = lora_buf.data(); + params.no_alloc = false; + + ggml_context * lora_ctx = ggml_init(params); + std::unordered_map lora_tensors; + + // create a name -> tensor map of the model to accelerate lookups + std::unordered_map 
model_tensors; + for (auto & kv: model.tensors_by_name) { + model_tensors.insert(kv); + } + + + // load base model + std::unique_ptr model_loader; + ggml_context * base_ctx = NULL; + gptneox_buffer base_buf; + if (path_base_model) { + fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model); + model_loader.reset(new gptneox_model_loader(path_base_model, /*use_mmap*/ true, /*vocab_only*/ false)); + + size_t ctx_size, mmapped_size; + model_loader->calc_sizes(&ctx_size, &mmapped_size); + base_buf.resize(ctx_size); + + ggml_init_params base_params; + base_params.mem_size = base_buf.size; + base_params.mem_buffer = base_buf.addr; + base_params.no_alloc = model_loader->use_mmap; + + base_ctx = ggml_init(base_params); + + model_loader->ggml_ctx = base_ctx; + + // maybe this should in gptneox_model_loader + if (model_loader->use_mmap) { + model_loader->mapping.reset(new gptneox_mmap(&model_loader->file_loaders.at(0)->file, /* prefetch */ false)); + } + } + + // read tensors and apply + bool warned = false; + int n_tensors = 0; + while (true) { + int32_t n_dims; + int32_t length; + int32_t ftype; + + fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + fin.read(reinterpret_cast(&length), sizeof(length)); + fin.read(reinterpret_cast(&ftype), sizeof(ftype)); + if (fin.eof()) { + break; + } + + int32_t ne[2] = { 1, 1 }; + for (int i = 0; i < n_dims; ++i) { + fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); + } + + std::string name(length, 0); + fin.read(&name[0], length); + + // check for lora suffix and get the type of tensor + const std::string lora_suffix = ".lora"; + size_t pos = name.rfind(lora_suffix); + if (pos == std::string::npos) { + fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str()); + return 1; + } + + std::string lora_type = name.substr(pos + lora_suffix.length()); + std::string base_name = name; + base_name.erase(pos); + // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str()); + + if (model_tensors.find(base_name.data()) == model_tensors.end()) { + fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data()); + return 1; + } + + // create ggml tensor + ggml_type wtype; + switch (ftype) { + case 0: wtype = GGML_TYPE_F32; break; + case 1: wtype = GGML_TYPE_F16; break; + default: + { + fprintf(stderr, "%s: invalid tensor data type '%d'\n", + __func__, ftype); + return false; + } + } + ggml_tensor* lora_tensor; + if (n_dims == 2) { + lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]); + } + else { + fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims); + return 1; + } + + // load tensor data + size_t offset = fin.tellg(); + size_t tensor_data_size = ggml_nbytes(lora_tensor); + offset = (offset + 31) & -32; + fin.seekg(offset); + fin.read((char*)lora_tensor->data, tensor_data_size); + + lora_tensors[name] = lora_tensor; + + // check if we have both A and B tensors and apply + if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() && + lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) { + + ggml_tensor * dest_t = model_tensors[base_name]; + ggml_tensor * base_t; + if (model_loader) { + // load from base model + if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) { + fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str()); + return 1; + } + size_t idx = model_loader->tensors_map.name_to_idx[base_name]; + 
gptneox_load_tensor & lt = model_loader->tensors_map.tensors[idx]; + base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }); + lt.data = (uint8_t *) lt.ggml_tensor->data; + model_loader->load_data_for(lt); + lt.ggml_tensor->data = lt.data; + } + else { + base_t = dest_t; + } + + if (ggml_is_quantized(base_t->type)) { + if (!warned) { + fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, " + "use a f16 or f32 base model with --lora-base\n", __func__); + warned = true; + } + } + + ggml_tensor * loraA = lora_tensors[base_name + ".loraA"]; + ggml_tensor * loraB = lora_tensors[base_name + ".loraB"]; + + if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) { + fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");" + " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]); + return 1; + } + + // w = w + BA*s + ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB); + + if (scaling != 1.0f) { + ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling); + BA = ggml_scale(lora_ctx, BA, scale_tensor); + } + + ggml_tensor * r; + if (base_t == dest_t) { + r = ggml_add_inplace(lora_ctx, dest_t, BA); + } + else { + r = ggml_add(lora_ctx, base_t, BA); + r = ggml_cpy(lora_ctx, r, dest_t); + } + + struct ggml_cgraph gf = ggml_build_forward(r); + gf.n_threads = n_threads; + ggml_graph_compute(lora_ctx, &gf); + + // we won't need these tensors again, reset the context to save memory + ggml_free(lora_ctx); + lora_ctx = ggml_init(params); + lora_tensors.clear(); + + n_tensors++; + if (n_tensors % 4 == 0) + fprintf(stderr, "."); + } + } + + // TODO: this should be in a destructor, it will leak on failure + ggml_free(lora_ctx); + if (base_ctx) { + ggml_free(base_ctx); + } + + const int64_t t_lora_us = ggml_time_us() - t_start_lora_us; + fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0); + + return 0; +} + +int gptneox_apply_lora_from_file(struct gptneox_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) { + try { + return gptneox_apply_lora_from_file_internal(ctx, path_lora, path_base_model, n_threads); + } catch (const std::string & err) { + fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.c_str()); + return 1; + } +} + +int gptneox_get_kv_cache_token_count(struct gptneox_context * ctx) { + return ctx->model.kv_self.n; +} + +#define GPTNEOX_MAX_RNG_STATE 64*1024 + +void gptneox_set_rng_seed(struct gptneox_context * ctx, int seed) { + if (seed <= 0) { + seed = time(NULL); + } + ctx->rng.seed(seed); +} + +// Returns the size of the state +size_t gptneox_get_state_size(struct gptneox_context * ctx) { + // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state. + // for reference, std::mt19937(1337) serializes to 6701 bytes. 
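+    // The serialized state is a flat blob laid out as:
+    //   [rng size][rng data, fixed 64 KiB slot]
+    //   [logits capacity][logits size][logits data, capacity slots]
+    //   [embedding size][embedding data]
+    //   [kv buffer size][kv token count][kv cache buffer]
+    // gptneox_copy_state_data() and gptneox_set_state_data() below walk the blob
+    // in exactly this order.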
+ const size_t s_rng_size = sizeof(size_t); + const size_t s_rng = GPTNEOX_MAX_RNG_STATE; + const size_t s_logits_capacity = sizeof(size_t); + const size_t s_logits_size = sizeof(size_t); + const size_t s_logits = ctx->logits.capacity() * sizeof(float); + const size_t s_embedding_size = sizeof(size_t); + const size_t s_embedding = ctx->embedding.size() * sizeof(float); + const size_t s_kv_size = sizeof(size_t); + const size_t s_kv_ntok = sizeof(int); + const size_t s_kv = ctx->model.kv_self.buf.size; + + const size_t s_total = ( + + s_rng_size + + s_rng + + s_logits_capacity + + s_logits_size + + s_logits + + s_embedding_size + + s_embedding + + s_kv_size + + s_kv_ntok + + s_kv + ); + + return s_total; +} + +// Copies the state to the specified destination address +size_t gptneox_copy_state_data(struct gptneox_context * ctx, uint8_t * dest) { + uint8_t * out = dest; + + // copy rng + { + std::stringstream rng_ss; + rng_ss << ctx->rng; + + const size_t rng_size = rng_ss.str().size(); + char rng_buf[GPTNEOX_MAX_RNG_STATE]; + + memset(&rng_buf[0], 0, GPTNEOX_MAX_RNG_STATE); + memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size()); + + memcpy(out, &rng_size, sizeof(rng_size)); out += sizeof(rng_size); + memcpy(out, &rng_buf[0], GPTNEOX_MAX_RNG_STATE); out += GPTNEOX_MAX_RNG_STATE; + } + + // copy logits + { + const size_t logits_cap = ctx->logits.capacity(); + const size_t logits_size = ctx->logits.size(); + + memcpy(out, &logits_cap, sizeof(logits_cap)); out += sizeof(logits_cap); + memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size); + + if (logits_size) { + memcpy(out, ctx->logits.data(), logits_size * sizeof(float)); + } + + out += logits_cap * sizeof(float); + } + + // copy embeddings + { + const size_t embedding_size = ctx->embedding.size(); + + memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size); + + if (embedding_size) { + memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float)); + out += embedding_size * sizeof(float); + } + } + + // copy kv cache + { + const size_t kv_size = ctx->model.kv_self.buf.size; + const int kv_ntok = gptneox_get_kv_cache_token_count(ctx); + + memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size); + memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok); + + if (kv_size) { + memcpy(out, ctx->model.kv_self.buf.addr, kv_size); out += kv_size; + } + } + + const size_t written = out - dest; + const size_t expected = gptneox_get_state_size(ctx); + + GPTNEOX_ASSERT(written == expected); + + return written; +} + +// Sets the state reading from the specified source address +size_t gptneox_set_state_data(struct gptneox_context * ctx, const uint8_t * src) { + const uint8_t * in = src; + + // set rng + { + size_t rng_size; + char rng_buf[GPTNEOX_MAX_RNG_STATE]; + + memcpy(&rng_size, in, sizeof(rng_size)); in += sizeof(rng_size); + memcpy(&rng_buf[0], in, GPTNEOX_MAX_RNG_STATE); in += GPTNEOX_MAX_RNG_STATE; + + std::stringstream rng_ss; + rng_ss.str(std::string(&rng_buf[0], rng_size)); + rng_ss >> ctx->rng; + + GPTNEOX_ASSERT(rng_ss.fail() == false); + } + + // set logits + { + size_t logits_cap; + size_t logits_size; + + memcpy(&logits_cap, in, sizeof(logits_cap)); in += sizeof(logits_cap); + memcpy(&logits_size, in, sizeof(logits_size)); in += sizeof(logits_size); + + GPTNEOX_ASSERT(ctx->logits.capacity() == logits_cap); + + if (logits_size) { + ctx->logits.resize(logits_size); + memcpy(ctx->logits.data(), in, logits_size * sizeof(float)); + } + + in += logits_cap * sizeof(float); + 
} + + // set embeddings + { + size_t embedding_size; + + memcpy(&embedding_size, in, sizeof(embedding_size)); in += sizeof(embedding_size); + + GPTNEOX_ASSERT(ctx->embedding.capacity() == embedding_size); + + if (embedding_size) { + memcpy(ctx->embedding.data(), in, embedding_size * sizeof(float)); + in += embedding_size * sizeof(float); + } + } + + // set kv cache + { + size_t kv_size; + int kv_ntok; + + memcpy(&kv_size, in, sizeof(kv_size)); in += sizeof(kv_size); + memcpy(&kv_ntok, in, sizeof(kv_ntok)); in += sizeof(kv_ntok); + + if (kv_size) { + GPTNEOX_ASSERT(ctx->model.kv_self.buf.size == kv_size); + + void * k_data = ctx->model.kv_self.k->data; // remember data pointers + void * v_data = ctx->model.kv_self.v->data; // because their value is stored in buf and overwritten by memcpy + + memcpy(ctx->model.kv_self.buf.addr, in, kv_size); in += kv_size; + + ctx->model.kv_self.k->data = k_data; // restore correct data pointers + ctx->model.kv_self.v->data = v_data; + + } + + ctx->model.kv_self.n = kv_ntok; + } + + const size_t nread = in - src; + const size_t expected = gptneox_get_state_size(ctx); + + GPTNEOX_ASSERT(nread == expected); + + return nread; +} + +int gptneox_eval( + struct gptneox_context * ctx, + const gptneox_token * tokens, + int n_tokens, + int n_past, + int n_threads) { + if (!gptneox_eval_internal(*ctx, tokens, n_tokens, n_past, n_threads)) { + fprintf(stderr, "%s: failed to eval\n", __func__); + return 1; + } + // get a more accurate load time, upon first eval + if (!ctx->has_evaluated_once) { + ctx->t_load_us = ggml_time_us() - ctx->t_start_us; + ctx->has_evaluated_once = true; + } + return 0; +} + +int gptneox_tokenize( + struct gptneox_context * ctx, + const char * text, + gptneox_token * tokens, + int n_max_tokens, + bool add_bos) { + auto res = gptneox_tokenize(ctx->vocab, text, add_bos); + + if (n_max_tokens < (int) res.size()) { + fprintf(stderr, "%s: too many tokens\n", __func__); + return -((int) res.size()); + } + + for (size_t i = 0; i < res.size(); i++) { + tokens[i] = res[i]; + } + + return res.size(); +} + +int gptneox_n_vocab(struct gptneox_context * ctx) { + return ctx->vocab.id_to_token.size(); +} + +int gptneox_n_ctx(struct gptneox_context * ctx) { + return ctx->model.hparams.n_ctx; +} + +int gptneox_n_embd(struct gptneox_context * ctx) { + return ctx->model.hparams.n_embd; +} + +float * gptneox_get_logits(struct gptneox_context * ctx) { + return ctx->logits.data(); +} + +float * gptneox_get_embeddings(struct gptneox_context * ctx) { + return ctx->embedding.data(); +} + +const char * gptneox_token_to_str(struct gptneox_context * ctx, gptneox_token token) { + if (token >= gptneox_n_vocab(ctx)) { + return nullptr; + } + + return ctx->vocab.id_to_token[token].tok.c_str(); +} + +gptneox_token gptneox_str_to_token(struct gptneox_context * ctx, const char * str) { + return ctx->vocab.token_to_id[str]; +} + +gptneox_token gptneox_token_bos() { + return 0; +} + +gptneox_token gptneox_token_eos() { + return 0; +} + +// Varies depending on gptneox model, use gptneox_str_to_token instead +gptneox_token gptneox_token_nl() { + return 13; +} + + +void gptneox_print_timings(struct gptneox_context * ctx) { + const int64_t t_end_us = ggml_time_us(); + + const int32_t n_sample = std::max(1, ctx->n_sample); + const int32_t n_eval = std::max(1, ctx->n_eval); + const int32_t n_p_eval = std::max(1, ctx->n_p_eval); + + fprintf(stderr, "\n"); + fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0); + fprintf(stderr, "%s: sample time = %8.2f ms / 
%5d runs (%8.2f ms per run)\n", __func__, 1e-3 * ctx->t_sample_us, n_sample, 1e-3 * ctx->t_sample_us / n_sample); + fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_p_eval_us, n_p_eval, 1e-3 * ctx->t_p_eval_us / n_p_eval); + fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3 * ctx->t_eval_us, n_eval, 1e-3 * ctx->t_eval_us / n_eval); + fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0); +} + +void gptneox_reset_timings(struct gptneox_context * ctx) { + ctx->t_start_us = ggml_time_us(); + ctx->t_sample_us = ctx->n_sample = 0; + ctx->t_eval_us = ctx->n_eval = 0; + ctx->t_p_eval_us = ctx->n_p_eval = 0; +} + +const char * gptneox_print_system_info(void) { + static std::string s; + + s = ""; + s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | "; + s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | "; + s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | "; + s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | "; + s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | "; + s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | "; + s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | "; + s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | "; + s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | "; + s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | "; + s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | "; + s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | "; + s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | "; + s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | "; + + return s.c_str(); +} + +// For internal test use +std::vector>& gptneox_internal_get_tensor_map(struct gptneox_context * ctx) { + return ctx->model.tensors_by_name; +} + +size_t gptneox_load_session_file(struct gptneox_context * ctx, const char * path_session, gptneox_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + // TODO leverage mmap + gptneox_file file(path_session, "rb"); + const uint32_t magic = file.read_u32(); + const uint32_t version = file.read_u32(); + + if (!(magic == 'ggsn' && version == 0)) { + fprintf(stderr, "%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version); + return 0; + } + + gptneox_hparams session_hparams; + file.read_raw(&session_hparams, sizeof(gptneox_hparams)); + + // REVIEW + if (session_hparams != ctx->model.hparams) { + fprintf(stderr, "%s : model hparams didn't match from session file!\n", __func__); + return 0; + } + + const uint32_t n_token_count = file.read_u32(); + GPTNEOX_ASSERT(n_token_capacity >= n_token_count); + file.read_raw(tokens_out, sizeof(gptneox_token) * n_token_count); + *n_token_count_out = n_token_count; + + const size_t n_state_size = file.size - file.tell(); + const size_t n_orig_state_size = gptneox_get_state_size(ctx); + if (n_state_size != n_orig_state_size) { + fprintf(stderr, "%s : failed to validate state size\n", __func__); + } + std::unique_ptr state_data(new uint8_t[n_state_size]); + file.read_raw(state_data.get(), n_state_size); + return gptneox_set_state_data(ctx, state_data.get()); +} + +size_t gptneox_save_session_file(struct gptneox_context * ctx, const char * path_session, const gptneox_token * tokens, size_t n_token_count) { + // TODO save temp & swap + gptneox_file file(path_session, "wb"); + + const 
size_t n_state_size = gptneox_get_state_size(ctx); + std::unique_ptr state_data(new uint8_t[n_state_size]); + gptneox_copy_state_data(ctx, state_data.get()); + + file.write_u32('ggsn'); // magic + file.write_u32(0); // version + file.write_raw(&ctx->model.hparams, sizeof(gptneox_hparams)); + + file.write_u32((uint32_t) n_token_count); // REVIEW + file.write_raw(tokens, sizeof(gptneox_token) * n_token_count); + + file.write_raw(state_data.get(), n_state_size); + return n_state_size; // REVIEW +} + diff --git a/third_party/radpajama/gptneox.h b/third_party/radpajama/gptneox.h new file mode 100644 index 000000000..1b1cfea25 --- /dev/null +++ b/third_party/radpajama/gptneox.h @@ -0,0 +1,275 @@ +#ifndef GPTNEOX_H +#define GPTNEOX_H + +#include +#include +#include + +#ifdef GPTNEOX_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef GPTNEOX_BUILD +# define GPTNEOX_API __declspec(dllexport) +# else +# define GPTNEOX_API __declspec(dllimport) +# endif +# else +# define GPTNEOX_API __attribute__ ((visibility ("default"))) +# endif +#else +# define GPTNEOX_API +#endif + +#define GPTNEOX_FILE_VERSION 1 +#define GPTNEOX_FILE_MAGIC 0x67676a74 // 'ggjt' in hex +#define GPTNEOX_FILE_MAGIC_UNVERSIONED 0x67676d6c // pre-versioned files + +#ifdef __cplusplus +extern "C" { +#endif + + // + // C interface + // + // TODO: show sample usage + // + + struct gptneox_context; + + typedef int gptneox_token; + + typedef struct gptneox_token_data { + gptneox_token id; // token id + float logit; // log-odds of the token + float p; // probability of the token + } gptneox_token_data; + + typedef struct gptneox_token_data_array { + gptneox_token_data * data; + size_t size; + bool sorted; + } gptneox_token_data_array; + + typedef void (*gptneox_progress_callback)(float progress, void *ctx); + + struct gptneox_context_params { + int n_ctx; // text context + int n_parts; // -1 for default + int seed; // RNG seed, 0 for random + + bool f16_kv; // use fp16 for KV cache + bool logits_all; // the gptneox_eval() call computes all logits, not just the last one + bool vocab_only; // only load the vocabulary, no weights + bool use_mmap; // use mmap if possible + bool use_mlock; // force system to keep model in RAM + bool embedding; // embedding mode only + + // called with a progress value between 0 and 1, pass NULL to disable + gptneox_progress_callback progress_callback; + // context pointer passed to the progress callback + void * progress_callback_user_data; + }; + + // model file types + enum gptneox_ftype { + GPTNEOX_FTYPE_ALL_F32 = 0, + GPTNEOX_FTYPE_MOSTLY_F16 = 1, // except 1d tensors + GPTNEOX_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors + GPTNEOX_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors + GPTNEOX_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 + GPTNEOX_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors + // GPTNEOX_FTYPE_MOSTLY_Q4_3 (6) support has been removed + GPTNEOX_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors + GPTNEOX_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors + GPTNEOX_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors + }; + + GPTNEOX_API struct gptneox_context_params gptneox_context_default_params(); + + GPTNEOX_API bool gptneox_mmap_supported(); + GPTNEOX_API bool gptneox_mlock_supported(); + + // Various functions for loading a ggml llama model. + // Allocate (almost) all memory needed for the model. 
+ // Return NULL on failure + GPTNEOX_API struct gptneox_context * gptneox_init_from_file( + const char * path_model, + struct gptneox_context_params params); + + // Frees all allocated memory + GPTNEOX_API void gptneox_free(struct gptneox_context * ctx); + + // TODO: not great API - very likely to change + // Returns 0 on success + // nthread - how many threads to use. If <=0, will use std::thread::hardware_concurrency(), else the number given + GPTNEOX_API int gptneox_model_quantize( + const char * fname_inp, + const char * fname_out, + enum gptneox_ftype ftype, + int nthread); + + GPTNEOX_API int gptneox_model_copy( + const char * fname_inp, + const char * fname_out, + enum gptneox_ftype ftype); + + // Apply a LoRA adapter to a loaded model + // path_base_model is the path to a higher quality model to use as a base for + // the layers modified by the adapter. Can be NULL to use the current loaded model. + // The model needs to be reloaded before applying a new adapter, otherwise the adapter + // will be applied on top of the previous one + // Returns 0 on success + GPTNEOX_API int gptneox_apply_lora_from_file( + struct gptneox_context * ctx, + const char * path_lora, + const char * path_base_model, + int n_threads); + + // Returns the number of tokens in the KV cache + GPTNEOX_API int gptneox_get_kv_cache_token_count(struct gptneox_context * ctx); + + // Sets the current rng seed. + GPTNEOX_API void gptneox_set_rng_seed(struct gptneox_context * ctx, int seed); + + // Returns the size in bytes of the state (rng, logits, embedding and kv_cache) + GPTNEOX_API size_t gptneox_get_state_size(struct gptneox_context * ctx); + + // Copies the state to the specified destination address. + // Destination needs to have allocated enough memory. + // Returns the number of bytes copied + GPTNEOX_API size_t gptneox_copy_state_data(struct gptneox_context * ctx, uint8_t * dest); + + // Set the state reading from the specified address + // Returns the number of bytes read + GPTNEOX_API size_t gptneox_set_state_data(struct gptneox_context * ctx, const uint8_t * src); + + // Save/load session file + GPTNEOX_API size_t gptneox_load_session_file(struct gptneox_context * ctx, const char * path_session, gptneox_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out); + GPTNEOX_API size_t gptneox_save_session_file(struct gptneox_context * ctx, const char * path_session, const gptneox_token * tokens, size_t n_token_count); + + // Run the llama inference to obtain the logits and probabilities for the next token. + // tokens + n_tokens is the provided batch of new tokens to process + // n_past is the number of tokens to use from previous eval calls + // Returns 0 on success + GPTNEOX_API int gptneox_eval( + struct gptneox_context * ctx, + const gptneox_token * tokens, + int n_tokens, + int n_past, + int n_threads); + + // Convert the provided text into tokens. + // The tokens pointer must be large enough to hold the resulting tokens. 
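+ // In practice the byte length of the input text, plus one for the optional BOS token, should be a safe upper bound for n_max_tokens.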
+ // Returns the number of tokens on success, no more than n_max_tokens + // Returns a negative number on failure - the number of tokens that would have been returned + // TODO: not sure if correct + GPTNEOX_API int gptneox_tokenize( + struct gptneox_context * ctx, + const char * text, + gptneox_token * tokens, + int n_max_tokens, + bool add_bos); + + GPTNEOX_API int gptneox_n_vocab(struct gptneox_context * ctx); + GPTNEOX_API int gptneox_n_ctx (struct gptneox_context * ctx); + GPTNEOX_API int gptneox_n_embd (struct gptneox_context * ctx); + + // Token logits obtained from the last call to gptneox_eval() + // The logits for the last token are stored in the last row + // Can be mutated in order to change the probabilities of the next token + // Rows: n_tokens + // Cols: n_vocab + GPTNEOX_API float * gptneox_get_logits(struct gptneox_context * ctx); + + // Get the embeddings for the input + // shape: [n_embd] (1-dimensional) + GPTNEOX_API float * gptneox_get_embeddings(struct gptneox_context * ctx); + + // Token Id -> String. Uses the vocabulary in the provided context + GPTNEOX_API const char * gptneox_token_to_str(struct gptneox_context * ctx, gptneox_token token); + + // String -> Token Id. Uses the vocabulary in the provided context + GPTNEOX_API gptneox_token gptneox_str_to_token(struct gptneox_context * ctx, const char * str); + + // Special tokens + GPTNEOX_API gptneox_token gptneox_token_bos(); + GPTNEOX_API gptneox_token gptneox_token_eos(); + // GPTNEOX_API gptneox_token gptneox_token_nl(); + + // TODO: improve the last_n_tokens interface ? + GPTNEOX_API gptneox_token gptneox_sample_top_p_top_k( + struct gptneox_context * ctx, + const gptneox_token * last_n_tokens_data, + int last_n_tokens_size, + int top_k, + float top_p, + float temp, + float repeat_penalty); + + // Sampling functions + + /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix. + GPTNEOX_API void gptneox_sample_repetition_penalty(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens, size_t last_tokens_size, float penalty); + + /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details. + GPTNEOX_API void gptneox_sample_frequency_and_presence_penalties(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence); + + /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits. + GPTNEOX_API void gptneox_sample_softmax(struct gptneox_context * ctx, gptneox_token_data_array * candidates); + + /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + GPTNEOX_API void gptneox_sample_top_k(struct gptneox_context * ctx, gptneox_token_data_array * candidates, int k, size_t min_keep); + + /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + GPTNEOX_API void gptneox_sample_top_p(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep); + + /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/. 
+ GPTNEOX_API void gptneox_sample_tail_free(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float z, size_t min_keep); + + /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. + GPTNEOX_API void gptneox_sample_typical(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep); + GPTNEOX_API void gptneox_sample_temperature(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float temp); + + /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. + /// @param candidates A vector of `gptneox_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. + /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm. + /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. + GPTNEOX_API gptneox_token gptneox_sample_token_mirostat(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, int m, float * mu); + + /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. + /// @param candidates A vector of `gptneox_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. + /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. + GPTNEOX_API gptneox_token gptneox_sample_token_mirostat_v2(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, float * mu); + + /// @details Selects the token with the highest probability. + GPTNEOX_API gptneox_token gptneox_sample_token_greedy(struct gptneox_context * ctx, gptneox_token_data_array * candidates); + + /// @details Randomly selects a token from the candidates based on their probabilities. 
+ GPTNEOX_API gptneox_token gptneox_sample_token(struct gptneox_context * ctx, gptneox_token_data_array * candidates); + + // Performance information + GPTNEOX_API void gptneox_print_timings(struct gptneox_context * ctx); + GPTNEOX_API void gptneox_reset_timings(struct gptneox_context * ctx); + + // Print system information + GPTNEOX_API const char * gptneox_print_system_info(void); + +#ifdef __cplusplus +} +#endif + +// Internal API to be implemented by llama.cpp and used by tests/benchmarks only +#ifdef GPTNEOX_API_INTERNAL + +#include +#include +struct ggml_tensor; + +std::vector>& gptneox_internal_get_tensor_map(struct gptneox_context * ctx); + +#endif + +#endif // GPTNEOX_H diff --git a/third_party/radpajama/main-redpajama-chat.cpp b/third_party/radpajama/main-redpajama-chat.cpp new file mode 100644 index 000000000..ae4d358d8 --- /dev/null +++ b/third_party/radpajama/main-redpajama-chat.cpp @@ -0,0 +1,467 @@ +// Defines sigaction on msys: +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "common-gptneox.h" +#include "gptneox.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) +#include +#include +#elif defined (_WIN32) +#include +#endif + +static console_state con_st; +static gptneox_context ** g_ctx; + +static bool is_interacting = false; + +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) +void sigint_handler(int signo) { + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); + printf("\n"); // this also force flush stdout. + if (signo == SIGINT) { + if (!is_interacting) { + is_interacting=true; + } else { + gptneox_print_timings(*g_ctx); + _exit(130); + } + } +} +#endif + +int main(int argc, char ** argv) { + gpt_params params; + params.model = "./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin"; + + if (gpt_params_parse(argc, argv, params) == false) { + return 1; + } + + // save choice to use color for later + // (note for later: this is a slightly awkward choice) + con_st.use_color = params.use_color; + +#if defined (_WIN32) + win32_console_init(params.use_color); +#endif + + if (params.perplexity) { + printf("\n************\n"); + printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__); + printf("************\n\n"); + + return 0; + } + + if (params.embedding) { + printf("\n************\n"); + printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__); + printf("************\n\n"); + + return 0; + } + + if (params.n_ctx > 2048) { + fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);" + "expect poor results\n", __func__, params.n_ctx); + } + + if (params.seed <= 0) { + params.seed = time(NULL); + } + + fprintf(stderr, "%s: seed = %d\n", __func__, params.seed); + + std::mt19937 rng(params.seed); + if (params.random_prompt) { + params.prompt = gpt_random_prompt(rng); + } + + gptneox_context * ctx; + g_ctx = &ctx; + + // load the model + { + auto lparams = gptneox_context_default_params(); + + lparams.n_ctx = params.n_ctx; + lparams.n_parts = params.n_parts; + lparams.seed = params.seed; + lparams.f16_kv = params.memory_f16; + lparams.use_mmap = params.use_mmap; + lparams.use_mlock = params.use_mlock; + + ctx = gptneox_init_from_file(params.model.c_str(), lparams); + + if (ctx == NULL) { + fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, 
params.model.c_str()); + return 1; + } + } + + if (!params.lora_adapter.empty()) { + int err = gptneox_apply_lora_from_file(ctx, + params.lora_adapter.c_str(), + params.lora_base.empty() ? NULL : params.lora_base.c_str(), + params.n_threads); + if (err != 0) { + fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__); + return 1; + } + } + + // print system information + { + fprintf(stderr, "\n"); + fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", + params.n_threads, std::thread::hardware_concurrency(), gptneox_print_system_info()); + } + + // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters + if (params.mem_test) { + { + const std::vector tmp(params.n_batch, 0); + gptneox_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads); + } + + { + const std::vector tmp = { 0, }; + gptneox_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads); + } + + gptneox_print_timings(ctx); + gptneox_free(ctx); + + return 0; + } + + // Always interactive for RedPajama chat model + params.interactive = true; + + if (params.interactive) { +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) + struct sigaction sigint_action; + sigint_action.sa_handler = sigint_handler; + sigemptyset (&sigint_action.sa_mask); + sigint_action.sa_flags = 0; + sigaction(SIGINT, &sigint_action, NULL); +#elif defined (_WIN32) + signal(SIGINT, sigint_handler); +#endif + } + fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", + params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty); + fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", params.n_ctx, params.n_batch, params.n_predict, params.n_keep); + fprintf(stderr, "\n\n"); + + // TODO: replace with ring-buffer + std::vector last_n_tokens = std::vector(); + //std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0); + + set_console_color(con_st, CONSOLE_COLOR_PROMPT); + + if (params.interactive) { + printf("== Running in interactive mode. 
==\n" +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) + " - Press Ctrl+C to interject at any time.\n" +#endif + " - Press Return to return control to RedPajama.\n" + " - If you want to submit another line, end your input in '\\'.\n\n"); + } + + const int32_t top_k = params.top_k; + const float top_p = params.top_p; + const float temp = params.temp; + const float repeat_penalty = params.repeat_penalty; + + // Chat loop + while (true) { + is_interacting = true; + + int n_past = 0; + + // Get input + + // potentially set color to indicate we are taking user input + set_console_color(con_st, CONSOLE_COLOR_USER_INPUT); + +#if defined (_WIN32) + // Windows: must reactivate sigint handler after each signal + signal(SIGINT, sigint_handler); +#endif + + if (params.instruct) { + printf("\n: "); + } + + std::string buffer; + if (!params.input_prefix.empty()) { + buffer += params.input_prefix; + printf("%s", buffer.c_str()); + } + + std::string line; + bool another_line = true; + do { +#if defined(_WIN32) + std::wstring wline; + if (!std::getline(std::wcin, wline)) { + // input stream is bad or EOF received + return 0; + } + win32_utf8_encode(wline, line); +#else + if (!std::getline(std::cin, line)) { + // input stream is bad or EOF received + return 0; + } +#endif + if (line.empty() || line.back() != '\\') { + another_line = false; + } else { + line.pop_back(); // Remove the continue character + } + buffer += line; + if (another_line) { + buffer += '\n'; + } + } while (another_line); + + is_interacting = false; + + // done taking input, reset color + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); + + // Check for input + if (buffer.length() <= 0) { + continue; // Restart loop for input + } + + // Tokenize prompt with RedPajama special tokens + + auto prompt_embd = ::gptneox_tokenize(ctx, buffer, false); + auto embd_inp = std::vector(); + + // Redpajama: insert special tokens for OA. (prefix) + embd_inp.push_back(gptneox_str_to_token(ctx, "<")); + embd_inp.push_back(gptneox_str_to_token(ctx, "human")); + embd_inp.push_back(gptneox_str_to_token(ctx, ">:")); + + embd_inp.insert(embd_inp.end(), prompt_embd.begin(), prompt_embd.end()); + + // Redpajama: insert special tokens for OA. (postfix) + embd_inp.push_back(gptneox_str_to_token(ctx, "\n")); + embd_inp.push_back(gptneox_str_to_token(ctx, "<")); + embd_inp.push_back(gptneox_str_to_token(ctx, "bot")); + embd_inp.push_back(gptneox_str_to_token(ctx, ">:")); + + + // Verbose prompt + if (params.verbose_prompt) { + fprintf(stderr, "\n"); + fprintf(stderr, "%s: prompt: '%s'\n", __func__, buffer.c_str()); + fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); + for (int i = 0; i < (int) embd_inp.size(); i++) { + fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], gptneox_token_to_str(ctx, embd_inp[i])); + } + fprintf(stderr, "\n"); + } + + // How many tokens to generate - check if theres space in context for atleast one token (or batch size tokens?) + auto inp_size = embd_inp.size(); + auto space = params.n_ctx - inp_size; + if(space <= 0) { + fprintf(stderr, "%s : input too long\n", __func__); + continue; + } + // Send batches to eval + while (n_past < inp_size) { + auto remaining = inp_size - n_past; + int n_eval = params.n_batch < remaining ? 
params.n_batch : remaining; + if (gptneox_eval(ctx, &embd_inp[n_past], n_eval, n_past, params.n_threads)) { + fprintf(stderr, ": %s : failed to eval\n", __func__); + return 1; + } + n_past += n_eval; + } + + const int n_ctx = gptneox_n_ctx(ctx); + const int n_vocab = gptneox_n_vocab(ctx); + + const float temp = params.temp; + const int32_t top_k = params.top_k <= 0 ? gptneox_n_vocab(ctx) : params.top_k; + const float top_p = params.top_p; + const float tfs_z = params.tfs_z; + const float typical_p = params.typical_p; + const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n; + const float repeat_penalty = params.repeat_penalty; + const float alpha_presence = params.presence_penalty; + const float alpha_frequency = params.frequency_penalty; + const int mirostat = params.mirostat; + const float mirostat_tau = params.mirostat_tau; + const float mirostat_eta = params.mirostat_eta; + const bool penalize_nl = params.penalize_nl; + + // Eval until space runs out + auto out_count = 0; + + printf(":"); + while (space > 0) { + // Get token + gptneox_token id = 0; + + { + auto logits = gptneox_get_logits(ctx); + + // Apply params.logit_bias map + for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) { + logits[it->first] += it->second; + } + + std::vector candidates; + candidates.reserve(n_vocab); + for (gptneox_token token_id = 0; token_id < n_vocab; token_id++) { + candidates.emplace_back(gptneox_token_data{token_id, logits[token_id], 0.0f}); + } + + gptneox_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; + + // Apply penalties + gptneox_token nl_token = gptneox_str_to_token(ctx, "\n"); + float nl_logit = logits[nl_token]; + auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx); + gptneox_sample_repetition_penalty(ctx, &candidates_p, + last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, + last_n_repeat, repeat_penalty); + gptneox_sample_frequency_and_presence_penalties(ctx, &candidates_p, + last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, + last_n_repeat, alpha_frequency, alpha_presence); + if (!penalize_nl) { + logits[nl_token] = nl_logit; + } + + if (temp <= 0) { + // Greedy sampling + id = gptneox_sample_token_greedy(ctx, &candidates_p); + } else { + if (mirostat == 1) { + static float mirostat_mu = 2.0f * mirostat_tau; + const int mirostat_m = 100; + gptneox_sample_temperature(ctx, &candidates_p, temp); + id = gptneox_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); + } else if (mirostat == 2) { + static float mirostat_mu = 2.0f * mirostat_tau; + gptneox_sample_temperature(ctx, &candidates_p, temp); + id = gptneox_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); + } else { + // Temperature sampling + gptneox_sample_top_k(ctx, &candidates_p, top_k, 1); + gptneox_sample_tail_free(ctx, &candidates_p, tfs_z, 1); + gptneox_sample_typical(ctx, &candidates_p, typical_p, 1); + gptneox_sample_top_p(ctx, &candidates_p, top_p, 1); + gptneox_sample_temperature(ctx, &candidates_p, temp); + id = gptneox_sample_token(ctx, &candidates_p); + } + } + } + + // Inc out count and dec space + out_count += 1; + space -= 1; + // Repeat tokens update + last_n_tokens.push_back(id); + if (last_n_tokens.size() > params.repeat_last_n) { + last_n_tokens.erase(last_n_tokens.begin()); + } + // Redpajama: check if the interactive is done. 
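+ // Note: this three-token window only matches when last_n_tokens holds exactly three
+ // entries, i.e. when --repeat_last_n is 3 (the value used in the README examples);
+ // with a larger window the loop instead stops on EOS or when the context budget runs out.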
+ //std::cout<<" last_n_tokens.size: "<< last_n_tokens[0] <<" "<< last_n_tokens[1] <<" "<< last_n_tokens[2] << std::endl; + if (last_n_tokens.size()==3 && last_n_tokens[0]==gptneox_str_to_token(ctx, "<") + && last_n_tokens[1]==gptneox_str_to_token(ctx, "human") && last_n_tokens[2]==gptneox_str_to_token(ctx, ">:")){ + space = 0; + continue; + } + + // Check for eos - end early - check eos before bos in case they are the same + if (id == gptneox_token_eos()) { + space = 0; + continue; + } + // Check for bos - skip callback if so + if (id == gptneox_token_bos()) { + continue; + } + // Convert token to string and display + // printf("%s(%d)", gptneox_token_to_str(ctx, id), id); + + + if (last_n_tokens[2]==gptneox_str_to_token(ctx, "<")){ + ; + } + else if (last_n_tokens[2]==gptneox_str_to_token(ctx, "human")){ + if (last_n_tokens[1]==gptneox_str_to_token(ctx, "<")){ + ; + } + else{ + printf("%s", gptneox_token_to_str(ctx, id)); + } + } + else if (last_n_tokens[1]==gptneox_str_to_token(ctx, "<")){ + printf("<"); + printf("%s", gptneox_token_to_str(ctx, id)); + } + else{ + printf("%s", gptneox_token_to_str(ctx, id)); + } + fflush(stdout); + // Check if we need to run another eval + if (space > 0) { + // Send generated token back into model for next generation + if (gptneox_eval(ctx, &id, 1, n_past, params.n_threads)) { + fprintf(stderr, "%s : failed to eval\n", __func__); + return 1; + } + // Increment past count + n_past += 1; + } + // Check for user interrupt + if (is_interacting) { space = 0; } + } + printf("\n"); + //printf("\n %d", space); + fflush(stdout); + } + +#if defined (_WIN32) + signal(SIGINT, SIG_DFL); +#endif + + gptneox_print_timings(ctx); + gptneox_free(ctx); + + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); + + return 0; +} + + + + + + + diff --git a/third_party/radpajama/main-redpajama.cpp b/third_party/radpajama/main-redpajama.cpp new file mode 100644 index 000000000..a37740b96 --- /dev/null +++ b/third_party/radpajama/main-redpajama.cpp @@ -0,0 +1,622 @@ +// Defines sigaction on msys: +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "common-gptneox.h" +#include "gptneox.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) +#include +#include +#elif defined (_WIN32) +#include +#endif + +static console_state con_st; +static gptneox_context ** g_ctx; + +static bool is_interacting = false; + +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) +void sigint_handler(int signo) { + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); + printf("\n"); // this also force flush stdout. 
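+ // The first Ctrl+C requests control back at the next opportunity; a second Ctrl+C
+ // while already interacting prints the timings and exits with status 130.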
+ if (signo == SIGINT) { + if (!is_interacting) { + is_interacting=true; + } else { + gptneox_print_timings(*g_ctx); + _exit(130); + } + } +} +#endif + +int main(int argc, char ** argv) { + gpt_params params; + params.model = "./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-f16.bin"; + + if (gpt_params_parse(argc, argv, params) == false) { + return 1; + } + + // save choice to use color for later + // (note for later: this is a slightly awkward choice) + con_st.use_color = params.use_color; + +#if defined (_WIN32) + win32_console_init(params.use_color); +#endif + + if (params.perplexity) { + printf("\n************\n"); + printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__); + printf("************\n\n"); + + return 0; + } + + if (params.embedding) { + printf("\n************\n"); + printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__); + printf("************\n\n"); + + return 0; + } + + if (params.n_ctx > 2048) { + fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);" + "expect poor results\n", __func__, params.n_ctx); + } + + if (params.seed < 0) { + params.seed = time(NULL); + } + + fprintf(stderr, "%s: seed = %d\n", __func__, params.seed); + + std::mt19937 rng(params.seed); + if (params.random_prompt) { + params.prompt = gpt_random_prompt(rng); + } + +// params.prompt = R"(// this function checks if the number n is prime +//bool is_prime(int n) {)"; + + gptneox_context * ctx; + g_ctx = &ctx; + + // load the model + { + auto lparams = gptneox_context_default_params(); + + lparams.n_ctx = params.n_ctx; + lparams.n_parts = params.n_parts; + lparams.seed = params.seed; + lparams.f16_kv = params.memory_f16; + lparams.use_mmap = params.use_mmap; + lparams.use_mlock = params.use_mlock; + + ctx = gptneox_init_from_file(params.model.c_str(), lparams); + + if (ctx == NULL) { + fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str()); + return 1; + } + } + + if (!params.lora_adapter.empty()) { + int err = gptneox_apply_lora_from_file(ctx, + params.lora_adapter.c_str(), + params.lora_base.empty() ? 
NULL : params.lora_base.c_str(), + params.n_threads); + if (err != 0) { + fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__); + return 1; + } + } + + // print system information + { + fprintf(stderr, "\n"); + fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", + params.n_threads, std::thread::hardware_concurrency(), gptneox_print_system_info()); + } + + // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters + // uncomment the "used_mem" line in llama.cpp to see the results + if (params.mem_test) { + { + const std::vector tmp(params.n_batch, 0); + gptneox_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads); + } + + { + const std::vector tmp = { 0, }; + gptneox_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads); + } + + gptneox_print_timings(ctx); + gptneox_free(ctx); + + return 0; + } + + std::string path_session = params.path_session; + std::vector session_tokens; + + if (!path_session.empty()) { + fprintf(stderr, "%s: attempting to load saved session from %s..\n", __func__, path_session.c_str()); + + // REVIEW - fopen to check for existing session + FILE * fp = std::fopen(path_session.c_str(), "rb"); + if (fp != NULL) { + std::fclose(fp); + + session_tokens.resize(params.n_ctx); + size_t n_token_count_out = 0; + const size_t n_session_bytes = gptneox_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out); + session_tokens.resize(n_token_count_out); + + if (n_session_bytes > 0) { + fprintf(stderr, "%s: loaded %zu bytes of session data!\n", __func__, n_session_bytes); + } else { + fprintf(stderr, "%s: could not load session file, will recreate\n", __func__); + } + } else { + fprintf(stderr, "%s: session file does not exist, will create\n", __func__); + } + } + + // tokenize the prompt + auto embd_inp = ::gptneox_tokenize(ctx, params.prompt, false); //true); + + const int n_ctx = gptneox_n_ctx(ctx); + + if ((int) embd_inp.size() > n_ctx - 4) { + fprintf(stderr, "%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4); + return 1; + } + + // debug message about similarity of saved session, if applicable + size_t n_matching_session_tokens = 0; + if (session_tokens.size()) { + for (gptneox_token id : session_tokens) { + if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) { + break; + } + n_matching_session_tokens++; + } + if (n_matching_session_tokens >= embd_inp.size()) { + fprintf(stderr, "%s: session file has exact match for prompt!\n", __func__); + } else if (n_matching_session_tokens < (embd_inp.size() / 2)) { + fprintf(stderr, "%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n", + __func__, n_matching_session_tokens, embd_inp.size()); + } else { + fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n", + __func__, n_matching_session_tokens, embd_inp.size()); + } + } + + // number of tokens to keep when resetting context + if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size() || params.instruct) { + params.n_keep = (int)embd_inp.size(); + } + + // in instruct mode, we inject a prefix and a suffix to each input by the user + if (params.instruct) { + params.interactive_first = true; + params.antiprompt.push_back("<|prompter|>"); + } + + // enable interactive mode if reverse prompt or interactive start is specified + if (params.antiprompt.size() != 0 || 
params.interactive_first) { + params.interactive = true; + } + + // determine newline token + auto gptneox_token_newline = ::gptneox_tokenize(ctx, "\n", false); + + if (params.verbose_prompt) { + fprintf(stderr, "\n"); + fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str()); + fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); + for (int i = 0; i < (int) embd_inp.size(); i++) { + fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], gptneox_token_to_str(ctx, embd_inp[i])); + } + if (params.n_keep > 0) { + fprintf(stderr, "%s: static prompt based on n_keep: '", __func__); + for (int i = 0; i < params.n_keep; i++) { + fprintf(stderr, "%s", gptneox_token_to_str(ctx, embd_inp[i])); + } + fprintf(stderr, "'\n"); + } + fprintf(stderr, "\n"); + } + + if (params.interactive) { +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) + struct sigaction sigint_action; + sigint_action.sa_handler = sigint_handler; + sigemptyset (&sigint_action.sa_mask); + sigint_action.sa_flags = 0; + sigaction(SIGINT, &sigint_action, NULL); +#elif defined (_WIN32) + signal(SIGINT, sigint_handler); +#endif + + fprintf(stderr, "%s: interactive mode on.\n", __func__); + + if (params.antiprompt.size()) { + for (auto antiprompt : params.antiprompt) { + fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str()); + } + } + + if (!params.input_prefix.empty()) { + fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str()); + } + } + fprintf(stderr, "sampling: repeat_last_n = %d, repeat_penalty = %f, presence_penalty = %f, frequency_penalty = %f, top_k = %d, tfs_z = %f, top_p = %f, typical_p = %f, temp = %f, mirostat = %d, mirostat_lr = %f, mirostat_ent = %f\n", + params.repeat_last_n, params.repeat_penalty, params.presence_penalty, params.frequency_penalty, params.top_k, params.tfs_z, params.top_p, params.typical_p, params.temp, params.mirostat, params.mirostat_eta, params.mirostat_tau); + fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep); + fprintf(stderr, "\n\n"); + + // TODO: replace with ring-buffer + std::vector last_n_tokens(n_ctx); + std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0); + + if (params.interactive) { + fprintf(stderr, "== Running in interactive mode. ==\n" +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) + " - Press Ctrl+C to interject at any time.\n" +#endif + " - Press Return to return control to RedPajama.\n" + " - If you want to submit another line, end your input in '\\'.\n\n"); + is_interacting = params.interactive_first; + } + + bool is_antiprompt = false; + bool input_noecho = false; + + // HACK - because session saving incurs a non-negligible delay, for now skip re-saving session + // if we loaded a session with at least 75% similarity. It's currently just used to speed up the + // initial prompt so it doesn't need to be an exact match. 
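+ // (i.e. re-save only when fewer than three quarters of the prompt tokens were
+ // already covered by the loaded session)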
+ bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < (embd_inp.size() * 3 / 4); + + + int n_past = 0; + int n_remain = params.n_predict; + int n_consumed = 0; + int n_session_consumed = 0; + + // the first thing we will do is to output the prompt, so set color accordingly + set_console_color(con_st, CONSOLE_COLOR_PROMPT); + + std::vector embd; + + while (n_remain != 0 || params.interactive) { + // predict + if (embd.size() > 0) { + // infinite text generation via context swapping + // if we run out of context: + // - take the n_keep first tokens from the original prompt (via n_past) + // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches + if (n_past + (int) embd.size() > n_ctx) { + const int n_left = n_past - params.n_keep; + + n_past = params.n_keep; + + // insert n_left/2 tokens at the start of embd from last_n_tokens + embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size()); + + // REVIEW - stop saving session if we run out of context + path_session = ""; + + //printf("\n---\n"); + //printf("resetting: '"); + //for (int i = 0; i < (int) embd.size(); i++) { + // printf("%s", gptneox_token_to_str(ctx, embd[i])); + //} + //printf("'\n"); + //printf("\n---\n"); + } + + // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past) + // REVIEW + if (n_session_consumed < (int) session_tokens.size()) { + size_t i = 0; + for ( ; i < embd.size(); i++) { + if (embd[i] != session_tokens[n_session_consumed]) { + session_tokens.resize(n_session_consumed); + break; + } + + n_past++; + n_session_consumed++; + + if (n_session_consumed >= (int) session_tokens.size()) { + break; + } + } + if (i > 0) { + embd.erase(embd.begin(), embd.begin() + i); + } + } + + // evaluate tokens in batches + // embd is typically prepared beforehand to fit within a batch, but not always + for (int i = 0; i < (int) embd.size(); i += params.n_batch) { + int n_eval = (int) embd.size() - i; + if (n_eval > params.n_batch) { + n_eval = params.n_batch; + } + if (gptneox_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) { + fprintf(stderr, "%s : failed to eval\n", __func__); + return 1; + } + n_past += n_eval; + } + + if (embd.size() > 0 && !path_session.empty()) { + session_tokens.insert(session_tokens.end(), embd.begin(), embd.end()); + n_session_consumed = session_tokens.size(); + } + } + + embd.clear(); + + if ((int) embd_inp.size() <= n_consumed && !is_interacting) { + // out of user input, sample next token + const float temp = params.temp; + const int32_t top_k = params.top_k <= 0 ? gptneox_n_vocab(ctx) : params.top_k; + const float top_p = params.top_p; + const float tfs_z = params.tfs_z; + const float typical_p = params.typical_p; + const int32_t repeat_last_n = params.repeat_last_n < 0 ? 
n_ctx : params.repeat_last_n; + const float repeat_penalty = params.repeat_penalty; + const float alpha_presence = params.presence_penalty; + const float alpha_frequency = params.frequency_penalty; + const int mirostat = params.mirostat; + const float mirostat_tau = params.mirostat_tau; + const float mirostat_eta = params.mirostat_eta; + const bool penalize_nl = params.penalize_nl; + + // optionally save the session on first sample (for faster prompt loading next time) + if (!path_session.empty() && need_to_save_session) { + need_to_save_session = false; + gptneox_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size()); + } + + gptneox_token id = 0; + + { + auto logits = gptneox_get_logits(ctx); + auto n_vocab = gptneox_n_vocab(ctx); + + // Apply params.logit_bias map + for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) { + logits[it->first] += it->second; + } + + std::vector candidates; + candidates.reserve(n_vocab); + for (gptneox_token token_id = 0; token_id < n_vocab; token_id++) { + candidates.emplace_back(gptneox_token_data{token_id, logits[token_id], 0.0f}); + } + + gptneox_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; + + // Apply penalties + gptneox_token nl_token = gptneox_str_to_token(ctx, "\n"); + float nl_logit = logits[nl_token]; + auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx); + gptneox_sample_repetition_penalty(ctx, &candidates_p, + last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, + last_n_repeat, repeat_penalty); + gptneox_sample_frequency_and_presence_penalties(ctx, &candidates_p, + last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, + last_n_repeat, alpha_frequency, alpha_presence); + if (!penalize_nl) { + logits[nl_token] = nl_logit; + } + + if (temp <= 0) { + // Greedy sampling + id = gptneox_sample_token_greedy(ctx, &candidates_p); + } else { + if (mirostat == 1) { + static float mirostat_mu = 2.0f * mirostat_tau; + const int mirostat_m = 100; + gptneox_sample_temperature(ctx, &candidates_p, temp); + id = gptneox_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); + } else if (mirostat == 2) { + static float mirostat_mu = 2.0f * mirostat_tau; + gptneox_sample_temperature(ctx, &candidates_p, temp); + id = gptneox_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); + } else { + // Temperature sampling + gptneox_sample_top_k(ctx, &candidates_p, top_k, 1); + gptneox_sample_tail_free(ctx, &candidates_p, tfs_z, 1); + gptneox_sample_typical(ctx, &candidates_p, typical_p, 1); + gptneox_sample_top_p(ctx, &candidates_p, top_p, 1); + gptneox_sample_temperature(ctx, &candidates_p, temp); + id = gptneox_sample_token(ctx, &candidates_p); + } + } + // printf("`%d`", candidates_p.size); + + last_n_tokens.erase(last_n_tokens.begin()); + last_n_tokens.push_back(id); + } + + // replace end of text token with newline token when in interactive mode + if (id == gptneox_token_eos() && params.interactive && !params.instruct) { + id = gptneox_token_newline.front(); + if (params.antiprompt.size() != 0) { + // tokenize and inject first reverse prompt + const auto first_antiprompt = ::gptneox_tokenize(ctx, params.antiprompt.front(), false); + embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end()); + } + } + + // add it to the context + embd.push_back(id); + + // echo this to console + input_noecho = false; + + // decrement 
remaining sampling budget + --n_remain; + } else { + // some user input remains from prompt or interaction, forward it to processing + while ((int) embd_inp.size() > n_consumed) { + embd.push_back(embd_inp[n_consumed]); + last_n_tokens.erase(last_n_tokens.begin()); + last_n_tokens.push_back(embd_inp[n_consumed]); + ++n_consumed; + if ((int) embd.size() >= params.n_batch) { + break; + } + } + } + + // display text + if (!input_noecho) { + for (auto id : embd) { + printf("%s", gptneox_token_to_str(ctx, id)); + } + fflush(stdout); + } + // reset color to default if we there is no pending user input + if (!input_noecho && (int)embd_inp.size() == n_consumed) { + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); + } + + // in interactive mode, and not currently processing queued inputs; + // check if we should prompt the user for more + if (params.interactive && (int) embd_inp.size() <= n_consumed) { + + // check for reverse prompt + if (params.antiprompt.size()) { + std::string last_output; + for (auto id : last_n_tokens) { + last_output += gptneox_token_to_str(ctx, id); + } + + is_antiprompt = false; + // Check if each of the reverse prompts appears at the end of the output. + for (std::string & antiprompt : params.antiprompt) { + if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) { + is_interacting = true; + is_antiprompt = true; + set_console_color(con_st, CONSOLE_COLOR_USER_INPUT); + fflush(stdout); + break; + } + } + } + + if (n_past > 0 && is_interacting) { + // potentially set color to indicate we are taking user input + set_console_color(con_st, CONSOLE_COLOR_USER_INPUT); + +#if defined (_WIN32) + // Windows: must reactivate sigint handler after each signal + signal(SIGINT, sigint_handler); +#endif + + if (params.instruct) { + printf("\n> "); + } + + std::string buffer; + if (!params.input_prefix.empty()) { + buffer += params.input_prefix; + printf("%s", buffer.c_str()); + } + + std::string line; + bool another_line = true; + do { +#if defined(_WIN32) + std::wstring wline; + if (!std::getline(std::wcin, wline)) { + // input stream is bad or EOF received + return 0; + } + win32_utf8_encode(wline, line); +#else + if (!std::getline(std::cin, line)) { + // input stream is bad or EOF received + return 0; + } +#endif + if (line.empty() || line.back() != '\\') { + another_line = false; + } else { + line.pop_back(); // Remove the continue character + } + buffer += line + '\n'; // Append the line to the result + } while (another_line); + + // done taking input, reset color + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); + + // Add tokens to embd only if the input buffer is non-empty + // Entering a empty line lets the user pass control back + if (buffer.length() > 1) { + + auto line_inp = ::gptneox_tokenize(ctx, buffer, false); + embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end()); + n_remain -= line_inp.size(); + } + + input_noecho = true; // do not echo this again + } + + if (n_past > 0) { + is_interacting = false; + } + } + + // end of text token + if (!embd.empty() && embd.back() == gptneox_token_eos()) { + if (params.instruct) { + is_interacting = true; + } else { + fprintf(stderr, " [end of text]\n"); + break; + } + } + + // In interactive mode, respect the maximum number of tokens and drop back to user input when reached. 
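+ // n_remain is topped back up to n_predict below, so the next reply starts with a fresh budget.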
+ if (params.interactive && n_remain <= 0 && params.n_predict != -1) { + n_remain = params.n_predict; + is_interacting = true; + } + } + +#if defined (_WIN32) + signal(SIGINT, SIG_DFL); +#endif + printf("\n\n"); + gptneox_print_timings(ctx); + gptneox_free(ctx); + + set_console_color(con_st, CONSOLE_COLOR_DEFAULT); + + return 0; +} \ No newline at end of file diff --git a/third_party/radpajama/quantize-gptneox.cpp b/third_party/radpajama/quantize-gptneox.cpp new file mode 100644 index 000000000..1f2f69555 --- /dev/null +++ b/third_party/radpajama/quantize-gptneox.cpp @@ -0,0 +1,82 @@ +#include "ggml.h" +#include "gptneox.h" + +#include +#include +#include + +static const std::map GPTNEOX_FTYPE_MAP = { + {"q4_0", GPTNEOX_FTYPE_MOSTLY_Q4_0}, + {"q4_1", GPTNEOX_FTYPE_MOSTLY_Q4_1}, + {"q4_2", GPTNEOX_FTYPE_MOSTLY_Q4_2}, + //{"q4_3", GPTNEOX_FTYPE_MOSTLY_Q4_3}, + {"q5_0", GPTNEOX_FTYPE_MOSTLY_Q5_0}, + {"q5_1", GPTNEOX_FTYPE_MOSTLY_Q5_1}, + {"q8_0", GPTNEOX_FTYPE_MOSTLY_Q8_0}, +}; + +// usage: +// ./quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type +// +int main(int argc, char ** argv) { + ggml_time_init(); + + if (argc < 4) { + fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type [nthread]\n", argv[0]); + for (auto it = GPTNEOX_FTYPE_MAP.begin(); it != GPTNEOX_FTYPE_MAP.end(); it++) { + fprintf(stderr, " type = \"%s\" or %d\n", it->first.c_str(), it->second); + } + return 1; + } + + // needed to initialize f16 tables + { + struct ggml_init_params params = { 0, NULL, false }; + struct ggml_context * ctx = ggml_init(params); + ggml_free(ctx); + } + + const std::string fname_inp = argv[1]; + const std::string fname_out = argv[2]; + + enum gptneox_ftype ftype; + if (argv[3][0] == 'q') { + auto it = GPTNEOX_FTYPE_MAP.find(argv[3]); + if (it == GPTNEOX_FTYPE_MAP.end()) { + fprintf(stderr, "%s: unknown ftype '%s'\n", __func__, argv[3]); + return 1; + } + ftype = it->second; + } else { + ftype = (enum gptneox_ftype)atoi(argv[3]); + } + + int nthread = argc > 4 ? atoi(argv[4]) : 0; + + const int64_t t_main_start_us = ggml_time_us(); + + int64_t t_quantize_us = 0; + + // load the model + { + const int64_t t_start_us = ggml_time_us(); + + if (gptneox_model_quantize(fname_inp.c_str(), fname_out.c_str(), ftype, nthread)) { + fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); + return 1; + } + + t_quantize_us = ggml_time_us() - t_start_us; + } + + // report timing + { + const int64_t t_main_end_us = ggml_time_us(); + + printf("\n"); + printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0); + } + + return 0; +} \ No newline at end of file diff --git a/third_party/radpajama/scripts/convert_gptneox_to_ggml.py b/third_party/radpajama/scripts/convert_gptneox_to_ggml.py new file mode 100644 index 000000000..6a3294261 --- /dev/null +++ b/third_party/radpajama/scripts/convert_gptneox_to_ggml.py @@ -0,0 +1,144 @@ +# Convert Hugging Face fine-tuned gpt-neox-like models to ggml format + +import io +import os +import sys +import struct +import json +import code +import torch +import numpy as np + +from transformers import AutoModelForCausalLM, AutoTokenizer + +# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. 
+    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+    This is a significant percentage of your normal, say, 32K bpe vocab.
+    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+    And avoids mapping to whitespace/control characters the bpe code barfs on.
+    """
+    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
+    cs = bs[:]
+    n = 0
+    for b in range(2**8):
+        if b not in bs:
+            bs.append(b)
+            cs.append(2**8+n)
+            n += 1
+    cs = [chr(n) for n in cs]
+    return dict(zip(bs, cs))
+
+if len(sys.argv) < 3:
+    print("Usage: python convert_gptneox_to_ggml.py model_name dir-output [use-f32]")
+    print("  model_name: name of the model to convert. Example: 'bigscience/bloomz-560m'")
+    print("  dir-output: directory where the output file will be written")
+    print("  use-f32:    if present, use float32 instead of float16")
+    sys.exit(1)
+
+model_name = sys.argv[1]
+dir_out = sys.argv[2]
+model_cache_dir = dir_out + "-cache"
+
+# make sure the output directory exists
+os.makedirs(dir_out, exist_ok=True)
+
+# possible data types
+#   ftype == 0 -> float32
+#   ftype == 1 -> float16
+#
+# map from ftype to string
+ftype_str = ["f32", "f16"]
+ftype = 1
+if len(sys.argv) > 3:
+    ftype = 0
+
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+print("Loading model: ", model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16 if ftype == 1 else torch.float32,
+                                             cache_dir=model_cache_dir)
+model.eval()
+for p in model.parameters():
+    p.requires_grad = False
+hparams = model.config.to_dict()
+print("Model loaded: ", model_name)
+
+fn_bin = f"/ggml-{model_name.split('/')[-1]}-{ftype_str[ftype]}.bin"
+fn_out = dir_out + fn_bin
+fout = open(fn_out, "wb")
+
+ggml_file_magic = 0x67676d66   # 0x67676d6c is unversioned
+ggml_file_version = 0x00000001 # v1
+
+hparams["multiple_of"] = 1
+fout.write(struct.pack("i", ggml_file_magic))   # magic: ggmf in hex
+fout.write(struct.pack("i", ggml_file_version))
+fout.write(struct.pack("i", hparams["vocab_size"]))
+fout.write(struct.pack("i", hparams["max_position_embeddings"]))
+fout.write(struct.pack("i", hparams["hidden_size"]))
+fout.write(struct.pack("i", hparams["num_attention_heads"]))
+fout.write(struct.pack("i", hparams["num_hidden_layers"]))
+fout.write(struct.pack("i", int((hparams["hidden_size"] / hparams["num_attention_heads"]) * hparams["rotary_pct"]))) # rotary_dim
+fout.write(struct.pack("i", int(hparams["use_parallel_residual"])))
+fout.write(struct.pack("i", ftype))
+
+# Is this correct??
+dot_token = tokenizer.encode(".")[0]
+for i in range(hparams["vocab_size"]):
+    text = tokenizer.decode([i]).encode('utf-8')
+    fout.write(struct.pack("i", len(text)))
+    fout.write(text)
+
+list_vars = model.state_dict()
+
+print(hparams)
+
+for name in list_vars.keys():
+    if name.startswith('gpt_neox.layers.'):
+        if 'attention.masked_bias' in name or \
+           'attention.rotary_emb.inv_freq' in name or \
+           'attention.bias' in name:
+            continue
+    # No gradients for these
+    list_vars[name].requires_grad = False
+    src = name
+    nn = name
+
+    print(src, ' -> ', name)
+    data = list_vars[src].squeeze().numpy()
+    data = data.astype(np.float32)
+
+    n_dims = len(data.shape)
+    print(name, n_dims, data.shape)
+
+    # default type is fp32
+    ftype_cur = 0
+    if ftype == 1 and n_dims > 1:
+        print("  Converting to float16", data.shape, data[:3, :3].tolist())
+        data = data.astype(np.float16)
+        ftype_cur = 1
+    else:
+        print("  Converting to float32", data.shape,
+              data[:3, :3].tolist() if n_dims > 1 else data[:3].tolist())
+        data = data.astype(np.float32)
+
+    # header
+    str = name.encode('utf-8')
+    fout.write(struct.pack("iii", n_dims, len(str), ftype_cur))
+    for i in range(n_dims):
+        fout.write(struct.pack("i", data.shape[n_dims - 1 - i]))
+    print(str)
+    fout.write(str)
+
+    # data
+    data.tofile(fout)
+
+fout.close()
+
+print("Done. Output file: " + fn_out)
+print("")
diff --git a/third_party/radpajama/scripts/install-RedPajama-INCITE-Base-3B-v1.sh b/third_party/radpajama/scripts/install-RedPajama-INCITE-Base-3B-v1.sh
new file mode 100644
index 000000000..95be87af1
--- /dev/null
+++ b/third_party/radpajama/scripts/install-RedPajama-INCITE-Base-3B-v1.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# cd to scripts dir
+cd `dirname $0`
+
+# download model to models dir
+echo "Downloading model"
+python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Base-3B-v1 ../models/pythia
+
+# remove temp cache dir
+echo "Removing temp cache dir"
+rm -r ../models/pythia-cache
+
+# quantize model
+echo "Quantizing model (q4_0)"
+cd ../../..
+python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Base-3B-v1-f16.bin
+
+
+# done!
+echo "Done."
\ No newline at end of file
diff --git a/third_party/radpajama/scripts/install-RedPajama-INCITE-Chat-3B-v1.sh b/third_party/radpajama/scripts/install-RedPajama-INCITE-Chat-3B-v1.sh
new file mode 100644
index 000000000..efa8de356
--- /dev/null
+++ b/third_party/radpajama/scripts/install-RedPajama-INCITE-Chat-3B-v1.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# cd to scripts dir
+cd `dirname $0`
+
+# download model to models dir
+echo "Downloading model"
+python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Chat-3B-v1 ../models/pythia
+
+# remove temp cache dir
+echo "Removing temp cache dir"
+rm -r ../models/pythia-cache
+
+# quantize model
+echo "Quantizing model (q4_0)"
+cd ../../..
+python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin
+
+
+# done!
+echo "Done."
\ No newline at end of file
diff --git a/third_party/radpajama/scripts/install-RedPajama-INCITE-Instruct-3B-v1.sh b/third_party/radpajama/scripts/install-RedPajama-INCITE-Instruct-3B-v1.sh
new file mode 100644
index 000000000..3d6d70b9c
--- /dev/null
+++ b/third_party/radpajama/scripts/install-RedPajama-INCITE-Instruct-3B-v1.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# cd to scripts dir
+cd `dirname $0`
+
+# download model to models dir
+echo "Downloading model"
+python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Instruct-3B-v1 ../models/pythia
+
+# remove temp cache dir
+echo "Removing temp cache dir"
+rm -r ../models/pythia-cache
+
+# quantize model
+echo "Quantizing model (q4_0)"
+cd ../../..
+python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-f16.bin
+
+
+# done!
+echo "Done."
\ No newline at end of file
diff --git a/third_party/radpajama/scripts/quantize-gptneox.py b/third_party/radpajama/scripts/quantize-gptneox.py
new file mode 100644
index 000000000..629c1b411
--- /dev/null
+++ b/third_party/radpajama/scripts/quantize-gptneox.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python3
+
+"""Script to execute the "quantize" script on a given set of models."""
+
+import subprocess
+import argparse
+import glob
+import sys
+import os
+
+
+def main():
+    """Determine the quantize binary name for the platform, parse the
+    command line arguments, and execute the script.
+    """
+
+    if "linux" in sys.platform or "darwin" in sys.platform:
+        quantize_script_binary = "quantize-gptneox"
+
+    elif "win32" in sys.platform or "cygwin" in sys.platform:
+        quantize_script_binary = "quantize-gptneox.exe"
+
+    else:
+        print("WARNING: Unknown platform. Assuming a UNIX-like OS.\n")
+        quantize_script_binary = "quantize-gptneox"
+
+    parser = argparse.ArgumentParser(
+        prog='python3 quantize-gptneox.py',
+        description='This script quantizes the given models by applying the '
+        f'"{quantize_script_binary}" script on them.'
+    )
+    parser.add_argument('model_path')
+    #parser.add_argument(
+    #    'models', nargs='+', choices=('7B', '13B', '30B', '65B'),
+    #    help='The models to quantize.'
+    #)
+    parser.add_argument(
+        '-r', '--remove-16', action='store_true', dest='remove_f16',
+        help='Remove the f16 model after quantizing it.'
+    )
+    #parser.add_argument(
+    #    '-m', '--models-path', dest='models_path',
+    #    default=os.path.join(os.getcwd(), "models"),
+    #    help='Specify the directory where the models are located.'
+    #)
+    parser.add_argument(
+        '-q', '--quantize-script-path', dest='quantize_script_path',
+        default=os.path.join(os.getcwd(), quantize_script_binary),
+        help='Specify the path to the "quantize" script.'
+    )
+
+    parser.add_argument(
+        '--quantize-output-type', dest='quantize_output_type', type=str,
+        default='q4_0',
+        help='Specify the quantization type to produce '
+        '(q4_0, q4_1, q4_2, q5_0, q5_1 or q8_0).'
+    )
+
+
+    # TODO: Revise this code
+    # parser.add_argument(
+    #     '-t', '--threads', dest='threads', type='int',
+    #     default=os.cpu_count(),
+    #     help='Specify the number of threads to use to quantize many models at '
+    #     'once. Defaults to os.cpu_count().'
+    # )
+
+    args = parser.parse_args()
+    args.model_path = os.path.abspath(args.model_path)
+    #args.models_path = os.path.abspath(args.models_path)
+
+    if not os.path.isfile(args.quantize_script_path):
+        print(
+            f'The "{quantize_script_binary}" script was not found in the '
+            "current location.\nIf you want to use it from another location, "
+            "set the --quantize-script-path argument from the command line."
+        )
+        sys.exit(1)
+
+    #for model in args.models:
+    # The model is separated in various parts
+    # (ggml-model-f16.bin, ggml-model-f16.bin.0, ggml-model-f16.bin.1...)
+    #f16_model_path_base = os.path.join(
+    #    args.models_path, model, "ggml-model-f16.bin"
+    #)
+    f16_model_path_base = args.model_path
+
+    if not os.path.isfile(f16_model_path_base):
+        print(f'The file {f16_model_path_base} was not found')
+        sys.exit(1)
+
+    f16_model_parts_paths = map(
+        lambda filename: os.path.join(f16_model_path_base, filename),
+        glob.glob(f"{f16_model_path_base}*")
+    )
+
+    for f16_model_part_path in f16_model_parts_paths:
+        if not os.path.isfile(f16_model_part_path):
+            print(
+                f"The f16 model {os.path.basename(f16_model_part_path)} "
+                f"was not found in {os.path.dirname(f16_model_part_path)}{os.path.sep}"
+                ". If you want to use a model from another location, pass its "
+                "path as the model_path argument on the command line."
+            )
+            sys.exit(1)
+
+        __run_quantize_script(
+            args.quantize_script_path, f16_model_part_path, args.quantize_output_type
+        )
+
+        if args.remove_f16:
+            os.remove(f16_model_part_path)
+
+
+# This was extracted to a top-level function for parallelization, if
+# implemented. See https://github.com/ggerganov/llama.cpp/pull/222/commits/f8db3d6cd91bf1a1342db9d29e3092bc12dd783c#r1140496406
+
+def __run_quantize_script(script_path, f16_model_part_path, quantize_output_type):
+    """Run the quantize script specifying the path to it and the path to the
+    f16 model to quantize.
+    """
+
+    new_quantized_model_path = f16_model_part_path.replace("f16", quantize_output_type)
+    subprocess.run(
+        [script_path, f16_model_part_path, new_quantized_model_path, quantize_output_type],
+        check=True
+    )
+
+
+if __name__ == "__main__":
+    try:
+        main()
+
+    except subprocess.CalledProcessError:
+        print("\nAn error occurred while trying to quantize the models.")
+        sys.exit(1)
+
+    except KeyboardInterrupt:
+        sys.exit(0)
+
+    else:
+        print("\nSuccessfully quantized all models.")
\ No newline at end of file
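For readers inspecting the converted files, here is a minimal sketch (not part of the patch above; the helper name read_ggmf_header is hypothetical) of how the fixed-size header written by convert_gptneox_to_ggml.py could be read back. The magic value and field order simply mirror the struct.pack calls in that script, and native byte order is assumed to match the plain "i" format used by the writer.

```
#!/usr/bin/env python3
# Sketch: read back the ggmf header emitted by convert_gptneox_to_ggml.py.
import struct
import sys

def read_ggmf_header(path):
    # Field order mirrors the writer: vocab, context, hidden, heads, layers,
    # rotary_dim, use_parallel_residual, ftype.
    names = ("vocab_size", "max_position_embeddings", "hidden_size",
             "num_attention_heads", "num_hidden_layers", "rotary_dim",
             "use_parallel_residual", "ftype")
    with open(path, "rb") as f:
        magic, version = struct.unpack("2i", f.read(8))
        if magic != 0x67676d66:  # 'ggmf' magic written by the converter
            raise ValueError("not a versioned ggml (ggmf) file")
        fields = struct.unpack(f"{len(names)}i", f.read(4 * len(names)))
    return {"version": version, **dict(zip(names, fields))}

if __name__ == "__main__":
    print(read_ggmf_header(sys.argv[1]))
```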