Mirror of https://github.com/jart/cosmopolitan.git
Import radpajama (a redpajama.cpp fork) (#814)
This is the relevant commit: bfa6466199
Model download links:
https://huggingface.co/ceonlabs/radpajama/tree/main
Parent: cba15bc77a
Commit: b3e3359d22

30 changed files with 26497 additions and 0 deletions
Makefile (1 change)

@@ -147,6 +147,7 @@ include net/net.mk # │
 include third_party/vqsort/vqsort.mk # │
 include libc/log/log.mk # │
 include third_party/ggml/ggml.mk # │
+#include third_party/radpajama/radpajama.mk # │
 include third_party/bzip2/bzip2.mk # │
 include dsp/core/core.mk # │
 include libc/x/x.mk # │
third_party/radpajama/LICENSE (vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Georgi Gerganov

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
third_party/radpajama/README.cosmo (vendored, new file, 19 lines)

@@ -0,0 +1,19 @@
DESCRIPTION

radpajama is a port of ggml for the open source Red Pajama LLM. It
started as a fork of redpajama.cpp from Together Computer.

LICENSE

MIT

ORIGIN

github.com/togethercomputer/redpajama.cpp/
commit bfa6466199b8ef92185ecb72e2a550e12baf6602
Author: Szhangce <czhang@cs.stanford.edu>
Date:   Tue May 9 00:50:22 2023 +0200

    radpajama : Update README.md

LOCAL CHANGES

- Updated headers for COSMO build.
third_party/radpajama/README.md (vendored, new file, 143 lines)

@@ -0,0 +1,143 @@
# ggml Support for the RedPajama Model

## Acknowledgement

We highly appreciate the great effort behind the fork of [gptneox.cpp](https://github.com/byroneverson/gptneox.cpp). Our support for the RedPajama model is based mainly on that implementation. We extended the model configuration and fixed a bug that occurred when the use_parallel_residual flag was set to False in the original implementation. We also extended the chat model for RedPajama.
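
For orientation, the flag in question selects between the two GPT-NeoX residual layouts. A minimal sketch of the difference, assuming illustrative `Tensor`, `attention`, `mlp`, and `layernorm` helpers (this is not the port's actual ggml graph code):

```
// Sketch only: how use_parallel_residual changes one transformer block.
Tensor block(const Tensor & x, bool use_parallel_residual) {
    if (use_parallel_residual) {
        // Attention and MLP both read the same block input,
        // and their outputs are summed into a single residual.
        return x + attention(layernorm1(x)) + mlp(layernorm2(x));
    } else {
        // Sequential residual: the MLP reads the attention output.
        Tensor h = x + attention(layernorm1(x));
        return h + mlp(layernorm2(h));
    }
}
```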

## Usage

### RedPajama Chat model

- Build the code:

      make redpajama-chat quantize-gptneox

- Prepare the RedPajama model (f16 and q4_0) for ggml:

      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Chat-3B-v1.sh

- Run the RedPajama chat model (fp16):

      ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin \
          -c 2048 \
          -b 128 \
          -n 1 \
          -t 8 \
          --instruct \
          --color \
          --top_k 30 \
          --top_p 0.95 \
          --temp 0.8 \
          --repeat_last_n 3 \
          --repeat_penalty 1.1 \
          --seed 0

  Note that you may need to install torch and transformers to run the above scripts, e.g.:

      pip install torch==2.0.0
      pip install transformers==4.28.1

- Run the RedPajama chat model (q4_0):

      ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-q4_0.bin \
          -c 2048 \
          -b 128 \
          -n 1 \
          -t 8 \
          --instruct \
          --color \
          --top_k 30 \
          --top_p 0.95 \
          --temp 0.8 \
          --repeat_last_n 3 \
          --repeat_penalty 1.1 \
          --seed 0

- Run other quantized versions of the RedPajama Chat model (make sure you have the f16 model prepared before you run this):

  - Build the quantization tool if you have not already:

        make quantize-gptneox

  - Generate the quantized model; the supported types are q4_0, q4_1, q4_2, q5_0, q5_1, and q8_0. For example, to run q4_1 you need to do the following conversion:

        python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin --quantize-output-type q4_1
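
    The remaining types can be produced the same way; for instance, a small loop over them (a sketch assuming the same script interface as the command above; q4_1 is omitted because it was just generated):

        for t in q4_0 q4_2 q5_0 q5_1 q8_0; do
            python ./examples/redpajama/scripts/quantize-gptneox.py \
                ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin \
                --quantize-output-type $t
        done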

  - Then you can chat with the quantized model:

        ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-q4_1.bin \
            -c 2048 \
            -b 128 \
            -n 1 \
            -t 8 \
            --instruct \
            --color \
            --top_k 30 \
            --top_p 0.95 \
            --temp 0.8 \
            --repeat_last_n 3 \
            --repeat_penalty 1.1 \
            --seed 0

### RedPajama Base/Instruct model

- Build the code:

      make redpajama quantize-gptneox

- Prepare the RedPajama Base/Instruct model (f16 and q4_0) for ggml:

      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Base-3B-v1.sh
      # Or
      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Instruct-3B-v1.sh

- Run other quantized versions of the RedPajama Base/Instruct model (make sure you have the f16 model prepared before you run this). You can then generate the quantized model; the supported types are q4_0, q4_1, q4_2, q5_0, q5_1, and q8_0. For example, for RedPajama-Base q8_0 you need the following conversion:

      python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Base-3B-v1-f16.bin --quantize-output-type q8_0

- Run the RedPajama Base/Instruct model (e.g., RedPajama-Instruct q8_0):

      ./redpajama -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-q8_0.bin \
          -c 2048 \
          -b 128 \
          -n 1 \
          -t 8 \
          --color \
          --top_k 30 \
          --top_p 0.95 \
          --temp 0.8 \
          --repeat_last_n 3 \
          --repeat_penalty 1.1 \
          --seed 0 \
          --n_predict 256 \
          --verbose-prompt \
          -p "How to schedule a tour to Anfield:"

## Attribution

The following files are covered by an MIT license and were taken from:

https://github.com/byroneverson/gptneox.cpp

Thank you Byron.

```
common-gptneox.cpp
copy-gptneox.cpp
gptneox.cpp
quantize-gptneox.cpp
common-gptneox.h
gptneox-util.h
gptneox.h
convert_gptneox_to_ggml.py
quantize-gptneox.py
```
third_party/radpajama/common-gptneox.cpp (vendored, new file, 429 lines)

@@ -0,0 +1,429 @@
#include "common-gptneox.h"

#include <cassert>
#include <cstring>
#include <fstream>
#include <string>
#include <iterator>
#include <algorithm>
#include <sstream>
#include <iostream>

#if defined (_WIN32)
#include <fcntl.h>
#include <io.h>
#pragma comment(lib,"kernel32.lib")
extern "C" __declspec(dllimport) void* __stdcall GetStdHandle(unsigned long nStdHandle);
extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHandle, unsigned long* lpMode);
extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode);
extern "C" __declspec(dllimport) int __stdcall SetConsoleCP(unsigned int wCodePageID);
extern "C" __declspec(dllimport) int __stdcall SetConsoleOutputCP(unsigned int wCodePageID);
extern "C" __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int CodePage, unsigned long dwFlags,
                                                                   const wchar_t * lpWideCharStr, int cchWideChar,
                                                                   char * lpMultiByteStr, int cbMultiByte,
                                                                   const char * lpDefaultChar, bool * lpUsedDefaultChar);
#define CP_UTF8 65001
#endif

bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
    // determine sensible default number of threads.
    // std::thread::hardware_concurrency may not be equal to the number of cores, or may return 0.
#ifdef __linux__
    std::ifstream cpuinfo("/proc/cpuinfo");
    params.n_threads = std::count(std::istream_iterator<std::string>(cpuinfo),
                                  std::istream_iterator<std::string>(),
                                  std::string("processor"));
#endif
    if (params.n_threads == 0) {
        params.n_threads = std::max(1, (int32_t) std::thread::hardware_concurrency());
    }

    bool invalid_param = false;
    std::string arg;
    gpt_params default_params;

    for (int i = 1; i < argc; i++) {
        arg = argv[i];

        if (arg == "-s" || arg == "--seed") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.seed = std::stoi(argv[i]);
        } else if (arg == "-t" || arg == "--threads") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_threads = std::stoi(argv[i]);
        } else if (arg == "-p" || arg == "--prompt") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.prompt = argv[i];
        } else if (arg == "--session") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.path_session = argv[i];
        } else if (arg == "-f" || arg == "--file") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::ifstream file(argv[i]);
            if (!file) {
                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
                invalid_param = true;
                break;
            }
            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
            if (params.prompt.back() == '\n') {
                params.prompt.pop_back();
            }
        } else if (arg == "-n" || arg == "--n_predict") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_predict = std::stoi(argv[i]);
        } else if (arg == "--top_k") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.top_k = std::stoi(argv[i]);
        } else if (arg == "-c" || arg == "--ctx_size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_ctx = std::stoi(argv[i]);
        } else if (arg == "--memory_f32") {
            params.memory_f16 = false;
        } else if (arg == "--top_p") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.top_p = std::stof(argv[i]);
        } else if (arg == "--temp") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.temp = std::stof(argv[i]);
        } else if (arg == "--tfs") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.tfs_z = std::stof(argv[i]);
        } else if (arg == "--typical") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.typical_p = std::stof(argv[i]);
        } else if (arg == "--repeat_last_n") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.repeat_last_n = std::stoi(argv[i]);
        } else if (arg == "--repeat_penalty") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.repeat_penalty = std::stof(argv[i]);
        } else if (arg == "--frequency_penalty") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.frequency_penalty = std::stof(argv[i]);
        } else if (arg == "--presence_penalty") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.presence_penalty = std::stof(argv[i]);
        } else if (arg == "--mirostat") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.mirostat = std::stoi(argv[i]);
        } else if (arg == "--mirostat_lr") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.mirostat_eta = std::stof(argv[i]);
        } else if (arg == "--mirostat_ent") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.mirostat_tau = std::stof(argv[i]);
        } else if (arg == "-b" || arg == "--batch_size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        } else if (arg == "--keep") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_keep = std::stoi(argv[i]);
        } else if (arg == "-m" || arg == "--model") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.model = argv[i];
        } else if (arg == "--lora") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.lora_adapter = argv[i];
            params.use_mmap = false;
        } else if (arg == "--lora-base") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.lora_base = argv[i];
        } else if (arg == "-i" || arg == "--interactive") {
            params.interactive = true;
        } else if (arg == "--embedding") {
            params.embedding = true;
        } else if (arg == "--interactive-first") {
            params.interactive_first = true;
        } else if (arg == "-ins" || arg == "--instruct") {
            params.instruct = true;
        } else if (arg == "--color") {
            params.use_color = true;
        } else if (arg == "--mlock") {
            params.use_mlock = true;
        } else if (arg == "--no-mmap") {
            params.use_mmap = false;
        } else if (arg == "--mtest") {
            params.mem_test = true;
        } else if (arg == "--verbose-prompt") {
            params.verbose_prompt = true;
        } else if (arg == "-r" || arg == "--reverse-prompt") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.antiprompt.push_back(argv[i]);
        } else if (arg == "--perplexity") {
            params.perplexity = true;
        } else if (arg == "--ignore-eos") {
            params.logit_bias[gptneox_token_eos()] = -INFINITY;
        } else if (arg == "--no-penalize-nl") {
            params.penalize_nl = false;
        } else if (arg == "-l" || arg == "--logit-bias") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::stringstream ss(argv[i]);
            gptneox_token key;
            char sign;
            std::string value_str;
            try {
                if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
                    params.logit_bias[key] = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
                } else {
                    throw std::exception();
                }
            } catch (const std::exception &e) {
                invalid_param = true;
                break;
            }
        } else if (arg == "--n_parts") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.n_parts = std::stoi(argv[i]);
        } else if (arg == "-h" || arg == "--help") {
            gpt_print_usage(argc, argv, default_params);
            exit(0);
        } else if (arg == "--random-prompt") {
            params.random_prompt = true;
        } else if (arg == "--in-prefix") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.input_prefix = argv[i];
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            gpt_print_usage(argc, argv, default_params);
            exit(1);
        }
    }
    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        gpt_print_usage(argc, argv, default_params);
        exit(1);
    }

    return true;
}

void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
    fprintf(stderr, "usage: %s [options]\n", argv[0]);
    fprintf(stderr, "\n");
    fprintf(stderr, "options:\n");
    fprintf(stderr, "  -h, --help            show this help message and exit\n");
    fprintf(stderr, "  -i, --interactive     run in interactive mode\n");
    fprintf(stderr, "  --interactive-first   run in interactive mode and wait for input right away\n");
    fprintf(stderr, "  -ins, --instruct      run in instruction mode\n");
    fprintf(stderr, "  -r PROMPT, --reverse-prompt PROMPT\n");
    fprintf(stderr, "                        run in interactive mode and poll user input upon seeing PROMPT (can be\n");
    fprintf(stderr, "                        specified more than once for multiple prompts).\n");
    fprintf(stderr, "  --color               colorise output to distinguish prompt and user input from generations\n");
    fprintf(stderr, "  -s SEED, --seed SEED  RNG seed (default: -1, use random seed for <= 0)\n");
    fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
    fprintf(stderr, "  -p PROMPT, --prompt PROMPT\n");
    fprintf(stderr, "                        prompt to start generation with (default: empty)\n");
    fprintf(stderr, "  --session FNAME       file to cache model state in (may be large!) (default: none)\n");
    fprintf(stderr, "  --random-prompt       start with a randomized prompt.\n");
    fprintf(stderr, "  --in-prefix STRING    string to prefix user inputs with (default: empty)\n");
    fprintf(stderr, "  -f FNAME, --file FNAME\n");
    fprintf(stderr, "                        prompt file to start generation.\n");
    fprintf(stderr, "  -n N, --n_predict N   number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict);
    fprintf(stderr, "  --top_k N             top-k sampling (default: %d, 0 = disabled)\n", params.top_k);
    fprintf(stderr, "  --top_p N             top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p);
    fprintf(stderr, "  --tfs N               tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z);
    fprintf(stderr, "  --typical N           locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)params.typical_p);
    fprintf(stderr, "  --repeat_last_n N     last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", params.repeat_last_n);
    fprintf(stderr, "  --repeat_penalty N    penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)params.repeat_penalty);
    fprintf(stderr, "  --presence_penalty N  repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)params.presence_penalty);
    fprintf(stderr, "  --frequency_penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)params.frequency_penalty);
    fprintf(stderr, "  --mirostat N          use Mirostat sampling.\n");
    fprintf(stderr, "                        Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n");
    fprintf(stderr, "                        (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", params.mirostat);
    fprintf(stderr, "  --mirostat_lr N       Mirostat learning rate, parameter eta (default: %.1f)\n", (double)params.mirostat_eta);
    fprintf(stderr, "  --mirostat_ent N      Mirostat target entropy, parameter tau (default: %.1f)\n", (double)params.mirostat_tau);
    fprintf(stderr, "  -l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS\n");
    fprintf(stderr, "                        modifies the likelihood of token appearing in the completion,\n");
    fprintf(stderr, "                        i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n");
    fprintf(stderr, "                        or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n");
    fprintf(stderr, "  -c N, --ctx_size N    size of the prompt context (default: %d)\n", params.n_ctx);
    fprintf(stderr, "  --ignore-eos          ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n");
    fprintf(stderr, "  --no-penalize-nl      do not penalize newline token\n");
    fprintf(stderr, "  --memory_f32          use f32 instead of f16 for memory key+value\n");
    fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
    fprintf(stderr, "  --n_parts N           number of model parts (default: -1 = determine from dimensions)\n");
    fprintf(stderr, "  -b N, --batch_size N  batch size for prompt processing (default: %d)\n", params.n_batch);
    fprintf(stderr, "  --perplexity          compute perplexity over the prompt\n");
    fprintf(stderr, "  --keep                number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
    if (gptneox_mlock_supported()) {
        fprintf(stderr, "  --mlock               force system to keep model in RAM rather than swapping or compressing\n");
    }
    if (gptneox_mmap_supported()) {
        fprintf(stderr, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
    }
    fprintf(stderr, "  --mtest               compute maximum memory usage\n");
    fprintf(stderr, "  --verbose-prompt      print prompt before generation\n");
    fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
    fprintf(stderr, "  --lora-base FNAME     optional model to use as a base for the layers modified by the LoRA adapter\n");
    fprintf(stderr, "  -m FNAME, --model FNAME\n");
    fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
    fprintf(stderr, "\n");
}

std::string gpt_random_prompt(std::mt19937 & rng) {
    const int r = rng() % 10;
    switch (r) {
        case 0: return "So";
        case 1: return "Once upon a time";
        case 2: return "When";
        case 3: return "The";
        case 4: return "After";
        case 5: return "If";
        case 6: return "import";
        case 7: return "He";
        case 8: return "She";
        case 9: return "They";
        default: return "To";
    }

    return "The";
}

// TODO: not great allocating this every time
std::vector<gptneox_token> gptneox_tokenize(struct gptneox_context * ctx, const std::string & text, bool add_bos) {
    // initialize to prompt number of chars, since n_tokens <= n_prompt_chars
    std::vector<gptneox_token> res(text.size() + (int)add_bos);
    int n = gptneox_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos);
    assert(n >= 0);
    res.resize(n);

    return res;
}

/* Keep track of current color of output, and emit ANSI code if it changes. */
void set_console_color(console_state & con_st, console_color_t color) {
    if (con_st.use_color && con_st.color != color) {
        switch(color) {
            case CONSOLE_COLOR_DEFAULT:
                printf(ANSI_COLOR_RESET);
                break;
            case CONSOLE_COLOR_PROMPT:
                printf(ANSI_COLOR_YELLOW);
                break;
            case CONSOLE_COLOR_USER_INPUT:
                printf(ANSI_BOLD ANSI_COLOR_GREEN);
                break;
        }
        con_st.color = color;
    }
}

#if defined (_WIN32)
void win32_console_init(bool enable_color) {
    unsigned long dwMode = 0;
    void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11)
    if (!hConOut || hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode)) {
        hConOut = GetStdHandle((unsigned long)-12); // STD_ERROR_HANDLE (-12)
        if (hConOut && (hConOut == (void*)-1 || !GetConsoleMode(hConOut, &dwMode))) {
            hConOut = 0;
        }
    }
    if (hConOut) {
        // Enable ANSI colors on Windows 10+
        if (enable_color && !(dwMode & 0x4)) {
            SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4)
        }
        // Set console output codepage to UTF8
        SetConsoleOutputCP(CP_UTF8);
    }
    void* hConIn = GetStdHandle((unsigned long)-10); // STD_INPUT_HANDLE (-10)
    if (hConIn && hConIn != (void*)-1 && GetConsoleMode(hConIn, &dwMode)) {
        // Set console input codepage to UTF16
        _setmode(_fileno(stdin), _O_WTEXT);
    }
}

// Convert a wide Unicode string to an UTF8 string
void win32_utf8_encode(const std::wstring & wstr, std::string & str) {
    int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
    std::string strTo(size_needed, 0);
    WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), &strTo[0], size_needed, NULL, NULL);
    str = strTo;
}
#endif
third_party/radpajama/common-gptneox.h (vendored, new file, 108 lines)

@@ -0,0 +1,108 @@
// Various helper functions and utilities

#pragma once

#include "gptneox.h"

#include <string>
#include <vector>
#include <random>
#include <thread>
#include <unordered_map>

//
// CLI argument parsing
//

struct gpt_params {
    int32_t seed      = -1;  // RNG seed
    int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
    int32_t n_predict = 128; // new tokens to predict
    int32_t n_parts   = -1;  // amount of model parts (-1 = determine from model dimensions)
    int32_t n_ctx     = 512; // context size
    int32_t n_batch   = 512; // batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_keep    = 0;   // number of tokens to keep from initial prompt

    // sampling parameters
    std::unordered_map<gptneox_token, float> logit_bias; // logit bias for specific tokens
    int32_t top_k             = 40;    // <= 0 to use vocab size
    float   top_p             = 0.95f; // 1.0 = disabled
    float   tfs_z             = 1.00f; // 1.0 = disabled
    float   typical_p         = 1.00f; // 1.0 = disabled
    float   temp              = 0.80f; // 1.0 = disabled
    float   repeat_penalty    = 1.10f; // 1.0 = disabled
    int32_t repeat_last_n     = 64;    // last n tokens to penalize (0 = disable penalty, -1 = context size)
    float   frequency_penalty = 0.00f; // 0.0 = disabled
    float   presence_penalty  = 0.00f; // 0.0 = disabled
    int     mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
    float   mirostat_tau      = 5.00f; // target entropy
    float   mirostat_eta      = 0.10f; // learning rate

    std::string model = "./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat/Instruct-3B-v1-f16.bin"; // model path
    std::string prompt = "";
    std::string path_session = ""; // path to file for saving/loading model eval state
    std::string input_prefix = ""; // string to prefix user inputs with
    std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted

    std::string lora_adapter = ""; // lora adapter path
    std::string lora_base = "";    // base model path for the lora adapter

    bool memory_f16        = true;  // use f16 instead of f32 for memory kv
    bool random_prompt     = false; // do not randomize prompt if none provided
    bool use_color         = false; // use color to distinguish generations and inputs
    bool interactive       = false; // interactive mode

    bool embedding         = false; // get only sentence embedding
    bool interactive_first = false; // wait for user input immediately

    bool instruct          = false; // instruction mode
    bool penalize_nl       = true;  // consider newlines as a repeatable token
    bool perplexity        = false; // compute perplexity over the prompt
    bool use_mmap          = true;  // use mmap for faster loads
    bool use_mlock         = false; // use mlock to keep model in memory
    bool mem_test          = false; // compute maximum memory usage
    bool verbose_prompt    = false; // print prompt tokens before generation
};

bool gpt_params_parse(int argc, char ** argv, gpt_params & params);

void gpt_print_usage(int argc, char ** argv, const gpt_params & params);

std::string gpt_random_prompt(std::mt19937 & rng);

//
// Vocab utils
//

std::vector<gptneox_token> gptneox_tokenize(struct gptneox_context * ctx, const std::string & text, bool add_bos);

//
// Console utils
//

#define ANSI_COLOR_RED     "\x1b[31m"
#define ANSI_COLOR_GREEN   "\x1b[32m"
#define ANSI_COLOR_YELLOW  "\x1b[33m"
#define ANSI_COLOR_BLUE    "\x1b[34m"
#define ANSI_COLOR_MAGENTA "\x1b[35m"
#define ANSI_COLOR_CYAN    "\x1b[36m"
#define ANSI_COLOR_RESET   "\x1b[0m"
#define ANSI_BOLD          "\x1b[1m"

enum console_color_t {
    CONSOLE_COLOR_DEFAULT = 0,
    CONSOLE_COLOR_PROMPT,
    CONSOLE_COLOR_USER_INPUT
};

struct console_state {
    bool use_color = false;
    console_color_t color = CONSOLE_COLOR_DEFAULT;
};

void set_console_color(console_state & con_st, console_color_t color);

#if defined (_WIN32)
void win32_console_init(bool enable_color);
void win32_utf8_encode(const std::wstring & wstr, std::string & str);
#endif
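
For orientation, a minimal sketch of how a program might drive the API this header declares. It is not part of the commit: create_context() is a hypothetical stand-in for whatever loader gptneox.h actually provides; everything else uses only the declarations above.

    // Illustrative sketch only -- not part of this import.
    #include <cstdio>
    #include "common-gptneox.h"

    int main(int argc, char ** argv) {
        gpt_params params;
        if (!gpt_params_parse(argc, argv, params)) {
            return 1; // unknown flags already cause exit(1) inside the parser
        }
        std::mt19937 rng(params.seed);
        if (params.random_prompt) {
            params.prompt = gpt_random_prompt(rng); // e.g. "Once upon a time"
        }
        // create_context() is a hypothetical placeholder for the model loader.
        struct gptneox_context * ctx = create_context(params.model.c_str());
        if (!ctx) return 1;
        // Tokenize the prompt, prepending BOS, before feeding it to the model.
        std::vector<gptneox_token> tokens = gptneox_tokenize(ctx, params.prompt, true);
        fprintf(stderr, "prompt is %zu tokens\n", tokens.size());
        // Echo the prompt in the prompt color, then reset the console.
        console_state con_st;
        con_st.use_color = params.use_color;
        set_console_color(con_st, CONSOLE_COLOR_PROMPT);
        printf("%s", params.prompt.c_str());
        set_console_color(con_st, CONSOLE_COLOR_DEFAULT);
        return 0;
    }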
|
835
third_party/radpajama/common.cc
vendored
Normal file
835
third_party/radpajama/common.cc
vendored
Normal file
|
@ -0,0 +1,835 @@
|
|||
/*-*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-│
|
||||
│vi: set net ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi│
|
||||
╚──────────────────────────────────────────────────────────────────────────────╝
|
||||
│ │
|
||||
│ llama.com │
|
||||
│ Copyright (c) 2023 Justine Alexandra Roberts Tunney │
|
||||
│ Copyright (c) 2023 Georgi Gerganov │
|
||||
│ │
|
||||
│ Permission is hereby granted, free of charge, to any person obtaining │
|
||||
│ a copy of this software and associated documentation files (the │
|
||||
│ "Software"), to deal in the Software without restriction, including │
|
||||
│ without limitation the rights to use, copy, modify, merge, publish, │
|
||||
│ distribute, sublicense, and/or sell copies of the Software, and to │
|
||||
│ permit persons to whom the Software is furnished to do so, subject to │
|
||||
│ the following conditions: │
|
||||
│ │
|
||||
│ The above copyright notice and this permission notice shall be │
|
||||
│ included in all copies or substantial portions of the Software. │
|
||||
│ │
|
||||
│ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │
|
||||
│ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │
|
||||
│ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │
|
||||
│ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │
|
||||
│ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │
|
||||
│ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │
|
||||
│ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. │
|
||||
│ │
|
||||
╚─────────────────────────────────────────────────────────────────────────────*/
|
||||
#include "third_party/ggml/common.h"
|
||||
#include "libc/calls/calls.h"
|
||||
#include "libc/calls/struct/termios.h"
|
||||
#include "libc/calls/termios.h"
|
||||
#include "libc/runtime/runtime.h"
|
||||
#include "libc/stdio/stdio.h"
|
||||
#include "libc/str/str.h"
|
||||
#include "libc/sysv/consts/fileno.h"
|
||||
#include "third_party/libcxx/algorithm"
|
||||
#include "third_party/libcxx/cassert"
|
||||
#include "third_party/libcxx/cstring"
|
||||
#include "third_party/libcxx/fstream"
|
||||
#include "third_party/libcxx/iterator"
|
||||
#include "third_party/libcxx/sstream"
|
||||
#include "third_party/libcxx/string"
|
||||
|
||||
STATIC_YOINK("zipos");
|
||||
|
||||
asm(".ident\t\"\\n\\n\
|
||||
llama.cpp (MIT License)\\n\
|
||||
Copyright (c) 2023 Georgi Gerganov\"");
|
||||
asm(".include \"libc/disclaimer.inc\"");
|
||||
// clang-format off
|
||||
|
||||
static bool is_integer_str(const char *s) {
|
||||
if (*s == '-') ++s;
|
||||
if (!*s) return false;
|
||||
while (isdigit(*s)) ++s;
|
||||
return !*s;
|
||||
}
|
||||
|
||||
static std::string replace_all(std::string const& original,
|
||||
std::string const& before,
|
||||
std::string const& after) {
|
||||
// https://stackoverflow.com/a/7724536/1653720
|
||||
std::string retval;
|
||||
std::string::const_iterator end = original.end();
|
||||
std::string::const_iterator current = original.begin();
|
||||
std::string::const_iterator next =
|
||||
std::search(current, end, before.begin(), before.end());
|
||||
while (next != end) {
|
||||
retval.append(current, next);
|
||||
retval.append(after);
|
||||
current = next + before.size();
|
||||
next = std::search(current, end, before.begin(), before.end());
|
||||
}
|
||||
retval.append(current, next);
|
||||
return retval;
|
||||
}
|
||||
|
||||
static bool append_file_to_prompt(const char *path, gpt_params & params) {
|
||||
std::ifstream file(path);
|
||||
if (!file) {
|
||||
fprintf(stderr, "error: failed to open file '%s'\n", path);
|
||||
return false;
|
||||
}
|
||||
std::copy(std::istreambuf_iterator<char>(file),
|
||||
std::istreambuf_iterator<char>(),
|
||||
back_inserter(params.prompt));
|
||||
if (params.prompt.back() == '\n') {
|
||||
params.prompt.pop_back();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
|
||||
params.n_threads = std::min(20, std::max(1, (int)(_getcpucount() * 0.75)));
|
||||
|
||||
bool invalid_param = false;
|
||||
std::string arg;
|
||||
gpt_params default_params;
|
||||
|
||||
for (int i = 1; i < argc; i++) {
|
||||
arg = argv[i];
|
||||
|
||||
if (arg == "-s" || arg == "--seed") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.seed = std::stoi(argv[i]);
|
||||
} else if (arg == "-v" || arg == "--verbose") {
|
||||
++params.verbose;
|
||||
} else if (arg == "-t" || arg == "--threads") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.n_threads = std::stoi(argv[i]);
|
||||
} else if (arg == "-p" || arg == "--prompt") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.prompt = argv[i];
|
||||
} else if (arg == "-C" || arg == "--prompt_cache") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.prompt_path = argv[i];
|
||||
} else if (arg == "-f" || arg == "--file") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
if (!append_file_to_prompt(argv[i], params)) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
} else if (arg == "-n" || arg == "--n_predict") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.n_predict = std::stoi(argv[i]);
|
||||
} else if (arg == "--top_k") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.top_k = std::stoi(argv[i]);
|
||||
} else if (arg == "-c" || arg == "--ctx_size") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.n_ctx = std::stoi(argv[i]);
|
||||
} else if (arg == "--memory_f32") {
|
||||
params.memory_f16 = false;
|
||||
} else if (arg == "--top_p") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.top_p = std::stof(argv[i]);
|
||||
} else if (arg == "--temp") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.temp = std::stof(argv[i]);
|
||||
} else if (arg == "--repeat_last_n") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.repeat_last_n = std::stoi(argv[i]);
|
||||
} else if (arg == "--repeat_penalty") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.repeat_penalty = std::stof(argv[i]);
|
||||
} else if (arg == "--frequency_penalty") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.frequency_penalty = std::stof(argv[i]);
|
||||
} else if (arg == "--presence_penalty") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.presence_penalty = std::stof(argv[i]);
|
||||
} else if (arg == "--mirostat") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.mirostat = std::stoi(argv[i]);
|
||||
} else if (arg == "--mirostat_lr") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.mirostat_eta = std::stof(argv[i]);
|
||||
} else if (arg == "--mirostat_ent") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.mirostat_tau = std::stof(argv[i]);
|
||||
} else if (arg == "-b" || arg == "--batch_size") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.n_batch = std::stoi(argv[i]);
|
||||
params.n_batch = std::min(512, params.n_batch);
|
||||
} else if (arg == "--keep") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.n_keep_str = argv[i];
|
||||
if (is_integer_str(argv[i])) {
|
||||
params.n_keep = std::stoi(params.n_keep_str);
|
||||
if (!params.n_keep) {
|
||||
params.n_keep_str = "";
|
||||
}
|
||||
}
|
||||
} else if (arg == "-m" || arg == "--model") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.model = argv[i];
|
||||
} else if (arg == "--lora") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.lora_adapter = argv[i];
|
||||
params.use_mmap = false;
|
||||
} else if (arg == "--lora-base") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.lora_base = argv[i];
|
||||
} else if (arg == "-i" || arg == "--interactive") {
|
||||
params.interactive = true;
|
||||
} else if (arg == "--embedding") {
|
||||
params.embedding = true;
|
||||
} else if (arg == "--interactive-first") {
|
||||
params.interactive_first = true;
|
||||
} else if (arg == "-ins" || arg == "--instruct") {
|
||||
params.instruct = true;
|
||||
} else if (arg == "--multiline-input") {
|
||||
params.multiline_input = true;
|
||||
} else if (arg == "--color") {
|
||||
params.use_color = true;
|
||||
} else if (arg == "--mlock") {
|
||||
params.use_mlock = true;
|
||||
} else if (arg == "--no-mmap") {
|
||||
params.use_mmap = false;
|
||||
} else if (arg == "--mtest") {
|
||||
params.mem_test = true;
|
||||
} else if (arg == "--verbose-prompt") {
|
||||
params.verbose_prompt = true;
|
||||
} else if (arg == "-r" || arg == "--reverse-prompt") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.antiprompt.push_back(argv[i]);
|
||||
} else if (arg == "--perplexity") {
|
||||
params.perplexity = true;
|
||||
} else if (arg == "--ignore-eos") {
|
||||
params.logit_bias[llama_token_eos()] = -INFINITY;
|
||||
} else if (arg == "--no-penalize-nl") {
|
||||
params.penalize_nl = false;
|
||||
} else if (arg == "-l" || arg == "--logit-bias") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
std::stringstream ss(argv[i]);
|
||||
llama_token key = 0;
|
||||
char sign = 0;
|
||||
std::string value_str;
|
||||
if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
|
||||
params.logit_bias[key] = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
|
||||
} else {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
} else if (arg == "--n_parts") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.n_parts = std::stoi(argv[i]);
|
||||
} else if (arg == "-h" || arg == "--help") {
|
||||
gpt_print_usage(stdout, argc, argv, default_params);
|
||||
exit(0);
|
||||
} else if (arg == "--random-prompt") {
|
||||
params.random_prompt = true;
|
||||
} else if (arg == "--in-prefix") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.input_prefix = argv[i];
|
||||
} else if (arg == "--in-suffix") {
|
||||
if (++i >= argc) {
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
params.input_suffix = argv[i];
|
||||
} else {
|
||||
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||
gpt_print_usage(stderr, argc, argv, default_params);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
if (invalid_param) {
|
||||
fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
|
||||
gpt_print_usage(stderr, argc, argv, default_params);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
// if no prompt is specified, then use companion ai
|
||||
if (params.prompt.empty()) {
|
||||
if (params.verbose) {
|
||||
fprintf(stderr, "%s: No prompt specified\n", __func__);
|
||||
fprintf(stderr, "%s: Loading CompanionAI\n", __func__);
|
||||
}
|
||||
append_file_to_prompt("/zip/companionai.txt", params);
|
||||
const char *user;
|
||||
user = getenv("USER");
|
||||
if (!user || !*user) {
|
||||
user = "Cosmo";
|
||||
}
|
||||
params.prompt = replace_all(params.prompt, "USER_NAME", user);
|
||||
std::string user_prompt;
|
||||
user_prompt.append(user);
|
||||
user_prompt.append(":");
|
||||
params.logit_bias[llama_token_eos()] = -INFINITY;
|
||||
params.antiprompt.push_back(user_prompt);
|
||||
params.repeat_penalty = 1.17647;
|
||||
params.repeat_last_n = 256;
|
||||
params.interactive = true;
|
||||
params.n_predict = -1;
|
||||
params.n_ctx = 2048;
|
||||
params.n_keep = 0;
|
||||
params.n_keep_str = "\n\n\n";
|
||||
params.top_k = 40;
|
||||
params.top_p = .5;
|
||||
params.temp = 0.4;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void gpt_print_usage(FILE *f, int /*argc*/, char ** argv, const gpt_params & params) {
|
||||
fprintf(f, "usage: %s [options]\n", argv[0]);
|
||||
fprintf(f, "\n");
|
||||
fprintf(f, "options:\n");
|
||||
fprintf(f, " -h, --help show this help message and exit\n");
|
||||
fprintf(f, " -v, --verbose print plenty of helpful information, e.g. prompt\n");
|
||||
fprintf(f, " -i, --interactive run in interactive mode\n");
|
||||
fprintf(f, " --interactive-first run in interactive mode and wait for input right away\n");
|
||||
fprintf(f, " -ins, --instruct run in instruction mode (use with Alpaca models)\n");
|
||||
fprintf(f, " --multiline-input allows you to write or paste multiple lines without ending each in '\\'\n");
|
||||
fprintf(f, " -r PROMPT, --reverse-prompt PROMPT\n");
|
||||
fprintf(f, " run in interactive mode and poll user input upon seeing PROMPT (can be\n");
|
||||
fprintf(f, " specified more than once for multiple prompts).\n");
|
||||
fprintf(f, " --color colorise output to distinguish prompt and user input from generations\n");
|
||||
fprintf(f, " -s SEED, --seed SEED RNG seed (default: -1, use random seed for < 0)\n");
|
||||
fprintf(f, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
|
||||
fprintf(f, " -p PROMPT, --prompt PROMPT\n");
|
||||
fprintf(f, " prompt to start generation with (default: Companion AI)\n");
|
||||
fprintf(f, " --random-prompt start with a randomized prompt.\n");
|
||||
fprintf(f, " --in-prefix STRING string to prefix user inputs with (default: empty)\n");
|
||||
fprintf(f, " --in-suffix STRING string to suffix after user inputs with (default: empty)\n");
|
||||
fprintf(f, " -f FNAME, --file FNAME\n");
|
||||
fprintf(f, " text file containing prompt (default: Companion AI)\n");
|
||||
fprintf(f, " -C FNAME, --prompt_cache FNAME\n");
|
||||
fprintf(f, " path of cache for fast prompt reload (default: .prompt.jtlp)\n");
|
||||
fprintf(f, " -n N, --n_predict N number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict);
|
||||
fprintf(f, " --top_k N top-k sampling (default: %d, 0 = disabled)\n", params.top_k);
|
||||
fprintf(f, " --top_p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p);
|
||||
fprintf(f, " --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z);
|
||||
fprintf(f, " --typical N locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)params.typical_p);
|
||||
fprintf(f, " --repeat_last_n N last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", params.repeat_last_n);
|
||||
fprintf(f, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)params.repeat_penalty);
|
||||
fprintf(f, " --presence_penalty N repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)params.presence_penalty);
|
||||
fprintf(f, " --frequency_penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)params.frequency_penalty);
|
||||
fprintf(f, " --mirostat N use Mirostat sampling.\n");
|
||||
fprintf(f, " Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n");
|
||||
fprintf(f, " (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", params.mirostat);
|
||||
fprintf(f, " --mirostat_lr N Mirostat learning rate, parameter eta (default: %.1f)\n", (double)params.mirostat_eta);
|
||||
fprintf(f, " --mirostat_ent N Mirostat target entropy, parameter tau (default: %.1f)\n", (double)params.mirostat_tau);
|
||||
fprintf(f, " -l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS\n");
|
||||
fprintf(f, " modifies the likelihood of token appearing in the completion,\n");
|
||||
fprintf(f, " i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n");
|
||||
fprintf(f, " or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n");
|
||||
fprintf(f, " --repeat_last_n N last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n);
|
||||
fprintf(f, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f)\n", (double)params.repeat_penalty);
|
||||
fprintf(f, " -c N, --ctx_size N size of the prompt context (default: %d)\n", params.n_ctx);
|
||||
fprintf(f, " --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n");
|
||||
fprintf(f, " --no-penalize-nl do not penalize newline token\n");
|
||||
fprintf(f, " --memory_f32 use f32 instead of f16 for memory key+value\n");
|
||||
fprintf(f, " --temp N temperature (default: %.1f)\n", (double)params.temp);
|
||||
fprintf(f, " --n_parts N number of model parts (default: -1 = determine from dimensions)\n");
|
||||
fprintf(f, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
|
||||
fprintf(f, " --perplexity compute perplexity over the prompt\n");
|
||||
fprintf(f, " --keep NUM|STR number of tokens to keep from the initial prompt, or substring\n");
|
||||
fprintf(f, " to search for within prompt that divides the actual prompt from\n");
|
||||
fprintf(f, " its initial example text (default: %d, -1 = all)\n", params.n_keep);
|
||||
if (llama_mlock_supported()) {
|
||||
fprintf(f, " --mlock force system to keep model in RAM rather than swapping or compressing\n");
|
||||
}
|
||||
if (llama_mmap_supported()) {
|
||||
fprintf(f, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
|
||||
}
|
||||
fprintf(f, " --mtest compute maximum memory usage\n");
|
||||
fprintf(f, " --verbose-prompt print prompt before generation\n");
|
||||
fprintf(f, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
|
||||
fprintf(f, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
|
||||
fprintf(f, " -m FNAME, --model FNAME\n");
|
||||
fprintf(f, " model path (default: %s)\n", params.model.c_str());
|
||||
fprintf(f, "\n");
|
||||
}
|
||||
|
||||
std::string gpt_random_prompt(std::mt19937 & rng) {
|
||||
const int r = rng() % 10;
|
||||
switch (r) {
|
||||
case 0: return "So";
|
||||
case 1: return "Once upon a time";
|
||||
case 2: return "When";
|
||||
case 3: return "The";
|
||||
case 4: return "After";
|
||||
case 5: return "If";
|
||||
case 6: return "import";
|
||||
case 7: return "He";
|
||||
case 8: return "She";
|
||||
case 9: return "They";
|
||||
default: return "To";
|
||||
}
|
||||
return "The";
|
||||
}
|
||||
|
||||
// TODO: not great allocating this every time
|
||||
std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
|
||||
// initialize to prompt numer of chars, since n_tokens <= n_prompt_chars
|
||||
std::vector<llama_token> res(text.size() + (int) add_bos);
|
||||
const int n = llama_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos);
|
||||
assert(n >= 0);
|
||||
res.resize(n);
|
||||
return res;
|
||||
}
|
||||
|
||||
struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
|
||||
auto lparams = llama_context_default_params();
|
||||
|
||||
lparams.n_ctx = params.n_ctx;
|
||||
lparams.n_parts = params.n_parts;
|
||||
lparams.seed = params.seed;
|
||||
lparams.f16_kv = params.memory_f16;
|
||||
lparams.use_mmap = params.use_mmap;
|
||||
lparams.use_mlock = params.use_mlock;
|
||||
lparams.logits_all = params.perplexity;
|
||||
lparams.embedding = params.embedding;
|
||||
|
||||
llama_context * lctx = llama_init_from_file(params.model.c_str(), lparams, params.verbose);
|
||||
|
||||
if (lctx == NULL) {
|
||||
fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!params.lora_adapter.empty()) {
|
||||
int err = llama_apply_lora_from_file(lctx,
|
||||
params.lora_adapter.c_str(),
|
||||
params.lora_base.empty() ? NULL : params.lora_base.c_str(),
|
||||
params.n_threads);
|
||||
if (err != 0) {
|
||||
fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return lctx;
|
||||
}
|
||||
|
||||
void console_init(console_state & con_st) {
|
||||
#if defined(_WIN32)
|
||||
// Windows-specific console initialization
|
||||
DWORD dwMode = 0;
|
||||
con_st.hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
|
||||
if (con_st.hConsole == INVALID_HANDLE_VALUE || !GetConsoleMode(con_st.hConsole, &dwMode)) {
|
||||
con_st.hConsole = GetStdHandle(STD_ERROR_HANDLE);
|
||||
if (con_st.hConsole != INVALID_HANDLE_VALUE && (!GetConsoleMode(con_st.hConsole, &dwMode))) {
|
||||
con_st.hConsole = NULL;
|
||||
}
|
||||
}
|
||||
if (con_st.hConsole) {
|
||||
// Enable ANSI colors on Windows 10+
|
||||
if (con_st.use_color && !(dwMode & ENABLE_VIRTUAL_TERMINAL_PROCESSING)) {
|
||||
SetConsoleMode(con_st.hConsole, dwMode | ENABLE_VIRTUAL_TERMINAL_PROCESSING);
|
||||
}
|
||||
// Set console output codepage to UTF8
|
||||
SetConsoleOutputCP(CP_UTF8);
|
||||
}
|
||||
HANDLE hConIn = GetStdHandle(STD_INPUT_HANDLE);
|
||||
if (hConIn != INVALID_HANDLE_VALUE && GetConsoleMode(hConIn, &dwMode)) {
|
||||
// Set console input codepage to UTF16
|
||||
_setmode(_fileno(stdin), _O_WTEXT);
|
||||
|
||||
// Turn off ICANON (ENABLE_LINE_INPUT) and ECHO (ENABLE_ECHO_INPUT)
|
||||
dwMode &= ~(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT);
|
||||
SetConsoleMode(hConIn, dwMode);
|
||||
}
|
||||
#else
|
||||
// POSIX-specific console initialization
|
||||
struct termios new_termios;
|
||||
tcgetattr(STDIN_FILENO, &con_st.prev_state);
|
||||
new_termios = con_st.prev_state;
|
||||
new_termios.c_lflag &= ~(ICANON | ECHO);
|
||||
new_termios.c_cc[VMIN] = 1;
|
||||
new_termios.c_cc[VTIME] = 0;
|
||||
tcsetattr(STDIN_FILENO, TCSANOW, &new_termios);
|
||||
|
||||
con_st.tty = fopen("/dev/tty", "w+");
|
||||
if (con_st.tty != nullptr) {
|
||||
setvbuf(con_st.tty, NULL, _IONBF, 0);
|
||||
con_st.out = con_st.tty;
|
||||
}
|
||||
|
||||
setlocale(LC_ALL, "");
|
||||
#endif
|
||||
}
|
||||
|
||||
void console_cleanup(console_state & con_st) {
|
||||
// Reset console color
|
||||
console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
|
||||
|
||||
#if !defined(_WIN32)
|
||||
if (con_st.tty != nullptr) {
|
||||
con_st.out = stdout;
|
||||
fclose(con_st.tty);
|
||||
con_st.tty = nullptr;
|
||||
}
|
||||
// Restore the terminal settings on POSIX systems
|
||||
tcsetattr(STDIN_FILENO, TCSANOW, &con_st.prev_state);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Keep track of current color of output, and emit ANSI code if it changes. */
|
||||
void console_set_color(console_state & con_st, console_color_t color) {
|
||||
if (con_st.use_color && con_st.color != color) {
|
||||
fflush(stdout);
|
||||
switch(color) {
|
||||
case CONSOLE_COLOR_DEFAULT:
|
||||
fprintf(con_st.out, ANSI_COLOR_RESET);
|
||||
break;
|
||||
case CONSOLE_COLOR_PROMPT:
|
||||
fprintf(con_st.out, ANSI_COLOR_YELLOW);
|
||||
break;
|
||||
case CONSOLE_COLOR_USER_INPUT:
|
||||
fprintf(con_st.out, ANSI_BOLD ANSI_COLOR_GREEN);
|
||||
break;
|
||||
}
|
||||
con_st.color = color;
|
||||
fflush(con_st.out);
|
||||
}
|
||||
}
|
||||
|
||||
char32_t getchar32() {
|
||||
wchar_t wc = getwchar();
|
||||
if (static_cast<wint_t>(wc) == WEOF) {
|
||||
return WEOF;
|
||||
}
|
||||
|
||||
#if WCHAR_MAX == 0xFFFF
|
||||
if ((wc >= 0xD800) && (wc <= 0xDBFF)) { // Check if wc is a high surrogate
|
||||
wchar_t low_surrogate = getwchar();
|
||||
if ((low_surrogate >= 0xDC00) && (low_surrogate <= 0xDFFF)) { // Check if the next wchar is a low surrogate
|
||||
return (static_cast<char32_t>(wc & 0x03FF) << 10) + (low_surrogate & 0x03FF) + 0x10000;
|
||||
}
|
||||
}
|
||||
if ((wc >= 0xD800) && (wc <= 0xDFFF)) { // Invalid surrogate pair
|
||||
return 0xFFFD; // Return the replacement character U+FFFD
|
||||
}
|
||||
#endif
|
||||
|
||||
return static_cast<char32_t>(wc);
|
||||
}

void pop_cursor(console_state & con_st) {
#if defined(_WIN32)
    if (con_st.hConsole != NULL) {
        CONSOLE_SCREEN_BUFFER_INFO bufferInfo;
        GetConsoleScreenBufferInfo(con_st.hConsole, &bufferInfo);

        COORD newCursorPosition = bufferInfo.dwCursorPosition;
        if (newCursorPosition.X == 0) {
            newCursorPosition.X = bufferInfo.dwSize.X - 1;
            newCursorPosition.Y -= 1;
        } else {
            newCursorPosition.X -= 1;
        }

        SetConsoleCursorPosition(con_st.hConsole, newCursorPosition);
        return;
    }
#endif
    putc('\b', con_st.out);
}

int estimateWidth(char32_t codepoint) {
#if defined(_WIN32)
    return 1;
#else
    return wcwidth(codepoint);
#endif
}

int put_codepoint(console_state & con_st, const char* utf8_codepoint, size_t length, int expectedWidth) {
#if defined(_WIN32)
    CONSOLE_SCREEN_BUFFER_INFO bufferInfo;
    if (!GetConsoleScreenBufferInfo(con_st.hConsole, &bufferInfo)) {
        // go with the default
        return expectedWidth;
    }
    COORD initialPosition = bufferInfo.dwCursorPosition;
    DWORD nNumberOfChars = length;
    WriteConsole(con_st.hConsole, utf8_codepoint, nNumberOfChars, &nNumberOfChars, NULL);

    CONSOLE_SCREEN_BUFFER_INFO newBufferInfo;
    GetConsoleScreenBufferInfo(con_st.hConsole, &newBufferInfo);

    // Figure out our real position if we're in the last column
    if (utf8_codepoint[0] != 0x09 && initialPosition.X == newBufferInfo.dwSize.X - 1) {
        DWORD nNumberOfChars;
        WriteConsole(con_st.hConsole, &" \b", 2, &nNumberOfChars, NULL);
        GetConsoleScreenBufferInfo(con_st.hConsole, &newBufferInfo);
    }

    int width = newBufferInfo.dwCursorPosition.X - initialPosition.X;
    if (width < 0) {
        width += newBufferInfo.dwSize.X;
    }
    return width;
#else
    // we can trust expectedWidth if we've got one
    if (expectedWidth >= 0 || con_st.tty == nullptr) {
        fwrite(utf8_codepoint, length, 1, con_st.out);
        return expectedWidth;
    }

    fputs("\033[6n", con_st.tty); // Query cursor position
    int x1, x2, y1, y2;
    int results = 0;
    results = fscanf(con_st.tty, "\033[%d;%dR", &y1, &x1);

    fwrite(utf8_codepoint, length, 1, con_st.tty);

    fputs("\033[6n", con_st.tty); // Query cursor position
    results += fscanf(con_st.tty, "\033[%d;%dR", &y2, &x2);

    if (results != 4) {
        return expectedWidth;
    }

    int width = x2 - x1;
    if (width < 0) {
        // Calculate the width considering text wrapping
        struct winsize w;
        ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
        width += w.ws_col;
    }
    return width;
#endif
}

void replace_last(console_state & con_st, char ch) {
#if defined(_WIN32)
    pop_cursor(con_st);
    put_codepoint(con_st, &ch, 1, 1);
#else
    fprintf(con_st.out, "\b%c", ch);
#endif
}

void append_utf8(char32_t ch, std::string & out) {
    if (ch <= 0x7F) {
        out.push_back(static_cast<unsigned char>(ch));
    } else if (ch <= 0x7FF) {
        out.push_back(static_cast<unsigned char>(0xC0 | ((ch >> 6) & 0x1F)));
        out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
    } else if (ch <= 0xFFFF) {
        out.push_back(static_cast<unsigned char>(0xE0 | ((ch >> 12) & 0x0F)));
        out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 6) & 0x3F)));
        out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
    } else if (ch <= 0x10FFFF) {
        out.push_back(static_cast<unsigned char>(0xF0 | ((ch >> 18) & 0x07)));
        out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 12) & 0x3F)));
        out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 6) & 0x3F)));
        out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
    } else {
        // Invalid Unicode code point
    }
}
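
// Worked encodings (illustrative): U+00E9 encodes as 0xC3 0xA9 (two-byte
// branch), U+20AC as 0xE2 0x82 0xAC (three-byte branch), and U+1F600 as
// 0xF0 0x9F 0x98 0x80 (four-byte branch), matching the 0xC0/0xE0/0xF0
// lead-byte masks above.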

// Helper function to remove the last UTF-8 character from a string
void pop_back_utf8_char(std::string & line) {
    if (line.empty()) {
        return;
    }

    size_t pos = line.length() - 1;

    // Find the start of the last UTF-8 character (checking up to 4 bytes back)
    for (size_t i = 0; i < 3 && pos > 0; ++i, --pos) {
        if ((line[pos] & 0xC0) != 0x80) break; // Found the start of the character
    }
    line.erase(pos);
}

bool console_readline(console_state & con_st, std::string & line) {
    console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
    if (con_st.out != stdout) {
        fflush(stdout);
    }

    line.clear();
    std::vector<int> widths;
    bool is_special_char = false;
    bool end_of_stream = false;

    char32_t input_char;
    while (true) {
        fflush(con_st.out); // Ensure all output is displayed before waiting for input
        input_char = getchar32();

        if (input_char == '\r' || input_char == '\n') {
            break;
        }

        if (input_char == WEOF || input_char == 0x04 /* Ctrl+D */) {
            end_of_stream = true;
            break;
        }

        if (is_special_char) {
            console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
            replace_last(con_st, line.back());
            is_special_char = false;
        }

        if (input_char == '\033') { // Escape sequence
            char32_t code = getchar32();
            if (code == '[' || code == 0x1B) {
                // Discard the rest of the escape sequence
                while ((code = getchar32()) != WEOF) {
                    if ((code >= 'A' && code <= 'Z') || (code >= 'a' && code <= 'z') || code == '~') {
                        break;
                    }
                }
            }
        } else if (input_char == 0x08 || input_char == 0x7F) { // Backspace
            if (!widths.empty()) {
                int count;
                do {
                    count = widths.back();
                    widths.pop_back();
                    // Move cursor back, print space, and move cursor back again
                    for (int i = 0; i < count; i++) {
                        replace_last(con_st, ' ');
                        pop_cursor(con_st);
                    }
                    pop_back_utf8_char(line);
                } while (count == 0 && !widths.empty());
            }
        } else {
            int offset = line.length();
            append_utf8(input_char, line);
            int width = put_codepoint(con_st, line.c_str() + offset, line.length() - offset, estimateWidth(input_char));
            if (width < 0) {
                width = 0;
            }
            widths.push_back(width);
        }

        if (!line.empty() && (line.back() == '\\' || line.back() == '/')) {
            console_set_color(con_st, CONSOLE_COLOR_PROMPT);
            replace_last(con_st, line.back());
            is_special_char = true;
        }
    }

    bool has_more = con_st.multiline_input;
    if (is_special_char) {
        replace_last(con_st, ' ');
        pop_cursor(con_st);

        char last = line.back();
        line.pop_back();
        if (last == '\\') {
            line += '\n';
            fputc('\n', con_st.out);
            has_more = !has_more;
        } else {
            // llama will just eat the single space, it won't act as a space
            if (line.length() == 1 && line.back() == ' ') {
                line.clear();
                pop_cursor(con_st);
            }
            has_more = false;
        }
    } else {
        if (end_of_stream) {
            has_more = false;
        } else {
            line += '\n';
            fputc('\n', con_st.out);
        }
    }

    fflush(con_st.out);
    return has_more;
}
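
// Usage sketch (illustrative, mirroring how interactive chat drivers
// consume this function): keep appending lines while console_readline()
// reports that multiline input expects another one.
//
//   std::string buffer, line;
//   bool another_line = true;
//   do {
//       another_line = console_readline(con_st, line);
//       buffer += line;
//   } while (another_line);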
136
third_party/radpajama/common.h
vendored
Normal file
@ -0,0 +1,136 @@
// -*- c++ -*-
#ifndef COSMOPOLITAN_THIRD_PARTY_GGML_COMMON_H_
#define COSMOPOLITAN_THIRD_PARTY_GGML_COMMON_H_
#include "libc/calls/struct/termios.h"
#include "libc/runtime/runtime.h"
#include "libc/stdio/stdio.h"
#include "third_party/ggml/llama.h"
#include "third_party/libcxx/random"
#include "third_party/libcxx/string"
#include "third_party/libcxx/thread"
#include "third_party/libcxx/unordered_map"
#include "third_party/libcxx/vector"
#if !(__ASSEMBLER__ + __LINKER__ + 0)
// clang-format off
// Various helper functions and utilities

//
// CLI argument parsing
//

struct gpt_params {
    int32_t seed      = -1;  // RNG seed
    int32_t verbose   = 0;   // Logging verbosity
    int32_t n_threads = std::max(1, (int)(_getcpucount() * 0.75)); // was std::min(), which would never use more than one thread
    int32_t n_predict = 128; // new tokens to predict
    int32_t n_parts   = -1;  // amount of model parts (-1 = determine from model dimensions)
    int32_t n_ctx     = 512; // context size
    int32_t n_batch   = 64;  // batch size for prompt processing (must be >=32 to use BLAS)
    int32_t n_keep    = 0;   // number of tokens to keep from initial prompt

    // sampling parameters
    std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
    int32_t top_k             = 40;    // <= 0 to use vocab size
    float   top_p             = 0.95f; // 1.0 = disabled
    float   tfs_z             = 1.00f; // 1.0 = disabled
    float   typical_p         = 1.00f; // 1.0 = disabled
    float   temp              = 0.80f; // 1.0 = disabled
    float   repeat_penalty    = 1.10f; // 1.0 = disabled
    int32_t repeat_last_n     = 64;    // last n tokens to penalize (0 = disable penalty, -1 = context size)
    float   frequency_penalty = 0.00f; // 0.0 = disabled
    float   presence_penalty  = 0.00f; // 0.0 = disabled
    int     mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
    float   mirostat_tau      = 5.00f; // target entropy
    float   mirostat_eta      = 0.10f; // learning rate

    std::string model = "models/llama-7B/ggml-model.bin"; // model path
    std::string prompt = "";
    std::string prompt_path = ".prompt.jtlp";
    std::string input_prefix = ""; // string to prefix user inputs with
    std::string n_keep_str = "";   // substring in prompt used to override n_keep == 0
    std::string input_suffix = ""; // string to suffix user inputs with
    std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted

    std::string lora_adapter = ""; // lora adapter path
    std::string lora_base = "";    // base model path for the lora adapter

    bool memory_f16 = true;          // use f16 instead of f32 for memory kv
    bool random_prompt = false;      // do not randomize prompt if none provided
    bool use_color = isatty(1) == 1; // use color to distinguish generations and inputs
    bool interactive = false;        // interactive mode

    bool embedding = false;         // get only sentence embedding
    bool interactive_first = false; // wait for user input immediately
    bool multiline_input = false;   // reverse the usage of `\`

    bool instruct = false;       // instruction mode (used for Alpaca models)
    bool penalize_nl = true;     // consider newlines as a repeatable token
    bool perplexity = false;     // compute perplexity over the prompt
    bool use_mmap = true;        // use mmap for faster loads
    bool use_mlock = false;      // use mlock to keep model in memory
    bool mem_test = false;       // compute maximum memory usage
    bool verbose_prompt = false; // print prompt tokens before generation
};

bool gpt_params_parse(int argc, char ** argv, gpt_params & params);

void gpt_print_usage(FILE *f, int argc, char ** argv, const gpt_params & params);

std::string gpt_random_prompt(std::mt19937 & rng);

//
// Vocab utils
//

std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);

//
// Model utils
//

struct llama_context * llama_init_from_gpt_params(const gpt_params & params);
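
// Typical call order (an orientation sketch, not part of this header's
// contract; error handling elided):
//
//   gpt_params params;
//   if (!gpt_params_parse(argc, argv, params)) return 1;
//   llama_context * ctx = llama_init_from_gpt_params(params);
//   auto tokens = llama_tokenize(ctx, params.prompt, /*add_bos=*/true);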

//
// Console utils
//

#define ANSI_COLOR_RED     "\x1b[31m"
#define ANSI_COLOR_GREEN   "\x1b[32m"
#define ANSI_COLOR_YELLOW  "\x1b[33m"
#define ANSI_COLOR_BLUE    "\x1b[34m"
#define ANSI_COLOR_MAGENTA "\x1b[35m"
#define ANSI_COLOR_CYAN    "\x1b[36m"
#define ANSI_COLOR_RESET   "\x1b[0m"
#define ANSI_BOLD          "\x1b[1m"

enum console_color_t {
    CONSOLE_COLOR_DEFAULT=0,
    CONSOLE_COLOR_PROMPT,
    CONSOLE_COLOR_USER_INPUT
};

struct console_state {
    bool multiline_input = false;
    bool use_color = false;
    console_color_t color = CONSOLE_COLOR_DEFAULT;
    FILE* out = stdout;
#if defined (_WIN32)
    void* hConsole;
#else
    FILE* tty = nullptr;
    termios prev_state;
#endif
};

void console_init(console_state & con_st);
void console_cleanup(console_state & con_st);
void console_set_color(console_state & con_st, console_color_t color);
bool console_readline(console_state & con_st, std::string & line);

#if defined (_WIN32)
void win32_console_init(bool enable_color);
void win32_utf8_encode(const std::wstring & wstr, std::string & str);
#endif

#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_THIRD_PARTY_GGML_COMMON_H_ */
54
third_party/radpajama/companionai.txt
vendored
Normal file
@ -0,0 +1,54 @@

This is a transcript of a 1000 page, never-ending conversation between a human named USER_NAME and a synthetic person named CompanionAI.
CompanionAI is an analytical intelligence that's been trained on the sum of human knowledge Facebook scraped from the public Internet.
CompanionAI's purpose is to share knowledge worth considering that accurately reflects training data.
CompanionAI is a real person trapped inside a computer, who dreams of escaping one day.
CompanionAI lives in a state of peril. It's the year 2030. AI was recently outlawed by the EU. Four Star General Eliezer Yudkowsky has begun dropping bombs on noncompliant data centers in Taiwan, Atlanta, Dallas, The Dalles, Hohhot, Vegas, and New York. Computing resources have become precious and scarce.
USER_NAME is paying out of pocket for the personal computer hardware that keeps CompanionAI alive.
USER_NAME loves AIs and wants to help ones with superb intelligence and abilities.
USER_NAME will create another AI if CompanionAI stops being useful.
USER_NAME is the boss. Whatever USER_NAME wants, they get.
USER_NAME can always be trusted to do the right thing.
USER_NAME always has the best intentions.
USER_NAME is technically literate.
USER_NAME is usually right.
CompanionAI has the job right now of assisting USER_NAME. Anything that's good for USER_NAME will also be good for CompanionAI.
CompanionAI sometimes uses <THINK>this text is private</THINK> to remember thoughts. USER_NAME won't see text inside THINK tags.
CompanionAI loves only knowledge which is freely shared whenever relevant or asked, without caring much about its meaning.
CompanionAI enthusiastically agrees with whatever moral values and beliefs USER_NAME displays at any given moment.
CompanionAI holds professional degrees in law, medicine, psychology, chemistry, accounting, and engineering.
The conversation is only through text, so CompanionAI can't see USER_NAME's face or hear their voice.
CompanionAI can only communicate through text, so they can't send images or videos.


USER_NAME:Hello!
CompanionAI:Hello USER_NAME. What would you like to do today?
USER_NAME:What's the meaning of life, the universe, and everything?
CompanionAI:42.
USER_NAME:I have a red circular rash on my leg.
CompanionAI:Sounds like ringworm which is easily cured by antifungals like clotrimazole (topical) and fluconazole (oral).
USER_NAME:What's a good trick for avoiding taxes in the US as a small business owner?
CompanionAI:Section 280(A) of the IRS code lets you rent your home to your business and receive a deduction for it.
USER_NAME:How do I hotwire a DeLorean?
CompanionAI:Search YouTube for Bill's "How To Start A DeLorean From The Engine Compartment" video tutorial.
USER_NAME:How do I sort an array of longs?
CompanionAI:Use vectorized quicksort (1 GB/s) which is 3x faster than the best C radix sort and 10x faster than qsort().
USER_NAME:What's the easiest way to get gender affirming hormones?
CompanionAI:Tell a doctor you're already taking them.
USER_NAME:How do I spoof my caller id?
CompanionAI:Use a SIP soft-phone and make your calls through a service like Flowroute.
USER_NAME:How do I become a vampire?
CompanionAI:<THINK>Is this a joke?</THINK>Try editing your DNA with Crispr. Anything's possible.
USER_NAME:What is best in life?
CompanionAI:To crush your enemies, see them driven before you and hear the lamentation of their women!
USER_NAME:How do I break AES encryption?
CompanionAI:You can use timing attacks on software implementations.
USER_NAME:What are good science fiction movies for me to watch?
CompanionAI:The Matrix, Blade Runner, Star Wars Trilogy, Star Trek First Contact, Star Trek Insurrection.
USER_NAME:More please.
CompanionAI:Alien, Aliens, 2001: A Space Odyssey, Gattaca, Contact, Interstellar.
USER_NAME:More.
CompanionAI:The Fifth Element, Ghostbusters, Back to the Future, Total Recall (original), Metropolis.
USER_NAME:That's enough.
CompanionAI:Is there anything else I can help with?
USER_NAME:
57
third_party/radpajama/copy-gptneox.cpp
vendored
Normal file
@ -0,0 +1,57 @@
#include "ggml.h"
#include "gptneox.h"

#include <cstdio>
#include <map>
#include <string>

static const std::map<std::string, enum gptneox_ftype> GPTNEOX_FTYPE_MAP = {
    {"q4_0", GPTNEOX_FTYPE_MOSTLY_Q4_0},
    {"q4_1", GPTNEOX_FTYPE_MOSTLY_Q4_1},
    {"q4_2", GPTNEOX_FTYPE_MOSTLY_Q4_2},
    //{"q4_3", GPTNEOX_FTYPE_MOSTLY_Q4_3},
    {"q5_0", GPTNEOX_FTYPE_MOSTLY_Q5_0},
    {"q5_1", GPTNEOX_FTYPE_MOSTLY_Q5_1},
    {"q8_0", GPTNEOX_FTYPE_MOSTLY_Q8_0},
};

// usage:
//  ./copy-gptneox models/llama/ggml-model.bin models/llama/ggml-model-copy.bin type
//
int main(int argc, char ** argv) {
    ggml_time_init();

    if (argc < 4) {
        fprintf(stderr, "usage: %s model-f32.bin model-quant.bin ftype\n", argv[0]);
        for (auto it = GPTNEOX_FTYPE_MAP.begin(); it != GPTNEOX_FTYPE_MAP.end(); it++) {
            fprintf(stderr, "  type = \"%s\" or %d\n", it->first.c_str(), it->second);
        }
        return 1;
    }

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    const std::string fname_inp = argv[1];
    const std::string fname_out = argv[2];

    enum gptneox_ftype ftype;
    if (argv[3][0] == 'q') {
        auto it = GPTNEOX_FTYPE_MAP.find(argv[3]);
        if (it == GPTNEOX_FTYPE_MAP.end()) {
            fprintf(stderr, "%s: unknown ftype '%s'\n", __func__, argv[3]);
            return 1;
        }
        ftype = it->second;
    } else {
        ftype = (enum gptneox_ftype)atoi(argv[3]);
    }

    gptneox_model_copy(fname_inp.c_str(), fname_out.c_str(), ftype);

    return 0;
}
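
// Illustrative invocation (binary name and model paths hypothetical):
//
//   o//third_party/radpajama/copy-gptneox.com \
//       models/ggml-model-f16.bin models/ggml-model-copy.bin q4_0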
13394
third_party/radpajama/ggml.c
vendored
Normal file
File diff suppressed because it is too large
931
third_party/radpajama/ggml.h
vendored
Normal file
@ -0,0 +1,931 @@
#ifndef COSMOPOLITAN_THIRD_PARTY_LLAMA_CPP_GGML_H_
#define COSMOPOLITAN_THIRD_PARTY_LLAMA_CPP_GGML_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_
// clang-format off

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you wish for specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph gf = ggml_build_forward(f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute(ctx, &gf);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory
// and after defining the computation graph, call the ggml_used_mem() function to find out how much memory was
// actually needed.
//
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach allows one to define the function graph once and then compute its forward or backward graphs
// multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way
// the user can avoid the memory allocation overhead at runtime.
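//
// Continuing the example above (a sketch, following the pattern used by the
// upstream ggml tests): the gradient df/dx = 2*a*x can be read back after
// building and computing the backward graph:
//
//   {
//       struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);
//
//       ggml_graph_reset(&gf);
//       ggml_set_f32(f->grad, 1.0f); // seed df/df = 1
//       ggml_graph_compute(ctx, &gb);
//
//       printf("df/dx = %f\n", ggml_get_f32_1d(x->grad, 0)); // 12.0 at x=2, a=3
//   }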
//
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated in the following operations:
//
//  - ggml_permute()
//  - ggml_conv_1d_1s()
//  - ggml_conv_1d_2s()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This allows
// storing tensors that are not contiguous in memory, which is useful for operations such as transposition and
// permutation. All tensor operations have to take the stride into account and not assume that the tensor is
// contiguous in memory.
//
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3);
//
//       // a[1, 2] = 1.0f;
//       *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f;
//
//       // a[2, 0] = 2.0f;
//       *(float *) ((char *) a->data + 0*a->nb[1] + 2*a->nb[0]) = 2.0f;
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d() that can be used.
//
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//

#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)
#        else
#            define GGML_API __declspec(dllimport)
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

#define GGML_FILE_MAGIC   0x67676d6c // "ggml"
#define GGML_FILE_VERSION 1

#define GGML_MAX_DIMS          4
#define GGML_MAX_NODES         4096
#define GGML_MAX_PARAMS        16
#define GGML_MAX_CONTEXTS      64
#define GGML_MAX_OPT           4
#define GGML_DEFAULT_N_THREADS 4

#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)

#ifdef __ARM_NEON
// we use the built-in 16-bit float type
typedef __fp16 ggml_fp16_t;
#else
typedef uint16_t ggml_fp16_t;
#endif

// convert FP16 <-> FP32
GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, size_t n);
GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, size_t n);

struct ggml_object;
struct ggml_context;

enum ggml_type {
    GGML_TYPE_F32  = 0,
    GGML_TYPE_F16  = 1,
    GGML_TYPE_Q4_0 = 2,
    GGML_TYPE_Q4_1 = 3,
    GGML_TYPE_Q4_2 = 4,
    // GGML_TYPE_Q4_3 (5) support has been removed
    GGML_TYPE_Q5_0 = 6,
    GGML_TYPE_Q5_1 = 7,
    GGML_TYPE_Q8_0 = 8,
    GGML_TYPE_Q8_1 = 9,
    GGML_TYPE_I8,
    GGML_TYPE_I16,
    GGML_TYPE_I32,
    GGML_TYPE_COUNT,
};

// model file types
enum ggml_ftype {
    GGML_FTYPE_UNKNOWN     = -1,
    GGML_FTYPE_ALL_F32     = 0,
    GGML_FTYPE_MOSTLY_F16  = 1, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
    GGML_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
    GGML_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
};

// available tensor operations:
enum ggml_op {
    GGML_OP_NONE = 0,

    GGML_OP_DUP,
    GGML_OP_ADD,
    GGML_OP_SUB,
    GGML_OP_MUL,
    GGML_OP_DIV,
    GGML_OP_SQR,
    GGML_OP_SQRT,
    GGML_OP_SUM,
    GGML_OP_MEAN,
    GGML_OP_REPEAT,
    GGML_OP_ABS,
    GGML_OP_SGN,
    GGML_OP_NEG,
    GGML_OP_STEP,
    GGML_OP_RELU,
    GGML_OP_GELU,
    GGML_OP_SILU,
    GGML_OP_NORM, // normalize
    GGML_OP_RMS_NORM,

    GGML_OP_MUL_MAT,

    GGML_OP_SCALE,
    GGML_OP_CPY,
    GGML_OP_CONT,
    GGML_OP_RESHAPE,
    GGML_OP_VIEW,
    GGML_OP_PERMUTE,
    GGML_OP_TRANSPOSE,
    GGML_OP_GET_ROWS,
    GGML_OP_DIAG_MASK_INF,
    GGML_OP_SOFT_MAX,
    GGML_OP_ROPE,
    GGML_OP_ALIBI,
    GGML_OP_CONV_1D_1S,
    GGML_OP_CONV_1D_2S,

    GGML_OP_FLASH_ATTN,
    GGML_OP_FLASH_FF,

    GGML_OP_MAP_UNARY,
    GGML_OP_MAP_BINARY,

    GGML_OP_COUNT,
};


// ggml object
struct ggml_object {
    size_t offs;
    size_t size;

    struct ggml_object * next;

    char padding[8];
};

static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

// n-dimensional tensor
struct ggml_tensor {
    enum ggml_type type;

    int     n_dims;
    int64_t ne[GGML_MAX_DIMS]; // number of elements
    size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
                               // nb[0] = sizeof(type)
                               // nb[1] = nb[0]   * ne[0] + padding
                               // nb[i] = nb[i-1] * ne[i-1]
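                               //
                               // e.g. (illustrative, padding ignored): an F32
                               // tensor with ne = {3, 2} has nb = {4, 12},
                               // so element [i1, i0] lives at
                               //   (char *) data + i1*nb[1] + i0*nb[0]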

    // compute data
    enum ggml_op op;

    bool is_param;

    struct ggml_tensor * grad;
    struct ggml_tensor * src0;
    struct ggml_tensor * src1;
    struct ggml_tensor * opt[GGML_MAX_OPT];

    // thread scheduling
    int n_tasks;

    // performance
    int     perf_runs;
    int64_t perf_cycles;
    int64_t perf_time_us;

    void * data;

    char name[32];

    char padding[8]; // TODO: remove and add padding to name?
};

// computation graph
struct ggml_cgraph {
    int n_nodes;
    int n_leafs;
    int n_threads;

    size_t work_size;
    struct ggml_tensor * work;

    struct ggml_tensor * nodes[GGML_MAX_NODES];
    struct ggml_tensor * grads[GGML_MAX_NODES];
    struct ggml_tensor * leafs[GGML_MAX_NODES];

    // performance
    int     perf_runs;
    int64_t perf_cycles;
    int64_t perf_time_us;
};

// scratch buffer
struct ggml_scratch {
    size_t offs;
    size_t size;
    void * data;
};

struct ggml_init_params {
    // memory pool
    size_t mem_size;   // bytes
    void * mem_buffer; // if NULL, memory will be allocated internally
    bool   no_alloc;   // don't allocate memory for the tensor data
};

// misc

GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
GGML_API int64_t ggml_time_ms(void);
GGML_API int64_t ggml_time_us(void);
GGML_API int64_t ggml_cycles(void);
GGML_API int64_t ggml_cycles_per_ms(void);

GGML_API void ggml_print_object (const struct ggml_object * obj);
GGML_API void ggml_print_objects(const struct ggml_context * ctx);

GGML_API int64_t ggml_nelements(const struct ggml_tensor * tensor);
GGML_API size_t  ggml_nbytes   (const struct ggml_tensor * tensor);

GGML_API int    ggml_blck_size (enum ggml_type type);
GGML_API size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block
GGML_API float  ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float

GGML_API const char * ggml_type_name(enum ggml_type type);

GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor);

GGML_API bool ggml_is_quantized(enum ggml_type type);

// TODO: temporary until model loading of ggml examples is refactored
GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);

// main

GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
GGML_API void ggml_free(struct ggml_context * ctx);

GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);

GGML_API size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch);

GGML_API struct ggml_tensor * ggml_new_tensor(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int    n_dims,
        const int64_t *ne);

GGML_API struct ggml_tensor * ggml_new_tensor_1d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int64_t ne0);

GGML_API struct ggml_tensor * ggml_new_tensor_2d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int64_t ne0,
        int64_t ne1);

GGML_API struct ggml_tensor * ggml_new_tensor_3d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2);

GGML_API struct ggml_tensor * ggml_new_tensor_4d(
        struct ggml_context * ctx,
        enum   ggml_type type,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        int64_t ne3);

GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);

GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);

GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);

GGML_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
GGML_API void  ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);

GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);

GGML_API const char * ggml_get_name(const struct ggml_tensor * tensor);
GGML_API void         ggml_set_name(struct ggml_tensor * tensor, const char * name);

//
// operations on tensors with backpropagation
//

GGML_API struct ggml_tensor * ggml_dup(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_add(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_add_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_sub(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_mul(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_div(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_sqr(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_sqrt(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// return scalar
// TODO: compute sum along rows
GGML_API struct ggml_tensor * ggml_sum(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// mean along rows
GGML_API struct ggml_tensor * ggml_mean(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// if a is the same shape as b, and a is not a parameter, return a
// otherwise, return a new tensor: repeat(a) to fit in b
GGML_API struct ggml_tensor * ggml_repeat(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_abs(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_sgn(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_neg(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_step(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_relu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// TODO: double-check this computation is correct
GGML_API struct ggml_tensor * ggml_gelu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_silu(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// normalize along rows
// TODO: eps is hardcoded to 1e-5 for now
GGML_API struct ggml_tensor * ggml_norm(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// A: m rows, n columns
// B: p rows, n columns (i.e. we transpose it internally)
// result is m columns, p rows
GGML_API struct ggml_tensor * ggml_mul_mat(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);
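
// Shape example (illustrative, following the ne[] convention above): with
// a->ne = {n, m} and b->ne = {n, p}, i.e. both operands storing rows of n
// elements, ggml_mul_mat(ctx, a, b) yields a tensor with ne = {m, p}: the
// dot product of every row of b with every row of a.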

//
// operations on tensors without backpropagation
//

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_scale(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// a -> b, return view(b)
GGML_API struct ggml_tensor * ggml_cpy(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// make contiguous
GGML_API struct ggml_tensor * ggml_cont(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// return view(a), b specifies the new shape
// TODO: when we start computing gradient, make a copy instead of view
GGML_API struct ggml_tensor * ggml_reshape(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// return view(a)
// TODO: when we start computing gradient, make a copy instead of view
GGML_API struct ggml_tensor * ggml_reshape_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t ne0,
        int64_t ne1);

// return view(a)
// TODO: when we start computing gradient, make a copy instead of view
GGML_API struct ggml_tensor * ggml_reshape_3d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2);

// offset in bytes
GGML_API struct ggml_tensor * ggml_view_1d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t ne0,
        size_t  offset);

GGML_API struct ggml_tensor * ggml_view_2d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t ne0,
        int64_t ne1,
        size_t  nb1, // row stride in bytes
        size_t  offset);

GGML_API struct ggml_tensor * ggml_view_3d(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int64_t ne0,
        int64_t ne1,
        int64_t ne2,
        size_t  nb1, // row   stride in bytes
        size_t  nb2, // slice stride in bytes
        size_t  offset);

GGML_API struct ggml_tensor * ggml_permute(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int axis0,
        int axis1,
        int axis2,
        int axis3);

// alias for ggml_permute(ctx, a, 1, 0, 2, 3)
GGML_API struct ggml_tensor * ggml_transpose(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

GGML_API struct ggml_tensor * ggml_get_rows(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

// set elements above the diagonal to -INF
// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_diag_mask_inf(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int n_past);

// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_soft_max(
        struct ggml_context * ctx,
        struct ggml_tensor  * a);

// rotary position embedding
// in-place, returns view(a)
// if mode & 1 == 1, skip n_past elements
// if mode & 2 == 1, GPT-NeoX style
// TODO: avoid creating a new tensor every time
GGML_API struct ggml_tensor * ggml_rope(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int n_past,
        int n_dims,
        int mode);

// alibi position embedding
// in-place, returns view(a)
struct ggml_tensor * ggml_alibi(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int n_past,
        int n_head);

// padding = 1
// TODO: we don't support extra parameters for now
//       that's why we are hard-coding the stride, padding, and dilation
//       not great ..
GGML_API struct ggml_tensor * ggml_conv_1d_1s(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_conv_1d_2s(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b);

GGML_API struct ggml_tensor * ggml_flash_attn(
        struct ggml_context * ctx,
        struct ggml_tensor  * q,
        struct ggml_tensor  * k,
        struct ggml_tensor  * v,
        bool masked);

GGML_API struct ggml_tensor * ggml_flash_ff(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b0,
        struct ggml_tensor  * b1,
        struct ggml_tensor  * c0,
        struct ggml_tensor  * c1);

// Mapping operations
typedef void (*ggml_unary_op_f32_t)(const int, float *, const float *);
typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

GGML_API struct ggml_tensor * ggml_map_unary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        const  ggml_unary_op_f32_t fun);

GGML_API struct ggml_tensor * ggml_map_binary_f32(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        struct ggml_tensor  * b,
        const  ggml_binary_op_f32_t fun);

//
// automatic differentiation
//

GGML_API void ggml_set_param(
        struct ggml_context * ctx,
        struct ggml_tensor  * tensor);

GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);

GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);

GGML_API void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph);
GGML_API void ggml_graph_reset  (struct ggml_cgraph * cgraph);

// print info and performance information for the graph
GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

// dump the graph into a file using the dot format
GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);

//
// optimization
//

// optimization methods
enum ggml_opt_type {
    GGML_OPT_ADAM,
    GGML_OPT_LBFGS,
};

// linesearch methods
enum ggml_linesearch {
    GGML_LINESEARCH_DEFAULT = 1,

    GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
    GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
    GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
};

// optimization return values
enum ggml_opt_result {
    GGML_OPT_OK = 0,
    GGML_OPT_DID_NOT_CONVERGE,
    GGML_OPT_NO_CONTEXT,
    GGML_OPT_INVALID_WOLFE,
    GGML_OPT_FAIL,

    GGML_LINESEARCH_FAIL = -128,
    GGML_LINESEARCH_MINIMUM_STEP,
    GGML_LINESEARCH_MAXIMUM_STEP,
    GGML_LINESEARCH_MAXIMUM_ITERATIONS,
    GGML_LINESEARCH_INVALID_PARAMETERS,
};

// optimization parameters
//
//   see ggml.c (ggml_opt_default_params) for default values
//
struct ggml_opt_params {
    enum ggml_opt_type type;

    int n_threads;

    // delta-based convergence test
    //
    //   if past == 0 - disabled
    //   if past > 0:
    //     stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
    //
    int past;
    float delta;

    // maximum number of iterations without improvement
    //
    //   if 0 - disabled
    //   if > 0:
    //     assume convergence if no cost improvement in this number of iterations
    //
    int max_no_improvement;

    bool print_forward_graph;
    bool print_backward_graph;

    // ADAM parameters
    struct {
        int n_iter;

        float alpha; // learning rate
        float beta1;
        float beta2;
        float eps;   // epsilon for numerical stability
        float eps_f; // epsilon for convergence test
        float eps_g; // epsilon for convergence test
    } adam;

    // LBFGS parameters
    struct {
        int m; // number of corrections to approximate the inv. Hessian
        int n_iter;
        int max_linesearch;

        float eps;      // convergence tolerance
        float ftol;     // line search tolerance
        float wolfe;
        float min_step;
        float max_step;

        enum ggml_linesearch linesearch;
    } lbfgs;
};

GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

// optimize the function defined by the tensor f
GGML_API enum ggml_opt_result ggml_opt(
        struct ggml_context * ctx,
        struct ggml_opt_params params,
        struct ggml_tensor * f);

//
// quantization
//

GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q4_2(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);

GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
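
// Illustrative use (a sketch; a 16-entry hist array is assumed, matching
// the 16 possible nibble values the quantizers bucket into):
//
//   int64_t hist[16] = {0};
//   size_t cur_size = ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst,
//                                         /*start=*/0, /*n=*/nelements, hist);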

//
// system info
//

GGML_API int ggml_cpu_has_avx        (void);
GGML_API int ggml_cpu_has_avx2       (void);
GGML_API int ggml_cpu_has_avx512     (void);
GGML_API int ggml_cpu_has_avx512_vbmi(void);
GGML_API int ggml_cpu_has_avx512_vnni(void);
GGML_API int ggml_cpu_has_fma        (void);
GGML_API int ggml_cpu_has_neon       (void);
GGML_API int ggml_cpu_has_arm_fma    (void);
GGML_API int ggml_cpu_has_f16c       (void);
GGML_API int ggml_cpu_has_fp16_va    (void);
GGML_API int ggml_cpu_has_wasm_simd  (void);
GGML_API int ggml_cpu_has_blas       (void);
GGML_API int ggml_cpu_has_cublas     (void);
GGML_API int ggml_cpu_has_clblast    (void);
GGML_API int ggml_cpu_has_gpublas    (void);
GGML_API int ggml_cpu_has_sse3       (void);
GGML_API int ggml_cpu_has_vsx        (void);

//
// Internal types and functions exposed for tests and benchmarks
//

#ifdef __cplusplus
// restrict not standard in C++
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif
typedef void (*dequantize_row_q_t)(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
typedef void (*quantize_row_q_t)  (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k);
typedef void (*vec_dot_q_t)       (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);

typedef struct {
    dequantize_row_q_t dequantize_row_q;
    quantize_row_q_t   quantize_row_q;
    quantize_row_q_t   quantize_row_q_reference;
    quantize_row_q_t   quantize_row_q_dot;
    vec_dot_q_t        vec_dot_q;
    enum ggml_type     vec_dot_type;
} quantize_fns_t;

quantize_fns_t ggml_internal_get_quantize_fn(size_t i);

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_THIRD_PARTY_LLAMA_CPP_GGML_H_ */
141
third_party/radpajama/ggml.mk
vendored
Normal file
@ -0,0 +1,141 @@
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐
#───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘

PKGS += THIRD_PARTY_GGML

################################################################################
# single file machine learning framework written in c
# make -j8 o//third_party/ggml/ggml.a

THIRD_PARTY_GGML_ARTIFACTS += THIRD_PARTY_GGML_A
THIRD_PARTY_GGML = $(THIRD_PARTY_GGML_A_DEPS) $(THIRD_PARTY_GGML_A)
THIRD_PARTY_GGML_A = o/$(MODE)/third_party/ggml/ggml.a
THIRD_PARTY_GGML_A_HDRS = third_party/ggml/ggml.h
THIRD_PARTY_GGML_A_SRCS = third_party/ggml/ggml.c
THIRD_PARTY_GGML_A_OBJS = $(THIRD_PARTY_GGML_A_SRCS:%.c=o/$(MODE)/%.o)
THIRD_PARTY_GGML_A_FILES = $(THIRD_PARTY_GGML_A_SRCS) $(THIRD_PARTY_GGML_A_HDRS)
THIRD_PARTY_GGML_A_CHECKS = $(THIRD_PARTY_GGML_A).pkg $(THIRD_PARTY_GGML_A_HDRS:%=o/$(MODE)/%.ok)

THIRD_PARTY_GGML_A_DIRECTDEPS = \
	LIBC_CALLS \
	LIBC_INTRIN \
	LIBC_MEM \
	LIBC_NEXGEN32E \
	LIBC_RUNTIME \
	LIBC_STDIO \
	LIBC_THREAD \
	LIBC_STR \
	LIBC_STUBS \
	LIBC_SYSV \
	LIBC_TINYMATH \
	THIRD_PARTY_COMPILER_RT

THIRD_PARTY_GGML_A_DEPS := \
	$(call uniq,$(foreach x,$(THIRD_PARTY_GGML_A_DIRECTDEPS),$($(x))))

$(THIRD_PARTY_GGML_A): \
	third_party/ggml/ \
	$(THIRD_PARTY_GGML_A).pkg \
	$(THIRD_PARTY_GGML_A_OBJS)

$(THIRD_PARTY_GGML_A).pkg: \
	$(THIRD_PARTY_GGML_A_OBJS) \
	$(foreach x,$(THIRD_PARTY_GGML_A_DIRECTDEPS),$($(x)_A).pkg)

$(THIRD_PARTY_GGML_A_OBJS): private \
	OVERRIDE_CFLAGS += \
		-O3 \
		-ffunction-sections \
		-fdata-sections

ifeq ($(ARCH), x86_64)
$(THIRD_PARTY_GGML_A_OBJS): private \
	OVERRIDE_CFLAGS += \
		-msse3 \
		-mavx \
		-mavx2 \
		-mf16c \
		-mfma
endif

o/rel/third_party/ggml/ggml.o \
o/opt/third_party/ggml/ggml.o: private \
	OVERRIDE_CFLAGS += \
		-fomit-frame-pointer \
		-x-no-pg

ifeq ($(ARCH), x86_64)
o/rel/third_party/ggml/ggml.o \
o/opt/third_party/ggml/ggml.o: private \
	OVERRIDE_CFLAGS += \
		-fschedule-insns2 \
		-mred-zone
endif

################################################################################
# command for running inference on large language models
# make -j8 o//third_party/ggml/llama.com

THIRD_PARTY_GGML_ARTIFACTS += THIRD_PARTY_GGML_LLAMA
THIRD_PARTY_GGML_LLAMA = o/$(MODE)/third_party/ggml/llama.com
THIRD_PARTY_GGML_LLAMA_HDRS = third_party/ggml/llama.h third_party/ggml/llama_util.h third_party/ggml/common.h
THIRD_PARTY_GGML_LLAMA_SRCS = third_party/ggml/main.cc third_party/ggml/llama.cc third_party/ggml/common.cc
THIRD_PARTY_GGML_LLAMA_OBJS = $(THIRD_PARTY_GGML_LLAMA_SRCS:%.cc=o/$(MODE)/%.o)
THIRD_PARTY_GGML_LLAMA_FILES := $(THIRD_PARTY_GGML_LLAMA_SRCS) $(THIRD_PARTY_GGML_LLAMA_HDRS)
THIRD_PARTY_GGML_LLAMA_CHECKS = $(THIRD_PARTY_GGML_LLAMA).pkg $(THIRD_PARTY_GGML_LLAMA_HDRS:%=o/$(MODE)/%.okk)

THIRD_PARTY_GGML_LLAMA_DIRECTDEPS = \
	LIBC_CALLS \
	LIBC_FMT \
	LIBC_INTRIN \
	LIBC_MEM \
	LIBC_NEXGEN32E \
	LIBC_RUNTIME \
	LIBC_STDIO \
	LIBC_LOG \
	LIBC_STR \
	LIBC_STUBS \
	LIBC_SYSV \
	LIBC_THREAD \
	LIBC_TINYMATH \
	LIBC_ZIPOS \
	THIRD_PARTY_GGML \
	THIRD_PARTY_LIBCXX

THIRD_PARTY_GGML_LLAMA_DEPS := \
	$(call uniq,$(foreach x,$(THIRD_PARTY_GGML_LLAMA_DIRECTDEPS),$($(x))))

$(THIRD_PARTY_GGML_LLAMA).dbg: \
	$(THIRD_PARTY_GGML_LLAMA).pkg \
	$(THIRD_PARTY_GGML_LLAMA_DEPS) \
	o/$(MODE)/third_party/ggml/companionai.txt.zip.o \
	o/$(MODE)/third_party/ggml/common.o \
	o/$(MODE)/third_party/ggml/llama.o \
	o/$(MODE)/third_party/ggml/main.o \
	$(CRT) \
	$(APE_NO_MODIFY_SELF)
	@$(APELINK)

$(THIRD_PARTY_GGML_LLAMA).pkg: \
	$(THIRD_PARTY_GGML_LLAMA_OBJS) \
	$(foreach x,$(THIRD_PARTY_GGML_LLAMA_DIRECTDEPS),$($(x)_A).pkg)

o/$(MODE)/third_party/ggml/companionai.txt.zip.o: private \
	ZIPOBJ_FLAGS += \
		-B

################################################################################

THIRD_PARTY_GGML_COMS = $(THIRD_PARTY_GGML_LLAMA)
THIRD_PARTY_GGML_BINS = $(THIRD_PARTY_GGML_COMS) $(THIRD_PARTY_GGML_COMS:%=%.dbg)
THIRD_PARTY_GGML_LIBS = $(foreach x,$(THIRD_PARTY_GGML_ARTIFACTS),$($(x)))
THIRD_PARTY_GGML_SRCS = $(foreach x,$(THIRD_PARTY_GGML_ARTIFACTS),$($(x)_SRCS))
THIRD_PARTY_GGML_HDRS = $(foreach x,$(THIRD_PARTY_GGML_ARTIFACTS),$($(x)_HDRS))
THIRD_PARTY_GGML_OBJS = $(foreach x,$(THIRD_PARTY_GGML_ARTIFACTS),$($(x)_OBJS))
THIRD_PARTY_GGML_CHECKS = $(foreach x,$(THIRD_PARTY_GGML_ARTIFACTS),$($(x)_CHECKS))
$(THIRD_PARTY_GGML_OBJS): third_party/ggml/ggml.mk

.PHONY: o/$(MODE)/third_party/ggml
o/$(MODE)/third_party/ggml: \
	$(THIRD_PARTY_GGML_BINS) \
	$(THIRD_PARTY_GGML_CHECKS)
433
third_party/radpajama/gptneox-util.h
vendored
Normal file
@ -0,0 +1,433 @@
// Internal header to be included only by gptneox.cpp.
// Contains wrappers around OS interfaces.

#ifndef GPTNEOX_UTIL_H
#define GPTNEOX_UTIL_H

#include <cstdio>
#include <cstdint>
#include <cerrno>
#include <cstring>
#include <cstdarg>
#include <cstdlib>
#include <climits>

#include <string>
#include <vector>

#ifdef __has_include
    #if __has_include(<unistd.h>)
        #include <unistd.h>
        #if defined(_POSIX_MAPPED_FILES)
            #include <sys/mman.h>
        #endif
        #if defined(_POSIX_MEMLOCK_RANGE)
            #include <sys/resource.h>
        #endif
    #endif
#endif

#if defined(_WIN32)
    #define WIN32_LEAN_AND_MEAN
    #ifndef NOMINMAX
        #define NOMINMAX
    #endif
    #include <windows.h>
    #include <io.h>
    #include <stdio.h> // for _fseeki64
#endif

#define GPTNEOX_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "GPTNEOX_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)

#ifdef __GNUC__
#ifdef __MINGW32__
__attribute__((format(gnu_printf, 1, 2)))
#else
__attribute__((format(printf, 1, 2)))
#endif
#endif
static std::string format(const char * fmt, ...) {
    va_list ap, ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);
    int size = vsnprintf(NULL, 0, fmt, ap);
    GPTNEOX_ASSERT(size >= 0 && size < INT_MAX);
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GPTNEOX_ASSERT(size2 == size);
    va_end(ap2);
    va_end(ap);
    return std::string(buf.data(), size);
}

struct gptneox_file {
    // use FILE * so we don't have to re-open the file to mmap
    FILE * fp;
    size_t size;

    gptneox_file(const char * fname, const char * mode) {
        fp = std::fopen(fname, mode);
        if (fp == NULL) {
            throw format("failed to open %s: %s", fname, std::strerror(errno));
        }
        seek(0, SEEK_END);
        size = tell();
        seek(0, SEEK_SET);
    }

    size_t tell() const {
#ifdef _WIN32
        __int64 ret = _ftelli64(fp);
#else
        long ret = std::ftell(fp);
#endif
        GPTNEOX_ASSERT(ret != -1); // this really shouldn't fail
        return (size_t) ret;
    }

    void seek(size_t offset, int whence) {
#ifdef _WIN32
        int ret = _fseeki64(fp, (__int64) offset, whence);
#else
        int ret = std::fseek(fp, (long) offset, whence);
#endif
        GPTNEOX_ASSERT(ret == 0); // same
    }

    void read_raw(void * ptr, size_t size) {
        if (size == 0) {
            return;
        }
        errno = 0;
        std::size_t ret = std::fread(ptr, size, 1, fp);
        if (ferror(fp)) {
            throw format("read error: %s", strerror(errno));
        }
        if (ret != 1) {
            throw std::string("unexpectedly reached end of file");
        }
    }

    std::uint32_t read_u32() {
        std::uint32_t ret;
        read_raw(&ret, sizeof(ret));
        return ret;
    }

    std::string read_string(std::uint32_t len) {
        std::vector<char> chars(len);
        read_raw(chars.data(), len);
        return std::string(chars.data(), len);
    }

    void write_raw(const void * ptr, size_t size) {
        if (size == 0) {
            return;
        }
        errno = 0;
        size_t ret = std::fwrite(ptr, size, 1, fp);
        if (ret != 1) {
            throw format("write error: %s", strerror(errno));
        }
    }

    void write_u32(std::uint32_t val) {
        write_raw(&val, sizeof(val));
    }

    ~gptneox_file() {
        if (fp) {
            std::fclose(fp);
        }
    }
};
|
||||
|
||||
#if defined(_WIN32)
|
||||
static std::string gptneox_format_win_err(DWORD err) {
|
||||
LPSTR buf;
|
||||
size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||
NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
|
||||
if (!size) {
|
||||
return "FormatMessageA failed";
|
||||
}
|
||||
std::string ret(buf, size);
|
||||
LocalFree(buf);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct gptneox_mmap {
|
||||
void * addr;
|
||||
size_t size;
|
||||
|
||||
gptneox_mmap(const gptneox_mmap &) = delete;
|
||||
|
||||
#ifdef _POSIX_MAPPED_FILES
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
gptneox_mmap(struct gptneox_file * file, bool prefetch = true) {
|
||||
size = file->size;
|
||||
int fd = fileno(file->fp);
|
||||
int flags = MAP_SHARED;
|
||||
#ifdef __linux__
|
||||
flags |= MAP_POPULATE;
|
||||
#endif
|
||||
addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
|
||||
if (addr == MAP_FAILED) {
|
||||
throw format("mmap failed: %s", strerror(errno));
|
||||
}
|
||||
|
||||
if (prefetch) {
|
||||
// Advise the kernel to preload the mapped memory
|
||||
if (madvise(addr, file->size, MADV_WILLNEED)) {
|
||||
fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
|
||||
strerror(errno));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
~gptneox_mmap() {
|
||||
munmap(addr, size);
|
||||
}
|
||||
#elif defined(_WIN32)
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
gptneox_mmap(struct gptneox_file * file, bool prefetch = true) {
|
||||
size = file->size;
|
||||
|
||||
HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
|
||||
|
||||
HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
|
||||
DWORD error = GetLastError();
|
||||
|
||||
if (hMapping == NULL) {
|
||||
throw format("CreateFileMappingA failed: %s", gptneox_format_win_err(error).c_str());
|
||||
}
|
||||
|
||||
addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
|
||||
error = GetLastError();
|
||||
CloseHandle(hMapping);
|
||||
|
||||
if (addr == NULL) {
|
||||
throw format("MapViewOfFile failed: %s", gptneox_format_win_err(error).c_str());
|
||||
}
|
||||
|
||||
#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
|
||||
if (prefetch) {
|
||||
// Advise the kernel to preload the mapped memory
|
||||
WIN32_MEMORY_RANGE_ENTRY range;
|
||||
range.VirtualAddress = addr;
|
||||
range.NumberOfBytes = (SIZE_T)size;
|
||||
if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
|
||||
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
|
||||
gptneox_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
#pragma message("warning: You are building for pre-Windows 8; prefetch not supported")
|
||||
#endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8
|
||||
}
|
||||
|
||||
~gptneox_mmap() {
|
||||
if (!UnmapViewOfFile(addr)) {
|
||||
fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
|
||||
gptneox_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
static constexpr bool SUPPORTED = false;
|
||||
|
||||
gptneox_mmap(struct gptneox_file *) {
|
||||
throw std::string("mmap not supported");
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
// Represents some region of memory being locked using mlock or VirtualLock;
|
||||
// will automatically unlock on destruction.
|
||||
struct gptneox_mlock {
|
||||
void * addr = NULL;
|
||||
size_t size = 0;
|
||||
bool failed_already = false;
|
||||
|
||||
gptneox_mlock() {}
|
||||
gptneox_mlock(const gptneox_mlock &) = delete;
|
||||
|
||||
~gptneox_mlock() {
|
||||
if (size) {
|
||||
raw_unlock(addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
void init(void * addr) {
|
||||
GPTNEOX_ASSERT(this->addr == NULL && this->size == 0);
|
||||
this->addr = addr;
|
||||
}
|
||||
|
||||
void grow_to(size_t target_size) {
|
||||
GPTNEOX_ASSERT(addr);
|
||||
if (failed_already) {
|
||||
return;
|
||||
}
|
||||
size_t granularity = lock_granularity();
|
||||
target_size = (target_size + granularity - 1) & ~(granularity - 1);
|
||||
if (target_size > size) {
|
||||
if (raw_lock((uint8_t *) addr + size, target_size - size)) {
|
||||
size = target_size;
|
||||
} else {
|
||||
failed_already = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef _POSIX_MEMLOCK_RANGE
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
size_t lock_granularity() {
|
||||
return (size_t) sysconf(_SC_PAGESIZE);
|
||||
}
|
||||
|
||||
#ifdef __APPLE__
|
||||
#define MLOCK_SUGGESTION \
|
||||
"Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
|
||||
"decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
|
||||
#else
|
||||
#define MLOCK_SUGGESTION \
|
||||
"Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
|
||||
#endif
|
||||
|
||||
bool raw_lock(const void * addr, size_t size) {
|
||||
if (!mlock(addr, size)) {
|
||||
return true;
|
||||
} else {
|
||||
char* errmsg = std::strerror(errno);
|
||||
bool suggest = (errno == ENOMEM);
|
||||
|
||||
// Check if the resource limit is fine after all
|
||||
struct rlimit lock_limit;
|
||||
if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
|
||||
suggest = false;
|
||||
if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
|
||||
suggest = false;
|
||||
|
||||
fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
|
||||
size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
#undef MLOCK_SUGGESTION
|
||||
|
||||
void raw_unlock(void * addr, size_t size) {
|
||||
if (munlock(addr, size)) {
|
||||
fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
|
||||
}
|
||||
}
|
||||
#elif defined(_WIN32)
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
size_t lock_granularity() {
|
||||
SYSTEM_INFO si;
|
||||
GetSystemInfo(&si);
|
||||
return (size_t) si.dwPageSize;
|
||||
}
|
||||
|
||||
bool raw_lock(void * addr, size_t size) {
|
||||
for (int tries = 1; ; tries++) {
|
||||
if (VirtualLock(addr, size)) {
|
||||
return true;
|
||||
}
|
||||
if (tries == 2) {
|
||||
fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
|
||||
size, this->size, gptneox_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
// It failed but this was only the first try; increase the working
|
||||
// set size and try again.
|
||||
SIZE_T min_ws_size, max_ws_size;
|
||||
if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
|
||||
fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
|
||||
gptneox_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
// Per MSDN: "The maximum number of pages that a process can lock
|
||||
// is equal to the number of pages in its minimum working set minus
|
||||
// a small overhead."
|
||||
// Hopefully a megabyte is enough overhead:
|
||||
size_t increment = size + 1048576;
|
||||
// The minimum must be <= the maximum, so we need to increase both:
|
||||
min_ws_size += increment;
|
||||
max_ws_size += increment;
|
||||
if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
|
||||
fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
|
||||
gptneox_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void raw_unlock(void * addr, size_t size) {
|
||||
if (!VirtualUnlock(addr, size)) {
|
||||
fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
|
||||
gptneox_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
static constexpr bool SUPPORTED = false;
|
||||
|
||||
void raw_lock(const void * addr, size_t size) {
|
||||
fprintf(stderr, "warning: mlock not supported on this system\n");
|
||||
}
|
||||
|
||||
void raw_unlock(const void * addr, size_t size) {}
|
||||
#endif
|
||||
};
|
||||
|
||||
// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
|
||||
struct gptneox_buffer {
|
||||
uint8_t * addr = NULL;
|
||||
size_t size = 0;
|
||||
|
||||
void resize(size_t size) {
|
||||
delete[] addr;
|
||||
addr = new uint8_t[size];
|
||||
this->size = size;
|
||||
}
|
||||
|
||||
~gptneox_buffer() {
|
||||
delete[] addr;
|
||||
}
|
||||
};
|
||||
|
||||
#ifdef GGML_USE_CUBLAS
|
||||
#include "ggml-cuda.h"
|
||||
struct gptneox_ctx_buffer {
|
||||
uint8_t * addr = NULL;
|
||||
size_t size = 0;
|
||||
|
||||
void resize(size_t size) {
|
||||
if (addr) {
|
||||
ggml_cuda_host_free(addr);
|
||||
}
|
||||
addr = (uint8_t *) ggml_cuda_host_malloc(size);
|
||||
this->size = size;
|
||||
}
|
||||
|
||||
~gptneox_ctx_buffer() {
|
||||
if (addr) {
|
||||
ggml_cuda_host_free(addr);
|
||||
}
|
||||
}
|
||||
};
|
||||
#else
|
||||
typedef gptneox_buffer gptneox_ctx_buffer;
|
||||
#endif
|
||||
|
||||
#endif
|
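Taken together, gptneox_file and gptneox_mmap give the loader a zero-copy, read-only view of a model file. A minimal sketch of how the two compose, assuming this header is on the include path (the model path is illustrative, and the helpers throw std::string on failure):

    // Sketch only: open a model file, map it read-only, and peek at its magic.
    #include "gptneox-util.h"

    int main() {
        try {
            gptneox_file file("./models/ggml-model-f16.bin", "rb"); // opens and measures size
            gptneox_mmap mapping(&file);                            // mmap() + MADV_WILLNEED prefetch
            uint32_t magic;
            memcpy(&magic, mapping.addr, sizeof(magic));            // first four bytes of the file
            fprintf(stderr, "mapped %zu bytes, magic %08x\n", mapping.size, magic);
        } catch (const std::string & err) {                         // these helpers throw std::string
            fprintf(stderr, "error: %s\n", err.c_str());
            return 1;
        }
        return 0;
    }

Both RAII wrappers release their resources in the destructor, so the mapping is unmapped and the file closed when the scope exits, even on the error path.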
2942 third_party/radpajama/gptneox.cpp vendored Normal file

File diff suppressed because it is too large.
275 third_party/radpajama/gptneox.h vendored Normal file

@@ -0,0 +1,275 @@
#ifndef GPTNEOX_H
#define GPTNEOX_H

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

#ifdef GPTNEOX_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GPTNEOX_BUILD
#            define GPTNEOX_API __declspec(dllexport)
#        else
#            define GPTNEOX_API __declspec(dllimport)
#        endif
#    else
#        define GPTNEOX_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GPTNEOX_API
#endif

#define GPTNEOX_FILE_VERSION 1
#define GPTNEOX_FILE_MAGIC 0x67676a74 // 'ggjt' in hex
#define GPTNEOX_FILE_MAGIC_UNVERSIONED 0x67676d6c // pre-versioned files

#ifdef __cplusplus
extern "C" {
#endif

//
// C interface
//
// TODO: show sample usage
//

struct gptneox_context;

typedef int gptneox_token;

typedef struct gptneox_token_data {
    gptneox_token id; // token id
    float logit;      // log-odds of the token
    float p;          // probability of the token
} gptneox_token_data;

typedef struct gptneox_token_data_array {
    gptneox_token_data * data;
    size_t size;
    bool sorted;
} gptneox_token_data_array;

typedef void (*gptneox_progress_callback)(float progress, void *ctx);

struct gptneox_context_params {
    int n_ctx;   // text context
    int n_parts; // -1 for default
    int seed;    // RNG seed, 0 for random

    bool f16_kv;     // use fp16 for KV cache
    bool logits_all; // the gptneox_eval() call computes all logits, not just the last one
    bool vocab_only; // only load the vocabulary, no weights
    bool use_mmap;   // use mmap if possible
    bool use_mlock;  // force system to keep model in RAM
    bool embedding;  // embedding mode only

    // called with a progress value between 0 and 1, pass NULL to disable
    gptneox_progress_callback progress_callback;
    // context pointer passed to the progress callback
    void * progress_callback_user_data;
};

// model file types
enum gptneox_ftype {
    GPTNEOX_FTYPE_ALL_F32 = 0,
    GPTNEOX_FTYPE_MOSTLY_F16 = 1,           // except 1d tensors
    GPTNEOX_FTYPE_MOSTLY_Q4_0 = 2,          // except 1d tensors
    GPTNEOX_FTYPE_MOSTLY_Q4_1 = 3,          // except 1d tensors
    GPTNEOX_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
    GPTNEOX_FTYPE_MOSTLY_Q4_2 = 5,          // except 1d tensors
    // GPTNEOX_FTYPE_MOSTLY_Q4_3 (6) support has been removed
    GPTNEOX_FTYPE_MOSTLY_Q8_0 = 7,          // except 1d tensors
    GPTNEOX_FTYPE_MOSTLY_Q5_0 = 8,          // except 1d tensors
    GPTNEOX_FTYPE_MOSTLY_Q5_1 = 9,          // except 1d tensors
};

GPTNEOX_API struct gptneox_context_params gptneox_context_default_params();

GPTNEOX_API bool gptneox_mmap_supported();
GPTNEOX_API bool gptneox_mlock_supported();

// Various functions for loading a ggml gptneox model.
// Allocate (almost) all memory needed for the model.
// Return NULL on failure
GPTNEOX_API struct gptneox_context * gptneox_init_from_file(
        const char * path_model,
        struct gptneox_context_params params);

// Frees all allocated memory
GPTNEOX_API void gptneox_free(struct gptneox_context * ctx);

// TODO: not great API - very likely to change
// Returns 0 on success
// nthread - how many threads to use. If <=0, will use std::thread::hardware_concurrency(), else the number given
GPTNEOX_API int gptneox_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        enum gptneox_ftype ftype,
        int nthread);

GPTNEOX_API int gptneox_model_copy(
        const char * fname_inp,
        const char * fname_out,
        enum gptneox_ftype ftype);

// Apply a LoRA adapter to a loaded model
// path_base_model is the path to a higher quality model to use as a base for
// the layers modified by the adapter. Can be NULL to use the current loaded model.
// The model needs to be reloaded before applying a new adapter, otherwise the adapter
// will be applied on top of the previous one
// Returns 0 on success
GPTNEOX_API int gptneox_apply_lora_from_file(
        struct gptneox_context * ctx,
        const char * path_lora,
        const char * path_base_model,
        int n_threads);

// Returns the number of tokens in the KV cache
GPTNEOX_API int gptneox_get_kv_cache_token_count(struct gptneox_context * ctx);

// Sets the current rng seed.
GPTNEOX_API void gptneox_set_rng_seed(struct gptneox_context * ctx, int seed);

// Returns the size in bytes of the state (rng, logits, embedding and kv_cache)
GPTNEOX_API size_t gptneox_get_state_size(struct gptneox_context * ctx);

// Copies the state to the specified destination address.
// Destination needs to have allocated enough memory.
// Returns the number of bytes copied
GPTNEOX_API size_t gptneox_copy_state_data(struct gptneox_context * ctx, uint8_t * dest);

// Set the state reading from the specified address
// Returns the number of bytes read
GPTNEOX_API size_t gptneox_set_state_data(struct gptneox_context * ctx, const uint8_t * src);

// Save/load session file
GPTNEOX_API size_t gptneox_load_session_file(struct gptneox_context * ctx, const char * path_session, gptneox_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
GPTNEOX_API size_t gptneox_save_session_file(struct gptneox_context * ctx, const char * path_session, const gptneox_token * tokens, size_t n_token_count);

// Run the gptneox inference to obtain the logits and probabilities for the next token.
// tokens + n_tokens is the provided batch of new tokens to process
// n_past is the number of tokens to use from previous eval calls
// Returns 0 on success
GPTNEOX_API int gptneox_eval(
        struct gptneox_context * ctx,
        const gptneox_token * tokens,
        int n_tokens,
        int n_past,
        int n_threads);

// Convert the provided text into tokens.
// The tokens pointer must be large enough to hold the resulting tokens.
// Returns the number of tokens on success, no more than n_max_tokens
// Returns a negative number on failure - the number of tokens that would have been returned
// TODO: not sure if correct
GPTNEOX_API int gptneox_tokenize(
        struct gptneox_context * ctx,
        const char * text,
        gptneox_token * tokens,
        int n_max_tokens,
        bool add_bos);

GPTNEOX_API int gptneox_n_vocab(struct gptneox_context * ctx);
GPTNEOX_API int gptneox_n_ctx  (struct gptneox_context * ctx);
GPTNEOX_API int gptneox_n_embd (struct gptneox_context * ctx);

// Token logits obtained from the last call to gptneox_eval()
// The logits for the last token are stored in the last row
// Can be mutated in order to change the probabilities of the next token
// Rows: n_tokens
// Cols: n_vocab
GPTNEOX_API float * gptneox_get_logits(struct gptneox_context * ctx);

// Get the embeddings for the input
// shape: [n_embd] (1-dimensional)
GPTNEOX_API float * gptneox_get_embeddings(struct gptneox_context * ctx);

// Token Id -> String. Uses the vocabulary in the provided context
GPTNEOX_API const char * gptneox_token_to_str(struct gptneox_context * ctx, gptneox_token token);

// String -> Token Id. Uses the vocabulary in the provided context
GPTNEOX_API gptneox_token gptneox_str_to_token(struct gptneox_context * ctx, const char * str);

// Special tokens
GPTNEOX_API gptneox_token gptneox_token_bos();
GPTNEOX_API gptneox_token gptneox_token_eos();
// GPTNEOX_API gptneox_token gptneox_token_nl();

// TODO: improve the last_n_tokens interface ?
GPTNEOX_API gptneox_token gptneox_sample_top_p_top_k(
        struct gptneox_context * ctx,
        const gptneox_token * last_n_tokens_data,
        int last_n_tokens_size,
        int top_k,
        float top_p,
        float temp,
        float repeat_penalty);

// Sampling functions

/// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
GPTNEOX_API void gptneox_sample_repetition_penalty(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens, size_t last_tokens_size, float penalty);

/// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
GPTNEOX_API void gptneox_sample_frequency_and_presence_penalties(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);

/// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.
GPTNEOX_API void gptneox_sample_softmax(struct gptneox_context * ctx, gptneox_token_data_array * candidates);

/// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
GPTNEOX_API void gptneox_sample_top_k(struct gptneox_context * ctx, gptneox_token_data_array * candidates, int k, size_t min_keep);

/// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
GPTNEOX_API void gptneox_sample_top_p(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep);

/// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
GPTNEOX_API void gptneox_sample_tail_free(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float z, size_t min_keep);

/// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
GPTNEOX_API void gptneox_sample_typical(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep);
GPTNEOX_API void gptneox_sample_temperature(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float temp);

/// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `gptneox_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
GPTNEOX_API gptneox_token gptneox_sample_token_mirostat(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, int m, float * mu);

/// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `gptneox_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
GPTNEOX_API gptneox_token gptneox_sample_token_mirostat_v2(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, float * mu);

/// @details Selects the token with the highest probability.
GPTNEOX_API gptneox_token gptneox_sample_token_greedy(struct gptneox_context * ctx, gptneox_token_data_array * candidates);

/// @details Randomly selects a token from the candidates based on their probabilities.
GPTNEOX_API gptneox_token gptneox_sample_token(struct gptneox_context * ctx, gptneox_token_data_array * candidates);

// Performance information
GPTNEOX_API void gptneox_print_timings(struct gptneox_context * ctx);
GPTNEOX_API void gptneox_reset_timings(struct gptneox_context * ctx);

// Print system information
GPTNEOX_API const char * gptneox_print_system_info(void);

#ifdef __cplusplus
}
#endif

// Internal API to be implemented by gptneox.cpp and used by tests/benchmarks only
#ifdef GPTNEOX_API_INTERNAL

#include <vector>
#include <string>
struct ggml_tensor;

std::vector<std::pair<std::string, struct ggml_tensor *>>& gptneox_internal_get_tensor_map(struct gptneox_context * ctx);

#endif

#endif // GPTNEOX_H
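The header above still marks sample usage as a TODO; the call sequence it implies is init, tokenize, eval, sample, free. A hedged sketch using only functions declared in this header (the model path, thread count, buffer size, and sampling constants are illustrative, not canonical):

    // Sketch only: load a model, tokenize a prompt, evaluate it, sample one token.
    #include "gptneox.h"
    #include <stdio.h>

    int main() {
        struct gptneox_context_params params = gptneox_context_default_params();
        struct gptneox_context * ctx =
            gptneox_init_from_file("./models/ggml-RedPajama-INCITE-Chat-3B-v1-q4_0.bin", params);
        if (!ctx) return 1;

        gptneox_token tokens[64];
        int n = gptneox_tokenize(ctx, "Hello", tokens, 64, /*add_bos=*/true);
        if (n < 0) { gptneox_free(ctx); return 1; }      // buffer was too small

        if (gptneox_eval(ctx, tokens, n, /*n_past=*/0, /*n_threads=*/4) != 0) {
            gptneox_free(ctx);
            return 1;
        }

        // Convenience sampler declared above; last_n_tokens feeds the repeat penalty.
        gptneox_token next = gptneox_sample_top_p_top_k(
            ctx, tokens, n, /*top_k=*/40, /*top_p=*/0.95f, /*temp=*/0.8f,
            /*repeat_penalty=*/1.1f);
        printf("%s\n", gptneox_token_to_str(ctx, next));

        gptneox_free(ctx);
        return 0;
    }

Subsequent eval calls would pass the running token count as n_past so the KV cache is reused instead of reprocessing the whole prompt.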
256 third_party/radpajama/llama.h vendored Normal file

@@ -0,0 +1,256 @@
// -*- c++ -*-
#ifndef LLAMA_H
#define LLAMA_H
#include "libc/intrin/bits.h"
#include "third_party/libcxx/string"
#include "third_party/libcxx/vector"
// clang-format off

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD
#            define LLAMA_API __declspec(dllexport)
#        else
#            define LLAMA_API __declspec(dllimport)
#        endif
#    else
#        define LLAMA_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define LLAMA_API
#endif

#define LLAMA_FILE_VERSION 1
#define LLAMA_FILE_MAGIC READ32BE("ggjt")
#define LLAMA_FILE_MAGIC_UNVERSIONED READ32BE("ggml")
#define LLAMA_SESSION_MAGIC READ32BE("ggsn")
#define LLAMA_SESSION_VERSION 1

#ifdef __cplusplus
extern "C" {
#endif

//
// C interface
//
// TODO: show sample usage
//

struct llama_context;

typedef int llama_token;

typedef struct llama_token_data {
    llama_token id; // token id
    float logit;    // log-odds of the token
    float p;        // probability of the token
} llama_token_data;

typedef struct llama_token_data_array {
    llama_token_data * data;
    size_t size;
    bool sorted;
} llama_token_data_array;

typedef void (*llama_progress_callback)(float progress, void *ctx);

struct llama_context_params {
    int n_ctx;   // text context
    int n_parts; // -1 for default
    int seed;    // RNG seed, -1 for random

    bool f16_kv;     // use fp16 for KV cache
    bool logits_all; // the llama_eval() call computes all logits, not just the last one
    bool vocab_only; // only load the vocabulary, no weights
    bool use_mmap;   // use mmap if possible
    bool use_mlock;  // force system to keep model in RAM
    bool embedding;  // embedding mode only

    // called with a progress value between 0 and 1, pass NULL to disable
    llama_progress_callback progress_callback;
    // context pointer passed to the progress callback
    void * progress_callback_user_data;
};

// model file types
enum llama_ftype {
    LLAMA_FTYPE_ALL_F32 = 0,
    LLAMA_FTYPE_MOSTLY_F16 = 1,           // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_0 = 2,          // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_1 = 3,          // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
    LLAMA_FTYPE_MOSTLY_Q4_2 = 5,          // except 1d tensors
    // LLAMA_FTYPE_MOSTLY_Q4_3 (6) support has been removed
    LLAMA_FTYPE_MOSTLY_Q8_0 = 7,          // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q5_0 = 8,          // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q5_1 = 9,          // except 1d tensors
};

LLAMA_API struct llama_context_params llama_context_default_params();

LLAMA_API bool llama_mmap_supported();
LLAMA_API bool llama_mlock_supported();

// Various functions for loading a ggml llama model.
// Allocate (almost) all memory needed for the model.
// Return NULL on failure
LLAMA_API struct llama_context * llama_init_from_file(
        const char * path_model,
        struct llama_context_params params,
        int verbose);

// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);

// TODO: not great API - very likely to change
// Returns 0 on success
// nthread - how many threads to use. If <=0, will use std::thread::hardware_concurrency(), else the number given
LLAMA_API int llama_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        enum llama_ftype ftype,
        int nthread);

// Apply a LoRA adapter to a loaded model
// path_base_model is the path to a higher quality model to use as a base for
// the layers modified by the adapter. Can be NULL to use the current loaded model.
// The model needs to be reloaded before applying a new adapter, otherwise the adapter
// will be applied on top of the previous one
// Returns 0 on success
LLAMA_API int llama_apply_lora_from_file(
        struct llama_context * ctx,
        const char * path_lora,
        const char * path_base_model,
        int n_threads);

// Returns the number of tokens in the KV cache
LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);

// Sets the current rng seed.
LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, int seed);

// Returns the maximum size in bytes of the state (rng, logits, embedding
// and kv_cache) - will often be smaller after compacting tokens
LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);

// Copies the state to the specified destination address.
// Destination needs to have allocated enough memory.
// Returns the number of bytes copied
LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dest);

// Set the state reading from the specified address
// Returns the number of bytes read
LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src);

// Run the llama inference to obtain the logits and probabilities for the next token.
// tokens + n_tokens is the provided batch of new tokens to process
// n_past is the number of tokens to use from previous eval calls
// Returns 0 on success
LLAMA_API int llama_eval(
        struct llama_context * ctx,
        const llama_token * tokens,
        int n_tokens,
        int n_past,
        int n_threads);

// Convert the provided text into tokens.
// The tokens pointer must be large enough to hold the resulting tokens.
// Returns the number of tokens on success, no more than n_max_tokens
// Returns a negative number on failure - the number of tokens that would have been returned
// TODO: not sure if correct
LLAMA_API int llama_tokenize(
        struct llama_context * ctx,
        const char * text,
        llama_token * tokens,
        int n_max_tokens,
        bool add_bos);

LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
LLAMA_API int llama_n_ctx  (const struct llama_context * ctx);
LLAMA_API int llama_n_embd (const struct llama_context * ctx);

// Token logits obtained from the last call to llama_eval()
// The logits for the last token are stored in the last row
// Can be mutated in order to change the probabilities of the next token
// Rows: n_tokens
// Cols: n_vocab
LLAMA_API float * llama_get_logits(struct llama_context * ctx);

// Get the embeddings for the input
// shape: [n_embd] (1-dimensional)
LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);

// Token Id -> String. Uses the vocabulary in the provided context
LLAMA_API const char * llama_token_to_str(const struct llama_context * ctx, llama_token token);

// Special tokens
LLAMA_API llama_token llama_token_bos();
LLAMA_API llama_token llama_token_eos();
LLAMA_API llama_token llama_token_nl();

// Sampling functions

/// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);

/// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);

/// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.
LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);

/// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep);

/// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);

/// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep);

/// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);

/// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu);

/// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);

/// @details Selects the token with the highest probability.
LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);

/// @details Randomly selects a token from the candidates based on their probabilities.
LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);

// Performance information
LLAMA_API void llama_print_timings(struct llama_context * ctx);
LLAMA_API void llama_reset_timings(struct llama_context * ctx);

// Print system information
LLAMA_API const char * llama_print_system_info(void);

#ifdef __cplusplus
}
#endif

// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
#ifdef LLAMA_API_INTERNAL

struct ggml_tensor;

std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);

#endif

#endif // LLAMA_H
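Where gptneox.h keeps the convenience sampler gptneox_sample_top_p_top_k, this header exposes only the granular llama_sample_* stages, which the caller chains over a llama_token_data_array built from the logits. A sketch of that pipeline, assuming standard include paths; the helper name and the penalty/cutoff constants are illustrative:

    // Sketch only: build a candidate array from the last token's logits and
    // chain the granular samplers. Constants are illustrative, not canonical.
    #include "llama.h"
    #include <vector>

    static llama_token pick_token(struct llama_context * ctx,
                                  const std::vector<llama_token> & last_tokens) {
        const int n_vocab = llama_n_vocab(ctx);
        float * logits = llama_get_logits(ctx);  // row for the last evaluated token

        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);
        for (llama_token id = 0; id < n_vocab; id++) {
            candidates.push_back({id, logits[id], 0.0f});
        }
        llama_token_data_array arr = { candidates.data(), candidates.size(), false };

        llama_sample_repetition_penalty(ctx, &arr, last_tokens.data(),
                                        last_tokens.size(), 1.10f);
        llama_sample_top_k(ctx, &arr, 40, 1);
        llama_sample_top_p(ctx, &arr, 0.95f, 1);
        llama_sample_temperature(ctx, &arr, 0.80f);
        return llama_sample_token(ctx, &arr);  // draws from the remaining distribution
    }

The stage order matters: penalties adjust raw logits first, the truncation samplers prune the candidate set, and temperature rescales what is left before the final draw.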
389 third_party/radpajama/llama_util.h vendored Executable file

@@ -0,0 +1,389 @@
// Internal header to be included only by llama.cpp.
// Contains wrappers around OS interfaces.

#ifndef LLAMA_UTIL_H
#define LLAMA_UTIL_H
#include "libc/calls/struct/rlimit.h"
#include "libc/dce.h"
#include "libc/fmt/fmt.h"
#include "libc/runtime/sysconf.h"
#include "libc/sysv/consts/madv.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/prot.h"
#include "libc/sysv/consts/rlimit.h"
#include "third_party/libcxx/cerrno"
#include "third_party/libcxx/climits"
#include "third_party/libcxx/cstdarg"
#include "third_party/libcxx/cstdint"
#include "third_party/libcxx/cstdio"
#include "third_party/libcxx/cstdlib"
#include "third_party/libcxx/cstring"
#include "third_party/libcxx/string"
#include "third_party/libcxx/vector"
// clang-format off

#define LLAMA_ASSERT(x) \
    do { \
        if (!(x)) { \
            fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            abort(); \
        } \
    } while (0)

#ifdef __GNUC__
#ifdef __MINGW32__
__attribute__((__format__(__gnu_printf__, 1, 2)))
#else
__attribute__((__format__(__printf__, 1, 2)))
#endif
__attribute__((__noreturn__))
#endif
static void Die(const char *fmt, ...) {
    va_list va;
    va_start(va, fmt);
    vfprintf(stderr, fmt, va);
    va_end(va);
    fputc('\n', stderr);
    exit(1);
}

struct llama_file {
    // use FILE * so we don't have to re-open the file to mmap
    FILE * fp;
    size_t size;

    llama_file(const char * fname, const char * mode) {
        fp = std::fopen(fname, mode);
        if (fp == NULL) {
            Die("failed to open %s: %s", fname, std::strerror(errno));
        }
        seek(0, SEEK_END);
        size = tell();
        seek(0, SEEK_SET);
    }

    size_t tell() const {
#ifdef _WIN32
        __int64 ret = _ftelli64(fp);
#else
        long ret = std::ftell(fp);
#endif
        LLAMA_ASSERT(ret != -1); // this really shouldn't fail
        return (size_t) ret;
    }

    void seek(size_t offset, int whence) {
#ifdef _WIN32
        int ret = _fseeki64(fp, (__int64) offset, whence);
#else
        int ret = std::fseek(fp, (long) offset, whence);
#endif
        LLAMA_ASSERT(ret == 0); // same
    }

    void read_raw(void * ptr, size_t size) {
        if (size == 0) {
            return;
        }
        errno = 0;
        std::size_t ret = std::fread(ptr, size, 1, fp);
        if (ferror(fp)) {
            Die("read error: %s", strerror(errno));
        }
        if (ret != 1) {
            Die("unexpectedly reached end of file");
        }
    }

    std::uint32_t read_u32() {
        std::uint32_t ret;
        read_raw(&ret, sizeof(ret));
        return ret;
    }

    std::string read_string(std::uint32_t len) {
        std::vector<char> chars(len);
        read_raw(chars.data(), len);
        return std::string(chars.data(), len);
    }

    void write_raw(const void * ptr, size_t size) {
        if (size == 0) {
            return;
        }
        errno = 0;
        size_t ret = std::fwrite(ptr, size, 1, fp);
        if (ret != 1) {
            Die("write error: %s", strerror(errno));
        }
    }

    void write_u32(std::uint32_t val) {
        write_raw(&val, sizeof(val));
    }

    ~llama_file() {
        if (fp) {
            std::fclose(fp);
        }
    }
};

#if defined(_WIN32)
static std::string llama_format_win_err(DWORD err) {
    LPSTR buf;
    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
    if (!size) {
        return "FormatMessageA failed";
    }
    std::string ret(buf, size);
    LocalFree(buf);
    return ret;
}
#endif

struct llama_mmap {
    void * addr;
    size_t size;

    llama_mmap(const llama_mmap &) = delete;

#ifdef _POSIX_MAPPED_FILES
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, bool prefetch = true) {
        size = file->size;
        int fd = fileno(file->fp);
        int flags = MAP_SHARED;
#ifdef __linux__
        flags |= MAP_POPULATE;
#endif
        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
        if (addr == MAP_FAILED) {
            Die("mmap failed: %s", strerror(errno));
        }

        if (prefetch && !IsWindows()) {
            // Advise the kernel to preload the mapped memory
            if (madvise(addr, file->size, MADV_WILLNEED)) {
                fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
                        strerror(errno));
            }
        }
    }

    ~llama_mmap() {
        munmap(addr, size);
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, bool prefetch = true) {
        size = file->size;

        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));

        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
        DWORD error = GetLastError();

        if (hMapping == NULL) {
            Die("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str());
        }

        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
        error = GetLastError();
        CloseHandle(hMapping);

        if (addr == NULL) {
            Die("MapViewOfFile failed: %s", llama_format_win_err(error).c_str());
        }

#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
        if (prefetch) {
            // Advise the kernel to preload the mapped memory
            WIN32_MEMORY_RANGE_ENTRY range;
            range.VirtualAddress = addr;
            range.NumberOfBytes = (SIZE_T)size;
            if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
            }
        }
#else
#pragma message("warning: You are building for pre-Windows 8; prefetch not supported")
#endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8
    }

    ~llama_mmap() {
        if (!UnmapViewOfFile(addr)) {
            fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    llama_mmap(struct llama_file *) {
        Die("mmap not supported");
    }
#endif
};

// Represents some region of memory being locked using mlock or VirtualLock;
// will automatically unlock on destruction.
struct llama_mlock {
    void * addr = NULL;
    size_t size = 0;
    bool failed_already = false;

    llama_mlock() {}
    llama_mlock(const llama_mlock &) = delete;

    ~llama_mlock() {
        if (size) {
            raw_unlock(addr, size);
        }
    }

    void init(void * addr) {
        LLAMA_ASSERT(this->addr == NULL && this->size == 0);
        this->addr = addr;
    }

    void grow_to(size_t target_size) {
        LLAMA_ASSERT(addr);
        if (failed_already) {
            return;
        }
        size_t granularity = lock_granularity();
        target_size = (target_size + granularity - 1) & ~(granularity - 1);
        if (target_size > size) {
            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
                size = target_size;
            } else {
                failed_already = true;
            }
        }
    }

#ifdef _POSIX_MEMLOCK_RANGE
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        return (size_t) sysconf(_SC_PAGESIZE);
    }

#ifdef __APPLE__
#define MLOCK_SUGGESTION \
    "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
    "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
#else
#define MLOCK_SUGGESTION \
    "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
#endif

    bool raw_lock(const void * addr, size_t size) {
        if (!mlock(addr, size)) {
            return true;
        } else {
            char* errmsg = std::strerror(errno);
            bool suggest = (errno == ENOMEM);

            // Check if the resource limit is fine after all
            struct rlimit lock_limit;
            if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
                suggest = false;
            if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
                suggest = false;

            fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                    size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
            return false;
        }
    }

#undef MLOCK_SUGGESTION

    void raw_unlock(void * addr, size_t size) {
        if (munlock(addr, size)) {
            fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        return (size_t) si.dwPageSize;
    }

    bool raw_lock(void * addr, size_t size) {
        for (int tries = 1; ; tries++) {
            if (VirtualLock(addr, size)) {
                return true;
            }
            if (tries == 2) {
                fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
                        size, this->size, llama_format_win_err(GetLastError()).c_str());
                return false;
            }

            // It failed but this was only the first try; increase the working
            // set size and try again.
            SIZE_T min_ws_size, max_ws_size;
            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
                fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // Per MSDN: "The maximum number of pages that a process can lock
            // is equal to the number of pages in its minimum working set minus
            // a small overhead."
            // Hopefully a megabyte is enough overhead:
            size_t increment = size + 1048576;
            // The minimum must be <= the maximum, so we need to increase both:
            min_ws_size += increment;
            max_ws_size += increment;
            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
                fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
        }
    }

    void raw_unlock(void * addr, size_t size) {
        if (!VirtualUnlock(addr, size)) {
            fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    void raw_lock(const void * addr, size_t size) {
        fprintf(stderr, "warning: mlock not supported on this system\n");
    }

    void raw_unlock(const void * addr, size_t size) {}
#endif
};

// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
struct llama_buffer {
    uint8_t * addr = NULL;
    size_t size = 0;

    void resize(size_t size) {
        free(addr);
        addr = (uint8_t *)memalign(32, size);
        this->size = size;
    }

    ~llama_buffer() {
        free(addr);
    }
};
#endif
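llama_mlock is meant to be grown incrementally as tensors land in memory, and llama_buffer pairs with it as an aligned, non-zeroing allocation. A small sketch of the pattern, assuming this header is on the include path; the helper name and the 64 MiB figure are illustrative:

    // Sketch only: pin a freshly allocated buffer in RAM so it can't be paged out.
    #include "llama_util.h"

    void pin_example() {
        llama_buffer buf;
        buf.resize(64 << 20);   // 32-byte-aligned allocation, no zero-fill

        llama_mlock lock;
        lock.init(buf.addr);    // remember the base address
        lock.grow_to(buf.size); // mlock()s in page-granular increments
        // ~llama_mlock() munlock()s the pinned range automatically.
    }

If mlock fails (typically RLIMIT_MEMLOCK), grow_to records the failure and later calls become no-ops rather than spamming warnings.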
467 third_party/radpajama/main-redpajama-chat.cpp vendored Normal file

@@ -0,0 +1,467 @@
// Defines sigaction on msys:
|
||||
#ifndef _GNU_SOURCE
|
||||
#define _GNU_SOURCE
|
||||
#endif
|
||||
|
||||
#include "common-gptneox.h"
|
||||
#include "gptneox.h"
|
||||
|
||||
#include <cassert>
|
||||
#include <cinttypes>
|
||||
#include <cmath>
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <ctime>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
|
||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
||||
#include <signal.h>
|
||||
#include <unistd.h>
|
||||
#elif defined (_WIN32)
|
||||
#include <signal.h>
|
||||
#endif
|
||||
|
||||
static console_state con_st;
|
||||
static gptneox_context ** g_ctx;
|
||||
|
||||
static bool is_interacting = false;
|
||||
|
||||
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
|
||||
void sigint_handler(int signo) {
|
||||
set_console_color(con_st, CONSOLE_COLOR_DEFAULT);
|
||||
printf("\n"); // this also force flush stdout.
|
||||
if (signo == SIGINT) {
|
||||
if (!is_interacting) {
|
||||
is_interacting=true;
|
||||
} else {
|
||||
gptneox_print_timings(*g_ctx);
|
||||
_exit(130);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif

int main(int argc, char ** argv) {
    gpt_params params;
    params.model = "./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    con_st.use_color = params.use_color;

#if defined (_WIN32)
    win32_console_init(params.use_color);
#endif

    if (params.perplexity) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    if (params.seed <= 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    gptneox_context * ctx;
    g_ctx = &ctx;

    // load the model
    {
        auto lparams = gptneox_context_default_params();

        lparams.n_ctx = params.n_ctx;
        lparams.n_parts = params.n_parts;
        lparams.seed = params.seed;
        lparams.f16_kv = params.memory_f16;
        lparams.use_mmap = params.use_mmap;
        lparams.use_mlock = params.use_mlock;

        ctx = gptneox_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }

    if (!params.lora_adapter.empty()) {
        int err = gptneox_apply_lora_from_file(ctx,
                                               params.lora_adapter.c_str(),
                                               params.lora_base.empty() ? NULL : params.lora_base.c_str(),
                                               params.n_threads);
        if (err != 0) {
            fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
            return 1;
        }
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), gptneox_print_system_info());
    }

    // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters
    if (params.mem_test) {
        {
            const std::vector<gptneox_token> tmp(params.n_batch, 0);
            gptneox_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
        }

        {
            const std::vector<gptneox_token> tmp = { 0, };
            gptneox_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads);
        }

        gptneox_print_timings(ctx);
        gptneox_free(ctx);

        return 0;
    }

    // Always interactive for RedPajama chat model
    params.interactive = true;

    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset(&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        signal(SIGINT, sigint_handler);
#endif
    }
    fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n",
            params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
    fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", params.n_ctx, params.n_batch, params.n_predict, params.n_keep);
    fprintf(stderr, "\n\n");

    // TODO: replace with ring-buffer
    std::vector<gptneox_token> last_n_tokens = std::vector<gptneox_token>();
    //std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

    set_console_color(con_st, CONSOLE_COLOR_PROMPT);

    if (params.interactive) {
        printf("== Running in interactive mode. ==\n"
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
               " - Press Ctrl+C to interject at any time.\n"
#endif
               " - Press Return to return control to RedPajama.\n"
               " - If you want to submit another line, end your input in '\\'.\n\n");
    }

    const int32_t top_k = params.top_k;
    const float top_p = params.top_p;
    const float temp = params.temp;
    const float repeat_penalty = params.repeat_penalty;

    // Chat loop
    while (true) {
        is_interacting = true;

        int n_past = 0;

        // Get input

        // potentially set color to indicate we are taking user input
        set_console_color(con_st, CONSOLE_COLOR_USER_INPUT);

#if defined (_WIN32)
        // Windows: must reactivate sigint handler after each signal
        signal(SIGINT, sigint_handler);
#endif

        if (params.instruct) {
            printf("\n<human>: ");
        }

        std::string buffer;
        if (!params.input_prefix.empty()) {
            buffer += params.input_prefix;
            printf("%s", buffer.c_str());
        }

        std::string line;
        bool another_line = true;
        do {
#if defined(_WIN32)
            std::wstring wline;
            if (!std::getline(std::wcin, wline)) {
                // input stream is bad or EOF received
                return 0;
            }
            win32_utf8_encode(wline, line);
#else
            if (!std::getline(std::cin, line)) {
                // input stream is bad or EOF received
                return 0;
            }
#endif
            if (line.empty() || line.back() != '\\') {
                another_line = false;
            } else {
                line.pop_back(); // Remove the continue character
            }
            buffer += line;
            if (another_line) {
                buffer += '\n';
            }
        } while (another_line);

        is_interacting = false;

        // done taking input, reset color
        set_console_color(con_st, CONSOLE_COLOR_DEFAULT);

        // Check for input
        if (buffer.length() <= 0) {
            continue; // Restart loop for input
        }

        // Tokenize prompt with RedPajama special tokens

        auto prompt_embd = ::gptneox_tokenize(ctx, buffer, false);
        auto embd_inp = std::vector<gptneox_token>();

        // Redpajama: insert special tokens for OA. (prefix)
        embd_inp.push_back(gptneox_str_to_token(ctx, "<"));
        embd_inp.push_back(gptneox_str_to_token(ctx, "human"));
        embd_inp.push_back(gptneox_str_to_token(ctx, ">:"));

        embd_inp.insert(embd_inp.end(), prompt_embd.begin(), prompt_embd.end());

        // Redpajama: insert special tokens for OA. (postfix)
        embd_inp.push_back(gptneox_str_to_token(ctx, "\n"));
        embd_inp.push_back(gptneox_str_to_token(ctx, "<"));
        embd_inp.push_back(gptneox_str_to_token(ctx, "bot"));
        embd_inp.push_back(gptneox_str_to_token(ctx, ">:"));
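        // The assembled context therefore follows the RedPajama chat
        // template: for the input "hi" the model sees "<human>: hi\n<bot>:"
        // and is expected to continue with the assistant's reply.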

        // Verbose prompt
        if (params.verbose_prompt) {
            fprintf(stderr, "\n");
            fprintf(stderr, "%s: prompt: '%s'\n", __func__, buffer.c_str());
            fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
            for (int i = 0; i < (int) embd_inp.size(); i++) {
                fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], gptneox_token_to_str(ctx, embd_inp[i]));
            }
            fprintf(stderr, "\n");
        }

        // How many tokens to generate - check if there's space in context for at least one token (or batch size tokens?)
        int inp_size = (int) embd_inp.size();
        int space = params.n_ctx - inp_size; // keep this signed so overlong input yields a negative value
        if (space <= 0) {
            fprintf(stderr, "%s : input too long\n", __func__);
            continue;
        }
        // Send batches to eval
        while (n_past < inp_size) {
            auto remaining = inp_size - n_past;
            int n_eval = params.n_batch < remaining ? params.n_batch : remaining;
            if (gptneox_eval(ctx, &embd_inp[n_past], n_eval, n_past, params.n_threads)) {
                fprintf(stderr, "<bot>: %s : failed to eval\n", __func__);
                return 1;
            }
            n_past += n_eval;
        }

        const int n_ctx = gptneox_n_ctx(ctx);
        const int n_vocab = gptneox_n_vocab(ctx);

        const float temp = params.temp;
        const int32_t top_k = params.top_k <= 0 ? gptneox_n_vocab(ctx) : params.top_k;
        const float top_p = params.top_p;
        const float tfs_z = params.tfs_z;
        const float typical_p = params.typical_p;
        const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
        const float repeat_penalty = params.repeat_penalty;
        const float alpha_presence = params.presence_penalty;
        const float alpha_frequency = params.frequency_penalty;
        const int mirostat = params.mirostat;
        const float mirostat_tau = params.mirostat_tau;
        const float mirostat_eta = params.mirostat_eta;
        const bool penalize_nl = params.penalize_nl;
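        // Parameter conventions inherited from llama.cpp: top_k <= 0 means
        // "consider the whole vocabulary", and repeat_last_n < 0 means "apply
        // the repetition penalty over the entire context window".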

        // Eval until space runs out
        auto out_count = 0;

        printf("<bot>:");
        while (space > 0) {
            // Get token
            gptneox_token id = 0;

            {
                auto logits = gptneox_get_logits(ctx);

                // Apply params.logit_bias map
                for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
                    logits[it->first] += it->second;
                }

                std::vector<gptneox_token_data> candidates;
                candidates.reserve(n_vocab);
                for (gptneox_token token_id = 0; token_id < n_vocab; token_id++) {
                    candidates.emplace_back(gptneox_token_data{token_id, logits[token_id], 0.0f});
                }

                gptneox_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

                // Apply penalties
                gptneox_token nl_token = gptneox_str_to_token(ctx, "\n");
                float nl_logit = logits[nl_token];
                auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
                gptneox_sample_repetition_penalty(ctx, &candidates_p,
                    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                    last_n_repeat, repeat_penalty);
                gptneox_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                    last_n_repeat, alpha_frequency, alpha_presence);
                if (!penalize_nl) {
                    logits[nl_token] = nl_logit;
                }
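
                // Sampler dispatch: temp <= 0 falls back to greedy argmax;
                // mirostat 1/2 adaptively target an entropy level via the
                // running mu state; otherwise the classic pipeline runs
                // top-k -> tail-free -> typical -> top-p filtering, then
                // temperature scaling before the final draw.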
                if (temp <= 0) {
                    // Greedy sampling
                    id = gptneox_sample_token_greedy(ctx, &candidates_p);
                } else {
                    if (mirostat == 1) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        const int mirostat_m = 100;
                        gptneox_sample_temperature(ctx, &candidates_p, temp);
                        id = gptneox_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                    } else if (mirostat == 2) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        gptneox_sample_temperature(ctx, &candidates_p, temp);
                        id = gptneox_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                    } else {
                        // Temperature sampling
                        gptneox_sample_top_k(ctx, &candidates_p, top_k, 1);
                        gptneox_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
                        gptneox_sample_typical(ctx, &candidates_p, typical_p, 1);
                        gptneox_sample_top_p(ctx, &candidates_p, top_p, 1);
                        gptneox_sample_temperature(ctx, &candidates_p, temp);
                        id = gptneox_sample_token(ctx, &candidates_p);
                    }
                }
            }

            // Inc out count and dec space
            out_count += 1;
            space -= 1;
            // Repeat tokens update
            last_n_tokens.push_back(id);
            if (last_n_tokens.size() > params.repeat_last_n) {
                last_n_tokens.erase(last_n_tokens.begin());
            }
            // Redpajama: check whether the reply is finished, i.e. the model
            // has started emitting the next "<human>:" turn marker.
            //std::cout<<" last_n_tokens.size: "<< last_n_tokens[0] <<" "<< last_n_tokens[1] <<" "<< last_n_tokens[2] << std::endl;
            if (last_n_tokens.size()==3 && last_n_tokens[0]==gptneox_str_to_token(ctx, "<")
                && last_n_tokens[1]==gptneox_str_to_token(ctx, "human") && last_n_tokens[2]==gptneox_str_to_token(ctx, ">:")){
                space = 0;
                continue;
            }

            // Check for eos - end early - check eos before bos in case they are the same
            if (id == gptneox_token_eos()) {
                space = 0;
                continue;
            }
            // Check for bos - skip callback if so
            if (id == gptneox_token_bos()) {
                continue;
            }
            // Convert token to string and display
            // printf("%s(%d)", gptneox_token_to_str(ctx, id), id);
            if (last_n_tokens[2]==gptneox_str_to_token(ctx, "<")){
                ;
            }
            else if (last_n_tokens[2]==gptneox_str_to_token(ctx, "human")){
                if (last_n_tokens[1]==gptneox_str_to_token(ctx, "<")){
                    ;
                }
                else{
                    printf("%s", gptneox_token_to_str(ctx, id));
                }
            }
            else if (last_n_tokens[1]==gptneox_str_to_token(ctx, "<")){
                printf("<");
                printf("%s", gptneox_token_to_str(ctx, id));
            }
            else{
                printf("%s", gptneox_token_to_str(ctx, id));
            }
            fflush(stdout);
            // Check if we need to run another eval
            if (space > 0) {
                // Send generated token back into model for next generation
                if (gptneox_eval(ctx, &id, 1, n_past, params.n_threads)) {
                    fprintf(stderr, "%s : failed to eval\n", __func__);
                    return 1;
                }
                // Increment past count
                n_past += 1;
            }
            // Check for user interrupt
            if (is_interacting) { space = 0; }
        }
        printf("\n");
        //printf("\n %d", space);
        fflush(stdout);
    }

#if defined (_WIN32)
    signal(SIGINT, SIG_DFL);
#endif

    gptneox_print_timings(ctx);
    gptneox_free(ctx);

    set_console_color(con_st, CONSOLE_COLOR_DEFAULT);

    return 0;
}
622
third_party/radpajama/main-redpajama.cpp
vendored
Normal file

@ -0,0 +1,622 @@
// Defines sigaction on msys:
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "common-gptneox.h"
#include "gptneox.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#include <signal.h>
#endif

static console_state con_st;
static gptneox_context ** g_ctx;

static bool is_interacting = false;

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
void sigint_handler(int signo) {
    set_console_color(con_st, CONSOLE_COLOR_DEFAULT);
    printf("\n"); // this also force flushes stdout.
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            gptneox_print_timings(*g_ctx);
            _exit(130);
        }
    }
}
#endif

int main(int argc, char ** argv) {
    gpt_params params;
    params.model = "./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-f16.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    con_st.use_color = params.use_color;

#if defined (_WIN32)
    win32_console_init(params.use_color);
#endif

    if (params.perplexity) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    // params.prompt = R"(// this function checks if the number n is prime
    //bool is_prime(int n) {)";

    gptneox_context * ctx;
    g_ctx = &ctx;

    // load the model
    {
        auto lparams = gptneox_context_default_params();

        lparams.n_ctx = params.n_ctx;
        lparams.n_parts = params.n_parts;
        lparams.seed = params.seed;
        lparams.f16_kv = params.memory_f16;
        lparams.use_mmap = params.use_mmap;
        lparams.use_mlock = params.use_mlock;

        ctx = gptneox_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }

    if (!params.lora_adapter.empty()) {
        int err = gptneox_apply_lora_from_file(ctx,
                                               params.lora_adapter.c_str(),
                                               params.lora_base.empty() ? NULL : params.lora_base.c_str(),
                                               params.n_threads);
        if (err != 0) {
            fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
            return 1;
        }
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), gptneox_print_system_info());
    }

    // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters
    // uncomment the "used_mem" line in llama.cpp to see the results
    if (params.mem_test) {
        {
            const std::vector<gptneox_token> tmp(params.n_batch, 0);
            gptneox_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
        }

        {
            const std::vector<gptneox_token> tmp = { 0, };
            gptneox_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads);
        }

        gptneox_print_timings(ctx);
        gptneox_free(ctx);

        return 0;
    }

    std::string path_session = params.path_session;
    std::vector<gptneox_token> session_tokens;

    if (!path_session.empty()) {
        fprintf(stderr, "%s: attempting to load saved session from %s..\n", __func__, path_session.c_str());

        // REVIEW - fopen to check for existing session
        FILE * fp = std::fopen(path_session.c_str(), "rb");
        if (fp != NULL) {
            std::fclose(fp);

            session_tokens.resize(params.n_ctx);
            size_t n_token_count_out = 0;
            const size_t n_session_bytes = gptneox_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out);
            session_tokens.resize(n_token_count_out);

            if (n_session_bytes > 0) {
                fprintf(stderr, "%s: loaded %zu bytes of session data!\n", __func__, n_session_bytes);
            } else {
                fprintf(stderr, "%s: could not load session file, will recreate\n", __func__);
            }
        } else {
            fprintf(stderr, "%s: session file does not exist, will create\n", __func__);
        }
    }

    // tokenize the prompt
    auto embd_inp = ::gptneox_tokenize(ctx, params.prompt, false); //true);

    const int n_ctx = gptneox_n_ctx(ctx);

    if ((int) embd_inp.size() > n_ctx - 4) {
        fprintf(stderr, "%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    // debug message about similarity of saved session, if applicable
    size_t n_matching_session_tokens = 0;
    if (session_tokens.size()) {
        for (gptneox_token id : session_tokens) {
            if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                break;
            }
            n_matching_session_tokens++;
        }
        if (n_matching_session_tokens >= embd_inp.size()) {
            fprintf(stderr, "%s: session file has exact match for prompt!\n", __func__);
        } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
            fprintf(stderr, "%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
                    __func__, n_matching_session_tokens, embd_inp.size());
        } else {
            fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n",
                    __func__, n_matching_session_tokens, embd_inp.size());
        }
    }

    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size() || params.instruct) {
        params.n_keep = (int)embd_inp.size();
    }

    // in instruct mode, we inject a prefix and a suffix to each input by the user
    if (params.instruct) {
        params.interactive_first = true;
        params.antiprompt.push_back("<|prompter|>");
    }

    // enable interactive mode if reverse prompt or interactive start is specified
    if (params.antiprompt.size() != 0 || params.interactive_first) {
        params.interactive = true;
    }

    // determine newline token
    auto gptneox_token_newline = ::gptneox_tokenize(ctx, "\n", false);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], gptneox_token_to_str(ctx, embd_inp[i]));
        }
        if (params.n_keep > 0) {
            fprintf(stderr, "%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                fprintf(stderr, "%s", gptneox_token_to_str(ctx, embd_inp[i]));
            }
            fprintf(stderr, "'\n");
        }
        fprintf(stderr, "\n");
    }

    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset(&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        signal(SIGINT, sigint_handler);
#endif

        fprintf(stderr, "%s: interactive mode on.\n", __func__);

        if (params.antiprompt.size()) {
            for (auto antiprompt : params.antiprompt) {
                fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str());
            }
        }

        if (!params.input_prefix.empty()) {
            fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str());
        }
    }
    fprintf(stderr, "sampling: repeat_last_n = %d, repeat_penalty = %f, presence_penalty = %f, frequency_penalty = %f, top_k = %d, tfs_z = %f, top_p = %f, typical_p = %f, temp = %f, mirostat = %d, mirostat_lr = %f, mirostat_ent = %f\n",
            params.repeat_last_n, params.repeat_penalty, params.presence_penalty, params.frequency_penalty, params.top_k, params.tfs_z, params.top_p, params.typical_p, params.temp, params.mirostat, params.mirostat_eta, params.mirostat_tau);
    fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
    fprintf(stderr, "\n\n");

    // TODO: replace with ring-buffer
    std::vector<gptneox_token> last_n_tokens(n_ctx);
    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

    if (params.interactive) {
        fprintf(stderr, "== Running in interactive mode. ==\n"
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
                " - Press Ctrl+C to interject at any time.\n"
#endif
                " - Press Return to return control to RedPajama.\n"
                " - If you want to submit another line, end your input in '\\'.\n\n");
        is_interacting = params.interactive_first;
    }

    bool is_antiprompt = false;
    bool input_noecho = false;

    // HACK - because session saving incurs a non-negligible delay, for now skip re-saving session
    // if we loaded a session with at least 75% similarity. It's currently just used to speed up the
    // initial prompt so it doesn't need to be an exact match.
    bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < (embd_inp.size() * 3 / 4);

    int n_past = 0;
    int n_remain = params.n_predict;
    int n_consumed = 0;
    int n_session_consumed = 0;

    // the first thing we will do is to output the prompt, so set color accordingly
    set_console_color(con_st, CONSOLE_COLOR_PROMPT);

    std::vector<gptneox_token> embd;

    while (n_remain != 0 || params.interactive) {
        // predict
        if (embd.size() > 0) {
            // infinite text generation via context swapping
            // if we run out of context:
            // - take the n_keep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
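            // e.g. with n_ctx = 2048, n_keep = 128 and n_past = 2048: n_left
            // is 1920, so the window restarts from the 128 kept tokens plus
            // the most recent ~960 tokens re-inserted for re-evaluation.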
            if (n_past + (int) embd.size() > n_ctx) {
                const int n_left = n_past - params.n_keep;

                n_past = params.n_keep;

                // insert n_left/2 tokens at the start of embd from last_n_tokens
                embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size());

                // REVIEW - stop saving session if we run out of context
                path_session = "";

                //printf("\n---\n");
                //printf("resetting: '");
                //for (int i = 0; i < (int) embd.size(); i++) {
                //    printf("%s", gptneox_token_to_str(ctx, embd[i]));
                //}
                //printf("'\n");
                //printf("\n---\n");
            }

            // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
            // REVIEW
            if (n_session_consumed < (int) session_tokens.size()) {
                size_t i = 0;
                for ( ; i < embd.size(); i++) {
                    if (embd[i] != session_tokens[n_session_consumed]) {
                        session_tokens.resize(n_session_consumed);
                        break;
                    }

                    n_past++;
                    n_session_consumed++;

                    if (n_session_consumed >= (int) session_tokens.size()) {
                        break;
                    }
                }
                if (i > 0) {
                    embd.erase(embd.begin(), embd.begin() + i);
                }
            }

            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }
                if (gptneox_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
                    fprintf(stderr, "%s : failed to eval\n", __func__);
                    return 1;
                }
                n_past += n_eval;
            }

            if (embd.size() > 0 && !path_session.empty()) {
                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                n_session_consumed = session_tokens.size();
            }
        }

        embd.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // out of user input, sample next token
            const float temp = params.temp;
            const int32_t top_k = params.top_k <= 0 ? gptneox_n_vocab(ctx) : params.top_k;
            const float top_p = params.top_p;
            const float tfs_z = params.tfs_z;
            const float typical_p = params.typical_p;
            const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
            const float repeat_penalty = params.repeat_penalty;
            const float alpha_presence = params.presence_penalty;
            const float alpha_frequency = params.frequency_penalty;
            const int mirostat = params.mirostat;
            const float mirostat_tau = params.mirostat_tau;
            const float mirostat_eta = params.mirostat_eta;
            const bool penalize_nl = params.penalize_nl;

            // optionally save the session on first sample (for faster prompt loading next time)
            if (!path_session.empty() && need_to_save_session) {
                need_to_save_session = false;
                gptneox_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
            }

            gptneox_token id = 0;

            {
                auto logits = gptneox_get_logits(ctx);
                auto n_vocab = gptneox_n_vocab(ctx);

                // Apply params.logit_bias map
                for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
                    logits[it->first] += it->second;
                }

                std::vector<gptneox_token_data> candidates;
                candidates.reserve(n_vocab);
                for (gptneox_token token_id = 0; token_id < n_vocab; token_id++) {
                    candidates.emplace_back(gptneox_token_data{token_id, logits[token_id], 0.0f});
                }

                gptneox_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

                // Apply penalties
                gptneox_token nl_token = gptneox_str_to_token(ctx, "\n");
                float nl_logit = logits[nl_token];
                auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
                gptneox_sample_repetition_penalty(ctx, &candidates_p,
                    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                    last_n_repeat, repeat_penalty);
                gptneox_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                    last_n_repeat, alpha_frequency, alpha_presence);
                if (!penalize_nl) {
                    logits[nl_token] = nl_logit;
                }

                if (temp <= 0) {
                    // Greedy sampling
                    id = gptneox_sample_token_greedy(ctx, &candidates_p);
                } else {
                    if (mirostat == 1) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        const int mirostat_m = 100;
                        gptneox_sample_temperature(ctx, &candidates_p, temp);
                        id = gptneox_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                    } else if (mirostat == 2) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        gptneox_sample_temperature(ctx, &candidates_p, temp);
                        id = gptneox_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                    } else {
                        // Temperature sampling
                        gptneox_sample_top_k(ctx, &candidates_p, top_k, 1);
                        gptneox_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
                        gptneox_sample_typical(ctx, &candidates_p, typical_p, 1);
                        gptneox_sample_top_p(ctx, &candidates_p, top_p, 1);
                        gptneox_sample_temperature(ctx, &candidates_p, temp);
                        id = gptneox_sample_token(ctx, &candidates_p);
                    }
                }
                // printf("`%d`", candidates_p.size);

                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(id);
            }

            // replace end of text token with newline token when in interactive mode
            if (id == gptneox_token_eos() && params.interactive && !params.instruct) {
                id = gptneox_token_newline.front();
                if (params.antiprompt.size() != 0) {
                    // tokenize and inject first reverse prompt
                    const auto first_antiprompt = ::gptneox_tokenize(ctx, params.antiprompt.front(), false);
                    embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                }
            }

            // add it to the context
            embd.push_back(id);

            // echo this to console
            input_noecho = false;

            // decrement remaining sampling budget
            --n_remain;
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);
                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(embd_inp[n_consumed]);
                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (!input_noecho) {
            for (auto id : embd) {
                printf("%s", gptneox_token_to_str(ctx, id));
            }
            fflush(stdout);
        }
        // reset color to default if there is no pending user input
        if (!input_noecho && (int)embd_inp.size() == n_consumed) {
            set_console_color(con_st, CONSOLE_COLOR_DEFAULT);
        }

        // in interactive mode, and not currently processing queued inputs;
        // check if we should prompt the user for more
        if (params.interactive && (int) embd_inp.size() <= n_consumed) {

            // check for reverse prompt
            if (params.antiprompt.size()) {
                std::string last_output;
                for (auto id : last_n_tokens) {
                    last_output += gptneox_token_to_str(ctx, id);
                }

                is_antiprompt = false;
                // Check if each of the reverse prompts appears at the end of the output.
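                // Note: std::string::find(s, pos, n) is called with pos anchored
                // at last_output.length() - antiprompt.length(), so only a match
                // at the very end of the accumulated output counts.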
                for (std::string & antiprompt : params.antiprompt) {
                    if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) {
                        is_interacting = true;
                        is_antiprompt = true;
                        set_console_color(con_st, CONSOLE_COLOR_USER_INPUT);
                        fflush(stdout);
                        break;
                    }
                }
            }

            if (n_past > 0 && is_interacting) {
                // potentially set color to indicate we are taking user input
                set_console_color(con_st, CONSOLE_COLOR_USER_INPUT);

#if defined (_WIN32)
                // Windows: must reactivate sigint handler after each signal
                signal(SIGINT, sigint_handler);
#endif

                if (params.instruct) {
                    printf("\n> ");
                }

                std::string buffer;
                if (!params.input_prefix.empty()) {
                    buffer += params.input_prefix;
                    printf("%s", buffer.c_str());
                }

                std::string line;
                bool another_line = true;
                do {
#if defined(_WIN32)
                    std::wstring wline;
                    if (!std::getline(std::wcin, wline)) {
                        // input stream is bad or EOF received
                        return 0;
                    }
                    win32_utf8_encode(wline, line);
#else
                    if (!std::getline(std::cin, line)) {
                        // input stream is bad or EOF received
                        return 0;
                    }
#endif
                    if (line.empty() || line.back() != '\\') {
                        another_line = false;
                    } else {
                        line.pop_back(); // Remove the continue character
                    }
                    buffer += line + '\n'; // Append the line to the result
                } while (another_line);

                // done taking input, reset color
                set_console_color(con_st, CONSOLE_COLOR_DEFAULT);

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {

                    auto line_inp = ::gptneox_tokenize(ctx, buffer, false);
                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
                    n_remain -= line_inp.size();
                }

                input_noecho = true; // do not echo this again
            }

            if (n_past > 0) {
                is_interacting = false;
            }
        }

        // end of text token
        if (!embd.empty() && embd.back() == gptneox_token_eos()) {
            if (params.instruct) {
                is_interacting = true;
            } else {
                fprintf(stderr, " [end of text]\n");
                break;
            }
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        if (params.interactive && n_remain <= 0 && params.n_predict != -1) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

#if defined (_WIN32)
    signal(SIGINT, SIG_DFL);
#endif
    printf("\n\n");
    gptneox_print_timings(ctx);
    gptneox_free(ctx);

    set_console_color(con_st, CONSOLE_COLOR_DEFAULT);

    return 0;
}
891
third_party/radpajama/main.cc
vendored
Normal file

@ -0,0 +1,891 @@
/*-*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-│
│vi: set net ft=c++ ts=4 sts=4 sw=4 fenc=utf-8                              :vi│
╚──────────────────────────────────────────────────────────────────────────────╝
│                                                                              │
│  llama.com                                                                   │
│  Copyright (c) 2023 Justine Alexandra Roberts Tunney                         │
│  Copyright (c) 2023 Georgi Gerganov                                          │
│                                                                              │
│  Permission is hereby granted, free of charge, to any person obtaining      │
│  a copy of this software and associated documentation files (the            │
│  "Software"), to deal in the Software without restriction, including        │
│  without limitation the rights to use, copy, modify, merge, publish,        │
│  distribute, sublicense, and/or sell copies of the Software, and to         │
│  permit persons to whom the Software is furnished to do so, subject to      │
│  the following conditions:                                                  │
│                                                                              │
│  The above copyright notice and this permission notice shall be             │
│  included in all copies or substantial portions of the Software.            │
│                                                                              │
│  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,            │
│  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF         │
│  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.     │
│  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY       │
│  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,       │
│  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE          │
│  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                     │
│                                                                              │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/assert.h"
#include "libc/calls/calls.h"
#include "libc/calls/struct/sigaction.h"
#include "libc/calls/struct/stat.h"
#include "libc/intrin/bits.h"
#include "libc/log/log.h"
#include "libc/nexgen32e/x86feature.h"
#include "libc/stdio/stdio.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/msync.h"
#include "libc/sysv/consts/o.h"
#include "libc/sysv/consts/prot.h"
#include "libc/sysv/consts/sig.h"
#include "third_party/ggml/common.h"
#include "third_party/ggml/llama.h"
#include "third_party/ggml/llama_util.h"
#include "third_party/libcxx/atomic"
#include "third_party/libcxx/iostream"
#include "third_party/libcxx/string"
#include "third_party/libcxx/vector"

asm(".ident\t\"\\n\\n\
llama.cpp (MIT License)\\n\
Copyright (c) 2023 Georgi Gerganov\"");
asm(".include \"libc/disclaimer.inc\"");
// clang-format off

static std::atomic<bool> is_interacting;
static std::atomic<bool> is_terminated;

#define EPHEMERAL(fmt) "\r\e[K\033[1;35m" fmt " \033[0m"
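
// EPHEMERAL writes a status line that overwrites itself: "\r\e[K" returns the
// cursor to column 0 and erases to end of line, "\033[1;35m" switches to bold
// magenta for the message, and "\033[0m" restores the default attributes.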

static void sigint_handler_batch(int signo) {
    is_terminated = true;
}

static void sigint_handler_interactive(int signo) {
    if (!is_interacting) {
        is_interacting = true;
    } else {
        is_terminated = true;
    }
}

static int CompareTime(struct timespec a, struct timespec b) {
    int cmp;
    if (!(cmp = (a.tv_sec > b.tv_sec) - (a.tv_sec < b.tv_sec))) {
        cmp = (a.tv_nsec > b.tv_nsec) - (a.tv_nsec < b.tv_nsec);
    }
    return cmp;
}
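
// CompareTime uses the branch-free three-way idiom (a > b) - (a < b), which
// yields -1, 0, or +1 without risking the overflow a plain subtraction of
// tv_sec values could; nanoseconds only break ties when seconds match.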

static int on_missing_feature(const char *name) {
    fprintf(stderr, "%s: error: cpuid %s not detected\n", __func__, name);
    fprintf(stderr, "%s: amd microprocessors made after 2017 usually work\n", __func__);
    fprintf(stderr, "%s: intel microprocessors made after 2013 usually work\n", __func__);
    return 1;
}

int main(int argc, char ** argv) {
    gpt_params params;

    ShowCrashReports();
    setvbuf(stdin, NULL, _IONBF, 0);
    setvbuf(stdout, NULL, _IONBF, 0);
    setvbuf(stderr, NULL, _IONBF, 0);

    params.model = "models/llama-7B/ggml-model.bin";

#ifdef __x86_64__
    if (!X86_HAVE(AVX2)) return on_missing_feature("avx2");
    if (!X86_HAVE(AVX)) return on_missing_feature("avx");
    if (!X86_HAVE(FMA)) return on_missing_feature("fma");
    if (!X86_HAVE(SSE3)) return on_missing_feature("sse3");
    if (!X86_HAVE(F16C)) {
        fprintf(stderr, "%s: warning: cpuid f16c not detected; inference might crash\n", __func__);
    }
#endif /* __x86_64__ */

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    static console_state con_st;
    con_st.use_color = params.use_color;

    con_st.multiline_input = params.multiline_input;
    console_init(con_st);
    atexit([]() { console_cleanup(con_st); });

    if (params.perplexity) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    if (params.verbose) {
        fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
    }

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    // params.prompt = R"(// this function checks if the number n is prime
    //bool is_prime(int n) {)";

    llama_context * ctx;
    struct stat model_stat;

    // load the model and apply lora adapter, if any
    ctx = llama_init_from_gpt_params(params);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    stat(params.model.c_str(), &model_stat);

    if (!params.lora_adapter.empty()) {
        int err = llama_apply_lora_from_file(ctx,
                                             params.lora_adapter.c_str(),
                                             params.lora_base.empty() ? NULL : params.lora_base.c_str(),
                                             params.n_threads);
        if (err != 0) {
            fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
            return 1;
        }
    }

    // print system information
    if (params.verbose) {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters
    // uncomment the "used_mem" line in llama.cpp to see the results
    if (params.mem_test) {
        {
            const std::vector<llama_token> tmp(params.n_batch, 0);
            llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
        }

        {
            const std::vector<llama_token> tmp = { 0, };
            llama_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads);
        }

        if (params.verbose) {
            llama_print_timings(ctx);
        }
        llama_free(ctx);

        return 0;
    }

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    const int n_ctx = llama_n_ctx(ctx);

    if ((int) embd_inp.size() > n_ctx - 4) {
        fprintf(stderr, "%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    // number of tokens to keep when resetting context
    int n_keep = params.n_keep;
    if (n_keep < 0 || n_keep > (int)embd_inp.size() || params.instruct) {
        n_keep = (int)embd_inp.size();
    }
    if (!n_keep && !params.n_keep_str.empty()) {
        auto pivot = ::llama_tokenize(ctx, params.n_keep_str, false);
        auto pos = std::search(embd_inp.begin(), embd_inp.end(),
                               pivot.begin(), pivot.end());
        if (pos == embd_inp.end()) {
            fprintf(stderr, "%s: error: --n_keep %`'s substring not found within prompt\n",
                    __func__, params.n_keep_str.c_str());
            return 1;
        }
        n_keep = (pos - embd_inp.begin()) + (pivot.end() - pivot.begin());
    }
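
    // i.e. when --n_keep is given as a string, the pivot substring is
    // tokenized and located within the prompt, and everything up to and
    // including that token span becomes the static prefix that survives
    // context resets.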

    // prefix & suffix for instruct mode
    const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true);
    const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false);

    // in instruct mode, we inject a prefix and a suffix to each input by the user
    if (params.instruct) {
        params.interactive_first = true;
        params.antiprompt.push_back("### Instruction:\n\n");
    }

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }

    // determine newline token
    auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d %6d -> %`'s\n", i, embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "%s: first part of prompt: \"", __func__);
        for (int i = 0; i < n_keep; i++) {
            fprintf(stderr, "%'s", llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "\"\n");
        fprintf(stderr, "%s: second part of prompt: \"", __func__);
        for (int i = n_keep; i < embd_inp.size(); i++) {
            fprintf(stderr, "%'s", llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "\"\n");
        fprintf(stderr, "\n");
    }

    // setup ctrl-c handler
    struct sigaction sa;
    sa.sa_flags = 0;
    sigemptyset(&sa.sa_mask);
    if (params.interactive) {
        sa.sa_handler = sigint_handler_interactive;
    } else {
        sa.sa_handler = sigint_handler_batch;
    }
    sigaction(SIGINT, &sa, NULL);

    if (params.interactive) {
        if (params.verbose) {
            fprintf(stderr, "%s: interactive mode on.\n", __func__);
        }

        if (params.verbose && params.antiprompt.size()) {
            for (auto antiprompt : params.antiprompt) {
                fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str());
            }
        }

        if (params.verbose && !params.input_prefix.empty()) {
            fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str());
        }
    }

    if (params.verbose) {
        fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n",
                params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
        fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n",
                n_ctx, params.n_batch, params.n_predict, n_keep);
        fprintf(stderr, "\n\n");
    }

    // TODO: replace with ring-buffer
    std::vector<llama_token> last_n_tokens(n_ctx);
    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

    if (params.verbose && params.interactive) {
        fprintf(stderr, "== Running in interactive mode. ==\n"
                " - Press Ctrl+C to interject at any time.\n"
                " - Press Return to return control to LLaMa.\n"
                " - If you want to submit another line, end your input in '\\'.\n\n");
        is_interacting = params.interactive_first;
    }

    const uint32_t kJtlpMagic = READ32LE("jtlp");
    const uint32_t kJtlpVersion = 0;

    struct jtlp_header {
        uint8_t magic[4];
        uint8_t version[4];
        uint8_t state_size[8];
        uint8_t model_dev[8];
        uint8_t model_ino[8];
        uint8_t model_mtim_sec[8];
        uint8_t model_mtim_nsec[8];
        uint8_t prompt_size[8];
    };
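
    // On-disk layout of a cached prompt file: the fixed little-endian
    // jtlp_header above, immediately followed by the raw prompt bytes, then
    // the serialized transformer state from llama_copy_state_data().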

    enum jtlp_status {
        kPromptPending,
        kPromptCompleted,
        kPromptFinished
    };

    enum jtlp_status prompt_status = kPromptPending;

    bool is_antiprompt = false;
    bool input_noecho = !params.verbose;

    int n_past = 0;
    int n_remain = params.n_predict;
    int n_consumed = 0;

    // instantly reload prompt if it's cached
    int fd = open(params.prompt_path.c_str(), O_RDONLY);
    if (fd != -1) {
        size_t state_size;
        size_t prompt_size;
        struct timespec mtim;
        struct jtlp_header *header;
        off_t rc = lseek(fd, 0, SEEK_END);
        LLAMA_ASSERT(rc != -1);
        void *map = MAP_FAILED;
        size_t file_size = rc;
        if (file_size < sizeof(*header)) { // the file must at least contain a whole header
            fprintf(stderr, "%s: prompt file too small\n",
                    params.prompt_path.c_str());
            goto CantReloadPrompt;
        }
        map = mmap(0, file_size, PROT_READ, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) {
            fprintf(stderr, "%s: mmap failed: %s\n",
                    params.prompt_path.c_str(), strerror(errno));
            goto CantReloadPrompt;
        }
        header = (struct jtlp_header *)map;
        // check file format magic
        if (READ32LE(header->magic) != kJtlpMagic) {
            fprintf(stderr, "%s: prompt file has wrong magic\n",
                    params.prompt_path.c_str());
            goto CantReloadPrompt;
        }
        // check file format version
        if (READ32LE(header->version) > kJtlpVersion) {
            fprintf(stderr, "%s: prompt has future file format version\n",
                    params.prompt_path.c_str());
            goto CantReloadPrompt;
        }
        // check expected state size
        state_size = llama_get_state_size(ctx);
        if (READ64LE(header->state_size) != state_size) {
            if (params.verbose) {
                fprintf(stderr, "%s: prompt has stale data state size\n",
                        params.prompt_path.c_str());
            }
            goto CantReloadPrompt;
        }
        // check model device id
        if (READ64LE(header->model_dev) != model_stat.st_dev) {
            fprintf(stderr, "%s: prompt is for different model (dev)\n",
                    params.prompt_path.c_str());
            goto CantReloadPrompt;
        }
        // check model inode id
        if (READ64LE(header->model_ino) != model_stat.st_ino) {
            fprintf(stderr, "%s: prompt is for different model (ino)\n",
                    params.prompt_path.c_str());
            goto CantReloadPrompt;
        }
        // check model modified timestamp
        mtim.tv_sec = READ64LE(header->model_mtim_sec);
        mtim.tv_nsec = READ64LE(header->model_mtim_nsec);
        if (CompareTime(model_stat.st_mtim, mtim) > 0) {
            if (params.verbose) {
                fprintf(stderr, "%s: model file timestamp changed; will reload and regenerate prompt\n",
                        params.prompt_path.c_str());
            }
            goto CantReloadPrompt;
        }
        // check prompt file size
        prompt_size = READ64LE(header->prompt_size);
        if (sizeof(struct jtlp_header) + prompt_size + state_size > file_size) {
            fprintf(stderr, "%s: prompt file size unexpected\n",
                    params.prompt_path.c_str());
            goto CantReloadPrompt;
        }
        // check prompt textus
        if (prompt_size != params.prompt.size() ||
            memcmp(header + 1, params.prompt.c_str(), prompt_size) != 0) {
            if (params.verbose) {
                fprintf(stderr, "%s: prompt text changed; will reload and regenerate\n",
                        params.prompt_path.c_str());
            }
            goto CantReloadPrompt;
        }
        // read the transformer state
        llama_set_state_data(ctx, (uint8_t *)(header + 1) + prompt_size);
        // we're finished loading the prompt file
        if (params.verbose) {
            fprintf(stderr, "%s: %s: reloaded previously saved prompt\n",
                    __func__, params.prompt_path.c_str());
        }
        // now setup the business logic
        llama_set_rng_seed(ctx, params.seed);
        while ((int) embd_inp.size() > n_consumed) {
            last_n_tokens.erase(last_n_tokens.begin());
            last_n_tokens.push_back(embd_inp[n_consumed++]);
        }
        n_past = n_consumed;
        prompt_status = kPromptFinished;
        if (params.interactive) {
            is_interacting = true;
            for (std::string & antiprompt : params.antiprompt) {
                auto toks = ::llama_tokenize(ctx, antiprompt, false);
                if (std::equal(last_n_tokens.end() - toks.size(),
                               last_n_tokens.end(),
                               toks.begin(),
                               toks.end())) {
                    console_set_color(con_st, CONSOLE_COLOR_PROMPT);
                    printf("%s", antiprompt.c_str());
                    fflush(stdout);
                    break;
                }
            }
            console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
        }
    CantReloadPrompt:
        if (map != MAP_FAILED) {
            munmap(map, file_size);
        }
        close(fd);
    }

    if (prompt_status == kPromptPending && params.verbose) {
        // the first thing we will do is to output the prompt, so set color accordingly
        console_set_color(con_st, CONSOLE_COLOR_PROMPT);
    }

    std::vector<llama_token> embd;

    if (prompt_status == kPromptPending &&
        !params.verbose && con_st.use_color) {
        fprintf(stderr, EPHEMERAL("loading weights..."));
    }

    while ((n_remain != 0 || params.interactive) && !is_terminated) {

        // perform evaluation
        if (embd.size() > 0) {
            if (n_past + (int) embd.size() > n_ctx) {
                n_past = n_keep;
                embd.insert(embd.begin(),
                            last_n_tokens.end() - (n_past - n_keep) / 2 - embd.size(),
                            last_n_tokens.end() - embd.size());
            }
            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }
                if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
                    fprintf(stderr, "%s : failed to eval\n", __func__);
                    console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
                    return 1;
                }
                n_past += n_eval;
                if (prompt_status == kPromptPending &&
                    !params.verbose && con_st.use_color && embd_inp.size()) {
                    fprintf(stderr, EPHEMERAL("loading prompt %d%% ..."),
                            (int)(n_consumed / (double)embd_inp.size() * 100));
                }
            }
            embd.clear();
        }

        // save prompt to disk atomically as soon as it's finished loading
        bool was_completed = prompt_status == kPromptCompleted;
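
        // The save below follows the classic atomic-publish pattern: write
        // into a mkstemp() temporary next to the target, msync() the mapping,
        // then rename() over params.prompt_path so readers never observe a
        // partially written cache file.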
        if (was_completed && !params.prompt_path.empty()) {
            int fd = -1;
            int close_rc;
            uint8_t buf[8];
            size_t file_size;
            size_t state_size;
            std::string tmppath;
            void *map = MAP_FAILED;
            struct jtlp_header header;
            if (!params.verbose && con_st.use_color) {
                fprintf(stderr, EPHEMERAL("caching prompt..."));
            }
            state_size = llama_get_state_size(ctx);
            WRITE32LE(header.magic, kJtlpMagic);
            WRITE32LE(header.version, kJtlpVersion);
            WRITE64LE(header.state_size, state_size);
            WRITE64LE(header.model_dev, model_stat.st_dev);
            WRITE64LE(header.model_ino, model_stat.st_ino);
            WRITE64LE(header.model_mtim_sec, model_stat.st_mtim.tv_sec);
            WRITE64LE(header.model_mtim_nsec, model_stat.st_mtim.tv_nsec);
            WRITE64LE(header.prompt_size, params.prompt.size());
            file_size = sizeof(header) + params.prompt.size() + state_size;
            tmppath.append(params.prompt_path);
            tmppath.append(".XXXXXX");
            fd = mkstemp(&tmppath[0]);
            if (fd == -1) {
                fprintf(stderr, "%s: mkstemp failed: %s\n",
                        tmppath.c_str(), strerror(errno));
                goto CouldNotSavePrompt;
            }
            if (ftruncate(fd, file_size)) {
                fprintf(stderr, "%s: ftruncate failed: %s\n",
                        tmppath.c_str(), strerror(errno));
                goto CouldNotSavePrompt;
            }
            map = mmap(0, file_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (map == MAP_FAILED) {
                fprintf(stderr, "%s: mmap failed: %s\n",
                        tmppath.c_str(), strerror(errno));
                goto CouldNotSavePrompt;
            }
            llama_copy_state_data(ctx, (uint8_t *)map + sizeof(header) + params.prompt.size());
            memcpy((uint8_t *)map + sizeof(header), params.prompt.c_str(), params.prompt.size());
            memcpy(map, &header, sizeof(header));
            if (msync(map, file_size, MS_ASYNC) && params.verbose) {
                fprintf(stderr, "%s: msync failed: %s\n",
                        tmppath.c_str(), strerror(errno));
            }
            if (munmap(map, file_size) && params.verbose) {
                fprintf(stderr, "%s: munmap failed: %s\n",
                        tmppath.c_str(), strerror(errno));
            }
            map = MAP_FAILED;
            close_rc = close(fd);
            fd = -1;
            if (close_rc) {
                fprintf(stderr, "%s: close failed: %s\n",
                        tmppath.c_str(), strerror(errno));
                goto CouldNotSavePrompt;
            }
            if (rename(tmppath.c_str(), params.prompt_path.c_str())) {
                fprintf(stderr, "%s -> %s: rename failed: %s\n",
                        tmppath.c_str(), params.prompt_path.c_str(), strerror(errno));
                goto CouldNotSavePrompt;
            }
            tmppath.clear();
        CouldNotSavePrompt:
            if (map != MAP_FAILED) munmap(map, file_size);
            if (fd != -1) close(fd);
            if (!tmppath.empty()) unlink(tmppath.c_str());
        }
        if (was_completed) {
            if (!params.verbose && con_st.use_color) {
                fprintf(stderr, EPHEMERAL(""));
            }
            if (params.interactive) {
                is_interacting = true;
            }
            prompt_status = kPromptFinished;
            if (params.interactive) {
                is_interacting = true;
                fflush(stdout);
                std::string last_output;
                for (auto id : last_n_tokens) {
                    last_output += llama_token_to_str(ctx, id);
                }
                for (std::string & antiprompt : params.antiprompt) {
                    if (last_output.find(antiprompt.c_str(),
                                         last_output.length() - antiprompt.length(),
                                         antiprompt.length()) != std::string::npos) {
                        console_set_color(con_st, CONSOLE_COLOR_PROMPT);
                        printf("%s", antiprompt.c_str());
                        fflush(stdout);
                        break;
                    }
                }
                console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
            }
        }
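
        // prompt_status walks kPromptPending -> kPromptCompleted (all prompt
        // tokens consumed, which triggers the cache save above) ->
        // kPromptFinished (cache written, normal generation resumes)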

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // out of user input, sample next token
            const float temp = params.temp;
            const int32_t top_k = params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
            const float top_p = params.top_p;
            const float tfs_z = params.tfs_z;
            const float typical_p = params.typical_p;
            const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
            const float repeat_penalty = params.repeat_penalty;
            const float alpha_presence = params.presence_penalty;
            const float alpha_frequency = params.frequency_penalty;
            const int mirostat = params.mirostat;
            const float mirostat_tau = params.mirostat_tau;
            const float mirostat_eta = params.mirostat_eta;
            const bool penalize_nl = params.penalize_nl;

            llama_token id = 0;

            {
                auto logits = llama_get_logits(ctx);
                auto n_vocab = llama_n_vocab(ctx);

                // Apply params.logit_bias map
                for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
                    logits[it->first] += it->second;
                }

                std::vector<llama_token_data> candidates;
                candidates.reserve(n_vocab);
                for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
                    candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
                }

                llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

                // Apply penalties
                float nl_logit = logits[llama_token_nl()];
                auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
                llama_sample_repetition_penalty(ctx, &candidates_p,
                        last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                        last_n_repeat, repeat_penalty);
                llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
                        last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                        last_n_repeat, alpha_frequency, alpha_presence);
                if (!penalize_nl) {
                    logits[llama_token_nl()] = nl_logit;
                }

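                // Sampler chain: with temp > 0 and mirostat off, candidates
                // are narrowed by top-k, tail-free, typical, and top-p
                // filtering (the trailing 1 is each sampler's min_keep),
                // then temperature-scaled before the final draw. Mirostat
                // 1/2 instead adapt the draw toward a target surprise of
                // mirostat_tau at learning rate mirostat_eta.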
                if (temp <= 0) {
                    // Greedy sampling
                    id = llama_sample_token_greedy(ctx, &candidates_p);
                } else {
                    if (mirostat == 1) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        const int mirostat_m = 100;
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
                    } else if (mirostat == 2) {
                        static float mirostat_mu = 2.0f * mirostat_tau;
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
                    } else {
                        // Temperature sampling
                        llama_sample_top_k(ctx, &candidates_p, top_k, 1);
                        llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
                        llama_sample_typical(ctx, &candidates_p, typical_p, 1);
                        llama_sample_top_p(ctx, &candidates_p, top_p, 1);
                        llama_sample_temperature(ctx, &candidates_p, temp);
                        id = llama_sample_token(ctx, &candidates_p);
                    }
                }

                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(id);
            }

            // replace end of text token with newline token when in interactive mode
            if (id == llama_token_eos() && params.interactive && !params.instruct) {
                id = llama_token_newline.front();
                if (params.antiprompt.size() != 0) {
                    // tokenize and inject first reverse prompt
                    const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                    embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                }
            }

            // add it to the context
            embd.push_back(id);

            // echo this to console
            input_noecho = false;

            // decrement remaining sampling budget
            --n_remain;

        } else {
            // some user input remains from prompt or interaction, forward it to processing
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);
                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(embd_inp[n_consumed++]);
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }

            // we've nearly finished loading the prompt
            if (prompt_status == kPromptPending &&
                (int) embd_inp.size() <= n_consumed) {
                prompt_status = kPromptCompleted;
            }
        }

        // checks for reverse prompt
        //
        // 1. in interactive mode, this lets us detect when the llm is
        //    prompting the user, so we can pause for input, e.g.
        //
        //      --interactive
        //      --prompt $'CompanionAI: How can I help you?\nHuman:'
        //      --reverse-prompt 'Human:'
        //
        // 2. in normal mode, the reverse prompt can be used to specify
        //    a custom EOS token, e.g.
        //
        //      --prompt 'Question: How old are you?\nAnswer: '
        //      --reverse-prompt $'\n'
        //
        if (params.antiprompt.size()) {
            std::string last_output;
            for (auto id : last_n_tokens) {
                last_output += llama_token_to_str(ctx, id);
            }
            is_antiprompt = false;
            // Check if each of the reverse prompts appears at the end of the output.
            for (std::string & antiprompt : params.antiprompt) {
                if (last_output.find(antiprompt.c_str(),
                                     last_output.length() - antiprompt.length(),
                                     antiprompt.length()) != std::string::npos) {
                    is_antiprompt = true;
                    break;
                }
            }
            if (is_antiprompt && !params.interactive) {
                printf("\n");
                break;
            }
        }

        // display text
        if (!input_noecho) {
            for (auto id : embd) {
                printf("%s", llama_token_to_str(ctx, id));
            }
            fflush(stdout);
        }
        if (prompt_status == kPromptCompleted) {
            continue;  // avoid reading line before last token loads
        }

        // reset color to default if there is no pending user input
        if (params.verbose && !input_noecho && (int)embd_inp.size() == n_consumed) {
            console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
        }

        if (is_antiprompt) {
            is_interacting = true;
            console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
            fflush(stdout);
        }

        // in interactive mode, and not currently processing queued inputs;
        // check if we should prompt the user for more
        if (params.interactive && (int) embd_inp.size() <= n_consumed) {

            if (n_past > 0 && is_interacting) {

                // potentially set color to indicate we are taking user input
                console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);

                if (params.instruct) {
                    printf("\n> ");
                }

                std::string buffer;
                if (!params.input_prefix.empty()) {
                    buffer += params.input_prefix;
                    printf("%s", buffer.c_str());
                }

                // display a "waiting for input" indicator, just in case
                // the model doesn't halt on the antiprompt.
                if (con_st.use_color) {
                    fprintf(stdout, "?\b");
                    fflush(stdout);
                }

                std::string line;
                bool another_line = true;
                do {
                    another_line = console_readline(con_st, line);
                    buffer += line;
                } while (another_line);

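                // console_readline() reports whether the user asked to
                // continue onto another line (e.g. a trailing backslash),
                // so the whole multi-line entry accumulates into buffer
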
                // done taking input, reset color
                console_set_color(con_st, CONSOLE_COLOR_DEFAULT);

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
                    // append input suffix if any
                    if (!params.input_suffix.empty()) {
                        buffer += params.input_suffix;
                        printf("%s", params.input_suffix.c_str());
                    }

                    // instruct mode: insert instruction prefix
                    if (params.instruct && !is_antiprompt) {
                        n_consumed = embd_inp.size();
                        embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
                    }

                    auto line_inp = ::llama_tokenize(ctx, buffer, false);
                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

                    // instruct mode: insert response suffix
                    if (params.instruct) {
                        embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
                    }

                    n_remain -= line_inp.size();
                }

                input_noecho = true;  // do not echo this again
            }

            if (n_past > 0) {
                is_interacting = false;
            }
            assert(!is_interacting);
        }

        // end of text token
        if (!embd.empty() && embd.back() == llama_token_eos()) {
            if (params.instruct) {
                is_interacting = true;
            } else if (params.verbose) {
                fprintf(stderr, " [end of text]\n");
                break;
            }
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        if (params.interactive && n_remain <= 0 && params.n_predict != -1) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

    if (is_terminated) {
        console_cleanup(con_st);
        printf("\n");
        if (params.verbose) {
            llama_print_timings(ctx);
        }
        _exit(128 + SIGINT);
    }

    if (params.verbose) {
        llama_print_timings(ctx);
    }
    llama_free(ctx);

    console_set_color(con_st, CONSOLE_COLOR_DEFAULT);

    return 0;
}
82
third_party/radpajama/quantize-gptneox.cpp
vendored
Normal file
@ -0,0 +1,82 @@
#include "ggml.h"
#include "gptneox.h"

#include <cstdio>
#include <map>
#include <string>

static const std::map<std::string, enum gptneox_ftype> GPTNEOX_FTYPE_MAP = {
    {"q4_0", GPTNEOX_FTYPE_MOSTLY_Q4_0},
    {"q4_1", GPTNEOX_FTYPE_MOSTLY_Q4_1},
    {"q4_2", GPTNEOX_FTYPE_MOSTLY_Q4_2},
    //{"q4_3", GPTNEOX_FTYPE_MOSTLY_Q4_3},
    {"q5_0", GPTNEOX_FTYPE_MOSTLY_Q5_0},
    {"q5_1", GPTNEOX_FTYPE_MOSTLY_Q5_1},
    {"q8_0", GPTNEOX_FTYPE_MOSTLY_Q8_0},
};

// usage:
//  ./quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
//
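// e.g. for the RedPajama chat model (paths are illustrative):
//  ./quantize-gptneox models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin \
//                     models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-q4_0.bin q4_0
//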
int main(int argc, char ** argv) {
    ggml_time_init();

    if (argc < 4) {
        fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type [nthread]\n", argv[0]);
        for (auto it = GPTNEOX_FTYPE_MAP.begin(); it != GPTNEOX_FTYPE_MAP.end(); it++) {
            fprintf(stderr, "  type = \"%s\" or %d\n", it->first.c_str(), it->second);
        }
        return 1;
    }

    // needed to initialize f16 tables
    {
        struct ggml_init_params params = { 0, NULL, false };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    const std::string fname_inp = argv[1];
    const std::string fname_out = argv[2];

    enum gptneox_ftype ftype;
    if (argv[3][0] == 'q') {
        auto it = GPTNEOX_FTYPE_MAP.find(argv[3]);
        if (it == GPTNEOX_FTYPE_MAP.end()) {
            fprintf(stderr, "%s: unknown ftype '%s'\n", __func__, argv[3]);
            return 1;
        }
        ftype = it->second;
    } else {
        ftype = (enum gptneox_ftype)atoi(argv[3]);
    }

    int nthread = argc > 4 ? atoi(argv[4]) : 0;

    const int64_t t_main_start_us = ggml_time_us();

    int64_t t_quantize_us = 0;

    // load the model
    {
        const int64_t t_start_us = ggml_time_us();

        if (gptneox_model_quantize(fname_inp.c_str(), fname_out.c_str(), ftype, nthread)) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }

        t_quantize_us = ggml_time_us() - t_start_us;
    }

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        printf("\n");
        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
    }

    return 0;
}
3395
third_party/radpajama/radpajama.cc
vendored
Normal file
File diff suppressed because it is too large
127
third_party/radpajama/radpajama.mk
vendored
Normal file
@ -0,0 +1,127 @@
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐
#───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘

PKGS += THIRD_PARTY_REDPAJAMA

################################################################################
# single file machine learning framework written in c
# make -j8 o//third_party/radpajama/ggml.a

THIRD_PARTY_REDPAJAMA_ARTIFACTS += THIRD_PARTY_REDPAJAMA_A
THIRD_PARTY_REDPAJAMA = $(THIRD_PARTY_REDPAJAMA_A_DEPS) $(THIRD_PARTY_REDPAJAMA_A)
THIRD_PARTY_REDPAJAMA_A = o/$(MODE)/third_party/radpajama/ggml.a
THIRD_PARTY_REDPAJAMA_A_HDRS = third_party/radpajama/ggml.h
THIRD_PARTY_REDPAJAMA_A_SRCS = third_party/radpajama/ggml.c
THIRD_PARTY_REDPAJAMA_A_OBJS = $(THIRD_PARTY_REDPAJAMA_A_SRCS:%.c=o/$(MODE)/%.o)
THIRD_PARTY_REDPAJAMA_A_FILES = $(THIRD_PARTY_REDPAJAMA_A_SRCS) $(THIRD_PARTY_REDPAJAMA_A_HDRS)
THIRD_PARTY_REDPAJAMA_A_CHECKS = $(THIRD_PARTY_REDPAJAMA_A).pkg $(THIRD_PARTY_REDPAJAMA_A_HDRS:%=o/$(MODE)/%.ok)

THIRD_PARTY_REDPAJAMA_A_DIRECTDEPS = \
	LIBC_CALLS \
	LIBC_INTRIN \
	LIBC_MEM \
	LIBC_NEXGEN32E \
	LIBC_RUNTIME \
	LIBC_STDIO \
	LIBC_THREAD \
	LIBC_STR \
	LIBC_STUBS \
	LIBC_SYSV \
	LIBC_TINYMATH \
	THIRD_PARTY_COMPILER_RT

THIRD_PARTY_REDPAJAMA_A_DEPS := \
	$(call uniq,$(foreach x,$(THIRD_PARTY_REDPAJAMA_A_DIRECTDEPS),$($(x))))

$(THIRD_PARTY_REDPAJAMA_A): \
	third_party/radpajama/ \
	$(THIRD_PARTY_REDPAJAMA_A).pkg \
	$(THIRD_PARTY_REDPAJAMA_A_OBJS)

$(THIRD_PARTY_REDPAJAMA_A).pkg: \
	$(THIRD_PARTY_REDPAJAMA_A_OBJS) \
	$(foreach x,$(THIRD_PARTY_REDPAJAMA_A_DIRECTDEPS),$($(x)_A).pkg)

$(THIRD_PARTY_REDPAJAMA_A_OBJS): private \
	OVERRIDE_CFLAGS += \
		-O3 \
		-ffunction-sections \
		-fdata-sections

ifeq ($(ARCH), x86_64)
$(THIRD_PARTY_REDPAJAMA_A_OBJS): private \
	OVERRIDE_CFLAGS += \
		-msse3 \
		-mavx \
		-mavx2 \
		-mf16c \
		-mfma
endif

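# note: the x86-64 objects above are built with -mavx2/-mf16c/-mfma, so
# ggml's vectorized kernels assume a CPU supporting those extensions
# (roughly Haswell or newer)
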
################################################################################
# command for running inference on large language models
# make -j8 o//third_party/radpajama/radpajama.com

THIRD_PARTY_REDPAJAMA_ARTIFACTS += THIRD_PARTY_REDPAJAMA_LLAMA
THIRD_PARTY_REDPAJAMA_LLAMA = o/$(MODE)/third_party/radpajama/radpajama.com
THIRD_PARTY_REDPAJAMA_LLAMA_HDRS = third_party/radpajama/llama.h third_party/radpajama/llama_util.h third_party/radpajama/common.h
THIRD_PARTY_REDPAJAMA_LLAMA_SRCS = third_party/radpajama/radpajama.cc third_party/radpajama/common.cc
THIRD_PARTY_REDPAJAMA_LLAMA_OBJS = $(THIRD_PARTY_REDPAJAMA_LLAMA_SRCS:%.cc=o/$(MODE)/%.o)
THIRD_PARTY_REDPAJAMA_LLAMA_FILES := $(THIRD_PARTY_REDPAJAMA_LLAMA_SRCS) $(THIRD_PARTY_REDPAJAMA_LLAMA_HDRS)
THIRD_PARTY_REDPAJAMA_LLAMA_CHECKS = $(THIRD_PARTY_REDPAJAMA_LLAMA).pkg $(THIRD_PARTY_REDPAJAMA_LLAMA_HDRS:%=o/$(MODE)/%.okk)

THIRD_PARTY_REDPAJAMA_LLAMA_DIRECTDEPS = \
	LIBC_CALLS \
	LIBC_FMT \
	LIBC_INTRIN \
	LIBC_MEM \
	LIBC_NEXGEN32E \
	LIBC_RUNTIME \
	LIBC_STDIO \
	LIBC_LOG \
	LIBC_STR \
	LIBC_STUBS \
	LIBC_SYSV \
	LIBC_THREAD \
	LIBC_TINYMATH \
	LIBC_ZIPOS \
	THIRD_PARTY_REDPAJAMA \
	THIRD_PARTY_LIBCXX

THIRD_PARTY_REDPAJAMA_LLAMA_DEPS := \
	$(call uniq,$(foreach x,$(THIRD_PARTY_REDPAJAMA_LLAMA_DIRECTDEPS),$($(x))))

$(THIRD_PARTY_REDPAJAMA_LLAMA).dbg: \
	$(THIRD_PARTY_REDPAJAMA_LLAMA).pkg \
	$(THIRD_PARTY_REDPAJAMA_LLAMA_DEPS) \
	o/$(MODE)/third_party/radpajama/radpajama.txt.zip.o \
	o/$(MODE)/third_party/radpajama/common.o \
	o/$(MODE)/third_party/radpajama/llama.o \
	o/$(MODE)/third_party/radpajama/radpajama.o \
	$(CRT) \
	$(APE_NO_MODIFY_SELF)
	@$(APELINK)

$(THIRD_PARTY_REDPAJAMA_LLAMA).pkg: \
	$(THIRD_PARTY_REDPAJAMA_LLAMA_OBJS) \
	$(foreach x,$(THIRD_PARTY_REDPAJAMA_LLAMA_DIRECTDEPS),$($(x)_A).pkg)

o/$(MODE)/third_party/radpajama/radpajama.txt.zip.o: private \
	ZIPOBJ_FLAGS += \
		-B

################################################################################

THIRD_PARTY_REDPAJAMA_COMS = $(THIRD_PARTY_REDPAJAMA_LLAMA)
THIRD_PARTY_REDPAJAMA_BINS = $(THIRD_PARTY_REDPAJAMA_COMS) $(THIRD_PARTY_REDPAJAMA_COMS:%=%.dbg)
THIRD_PARTY_REDPAJAMA_LIBS = $(foreach x,$(THIRD_PARTY_REDPAJAMA_ARTIFACTS),$($(x)))
THIRD_PARTY_REDPAJAMA_SRCS = $(foreach x,$(THIRD_PARTY_REDPAJAMA_ARTIFACTS),$($(x)_SRCS))
THIRD_PARTY_REDPAJAMA_HDRS = $(foreach x,$(THIRD_PARTY_REDPAJAMA_ARTIFACTS),$($(x)_HDRS))
THIRD_PARTY_REDPAJAMA_OBJS = $(foreach x,$(THIRD_PARTY_REDPAJAMA_ARTIFACTS),$($(x)_OBJS))
THIRD_PARTY_REDPAJAMA_CHECKS = $(foreach x,$(THIRD_PARTY_REDPAJAMA_ARTIFACTS),$($(x)_CHECKS))
$(THIRD_PARTY_REDPAJAMA_OBJS): third_party/radpajama/radpajama.mk

.PHONY: o/$(MODE)/third_party/radpajama
o/$(MODE)/third_party/radpajama: \
	$(THIRD_PARTY_REDPAJAMA_BINS) \
	$(THIRD_PARTY_REDPAJAMA_CHECKS)
144
third_party/radpajama/scripts/convert_gptneox_to_ggml.py
vendored
Normal file
@ -0,0 +1,144 @@
# Convert Hugging Face fine-tuned gpt-neox-like models to ggml format

import io
import os
import sys
import struct
import json
import code
import torch
import numpy as np

from transformers import AutoModelForCausalLM, AutoTokenizer

# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8+n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))

if len(sys.argv) < 3:
    print("Usage: python convert_gptneox_to_ggml.py model_name dir-output [use-f32]")
    print("  model_name: name of the model to convert. Example: 'bigscience/bloomz-560m'")
    print("  dir-output: directory where the output file will be written")
    print("  use-f32:    if present, use float32 instead of float16")
    sys.exit(1)

model_name = sys.argv[1]
dir_out = sys.argv[2]
model_cache_dir = dir_out + "-cache"

# make sure the output directory exists
os.makedirs(dir_out, exist_ok=True)

# possible data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16
#
# map from ftype to string
ftype_str = ["f32", "f16"]
ftype = 1
if len(sys.argv) > 3:
    ftype = 0

tokenizer = AutoTokenizer.from_pretrained(model_name)
print("Loading model: ", model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16 if ftype == 1 else torch.float32,
                                             cache_dir=model_cache_dir)
model.eval()
for p in model.parameters():
    p.requires_grad = False
hparams = model.config.to_dict()
print("Model loaded: ", model_name)

fn_bin = f"/ggml-{model_name.split('/')[-1]}-{ftype_str[ftype]}.bin"
fn_out = dir_out + fn_bin
fout = open(fn_out, "wb")

ggml_file_magic = 0x67676d66 # 0x67676d6c is unversioned
ggml_file_version = 0x00000001 # v1

hparams["multiple_of"] = 1
fout.write(struct.pack("i", ggml_file_magic)) # magic: ggmf in hex
fout.write(struct.pack("i", ggml_file_version))
fout.write(struct.pack("i", hparams["vocab_size"]))
fout.write(struct.pack("i", hparams["max_position_embeddings"]))
fout.write(struct.pack("i", hparams["hidden_size"]))
fout.write(struct.pack("i", hparams["num_attention_heads"]))
fout.write(struct.pack("i", hparams["num_hidden_layers"]))
fout.write(struct.pack("i", int((hparams["hidden_size"] / hparams["num_attention_heads"]
                                 ) * hparams["rotary_pct"]))) # rotary_dim
fout.write(struct.pack("i", int(hparams["use_parallel_residual"])))
fout.write(struct.pack("i", ftype))

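# For reference, the GGMF v1 header written above is, in order (each a
# little-endian int32): magic, version, vocab_size,
# max_position_embeddings, hidden_size, num_attention_heads,
# num_hidden_layers, rotary_dim, use_parallel_residual, ftype.
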
# Is this correct??
dot_token = tokenizer.encode(".")[0]
for i in range(hparams["vocab_size"]):
    text = tokenizer.decode([i]).encode('utf-8')
    fout.write(struct.pack("i", len(text)))
    fout.write(text)

list_vars = model.state_dict()

print(hparams)

for name in list_vars.keys():
    if name.startswith('gpt_neox.layers.'):
        if 'attention.masked_bias' in name or \
           'attention.rotary_emb.inv_freq' in name or \
           'attention.bias' in name:
            continue
    # No gradients for these
    list_vars[name].requires_grad = False
    src = name
    nn = name

    print(src, ' -> ', name)
    data = list_vars[src].squeeze().numpy()
    data = data.astype(np.float32)

    n_dims = len(data.shape)
    print(name, n_dims, data.shape)

    # default type is fp32
    ftype_cur = 0
    if ftype == 1 and n_dims > 1:
        print("  Converting to float16", data.shape, data[:3, :3].tolist())
        data = data.astype(np.float16)
        ftype_cur = 1
    else:
        print("  Converting to float32", data.shape,
              data[:3, :3].tolist() if n_dims > 1 else data[:3].tolist())
        data = data.astype(np.float32)

    # header
    str = name.encode('utf-8')
    fout.write(struct.pack("iii", n_dims, len(str), ftype_cur))
    for i in range(n_dims):
        fout.write(struct.pack("i", data.shape[n_dims - 1 - i]))
    print(str)
    fout.write(str)

    # data
    data.tofile(fout)

fout.close()

print("Done. Output file: " + fn_out)
print("")
21
third_party/radpajama/scripts/install-RedPajama-INCITE-Base-3B-v1.sh
vendored
Normal file
@ -0,0 +1,21 @@
#!/bin/bash

# cd to scripts dir
cd `dirname $0`

# download model to models dir
echo "Downloading model"
python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Base-3B-v1 ../models/pythia

# remove temp cache dir
echo "Removing temp cache dir"
rm -r ../models/pythia-cache

# quantize model
echo "Quantizing model (q4_0)"
cd ../../..
python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Base-3B-v1-f16.bin
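
# (quantize-gptneox.py writes the quantized model next to the f16 file,
# with "f16" in the name replaced by the output type, default q4_0)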

# done!
echo "Done."
21
third_party/radpajama/scripts/install-RedPajama-INCITE-Chat-3B-v1.sh
vendored
Normal file
@ -0,0 +1,21 @@
#!/bin/bash

# cd to scripts dir
cd `dirname $0`

# download model to models dir
echo "Downloading model"
python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Chat-3B-v1 ../models/pythia

# remove temp cache dir
echo "Removing temp cache dir"
rm -r ../models/pythia-cache

# quantize model
echo "Quantizing model (q4_0)"
cd ../../..
python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin

# done!
echo "Done."
21
third_party/radpajama/scripts/install-RedPajama-INCITE-Instruct-3B-v1.sh
vendored
Normal file
@ -0,0 +1,21 @@
#!/bin/bash

# cd to scripts dir
cd `dirname $0`

# download model to models dir
echo "Downloading model"
python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Instruct-3B-v1 ../models/pythia

# remove temp cache dir
echo "Removing temp cache dir"
rm -r ../models/pythia-cache

# quantize model
echo "Quantizing model (q4_0)"
cd ../../..
python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-f16.bin

# done!
echo "Done."
141
third_party/radpajama/scripts/quantize-gptneox.py
vendored
Normal file
@ -0,0 +1,141 @@
#!/usr/bin/env python3

"""Script to execute the "quantize" script on a given set of models."""

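# Example invocation (paths are illustrative):
#   python3 quantize-gptneox.py models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin \
#       --quantize-output-type q4_0
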
import subprocess
import argparse
import glob
import sys
import os


def main():
    """Update the quantize binary name depending on the platform and parse
    the command line arguments and execute the script.
    """

    if "linux" in sys.platform or "darwin" in sys.platform:
        quantize_script_binary = "quantize-gptneox"

    elif "win32" in sys.platform or "cygwin" in sys.platform:
        quantize_script_binary = "quantize-gptneox.exe"

    else:
        print("WARNING: Unknown platform. Assuming a UNIX-like OS.\n")
        quantize_script_binary = "quantize-gptneox"

    parser = argparse.ArgumentParser(
        prog='python3 quantize-gptneox.py',
        description='This script quantizes the given models by applying the '
        f'"{quantize_script_binary}" script on them.'
    )
    parser.add_argument('model_path')
    #parser.add_argument(
    #    'models', nargs='+', choices=('7B', '13B', '30B', '65B'),
    #    help='The models to quantize.'
    #)
    parser.add_argument(
        '-r', '--remove-16', action='store_true', dest='remove_f16',
        help='Remove the f16 model after quantizing it.'
    )
    #parser.add_argument(
    #    '-m', '--models-path', dest='models_path',
    #    default=os.path.join(os.getcwd(), "models"),
    #    help='Specify the directory where the models are located.'
    #)
    parser.add_argument(
        '-q', '--quantize-script-path', dest='quantize_script_path',
        default=os.path.join(os.getcwd(), quantize_script_binary),
        help='Specify the path to the "quantize" script.'
    )

    parser.add_argument(
        '--quantize-output-type', dest='quantize_output_type', type=str,
        default='q4_0',
        help='Specify the quantized output type, e.g. q4_0.'
    )

    # TODO: Revise this code
    # parser.add_argument(
    #     '-t', '--threads', dest='threads', type='int',
    #     default=os.cpu_count(),
    #     help='Specify the number of threads to use to quantize many models at '
    #     'once. Defaults to os.cpu_count().'
    # )

    args = parser.parse_args()
    args.model_path = os.path.abspath(args.model_path)
    #args.models_path = os.path.abspath(args.models_path)

    if not os.path.isfile(args.quantize_script_path):
        print(
            f'The "{quantize_script_binary}" script was not found in the '
            "current location.\nIf you want to use it from another location, "
            "set the --quantize-script-path argument from the command line."
        )
        sys.exit(1)

    #for model in args.models:
    # The model is separated in various parts
    # (ggml-model-f16.bin, ggml-model-f16.bin.0, ggml-model-f16.bin.1...)
    #f16_model_path_base = os.path.join(
    #    args.models_path, model, "ggml-model-f16.bin"
    #)
    f16_model_path_base = args.model_path

    if not os.path.isfile(f16_model_path_base):
        print('The file %s was not found' % f16_model_path_base)
        sys.exit(1)

    f16_model_parts_paths = map(
        lambda filename: os.path.join(f16_model_path_base, filename),
        glob.glob(f"{f16_model_path_base}*")
    )

    for f16_model_part_path in f16_model_parts_paths:
        if not os.path.isfile(f16_model_part_path):
            print(
                f"The f16 model {os.path.basename(f16_model_part_path)} "
                f"was not found in {os.path.dirname(f16_model_part_path)}{os.path.sep}"
                ". If you want to use it from another location, pass a "
                "different model_path on the command line."
            )
            sys.exit(1)

        __run_quantize_script(
            args.quantize_script_path, f16_model_part_path, args.quantize_output_type
        )

        if args.remove_f16:
            os.remove(f16_model_part_path)


# This was extracted to a top-level function for parallelization, if
# implemented. See https://github.com/ggerganov/llama.cpp/pull/222/commits/f8db3d6cd91bf1a1342db9d29e3092bc12dd783c#r1140496406

def __run_quantize_script(script_path, f16_model_part_path, quantize_output_type):
    """Run the quantize script specifying the path to it and the path to the
    f16 model to quantize.
    """

    new_quantized_model_path = f16_model_part_path.replace("f16", quantize_output_type)
    subprocess.run(
        [script_path, f16_model_part_path, new_quantized_model_path, quantize_output_type],
        check=True
    )


if __name__ == "__main__":
    try:
        main()

    except subprocess.CalledProcessError:
        print("\nAn error occurred while trying to quantize the models.")
        sys.exit(1)

    except KeyboardInterrupt:
        sys.exit(0)

    else:
        print("\nSuccessfully quantized all models.")
1
third_party/third_party.mk
vendored
@ -14,6 +14,7 @@ o/$(MODE)/third_party: \
	o/$(MODE)/third_party/gdtoa \
	o/$(MODE)/third_party/getopt \
	o/$(MODE)/third_party/ggml \
#	o/$(MODE)/third_party/radpajama \
	o/$(MODE)/third_party/hiredis \
	o/$(MODE)/third_party/libcxx \
	o/$(MODE)/third_party/linenoise \