llama : new sampling algorithms (#1126)

* Sampling interface, new samplers.

New samplers (a usage sketch follows below):
- locally typical sampling
- tail free sampling
- frequency and presence penalty
- mirostat

Ignore-EOS fix: a logit of -inf should be used for the EOS token.
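
For orientation, here is a minimal C++ sketch of how the samplers listed above can be chained with the new per-step API. The llama_sample_* names follow this PR's llama.h additions as I understand them, but the exact signatures (notably the trailing min_keep argument), the llama_token_data field order, and the gpt_params field names are assumptions based on the diff below, not verbatim project code:

    #include <vector>
    #include "llama.h"
    #include "common.h"   // gpt_params (assumed location of the struct shown in the diff)

    // Sketch: draw one token by filtering the candidate list through each sampler
    // in turn, then sampling from whatever remains.
    static llama_token sample_next(llama_context * ctx, const gpt_params & params) {
        const int n_vocab = llama_n_vocab(ctx);
        float * logits    = llama_get_logits(ctx);

        // wrap the raw logits in a llama_token_data_array (assumed field order: id, logit, p)
        std::vector<llama_token_data> candidates;
        candidates.reserve(n_vocab);
        for (llama_token id = 0; id < n_vocab; id++) {
            candidates.push_back(llama_token_data{id, logits[id], 0.0f});
        }
        llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

        const int top_k = params.top_k <= 0 ? n_vocab : params.top_k; // <= 0 means "use vocab size"

        llama_sample_top_k      (ctx, &candidates_p, top_k, 1);
        llama_sample_tail_free  (ctx, &candidates_p, params.tfs_z, 1);
        llama_sample_typical    (ctx, &candidates_p, params.typical_p, 1);
        llama_sample_top_p      (ctx, &candidates_p, params.top_p, 1);
        llama_sample_temperature(ctx, &candidates_p, params.temp);

        return llama_sample_token(ctx, &candidates_p);
    }

Each call filters candidates_p in place, so the order of the calls defines the sampling pipeline.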

* Mirostat

* Added --logit-bias and --no-penalize-nl, removed std::span
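
Both --ignore-eos and the new --logit-bias option can be expressed as per-token logit adjustments applied before the candidate list is built. A hedged sketch, assuming llama_token_eos() and llama_get_logits() as the relevant helpers and the logit_bias map from the diff below:

    #include <cmath>   // INFINITY

    // Sketch: --ignore-eos becomes a -inf bias on the EOS token, and every
    // --logit-bias entry is added to the corresponding raw logit.
    float * logits = llama_get_logits(ctx);
    if (params.ignore_eos) {
        params.logit_bias[llama_token_eos()] = -INFINITY;   // never sampled
    }
    for (const auto & it : params.logit_bias) {
        logits[it.first] += it.second;   // it.first = token id, it.second = bias
    }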

* Use C++11; clarify llama API documentation; rename Mirostat parameters to --mirostat_lr and --mirostat_ent; add temperature sampling for Mirostat; simplify Mirostat sampling API parameters (removed N and *k)

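For the Mirostat path (--mirostat 1 or 2), a minimal sketch, assuming the llama_sample_token_mirostat / llama_sample_token_mirostat_v2 entry points this PR introduces; the caller owns the running mu state, and tau / eta correspond to --mirostat_ent and --mirostat_lr:

    // Sketch: Mirostat tracks a running "surprise" estimate mu and adapts the
    // truncation so the observed surprise approaches the target entropy tau.
    float mirostat_mu = 2.0f * params.mirostat_tau;   // assumed common initialization, kept across tokens

    llama_sample_temperature(ctx, &candidates_p, params.temp);

    llama_token tok;
    if (params.mirostat == 1) {
        const int mirostat_m = 100;   // number of candidates examined by Mirostat 1.0 (assumed default)
        tok = llama_sample_token_mirostat(ctx, &candidates_p,
                params.mirostat_tau, params.mirostat_eta, mirostat_m, &mirostat_mu);
    } else {
        tok = llama_sample_token_mirostat_v2(ctx, &candidates_p,
                params.mirostat_tau, params.mirostat_eta, &mirostat_mu);
    }

Temperature is applied before the Mirostat call, matching the "add temperature sampling for Mirostat" item above.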

* Adjust the save-and-load example

* Tests

* Windows build fix

* Windows test fix
Author: Ivan Stepanov
Date: 2023-04-29 08:34:41 +03:00 (committed by GitHub)
Commit: dd7eff57d8
Parent: 7fc50c051a
8 changed files with 812 additions and 160 deletions

@@ -8,6 +8,7 @@
#include <vector>
#include <random>
#include <thread>
#include <unordered_map>
//
// CLI argument parsing
@@ -17,17 +18,25 @@ struct gpt_params {
int32_t seed = -1; // RNG seed
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
int32_t n_predict = 128; // new tokens to predict
int32_t repeat_last_n = 64; // last n tokens to penalize
int32_t n_parts = -1; // amount of model parts (-1 = determine from model dimensions)
int32_t n_ctx = 512; // context size
int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
int32_t n_keep = 0; // number of tokens to keep from initial prompt
// sampling parameters
int32_t top_k = 40;
float top_p = 0.95f;
float temp = 0.80f;
float repeat_penalty = 1.10f;
std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
int32_t top_k = 0; // <= 0 to use vocab size
float top_p = 1.0f; // 1.0 = disabled
float tfs_z = 1.0f; // 1.0 = disabled
float typical_p = 1.0f; // 1.0 = disabled
float temp = 1.0f; // 1.0 = disabled
float repeat_penalty = 1.0f; // 1.0 = disabled
int32_t repeat_last_n = -1; // last n tokens to penalize (0 = disable penalty, -1 = context size)
float frequency_penalty = 0.0f; // 0.0 = disabled
float presence_penalty = 0.0f; // 0.0 = disabled
int mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
float mirostat_tau = 5.0f; // target entropy
float mirostat_eta = 0.1f; // learning rate
std::string model = "models/lamma-7B/ggml-model.bin"; // model path
std::string prompt = "";
@@ -47,7 +56,7 @@ struct gpt_params {
bool interactive_first = false; // wait for user input immediately
bool instruct = false; // instruction mode (used for Alpaca models)
bool ignore_eos = false; // do not stop generating after eos
bool penalize_nl = true; // consider newlines as a repeatable token
bool perplexity = false; // compute perplexity over the prompt
bool use_mmap = true; // use mmap for faster loads
bool use_mlock = false; // use mlock to keep model in memory
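
To connect the fields above to the sampler calls, a hedged sketch of the penalty stage: it assumes a caller-maintained last_n_tokens history (std::vector<llama_token>), the candidates_p array and logits pointer from the earlier sketch, and llama_token_nl() as the helper added for --no-penalize-nl. repeat_last_n == -1 is resolved to the context size as documented in the diff:

    #include <algorithm>   // std::min

    // Sketch: penalize recently generated tokens, then optionally undo the
    // penalty on '\n' so newlines stay sampleable (--no-penalize-nl).
    const int last_n   = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
    const int n_repeat = std::min((int) last_n_tokens.size(), std::min(last_n, n_ctx));
    const float nl_logit = logits[llama_token_nl()];   // remember the pre-penalty value

    llama_sample_repetition_penalty(ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - n_repeat,
            n_repeat, params.repeat_penalty);
    llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
            last_n_tokens.data() + last_n_tokens.size() - n_repeat,
            n_repeat, params.frequency_penalty, params.presence_penalty);

    if (!params.penalize_nl) {
        for (size_t i = 0; i < candidates_p.size; i++) {
            if (candidates_p.data[i].id == llama_token_nl()) {
                candidates_p.data[i].logit = nl_logit;   // restore: newline not penalized
                break;
            }
        }
    }

With repeat_last_n == 0 the window is empty and both penalty calls become no-ops, matching the "0 = disable penalty" comment in the diff.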