llama : avoid hardcoded special tokens

commit 5d2656d670
parent 035d511457
Author: Georgi Gerganov
Date:   2023-08-18 17:29:20 +03:00
11 changed files with 61 additions and 65 deletions


@@ -427,7 +427,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             }
             params.hellaswag_tasks = std::stoi(argv[i]);
         } else if (arg == "--ignore-eos") {
-            params.logit_bias[llama_token_eos()] = -INFINITY;
+            params.ignore_eos = true;
         } else if (arg == "--no-penalize-nl") {
             params.penalize_nl = false;
         } else if (arg == "-l" || arg == "--logit-bias") {
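The parse-time change works because at argument-parsing time no llama_context exists yet, so the model's EOS token id cannot be queried; the flag is recorded here and acted on later in llama_init_from_gpt_params. For illustration, a minimal sketch of how a -INFINITY entry in logit_bias silences a token at sampling time; apply_logit_bias and the placeholder llama_token typedef are illustrative, not code from this commit:

#include <cmath>
#include <unordered_map>
#include <vector>

typedef int llama_token; // placeholder for the real typedef in llama.h

// sketch: applying gpt_params::logit_bias to raw logits before sampling;
// an entry of -INFINITY makes the token unselectable, which is how
// --ignore-eos suppresses EOS once the map has been filled
static void apply_logit_bias(std::vector<float> & logits,
                             const std::unordered_map<llama_token, float> & logit_bias) {
    for (const auto & it : logit_bias) {
        if (it.first >= 0 && (size_t) it.first < logits.size()) {
            logits[it.first] += it.second; // adding -INFINITY drives the logit to -inf
        }
    }
}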
@@ -662,7 +662,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     return lparams;
 }
 
-std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params) {
+std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params) {
     auto lparams = llama_context_params_from_gpt_params(params);
 
     llama_model * model = llama_load_model_from_file(params.model.c_str(), lparams);
@@ -691,6 +691,10 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
         }
     }
 
+    if (params.ignore_eos) {
+        params.logit_bias[llama_token_eos(lctx)] = -INFINITY;
+    }
+
     return std::make_tuple(model, lctx);
 }
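Together, the two hunks above defer the EOS bias until a context exists, so the token id can come from the loaded model rather than a hardcoded constant. A hypothetical caller, sketched under the assumption that these helpers live in the examples' common.h, would pick up the new behavior like this:

#include <tuple>

#include "common.h" // assumed path; provides gpt_params and the helpers in this diff

int main(int argc, char ** argv) {
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

    llama_model   * model = NULL;
    llama_context * ctx   = NULL;
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == NULL || ctx == NULL) {
        return 1;
    }

    // with --ignore-eos on the command line, params.logit_bias now maps
    // llama_token_eos(ctx) -> -INFINITY, using the model's real EOS id

    // ... run generation ...

    llama_free(ctx);
    llama_free_model(model);
    return 0;
}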


@@ -32,7 +32,6 @@ struct gpt_params {
     float rope_freq_scale = 1.0f; // RoPE frequency scaling factor
 
     // sampling parameters
-    std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
     int32_t top_k = 40; // <= 0 to use vocab size
     float top_p = 0.95f; // 1.0 = disabled
     float tfs_z = 1.00f; // 1.0 = disabled
@@ -46,6 +45,8 @@ struct gpt_params {
     float mirostat_tau = 5.00f; // target entropy
     float mirostat_eta = 0.10f; // learning rate
 
+    std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
+
     // Classifier-Free Guidance
     // https://arxiv.org/abs/2306.17806
     std::string cfg_negative_prompt; // string to help guidance
@@ -81,6 +82,7 @@ struct gpt_params {
     bool simple_io = false; // improves compatibility with subprocesses and limited consoles
     bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
+    bool ignore_eos = false; // ignore generated EOS tokens
     bool instruct = false; // instruction mode (used for Alpaca models)
     bool penalize_nl = true; // consider newlines as a repeatable token
     bool perplexity = false; // compute perplexity over the prompt
@@ -102,7 +104,7 @@ std::string gpt_random_prompt(std::mt19937 & rng);
 // Model utils
 //
 
-std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params);
+std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params);
 struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
 
 //
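The const is dropped from the params reference because the function now writes the EOS bias into params.logit_bias, mutating the caller's struct. The commit title points at the underlying API shift: the special-token accessors take a context instead of returning fixed ids. A rough sketch of that shape, reconstructed from the llama_token_eos(lctx) call site above rather than copied from llama.h:

// placeholders for the real types in llama.h
struct llama_context;
typedef int llama_token;

// old accessor shape (pre-commit): the EOS id was a constant baked into the library
// llama_token llama_token_eos(void);

// new accessor shape (this commit): the id is read from the loaded model's vocab
llama_token llama_token_eos(const struct llama_context * ctx);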