sampling : refactor init to use llama_sampling_params (#3696)

* sampling : refactor init to use llama_sampling_params

* llama : combine repetition, frequency and presence penalties in 1 call

* examples : remove embd-input and gptneox-wip

* sampling : rename penalty params + reduce size of "prev" vector

* sampling : add llama_sampling_print helper

* sampling : hide prev behind API and apply #3661

ggml-ci
This commit is contained in:
Georgi Gerganov 2023-10-20 21:07:23 +03:00 committed by GitHub
parent 8cf19d60dc
commit d1031cf49c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
30 changed files with 365 additions and 4502 deletions

View file

@@ -112,16 +112,16 @@ int main(int argc, char ** argv) {
bool has_eos = false;
// target model sampling context
struct llama_sampling_context * ctx_sampling = llama_sampling_init(params);
struct llama_sampling_context * ctx_sampling = llama_sampling_init(params.sparams);
// draft sequence data
std::vector<seq_draft> drafts(n_seq_dft);
params.grammar.clear(); // the draft samplers will copy the target sampler's grammar
params.sampling_params.temp = std::max(0.01f, params.sampling_params.temp);
params.sparams.grammar.clear(); // the draft samplers will copy the target sampler's grammar
params.sparams.temp = std::max(0.01f, params.sparams.temp);
for (int s = 0; s < n_seq_dft; ++s) {
drafts[s].ctx_sampling = llama_sampling_init(params);
drafts[s].ctx_sampling = llama_sampling_init(params.sparams);
}
llama_batch batch_dft = llama_batch_init(params.n_ctx, 0, 1);
@@ -154,7 +154,7 @@ int main(int argc, char ** argv) {
// sample from the target model
llama_token id = llama_sampling_sample(ctx_sampling, ctx_tgt, NULL, drafts[s_keep].i_batch_tgt[i_dft]);
llama_sampling_accept(ctx_sampling, ctx_tgt, id);
llama_sampling_accept(ctx_sampling, ctx_tgt, id, true);
//LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_tgt, ctx_sampling->prev).c_str());
@@ -328,7 +328,7 @@ int main(int argc, char ** argv) {
const int s = sa[is];
llama_sampling_accept(drafts[s].ctx_sampling, ctx_dft, id);
llama_sampling_accept(drafts[s].ctx_sampling, ctx_dft, id, true);
drafts[s].tokens.push_back(id);