Merge pull request #8 from WangHaoranRobin/robin_fork_master

server: fix llama_sample_top_k order
This commit is contained in:
WangHaoranRobin 2023-06-26 18:11:48 -07:00 committed by GitHub
commit 58828c209a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23

View file

@@ -378,10 +378,10 @@ struct llama_server_context {
             } else {
                 // Temperature sampling
                 size_t min_keep = std::max(1, n_probs);
-                llama_sample_top_k(ctx, &candidates_p, top_k, min_keep);
                 llama_sample_tail_free(ctx, &candidates_p, tfs_z, min_keep);
                 llama_sample_typical(ctx, &candidates_p, typical_p, min_keep);
                 llama_sample_top_p(ctx, &candidates_p, top_p, min_keep);
+                llama_sample_top_k(ctx, &candidates_p, top_k, min_keep);
                 llama_sample_temperature(ctx, &candidates_p, temp);
                 result.tok = llama_sample_token(ctx, &candidates_p);
             }