server: fix reported top tokens for temperature 0 (#7203)
This commit is contained in:
parent
b83cc3f5b3
commit
5ae3426b0b
3 changed files with 7 additions and 7 deletions
|
@@ -81,7 +81,7 @@ struct llama_sampling_context {
|
|||
// TODO: replace with ring-buffer
|
||||
std::vector<llama_token> prev;
|
||||
std::vector<llama_token_data> cur;
|
||||
size_t n_considered;
|
||||
size_t n_valid; // Number of top tokens whose reported probabilities are correct.
|
||||
|
||||
std::mt19937 rng;
|
||||
};
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue