common : fix mirostat state when using multiple sequences (#3543)

* Fix mirostat state when using multiple sequences

* Fix mirostat by completely refactoring sampling!

* Try to fix zig build.

* Export function to fetch/create default sampler states

Clean up code formatting and add some comments

Silence a warning about id not being used when logging is disabled

* Apply some renaming suggestions.

Fix comments that were out of sync with the pull.

* Use a more consistent naming convention for sampling contexts
Kerfuffle 2023-10-11 13:35:46 -06:00 committed by GitHub
parent 8c70a5ff25
commit 70c29da118
14 changed files with 495 additions and 334 deletions
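
Mirostat is the stateful sampler in play here: it keeps a running mu value that it updates after every sampled token, so if several sequences share a single mu, the updates made while sampling one sequence corrupt sampling for the others. The "fetch/create default sampler states" bullet in the commit message describes keeping that state per sequence instead. Below is a minimal standalone sketch of that idea; the names (seq_sampler_state, get_or_create_state) are hypothetical and are not the functions added by this commit, which lives in common/sampling.h.

    #include <cstdint>
    #include <unordered_map>

    // Hypothetical stand-in for per-sequence sampler state; the real commit
    // keeps this inside a sampling context in common/sampling.h.
    struct seq_sampler_state {
        float mirostat_mu = 10.0f; // mirostat convention: start at 2 * tau (tau = 5.0 here)
    };

    // Fetch the state for a sequence id, creating a default-initialized one
    // on first use -- the "fetch/create default sampler states" idea, with
    // made-up names for illustration.
    static seq_sampler_state & get_or_create_state(
            std::unordered_map<int64_t, seq_sampler_state> & states, int64_t seq_id) {
        return states[seq_id]; // operator[] default-constructs on first access
    }

    int main() {
        std::unordered_map<int64_t, seq_sampler_state> states;

        // Each sequence owns its own mu, so mirostat updates for one sequence
        // no longer leak into the others.
        get_or_create_state(states, 0).mirostat_mu = 3.2f;
        get_or_create_state(states, 1).mirostat_mu = 7.9f;

        return get_or_create_state(states, 0).mirostat_mu == 3.2f ? 0 : 1;
    }
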


@@ -8,9 +8,10 @@
 int main(int argc, char ** argv) {
     gpt_params params;
+    llama_sampling_params & sparams = params.sampling_params;
     params.seed = 42;
     params.n_threads = 4;
-    params.repeat_last_n = 64;
+    sparams.repeat_last_n = 64;
     params.prompt = "The quick brown fox";
     if (!gpt_params_parse(argc, argv, params)) {
@@ -24,7 +25,7 @@ int main(int argc, char ** argv) {
     }
     auto n_past = 0;
-    auto last_n_tokens_data = std::vector<llama_token>(params.repeat_last_n, 0);
+    auto last_n_tokens_data = std::vector<llama_token>(sparams.repeat_last_n, 0);
     // init
     llama_model * model;
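
The hunk above also shows the parameter-side half of the refactor: sampling settings such as repeat_last_n move out of gpt_params into a nested llama_sampling_params struct, and callers bind an sparams reference to it. A reduced sketch of that access pattern follows; the _sketch structs are illustrative stand-ins, trimmed to the single field visible in the diff.

    #include <cstdint>

    // Illustrative stand-ins for the real structs; only the field visible in
    // the hunk above is reproduced here.
    struct llama_sampling_params_sketch {
        int32_t repeat_last_n = 64;
    };

    struct gpt_params_sketch {
        llama_sampling_params_sketch sampling_params;
    };

    int main() {
        gpt_params_sketch params;

        // The pattern from the diff: bind a reference once, then route all
        // sampling-related settings through it.
        llama_sampling_params_sketch & sparams = params.sampling_params;
        sparams.repeat_last_n = 64;

        return sparams.repeat_last_n == 64 ? 0 : 1;
    }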