Starting point
parent 1f5cd83275
commit 23987729aa
1 changed file with 4 additions and 1 deletion
@@ -187,13 +187,16 @@ llama_token llama_sampling_sample(
     } else {
         // temperature sampling
         size_t min_keep = std::max(1, params.n_probs);
+        float output_temp = 1.0; // WIP, this should be defined separately as its own parameter.
+
+        llama_sample_temp     (ctx_main, &cur_p, temp); // "Initial temp", this is how it's traditionally implemented
         llama_sample_top_k    (ctx_main, &cur_p, top_k, min_keep);
         llama_sample_tail_free(ctx_main, &cur_p, tfs_z, min_keep);
         llama_sample_typical  (ctx_main, &cur_p, typical_p, min_keep);
         llama_sample_top_p    (ctx_main, &cur_p, top_p, min_keep);
         llama_sample_min_p    (ctx_main, &cur_p, min_p, min_keep);
-        llama_sample_temp     (ctx_main, &cur_p, temp);
+        llama_sample_temp     (ctx_main, &cur_p, temp_last); // "Output temp", this is how llama.cpp chose to implement it

         id = llama_sample_token(ctx_main, &cur_p);
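For context on the two comments above (not part of the commit itself): whether temperature is applied before or after a probability-mass cutoff such as top-p changes which tokens survive the cutoff, and that is the practical difference between the "Initial temp" and "Output temp" placements. Below is a minimal standalone C++ sketch of that effect on a toy four-token vocabulary; the apply_temp, softmax, and top_p helpers are simplified stand-ins written for this illustration, not the llama.cpp implementations, and the 1.5 temperature and 0.80 cutoff are arbitrary example values.

// Standalone illustration (assumed helper names, not llama.cpp API):
// how the position of the temperature step relative to a top-p cutoff
// changes which tokens survive.
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Divide every logit by temp (roughly what a temperature sampler does).
static void apply_temp(std::vector<float> & logits, float temp) {
    for (float & l : logits) {
        l /= temp;
    }
}

// Softmax over logits; assumes they are sorted in descending order.
static std::vector<float> softmax(const std::vector<float> & logits) {
    std::vector<float> probs(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) {
        probs[i] = std::exp(logits[i] - logits[0]);
        sum += probs[i];
    }
    for (float & p : probs) {
        p /= sum;
    }
    return probs;
}

// Keep the shortest prefix of tokens whose cumulative probability reaches
// p_keep, but never fewer than min_keep tokens (a toy top-p cutoff).
static void top_p(std::vector<float> & logits, float p_keep, size_t min_keep) {
    const std::vector<float> probs = softmax(logits);
    float cum = 0.0f;
    size_t keep = logits.size();
    for (size_t i = 0; i < probs.size(); ++i) {
        cum += probs[i];
        if (cum >= p_keep && i + 1 >= min_keep) {
            keep = i + 1;
            break;
        }
    }
    logits.resize(keep);
}

int main() {
    // Toy four-token vocabulary, logits sorted descending.
    const std::vector<float> logits = { 4.0f, 3.0f, 2.0f, 1.0f };
    const float temp = 1.5f;

    // "Initial temp": temperature before the cutoff (the traditional order).
    std::vector<float> a = logits;
    apply_temp(a, temp);
    top_p(a, 0.80f, 1);

    // "Output temp": cutoff first, temperature last (the temp_last order).
    std::vector<float> b = logits;
    top_p(b, 0.80f, 1);
    apply_temp(b, temp);

    std::printf("initial-temp order keeps %zu tokens\n", a.size()); // 3
    std::printf("output-temp order keeps %zu tokens\n", b.size());  // 2
    return 0;
}

With these example numbers, the traditional order keeps three of the four tokens while the temp_last order keeps two, which is why moving the temperature call around the truncation samplers is not a purely cosmetic change.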