llama : remove Tail-Free sampling (#10071)

ggml-ci
This commit is contained in:
Georgi Gerganov 2024-10-29 10:42:05 +02:00 committed by GitHub
parent 61715d5cc8
commit 8d8ff71536
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 15 additions and 172 deletions

View file

@@ -2090,7 +2090,6 @@ void yaml_dump_non_result_info(FILE * stream, const common_params & params, cons
const std::vector<float> tensor_split_vector(params.tensor_split, params.tensor_split + llama_max_devices());
yaml_dump_vector_float(stream, "tensor_split", tensor_split_vector);
-    fprintf(stream, "tfs: %f # default: 1.0\n", sparams.tfs_z);
fprintf(stream, "threads: %d # default: %u\n", params.cpuparams.n_threads, std::thread::hardware_concurrency());
fprintf(stream, "top_k: %d # default: 40\n", sparams.top_k);
fprintf(stream, "top_p: %f # default: 0.95\n", sparams.top_p);