llama : remove memory_f16 and kv_f16 flags

Georgi Gerganov 2023-12-05 18:18:16 +02:00
parent 4adb1d69d9
commit af99c6fbfc
6 changed files with 0 additions and 16 deletions

@@ -98,7 +98,6 @@ struct gpt_params {
     size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score
     bool mul_mat_q     = true;  // if true, use mul_mat_q kernels instead of cuBLAS
-    bool memory_f16    = true;  // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color     = false; // use color to distinguish generations and inputs
     bool interactive   = false; // interactive mode
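
For reference, a minimal sketch of the affected slice of gpt_params after this commit, assuming only the memory_f16 field shown in the hunk above is dropped (the kv_f16 removal touches a file not included in this excerpt); field names, types, and defaults are taken from the diff context, and anything beyond those lines is omitted rather than guessed:

    // Sketch only: the members below are exactly those visible in the hunk above,
    // with memory_f16 removed; the real struct contains many more fields.
    struct gpt_params {
        size_t hellaswag_tasks = 400;   // number of tasks to use when computing the HellaSwag score

        bool mul_mat_q     = true;      // if true, use mul_mat_q kernels instead of cuBLAS
        bool random_prompt = false;     // do not randomize prompt if none provided
        bool use_color     = false;     // use color to distinguish generations and inputs
        bool interactive   = false;     // interactive mode
    };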