llama : remove memory_f16 and kv_f16 flags
This commit is contained in:
parent
4adb1d69d9
commit
af99c6fbfc
6 changed files with 0 additions and 16 deletions
|
@@ -98,7 +98,6 @@ struct gpt_params {
|
|||
size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score
|
||||
|
||||
bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS
|
||||
bool memory_f16 = true; // use f16 instead of f32 for memory kv
|
||||
bool random_prompt = false; // do not randomize prompt if none provided
|
||||
bool use_color = false; // use color to distinguish generations and inputs
|
||||
bool interactive = false; // interactive mode
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue