llama : cleanup unused mmq flags (#5772)

* cleanup unused --no-mul-mat-q, -nommq, -mmq, --mul-mat-q, mul_mat_q

* remove mul_mat_q from compare-llama-bench and usage

* update llama-bench

---------

Co-authored-by: slaren <slarengh@gmail.com>
Pierrick Hymbert 2024-03-01 12:39:06 +01:00 committed by GitHub
parent 9600d59e01
commit 3ab8b3a92e
9 changed files with 10 additions and 56 deletions


@@ -31,7 +31,7 @@ PRETTY_NAMES = {
     "model_size": "Model Size [GiB]", "model_n_params": "Num. of Parameters",
     "n_batch": "Batch size", "n_threads": "Threads", "type_k": "K type", "type_v": "V type",
     "n_gpu_layers": "GPU layers", "main_gpu": "Main GPU", "no_kv_offload": "NKVO",
-    "mul_mat_q": "MMQ", "tensor_split": "Tensor split"
+    "tensor_split": "Tensor split"
 }
 
 DEFAULT_SHOW = ["model_type"]  # Always show these properties by default.
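
For context, PRETTY_NAMES maps raw llama-bench field names to the human-readable column headers used in the comparison output, so deleting the "mul_mat_q" entry removes the "MMQ" column along with the dropped flag. The snippet below is a minimal, hypothetical sketch of how such a mapping is typically applied; pretty_headers() is illustrative only and not part of the script.

# Minimal sketch (assumption, not the script's actual code): map raw
# llama-bench column keys to display labels, falling back to the raw
# key when no pretty name is defined.
PRETTY_NAMES = {
    "n_gpu_layers": "GPU layers", "main_gpu": "Main GPU", "no_kv_offload": "NKVO",
    "tensor_split": "Tensor split",
}

def pretty_headers(columns):
    # Unknown keys (e.g. metric columns) are shown as-is.
    return [PRETTY_NAMES.get(col, col) for col in columns]

print(pretty_headers(["n_gpu_layers", "tensor_split", "avg_ts"]))
# ['GPU layers', 'Tensor split', 'avg_ts']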