llama : remove memory_f16 and kv_f16 flags

This commit is contained in:
Georgi Gerganov 2023-12-05 18:18:16 +02:00
parent 4adb1d69d9
commit af99c6fbfc
No known key found for this signature in database
GPG key ID: 449E073F9DC10735
6 changed files with 0 additions and 16 deletions

View file

@@ -196,7 +196,6 @@ extern "C" {
// Keep the booleans together to avoid misalignment during copy-by-value.
bool mul_mat_q; // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
bool f16_kv; // use fp16 for KV cache, fp32 otherwise
bool logits_all; // the llama_eval() call computes all logits, not just the last one
bool embedding; // embedding mode only
bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU