common : add command-line arg to disable KV cache offloading

This commit is contained in:
Georgi Gerganov 2023-12-03 20:31:01 +02:00
parent c80b8a2bff
commit e262947d43
No known key found for this signature in database
GPG key ID: 449E073F9DC10735
4 changed files with 65 additions and 48 deletions

11
llama.h
View file

@@ -192,12 +192,11 @@ extern "C" {
uint32_t yarn_orig_ctx; // YaRN original context size
// Keep the booleans together to avoid misalignment during copy-by-value.
-    bool mul_mat_q;  // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
-    bool f16_kv;     // use fp16 for KV cache, fp32 otherwise
-    bool logits_all; // the llama_eval() call computes all logits, not just the last one
-    bool embedding;  // embedding mode only
-    bool offload_k;
-    bool offload_v;
+    bool mul_mat_q;   // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
+    bool f16_kv;      // use fp16 for KV cache, fp32 otherwise
+    bool logits_all;  // the llama_eval() call computes all logits, not just the last one
+    bool embedding;   // embedding mode only
+    bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
};
// model quantization parameters