llama : offload KV cache per-layer

Georgi Gerganov 2023-12-03 17:18:15 +02:00
parent c294c78eb7
commit 986b3da76a
2 changed files with 86 additions and 151 deletions

@@ -196,6 +196,8 @@ extern "C" {
bool f16_kv; // use fp16 for KV cache, fp32 otherwise
bool logits_all; // the llama_eval() call computes all logits, not just the last one
bool embedding; // embedding mode only
bool offload_k;   // offload the K cache to the GPU
bool offload_v;   // offload the V cache to the GPU
};
// model quantization parameters
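
For context, a minimal sketch of how the new flags might be used when creating a context. It assumes the C API of this revision (llama_model_default_params, llama_load_model_from_file, llama_context_default_params, llama_new_context_with_model); only offload_k and offload_v come from this diff, and the model path is a placeholder.

#include "llama.h"

int main(void) {
    llama_backend_init(false /* numa */);

    // Load the model with default parameters (path is illustrative).
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);

    // Request per-layer offload of the KV cache via the new context params.
    struct llama_context_params cparams = llama_context_default_params();
    cparams.offload_k = true;   // keep the K cache on the GPU
    cparams.offload_v = true;   // keep the V cache on the GPU

    struct llama_context * ctx = llama_new_context_with_model(model, cparams);

    // ... evaluate tokens ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}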