llama : add check for KV cache shifts (#10401)

ggml-ci
commit 8e752a777b (parent a88ad007de)
Author: Georgi Gerganov
Date:   2024-11-19 13:29:26 +02:00 (committed via GitHub)
3 changed files with 14 additions and 1 deletion


@@ -18213,7 +18213,7 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {
     // apply K-shift if needed
     if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE && lctx.kv_self.has_shift) {
-        if (lctx.model.arch == LLM_ARCH_DEEPSEEK2) { // not supported due to MLA
+        if (!llama_kv_cache_can_shift(&lctx)) {
             GGML_ABORT("Deepseek2 does not support K-shift");
         }
@@ -20462,6 +20462,10 @@ void llama_kv_cache_update(struct llama_context * ctx) {
     llama_kv_cache_update_internal(*ctx);
 }
 
+bool llama_kv_cache_can_shift(struct llama_context * ctx) {
+    return ctx->model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA
+}
+
 // deprecated
 size_t llama_get_state_size(struct llama_context * ctx) {
     return llama_state_get_size(ctx);
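
For reference, a minimal sketch (not part of this commit) of how a caller might use the new predicate to guard a context shift. It assumes an existing llama_context * ctx; shift_context, n_keep, n_past, and n_discard are illustrative names, and the seq_rm/seq_add pattern follows the context-shift logic used elsewhere in the llama.cpp examples:

#include "llama.h"

// Drop n_discard tokens after the first n_keep and slide the remaining
// cache entries back, but only if the model supports KV cache shifts.
static bool shift_context(struct llama_context * ctx, int n_keep, int n_past, int n_discard) {
    if (!llama_kv_cache_can_shift(ctx)) {
        return false; // e.g. Deepseek2: K-shift not supported due to MLA
    }

    llama_kv_cache_seq_rm (ctx, 0, n_keep,             n_keep + n_discard);
    llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, n_past, -n_discard);

    // apply the pending K-shift (RoPE re-rotation) now rather than on
    // the next llama_decode call
    llama_kv_cache_update(ctx);

    return true;
}

Previously the unsupported case was only caught by the GGML_ABORT inside llama_kv_cache_update_internal; with llama_kv_cache_can_shift, callers can detect it up front and fall back gracefully instead of aborting.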