From f411251bcf155884e6d791a525fc87a7e04518bd Mon Sep 17 00:00:00 2001
From: Christian Falch <875252+chrfalch@users.noreply.github.com>
Date: Sat, 1 Apr 2023 18:46:24 +0200
Subject: [PATCH] Update llama.cpp

Added review comments

Co-authored-by: Pavol Rusnak
---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index f6f4dda66..cf413d983 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1686,7 +1686,7 @@ int llama_get_kv_cache_token_count(struct llama_context * ctx) {
 // Sets the KV cache containing the current context for the model
 void llama_set_kv_cache(
         struct llama_context * ctx,
-        uint8_t * kv_cache,
+        const uint8_t * kv_cache,
         size_t n_size,
         int n_token_count) {
     // Make sure we have the same kv cache setup
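
Note (usage sketch, not part of the patch): the change makes llama_set_kv_cache
take the cache buffer through a read-only pointer, so callers can pass const
data, e.g. the contents of a const std::vector. Of the functions used below,
only llama_get_kv_cache_token_count is confirmed by the hunk header above; the
companion getters llama_get_kv_cache and llama_get_kv_cache_size are assumed to
exist in the same revision of the API.

    #include "llama.h"
    #include <vector>

    // Snapshot the current KV cache into an owned buffer.
    // llama_get_kv_cache / llama_get_kv_cache_size are assumed getters.
    std::vector<uint8_t> snapshot_kv(struct llama_context * ctx) {
        const uint8_t * cache = llama_get_kv_cache(ctx);
        size_t n_size = llama_get_kv_cache_size(ctx);
        return std::vector<uint8_t>(cache, cache + n_size);
    }

    // Restore a previously captured snapshot. snap.data() yields a
    // const uint8_t *, which compiles only because the patched signature
    // accepts const uint8_t *.
    void restore_kv(struct llama_context * ctx,
                    const std::vector<uint8_t> & snap,
                    int n_token_count) {
        llama_set_kv_cache(ctx, snap.data(), snap.size(), n_token_count);
    }

Making the parameter const also documents the contract: the function only reads
from kv_cache to restore the context's state and never writes through it.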