From a0c895c087bc2636e6972c13d76a52438f3de99a Mon Sep 17 00:00:00 2001
From: Christian Falch <875252+chrfalch@users.noreply.github.com>
Date: Sat, 1 Apr 2023 18:46:14 +0200
Subject: [PATCH] Update llama.cpp

Add review comments

Co-authored-by: Pavol Rusnak
---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 73ad59ca5..f6f4dda66 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1670,7 +1670,7 @@ int llama_model_quantize(
 
 // Returns the KV cache that will contain the context for the
 // ongoing prediction with the model.
-uint8_t* llama_get_kv_cache(struct llama_context * ctx) {
+const uint8_t * llama_get_kv_cache(struct llama_context * ctx) {
     return ctx->model.kv_self.buf.data();
 }
 
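
With the const-qualified return type, callers get a read-only view of the
KV cache buffer rather than a pointer they could scribble on. A minimal
sketch of a caller snapshotting the cache through this getter follows; it
assumes a companion llama_get_kv_cache_size() accessor from the same API
(an assumption, since that function does not appear in this hunk):

#include <cstdint>
#include <cstddef>
#include <vector>

#include "llama.h"

// Copy the context's KV cache into an owned buffer, e.g. to snapshot the
// state of an ongoing prediction. llama_get_kv_cache_size() is assumed to
// exist alongside the getter patched above.
static std::vector<uint8_t> snapshot_kv_cache(struct llama_context * ctx) {
    const uint8_t * cache = llama_get_kv_cache(ctx);       // read-only view
    const size_t    size  = llama_get_kv_cache_size(ctx);  // assumed accessor
    return std::vector<uint8_t>(cache, cache + size);      // deep copy
}

Because the getter only hands out a const pointer, any mutation of the
cache has to go through a dedicated setter, which keeps ownership of the
buffer unambiguously inside llama_context.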