From 17f463a083d3803a473fda82f8b339e96e2f698d Mon Sep 17 00:00:00 2001 From: Christian Falch <875252+chrfalch@users.noreply.github.com> Date: Sat, 1 Apr 2023 18:46:37 +0200 Subject: [PATCH] Update llama.h Added review comments Co-authored-by: Pavol Rusnak <pavol@rusnak.io> --- llama.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.h b/llama.h index 5a6260d57..da8f7f600 100644 --- a/llama.h +++ b/llama.h @@ -85,7 +85,7 @@ extern "C" { // Returns the KV cache that will contain the context for the // ongoing prediction with the model. - LLAMA_API uint8_t* llama_get_kv_cache(struct llama_context * ctx); + LLAMA_API const uint8_t * llama_get_kv_cache(struct llama_context * ctx); // Returns the size of the KV cache LLAMA_API size_t llama_get_kv_cache_size(struct llama_context * ctx);