Update llama.cpp

Add review comments

Co-authored-by: Pavol Rusnak <pavol@rusnak.io>
Christian Falch 2023-04-01 18:46:14 +02:00 committed by GitHub
parent 14804b7978
commit a0c895c087

@@ -1670,7 +1670,7 @@ int llama_model_quantize(
 // Returns the KV cache that will contain the context for the
 // ongoing prediction with the model.
-uint8_t* llama_get_kv_cache(struct llama_context * ctx) {
+const uint8_t * llama_get_kv_cache(struct llama_context * ctx) {
     return ctx->model.kv_self.buf.data();
 }
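
With the accessor now returning a const pointer, callers get a read-only view of the KV cache and have to copy it into their own buffer (for example to snapshot session state) rather than write through it. Below is a minimal sketch of such a caller; llama_get_kv_cache_size() and the exact header name are assumptions based on the surrounding API, not verified against llama.h.

// Sketch only: copy the read-only KV cache into a caller-owned buffer,
// e.g. to snapshot session state. llama_get_kv_cache_size() is assumed
// to exist alongside llama_get_kv_cache(); verify against llama.h.
#include <cstdint>
#include <cstring>
#include <vector>
#include "llama.h"   // assumed header providing the llama_* declarations

std::vector<uint8_t> snapshot_kv_cache(struct llama_context * ctx) {
    const uint8_t * src = llama_get_kv_cache(ctx);       // const-qualified: read-only view
    const size_t    n   = llama_get_kv_cache_size(ctx);  // assumed companion accessor
    std::vector<uint8_t> out(n);
    std::memcpy(out.data(), src, n);                      // copy instead of mutating through src
    return out;
}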