Update llama.cpp
Added review comments

Co-authored-by: Pavol Rusnak <pavol@rusnak.io>
commit f411251bcf (parent a0c895c087)

1 changed file with 1 addition and 1 deletion
@@ -1686,7 +1686,7 @@ int llama_get_kv_cache_token_count(struct llama_context * ctx) {
 // Sets the KV cache containing the current context for the model
 void llama_set_kv_cache(
         struct llama_context * ctx,
-        uint8_t * kv_cache,
+        const uint8_t * kv_cache,
         size_t n_size,
         int n_token_count) {
     // Make sure we have the same kv cache setup
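For context, a minimal sketch of how a caller might use this setter to snapshot and restore the KV cache, which is why the buffer can be taken as `const uint8_t *`: the setter only reads from it. Only `llama_set_kv_cache` and `llama_get_kv_cache_token_count` appear in this diff; the companion getters `llama_get_kv_cache` and `llama_get_kv_cache_size` are assumed to exist in the same API surface, so treat them as assumptions rather than confirmed by this change.

// Sketch: save the KV cache after evaluating a prompt, then restore it later
// so the same context state can be reused without re-evaluating the prompt.
// llama_get_kv_cache() and llama_get_kv_cache_size() are assumed companions
// of the functions shown in this diff; error handling is omitted.
#include <stdlib.h>
#include <string.h>
#include "llama.h"

static uint8_t * saved_cache     = NULL;
static size_t    saved_size      = 0;
static int       saved_tok_count = 0;

static void save_kv_cache(struct llama_context * ctx) {
    saved_size      = llama_get_kv_cache_size(ctx);         // assumed getter
    saved_tok_count = llama_get_kv_cache_token_count(ctx);  // shown in hunk header
    saved_cache     = malloc(saved_size);
    memcpy(saved_cache, llama_get_kv_cache(ctx), saved_size); // assumed getter
}

static void restore_kv_cache(struct llama_context * ctx) {
    // The buffer is only read here, which is what this commit documents by
    // changing the parameter to `const uint8_t *`.
    llama_set_kv_cache(ctx, saved_cache, saved_size, saved_tok_count);
}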