Fix memory leak in src/llama.cpp
Free `batch` before returning from `read_kv_cache_meta`.
This commit is contained in:
parent
6afd1a99dc
commit
5292fdb41e
1 changed file with 1 addition and 0 deletions
|
@@ -17656,6 +17656,7 @@ struct llama_data_read {
         read_to(&n_seq_id, sizeof(n_seq_id));

         if (n_seq_id != 0) {
+            llama_batch_free(batch);
             LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
             return false;
         }
|
Loading…
Add table
Add a link
Reference in a new issue