Fix memory leak in src/llama.cpp

Free `batch` before returning from `read_kv_cache_meta`.
This commit is contained in:
Micah Talkiewicz 2024-08-09 15:40:13 -04:00 committed by GitHub
parent 6afd1a99dc
commit 5292fdb41e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -17656,6 +17656,7 @@ struct llama_data_read {
read_to(&n_seq_id, sizeof(n_seq_id));
if (n_seq_id != 0) {
llama_batch_free(batch);
LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
return false;
}