llama : fix llama_model_loader memory leak
This commit is contained in:
parent
dd9e2fc988
commit
81a2c2a6f4
1 changed file with 9 additions and 0 deletions
|
@ -1083,6 +1083,15 @@ struct llama_model_loader {
|
|||
this->use_mmap = use_mmap;
|
||||
}
|
||||
|
||||
// Releases the metadata contexts owned by the loader.
// ctx_gguf holds the parsed GGUF file state; ctx_meta holds the
// ggml context backing the tensor metadata. The NULL checks are kept
// defensively — NOTE(review): presumably gguf_free/ggml_free tolerate
// NULL, but that is not visible from here, so the guards stay.
~llama_model_loader() {
    if (ctx_gguf != NULL) {
        gguf_free(ctx_gguf);
    }

    if (ctx_meta != NULL) {
        ggml_free(ctx_meta);
    }
}
|
||||
|
||||
const char * get_tensor_name(int i) const {
|
||||
return gguf_get_tensor_name(ctx_gguf, i);
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue