Missing model memory release
This commit is contained in:
parent
7e14cb901c
commit
948ce2cf9c
1 changed file with 1 addition and 0 deletions
|
@ -2543,6 +2543,7 @@ struct llama_model * llama_load_model_from_file(
|
|||
if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gpu_layers,
|
||||
params.main_gpu, params.tensor_split, memory_type, params.use_mmap, params.use_mlock,
|
||||
params.vocab_only, params.progress_callback, params.progress_callback_user_data)) {
|
||||
delete model;
|
||||
fprintf(stderr, "%s: failed to load model\n", __func__);
|
||||
return nullptr;
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue