llama : update llama_model API names (#11063)

* llama : deprecate llama_free_model, add llama_model_free

ggml-ci

* llama : change `llama_load_model_from_file` -> `llama_model_load_from_file`

ggml-ci
This commit is contained in:
Georgi Gerganov 2025-01-06 10:55:18 +02:00 committed by GitHub
parent 3e6e7a6bc2
commit 47182dd03f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
23 changed files with 76 additions and 59 deletions

View file

@@ -152,7 +152,7 @@ int main(int argc, char **argv) {
mparams.vocab_only = true;
-    model = llama_load_model_from_file(fname.c_str(), mparams);
+    model = llama_model_load_from_file(fname.c_str(), mparams);
if (model == NULL) {
fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -165,7 +165,7 @@ int main(int argc, char **argv) {
if (ctx == NULL) {
fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-        llama_free_model(model);
+        llama_model_free(model);
return 1;
}
}
@@ -300,7 +300,7 @@ int main(int argc, char **argv) {
fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
}
-    llama_free_model(model);
+    llama_model_free(model);
llama_free(ctx);
llama_backend_free();