Deprecate public API function llama_apply_lora_from_file

This commit is contained in:
Didzis Gosko 2023-06-21 00:08:49 +03:00
parent 69f776282b
commit d7714a8f80
2 changed files with 4 additions and 3 deletions

View file

@ -566,7 +566,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
}
if (!params.lora_adapter.empty()) {
-int err = llama_apply_lora_from_file(lctx,
+int err = llama_model_apply_lora_from_file(model,
params.lora_adapter.c_str(),
params.lora_base.empty() ? NULL : params.lora_base.c_str(),
params.n_threads);

View file

@ -179,11 +179,12 @@ extern "C" {
// The model needs to be reloaded before applying a new adapter, otherwise the adapter
// will be applied on top of the previous one
// Returns 0 on success
-LLAMA_API int llama_apply_lora_from_file(
+LLAMA_API DEPRECATED(int llama_apply_lora_from_file(
struct llama_context * ctx,
const char * path_lora,
const char * path_base_model,
int n_threads);
int n_threads),
"please use llama_model_apply_lora_from_file instead");
LLAMA_API int llama_model_apply_lora_from_file(
const struct llama_model * model,