llama : add option to override tensor buffers
parent 9fbadaef4f
commit f07c2ec505

9 changed files with 87 additions and 8 deletions
@@ -256,6 +256,7 @@ struct common_params {
     std::vector<std::string> in_files;   // all input files
     std::vector<std::string> antiprompt; // strings upon which more user input is prompted (a.k.a. reverse prompts)
     std::vector<llama_model_kv_override> kv_overrides;
+    std::vector<llama_model_tensor_buft_override> tensor_buft_overrides;

     bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_adapter_lora_apply)
     std::vector<common_adapter_lora_info> lora_adapters; // lora adapter path with user defined scale
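For context, a minimal sketch of how the new override list might be filled and handed to the model loader. This is not part of the diff above: the shape of llama_model_tensor_buft_override (a tensor-name pattern paired with a ggml backend buffer type), the null terminator, and the llama_model_params field name used below are assumptions based on the commit title.

    // Sketch only, under the assumptions stated above.
    #include <vector>

    #include "llama.h"
    #include "ggml-backend.h"

    static llama_model_params params_with_tensor_overrides(
            std::vector<llama_model_tensor_buft_override> & overrides) {
        // Example: allocate tensors whose names match the pattern in a CPU
        // buffer (e.g. to keep large expert weights in host memory).
        overrides.push_back({ "ffn_.*_exps", ggml_backend_cpu_buffer_type() });
        overrides.push_back({ nullptr, nullptr }); // assumed null terminator

        llama_model_params mparams = llama_model_default_params();
        mparams.tensor_buft_overrides = overrides.data(); // assumed field name
        return mparams;
    }

Presumably the common code parses a user-supplied list of pattern=buffer-type pairs into common_params::tensor_buft_overrides and forwards it to llama_model_params in the same way.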