llama : rearrange model params
commit 14e0ba1daa
parent a5b7d7277e

2 changed files with 5 additions and 2 deletions
llama.cpp

@@ -8542,10 +8542,10 @@ struct llama_model_params llama_model_default_params() {
         /*.tensor_split                =*/ nullptr,
         /*.progress_callback           =*/ nullptr,
         /*.progress_callback_user_data =*/ nullptr,
+        /*.kv_overrides                =*/ nullptr,
         /*.vocab_only                  =*/ false,
         /*.use_mmap                    =*/ true,
         /*.use_mlock                   =*/ false,
-        /*.kv_overrides                =*/ nullptr,
     };
 
 #ifdef GGML_USE_METAL
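On the llama.cpp side, llama_model_default_params() fills the struct with a positional aggregate initializer, and the /*.name =*/ comments are only comments: values bind to fields strictly by declaration order, which is why the initializer must be rearranged in lockstep with the header. A minimal sketch of the pattern, using a hypothetical two-field struct rather than the real llama_model_params:

    #include <cstdio>

    // Hypothetical stand-in struct; not the real llama_model_params.
    struct toy_params {
        const char * kv_overrides; // pointer first, as after this commit
        bool         use_mmap;
    };

    static toy_params toy_default_params() {
        // Positional aggregate initialization: the /*.name =*/ comments are
        // ignored by the compiler, so this list must match the declaration
        // order of the struct above, field for field.
        toy_params result = {
            /*.kv_overrides =*/ nullptr,
            /*.use_mmap     =*/ true,
        };
        return result;
    }

    int main() {
        toy_params p = toy_default_params();
        std::printf("use_mmap = %d\n", p.use_mmap ? 1 : 0);
        return 0;
    }

The comment convention keeps the intent readable while sidestepping the need for C++20 designated initializers, which would bind by name instead.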
llama.h
@@ -181,14 +181,17 @@ extern "C" {
         // called with a progress value between 0 and 1, pass NULL to disable
         llama_progress_callback progress_callback;
 
         // context pointer passed to the progress callback
         void * progress_callback_user_data;
 
+        // override key-value pairs of the model meta data
+        const struct llama_model_kv_override * kv_overrides;
+
         // Keep the booleans together to avoid misalignment during copy-by-value.
         bool vocab_only;    // only load the vocabulary, no weights
         bool use_mmap;      // use mmap if possible
         bool use_mlock;     // force system to keep model in RAM
-        const struct llama_model_kv_override * kv_overrides;
     };
 
     struct llama_context_params {
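The retained comment explains why the booleans stay grouped at the tail: a pointer member placed after a run of bools forces interior padding before the pointer, and bindings that replicate the layout by hand and copy the struct by value can misread that region; with the pointer first, all padding falls at the end. A small self-contained sketch of the layout difference, with hypothetical structs standing in for the before/after shapes:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical layouts; not the real llama_model_params.
    struct layout_before {   // shape prior to this commit
        bool b0, b1, b2;     // stand-ins for vocab_only / use_mmap / use_mlock
        const void * kv;     // pointer after the bools
    };

    struct layout_after {    // shape after this commit
        const void * kv;     // pointer before the bools
        bool b0, b1, b2;     // booleans packed together at the tail
    };

    int main() {
        // On a typical 64-bit ABI, layout_before pads bytes 3..7 *between*
        // the bools and the pointer, while layout_after keeps kv at offset 0
        // and leaves only tail padding after the bools.
        std::printf("before: sizeof=%zu, kv at offset %zu\n",
                    sizeof(layout_before), offsetof(layout_before, kv));
        std::printf("after : sizeof=%zu, kv at offset %zu\n",
                    sizeof(layout_after),  offsetof(layout_after, kv));
        return 0;
    }

Interior padding is the hazard for copy-by-value across an FFI boundary; padding that sits only at the tail is much easier for hand-written layouts to get right.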