llama : rearrange model params

commit 14e0ba1daa
parent a5b7d7277e
Author: Georgi Gerganov (committed via GitHub)
Date:   2023-12-05 09:40:57 +02:00
2 changed files with 5 additions and 2 deletions

diff --git a/llama.cpp b/llama.cpp

@@ -8542,10 +8542,10 @@ struct llama_model_params llama_model_default_params() {
         /*.tensor_split                =*/ nullptr,
         /*.progress_callback           =*/ nullptr,
         /*.progress_callback_user_data =*/ nullptr,
+        /*.kv_overrides                =*/ nullptr,
         /*.vocab_only                  =*/ false,
         /*.use_mmap                    =*/ true,
         /*.use_mlock                   =*/ false,
-        /*.kv_overrides                =*/ nullptr,
     };
 
 #ifdef GGML_USE_METAL
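The positional /*.field =*/ initializer above is why llama.cpp has to change in lockstep with llama.h: C++ aggregate initialization assigns values by position, and the comments only document intent. A minimal sketch, using a hypothetical params_t rather than the real struct, of the silent mis-assignment that lockstep editing prevents:

// Hypothetical struct, not from the commit.
struct params_t {
    const float * tensor_split; // pointer members first
    bool          use_mmap;     // booleans grouped after the pointers
    bool          use_mlock;
};

params_t default_params() {
    return params_t {
        /*.tensor_split =*/ nullptr,
        /*.use_mmap     =*/ true,   // position 2 initializes use_mmap only
        /*.use_mlock    =*/ false,  // because the declaration order says so;
                                    // reorder the struct without reordering
                                    // these values and compatible types are
                                    // silently assigned to the wrong members
    };
}

Designated initializers (.use_mmap = true) would let the compiler enforce the ordering, but they only became standard in C++20, which the project did not require at the time.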

diff --git a/llama.h b/llama.h

@@ -181,14 +181,17 @@ extern "C" {
 
         // called with a progress value between 0 and 1, pass NULL to disable
         llama_progress_callback progress_callback;
+
         // context pointer passed to the progress callback
         void * progress_callback_user_data;
 
+        // override key-value pairs of the model meta data
+        const struct llama_model_kv_override * kv_overrides;
+
         // Keep the booleans together to avoid misalignment during copy-by-value.
         bool vocab_only;  // only load the vocabulary, no weights
         bool use_mmap;    // use mmap if possible
         bool use_mlock;   // force system to keep model in RAM
-        const struct llama_model_kv_override * kv_overrides;
     };
 
     struct llama_context_params {
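The "Keep the booleans together" comment is about struct layout: every bool that interrupts the pointer members is followed by padding, so the struct's size and field offsets depend on the interleaving, and a copy-by-value between two sides that disagree on the layout (e.g. hand-written bindings) goes wrong. A minimal sketch, with hypothetical structs rather than the real llama_model_params, of the effect the grouping avoids:

#include <cstdio>

// Hypothetical layouts, not from llama.h: 'split' interleaves bools between
// pointer members, 'grouped' keeps the booleans together at the end, as the
// rearranged llama_model_params now does.
struct split   { void * a; bool x; void * b; bool y; };
struct grouped { void * a; void * b; bool x; bool y; };

int main() {
    // On a typical 64-bit ABI this prints 32 vs 24: each bool that interrupts
    // the pointers is followed by 7 bytes of padding to realign the next one.
    std::printf("sizeof(split)   = %zu\n", sizeof(split));
    std::printf("sizeof(grouped) = %zu\n", sizeof(grouped));
}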