llama.cpp : split llama_context_params into model and context params (#3301)

* llama.cpp : split llama_context_params into model and context params

ggml-ci
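
A minimal sketch of the resulting two-struct flow (the model path, `n_ctx` value, and error handling here are illustrative, not taken from this commit):

```cpp
#include "llama.h"

int main() {
    llama_backend_init(false); // numa = false

    // model-level settings (vocab_only, gpu layers, ...) now live in
    // llama_model_params
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == NULL) {
        return 1;
    }

    // per-context settings (n_ctx, threads, rope, ...) stay in
    // llama_context_params
    llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx = 2048;

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        llama_free_model(model);
        return 1;
    }

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```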

* fix metal build

* fix freq_base/scale default to model value
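
A hedged sketch of the fixed default semantics: leaving the rope fields at 0 now means "use the value stored in the model" rather than a hard-coded constant:

```cpp
#include "llama.h"

// 0.0f now defers to the model's hparams instead of a fixed default
// such as 10000.0 / 1.0 (field names as in llama.h at this commit).
static llama_context_params rope_from_model() {
    llama_context_params cparams = llama_context_default_params();
    cparams.rope_freq_base  = 0.0f; // 0 -> taken from the model
    cparams.rope_freq_scale = 0.0f; // 0 -> taken from the model
    return cparams;
}
```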

* llama-bench : keep the same model between tests when possible

* move n_threads to llama_context_params, add n_threads_batch
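
An illustrative use of the new fields (the thread counts are placeholders):

```cpp
#include "llama.h"

// n_threads drives single-token generation; n_threads_batch drives
// prompt / batch evaluation, which often benefits from more threads.
static llama_context_params make_ctx_params() {
    llama_context_params cparams = llama_context_default_params();
    cparams.n_threads       = 8;
    cparams.n_threads_batch = 16;
    return cparams;
}
```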

* fix mpi build

* remove kv_size(), cuda scratch fixes

* remove low-vram option

* add n_threads_batch to system info, refactor to get_system_info()

* add documentation about --threads-batch to the READMEs

* llama-bench fix

* main : fix rope freq/scale warning

* llama.cpp : add llama_get_model
common : add llama_tokenize from model
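
A sketch of the two additions together, assuming the common helper takes the model and an add-BOS flag (check common.h for the exact overload):

```cpp
#include <string>
#include <vector>
#include "common.h" // llama.cpp's common library
#include "llama.h"

// Tokenize using only the model behind an existing context:
// llama_get_model is the new accessor, and common's llama_tokenize
// can now work from a model instead of a context.
static std::vector<llama_token> tokenize_prompt(llama_context * ctx, const std::string & text) {
    const llama_model * model = llama_get_model(ctx);
    return llama_tokenize(model, text, /*add_bos*/ true);
}
```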

* remove duplicated ctx/model functions

ggml-ci

* cuda : print total VRAM used
slaren, 2023-09-28 21:42:38 +02:00 (committed by GitHub)
commit 16bc66d947, parent 0512d66670
27 changed files with 713 additions and 633 deletions


@@ -64,18 +64,20 @@ int main(int argc, char **argv) {
     // load the vocab
     {
-        auto lparams = llama_context_default_params();
+        auto mparams = llama_model_default_params();
 
-        lparams.vocab_only = true;
+        mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), lparams);
+        model = llama_load_model_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
             return 1;
         }
 
-        ctx = llama_new_context_with_model(model, lparams);
+        auto cparams = llama_context_default_params();
+
+        ctx = llama_new_context_with_model(model, cparams);
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -84,7 +86,7 @@ int main(int argc, char **argv) {
         }
     }
 
-    if (llama_vocab_type(ctx) != LLAMA_VOCAB_TYPE_SPM) {
+    if (llama_vocab_type(model) != LLAMA_VOCAB_TYPE_SPM) {
         fprintf(stderr, "%s : error: vocab type is not SPM\n", __func__);
         llama_free_model(model);
         llama_free(ctx);