From 6a16c36bc549466cd7fcc7ccbffc72853e07ee18 Mon Sep 17 00:00:00 2001
From: goerch
Date: Fri, 29 Sep 2023 20:34:42 +0200
Subject: [PATCH] Fix PR for recent change

---
 tests/test-tokenizer-1-bpe.cpp | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/tests/test-tokenizer-1-bpe.cpp b/tests/test-tokenizer-1-bpe.cpp
index 1e6ea3327..986e67ba5 100644
--- a/tests/test-tokenizer-1-bpe.cpp
+++ b/tests/test-tokenizer-1-bpe.cpp
@@ -29,18 +29,20 @@ int main(int argc, char **argv) {
 
     // load the vocab
     {
-        auto lparams = llama_context_default_params();
+        auto mparams = llama_model_default_params();
 
-        lparams.vocab_only = true;
+        mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), lparams);
+        model = llama_load_model_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
             return 1;
         }
 
-        ctx = llama_new_context_with_model(model, lparams);
+        auto cparams = llama_context_default_params();
+
+        ctx = llama_new_context_with_model(model, cparams);
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -49,7 +51,7 @@ int main(int argc, char **argv) {
         }
     }
 
-    GGML_ASSERT(llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_BPE);
+    GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_BPE);
 
 #ifdef _WIN32
     // We need this for unicode console support
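
Note: below is a minimal, self-contained sketch of the loading pattern this patch adopts, assuming the llama.cpp C API as of this commit: model parameters (llama_model_default_params) are now a separate struct from context parameters (llama_context_default_params), and llama_vocab_type() is queried on the model rather than the context. The argument handling and error messages here are illustrative, not copied from the test.

// sketch.cpp -- hypothetical standalone example, not part of the patch
#include "llama.h"
#include "ggml.h"   // for GGML_ASSERT

#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <vocab-file>\n", argv[0]);
        return 1;
    }

    llama_backend_init(false); // numa = false (signature as of Sep 2023)

    // model parameters: vocab_only skips loading the tensor weights,
    // which is all a tokenizer test needs
    auto mparams = llama_model_default_params();
    mparams.vocab_only = true;

    llama_model * model = llama_load_model_from_file(argv[1], mparams);
    if (model == NULL) {
        fprintf(stderr, "error: failed to load vocab '%s'\n", argv[1]);
        return 1;
    }

    // context parameters live in their own struct after the API split
    auto cparams = llama_context_default_params();

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        fprintf(stderr, "error: failed to create context\n");
        llama_free_model(model);
        return 1;
    }

    // the vocab type now belongs to the model, not the context
    GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_BPE);

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}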