Fix PR for recent change

commit 6a16c36bc5
parent fad8a773c1

1 changed file with 7 additions and 5 deletions
@@ -29,18 +29,20 @@ int main(int argc, char **argv) {
 
     // load the vocab
     {
-        auto lparams = llama_context_default_params();
+        auto mparams = llama_model_default_params();
 
-        lparams.vocab_only = true;
+        mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), lparams);
+        model = llama_load_model_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
             return 1;
         }
 
-        ctx = llama_new_context_with_model(model, lparams);
+        auto cparams = llama_context_default_params();
+
+        ctx = llama_new_context_with_model(model, cparams);
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -49,7 +51,7 @@ int main(int argc, char **argv) {
         }
     }
 
-    GGML_ASSERT(llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_BPE);
+    GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_BPE);
 
 #ifdef _WIN32
     // We need this for unicode console support
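
For context, the diff tracks an API split in llama.cpp: what used to be a single llama_context_params now comes in two pieces, llama_model_default_params() (where vocab_only lives) and llama_context_default_params(), and the vocab type is queried from the model rather than the context. Below is a minimal standalone sketch of the updated loading sequence, assuming the llama.h API as used in the diff above; it is illustrative only, not the full test file, and omits backend init/teardown and the actual tokenizer checks.

// Sketch: vocab-only loading with split model/context params,
// as introduced by this commit. Not the complete test program.
#include "llama.h"
#include "ggml.h"   // for GGML_ASSERT

#include <cstdio>
#include <string>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <vocab-file>\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    // model parameters: vocab_only moved here from the old
    // combined llama_context_params
    auto mparams = llama_model_default_params();
    mparams.vocab_only = true;

    llama_model * model = llama_load_model_from_file(fname.c_str(), mparams);
    if (model == NULL) {
        fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
        return 1;
    }

    // context parameters are now requested separately
    auto cparams = llama_context_default_params();

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: failed to create context for '%s'\n", __func__, fname.c_str());
        llama_free_model(model);
        return 1;
    }

    // vocab type is a property of the model, not the context
    GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_BPE);

    llama_free(ctx);
    llama_free_model(model);
    return 0;
}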