diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index a48ad0f32..14e9501ca 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -44,7 +44,7 @@ int main(int argc, char ** argv) {
         llama_free_model(model);
         return 1;
     }
-    auto tokens = llama_tokenize(ctx, params.prompt.c_str(), true);
+    auto tokens = llama_tokenize(ctx, params.prompt, true);
     auto n_prompt_tokens = tokens.size();
     if (n_prompt_tokens < 1) {
         fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
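
The .c_str() call is dropped because the llama_tokenize wrapper used by the examples takes the prompt as a std::string. A minimal usage sketch follows, assuming the wrapper declared in common.h has the shape std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos); the helper name tokenize_prompt is hypothetical and only for illustration:

    #include <cstdio>
    #include <string>
    #include <vector>

    #include "common.h"   // assumed to declare the llama_tokenize std::string wrapper
    #include "llama.h"

    // Sketch: tokenize a prompt via the std::string overload and report the count.
    static bool tokenize_prompt(llama_context * ctx, const std::string & prompt) {
        // no .c_str() needed: the wrapper is assumed to accept std::string directly
        std::vector<llama_token> tokens = llama_tokenize(ctx, prompt, true);
        if (tokens.empty()) {
            fprintf(stderr, "failed to tokenize prompt\n");
            return false;
        }
        fprintf(stderr, "prompt has %zu tokens\n", tokens.size());
        return true;
    }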