Added tokens to identify if it is loading or ready

This commit is contained in:
Jeffersoncgo 2023-04-19 09:08:32 -04:00
parent 99eafe908f
commit 275f1bdf13
2 changed files with 3 additions and 1 deletions

View file

@@ -242,7 +242,8 @@ int main(int argc, char ** argv) {
             " - Press Ctrl+C to interject at any time.\n"
 #endif
             " - Press Return to return control to LLaMa.\n"
-            " - If you want to submit another line, end your input in '\\'.\n\n");
+            " - If you want to submit another line, end your input in '\\'.\n"
+            "[model ready]\n");
         is_interacting = params.interactive_start;
     }

View file

@@ -931,6 +931,7 @@ static void llama_model_load_internal(
     fprintf(stderr, "%s: mem required  = %7.2f MB (+ %7.2f MB per state)\n", __func__,
             mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
+    fprintf(stderr, "[model loading]\n");
 }
 
     // create the ggml context