From 275f1bdf13e09aad232d7ff2a156139a11198f4b Mon Sep 17 00:00:00 2001
From: Jeffersoncgo
Date: Wed, 19 Apr 2023 09:08:32 -0400
Subject: [PATCH] Added tokens to identify if the model is loading or ready

---
 examples/main/main.cpp | 3 ++-
 llama.cpp              | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 4d9f62106..15a0afda9 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -242,7 +242,8 @@ int main(int argc, char ** argv) {
                " - Press Ctrl+C to interject at any time.\n"
 #endif
                " - Press Return to return control to LLaMa.\n"
-               " - If you want to submit another line, end your input in '\\'.\n\n");
+               " - If you want to submit another line, end your input in '\\'.\n"
+               "[model ready]\n");
 
         is_interacting = params.interactive_start;
     }
diff --git a/llama.cpp b/llama.cpp
index dfec05910..c8aae4fd8 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -931,6 +931,7 @@ static void llama_model_load_internal(
 
         fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
                 mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
+        fprintf(stderr, "[model loading]\n");
     }
 
     // create the ggml context
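
Usage note (not part of the patch): the two markers exist so that a parent process driving the main example programmatically can tell from its output when the model is still loading and when it is ready for input. Below is a minimal C++ sketch of such a wrapper. The binary path ./main, the model path models/7B/ggml-model.bin, and the flags are illustrative assumptions, not taken from this patch; the sketch also assumes a POSIX system (popen) and relies on both markers being printed to stderr, which it merges into the read pipe with 2>&1.

    // Minimal wrapper sketch: detect the "[model loading]" and "[model ready]"
    // markers introduced by this patch in the output of the main example.
    // Assumptions (not part of the patch): the binary lives at ./main, the
    // model at models/7B/ggml-model.bin, and popen is available (POSIX).
    #include <cstdio>
    #include <cstring>

    int main() {
        // Both markers go to stderr, so merge it into the captured stream.
        FILE * pipe = popen("./main -m models/7B/ggml-model.bin -i 2>&1", "r");
        if (!pipe) {
            fprintf(stderr, "wrapper: failed to launch main\n");
            return 1;
        }

        char buf[4096];
        while (fgets(buf, sizeof(buf), pipe) != NULL) {
            if (strstr(buf, "[model loading]") != NULL) {
                fprintf(stderr, "wrapper: model is loading\n");
            } else if (strstr(buf, "[model ready]") != NULL) {
                fprintf(stderr, "wrapper: model is ready for input\n");
                break;
            }
        }

        // A real wrapper would keep the child alive and feed it prompts over a
        // bidirectional pipe (popen only gives a one-way pipe here); this
        // sketch only shows how the two state markers can be detected.
        pclose(pipe);
        return 0;
    }

Since popen is read-only in this mode, a wrapper that also sends prompts to the child's stdin would need fork/exec with explicit pipes instead; the markers themselves are what make the state transition detectable either way.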