There's a better way of clearing lines (#11756)

Use the ANSI escape code for clearing a line.

Signed-off-by: Eric Curtin <ecurtin@redhat.com>
This commit is contained in:
Eric Curtin 2025-02-09 10:34:49 +00:00 committed by GitHub
parent 98f6b0fd1e
commit 19d3c8293b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 5 additions and 11 deletions

View file

@@ -2,6 +2,7 @@
 #include "ggml.h" // for ggml_log_level
+#define LOG_CLR_TO_EOL "\033[K\r"
 #define LOG_COL_DEFAULT "\033[0m"
 #define LOG_COL_BOLD "\033[1m"
 #define LOG_COL_RED "\033[31m"

View file

@@ -535,8 +535,7 @@ class HttpClient {
     static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                                const std::string & progress_suffix) {
-        printe("\r%*s\r%s%s| %s", get_terminal_width(), " ", progress_prefix.c_str(), progress_bar.c_str(),
-               progress_suffix.c_str());
+        printe("\r" LOG_CLR_TO_EOL "%s%s| %s", progress_prefix.c_str(), progress_bar.c_str(), progress_suffix.c_str());
     }

     // Function to write data to a file
     static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
@@ -797,16 +796,13 @@ class LlamaData {
     llama_model_ptr initialize_model(Opt & opt) {
         ggml_backend_load_all();
         resolve_model(opt.model_);
-        printe(
-            "\r%*s"
-            "\rLoading model",
-            get_terminal_width(), " ");
+        printe("\r" LOG_CLR_TO_EOL "Loading model");
         llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
         }
-        printe("\r%*s\r", static_cast<int>(sizeof("Loading model")), " ");
+        printe("\r" LOG_CLR_TO_EOL);
         return model;
     }
@@ -969,10 +965,7 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) {
 static int read_user_input(std::string & user_input) {
     static const char * prompt_prefix = "> ";
 #ifdef WIN32
-    printf(
-        "\r%*s"
-        "\r" LOG_COL_DEFAULT "%s",
-        get_terminal_width(), " ", prompt_prefix);
+    printf("\r" LOG_CLR_TO_EOL LOG_COL_DEFAULT "%s", prompt_prefix);
     std::getline(std::cin, user_input);
     if (std::cin.eof()) {