Add scaffolding for ggml logging macros

Mason M 2024-10-01 13:15:29 -03:00
parent f1b8c42711
commit 90222ac920
4 changed files with 77 additions and 23 deletions

View file

@@ -2177,6 +2177,10 @@ extern "C" {
typedef void (*ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel);
typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
// Set callback for all future logging events.
// If this is not called, or NULL is supplied, everything is output on stderr.
GGML_API void ggml_log_set(ggml_log_callback log_callback, void * user_data);
// optimization parameters
//
// see ggml.c (ggml_opt_default_params) for default values
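The ggml_log_set declaration above is the public entry point for this scaffolding. As a minimal usage sketch, assuming only the API shown in this hunk (my_log_callback is a hypothetical application-side function, not part of the commit), a caller could register its own handler like so:

    #include <stdio.h>
    #include "ggml.h"

    // Hypothetical application callback: prefix warnings and errors, then forward to stderr.
    static void my_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
        (void) user_data;
        if (level == GGML_LOG_LEVEL_ERROR) {
            fputs("[error] ", stderr);
        } else if (level == GGML_LOG_LEVEL_WARN) {
            fputs("[warn] ", stderr);
        }
        fputs(text, stderr);
    }

    int main(void) {
        ggml_log_set(my_log_callback, NULL);  // all future ggml log output goes through the callback
        // ggml_log_set(NULL, NULL);          // per the header comment, NULL restores the stderr default
        return 0;
    }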

View file

@@ -33,6 +33,31 @@ extern "C" {
#endif
#endif
//
// logging
//
#ifdef __GNUC__
#ifdef __MINGW32__
#define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define GGML_ATTRIBUTE_FORMAT(...)
#endif
GGML_ATTRIBUTE_FORMAT(2, 3)
void ggml_log_internal (ggml_log_level level, const char * format, ...);
void ggml_log_callback_default(ggml_log_level level, const char * text, void * user_data);
#define GGML_LOG(...) ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__)
#define GGML_LOG_INFO(...) ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define GGML_LOG_WARN(...) ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
#define GGML_LOG_CONT(...) ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__)
// bitset
typedef uint32_t ggml_bitset_t;
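The GGML_LOG_* macros are meant for call sites inside ggml itself rather than for applications, and because ggml_log_internal is declared with GGML_ATTRIBUTE_FORMAT(2, 3), GCC and Clang can check the format string against its arguments at compile time. A small illustrative sketch (the messages and values are made up, not taken from the commit):

    // Inside a ggml source file that includes ggml-impl.h:
    static void example_call_sites(void) {
        size_t n_bytes = 4096;  // illustrative value
        GGML_LOG_INFO("allocated %zu bytes for the compute buffer\n", n_bytes);
        GGML_LOG_WARN("falling back to CPU for op %d\n", 42);
        // GGML_LOG_ERROR("bad size: %d\n", n_bytes);  // with -Wformat this line would be
        //                                             // flagged: %d does not match size_t
    }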

View file

@@ -319,6 +319,43 @@ void ggml_abort(const char * file, int line, const char * fmt, ...) {
// logging
//
struct ggml_logger_state {
ggml_log_callback log_callback = ggml_log_callback_default;
void * log_callback_user_data = nullptr;
};
static ggml_logger_state g_logger_state;
static void ggml_log_internal_v(ggml_log_level level, const char * format, va_list args) {
va_list args_copy;
va_copy(args_copy, args);
char buffer[128];
int len = vsnprintf(buffer, 128, format, args);
if (len < 128) {
g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data);
} else {
char * buffer2 = new char[len + 1];
vsnprintf(buffer2, len + 1, format, args_copy);
buffer2[len] = 0;
g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data);
delete[] buffer2;
}
va_end(args_copy);
}
void ggml_log_internal(ggml_log_level level, const char * format, ...) {
va_list args;
va_start(args, format);
ggml_log_internal_v(level, format, args);
va_end(args);
}
void ggml_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
(void) level;
(void) user_data;
fputs(text, stderr);
fflush(stderr);
}
#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
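ggml_log_internal_v above formats into a 128-byte stack buffer first and only falls back to a heap allocation when vsnprintf reports that the message did not fit. A self-contained sketch of the same two-pass pattern, with illustrative names and malloc/free in place of new/delete so it stands alone as plain C:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void emit(const char * text) { fputs(text, stderr); }  // stand-in for the callback

    static void log_formatted(const char * format, ...) {
        va_list args, args_copy;
        va_start(args, format);
        va_copy(args_copy, args);

        char buffer[128];
        int len = vsnprintf(buffer, sizeof(buffer), format, args);  // first pass: fixed buffer
        if (len >= 0 && len < (int) sizeof(buffer)) {
            emit(buffer);                                // common case: the message fit on the stack
        } else if (len >= 0) {
            char * big = malloc((size_t) len + 1);       // second pass: exact size on the heap
            if (big != NULL) {
                vsnprintf(big, (size_t) len + 1, format, args_copy);
                emit(big);
                free(big);
            }
        }

        va_end(args_copy);
        va_end(args);
    }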
@@ -23237,4 +23274,9 @@ int ggml_cpu_get_sve_cnt(void) {
return 0;
#endif
}
void ggml_log_set(ggml_log_callback log_callback, void * user_data) {
g_logger_state.log_callback = log_callback ? log_callback : ggml_log_callback_default;
g_logger_state.log_callback_user_data = user_data;
}
////////////////////////////////////////////////////////////////////////////////
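Because ggml_log_set stores user_data alongside the callback, the callback has somewhere to hang per-application state. A hypothetical example (file_log_callback and the log file name are illustrative, not part of this commit) that routes all ggml output into a file:

    #include <stdio.h>
    #include "ggml.h"

    // Hypothetical callback: user_data carries the FILE * to write to.
    static void file_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
        (void) level;
        fputs(text, (FILE *) user_data);
    }

    int main(void) {
        FILE * f = fopen("ggml.log", "a");
        if (f != NULL) {
            ggml_log_set(file_log_callback, f);  // all ggml log output now lands in ggml.log
        }
        // ... run ggml work; the file stays open for the lifetime of the program in this sketch ...
        return 0;
    }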

View file

@@ -2301,22 +2301,11 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer
//
struct llama_state {
llama_state() {
#ifdef GGML_USE_METAL
ggml_backend_metal_log_set_callback(log_callback, log_callback_user_data);
#elif defined(GGML_USE_CUDA)
ggml_backend_cuda_log_set_callback(log_callback, log_callback_user_data);
#elif defined(GGML_USE_CANN)
ggml_backend_cann_log_set_callback(log_callback, log_callback_user_data);
#endif
}
// We save the log callback globally
ggml_log_callback log_callback = llama_log_callback_default;
void * log_callback_user_data = nullptr;
};
static llama_state g_state;
static llama_state g_logger_state;
// available llama models
enum e_model {
@@ -21772,15 +21761,9 @@ const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal
}
void llama_log_set(ggml_log_callback log_callback, void * user_data) {
g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
g_state.log_callback_user_data = user_data;
#ifdef GGML_USE_METAL
ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#elif defined(GGML_USE_CUDA)
ggml_backend_cuda_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#elif defined(GGML_USE_CANN)
ggml_backend_cann_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#endif
ggml_log_set(log_callback, user_data);
g_logger_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
g_logger_state.log_callback_user_data = user_data;
}
static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
@@ -21789,12 +21772,12 @@ static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
char buffer[128];
int len = vsnprintf(buffer, 128, format, args);
if (len < 128) {
g_state.log_callback(level, buffer, g_state.log_callback_user_data);
g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data);
} else {
char * buffer2 = new char[len + 1];
vsnprintf(buffer2, len + 1, format, args_copy);
buffer2[len] = 0;
g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data);
delete[] buffer2;
}
va_end(args_copy);
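With this change llama_log_set forwards the callback to ggml_log_set, so one registration covers both llama.cpp and ggml instead of requiring backend-specific calls such as ggml_backend_metal_log_set_callback. A hedged usage sketch (quiet_log_callback is an illustrative name, not part of the commit):

    #include <stdio.h>
    #include "llama.h"

    // Hypothetical callback that drops DEBUG messages and prints everything else.
    static void quiet_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
        (void) user_data;
        if (level == GGML_LOG_LEVEL_DEBUG) {
            return;
        }
        fputs(text, stderr);
    }

    int main(void) {
        // One call now covers llama.cpp and, via ggml_log_set, ggml itself.
        llama_log_set(quiet_log_callback, NULL);
        // ... load a model, run inference ...
        return 0;
    }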