diff --git a/ggml-metal.h b/ggml-metal.h
index 97d859f2c..790cf0bf7 100644
--- a/ggml-metal.h
+++ b/ggml-metal.h
@@ -35,7 +35,7 @@ struct ggml_cgraph;
 extern "C" {
 #endif
 
-void ggml_metal_log_set_callback(void (*log_callback)(enum ggml_log_level level, const char * text, void * user_data), void * user_data);
+void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data);
 
 struct ggml_metal_context;
 
diff --git a/ggml-metal.m b/ggml-metal.m
index df1bed000..919e51797 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -121,31 +121,31 @@ static NSString * const msl_library_source = @"see metal.metal";
 @implementation GGMLMetalClass
 @end
 
-void (*ggml_metal_log_callback)(enum ggml_log_level level, const char * text, void * user_data) = NULL;
+ggml_log_callback ggml_metal_log_callback = NULL;
 void *ggml_metal_log_user_data = NULL;
 
-void ggml_metal_log_set_callback(void (*log_callback)(enum ggml_log_level level, const char * text, void * user_data), void * user_data) {
-	ggml_metal_log_callback = log_callback;
-	ggml_metal_log_user_data = user_data;
+void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) {
+    ggml_metal_log_callback  = log_callback;
+    ggml_metal_log_user_data = user_data;
 }
 
 static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){
-	if ( ggml_metal_log_callback != NULL ) {
-		va_list args;
-		va_start(args, format);
-		char buffer[128];
-		int len = vsnprintf(buffer, 128, format, args);
-		if (len < 128) {
-			ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
-		} else {
-			char* buffer2 = malloc(len+1);
-			vsnprintf(buffer2, len+1, format, args);
-			buffer2[len] = 0;
-			ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
-			free(buffer2);
+    if ( ggml_metal_log_callback != NULL ) {
+        va_list args;
+        va_start(args, format);
+        char buffer[128];
+        int len = vsnprintf(buffer, 128, format, args);
+        if (len < 128) {
+            ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
+        } else {
+            char* buffer2 = malloc(len+1);
+            vsnprintf(buffer2, len+1, format, args);
+            buffer2[len] = 0;
+            ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
+            free(buffer2);
+        }
+        va_end(args);
     }
-	va_end(args);
-	}
 }
diff --git a/ggml.h b/ggml.h
index 5753c70cf..266c91a84 100644
--- a/ggml.h
+++ b/ggml.h
@@ -1688,6 +1688,12 @@ extern "C" {
     };
 
     typedef void (*ggml_opt_callback)(void * data, float * sched);
 
+    // Signature for logging events
+    // Note that text includes the new line character at the end for most events.
+    // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
+    // if it exists.
+    // It might not exist for progress report where '.' is output repeatedly.
+    typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
     // optimization parameters
     //
diff --git a/llama.cpp b/llama.cpp
index ac1aa140d..9b12173be 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -886,7 +886,7 @@ static std::string llama_token_to_str(const struct llama_context * ctx, llama_to
 
 struct llama_state {
     // We save the log callback globally
-    llama_log_callback log_callback = llama_log_callback_default;
+    ggml_log_callback log_callback = llama_log_callback_default;
     void * log_callback_user_data = nullptr;
 };
 
@@ -6834,7 +6834,7 @@ const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_
     return ctx->model.tensors_by_name;
 }
 
-void llama_log_set(llama_log_callback log_callback, void * user_data) {
+void llama_log_set(ggml_log_callback log_callback, void * user_data) {
     g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
     g_state.log_callback_user_data = user_data;
 }
diff --git a/llama.h b/llama.h
index ca5ee3a87..eba6aabfb 100644
--- a/llama.h
+++ b/llama.h
@@ -145,13 +145,6 @@ extern "C" {
         bool embedding; // embedding mode only
     };
 
-    // Signature for logging events
-    // Note that text includes the new line character at the end for most events.
-    // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
-    // if it exists.
-    // It might not exist for progress report where '.' is output repeatedly.
-    typedef void (*llama_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
-
     // model quantization parameters
     typedef struct llama_model_quantize_params {
        int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
@@ -518,7 +511,7 @@ extern "C" {
 
     // Set callback for all future logging events.
     // If this is not called, or NULL is supplied, everything is output on stderr.
-    LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
+    LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
 
     LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
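For illustration, a minimal sketch of how a client might use the unified typedef after this change. `llama_log_set`, `ggml_metal_log_set_callback`, and `ggml_log_callback` come from the headers in the diff above; the callback name and the output format are hypothetical. It follows the advice in the relocated comment: most events arrive with a trailing '\n', while progress reports emit bare '.' characters without one.

```c
#include <stdio.h>
#include <string.h>

#include "ggml.h"
#include "llama.h"
#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

// Hypothetical sink: prefixes complete lines, passes progress dots through.
static void my_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
    (void) level;     // a real sink might filter on the GGML_LOG_LEVEL_* value
    (void) user_data; // unused in this sketch

    size_t len = strlen(text);
    if (len > 0 && text[len - 1] == '\n') {
        // Most events end with '\n': strip it so the prefix stays per-line.
        fprintf(stderr, "[llama] %.*s\n", (int) (len - 1), text);
    } else {
        // Progress reports print '.' repeatedly without a newline.
        fputs(text, stderr);
    }
}

int main(void) {
    // Route all future llama.cpp log events through the callback above.
    llama_log_set(my_log_callback, NULL);
#ifdef GGML_USE_METAL
    // The Metal backend now accepts the same ggml_log_callback type,
    // so one callback can serve both log sources.
    ggml_metal_log_set_callback(my_log_callback, NULL);
#endif
    return 0;
}
```

Because both setters now take the same `ggml_log_callback` type, a single callback can be shared between `llama_log_set` and `ggml_metal_log_set_callback` without the duplicate function-pointer signatures the old headers required.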