diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index dedaa34fd..8eccb8967 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -911,7 +911,7 @@ static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads)
     }
 }
 
-static void llama_null_log_callback(enum llama_log_level level, const char * text, void * user_data) {
+static void llama_null_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
     (void) level;
     (void) text;
     (void) user_data;
diff --git a/ggml-metal.h b/ggml-metal.h
index cd4531802..97d859f2c 100644
--- a/ggml-metal.h
+++ b/ggml-metal.h
@@ -19,11 +19,11 @@
 
 #pragma once
 
+#include "ggml.h"
+
 #include <stddef.h>
 #include <stdbool.h>
 
-#include "llama.h"
-
 // max memory buffers that can be mapped to the device
 #define GGML_METAL_MAX_BUFFERS 16
 #define GGML_METAL_MAX_COMMAND_BUFFERS 32
@@ -35,7 +35,7 @@ struct ggml_cgraph;
 extern "C" {
 #endif
 
-void ggml_metal_log_set_callback(void (*log_callback)(enum llama_log_level level, const char * text, void * user_data), void * user_data);
+void ggml_metal_log_set_callback(void (*log_callback)(enum ggml_log_level level, const char * text, void * user_data), void * user_data);
 
 struct ggml_metal_context;
 
diff --git a/ggml-metal.m b/ggml-metal.m
index 7ad59db33..5291d9f81 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -16,9 +16,9 @@
 #define GGML_METAL_LOG_WARN(...)
 #define GGML_METAL_LOG_ERROR(...)
 #else
-#define GGML_METAL_LOG_INFO(...)  ggml_metal_log(LLAMA_LOG_LEVEL_INFO, __VA_ARGS__)
-#define GGML_METAL_LOG_WARN(...)  ggml_metal_log(LLAMA_LOG_LEVEL_WARN, __VA_ARGS__)
-#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define GGML_METAL_LOG_INFO(...)  ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
+#define GGML_METAL_LOG_WARN(...)  ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
+#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
 #endif
 
 #define UNUSED(x) (void)(x)
@@ -120,15 +120,15 @@ static NSString * const msl_library_source = @"see metal.metal";
 @implementation GGMLMetalClass
 @end
 
-void (*ggml_metal_log_callback)(enum llama_log_level level, const char * text, void * user_data) = NULL;
+void (*ggml_metal_log_callback)(enum ggml_log_level level, const char * text, void * user_data) = NULL;
 void *ggml_metal_log_user_data = NULL;
 
-void ggml_metal_log_set_callback(void (*log_callback)(enum llama_log_level level, const char * text, void * user_data), void * user_data) {
+void ggml_metal_log_set_callback(void (*log_callback)(enum ggml_log_level level, const char * text, void * user_data), void * user_data) {
     ggml_metal_log_callback = log_callback;
     ggml_metal_log_user_data = user_data;
 }
 
-static void ggml_metal_log(enum llama_log_level level, const char* format, ...){
+static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){
     if ( ggml_metal_log_callback != NULL ) {
         va_list args;
         va_start(args, format);
diff --git a/ggml.h b/ggml.h
index c936823d6..230217664 100644
--- a/ggml.h
+++ b/ggml.h
@@ -437,6 +437,12 @@ extern "C" {
         GGML_OBJECT_WORK_BUFFER
     };
 
+    enum ggml_log_level {
+        GGML_LOG_LEVEL_ERROR = 2,
+        GGML_LOG_LEVEL_WARN  = 3,
+        GGML_LOG_LEVEL_INFO  = 4
+    };
+
     // ggml object
     struct ggml_object {
         size_t offs;
diff --git a/llama.cpp b/llama.cpp
index 139074993..223bbac73 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -91,15 +91,12 @@
 
 //
 
 LLAMA_ATTRIBUTE_FORMAT(2, 3)
-static void llama_log_internal        (enum llama_log_level level, const char* format, ...);
-static void llama_log_callback_default(enum llama_log_level level, const char * text, void * user_data);
-
-#define LLAMA_LOG_INFO(...)  llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
-#define LLAMA_LOG_WARN(...)  llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
-#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
-
-
+static void llama_log_internal        (ggml_log_level level, const char* format, ...);
+static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);
+#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
 
 //
 // helpers
@@ -6370,7 +6367,7 @@ void llama_log_set(llama_log_callback log_callback, void * user_data) {
     g_state.log_callback_user_data = user_data;
 }
 
-static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
+static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
     va_list args_copy;
     va_copy(args_copy, args);
     char buffer[128];
@@ -6387,14 +6384,14 @@ static void llama_log_internal_v(llama_log_level level, const char * format, va_
     va_end(args_copy);
 }
 
-static void llama_log_internal(llama_log_level level, const char * format, ...) {
+static void llama_log_internal(ggml_log_level level, const char * format, ...) {
     va_list args;
     va_start(args, format);
     llama_log_internal_v(level, format, args);
     va_end(args);
 }
 
-static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
+static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
     (void) level;
     (void) user_data;
     fputs(text, stderr);
diff --git a/llama.h b/llama.h
index 37975bebe..ca5ee3a87 100644
--- a/llama.h
+++ b/llama.h
@@ -62,12 +62,6 @@ extern "C" {
 
     typedef int llama_token;
 
-    enum llama_log_level {
-        LLAMA_LOG_LEVEL_ERROR = 2,
-        LLAMA_LOG_LEVEL_WARN  = 3,
-        LLAMA_LOG_LEVEL_INFO  = 4
-    };
-
     enum llama_vocab_type {
         LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece
         LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding
@@ -156,7 +150,7 @@ extern "C" {
     // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
     // if it exists.
     // It might not exist for progress report where '.' is output repeatedly.
-    typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
+    typedef void (*llama_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
 
     // model quantization parameters
     typedef struct llama_model_quantize_params {
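
Usage note (not part of the patch): after this change, ggml_log_level replaces llama_log_level throughout, so host applications register their logger against the ggml enum. A minimal sketch of caller code under the patched headers above; the callback name and the WARN-level filter are illustrative, not part of the API:

    #include <cstdio>

    #include "llama.h"

    // Hypothetical example callback: forward WARN and ERROR messages to stderr,
    // drop INFO chatter. Lower values are more severe (ERROR = 2, WARN = 3, INFO = 4).
    static void my_stderr_logger(enum ggml_log_level level, const char * text, void * user_data) {
        (void) user_data;
        if (level <= GGML_LOG_LEVEL_WARN) {
            fputs(text, stderr); // text may already end in '\n'; see the comment in llama.h
        }
    }

    int main() {
        llama_log_set(my_stderr_logger, /*user_data =*/ NULL);
        // ... llama_load_model_from_file(...), inference, etc.
        return 0;
    }

Since ggml-metal.m now routes GGML_METAL_LOG_* through the same enum, a single callback of this shape can also be handed to ggml_metal_log_set_callback without any type adaptation.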