ggml : ggml_log_callback typedef

This commit is contained in:
Rickard Hallerbäck 2023-09-17 18:19:37 +02:00
parent 78de0dff08
commit e0eba91bea
5 changed files with 29 additions and 30 deletions

View file

@@ -35,7 +35,7 @@ struct ggml_cgraph;
extern "C" {
#endif
void ggml_metal_log_set_callback(void (*log_callback)(enum ggml_log_level level, const char * text, void * user_data), void * user_data);
void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data);
struct ggml_metal_context;

View file

@@ -121,31 +121,31 @@ static NSString * const msl_library_source = @"see metal.metal";
@implementation GGMLMetalClass
@end
void (*ggml_metal_log_callback)(enum ggml_log_level level, const char * text, void * user_data) = NULL;
ggml_log_callback ggml_metal_log_callback = NULL;
void *ggml_metal_log_user_data = NULL;
void ggml_metal_log_set_callback(void (*log_callback)(enum ggml_log_level level, const char * text, void * user_data), void * user_data) {
ggml_metal_log_callback = log_callback;
ggml_metal_log_user_data = user_data;
void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) {
ggml_metal_log_callback = log_callback;
ggml_metal_log_user_data = user_data;
}
static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){
if ( ggml_metal_log_callback != NULL ) {
va_list args;
va_start(args, format);
char buffer[128];
int len = vsnprintf(buffer, 128, format, args);
if (len < 128) {
ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
} else {
char* buffer2 = malloc(len+1);
vsnprintf(buffer2, len+1, format, args);
buffer2[len] = 0;
ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
free(buffer2);
if ( ggml_metal_log_callback != NULL ) {
va_list args;
va_start(args, format);
char buffer[128];
int len = vsnprintf(buffer, 128, format, args);
if (len < 128) {
ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
} else {
char* buffer2 = malloc(len+1);
vsnprintf(buffer2, len+1, format, args);
buffer2[len] = 0;
ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
free(buffer2);
}
va_end(args);
}
va_end(args);
}
}

ggml.h — 6 changes
View file

@@ -1688,6 +1688,12 @@ extern "C" {
};
typedef void (*ggml_opt_callback)(void * data, float * sched);
// Signature for logging events
// Note that text includes the new line character at the end for most events.
// If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
// if it exists.
// It might not exist for progress report where '.' is output repeatedly.
typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
// optimization parameters
//

View file

@@ -886,7 +886,7 @@ static std::string llama_token_to_str(const struct llama_context * ctx, llama_to
struct llama_state {
// We save the log callback globally
llama_log_callback log_callback = llama_log_callback_default;
ggml_log_callback log_callback = llama_log_callback_default;
void * log_callback_user_data = nullptr;
};
@@ -6834,7 +6834,7 @@ const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_
return ctx->model.tensors_by_name;
}
void llama_log_set(llama_log_callback log_callback, void * user_data) {
void llama_log_set(ggml_log_callback log_callback, void * user_data) {
g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
g_state.log_callback_user_data = user_data;
}

View file

@@ -145,13 +145,6 @@ extern "C" {
bool embedding; // embedding mode only
};
// Signature for logging events
// Note that text includes the new line character at the end for most events.
// If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
// if it exists.
// It might not exist for progress report where '.' is output repeatedly.
typedef void (*llama_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
// model quantization parameters
typedef struct llama_model_quantize_params {
int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
@@ -518,7 +511,7 @@ extern "C" {
// Set callback for all future logging events.
// If this is not called, or NULL is supplied, everything is output on stderr.
LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);