ggml : log level enum used by llama

This commit is contained in:
Rickard Hallerbäck 2023-09-17 16:38:46 +02:00
parent 696bf0595a
commit d266e15c81
6 changed files with 25 additions and 28 deletions

View file

@@ -911,7 +911,7 @@ static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads)
     }
 }
 
-static void llama_null_log_callback(enum llama_log_level level, const char * text, void * user_data) {
+static void llama_null_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
     (void) level;
     (void) text;
     (void) user_data;

View file

@@ -19,11 +19,11 @@
 #pragma once
 
+#include "ggml.h"
+
 #include <stddef.h>
 #include <stdbool.h>
-#include "llama.h"
 
 // max memory buffers that can be mapped to the device
 #define GGML_METAL_MAX_BUFFERS 16
 #define GGML_METAL_MAX_COMMAND_BUFFERS 32
@@ -35,7 +35,7 @@ struct ggml_cgraph;
 extern "C" {
 #endif
 
-void ggml_metal_log_set_callback(void (*log_callback)(enum llama_log_level level, const char * text, void * user_data), void * user_data);
+void ggml_metal_log_set_callback(void (*log_callback)(enum ggml_log_level level, const char * text, void * user_data), void * user_data);
 
 struct ggml_metal_context;

View file

@@ -16,9 +16,9 @@
 #define GGML_METAL_LOG_WARN(...)
 #define GGML_METAL_LOG_ERROR(...)
 #else
-#define GGML_METAL_LOG_INFO(...)  ggml_metal_log(LLAMA_LOG_LEVEL_INFO, __VA_ARGS__)
-#define GGML_METAL_LOG_WARN(...)  ggml_metal_log(LLAMA_LOG_LEVEL_WARN, __VA_ARGS__)
-#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define GGML_METAL_LOG_INFO(...)  ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
+#define GGML_METAL_LOG_WARN(...)  ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
+#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
 #endif
 
 #define UNUSED(x) (void)(x)
@@ -120,15 +120,15 @@ static NSString * const msl_library_source = @"see metal.metal";
 @implementation GGMLMetalClass
 @end
 
-void (*ggml_metal_log_callback)(enum llama_log_level level, const char * text, void * user_data) = NULL;
+void (*ggml_metal_log_callback)(enum ggml_log_level level, const char * text, void * user_data) = NULL;
 void *ggml_metal_log_user_data = NULL;
 
-void ggml_metal_log_set_callback(void (*log_callback)(enum llama_log_level level, const char * text, void * user_data), void * user_data) {
+void ggml_metal_log_set_callback(void (*log_callback)(enum ggml_log_level level, const char * text, void * user_data), void * user_data) {
     ggml_metal_log_callback  = log_callback;
     ggml_metal_log_user_data = user_data;
 }
 
-static void ggml_metal_log(enum llama_log_level level, const char* format, ...){
+static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){
     if ( ggml_metal_log_callback != NULL ) {
         va_list args;
         va_start(args, format);

6
ggml.h
View file

@@ -437,6 +437,12 @@ extern "C" {
         GGML_OBJECT_WORK_BUFFER
     };
 
+    enum ggml_log_level {
+        GGML_LOG_LEVEL_ERROR = 2,
+        GGML_LOG_LEVEL_WARN  = 3,
+        GGML_LOG_LEVEL_INFO  = 4
+    };
+
     // ggml object
     struct ggml_object {
         size_t offs;

View file

@@ -91,15 +91,12 @@
 //
 
 LLAMA_ATTRIBUTE_FORMAT(2, 3)
-static void llama_log_internal        (enum llama_log_level level, const char* format, ...);
-static void llama_log_callback_default(enum llama_log_level level, const char * text, void * user_data);
-
-#define LLAMA_LOG_INFO(...)  llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
-#define LLAMA_LOG_WARN(...)  llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
-#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
+static void llama_log_internal        (ggml_log_level level, const char* format, ...);
+static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);
+#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
 
 //
 // helpers
@@ -6370,7 +6367,7 @@ void llama_log_set(llama_log_callback log_callback, void * user_data) {
     g_state.log_callback_user_data = user_data;
 }
 
-static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
+static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
     va_list args_copy;
     va_copy(args_copy, args);
     char buffer[128];
@@ -6387,14 +6384,14 @@ static void llama_log_internal_v(llama_log_level level, const char * format, va_
     va_end(args_copy);
 }
 
-static void llama_log_internal(llama_log_level level, const char * format, ...) {
+static void llama_log_internal(ggml_log_level level, const char * format, ...) {
     va_list args;
     va_start(args, format);
     llama_log_internal_v(level, format, args);
     va_end(args);
 }
 
-static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
+static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
     (void) level;
     (void) user_data;
     fputs(text, stderr);

View file

@@ -62,12 +62,6 @@ extern "C" {
 
     typedef int llama_token;
 
-    enum llama_log_level {
-        LLAMA_LOG_LEVEL_ERROR = 2,
-        LLAMA_LOG_LEVEL_WARN  = 3,
-        LLAMA_LOG_LEVEL_INFO  = 4
-    };
-
     enum llama_vocab_type {
         LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece
         LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding
@@ -156,7 +150,7 @@ extern "C" {
     // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
    // if it exists.
    // It might not exist for progress report where '.' is output repeatedly.
-    typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
+    typedef void (*llama_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
 
     // model quantization parameters
     typedef struct llama_model_quantize_params {