diff --git a/llama.cpp b/llama.cpp
index 82710730c..5d85234da 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -56,11 +56,11 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-void llama_log_internal(llama_log_callback log_callback, void * log_callback_user_data, int level, const char* format, ...);
+void llama_log_internal(int level, const char* format, ...);
 void llama_log_callback_default(int level, const char * text, void * ctx);
 
-#define LLAMA_LOG_INFO(model, ...) llama_log_internal((model).log_callback, (model).log_callback_user_data, LLAMA_LOG_LEVEL_INFO, __VA_ARGS__)
-#define LLAMA_LOG_WARN(model, ...) llama_log_internal((model).log_callback, (model).log_callback_user_data, LLAMA_LOG_LEVEL_WARN, __VA_ARGS__)
-#define LLAMA_LOG_ERROR(model, ...) llama_log_internal((model).log_callback, (model).log_callback_user_data, LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define LLAMA_LOG_INFO(...)  llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...)  llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
 
 #define LLAMA_USE_SCRATCH
@@ -294,11 +294,6 @@ struct llama_model {
     int64_t t_load_us = 0;
     int64_t t_start_us = 0;
 
-    // We save the log callback with the model because some logging can occur after loading
-    // and llama_context_params doesn't exist then anymore.
-    llama_log_callback log_callback;
-    void * log_callback_user_data;
-
     llama_vocab vocab;
 
     ~llama_model() {
@@ -410,6 +405,14 @@ struct llama_context {
     }
 };
 
+struct llama_state {
+    // We save the log callback globally
+    llama_log_callback log_callback;
+    void * log_callback_user_data;
+};
+// global state
+static llama_state g_state;
+
 template <typename T>
 static T checked_mul(T a, T b) {
     T ret = a * b;
@@ -477,7 +480,7 @@ struct llama_file_loader {
     llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map, const llama_model * model_for_logging)
         : file(fname, "rb") {
         if (model_for_logging) {
-            LLAMA_LOG_INFO(*model_for_logging, "llama.cpp: loading model from %s", fname);
+            LLAMA_LOG_INFO("llama.cpp: loading model from %s", fname);
         } else {
             fprintf(stderr, "llama.cpp: loading model from %s\n", fname);
         }
@@ -870,8 +873,6 @@ struct llama_context_params llama_context_default_params() {
         /*.rope_freq_scale             =*/ 1.0f,
         /*.progress_callback           =*/ nullptr,
         /*.progress_callback_user_data =*/ nullptr,
-        /*.log_callback                =*/ nullptr,
-        /*.log_callback_user_data      =*/ nullptr,
         /*.low_vram                    =*/ false,
         /*.f16_kv                      =*/ true,
         /*.logits_all                  =*/ false,
@@ -1041,19 +1042,19 @@ static void llama_model_load_internal(
     const uint32_t n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
 
     {
-        LLAMA_LOG_INFO(model, "%s: format     = %s", __func__, llama_file_version_name(file_version));
-        LLAMA_LOG_INFO(model, "%s: n_vocab    = %u", __func__, hparams.n_vocab);
-        LLAMA_LOG_INFO(model, "%s: n_ctx      = %u", __func__, hparams.n_ctx);
-        LLAMA_LOG_INFO(model, "%s: n_embd     = %u", __func__, hparams.n_embd);
-        LLAMA_LOG_INFO(model, "%s: n_mult     = %u", __func__, hparams.n_mult);
-        LLAMA_LOG_INFO(model, "%s: n_head     = %u", __func__, hparams.n_head);
-        LLAMA_LOG_INFO(model, "%s: n_layer    = %u", __func__, hparams.n_layer);
-        LLAMA_LOG_INFO(model, "%s: n_rot      = %u", __func__, hparams.n_rot);
-        LLAMA_LOG_INFO(model, "%s: freq_base  = %.1f", __func__, hparams.rope_freq_base);
-        LLAMA_LOG_INFO(model, "%s: freq_scale = %g", __func__, hparams.rope_freq_scale);
-        LLAMA_LOG_INFO(model, "%s: ftype      = %u (%s)", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
-        LLAMA_LOG_INFO(model, "%s: n_ff       = %u", __func__, n_ff);
-        LLAMA_LOG_INFO(model, "%s: model size = %s", __func__, llama_model_type_name(model.type));
+        LLAMA_LOG_INFO("%s: format     = %s", __func__, llama_file_version_name(file_version));
+        LLAMA_LOG_INFO("%s: n_vocab    = %u", __func__, hparams.n_vocab);
+        LLAMA_LOG_INFO("%s: n_ctx      = %u", __func__, hparams.n_ctx);
+        LLAMA_LOG_INFO("%s: n_embd     = %u", __func__, hparams.n_embd);
+        LLAMA_LOG_INFO("%s: n_mult     = %u", __func__, hparams.n_mult);
+        LLAMA_LOG_INFO("%s: n_head     = %u", __func__, hparams.n_head);
+        LLAMA_LOG_INFO("%s: n_layer    = %u", __func__, hparams.n_layer);
+        LLAMA_LOG_INFO("%s: n_rot      = %u", __func__, hparams.n_rot);
+        LLAMA_LOG_INFO("%s: freq_base  = %.1f", __func__, hparams.rope_freq_base);
+        LLAMA_LOG_INFO("%s: freq_scale = %g", __func__, hparams.rope_freq_scale);
+        LLAMA_LOG_INFO("%s: ftype      = %u (%s)", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
+        LLAMA_LOG_INFO("%s: n_ff       = %u", __func__, n_ff);
+        LLAMA_LOG_INFO("%s: model size = %s", __func__, llama_model_type_name(model.type));
     }
 
     if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
@@ -1081,7 +1082,7 @@ static void llama_model_load_internal(
     size_t ctx_size;
     size_t mmapped_size;
     ml->calc_sizes(&ctx_size, &mmapped_size);
-    LLAMA_LOG_INFO(model, "%s: ggml ctx size = %7.2f MB", __func__, ctx_size/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB", __func__, ctx_size/1024.0/1024.0);
 
     // create the ggml context
     {
@@ -1105,7 +1106,7 @@ static void llama_model_load_internal(
 
     (void) main_gpu;
 #if defined(GGML_USE_CUBLAS)
-    LLAMA_LOG_INFO(model, "%s: using CUDA for GPU acceleration", __func__);
+    LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration", __func__);
     ggml_cuda_set_main_device(main_gpu);
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
 #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
@@ -1210,14 +1211,14 @@ static void llama_model_load_internal(
         const size_t mem_required_state =
            scale*MEM_REQ_KV_SELF().at(model.type);
 
-        LLAMA_LOG_INFO(model, "%s: mem required = %7.2f MB (+ %7.2f MB per state)", __func__,
+        LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)", __func__,
                 mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
 
         (void) vram_scratch;
         (void) n_batch;
 #ifdef GGML_USE_CUBLAS
         if (low_vram) {
-            LLAMA_LOG_INFO(model, "%s: not allocating a VRAM scratch buffer due to low VRAM option", __func__);
+            LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option", __func__);
             ggml_cuda_set_scratch_size(0); // disable scratch
         } else {
             const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type);
@@ -1225,7 +1226,7 @@ static void llama_model_load_internal(
             vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context);
             ggml_cuda_set_scratch_size(vram_scratch);
             if (n_gpu_layers > 0) {
-                LLAMA_LOG_INFO(model, "%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer",
+                LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer",
                         __func__, vram_scratch_base / kB, vram_scratch_per_context,
                         (vram_scratch + MB - 1) / MB); // round up
             }
@@ -1235,9 +1236,9 @@ static void llama_model_load_internal(
 
 #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
         const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
 
-        LLAMA_LOG_INFO(model, "%s: offloading %d repeating layers to GPU", __func__, n_gpu);
+        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU", __func__, n_gpu);
         if (n_gpu_layers > (int) hparams.n_layer) {
-            LLAMA_LOG_INFO(model, "%s: offloading non-repeating layers to GPU", __func__);
+            LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU", __func__);
         }
         size_t vram_kv_cache = 0;
@@ -1246,17 +1247,17 @@ static void llama_model_load_internal(
         const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
         if (n_gpu_layers > (int) hparams.n_layer + 1) {
             if (low_vram) {
-                LLAMA_LOG_INFO(model, "%s: cannot offload v cache to GPU due to low VRAM option", __func__);
+                LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option", __func__);
             } else {
-                LLAMA_LOG_INFO(model, "%s: offloading v cache to GPU", __func__);
+                LLAMA_LOG_INFO("%s: offloading v cache to GPU", __func__);
                 vram_kv_cache += MEM_REQ_KV_SELF().at(model.type) / 2;
             }
         }
         if (n_gpu_layers > (int) hparams.n_layer + 2) {
             if (low_vram) {
-                LLAMA_LOG_WARN(model, "%s: cannot offload k cache to GPU due to low VRAM option", __func__);
+                LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option", __func__);
             } else {
-                LLAMA_LOG_INFO(model, "%s: offloading k cache to GPU", __func__);
+                LLAMA_LOG_INFO("%s: offloading k cache to GPU", __func__);
                 vram_kv_cache += MEM_REQ_KV_SELF().at(model.type) / 2;
             }
         }
@@ -1265,9 +1266,9 @@ static void llama_model_load_internal(
         const int max_offloadable_layers = hparams.n_layer + 1;
 #endif // GGML_USE_CUBLAS
 
-        LLAMA_LOG_INFO(model, "%s: offloaded %d/%d layers to GPU",
+        LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU",
                 __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
-        LLAMA_LOG_INFO(model, "%s: total VRAM used: %zu MB",
+        LLAMA_LOG_INFO("%s: total VRAM used: %zu MB",
                 __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
 #else
         (void) n_gpu_layers;
@@ -1322,7 +1323,7 @@ static bool llama_model_load(
                                   use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
         return true;
     } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR(model, "error loading model: %s", err.what());
+        LLAMA_LOG_ERROR("error loading model: %s", err.what());
         return false;
     }
 }
@@ -2713,16 +2714,13 @@ struct llama_model * llama_load_model_from_file(
 
     llama_model * model = new llama_model;
 
-    model->log_callback = params.log_callback ? params.log_callback : llama_log_callback_default;
-    model->log_callback_user_data = params.log_callback_user_data;
-
     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
 
     if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gpu_layers,
                 params.main_gpu, params.tensor_split, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
                 memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
                 params.progress_callback_user_data)) {
-        LLAMA_LOG_ERROR(*model, "%s: failed to load model", __func__);
+        LLAMA_LOG_ERROR("%s: failed to load model", __func__);
         delete model;
         return nullptr;
     }
@@ -2773,14 +2771,14 @@ struct llama_context * llama_new_context_with_model(
     // reserve memory for context buffers
     if (!params.vocab_only) {
         if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
-            LLAMA_LOG_ERROR(*model, "%s: kv_cache_init() failed for self-attention cache", __func__);
+            LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache", __func__);
             llama_free(ctx);
             return nullptr;
         }
 
         {
             const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
-            LLAMA_LOG_INFO(*model, "%s: kv self size = %7.2f MB", __func__, memory_size / 1024.0 / 1024.0);
+            LLAMA_LOG_INFO("%s: kv self size = %7.2f MB", __func__, memory_size / 1024.0 / 1024.0);
         }
 
         const auto & hparams = ctx->model.hparams;
@@ -2824,7 +2822,7 @@ struct llama_context * llama_new_context_with_model(
 
 #define LLAMA_METAL_CHECK_BUF(result) \
     if (!(result)) { \
-        LLAMA_LOG_ERROR(*model, "%s: failed to add buffer", __func__); \
+        LLAMA_LOG_ERROR("%s: failed to add buffer", __func__); \
         llama_free(ctx); \
         return NULL; \
     }
@@ -2889,13 +2887,13 @@ int llama_model_quantize(
 }
 
 int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
-    LLAMA_LOG_INFO(model, "%s: applying lora adapter from '%s' - please wait ...", __func__, path_lora);
+    LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...", __func__, path_lora);
 
     const int64_t t_start_lora_us = ggml_time_us();
 
     auto fin = std::ifstream(path_lora, std::ios::binary);
     if (!fin) {
-        LLAMA_LOG_ERROR(model, "%s: failed to open '%s'", __func__, path_lora);
+        LLAMA_LOG_ERROR("%s: failed to open '%s'", __func__, path_lora);
         return 1;
     }
 
@@ -2904,14 +2902,14 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
         uint32_t magic;
         fin.read((char *) &magic, sizeof(magic));
         if (magic != LLAMA_FILE_MAGIC_GGLA) {
-            LLAMA_LOG_ERROR(model, "%s: bad file magic", __func__);
+            LLAMA_LOG_ERROR("%s: bad file magic", __func__);
             return 1;
         }
         uint32_t format_version;
         fin.read((char *) &format_version, sizeof(format_version));
 
         if (format_version != 1) {
-            LLAMA_LOG_ERROR(model, "%s: unsupported file version", __func__ );
+            LLAMA_LOG_ERROR("%s: unsupported file version", __func__ );
             return 1;
         }
     }
@@ -2922,7 +2920,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
     fin.read((char *) &lora_alpha, sizeof(lora_alpha));
     float scaling = (float)lora_alpha / (float)lora_r;
 
-    LLAMA_LOG_INFO(model, "%s: r = %d, alpha = %d, scaling = %.2f", __func__, lora_r, lora_alpha, scaling);
+    LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f", __func__, lora_r, lora_alpha, scaling);
 
     // create a temporary ggml context to store the lora tensors
 
@@ -2948,7 +2946,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
     ggml_context * base_ctx = NULL;
     llama_buffer base_buf;
     if (path_base_model) {
-        LLAMA_LOG_INFO(model, "%s: loading base model from '%s'", __func__, path_base_model);
+        LLAMA_LOG_INFO("%s: loading base model from '%s'", __func__, path_base_model);
         model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, &model));
 
         size_t ctx_size;
@@ -3005,17 +3003,17 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
         const std::string lora_suffix = ".lora";
         size_t pos = name.rfind(lora_suffix);
         if (pos == std::string::npos) {
-            LLAMA_LOG_ERROR(model, "%s: error: '%s' is not a lora tensor", __func__, name.c_str());
+            LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor", __func__, name.c_str());
             return 1;
         }
 
         std::string lora_type = name.substr(pos + lora_suffix.length());
         std::string base_name = name;
         base_name.erase(pos);
-        // LLAMA_LOG_INFO(model, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
+        // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
 
         if (model_tensors.find(base_name) == model_tensors.end()) {
-            LLAMA_LOG_ERROR(model, "%s: unknown tensor '%s' in lora adapter", __func__, name.data());
+            LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter", __func__, name.data());
             return 1;
         }
 
@@ -3026,7 +3024,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             case 1: wtype = GGML_TYPE_F16; break;
             default:
                     {
-                        LLAMA_LOG_ERROR(model, "%s: invalid tensor data type '%d'",
+                        LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'",
                                 __func__, ftype);
                         return false;
                     }
@@ -3036,7 +3034,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
         }
         else {
-            LLAMA_LOG_ERROR(model, "%s: unsupported tensor dimension %d", __func__, n_dims);
+            LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d", __func__, n_dims);
             return 1;
         }
         ggml_set_name(lora_tensor, "lora_tensor");
@@ -3074,7 +3072,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             if (model_loader) {
                 // load from base model
                 if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
-                    LLAMA_LOG_ERROR(model, "%s: error: tensor '%s' not found in base model", __func__, base_name.c_str());
+                    LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model", __func__, base_name.c_str());
                     return 1;
                 }
                 size_t idx = model_loader->tensors_map.name_to_idx[base_name];
@@ -3090,8 +3088,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 
             if (ggml_is_quantized(base_t->type)) {
                 if (!warned) {
-                    LLAMA_LOG_WARN(model, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
-                                   "use a f16 or f32 base model with --lora-base", __func__);
+                    LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
+                                   "use a f16 or f32 base model with --lora-base", __func__);
                     warned = true;
                 }
             }
@@ -3105,8 +3103,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             ggml_set_name(loraB, "loraB");
 
             if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
-                LLAMA_LOG_ERROR(model, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
-                                " are you sure that this adapter is for this model?", __func__, base_t->ne[0], loraA->ne[1]);
+                LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
+                                " are you sure that this adapter is for this model?", __func__, base_t->ne[0], loraA->ne[1]);
                 return 1;
             }
 
@@ -3163,7 +3161,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
     }
 
     const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
-    LLAMA_LOG_INFO(model, " done (%.2f ms)", t_lora_us / 1000.0);
+    LLAMA_LOG_INFO(" done (%.2f ms)", t_lora_us / 1000.0);
 
     return 0;
 }
@@ -3172,7 +3170,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
     try {
         return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
     } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR(ctx->model, "%s: failed to apply lora adapter: %s", __func__, err.what());
+        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s", __func__, err.what());
         return 1;
     }
 }
@@ -3181,7 +3179,7 @@ int llama_model_apply_lora_from_file(const struct llama_model * model, const cha
     try {
         return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
     } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR(*model, "%s: failed to apply lora adapter: %s", __func__, err.what());
+        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s", __func__, err.what());
        return 1;
     }
 }
@@ -3444,7 +3442,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
         const uint32_t version = file.read_u32();
 
         if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
-            LLAMA_LOG_ERROR(ctx->model, "%s : unknown (magic, version) for session file: %08x, %08x", __func__, magic, version);
+            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x", __func__, magic, version);
             return false;
         }
 
@@ -3452,7 +3450,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
         file.read_raw(&session_hparams, sizeof(llama_hparams));
 
         if (session_hparams != ctx->model.hparams) {
-            LLAMA_LOG_INFO(ctx->model, "%s : model hparams didn't match from session file!", __func__);
+            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!", __func__);
             return false;
         }
     }
@@ -3462,7 +3460,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
         const uint32_t n_token_count = file.read_u32();
 
         if (n_token_count > n_token_capacity) {
-            LLAMA_LOG_ERROR(ctx->model, "%s : token count in session file exceeded capacity! %u > %zu", __func__, n_token_count, n_token_capacity);
+            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu", __func__, n_token_count, n_token_capacity);
             return false;
         }
 
@@ -3476,7 +3474,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
         const size_t n_state_size_max = llama_get_state_size(ctx);
 
         if (n_state_size_cur > n_state_size_max) {
-            LLAMA_LOG_ERROR(ctx->model, "%s : the state size in session file is too big! max %zu, got %zu", __func__, n_state_size_max, n_state_size_cur);
+            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu", __func__, n_state_size_max, n_state_size_cur);
             return false;
         }
 
@@ -3493,7 +3491,7 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi
     try {
         return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
     } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR(ctx->model, "error loading session file: %s", err.what());
+        LLAMA_LOG_ERROR("error loading session file: %s", err.what());
         return false;
     }
 }
@@ -3530,7 +3528,7 @@ int llama_eval(
                          int   n_past,
                          int   n_threads) {
     if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
-        LLAMA_LOG_ERROR(ctx->model, "%s: failed to eval", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval", __func__);
         return 1;
     }
 
@@ -3552,7 +3550,7 @@ int llama_eval_embd(
                          int   n_past,
                          int   n_threads) {
     if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
-        LLAMA_LOG_ERROR(ctx->model, "%s: failed to eval", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval", __func__);
         return 1;
     }
 
@@ -3573,7 +3571,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) {
     const std::vector<llama_token> tmp(n_batch, llama_token_bos());
 
     if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
-        LLAMA_LOG_ERROR(ctx->model, "%s: failed to eval", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval", __func__);
         return 1;
     }
 
@@ -3589,7 +3587,7 @@ int llama_tokenize_with_model(
     auto res = llama_tokenize(model->vocab, text, add_bos);
 
     if (n_max_tokens < (int) res.size()) {
-        LLAMA_LOG_ERROR(*model, "%s: too many tokens", __func__);
+        LLAMA_LOG_ERROR("%s: too many tokens", __func__);
         return -((int) res.size());
     }
 
@@ -3706,15 +3704,15 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) {
 void llama_print_timings(struct llama_context * ctx) {
     const llama_timings timings = llama_get_timings(ctx);
 
-    LLAMA_LOG_INFO(ctx->model, "");
-    LLAMA_LOG_INFO(ctx->model, "%s:        load time = %8.2f ms", __func__, timings.t_load_ms);
-    LLAMA_LOG_INFO(ctx->model, "%s:      sample time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)",
+    LLAMA_LOG_INFO("");
+    LLAMA_LOG_INFO("%s:        load time = %8.2f ms", __func__, timings.t_load_ms);
+    LLAMA_LOG_INFO("%s:      sample time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)",
             __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
-    LLAMA_LOG_INFO(ctx->model, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
+    LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
-    LLAMA_LOG_INFO(ctx->model, "%s:        eval time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)",
+    LLAMA_LOG_INFO("%s:        eval time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)",
            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
-    LLAMA_LOG_INFO(ctx->model, "%s:       total time = %8.2f ms", __func__, (timings.t_end_ms - timings.t_start_ms));
+    LLAMA_LOG_INFO("%s:       total time = %8.2f ms", __func__, (timings.t_end_ms - timings.t_start_ms));
 }
 
 void llama_reset_timings(struct llama_context * ctx) {
@@ -3751,31 +3749,37 @@ const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_
     return ctx->model.tensors_by_name;
 }
 
+
+void llama_log_set(llama_log_callback log_callback, void * user_data) {
+    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
+    g_state.log_callback_user_data = user_data;
+}
+
 #if defined(_MSC_VER) && !defined(vsnprintf)
 #define vsnprintf _vsnprintf
 #endif
 
-void llama_log_internal_v(llama_log_callback log_callback, void * log_callback_user_data, int level, const char * format, va_list args) {
+void llama_log_internal_v(int level, const char * format, va_list args) {
     va_list args_copy;
     va_copy(args_copy, args);
     char buffer[128];
     int len = vsnprintf(buffer, 128, format, args);
     if (len < 128) {
-        log_callback(level, buffer, log_callback_user_data);
+        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
     } else {
         char* buffer2 = new char[len+1];
         vsnprintf(buffer2, len+1, format, args_copy);
         buffer2[len] = 0;
-        log_callback(level, buffer2, log_callback_user_data);
+        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
         delete[] buffer2;
     }
     va_end(args_copy);
 }
 
-void llama_log_internal(llama_log_callback log_callback, void * log_callback_user_data, int level, const char * format, ...) {
+void llama_log_internal(int level, const char * format, ...) {
     va_list args;
     va_start(args, format);
-    llama_log_internal_v(log_callback, log_callback_user_data, level, format, args);
+    llama_log_internal_v(level, format, args);
     va_end(args);
 }
diff --git a/llama.h b/llama.h
index 2274161b3..2dd7c84df 100644
--- a/llama.h
+++ b/llama.h
@@ -87,7 +87,7 @@ extern "C" {
 #define LLAMA_LOG_LEVEL_WARN 3
 #define LLAMA_LOG_LEVEL_INFO 4
 
-    struct llama_context_params {
+    struct llama_context_params {
         uint32_t seed;       // RNG seed, -1 for random
         int32_t  n_ctx;      // text context
         int32_t  n_batch;    // prompt processing batch size
@@ -105,12 +105,6 @@ extern "C" {
         // context pointer passed to the progress callback
         void * progress_callback_user_data;
 
-        // Called for every error, warning and information.
-        // If this is NULL, everything is output on stderr.
-        llama_log_callback log_callback;
-        // context pointer passed to the log callback
-        void * log_callback_user_data;
-
         // Keep the booleans together to avoid misalignment during copy-by-value.
         bool low_vram;   // if true, reduce VRAM usage at the cost of performance
         bool f16_kv;     // use fp16 for KV cache
@@ -165,6 +159,10 @@ extern "C" {
         int32_t n_eval;
     };
 
+    // Set callback for logging events. If this is not called, or NULL is supplied,
+    // everything is output on stderr.
+    LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
+
     LLAMA_API int llama_max_devices();
 
     LLAMA_API struct llama_context_params llama_context_default_params();
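
Usage note (not part of the patch): because the log callback now lives in the global g_state rather than in each llama_model, an application registers it once through the new llama_log_set() API; passing NULL falls back to llama_log_callback_default, i.e. plain stderr output, per the `log_callback ? log_callback : llama_log_callback_default` assignment above. The sketch below is illustrative only: it assumes the pre-existing llama_log_callback typedef has the same (int level, const char * text, void * user_data) shape as llama_log_callback_default, and the callback name and log-file path are hypothetical.

// my_log_callback.cpp -- hypothetical caller-side sketch, not part of this diff
#include <cstdio>
#include "llama.h"

// Mirror every log line into a file while still surfacing non-INFO messages on stderr.
// Signature follows llama_log_callback_default(int level, const char * text, void * ctx).
static void my_log_callback(int level, const char * text, void * user_data) {
    FILE * logfile = (FILE *) user_data;
    if (level != LLAMA_LOG_LEVEL_INFO) {
        fprintf(stderr, "%s\n", text);      // warnings and errors stay visible on stderr
    }
    fprintf(logfile, "[%d] %s\n", level, text);
    fflush(logfile);
}

int main() {
    FILE * logfile = fopen("llama.log", "a");
    // Register before llama_load_model_from_file() so load-time messages are captured.
    llama_log_set(my_log_callback, logfile);
    // ... llama_load_model_from_file(...), llama_new_context_with_model(...), etc.
    llama_log_set(nullptr, nullptr);        // restore the default stderr logger
    fclose(logfile);
    return 0;
}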