Add back all the new lines in the logging strings

Author: grahameth
Date:   2023-08-02 00:50:31 +02:00
Parent: e39e45493c
Commit: c857a33b19

Changed file: llama.cpp (128 changed lines)
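The reason for the change is visible in the last hunk of the diff: llama_log_callback_default no longer appends a newline itself (fprintf(stderr, "%s", text) instead of "%s\n"), so every format string passed to LLAMA_LOG_INFO/WARN/ERROR now has to carry its own trailing \n. Below is a minimal sketch of that interplay, assuming simplified stand-ins for llama_log_internal and the default callback; the real functions dispatch through a user-settable log callback and handle arbitrarily long messages.

    // Minimal sketch (not the actual llama.cpp implementation): simplified
    // stand-ins for llama_log_internal and llama_log_callback_default.
    #include <cstdarg>
    #include <cstdio>

    // After this commit the default callback prints the text verbatim,
    // without adding a newline of its own.
    static void log_callback_default(const char * text, void * /*user_data*/) {
        fprintf(stderr, "%s", text);
    }

    // Format the message into a buffer and hand the finished string to the
    // callback, roughly what llama_log_internal does.
    static void log_internal(const char * format, ...) {
        char buf[1024];
        va_list args;
        va_start(args, format);
        vsnprintf(buf, sizeof(buf), format, args);
        va_end(args);
        log_callback_default(buf, nullptr);
    }

    int main() {
        // The trailing "\n" must now live in the format string itself,
        // exactly as the hunks below add it back to every logging call.
        log_internal("llama.cpp: loading model from %s\n", "model.bin");
        return 0;
    }

Moving the newline out of the callback and into the strings means a custom log callback receives each message exactly as written, which matters for sinks that are not line-oriented (files, GUIs, network loggers). The full diff follows.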

@@ -519,7 +519,7 @@ struct llama_file_loader {
 llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map)
 : file(fname, "rb") {
-LLAMA_LOG_INFO("llama.cpp: loading model from %s", fname);
+LLAMA_LOG_INFO("llama.cpp: loading model from %s\n", fname);
 read_magic();
 read_hparams();
 read_vocab();
@@ -634,7 +634,7 @@ struct llama_file_saver {
 llama_file_loader * any_file_loader;
 llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
 : file(fname, "wb"), any_file_loader(any_file_loader) {
-LLAMA_LOG_INFO("llama.cpp: saving model to %s", fname);
+LLAMA_LOG_INFO("llama.cpp: saving model to %s\n", fname);
 write_magic();
 write_hparams(new_ftype);
 write_vocab();
@@ -655,7 +655,7 @@ struct llama_file_saver {
 }
 void write_vocab() {
 if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
-LLAMA_LOG_WARN("llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores");
+LLAMA_LOG_WARN("llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
 }
 uint32_t n_vocab = any_file_loader->hparams.n_vocab;
 for (uint32_t i = 0; i < n_vocab; i++) {
@@ -846,7 +846,7 @@ struct llama_model_loader {
 uint8_t byte = lt.data[i];
 sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
 }
-LLAMA_LOG_INFO("%s checksum: %#08x (%s, size %zu)", lt.name.c_str(), sum,
+LLAMA_LOG_INFO("%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
 llama_format_tensor_shape(lt.ne).c_str(), lt.size);
 }
@@ -879,7 +879,7 @@ static bool kv_cache_init(
 cache.ctx = ggml_init(params);
 if (!cache.ctx) {
-LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache", __func__);
+LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
 return false;
 }
@@ -1091,7 +1091,7 @@ static void llama_model_load_internal(
 LLAMA_ASSERT(hparams.n_head % n_gqa == 0);
 hparams.n_head_kv = hparams.n_head / n_gqa;
 if (model.type == e_model::MODEL_65B && n_gqa == 8) {
-LLAMA_LOG_WARN("%s: warning: assuming 70B model based on GQA == %d", __func__, n_gqa);
+LLAMA_LOG_WARN("%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
 model.type = e_model::MODEL_70B;
 hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model
 }
@@ -1150,7 +1150,7 @@ static void llama_model_load_internal(
 size_t ctx_size;
 size_t mmapped_size;
 ml->calc_sizes(&ctx_size, &mmapped_size);
-LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB", __func__, ctx_size/1024.0/1024.0);
+LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
 // create the ggml context
 {
@@ -1175,13 +1175,13 @@ static void llama_model_load_internal(
 (void) main_gpu;
 (void) mul_mat_q;
 #if defined(GGML_USE_CUBLAS)
-LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration", __func__);
+LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration\n", __func__);
 ggml_cuda_set_main_device(main_gpu);
 ggml_cuda_set_mul_mat_q(mul_mat_q);
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
 #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
 #elif defined(GGML_USE_CLBLAST)
-LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration", __func__);
+LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
 #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
 #else
@@ -1286,14 +1286,14 @@ static void llama_model_load_internal(
 const size_t mem_required_state =
 scale*hparams.kv_size();
-LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)", __func__,
+LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
 mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
 (void) vram_scratch;
 (void) n_batch;
 #ifdef GGML_USE_CUBLAS
 if (low_vram) {
-LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option", __func__);
+LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
 ggml_cuda_set_scratch_size(0); // disable scratch
 } else {
 const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type);
@@ -1301,7 +1301,7 @@ static void llama_model_load_internal(
 vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context);
 ggml_cuda_set_scratch_size(vram_scratch);
 if (n_gpu_layers > 0) {
-LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer",
+LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
 __func__, vram_scratch_base / kB, vram_scratch_per_context,
 (vram_scratch + MB - 1) / MB); // round up
 }
@@ -1311,9 +1311,9 @@ static void llama_model_load_internal(
 #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
 const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
-LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU", __func__, n_gpu);
+LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
 if (n_gpu_layers > (int) hparams.n_layer) {
-LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU", __func__);
+LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
 }
 size_t vram_kv_cache = 0;
@@ -1322,17 +1322,17 @@ static void llama_model_load_internal(
 const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
 if (n_gpu_layers > (int) hparams.n_layer + 1) {
 if (low_vram) {
-LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option", __func__);
+LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
 } else {
-LLAMA_LOG_INFO("%s: offloading v cache to GPU", __func__);
+LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
 vram_kv_cache += hparams.kv_size() / 2;
 }
 }
 if (n_gpu_layers > (int) hparams.n_layer + 2) {
 if (low_vram) {
-LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option", __func__);
+LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
 } else {
-LLAMA_LOG_INFO("%s: offloading k cache to GPU", __func__);
+LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
 vram_kv_cache += hparams.kv_size() / 2;
 }
 }
@@ -1341,9 +1341,9 @@ static void llama_model_load_internal(
 const int max_offloadable_layers = hparams.n_layer + 1;
 #endif // GGML_USE_CUBLAS
-LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU",
+LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n",
 __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
-LLAMA_LOG_INFO("%s: total VRAM used: %zu MB",
+LLAMA_LOG_INFO("%s: total VRAM used: %zu MB\n",
 __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
 #else
 (void) n_gpu_layers;
@@ -1402,7 +1402,7 @@ static bool llama_model_load(
 use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
 return true;
 } catch (const std::exception & err) {
-LLAMA_LOG_ERROR("error loading model: %s", err.what());
+LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
 return false;
 }
 }
@@ -3079,7 +3079,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 int nx = tensor.ne.at(0);
 int ny = tensor.ne.at(1);
 if (nx % QK_K != 0 || ny % QK_K != 0) {
-LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.",nx,ny,QK_K);
+LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K);
 convert_incompatible_tensor = true;
 }
 }
@@ -3216,7 +3216,7 @@ struct llama_model * llama_load_model_from_file(
 params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
 memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
 params.progress_callback_user_data)) {
-LLAMA_LOG_ERROR("%s: failed to load model", __func__);
+LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
 delete model;
 return nullptr;
 }
@@ -3267,14 +3267,14 @@ struct llama_context * llama_new_context_with_model(
 // reserve memory for context buffers
 if (!params.vocab_only) {
 if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
-LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache", __func__);
+LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
 llama_free(ctx);
 return nullptr;
 }
 {
 const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
-LLAMA_LOG_INFO("%s: kv self size = %7.2f MB", __func__, memory_size / 1024.0 / 1024.0);
+LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
 }
 const auto & hparams = ctx->model.hparams;
@@ -3353,11 +3353,11 @@ struct llama_context * llama_new_context_with_model(
 LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
 #define LLAMA_METAL_CHECK_BUF(result) \
 if (!(result)) { \
-LLAMA_LOG_ERROR("%s: failed to add buffer", __func__); \
+LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
 llama_free(ctx); \
 return NULL; \
 }
 LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
@@ -3411,19 +3411,19 @@ int llama_model_quantize(
 llama_model_quantize_internal(fname_inp, fname_out, params);
 return 0;
 } catch (const std::exception & err) {
-LLAMA_LOG_ERROR("%s: failed to quantize: %s", __func__, err.what());
+LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
 return 1;
 }
 }
 int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
-LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...", __func__, path_lora);
+LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
 const int64_t t_start_lora_us = ggml_time_us();
 auto fin = std::ifstream(path_lora, std::ios::binary);
 if (!fin) {
-LLAMA_LOG_ERROR("%s: failed to open '%s'", __func__, path_lora);
+LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
 return 1;
 }
@@ -3432,14 +3432,14 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 uint32_t magic;
 fin.read((char *) &magic, sizeof(magic));
 if (magic != LLAMA_FILE_MAGIC_GGLA) {
-LLAMA_LOG_ERROR("%s: bad file magic", __func__);
+LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
 return 1;
 }
 uint32_t format_version;
 fin.read((char *) &format_version, sizeof(format_version));
 if (format_version != 1) {
-LLAMA_LOG_ERROR("%s: unsupported file version", __func__ );
+LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
 return 1;
 }
 }
@@ -3450,7 +3450,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 fin.read((char *) &lora_alpha, sizeof(lora_alpha));
 float scaling = (float)lora_alpha / (float)lora_r;
-LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f", __func__, lora_r, lora_alpha, scaling);
+LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
 // create a temporary ggml context to store the lora tensors
@@ -3476,7 +3476,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 ggml_context * base_ctx = NULL;
 llama_buffer base_buf;
 if (path_base_model) {
-LLAMA_LOG_INFO("%s: loading base model from '%s'", __func__, path_base_model);
+LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
 model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
 size_t ctx_size;
@@ -3533,17 +3533,17 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 const std::string lora_suffix = ".lora";
 size_t pos = name.rfind(lora_suffix);
 if (pos == std::string::npos) {
-LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor", __func__, name.c_str());
+LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
 return 1;
 }
 std::string lora_type = name.substr(pos + lora_suffix.length());
 std::string base_name = name;
 base_name.erase(pos);
-// LLAMA_LOG_INFO("%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
+// LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
 if (model_tensors.find(base_name) == model_tensors.end()) {
-LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter", __func__, name.data());
+LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
 return 1;
 }
@@ -3554,7 +3554,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 case 1: wtype = GGML_TYPE_F16; break;
 default:
 {
-LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'",
+LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
 __func__, ftype);
 return false;
 }
@@ -3564,7 +3564,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
 }
 else {
-LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d", __func__, n_dims);
+LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
 return 1;
 }
 ggml_set_name(lora_tensor, "lora_tensor");
@@ -3602,7 +3602,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 if (model_loader) {
 // load from base model
 if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
-LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model", __func__, base_name.c_str());
+LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
 return 1;
 }
 size_t idx = model_loader->tensors_map.name_to_idx[base_name];
@@ -3619,7 +3619,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 if (ggml_is_quantized(base_t->type)) {
 if (!warned) {
 LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
-"use a f16 or f32 base model with --lora-base", __func__);
+"use a f16 or f32 base model with --lora-base\n", __func__);
 warned = true;
 }
 }
@@ -3634,7 +3634,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
 LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
-" are you sure that this adapter is for this model?", __func__, base_t->ne[0], loraA->ne[1]);
+" are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
 return 1;
 }
@@ -3691,7 +3691,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 }
 const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
-LLAMA_LOG_INFO(" done (%.2f ms)", t_lora_us / 1000.0);
+LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
 return 0;
 }
@@ -3700,7 +3700,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
 try {
 return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
 } catch (const std::exception & err) {
-LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s", __func__, err.what());
+LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
 return 1;
 }
 }
@@ -3709,7 +3709,7 @@ int llama_model_apply_lora_from_file(const struct llama_model * model, const cha
 try {
 return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
 } catch (const std::exception & err) {
-LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s", __func__, err.what());
+LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
 return 1;
 }
 }
@@ -3972,7 +3972,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
 const uint32_t version = file.read_u32();
 if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
-LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x", __func__, magic, version);
+LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
 return false;
 }
@@ -3980,7 +3980,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
 file.read_raw(&session_hparams, sizeof(llama_hparams));
 if (session_hparams != ctx->model.hparams) {
-LLAMA_LOG_INFO("%s : model hparams didn't match from session file!", __func__);
+LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
 return false;
 }
 }
@@ -3990,7 +3990,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
 const uint32_t n_token_count = file.read_u32();
 if (n_token_count > n_token_capacity) {
-LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu", __func__, n_token_count, n_token_capacity);
+LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
 return false;
 }
@@ -4004,7 +4004,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
 const size_t n_state_size_max = llama_get_state_size(ctx);
 if (n_state_size_cur > n_state_size_max) {
-LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu", __func__, n_state_size_max, n_state_size_cur);
+LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
 return false;
 }
@@ -4021,7 +4021,7 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi
 try {
 return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
 } catch (const std::exception & err) {
-LLAMA_LOG_ERROR("error loading session file: %s", err.what());
+LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
 return false;
 }
 }
@@ -4058,7 +4058,7 @@ int llama_eval(
 int n_past,
 int n_threads) {
 if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
-LLAMA_LOG_ERROR("%s: failed to eval", __func__);
+LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
 return 1;
 }
@@ -4080,7 +4080,7 @@ int llama_eval_embd(
 int n_past,
 int n_threads) {
 if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
-LLAMA_LOG_ERROR("%s: failed to eval", __func__);
+LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
 return 1;
 }
@@ -4101,7 +4101,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) {
 const std::vector<llama_token> tmp(n_batch, llama_token_bos());
 if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
-LLAMA_LOG_ERROR("%s: failed to eval", __func__);
+LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
 return 1;
 }
@@ -4117,7 +4117,7 @@ int llama_tokenize_with_model(
 auto res = llama_tokenize(model->vocab, text, add_bos);
 if (n_max_tokens < (int) res.size()) {
-LLAMA_LOG_ERROR("%s: too many tokens", __func__);
+LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
 return -((int) res.size());
 }
@@ -4234,15 +4234,15 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) {
 void llama_print_timings(struct llama_context * ctx) {
 const llama_timings timings = llama_get_timings(ctx);
-LLAMA_LOG_INFO("");
+LLAMA_LOG_INFO("\n");
-LLAMA_LOG_INFO("%s: load time = %8.2f ms", __func__, timings.t_load_ms);
+LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
-LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)",
+LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
 __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
-LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)",
+LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
 __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
-LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)",
+LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
 __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
-LLAMA_LOG_INFO("%s: total time = %8.2f ms", __func__, (timings.t_end_ms - timings.t_start_ms));
+LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
 }
 void llama_reset_timings(struct llama_context * ctx) {
@@ -4316,5 +4316,5 @@ static void llama_log_internal(llama_log_level level, const char * format, ...)
 static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
 (void) level;
 (void) user_data;
-fprintf(stderr, "%s\n", text);
+fprintf(stderr, "%s", text);
 }