From 7bbe60576a68dc8f7715b298a1c94a02f87e1811 Mon Sep 17 00:00:00 2001
From: KerfuffleV2
Date: Tue, 5 Dec 2023 07:14:49 -0700
Subject: [PATCH] Update new GET_KEY call

Add note that metadata KV overrides aren't reflected in initial metadata
KV info dump
---
 llama.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 94f7d6bb3..b77020e10 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2056,6 +2056,7 @@ struct llama_model_loader {
             }
         }
 
+        LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
         for (int i = 0; i < n_kv; i++) {
             const char * name           = gguf_get_key(ctx_gguf, i);
             const enum gguf_type type   = gguf_get_kv_type(ctx_gguf, i);
@@ -2571,7 +2572,8 @@ static void llm_load_hparams(
             } break;
         case LLM_ARCH_QWEN:
             {
-                GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
                 switch (hparams.n_layer) {
                     case 32: model.type = e_model::MODEL_7B; break;
                     case 40: model.type = e_model::MODEL_13B; break;