From e276e4b6064ba0649aee0b1f5742e25f31e1c7a7 Mon Sep 17 00:00:00 2001
From: akawrykow
Date: Tue, 29 Aug 2023 13:54:53 -0700
Subject: [PATCH] Fix convert-falcon-hf-to-gguf.py for rw models

---
 convert-falcon-hf-to-gguf.py | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py
index 168bcf17f..5948fc788 100755
--- a/convert-falcon-hf-to-gguf.py
+++ b/convert-falcon-hf-to-gguf.py
@@ -80,7 +80,7 @@ print("gguf: loading model "+last_dir)
 with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
     hparams = json.load(f)
 
-if hparams["architectures"][0] != "RWForCausalLM":
+if hparams["architectures"][0] not in ("RWForCausalLM", "FalconForCausalLM"):
     print("Model architecture not supported: " + hparams["architectures"][0])
 
     sys.exit()
@@ -93,7 +93,25 @@ gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])
 
 print("gguf: get model metadata")
 
-block_count = hparams["n_layer"]
+if "n_layer" in hparams:
+    block_count = hparams["n_layer"]
+elif "num_hidden_layers" in hparams:
+    block_count = hparams["num_hidden_layers"]
+else:
+    print("No block count found")
+
+    sys.exit()
+
+if "n_head" in hparams:
+    n_head = hparams["n_head"]
+elif "num_attention_heads" in hparams:
+    n_head = hparams["num_attention_heads"]
+else:
+    print("No head count found")
+
+    sys.exit()
+
+n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1
 
 gguf_writer.add_name("Falcon")
 gguf_writer.add_context_length(2048) # not in config.json
@@ -101,11 +119,8 @@ gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform
 gguf_writer.add_embedding_length(hparams["hidden_size"])
 gguf_writer.add_feed_forward_length(4 * hparams["hidden_size"])
 gguf_writer.add_block_count(block_count)
-gguf_writer.add_head_count(hparams["n_head"])
-if "n_head_kv" in hparams:
-    gguf_writer.add_head_count_kv(hparams["n_head_kv"])
-else:
-    gguf_writer.add_head_count_kv(1)
+gguf_writer.add_head_count(n_head)
+gguf_writer.add_head_count_kv(n_head_kv)
 gguf_writer.add_layer_norm_eps(hparams["layer_norm_epsilon"])
 gguf_writer.add_file_type(ftype)
 
@@ -190,9 +205,6 @@ if "pad_token_id" in hparams and hparams["pad_token_id"] != None:
 tensor_map = gguf.get_tensor_name_map(ARCH,block_count)
 
 # params for qkv transform
-n_head = hparams["n_head"]
-n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1
-
 head_dim = hparams["hidden_size"] // n_head
 
 # tensor info
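
For reference, a minimal standalone sketch (not part of the patch) of the key-name fallback the change introduces: RW-style configs ("RWForCausalLM") expose n_layer / n_head / n_head_kv, while HF-style FalconForCausalLM configs use num_hidden_layers / num_attention_heads. The sample config dicts and the resolve_hparams helper below are hypothetical illustrations; the patch itself uses explicit if/elif chains and exits via sys.exit() rather than raising.

# Hypothetical sample configs (illustrative values, not taken from real model repos):
rw_style = {"n_layer": 32, "n_head": 71, "n_head_kv": 1}            # RWForCausalLM style
hf_style = {"num_hidden_layers": 60, "num_attention_heads": 128}    # FalconForCausalLM style

def resolve_hparams(hparams):
    # Prefer the RW-style keys, then fall back to the HF-style names.
    block_count = hparams.get("n_layer", hparams.get("num_hidden_layers"))
    n_head = hparams.get("n_head", hparams.get("num_attention_heads"))
    if block_count is None or n_head is None:
        raise ValueError("No block count / head count found in config")
    # Multi-query attention models default to a single KV head,
    # matching the patch's fallback of n_head_kv = 1.
    n_head_kv = hparams.get("n_head_kv", 1)
    return block_count, n_head, n_head_kv

print(resolve_hparams(rw_style))  # -> (32, 71, 1)
print(resolve_hparams(hf_style))  # -> (60, 128, 1)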