convert-llama-hf-to-gguf.py : fixes

klosax 2023-08-17 21:43:48 +02:00 committed by GitHub
parent 640ddc4259
commit 9e2d4dd48e

@@ -86,8 +86,8 @@ if hparams["architectures"][0] != "LlamaForCausalLM":
 # get number of model parts
 num_parts = count_model_parts(dir_model)

-gguf_writer = gguf.GGUFWriter(fname_out, arch="llama")
+ARCH=gguf.MODEL_ARCH.LLAMA
+gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])

 print("gguf: get model metadata")
@@ -214,7 +214,7 @@ if Path(dir_model + "/tokenizer.json").is_file():
 # TENSORS

-tensor_map = gguf.get_tensor_name_map(block_count)
+tensor_map = gguf.get_tensor_name_map(ARCH,block_count)

 # tensor info
 print("gguf: get tensor metadata")
@@ -237,6 +237,8 @@ for part_name in part_names:
         if name.endswith(".rotary_emb.inv_freq"):
             continue

+        old_dtype = data.dtype
+
         # convert any unsupported data types to float32
         if data.dtype != torch.float16 and data.dtype != torch.float32:
             data = data.to(torch.float32)
@@ -254,26 +256,22 @@ for part_name in part_names:
             name = tensor_map[name[:-5]] + ".bias"
         else:
             print("Can not map tensor '" + name + "'")
             sys.exit()

         n_dims = len(data.shape)
         data_dtype = data.dtype
-        old_dtype = data_dtype

         # if f32 desired, convert any float16 to float32
-        if ftype == 0 and data.dtype == np.float16:
-            data_dtype = np.float32
+        if ftype == 0 and data_dtype == np.float16:
+            data = data.astype(np.float32)

         # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
         if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-            data_dtype = np.float32
+            data = data.astype(np.float32)

         # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data_dtype = np.float16
+        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+            data = data.astype(np.float16)

-        data = data.astype(data_dtype)
-
         print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))