From d08a1f48602e5d259cfb3df05ec1d84049994214 Mon Sep 17 00:00:00 2001
From: slaren
Date: Tue, 2 Apr 2024 18:14:57 +0200
Subject: [PATCH] convert-hf-to-gguf.py : update grok (untested)

---
 convert-hf-to-gguf.py          | 87 ++++++++++++++++++++++++++++++++++
 gguf-py/gguf/tensor_mapping.py |  9 ++--
 2 files changed, 90 insertions(+), 6 deletions(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 16fc9ce71..f45b145a4 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1261,6 +1261,7 @@ class LlamaModel(Model):
                                 del experts[ename]
 
                             data = np.stack(datas, axis=0)
+                            data_dtype = data.dtype
 
                             if self.ftype == 0 and data_dtype == np.float16:
                                 data = data.astype(np.float32)
@@ -1323,6 +1324,92 @@ class GrokModel(Model):
         super().set_gguf_parameters()
         self.gguf_writer.add_name("Grok")
 
+    def write_tensors(self):
+        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
+        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+        n_experts = self.hparams.get("num_local_experts")
+        experts = dict()
+        for name, data_torch in self.get_tensors():
+            # we don't need these
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+                continue
+
+            old_dtype = data_torch.dtype
+
+            # convert any unsupported data types to float32
+            if data_torch.dtype not in (torch.float16, torch.float32):
+                data_torch = data_torch.to(torch.float32)
+
+            data = data_torch.squeeze().numpy()
+
+            # process the experts separately
+            if name.find(".moe.") != -1:
+                experts[name] = data
+                if len(experts) >= n_experts:
+                    # merge the experts into a single 3d tensor
+                    for bid in range(block_count):
+                        for wid in ["linear", "linear_1", "linear_v"]:
+                            full = True
+                            for xid in range(n_experts):
+                                ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}"
+                                if ename not in experts:
+                                    full = False
+                                    break
+                            if not full:
+                                continue
+
+                            datas = []
+                            for xid in range(n_experts):
+                                ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}"
+                                datas.append(experts[ename])
+                                del experts[ename]
+
+                            data = np.stack(datas, axis=0)
+                            data_dtype = data.dtype
+
+                            if self.ftype == 0 and data_dtype == np.float16:
+                                data = data.astype(np.float32)
+
+                            if self.ftype == 1 and data_dtype == np.float32:
+                                data = data.astype(np.float16)
+
+                            merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
+
+                            new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
+                            if new_name is None:
+                                print(f"Can not map tensor {name!r}")
+                                sys.exit()
+
+                            print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+
+                            self.gguf_writer.add_tensor(new_name, data)
+                continue
+
+            # map tensor names
+            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
+            if new_name is None:
+                print(f"Can not map tensor {name!r}")
+                sys.exit()
+
+            n_dims = len(data.shape)
+            data_dtype = data.dtype
+
+            # if f32 desired, convert any float16 to float32
+            if self.ftype == 0 and data_dtype == np.float16:
+                data = data.astype(np.float32)
+
+            # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
+            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+                data = data.astype(np.float32)
+
+            # if f16 desired, convert any float32 2-dim weight tensors to float16
+            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+                data = data.astype(np.float16)
+
+            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+
+            self.gguf_writer.add_tensor(new_name, data)
+
 
 @Model.register("MiniCPMForCausalLM")
 class MiniCPMModel(Model):
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index 1dfe91078..93a5a455e 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -232,8 +232,7 @@ class TensorNameMap:
 
         MODEL_TENSOR.FFN_UP_EXP: (
             "layers.{bid}.feed_forward.experts.w3",         # mixtral (merged)
-            #"model.layers.{bid}.block_sparse_moe.experts.{xid}.w3", # mixtral
-            #"transformer.decoder_layer.{bid}.moe.{xid}.linear_v",   # Grok
+            "transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged)
         ),
 
         # AWQ-activation gate
@@ -253,8 +252,7 @@ class TensorNameMap:
 
         MODEL_TENSOR.FFN_GATE_EXP: (
             "layers.{bid}.feed_forward.experts.w1",        # mixtral (merged)
-            #"model.layers.{bid}.block_sparse_moe.experts.{xid}.w1", # mixtral
-            #"transformer.decoder_layer.{bid}.moe.{xid}.linear"      # Grok
+            "transformer.decoder_layer.{bid}.moe.linear"   # Grok (merged)
         ),
 
         # Feed-forward down
@@ -281,8 +279,7 @@ class TensorNameMap:
 
         MODEL_TENSOR.FFN_DOWN_EXP: (
             "layers.{bid}.feed_forward.experts.w2",         # mixtral (merged)
-            #"model.layers.{bid}.block_sparse_moe.experts.{xid}.w2", # mixtral
-            #"transformer.decoder_layer.{bid}.moe.{xid}.linear_1",   # Grok
+            "transformer.decoder_layer.{bid}.moe.linear_1", # Grok (merged)
         ),
 
         MODEL_TENSOR.ATTN_Q_NORM: (
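
Note (commentary, not part of the patch): a minimal standalone sketch of the
expert-merging step in write_tensors above, using hypothetical shapes and a
single block/projection pair. Each expert's 2D weight is collected under its
per-expert name and, once all experts for a given (block, projection) pair
have been seen, stacked into one 3D tensor of shape (n_experts, rows, cols)
to be stored under the merged name:

    import numpy as np

    n_experts = 8       # hypothetical num_local_experts
    rows, cols = 3, 4   # hypothetical projection shape

    # stand-ins for the per-expert tensors yielded by get_tensors()
    experts = {
        f"transformer.decoder_layer.0.moe.{xid}.linear_v":
            np.full((rows, cols), xid, dtype=np.float16)
        for xid in range(n_experts)
    }

    # merge once every expert for this block/projection is present
    datas = [experts.pop(f"transformer.decoder_layer.0.moe.{xid}.linear_v")
             for xid in range(n_experts)]
    merged = np.stack(datas, axis=0)

    print(merged.shape, merged.dtype)  # (8, 3, 4) float16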
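The non-expert path then applies the usual convert-script dtype policy. As a
rough standalone restatement (a sketch under the same ftype convention, not
code lifted from the patch):

    import numpy as np

    def convert_dtype(data: np.ndarray, name: str, ftype: int) -> np.ndarray:
        n_dims = data.ndim
        # ftype == 0 (f32 output): upcast any f16
        if ftype == 0 and data.dtype == np.float16:
            return data.astype(np.float32)
        # ftype == 1 (f16 output): keep 1-dim tensors (norms, biases) in f32
        if ftype == 1 and data.dtype == np.float16 and n_dims == 1:
            return data.astype(np.float32)
        # ftype == 1: downcast 2-dim .weight tensors to f16
        if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
            return data.astype(np.float16)
        return data

    print(convert_dtype(np.zeros(16, dtype=np.float16), "blk.0.ffn_norm.weight", 1).dtype)  # float32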
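On the tensor_mapping.py side, the per-expert entries with the {xid}
placeholder are replaced by merged names, so only the 3D tensors resolve. A
quick way to check this (assumes the gguf-py from this tree is importable;
the printed GGUF-side names depend on the MODEL_ARCH.GROK tables):

    import gguf

    tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.GROK, 64)
    for wid in ["linear", "linear_1", "linear_v"]:
        name = f"transformer.decoder_layer.0.moe.{wid}.weight"
        print(name, "->", tensor_map.get_name(name, try_suffixes=(".weight", ".bias")))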