convert-hf-to-gguf.py : update grok (untested)
commit d08a1f4860 (parent f27cbf3610)
2 changed files with 90 additions and 6 deletions
convert-hf-to-gguf.py

@@ -1261,6 +1261,7 @@ class LlamaModel(Model):
                                 del experts[ename]
 
                             data = np.stack(datas, axis=0)
+                            data_dtype = data.dtype
 
                             if self.ftype == 0 and data_dtype == np.float16:
                                 data = data.astype(np.float32)
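The one added line above defines data_dtype from the freshly stacked expert tensor, so the ftype checks that follow look at the dtype of the merged array rather than an earlier value. A minimal standalone sketch of that pattern (plain numpy, toy shapes, not taken from the converter):

import numpy as np

# two toy expert weight matrices stored as float16
datas = [np.zeros((4, 8), dtype=np.float16), np.ones((4, 8), dtype=np.float16)]

data = np.stack(datas, axis=0)   # shape (n_experts, rows, cols)
data_dtype = data.dtype          # read the dtype of the merged tensor

ftype = 0                        # 0 means f32 output, as in the converter
if ftype == 0 and data_dtype == np.float16:
    data = data.astype(np.float32)

print(data.shape, data.dtype)    # (2, 4, 8) float32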
@@ -1323,6 +1324,92 @@ class GrokModel(Model):
         super().set_gguf_parameters()
         self.gguf_writer.add_name("Grok")
 
+    def write_tensors(self):
+        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
+        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+        n_experts = self.hparams.get("num_local_experts")
+        experts = dict()
+        for name, data_torch in self.get_tensors():
+            # we don't need these
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+                continue
+
+            old_dtype = data_torch.dtype
+
+            # convert any unsupported data types to float32
+            if data_torch.dtype not in (torch.float16, torch.float32):
+                data_torch = data_torch.to(torch.float32)
+
+            data = data_torch.squeeze().numpy()
+
+            # process the experts separately
+            if name.find(".moe.") != -1:
+                experts[name] = data
+                if len(experts) >= n_experts:
+                    # merge the experts into a single 3d tensor
+                    for bid in range(block_count):
+                        for wid in ["linear", "linear_1", "linear_v"]:
+                            full = True
+                            for xid in range(n_experts):
+                                ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}"
+                                if ename not in experts:
+                                    full = False
+                                    break
+                            if not full:
+                                continue
+
+                            datas = []
+                            for xid in range(n_experts):
+                                ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}"
+                                datas.append(experts[ename])
+                                del experts[ename]
+
+                            data = np.stack(datas, axis=0)
+                            data_dtype = data.dtype
+
+                            if self.ftype == 0 and data_dtype == np.float16:
+                                data = data.astype(np.float32)
+
+                            if self.ftype == 1 and data_dtype == np.float32:
+                                data = data.astype(np.float16)
+
+                            merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
+
+                            new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
+                            if new_name is None:
+                                print(f"Can not map tensor {name!r}")
+                                sys.exit()
+
+                            print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+
+                            self.gguf_writer.add_tensor(new_name, data)
+                continue
+
+            # map tensor names
+            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
+            if new_name is None:
+                print(f"Can not map tensor {name!r}")
+                sys.exit()
+
+            n_dims = len(data.shape)
+            data_dtype = data.dtype
+
+            # if f32 desired, convert any float16 to float32
+            if self.ftype == 0 and data_dtype == np.float16:
+                data = data.astype(np.float32)
+
+            # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
+            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+                data = data.astype(np.float32)
+
+            # if f16 desired, convert any float32 2-dim weight tensors to float16
+            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+                data = data.astype(np.float16)
+
+            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+
+            self.gguf_writer.add_tensor(new_name, data)
+
+
 @Model.register("MiniCPMForCausalLM")
 class MiniCPMModel(Model):
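As a reference for the merge performed by the new GrokModel.write_tensors above: each block contributes n_experts per-expert tensors named transformer.decoder_layer.{bid}.moe.{xid}.{wid}, and they are stacked into one 3D tensor registered under the merged name transformer.decoder_layer.{bid}.moe.{wid}.weight. A self-contained sketch with toy shapes (no GGUF writer involved, values are illustrative):

import numpy as np

n_experts, block_count = 2, 1
wids = ["linear", "linear_1", "linear_v"]

# toy per-expert tensors keyed like the Grok checkpoint names used above
experts = {
    f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}": np.ones((4, 8), dtype=np.float32)
    for bid in range(block_count)
    for xid in range(n_experts)
    for wid in wids
}

for bid in range(block_count):
    for wid in wids:
        datas = [experts[f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}"] for xid in range(n_experts)]
        merged = np.stack(datas, axis=0)  # shape (n_experts, 4, 8)
        merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
        print(merged_name, merged.shape)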
gguf-py/gguf/tensor_mapping.py

@@ -232,8 +232,7 @@ class TensorNameMap:
         MODEL_TENSOR.FFN_UP_EXP: (
             "layers.{bid}.feed_forward.experts.w3",          # mixtral (merged)
-            #"model.layers.{bid}.block_sparse_moe.experts.{xid}.w3", # mixtral
-            #"transformer.decoder_layer.{bid}.moe.{xid}.linear_v", # Grok
+            "transformer.decoder_layer.{bid}.moe.linear_v",   # Grok (merged)
         ),
 
         # AWQ-activation gate
@@ -253,8 +252,7 @@ class TensorNameMap:
         MODEL_TENSOR.FFN_GATE_EXP: (
             "layers.{bid}.feed_forward.experts.w1",          # mixtral (merged)
-            #"model.layers.{bid}.block_sparse_moe.experts.{xid}.w1", # mixtral
-            #"transformer.decoder_layer.{bid}.moe.{xid}.linear" # Grok
+            "transformer.decoder_layer.{bid}.moe.linear"      # Grok (merged)
         ),
 
         # Feed-forward down
@@ -281,8 +279,7 @@ class TensorNameMap:
         MODEL_TENSOR.FFN_DOWN_EXP: (
             "layers.{bid}.feed_forward.experts.w2",          # mixtral (merged)
-            #"model.layers.{bid}.block_sparse_moe.experts.{xid}.w2", # mixtral
-            #"transformer.decoder_layer.{bid}.moe.{xid}.linear_1", # Grok
+            "transformer.decoder_layer.{bid}.moe.linear_1",   # Grok (merged)
         ),
 
         MODEL_TENSOR.ATTN_Q_NORM: (
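A hedged usage sketch of the mapping entries above: gguf.get_tensor_name_map builds the TensorNameMap for an architecture, and get_name resolves a merged Grok checkpoint name to its GGUF-side name. The exact output string depends on the MODEL_TENSOR naming table, and gguf.MODEL_ARCH.GROK plus the block count below are assumptions for illustration:

import gguf

block_count = 64  # hypothetical; the converter takes this from the model hparams
tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.GROK, block_count)

merged_name = "transformer.decoder_layer.0.moe.linear_v.weight"
new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
print(new_name)  # the FFN_UP_EXP name for block 0, or None if the name is unmapped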