diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 42b0fb66e..93d714a40 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1848,6 +1848,13 @@ class StarCoder2Model(Model):
 class MambaModel(Model):
     model_arch = gguf.MODEL_ARCH.MAMBA
 
+    def set_vocab(self):
+        vocab_size = self.hparams["vocab_size"]
+        # Round the vocab size up to the next multiple of pad_vocab_size_multiple (8 by default)
+        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
+        self.hparams["vocab_size"] = ((vocab_size + (pad_vocab - 1)) // pad_vocab) * pad_vocab
+        return self._set_vocab_gpt2()
+
     def set_gguf_parameters(self):
         d_model = self.hparams["d_model"]
         d_inner = self.hparams.get("d_inner", 2 * d_model)
diff --git a/llama.cpp b/llama.cpp
index 37ac7425d..0a6ff7736 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4682,15 +4682,13 @@ static bool llm_load_tensors(
                     // FIXME: ceiling instead of floor
                     const int64_t dt_rank = n_embd / 16;
                     GGML_ASSERT(2 * n_embd == d_inner);
-                    // round up the vocab size to the next multiple of 8
-                    const int64_t rounded_vocab = (n_vocab + 7) & -8;
-                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, rounded_vocab});
+                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
 
                     // output
                     {
                         model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
 
-                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, rounded_vocab});
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                     }
 
                     for (int i = 0; i < n_layer; ++i) {
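
For reference, a minimal standalone sketch (not part of the patch) of the rounding that the new set_vocab performs; the pad_vocab_size helper name and the sample sizes below are illustrative only:

# Illustration only -- shows how the ceiling-rounding expression in set_vocab
# pads a vocab size up to the next multiple of pad_vocab_size_multiple.
# Helper name and example values are hypothetical.
def pad_vocab_size(vocab_size: int, pad_vocab: int = 8) -> int:
    # Adding (pad_vocab - 1) before the integer division rounds up instead of down
    return ((vocab_size + (pad_vocab - 1)) // pad_vocab) * pad_vocab

assert pad_vocab_size(50277) == 50280  # rounded up to the next multiple of 8
assert pad_vocab_size(50280) == 50280  # already a multiple of 8, left unchanged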