mamba : fix vocab size problems with official models

The perplexity was way too high for models with a non-round vocab size.
The exact cause is unclear, but the fix is to round the vocab size in the
model's metadata at conversion time rather than at load time.

Note that this breaks existing GGUF-converted Mamba models,
but **only if** the vocab size was not already rounded.
Author: Francis Couture-Harpin
Date:   2024-02-04 09:49:23 -05:00
Commit: cd0f33f281 (parent 9f55809f72)
2 changed files with 9 additions and 4 deletions

convert-hf-to-gguf.py

@@ -1848,6 +1848,13 @@ class StarCoder2Model(Model):
 class MambaModel(Model):
     model_arch = gguf.MODEL_ARCH.MAMBA
 
+    def set_vocab(self):
+        vocab_size = self.hparams["vocab_size"]
+        # round the vocab size up to the next multiple of pad_vocab (8 by default)
+        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
+        self.hparams["vocab_size"] = ((vocab_size + (pad_vocab - 1)) // pad_vocab) * pad_vocab
+        return self._set_vocab_gpt2()
+
     def set_gguf_parameters(self):
         d_model = self.hparams["d_model"]
         d_inner = self.hparams.get("d_inner", 2 * d_model)

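For reference, a minimal standalone sketch of the rounding that set_vocab
performs above; the helper name is hypothetical, and the 50277 -> 50280
example matches the vocab size reported by the official Mamba configs
(which set pad_vocab_size_multiple = 8):

    # hypothetical helper, not part of the converter: round a vocab size
    # up to the next multiple of pad_to using ceiling division
    def pad_vocab_size(vocab_size: int, pad_to: int = 8) -> int:
        return ((vocab_size + (pad_to - 1)) // pad_to) * pad_to

    # official Mamba configs report vocab_size = 50277, but the checkpoint's
    # embedding matrix has 50280 rows (the next multiple of 8)
    assert pad_vocab_size(50277) == 50280
    assert pad_vocab_size(50280) == 50280  # already a multiple of 8: unchanged
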
llama.cpp

@@ -4682,15 +4682,13 @@ static bool llm_load_tensors(
                 // FIXME: ceiling instead of floor
                 const int64_t dt_rank = n_embd / 16;
                 GGML_ASSERT(2 * n_embd == d_inner);
-                // round up the vocab size to the next multiple of 8
-                const int64_t rounded_vocab = (n_vocab + 7) & -8;
 
-                model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, rounded_vocab});
+                model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
 
                 // output
                 {
                     model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
-                    model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, rounded_vocab});
+                    model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
                 }
 
                 for (int i = 0; i < n_layer; ++i) {
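
The deleted load-time rounding used a bit trick rather than ceiling
division: for a power-of-two multiple like 8, adding 7 and masking with -8
(two's complement) clears the low three bits, which yields the same result.
A quick illustrative check in Python, assuming only non-negative sizes:

    # the removed C++ expression (n_vocab + 7) & -8 agrees with the
    # converter's ceiling-division formula whenever the padding multiple
    # is a power of two and n_vocab is non-negative
    for n_vocab in (1, 7, 8, 9, 50277, 50280):
        bit_trick = (n_vocab + 7) & -8
        ceil_mult = ((n_vocab + 7) // 8) * 8
        assert bit_trick == ceil_mult

With the converter now writing the already-rounded vocab size into the GGUF
metadata, the loader can create both tensors with n_vocab directly.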