ggml : full ALiBi support (#7192)
* ggml : full ALiBi support
* ggml : update ggml_soft_max_ext() CUDA, SYCL
* ggml : ggml_flash_attn_ext() support ALiBi (CPU)
* ggml : ggml_flash_attn_ext() support ALiBi (Metal)
* ggml : fix warning
* ggml : ggml_flash_attn_ext() support ALiBi (CUDA)

ggml-ci

* ggml : fix assert message
* vulkan : add dev notes
* ggml : require mask when using ALiBi

ggml-ci

* convert : fix convert for refact models
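For background: ALiBi (Attention with Linear Biases) replaces positional embeddings with a per-head penalty on the attention logits that grows linearly with query–key distance, which is what ggml_soft_max_ext() and ggml_flash_attn_ext() now apply, and why the commit requires a mask whenever ALiBi is enabled. Below is a minimal NumPy sketch of the math only; the function and variable names are assumptions for illustration, not ggml's API, and the slopes use the power-of-two formula from the ALiBi paper rather than ggml's max_bias parameterization.

```python
import numpy as np

def alibi_slopes(n_heads):
    # Per-head slopes from the ALiBi paper: 2^(-8/n), 2^(-16/n), ...
    # (simple case, assumes n_heads is a power of two)
    return 2.0 ** (-8.0 * (np.arange(n_heads) + 1) / n_heads)

def soft_max_with_alibi(kq, mask, scale, n_heads):
    # kq:   (n_heads, n_q, n_kv) raw Q.K^T logits
    # mask: (n_q, n_kv) additive causal mask (0 or -inf); per the commit,
    #       a mask is mandatory whenever ALiBi is used
    n_q, n_kv = kq.shape[-2:]
    q_pos = np.arange(n_kv - n_q, n_kv)        # absolute query positions
    k_pos = np.arange(n_kv)                    # absolute key positions
    dist = q_pos[:, None] - k_pos[None, :]     # how far back each key is
    bias = -alibi_slopes(n_heads)[:, None, None] * dist  # linear penalty per head
    x = kq * scale + mask + bias
    x -= x.max(axis=-1, keepdims=True)         # numerical stability
    e = np.exp(x)
    return e / e.sum(axis=-1, keepdims=True)
```

When the slopes are zero the bias vanishes and this reduces to an ordinary masked softmax, so the same fused op can serve both ALiBi and non-ALiBi models.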
parent e849648888
commit 9cb317f77e
16 changed files with 350 additions and 825 deletions
@@ -1013,6 +1013,18 @@ class StarCoderModel(Model):
 class RefactModel(Model):
     model_arch = gguf.MODEL_ARCH.REFACT
 
+    def set_vocab(self):
+        super().set_vocab()
+
+        # TODO: how to determine special FIM tokens automatically?
+        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
+                                          special_token_types = ['prefix', 'suffix', 'middle', 'fsep', 'eot'])
+        special_vocab._set_special_token("prefix", 1)
+        special_vocab._set_special_token("suffix", 3)
+        special_vocab._set_special_token("middle", 2)
+        special_vocab._set_special_token("fsep", 4) # is this correct?
+        special_vocab.add_to_gguf(self.gguf_writer)
+
     def set_gguf_parameters(self):
         hidden_dim = self.hparams["n_embd"]
         inner_dim = 4 * hidden_dim
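The token IDs recorded above (prefix = 1, middle = 2, suffix = 3, fsep = 4) are what downstream code reads back from the GGUF file to assemble fill-in-the-middle prompts. A hypothetical sketch of that usage follows; build_fim_tokens and the tokenize callable are invented for illustration and are not part of the convert script:

```python
# Hypothetical illustration: assembling a Refact-style FIM prompt with the
# special token IDs written by the conversion above (prefix=1, middle=2, suffix=3).
PREFIX_ID, MIDDLE_ID, SUFFIX_ID = 1, 2, 3

def build_fim_tokens(tokenize, prefix_text, suffix_text):
    # tokenize: any callable mapping text -> list of token IDs (assumed).
    # The model is then asked to generate the "middle" after this sequence.
    return ([PREFIX_ID] + tokenize(prefix_text)
          + [SUFFIX_ID] + tokenize(suffix_text)
          + [MIDDLE_ID])
```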