fix for flake8 lint

vincent 2024-02-06 07:27:15 +08:00
parent b698d87e9a
commit 319ab9d18c
2 changed files with 5 additions and 1 deletion

@@ -24,6 +24,7 @@ import gguf
 
 from convert import HfVocab
 
+
 # check for any of the given keys in the dictionary and return the value of the first key found
 def get_key_opts(d, keys):
     for k in keys:
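
For context, the hunk above truncates the helper's body. A minimal sketch of what get_key_opts plausibly does, inferred only from its own comment ("return the value of the first key found"); the KeyError fallback is an assumption, not taken from this diff:

def get_key_opts(d, keys):
    # Return the value of the first key from `keys` that is present in `d`.
    for k in keys:
        if k in d:
            return d[k]
    # Assumed behavior when no key matches; the real helper may differ.
    raise KeyError(f"could not find any of the keys: {keys}")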
@@ -1070,6 +1071,7 @@ class MixtralModel(Model):
     def set_vocab(self):
         self._set_vocab_sentencepiece()
 
+
 class MiniCPMModel(Model):
     def set_gguf_parameters(self):
         block_count = self.hparams["num_hidden_layers"]
@@ -1083,9 +1085,11 @@ class MiniCPMModel(Model):
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
         self.gguf_writer.add_file_type(self.ftype)
         self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
+
     def set_vocab(self):
         self._set_vocab_hf()
 
+
 class QwenModel(Model):
     @staticmethod
     def token_bytes_to_string(b):
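
The add_rope_dimension_count call above derives the per-head dimension from the model hyperparameters. A worked example with hypothetical MiniCPM-like values (the numbers are illustrative, not taken from this diff):

hparams = {"hidden_size": 2304, "num_attention_heads": 36}
rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
# 2304 // 36 == 64: the per-head dimension passed to add_rope_dimension_count
assert rope_dim == 64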

@@ -466,7 +466,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
     ],
-    MODEL_ARCH.MINICPM: [
+    MODEL_ARCH.MINICPM: [
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
         MODEL_TENSOR.ROPE_FREQS,
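
The MINICPM entry above registers which tensor kinds the architecture uses. A minimal sketch of how such a mapping can be consulted, assuming the gguf-py package that defines MODEL_ARCH, MODEL_TENSOR, and MODEL_TENSORS is importable; the lookup itself is illustrative, not code from this commit:

from gguf.constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS

# Look up the tensor kinds declared for the MiniCPM architecture.
minicpm_tensors = MODEL_TENSORS[MODEL_ARCH.MINICPM]
print(MODEL_TENSOR.TOKEN_EMBD in minicpm_tensors)  # True once this entry lands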