Address review comments
This commit is contained in:
parent a913ca4cb9
commit a95a6d995d
1 changed file with 39 additions and 3 deletions
@@ -25,8 +25,6 @@ if 'NO_LOCAL_GGUF' not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
 import gguf
 
-from convert import LlamaHfVocab
-
 logger = logging.getLogger("hf-to-gguf")
 
 
@@ -634,7 +632,7 @@ class Model:
         special_vocab.add_to_gguf(self.gguf_writer)
 
     def _set_vocab_llama_hf(self):
-        vocab = LlamaHfVocab(self.dir_model)
+        vocab = gguf.LlamaHfVocab(self.dir_model)
         tokens = []
         scores = []
         toktypes = []
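With this hunk, the HF-tokenizer-backed vocab class is resolved through the gguf package instead of being imported from convert.py (see the import removed in the first hunk). A minimal usage sketch under that assumption; it reuses the sys.path setup from the first hunk, and the model directory is a hypothetical placeholder:

    import sys
    from pathlib import Path

    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))  # in-tree gguf-py, as above
    import gguf

    # Mirrors the diff's call site `gguf.LlamaHfVocab(self.dir_model)`;
    # "path/to/hf-model" is a stand-in for a local Hugging Face model directory.
    vocab = gguf.LlamaHfVocab(Path("path/to/hf-model"))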
@@ -675,6 +673,44 @@ class GPTNeoXModel(Model):
         self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True))
         self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])
 
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
+
+        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
+        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
+
+        tensors: list[tuple[str, Tensor]] = []
+
+        if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name):
+            # Map bloom-style qkv_linear to gpt-style qkv_linear
+            # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
+            # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
+            qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed))
+            data_torch = torch.cat(
+                (
+                    qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
+                    qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
+                    qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
+                ),
+                dim=0,
+            )
+            logger.info("re-format attention.linear_qkv.weight")
+        elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name):
+            qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head))
+            data_torch = torch.cat(
+                (
+                    qkv_bias[:, 0, :].reshape((n_embed,)),
+                    qkv_bias[:, 1, :].reshape((n_embed,)),
+                    qkv_bias[:, 2, :].reshape((n_embed,)),
+                ),
+                dim=0,
+            )
+            logger.info("re-format attention.linear_qkv.bias")
+
+        tensors.append((self.map_tensor_name(name), data_torch))
+
+        return tensors
+
 
 @Model.register("BloomForCausalLM")
 class BloomModel(Model):
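For context, the modify_tensors method added above converts GPT-NeoX's fused query_key_value tensors, where each head's Q, K, and V rows are interleaved, into the GPT-2-style layout GGUF expects: all Q rows first, then all K, then all V. A minimal standalone sketch of that re-ordering, assuming only torch; n_head, n_embed, and the tensor values are toy stand-ins, not taken from a real model:

    import torch

    n_head, n_embed = 2, 4  # toy sizes; head_dim = n_embed // n_head = 2

    # Fused NeoX-style QKV weight: shape (3 * n_embed, n_embed), with rows
    # grouped per head as [q, k, v] blocks rather than by projection.
    fused = torch.arange(3 * n_embed * n_embed, dtype=torch.float32).reshape(3 * n_embed, n_embed)

    # Expose the per-head (q, k, v) axis, then regroup rows by projection:
    # the same reshape + cat pattern used in the diff.
    qkv = fused.reshape(n_head, 3, n_embed // n_head, n_embed)
    reordered = torch.cat(
        (
            qkv[:, 0, :, :].reshape(-1, n_embed),  # every head's Q rows
            qkv[:, 1, :, :].reshape(-1, n_embed),  # every head's K rows
            qkv[:, 2, :, :].reshape(-1, n_embed),  # every head's V rows
        ),
        dim=0,
    )
    assert reordered.shape == fused.shape  # same rows, regrouped by Q/K/V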