Remove .attention from skipped tensor suffixes to match more accurately

commit f700301a3b
parent 6ecf3189e0
Author: colin
Date:   2024-05-02 16:35:18 -04:00


@@ -1427,7 +1427,7 @@ class LlamaModel(Model):
         experts = dict()
         for name, data_torch in self.get_tensors():
             # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                 continue
 
             old_dtype = data_torch.dtype
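
For context: str.endswith accepts a tuple and returns True if the name ends with any of the suffixes. A rotary-embedding tensor whose name lacks the .attention. segment (e.g. one living under a self_attn module) would not match the old suffix but does match the broader one. A minimal sketch of the matching behavior; the tensor names below are illustrative, not taken from any specific checkpoint:

    # Suffixes as they appear after this change.
    skipped_suffixes = (".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")

    names = [
        "model.layers.0.self_attn.rotary_emb.inv_freq",  # now skipped: ends with ".rotary_emb.inv_freq"
        "transformer.h.0.attn.attention.bias",           # skipped before and after the change
        "model.layers.0.self_attn.q_proj.weight",        # kept: matches no suffix
    ]

    for name in names:
        # endswith with a tuple checks every suffix in one call
        print(name, "-> skipped" if name.endswith(skipped_suffixes) else "-> kept")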