Remove .attention from skipped tensors to match more accurately (#7051)
parent 6ecf3189e0
commit 60325fa56f
1 changed file with 1 addition and 1 deletion
@@ -1427,7 +1427,7 @@ class LlamaModel(Model):
         experts = dict()
         for name, data_torch in self.get_tensors():
             # we don't need these
-            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+            if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
                 continue

             old_dtype = data_torch.dtype
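For context: the old suffix tuple only skipped inv_freq tensors that lived under a module literally named "attention", while the bare ".rotary_emb.inv_freq" suffix also matches other attention-module layouts. A minimal sketch of the str.endswith tuple matching involved, using hypothetical tensor names chosen for illustration (real names vary by model):

# Hypothetical tensor names; the second layout is missed by the old suffixes.
names = [
    "model.layers.0.attention.rotary_emb.inv_freq",
    "model.layers.0.self_attn.rotary_emb.inv_freq",
]

old_suffixes = (".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")
new_suffixes = (".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")

for name in names:
    # Before this commit only the first name was skipped; now both are.
    print(name, name.endswith(old_suffixes), name.endswith(new_suffixes))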