export norms as f32
This commit is contained in:
parent 26e8f23bf3
commit ce9413d849
1 changed file with 1 addition and 1 deletion
@@ -160,7 +160,7 @@ class Model(ABC):
             data = data.astype(np.float32)
 
         # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+        if self.ftype == 1 and data_dtype == np.float16 and (n_dims == 1 or new_name.endswith("_norm.weight")):
             data = data.astype(np.float32)
 
         # if f16 desired, convert any float32 2-dim weight tensors to float16
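For readers without the file open: this hunk sits in the conversion script's tensor-export path, where each tensor's dtype is decided just before it is written out. The sketch below restates that decision as a standalone function so the effect of the new name check is easier to see. It is a minimal sketch, not the script itself: choose_export_dtype is a hypothetical name, and the ftype == 0 and 2-dim float32 branches are inferred from the hunk's context lines and comments, not part of this diff.

import numpy as np

def choose_export_dtype(data: np.ndarray, new_name: str, ftype: int) -> np.ndarray:
    # Restatement of the export-dtype rules around this hunk.
    # ftype convention as read from the script: 0 = f32 output, 1 = f16 output.
    data_dtype = data.dtype
    n_dims = len(data.shape)

    # f32 output: promote float16 tensors (assumed from the first context line).
    if ftype == 0 and data_dtype == np.float16:
        data = data.astype(np.float32)

    # f16 output: 1-D tensors were already kept in f32; this commit additionally
    # keeps any tensor named like a norm weight in f32, regardless of rank.
    if ftype == 1 and data_dtype == np.float16 and (
        n_dims == 1 or new_name.endswith("_norm.weight")
    ):
        data = data.astype(np.float32)

    # f16 output: demote 2-dim float32 weight tensors to f16 (assumed from the
    # trailing comment in the hunk).
    if ftype == 1 and data_dtype == np.float32 and new_name.endswith(".weight") and n_dims == 2:
        data = data.astype(np.float16)

    return data

# Usage: with f16 output requested, a norm weight stays in f32 even when it is
# not 1-D, which is the case the new name check appears to cover:
norm = np.ones((8, 512), dtype=np.float16)
print(choose_export_dtype(norm, "blk.0.attn_norm.weight", ftype=1).dtype)  # float32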