diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index f332904b6..fa6d7f599 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -16,7 +16,6 @@
 import numpy as np
 import torch
 from collections import OrderedDict
-#TYPE_CHECKING = False

 if TYPE_CHECKING:
     from torch import Tensor
@@ -86,9 +85,6 @@ class Model:
     def write_tensors(self):
         block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
         tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        print ("-------------------")
-        for tt,_ in self.get_tensors():
-            print(tt)
         for name, data_torch in self.get_tensors():
             # we don't need these
             if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index 547a6e078..cf8d006fb 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -231,7 +231,6 @@ class TensorNameMap:
                     key = key.format(bid = bid)
                     self.mapping[key] = (tensor, tensor_name)
     def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
-        print("getting ", key)
         result = self.mapping.get(key)
         if result is not None:
             return result