From efead0408c211600412be5598bb23ca7129007fa Mon Sep 17 00:00:00 2001
From: Christian Zhou-Zheng
Date: Mon, 3 Jun 2024 19:34:01 -0400
Subject: [PATCH] fix gguf_writer placement and remove comments

---
 convert-hf-to-gguf.py        |  2 +-
 gguf-py/gguf/gguf_manager.py | 12 +-----------
 2 files changed, 2 insertions(+), 12 deletions(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index ff9c74ea9..3d8cdc811 100644
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -56,11 +56,11 @@ class Model:
     part_names: list[str]
     is_safetensors: bool
     hparams: dict[str, Any]
-    gguf_writer: gguf.GGUFManager
     block_count: int
     tensor_map: gguf.TensorNameMap
     tensor_names: set[str] | None
     fname_out: Path
+    gguf_writer: gguf.GGUFManager
 
     # subclasses should define this!
     model_arch: gguf.MODEL_ARCH
diff --git a/gguf-py/gguf/gguf_manager.py b/gguf-py/gguf/gguf_manager.py
index a60ce9867..cafe8abff 100644
--- a/gguf-py/gguf/gguf_manager.py
+++ b/gguf-py/gguf/gguf_manager.py
@@ -305,20 +305,10 @@ class GGUFManager:
             tensor.byteswap(inplace=True)
 
         # TODO reimplement temp file
-        #if self.use_temp_file and self.temp_file is None:
-        #    fp = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256 * 1024 * 1024)
-        #    fp.seek(0)
-        #    self.temp_file = fp
+        # I'm pretty sure it gets handled per shard?
 
         self.tensors.append((name, tensor, raw_dtype))
 
-        #if self.temp_file is None:
-        #    self.tensors.append(tensor)
-        #    return
-
-        #tensor.tofile(self.temp_file)
-        #self.write_padding(self.temp_file, tensor.nbytes)
-
     def close(self) -> None:
         for _, _, writer in self.split_strategy:
             writer.close()
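
For readers following the gguf_manager.py hunk: the removed lines were the old GGUFWriter-style temp-file buffering, and the new comment notes that buffering is expected to be handled per shard instead, with tensors kept in memory as (name, tensor, raw_dtype) tuples until the per-shard writers are closed. The sketch below illustrates that accumulate-in-memory, write-per-shard pattern only in outline; TensorManager, ShardWriter, the shard sizing, and the file names are hypothetical stand-ins, not the actual GGUFManager or GGUFWriter API.

```python
from __future__ import annotations

import numpy as np


class ShardWriter:
    """Hypothetical stand-in for a per-shard GGUF writer."""

    def __init__(self, fname: str) -> None:
        self.fname = fname
        self.tensors: list[tuple[str, np.ndarray]] = []

    def add_tensor(self, name: str, tensor: np.ndarray) -> None:
        self.tensors.append((name, tensor))

    def close(self) -> None:
        # A real writer would serialize the shard to disk here; this sketch just reports.
        print(f"{self.fname}: wrote {len(self.tensors)} tensors")


class TensorManager:
    """Accumulates tensors in memory, then hands them to per-shard writers on close."""

    def __init__(self, tensors_per_shard: int) -> None:
        self.tensors_per_shard = tensors_per_shard
        self.tensors: list[tuple[str, np.ndarray]] = []

    def add_tensor(self, name: str, tensor: np.ndarray) -> None:
        # No temp file: everything stays in self.tensors until the shards are written.
        self.tensors.append((name, tensor))

    def close(self) -> None:
        # Flush the buffered tensors shard by shard.
        for i in range(0, len(self.tensors), self.tensors_per_shard):
            writer = ShardWriter(f"model-{i // self.tensors_per_shard:05d}.gguf")
            for name, tensor in self.tensors[i:i + self.tensors_per_shard]:
                writer.add_tensor(name, tensor)
            writer.close()


manager = TensorManager(tensors_per_shard=2)
for idx in range(5):
    manager.add_tensor(f"blk.{idx}.weight", np.zeros((4, 4), dtype=np.float32))
manager.close()  # -> three shards holding 2 + 2 + 1 tensors
```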