From c2e5abd33dda14539b7f06d7ff0f02e0f835de28 Mon Sep 17 00:00:00 2001
From: brian khuu
Date: Thu, 18 Apr 2024 10:00:07 +1000
Subject: [PATCH] *.py: removed commented out logging

---
 convert-llama-ggml-to-gguf.py | 2 --
 convert.py                    | 1 -
 2 files changed, 3 deletions(-)

diff --git a/convert-llama-ggml-to-gguf.py b/convert-llama-ggml-to-gguf.py
index 11ab69437..5496a50ef 100755
--- a/convert-llama-ggml-to-gguf.py
+++ b/convert-llama-ggml-to-gguf.py
@@ -128,7 +128,6 @@ class Tensor:
         self.start_offset = offset
         self.len_bytes = n_bytes
         offset += n_bytes
-        # logger.info(n_dims, name_len, dtype, self.dims, self.name, pad)
         return offset - orig_offset
 
 
@@ -347,7 +346,6 @@ class GGMLToGGUF:
                 temp = tempdims[1]
                 tempdims[1] = tempdims[0]
                 tempdims[0] = temp
-            # logger.info(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}')
             gguf_writer.add_tensor(
                 mapped_name,
                 data[tensor.start_offset:tensor.start_offset + tensor.len_bytes],
diff --git a/convert.py b/convert.py
index cf756fd68..599c7e5af 100755
--- a/convert.py
+++ b/convert.py
@@ -646,7 +646,6 @@ class LlamaHfVocab(Vocab):
 
 
 def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray:
-    # logger.info( "permute debug " + str(weights.shape[0]) + " x " + str(weights.shape[1]) + " nhead " + str(n_head) + " nheadkv " + str(n_kv_head) )
     if n_head_kv is not None and n_head != n_head_kv:
         n_head = n_head_kv
     return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
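
A note for reviewers on the convert.py hunk: the deleted debug comment
referenced n_kv_head, while the function's parameter is named n_head_kv,
so the line would not even run as written if it were ever uncommented
(a NameError at call time). Dropping it rather than porting it to the
new logger therefore loses nothing. For context, below is a minimal,
self-contained sketch of the permute() this hunk touches. The hunk
context cuts off mid-expression, so the trailing
.swapaxes(...).reshape(...) completion is an assumption about how the
chained expression ends; np.ndarray stands in for the file's NDArray
alias, and the smoke-test shapes are likewise made up.

import numpy as np

def permute(weights: np.ndarray, n_head: int, n_head_kv: int) -> np.ndarray:
    # Grouped-query-attention checkpoints carry n_head_kv heads in the
    # K/V projections rather than n_head, so permute per K/V head then.
    if n_head_kv is not None and n_head != n_head_kv:
        n_head = n_head_kv
    # Split the rows of each head into two halves, swap the half axis
    # with the within-half axis, and flatten back: row i of a head ends
    # up paired with row i + head_dim // 2 (an interleaved RoPE layout).
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape))

# Hypothetical smoke test: 4 heads with head_dim 8, square projection.
w = np.arange(32 * 32, dtype=np.float32).reshape(32, 32)
assert permute(w, 4, 4).shape == (32, 32)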