From fa41aeb8babbb08c1ad72409fc28e69808a82da4 Mon Sep 17 00:00:00 2001 From: jameswu2014 <545426914@qq.com> Date: Mon, 11 Sep 2023 13:31:48 +0800 Subject: [PATCH] convert-baichuan : fix file permissions (make script executable) --- convert-baichuan-hf-to-gguf.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) mode change 100644 => 100755 convert-baichuan-hf-to-gguf.py diff --git a/convert-baichuan-hf-to-gguf.py b/convert-baichuan-hf-to-gguf.py old mode 100644 new mode 100755 index 6bb685a4f..5b301de27 --- a/convert-baichuan-hf-to-gguf.py +++ b/convert-baichuan-hf-to-gguf.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# HF llama --> gguf conversion +# HF baichuan --> gguf conversion from __future__ import annotations @@ -23,7 +23,6 @@ if TYPE_CHECKING: NDArray: TypeAlias = 'np.ndarray[Any, Any]' # reverse HF permute back to original pth layout -# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py def reverse_hf_permute(weights: NDArray, n_head: int, n_kv_head: int | None = None) -> NDArray: