diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py
index f72068dee..339c8e4c5 100755
--- a/convert-falcon-hf-to-gguf.py
+++ b/convert-falcon-hf-to-gguf.py
@@ -144,7 +144,7 @@ for i in range(vocab_size):
         print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.")
         pad_token = f"[PAD{i}]".encode("utf8")
         text = bytearray(pad_token)
-    else if i in added_tokens:
+    elif i in added_tokens:
         # these tokens are not encoded, see https://github.com/huggingface/transformers/issues/1133
         text = bytearray(reverse_vocab[i].encode('utf-8'))
     else:
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
index e47df20ea..80b09110f 100755
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -140,7 +140,7 @@ for i in range(vocab_size):
         print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.")
         pad_token = f"[PAD{i}]".encode("utf8")
         text = bytearray(pad_token)
-    else if i in added_tokens:
+    elif i in added_tokens:
         # these tokens are not encoded, see https://github.com/huggingface/transformers/issues/1133
         text = bytearray(reverse_vocab[i].encode('utf-8'))
     else:
diff --git a/convert-starcoder-hf-to-gguf.py b/convert-starcoder-hf-to-gguf.py
index e1647f834..0e0a1ccc7 100755
--- a/convert-starcoder-hf-to-gguf.py
+++ b/convert-starcoder-hf-to-gguf.py
@@ -128,7 +128,7 @@ for i in range(vocab_size):
         print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.")
         pad_token = f"[PAD{i}]".encode("utf8")
         text = bytearray(pad_token)
-    else if i in added_tokens:
+    elif i in added_tokens:
         # these tokens are not encoded, see https://github.com/huggingface/transformers/issues/1133
         text = bytearray(reverse_vocab[i].encode('utf-8'))
     else: