diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py
index b572e3c90..ec786ff67 100755
--- a/convert-falcon-hf-to-gguf.py
+++ b/convert-falcon-hf-to-gguf.py
@@ -14,7 +14,7 @@ from typing import Any
 import gguf
 import numpy as np
 import torch
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer  # type: ignore[import]


 def bytes_to_unicode():
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
index b1027922a..852123d99 100755
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -14,7 +14,7 @@ from typing import Any
 import gguf
 import numpy as np
 import torch
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer  # type: ignore[import]


 # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py
diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py
index 556e7cb47..6574c11dd 100755
--- a/convert-llama-7b-pth-to-gguf.py
+++ b/convert-llama-7b-pth-to-gguf.py
@@ -16,7 +16,7 @@ from typing import TYPE_CHECKING, Any
 import gguf
 import numpy as np
 import torch
-from sentencepiece import SentencePieceProcessor
+from sentencepiece import SentencePieceProcessor  # type: ignore[import]

 if TYPE_CHECKING:
     from typing import TypeAlias
diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py
index 5e00eec1a..c453c83c3 100755
--- a/convert-llama-hf-to-gguf.py
+++ b/convert-llama-hf-to-gguf.py
@@ -14,7 +14,7 @@ from typing import TYPE_CHECKING, Any
 import gguf
 import numpy as np
 import torch
-from sentencepiece import SentencePieceProcessor
+from sentencepiece import SentencePieceProcessor  # type: ignore[import]

 if TYPE_CHECKING:
     from typing import TypeAlias
diff --git a/convert.py b/convert.py
index 38b3c73a8..9a39edb99 100755
--- a/convert.py
+++ b/convert.py
@@ -27,7 +27,7 @@ from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Litera

 import gguf
 import numpy as np
-from sentencepiece import SentencePieceProcessor  # type: ignore
+from sentencepiece import SentencePieceProcessor  # type: ignore[import]

 if TYPE_CHECKING:
     from typing import TypeAlias
@@ -338,7 +338,7 @@ class BpeVocab:

     def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
         tokenizer = self.bpe_tokenizer
-        from transformers.models.gpt2 import tokenization_gpt2
+        from transformers.models.gpt2 import tokenization_gpt2  # type: ignore[import]
         byte_encoder = tokenization_gpt2.bytes_to_unicode()
         byte_decoder = {v: k for k, v in byte_encoder.items()}
         for i, item in enumerate(tokenizer):