convert : add necessary type: ignore comments
parent e66e0be432
commit 8a8c1cb0f2

5 changed files with 6 additions and 6 deletions
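Background, not part of the commit itself: mypy reports an [import] error for third-party packages that ship without type stubs (as transformers and sentencepiece did at the time), and a "# type: ignore[import]" comment suppresses exactly that error code on the annotated line. A minimal sketch of the pattern, assuming mypy with its default settings and a stub-less package:

    # sketch.py - hypothetical file, not part of this commit
    # Without the trailing comment, mypy reports something like:
    #   error: Skipping analysis of "transformers": module is installed,
    #   but missing library stubs or py.typed marker  [import]
    from transformers import AutoTokenizer  # type: ignore[import]

    import numpy as np  # numpy ships type stubs, so no ignore is needed


    def token_count(text: str) -> int:
        # Hypothetical usage; "gpt2" is only an example checkpoint name.
        tokenizer = AutoTokenizer.from_pretrained("gpt2")
        return len(tokenizer.encode(text))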
@@ -14,7 +14,7 @@ from typing import Any
 import gguf
 import numpy as np
 import torch
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer  # type: ignore[import]
 
 
 def bytes_to_unicode():
@@ -14,7 +14,7 @@ from typing import Any
 import gguf
 import numpy as np
 import torch
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer  # type: ignore[import]
 
 # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py
 
@@ -16,7 +16,7 @@ from typing import TYPE_CHECKING, Any
 import gguf
 import numpy as np
 import torch
-from sentencepiece import SentencePieceProcessor
+from sentencepiece import SentencePieceProcessor  # type: ignore[import]
 
 if TYPE_CHECKING:
     from typing import TypeAlias
@@ -14,7 +14,7 @@ from typing import TYPE_CHECKING, Any
 import gguf
 import numpy as np
 import torch
-from sentencepiece import SentencePieceProcessor
+from sentencepiece import SentencePieceProcessor  # type: ignore[import]
 
 if TYPE_CHECKING:
     from typing import TypeAlias
@@ -27,7 +27,7 @@ from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Litera
 
 import gguf
 import numpy as np
-from sentencepiece import SentencePieceProcessor  # type: ignore
+from sentencepiece import SentencePieceProcessor  # type: ignore[import]
 
 if TYPE_CHECKING:
     from typing import TypeAlias
@@ -338,7 +338,7 @@ class BpeVocab:
 
     def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
         tokenizer = self.bpe_tokenizer
-        from transformers.models.gpt2 import tokenization_gpt2
+        from transformers.models.gpt2 import tokenization_gpt2  # type: ignore[import]
         byte_encoder = tokenization_gpt2.bytes_to_unicode()
         byte_decoder = {v: k for k, v in byte_encoder.items()}
         for i, item in enumerate(tokenizer):
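The last two hunks narrow an existing bare ignore to the bracketed form. The practical difference (my gloss, not stated in the commit): a bare "# type: ignore" silences every mypy error on its line, while "# type: ignore[import]" silences only the missing-stubs error, so unrelated mistakes on the same line still get reported. A contrived sketch:

    # contrived.py - hypothetical illustration only
    x: int = "oops"  # type: ignore          # bare: this real type error is hidden
    y: int = "oops"  # type: ignore[import]  # scoped: mypy still reports [assignment]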