diff --git a/.flake8 b/.flake8
index 608eb8eed..bc41c2290 100644
--- a/.flake8
+++ b/.flake8
@@ -1,4 +1,4 @@
 [flake8]
 max-line-length = 125
 ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
-exclude = examples/*,examples/*/**,*/**/__init__.py
+exclude = examples/*,examples/*/**,*/**/__init__.py,scripts/gen-unicode-data.py,tests/test-tokenizer-0.py
diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
index 2b08b8505..917a4469d 100644
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -31,6 +31,7 @@ from hashlib import sha256
 from enum import IntEnum, auto
 from transformers import AutoTokenizer
 
+logging.basicConfig(level=logging.DEBUG)
 logger = logging.getLogger("convert-hf-to-gguf-update")
 
 
@@ -284,6 +285,6 @@ logger.info("\nRun the following commands to generate the vocab files for testin
 for model in models:
     name = model["name"]
 
-    logger.info(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only")
+    print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100
 
 logger.info("\n")