some fixes

- Changed the numpy and torch versions to newer ones
- Specified the encoding when reading and writing convert-hf-to-gguf.py from convert-hf-to-gguf-update.py
parent 172c825684
commit 88cc7d7878
4 changed files with 5 additions and 5 deletions
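For context on the encoding change: pathlib.Path.read_text() and write_text() fall back to the locale's preferred encoding when none is given, which is not UTF-8 on every platform (commonly cp1252 on Windows), so reading or rewriting the UTF-8 source could raise UnicodeDecodeError or mangle characters. A minimal sketch of the before/after behaviour, assuming the file exists in the working directory:

from pathlib import Path

p = Path("convert-hf-to-gguf.py")

# Before: no encoding argument, so the locale's preferred encoding is used;
# on a cp1252 locale this can fail or corrupt non-ASCII characters.
# text = p.read_text()

# After: read and write the file explicitly as UTF-8, independent of the locale.
text = p.read_text(encoding="utf-8")
p.write_text(text, encoding="utf-8")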
@@ -213,7 +213,7 @@ src_func = f"""
 """
 
 convert_py_pth = pathlib.Path("convert-hf-to-gguf.py")
-convert_py = convert_py_pth.read_text()
+convert_py = convert_py_pth.read_text(encoding="utf-8")
 convert_py = re.sub(
     r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
     lambda m: m.group(1) + src_func + m.group(3),
@@ -221,7 +221,7 @@ convert_py = re.sub(
     flags=re.DOTALL | re.MULTILINE,
 )
 
-convert_py_pth.write_text(convert_py)
+convert_py_pth.write_text(convert_py, encoding="utf-8")
 
 logger.info("+++ convert-hf-to-gguf.py was updated")
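For reference, the re.sub() call shown above replaces everything between the two marker comments while leaving the markers in place. A standalone sketch of that mechanism, using a hypothetical src_func and input snippet rather than the real generated code:

import re

# Hypothetical replacement body; in the real script this is the src_func
# string assembled earlier in convert-hf-to-gguf-update.py.
src_func = "\n    # regenerated body\n"

snippet = (
    "    # Marker: Start get_vocab_base_pre\n"
    "    old_body = None\n"
    "    # Marker: End get_vocab_base_pre\n"
)

# DOTALL lets (.+?) span newlines, so the whole old body between the
# markers is swapped for src_func while both marker lines are kept.
updated = re.sub(
    r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
    lambda m: m.group(1) + src_func + m.group(3),
    snippet,
    flags=re.DOTALL | re.MULTILINE,
)
print(updated)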
@@ -1,2 +1,2 @@
 -r ./requirements-convert-legacy-llama.txt
-torch~=2.1.1
+torch~=2.2.1
@@ -1,2 +1,2 @@
 -r ./requirements-convert-legacy-llama.txt
-torch~=2.1.1
+torch~=2.2.1
@@ -1,4 +1,4 @@
-numpy~=1.24.4
+numpy~=1.26.4
 sentencepiece~=0.2.0
 transformers>=4.40.1,<5.0.0
 gguf>=0.1.0
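One note on the version pins themselves: ~= is pip's compatible-release operator, so torch~=2.2.1 accepts any 2.2.x release at or above 2.2.1 but rejects 2.3.0, and numpy~=1.26.4 likewise stays within 1.26.x. A quick way to check the semantics with the packaging library (an assumption here; it is not listed in these requirements files):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet("~=2.2.1")   # equivalent to ">=2.2.1, ==2.2.*"
print(Version("2.2.2") in spec)  # True
print(Version("2.3.0") in spec)  # False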