cleanup
commit 233cb0741f
parent bca0962575
3 changed files with 10 additions and 18 deletions
convert-gguf-endian.py — 24 changes, normal file → executable file
@@ -1,13 +1,12 @@
 #!/usr/bin/env python3
 from __future__ import annotations
 
+import os
 import sys
 from pathlib import Path
 
 import numpy as np
 
-import os
-
 if "NO_LOCAL_GGUF" not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / "gguf-py"))
 import gguf
@@ -31,9 +30,7 @@ def convert_byteorder(filename: str, order: str) -> None:
     file_endian = host_endian
     if order == "native":
         order = host_endian
-    print(
-        f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian"
-    )
+    print(f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian")
     if file_endian == order:
         print(f"* File is already {order.upper()} endian. Nothing to do.")
         sys.exit(0)
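For context on the early-exit path this hunk touches: the script compares the file's byte order against the requested one (where "native" resolves to the host's byte order) and does nothing when they already match. A rough standalone sketch of that check, using only sys.byteorder and a hypothetical helper name rather than the script's actual detection code:

import sys

def needs_byteswap(file_endian: str, requested: str) -> bool:
    # "native" means: make the file match whatever the host uses.
    host_endian = sys.byteorder  # 'little' or 'big'
    if requested == "native":
        requested = host_endian
    # Nothing to do when the file is already in the requested byte order.
    return file_endian != requested

print(needs_byteswap("little", "big"))  # True: a swap would be required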
@@ -44,15 +41,11 @@ def convert_byteorder(filename: str, order: str) -> None:
             gguf.GGMLQuantizationType.F16,
             gguf.GGMLQuantizationType.Q8_0,
         ):
-            raise ValueError(
-                f"Cannot handle type {tensor.tensor_type.name} for tensor {repr(tensor.name)}"
-            )
+            raise ValueError(f"Cannot handle type {tensor.tensor_type.name} for tensor {repr(tensor.name)}")
     print(f"* Preparing to convert from {file_endian.upper()} to {order.upper()}")
     print("\n*** Warning *** Warning *** Warning **")
     print("* This conversion process may damage the file. Ensure you have a backup.")
-    print(
-        "* The file will be modified immediately, so if conversion fails or is interrupted"
-    )
+    print("* The file will be modified immediately, so if conversion fails or is interrupted")
     print("* the file will be corrupted. If you are positive then type YES:")
     response = input("YES, I am sure> ")
     if response != "YES":
@@ -60,15 +53,14 @@ def convert_byteorder(filename: str, order: str) -> None:
         sys.exit(0)
     print(f"\n* Converting fields ({len(reader.fields)})")
     for idx, field in enumerate(reader.fields.values()):
-        print(
-            f"- {idx:4}: Converting field {repr(field.name)}, part count: {len(field.parts)}"
-        )
+        print(f"- {idx:4}: Converting field {repr(field.name)}, part count: {len(field.parts)}")
         for part in field.parts:
             part.byteswap(inplace=True)
     print(f"\n* Converting tensors ({len(reader.tensors)})")
     for idx, tensor in enumerate(reader.tensors):
         print(
-            f" - {idx:4}: Converting tensor {repr(tensor.name)}, type={tensor.tensor_type.name}, elements={tensor.n_elements}... ",
+            f" - {idx:4}: Converting tensor {repr(tensor.name)}, type={tensor.tensor_type.name}, "
+            f"elements={tensor.n_elements}... ",
             end="",
         )
         tensor_type = tensor.tensor_type
@@ -84,7 +76,7 @@ def convert_byteorder(filename: str, order: str) -> None:
             for block_num in range(n_blocks):
                 block_offs = block_num * block_size
                 # I know I said f16, but it doesn't matter here - any simple 16 bit type works.
-                delta = tensor.data[block_offs : block_offs + 2].view(dtype=np.uint16)
+                delta = tensor.data[block_offs:block_offs + 2].view(dtype=np.uint16)
                 delta.byteswap(inplace=True)
                 if block_num % 100000 == 0:
                     print(f"[{(n_blocks - block_num) // 1000}K]", end="")
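The slice change above is purely cosmetic; the underlying technique is to reinterpret the two raw bytes at the start of each Q8_0 block (the 16-bit scale) as a single np.uint16 and byteswap it in place. A minimal standalone sketch of that numpy pattern, using a made-up 4-byte buffer instead of real tensor data:

import numpy as np

buf = np.frombuffer(bytearray(b"\x01\x02\x03\x04"), dtype=np.uint8)  # writable view over 4 raw bytes
delta = buf[0:2].view(dtype=np.uint16)  # reinterpret the first two bytes as one 16-bit value
delta.byteswap(inplace=True)            # swap its byte order in place, mutating the buffer
print(buf.tobytes())                    # b'\x02\x01\x03\x04'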
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import sys
 from enum import IntEnum, StrEnum, auto
-from typing import Any, Type
+from typing import Any
 
 #
 # constants
@@ -1,4 +1,4 @@
-import gguf
+import gguf  # noqa: F401
 
 # TODO: add tests
 