diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py
new file mode 100644
index 000000000..51bf44f07
--- /dev/null
+++ b/convert-llama-ggmlv3-to-gguf.py
@@ -0,0 +1,223 @@
+import sys, struct, math, argparse
+
+import numpy as np
+
+import gguf
+
+# Note: Does not support GGML_QKK_64
+QK_K = 256
+# Items here are (block size, type size)
+GGML_QUANT_SIZES = {
+    gguf.GGMLQuantizationType.F32  : (1, 4),
+    gguf.GGMLQuantizationType.F16  : (1, 2),
+    gguf.GGMLQuantizationType.Q4_0 : (32, 2 + 16),
+    gguf.GGMLQuantizationType.Q4_1 : (32, 2 + 2 + 16),
+    gguf.GGMLQuantizationType.Q5_0 : (32, 2 + 4 + 16),
+    gguf.GGMLQuantizationType.Q5_1 : (32, 2 + 2 + 4 + 16),
+    gguf.GGMLQuantizationType.Q8_0 : (32, 2 + 32),
+    gguf.GGMLQuantizationType.Q8_1 : (32, 4 + 4 + 32),
+    gguf.GGMLQuantizationType.Q2_K : (256, 2 + 2 + QK_K // 16 + QK_K // 4),
+    gguf.GGMLQuantizationType.Q3_K : (256, 2 + QK_K // 4 + QK_K // 8 + 12),
+    gguf.GGMLQuantizationType.Q4_K : (256, 2 + 2 + QK_K // 2 + 12),
+    gguf.GGMLQuantizationType.Q5_K : (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
+    gguf.GGMLQuantizationType.Q6_K : (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
+    gguf.GGMLQuantizationType.Q8_K : (256, 2 + QK_K + QK_K // 8),
+}
+
+class Hyperparameters:
+    def __init__(self):
+        self.n_vocab = self.n_embd = self.n_mult = self.n_head = self.n_layer = self.n_rot = self.ftype = 0
+
+    def load(self, data, offset):
+        (
+            self.n_vocab,
+            self.n_embd,
+            self.n_mult,
+            self.n_head,
+            self.n_layer,
+            self.n_rot,
+            self.ftype,
+        ) = struct.unpack('<7I', data[offset:offset + (4 * 7)])
+        return 4 * 7
+
+    def __str__(self):
+        return f'<Hyperparameters: n_vocab={self.n_vocab}, n_embd={self.n_embd}, n_mult={self.n_mult}, n_head={self.n_head}, n_layer={self.n_layer}, n_rot={self.n_rot}, ftype={self.ftype}>'
+
+class Vocab:
+    def __init__(self):
+        self.items = []
+
+    def load(self, data, offset, n_vocab):
+        orig_offset = offset
+        for _ in range(n_vocab):
+            itemlen = struct.unpack('<I', data[offset:offset + 4])[0]
+            assert itemlen < 4096, 'Absurd vocab item length'
+            offset += 4
+            vocab = bytes(data[offset:offset + itemlen])
+            offset += itemlen
+            score = struct.unpack('<f', data[offset:offset + 4])[0]
+            offset += 4
+            self.items.append((vocab, score))
+        return offset - orig_offset
+
+class Tensor:
+    def __init__(self):
+        self.name = None
+        self.dims = ()
+        self.dtype = None
+        self.start_offset = 0
+        self.len_bytes = 0
+
+    def load(self, data, offset):
+        orig_offset = offset
+        (n_dims, name_len, dtype) = struct.unpack('<3I', data[offset:offset + 12])
+        assert n_dims >= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}'
+        assert name_len < 4096, 'Absurd tensor name length'
+        quant = GGML_QUANT_SIZES.get(dtype)
+        assert quant is not None, 'Unknown tensor type'
+        (blksize, tysize) = quant
+        offset += 12
+        self.dtype = dtype
+        self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)])
+        offset += 4 * n_dims
+        self.name = bytes(data[offset:offset + name_len])
+        offset += name_len
+        pad = ((offset + 31) & ~31) - offset
+        offset += pad
+        n_elems = np.prod(self.dims)
+        n_bytes = (n_elems * tysize) // blksize
+        self.start_offset = offset
+        self.len_bytes = n_bytes
+        offset += n_bytes
+        # print(n_dims, name_len, dtype, self.dims, self.name, pad)
+        return offset - orig_offset
+
+class GGMLV3Model:
+    def __init__(self):
+        self.hyperparameters = None
+        self.vocab = None
+        self.tensor_map = {}
+        self.tensors = []
+
+    def validate_header(self, data, offset):
+        if bytes(data[offset:offset + 4]) != b'tjgg' or struct.unpack('<I', data[offset + 4:offset + 8])[0] != 3:
+            raise ValueError('Only GGJTv3 supported')
+        return 8
+
+    def load(self, data, offset):
+        offset += self.validate_header(data, offset)
+        hp = Hyperparameters()
+        offset += hp.load(data, offset)
+        vocab = Vocab()
+        offset += vocab.load(data, offset, hp.n_vocab)
+        tensors = []
+        tensor_map = {}
+        while offset < len(data):
+            tensor = Tensor()
+            offset += tensor.load(data, offset)
+            tensor_map[tensor.name] = len(tensors)
+            tensors.append(tensor)
+        self.hyperparameters = hp
+        self.vocab = vocab
+        self.tensors = tensors
+        self.tensor_map = tensor_map
+        return offset
+
+def save_gguf(ggml_model, data, cfg):
+    hp = ggml_model.hyperparameters
+    ff_tensor_idx = ggml_model.tensor_map.get(b'layers.0.feed_forward.w1.weight')
+    assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor'
+    ff_tensor = ggml_model.tensors[ff_tensor_idx]
+    if cfg.gqa == 1:
+        n_kv_head = hp.n_head
+    else:
+        gqa = float(cfg.gqa)
+        n_kv_head = None
+        for x in range(1, 256):
+            if float(hp.n_head) / float(x) == gqa:
+                n_kv_head = x
+        assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param"
+        print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}')
+    nm = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, hp.n_layer)
+    gguf_writer = gguf.GGUFWriter(cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False)
+    gguf_writer.add_context_length(cfg.context_length)
+    gguf_writer.add_embedding_length(hp.n_embd)
+    gguf_writer.add_block_count(hp.n_layer)
+    gguf_writer.add_feed_forward_length(ff_tensor.dims[1])
+    gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head)
+    gguf_writer.add_head_count(hp.n_head)
+    gguf_writer.add_head_count_kv(n_kv_head)
+    gguf_writer.add_layer_norm_rms_eps(float(cfg.eps))
+    gguf_writer.add_tokenizer_model('llama')
+    tokens = []
+    scores = []
+    toktypes = []
+    print(f'* Adding {hp.n_vocab} vocab item(s)')
+    for (tokid, (vbytes, vscore)) in enumerate(ggml_model.vocab.items):
+        if len(vbytes) > 1 and vbytes[0] == 32:
+            vbytes = vbytes.replace(b' ', b'\xe2\x96\x81')
+        tt = 1
+        if len(vbytes) == 0:
+            tt = 3
+        elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1:
+            hv = hex(vbytes[0])[2:].upper()
+            vbytes = bytes(f'<0x{hv}>', encoding = 'UTF-8')
+            tt = 6
+        toktypes.append(tt)
+        tokens.append(vbytes)
+        scores.append(vscore)
+    gguf_writer.add_token_list(tokens)
+    gguf_writer.add_token_scores(scores)
+    gguf_writer.add_token_types(toktypes)
+    print('* Adding tensors')
+    for tensor in ggml_model.tensors:
+        name = str(tensor.name, 'UTF-8')
+        if name.endswith('.weight'):
+            name = name[:-7]
+            suffix = '.weight'
+        elif name.endswith('.bias'):
+            name = name[:-5]
+            suffix = '.bias'
+        mapped_name = nm.get(name)
+        assert mapped_name is not None, f'Bad name {name}'
+        mapped_name += suffix
+        tempdims = list(tensor.dims[:])
+        if len(tempdims) > 1:
+            temp = tempdims[1]
+            tempdims[1] = tempdims[0]
+            tempdims[0] = temp
+        print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}')
+        gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype)
+    print("gguf: write header")
+    gguf_writer.write_header_to_file()
+    print("gguf: write metadata")
+    gguf_writer.write_kv_data_to_file()
+    print("gguf: write tensors")
+    gguf_writer.write_tensors_to_file()
+
+    gguf_writer.close()
+
+def handle_args():
+    parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF')
+    parser.add_argument('--input', '-i', help = 'Input GGMLv3 filename')
+    parser.add_argument('--output', '-o', help = 'Output GGUF filename')
+    parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)')
+    parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps (use 1e-5 for LLaMA2)')
+    parser.add_argument('--context-length', '-c', type = int, default = 2048, help = 'Default max context length')
+    return parser.parse_args()
+
+def main():
+    cfg = handle_args()
+    data = np.memmap(cfg.input, mode = 'r')
+    model = GGMLV3Model()
+    offset = model.load(data, 0)
+    print(model.hyperparameters)
+    # print(model.vocab.items)
+    # return
+    save_gguf(model, data, cfg)
+
+main()
diff --git a/gguf.py b/gguf.py
index e5eb85ded..5c82414fa 100644
--- a/gguf.py
+++ b/gguf.py
@@ -5,7 +5,7 @@ import tempfile
 import numpy as np
 
 from enum import IntEnum, auto
-from typing import Any, IO, List
+from typing import Any, IO, List, Optional
 
 #
 # constants
@@ -325,8 +325,20 @@ def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> dict:
 
 
 class GGMLQuantizationType(IntEnum):
-    F32 = 0
-    F16 = 1
+    F32  = 0
+    F16  = 1
+    Q4_0 = 2
+    Q4_1 = 3
+    Q5_0 = 6
+    Q5_1 = 7
+    Q8_0 = 8
+    Q8_1 = 9
+    Q2_K = 10
+    Q3_K = 11
+    Q4_K = 12
+    Q5_K = 13
+    Q6_K = 14
+    Q8_K = 15
 
 
 class GGUFValueType(IntEnum):
@@ -359,7 +371,7 @@ class GGUFValueType:
 
 
 class GGUFWriter:
-    def __init__(self, path: str, arch: str):
+    def __init__(self, path: str, arch: str, use_temp_file = True):
         self.fout = open(path, "wb")
         self.arch = arch
         self.offset_tensor = 0
@@ -369,6 +381,8 @@ class GGUFWriter:
         self.ti_data = b""
         self.ti_data_count = 0
         self.add_architecture()
+        self.use_temp_file = use_temp_file
+        self.tensors = []
 
     def write_header_to_file(self):
         self.fout.write(struct.pack("<I", GGUF_MAGIC))
@@ -476,8 +490,8 @@ class GGUFWriter:
     def ggml_pad(x: int, n: int) -> int:
         return ((x + n - 1) // n) * n
 
-    def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int):
-        assert tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now"
+    def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int, raw_dtype: Optional[GGMLQuantizationType] = None):
+        assert raw_dtype is not None or tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now"
 
         encoded_name = name.encode("utf8")
         self.ti_data += struct.pack("<I", len(encoded_name))
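
Example invocation (a sketch: the model filenames here are placeholders; the flags are the ones defined in handle_args above, with --gqa 8 and --eps 1e-5 following the LLaMA2 guidance in the help strings):

    python convert-llama-ggmlv3-to-gguf.py --input llama2-70b.ggmlv3.q4_K_M.bin --output llama2-70b.gguf --gqa 8 --eps 1e-5 --context-length 4096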