improvement(tools): optimize with argparse

tpoisonooo 2023-03-17 16:53:53 +08:00
parent 904d2a8d6a
commit 3c7cb413fb


@@ -22,19 +22,27 @@ import json
 import struct
 import numpy as np
 import torch
+import argparse
+import os
 from sentencepiece import SentencePieceProcessor
 
-if len(sys.argv) < 3:
-    print("Usage: convert-ckpt-to-ggml.py dir-model ftype\n")
-    print("  ftype == 0 -> float32")
-    print("  ftype == 1 -> float16")
-    sys.exit(1)
 
-# output in the same directory as the model
-dir_model = sys.argv[1]
-
-fname_hparams = sys.argv[1] + "/params.json"
-fname_tokenizer = sys.argv[1] + "/../tokenizer.model"
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Convert ckpt models to ggml models.')
+    parser.add_argument('dir_model',
+                        type=str,
+                        help='Directory path of the checkpoint model')
+    parser.add_argument('ftype',
+                        type=str,
+                        choices=['f32', 'f16'],
+                        help='Data type of the converted tensor, f32 or f16')
+    parser.add_argument('out_dir',
+                        type=str,
+                        help='Directory path for storing ggml model')
+    return parser.parse_args()
+
 
 def get_n_parts(dim):
     if dim == 4096:
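
As a quick check of the new interface: a standalone sketch that mirrors the parser defined in parse_args() above. The argument names, choices, and help strings come from the diff; the sample paths are illustrative only.

    import argparse

    parser = argparse.ArgumentParser(
        description='Convert ckpt models to ggml models.')
    parser.add_argument('dir_model', type=str,
                        help='Directory path of the checkpoint model')
    parser.add_argument('ftype', type=str, choices=['f32', 'f16'],
                        help='Data type of the converted tensor, f32 or f16')
    parser.add_argument('out_dir', type=str,
                        help='Directory path for storing ggml model')

    # Equivalent to: python convert-ckpt-to-ggml.py models/7B f16 out
    # (paths are illustrative, not from the commit)
    args = parser.parse_args(['models/7B', 'f16', 'out'])
    assert args.ftype == 'f16'
    # Because ftype is constrained by choices=['f32', 'f16'], an invalid
    # value now exits with a usage error, replacing the old manual
    # "Invalid ftype" check.
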
@@ -49,129 +57,138 @@ def get_n_parts(dim):
     print("Invalid dim: " + str(dim))
     sys.exit(1)
 
-# possible data types
-#   ftype == 0 -> float32
-#   ftype == 1 -> float16
-#
-# map from ftype to string
-ftype_str = ["f32", "f16"]
-
-ftype = 1
-if len(sys.argv) > 2:
-    ftype = int(sys.argv[2])
-    if ftype < 0 or ftype > 1:
-        print("Invalid ftype: " + str(ftype))
-        sys.exit(1)
-    fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"
-
-with open(fname_hparams, "r") as f:
-    hparams = json.load(f)
-
-tokenizer = SentencePieceProcessor(fname_tokenizer)
-
-hparams.update({"vocab_size": tokenizer.vocab_size()})
-
-n_parts = get_n_parts(hparams["dim"])
-
-print(hparams)
-print('n_parts = ', n_parts)
-
-for p in range(n_parts):
-    print('Processing part ', p)
-
-    #fname_model = sys.argv[1] + "/consolidated.00.pth"
-    fname_model = sys.argv[1] + "/consolidated.0" + str(p) + ".pth"
-    fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"
-    if (p > 0):
-        fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" + "." + str(p)
-
-    model = torch.load(fname_model, map_location="cpu")
-
-    fout = open(fname_out, "wb")
-
-    fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex
-    fout.write(struct.pack("i", hparams["vocab_size"]))
-    fout.write(struct.pack("i", hparams["dim"]))
-    fout.write(struct.pack("i", hparams["multiple_of"]))
-    fout.write(struct.pack("i", hparams["n_heads"]))
-    fout.write(struct.pack("i", hparams["n_layers"]))
-    fout.write(struct.pack("i", hparams["dim"] // hparams["n_heads"])) # rot (obsolete)
-    fout.write(struct.pack("i", ftype))
-
-    # Is this correct??
-    for i in range(tokenizer.vocab_size()):
-        if tokenizer.is_unknown(i):
-            # "<unk>" token (translated as ??)
-            text = " \u2047 ".encode("utf-8")
-            fout.write(struct.pack("i", len(text)))
-            fout.write(text)
-        elif tokenizer.is_control(i):
-            # "<s>"/"</s>" tokens
-            fout.write(struct.pack("i", 0))
-        elif tokenizer.is_byte(i):
-            # "<U+XX>" tokens (which may be invalid UTF-8)
-            piece = tokenizer.id_to_piece(i)
-            if len(piece) != 6:
-                print("Invalid token: " + piece)
-                sys.exit(1)
-            byte_value = int(piece[3:-1], 16)
-            fout.write(struct.pack("i", 1))
-            fout.write(struct.pack("B", byte_value))
-        else:
-            # normal token. Uses U+2581 (LOWER ONE EIGHTH BLOCK) to represent spaces.
-            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
-            fout.write(struct.pack("i", len(text)))
-            fout.write(text)
-
-    for k, v in model.items():
-        name = k
-        shape = v.shape
-
-        # skip layers.X.attention.inner_attention.rope.freqs
-        if name[-5:] == "freqs":
-            continue
-
-        print("Processing variable: " + name + " with shape: ", shape, " and type: ", v.dtype)
-
-        #data = tf.train.load_variable(dir_model, name).squeeze()
-        data = v.numpy().squeeze()
-        n_dims = len(data.shape);
-
-        # for efficiency - transpose some matrices
-        # "model/h.*/attn/c_attn/w"
-        # "model/h.*/attn/c_proj/w"
-        # "model/h.*/mlp/c_fc/w"
-        # "model/h.*/mlp/c_proj/w"
-        #if name[-14:] == "/attn/c_attn/w" or \
-        #   name[-14:] == "/attn/c_proj/w" or \
-        #   name[-11:] == "/mlp/c_fc/w" or \
-        #   name[-13:] == "/mlp/c_proj/w":
-        #    print("  Transposing")
-        #    data = data.transpose()
-
-        dshape = data.shape
-
-        # default type is fp16
-        ftype_cur = 1
-        if ftype == 0 or n_dims == 1:
-            print("  Converting to float32")
-            data = data.astype(np.float32)
-            ftype_cur = 0
-
-        # header
-        sname = name.encode('utf-8')
-        fout.write(struct.pack("iii", n_dims, len(sname), ftype_cur))
-        for i in range(n_dims):
-            fout.write(struct.pack("i", dshape[n_dims - 1 - i]))
-        fout.write(sname);
-
-        # data
-        data.tofile(fout)
-
-    # I hope this deallocates the memory ..
-    model = None
-
-    fout.close()
-
-    print("Done. Output file: " + fname_out + ", (part ", p, ")")
-    print("")
+def main():
+    args = parse_args()
+    dir_model = args.dir_model
+    out_dir = args.out_dir
+
+    if not os.path.exists(out_dir):
+        os.mkdir(out_dir)
+
+    ftype = args.ftype
+    ftype_int = {'f32': 0, 'f16': 1}
+    fname_hparams = os.path.join(dir_model, 'params.json')
+    fname_tokenizer = os.path.join(dir_model, '..', 'tokenizer.model')
+
+    with open(fname_hparams, "r") as f:
+        hparams = json.load(f)
+
+    tokenizer = SentencePieceProcessor(fname_tokenizer)
+    hparams.update({"vocab_size": tokenizer.vocab_size()})
+
+    n_parts = get_n_parts(hparams["dim"])
+
+    print(hparams)
+    print('n_parts = ', n_parts)
+
+    for p in range(n_parts):
+        print('Processing part ', p)
+
+        #fname_model = sys.argv[1] + "/consolidated.00.pth"
+        fname_model = os.path.join(dir_model, "consolidated.0{}.pth".format(p))
+        if p > 0:
+            fname_out = os.path.join(out_dir,
+                                     "ggml-model-{}.bin.{}".format(ftype, p))
+        else:
+            fname_out = os.path.join(out_dir,
+                                     "ggml-model-{}.bin".format(ftype))
+
+        model = torch.load(fname_model, map_location="cpu")
+
+        fout = open(fname_out, "wb")
+
+        fout.write(struct.pack("i", 0x67676d6c))  # magic: ggml in hex
+        fout.write(struct.pack("i", hparams["vocab_size"]))
+        fout.write(struct.pack("i", hparams["dim"]))
+        fout.write(struct.pack("i", hparams["multiple_of"]))
+        fout.write(struct.pack("i", hparams["n_heads"]))
+        fout.write(struct.pack("i", hparams["n_layers"]))
+        fout.write(struct.pack("i", hparams["dim"] //
+                               hparams["n_heads"]))  # rot (obsolete)
+        fout.write(struct.pack("i", ftype_int[ftype]))
+
+        # Is this correct??
+        for i in range(tokenizer.vocab_size()):
+            if tokenizer.is_unknown(i):
+                # "<unk>" token (translated as ??)
+                text = " \u2047 ".encode("utf-8")
+                fout.write(struct.pack("i", len(text)))
+                fout.write(text)
+            elif tokenizer.is_control(i):
+                # "<s>"/"</s>" tokens
+                fout.write(struct.pack("i", 0))
+            elif tokenizer.is_byte(i):
+                # "<U+XX>" tokens (which may be invalid UTF-8)
+                piece = tokenizer.id_to_piece(i)
+                if len(piece) != 6:
+                    print("Invalid token: " + piece)
+                    sys.exit(1)
+                byte_value = int(piece[3:-1], 16)
+                fout.write(struct.pack("i", 1))
+                fout.write(struct.pack("B", byte_value))
+            else:
+                # normal token. Uses U+2581 (LOWER ONE EIGHTH BLOCK) to represent spaces.
+                text = tokenizer.id_to_piece(i).replace("\u2581",
+                                                        " ").encode("utf-8")
+                fout.write(struct.pack("i", len(text)))
+                fout.write(text)
+
+        for k, v in model.items():
+            name = k
+            shape = v.shape
+
+            # skip layers.X.attention.inner_attention.rope.freqs
+            if name[-5:] == "freqs":
+                continue
+
+            print("Processing variable: " + name + " with shape: ", shape,
+                  " and type: ", v.dtype)
+
+            #data = tf.train.load_variable(dir_model, name).squeeze()
+            data = v.numpy().squeeze()
+            n_dims = len(data.shape)
+
+            # for efficiency - transpose some matrices
+            # "model/h.*/attn/c_attn/w"
+            # "model/h.*/attn/c_proj/w"
+            # "model/h.*/mlp/c_fc/w"
+            # "model/h.*/mlp/c_proj/w"
+            #if name[-14:] == "/attn/c_attn/w" or \
+            #   name[-14:] == "/attn/c_proj/w" or \
+            #   name[-11:] == "/mlp/c_fc/w" or \
+            #   name[-13:] == "/mlp/c_proj/w":
+            #    print("  Transposing")
+            #    data = data.transpose()
+
+            dshape = data.shape
+
+            # default type is fp16
+            ftype_cur = 1
+            if ftype == 'f32' or n_dims == 1:
+                print("  Converting to float32")
+                data = data.astype(np.float32)
+                ftype_cur = 0
+
+            # header
+            sname = name.encode('utf-8')
+            fout.write(struct.pack("iii", n_dims, len(sname), ftype_cur))
+            for i in range(n_dims):
+                fout.write(struct.pack("i", dshape[n_dims - 1 - i]))
+            fout.write(sname)
+
+            # data
+            data.tofile(fout)
+
+        # I hope this deallocates the memory ..
+        model = None
+
+        fout.close()
+
+        print("Done. Output file: " + fname_out + ", (part ", p, ")")
+        print("")
+
+
+if __name__ == '__main__':
+    main()
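
For reference, the loop above writes eight consecutive int32 values before the vocabulary: the ggml magic, six hyperparameters, and the ftype code. A minimal read-back sketch, assuming a hypothetical output path and the platform's native byte order and 4-byte int (struct.pack("i") uses native order):

    import struct

    with open('out/ggml-model-f16.bin', 'rb') as f:  # hypothetical path
        (magic, vocab_size, dim, multiple_of, n_heads,
         n_layers, rot, ftype) = struct.unpack('8i', f.read(32))

    assert magic == 0x67676d6c  # "ggml" in hex, as written above
    assert ftype in (0, 1)      # 0 -> f32, 1 -> f16, per ftype_int
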