improve check-requirements.sh

Jared Van Bortel 2023-12-27 21:27:18 -05:00
parent cb58775719
commit ce26f49208
3 changed files with 186 additions and 194 deletions


@@ -1,4 +1,5 @@
 #!/bin/bash
+set -euo pipefail
 #
 # check-requirements.sh checks all requirements files for each top-level
 # convert*.py script.
@@ -8,7 +9,7 @@
 # sized tmpfs /tmp or ramdisk is recommended if running this frequently.
 #
 # usage: ./check-requirements.sh [<working_dir>]
-#        ./check-requirements.sh 'nocleanup' [<working_dir>]
+#        ./check-requirements.sh nocleanup [<working_dir>]
 #
 # where:
 #   - <working_dir> is a directory that can be used as the base for
@@ -20,135 +21,108 @@
 #   - bash >= 3.2.57
 #   - shellcheck
 #
-# For each script, it creates a fresh venv, `pip install -r` the
-# requirements, and finally executes the python script with no arguments to
-# check for a `ModuleNotFoundError`.
+# For each script, it creates a fresh venv, `pip install`s the requirements, and
+# finally imports the python script to check for `ImportError`.
 #
 
 log() {
-    local level="$1"; shift
-    local format="$1"; shift
-    # shellcheck disable=SC2059
-    >&2 printf "$level: $format\n" "$@"
+    local level=$1 msg=$2
+    printf >&2 '%s: %s\n' "$level" "$msg"
 }
 
-debug () {
-    log 'DEBUG' "$@"
+debug() {
+    log DEBUG "$@"
 }
 
 info() {
-    log 'INFO' "$@"
+    log INFO "$@"
 }
 
 fatal() {
-    log 'FATAL' "$@"
+    log FATAL "$@"
     exit 1
 }
 
 cleanup() {
     if [[ -n ${workdir+x} && -d $workdir && -w $workdir ]]; then
         info "Removing $workdir"
-        (
-            count=0
-            rm -rfv "$workdir" | while read -r; do
-                if (( count++ > 750 )); then
-                    printf '.'
-                    count=0
-                fi
-            done
-            printf '\n'
-        )&
-        wait $!
-        info "Removed '$workdir'"
+        local count=0
+        rm -rfv -- "$workdir" | while read -r; do
+            if (( count++ > 750 )); then
+                printf .
+                count=0
+            fi
+        done
+        printf '\n'
+        info "Removed $workdir"
     fi
 }
 
-abort() {
-    cleanup
-    exit 1
-}
-
-if [[ $1 == nocleanup ]]; then
+if [[ ${1-} == nocleanup ]]; then
     shift # discard nocleanup arg
 else
-    trap abort SIGINT SIGTERM SIGQUIT SIGABRT
+    trap exit INT TERM
     trap cleanup EXIT
 fi
 
-set -eu -o pipefail
-this="$(realpath "$0")"; readonly this
+this=$(realpath -- "$0"); readonly this
 cd "$(dirname "$this")"
 shellcheck "$this"
 
-readonly reqs_dir='./requirements'
+readonly reqs_dir=requirements
 
-workdir=
-if [[ -n ${1+x} ]]; then
-    arg_dir="$(realpath "$1")"
-    if [[ ! ( -d $arg_dir && -w $arg_dir ) ]]; then
-        fatal "$arg_dir is not a valid directory"
+if [[ ${1+x} ]]; then
+    tmp_dir=$(realpath -- "$1")
+    if [[ ! ( -d $tmp_dir && -w $tmp_dir ) ]]; then
+        fatal "$tmp_dir is not a writable directory"
     fi
-    workdir="$(mktemp -d "$arg_dir/check-requirements.XXXX")"
 else
-    workdir="$(mktemp -d "/tmp/check-requirements.XXXX")"
+    tmp_dir=/tmp
 fi
-readonly workdir
 
+workdir=$(mktemp -d "$tmp_dir/check-requirements.XXXX"); readonly workdir
 info "Working directory: $workdir"
 
-assert_arg_count() {
-    local argcount="$1"; shift
-    if (( $# != argcount )); then
-        fatal "${FUNCNAME[1]}: incorrect number of args"
-    fi
-}
-
 check_requirements() {
-    assert_arg_count 2 "$@"
-    local venv="$1"
-    local reqs="$2"
+    local reqs=$1
 
     info "$reqs: beginning check"
-    (
-        # shellcheck source=/dev/null
-        source "$venv/bin/activate"
-        pip --disable-pip-version-check install -q -r "$reqs"
-    )
+    pip --disable-pip-version-check install -qr "$reqs"
     info "$reqs: OK"
 }
 
 check_convert_script() {
-    assert_arg_count 1 "$@"
-    local py="$1"; shift    # e.g. ./convert-hf-to-gguf.py
-    local pyname; pyname="$(basename "$py")"    # e.g. convert-hf-to-gguf.py
-    pyname="${pyname%.py}"  # e.g. convert-hf-to-gguf
+    local py=$1             # e.g. ./convert-hf-to-gguf.py
+    local pyname=${py##*/}  # e.g. convert-hf-to-gguf.py
+    pyname=${pyname%.py}    # e.g. convert-hf-to-gguf
 
     info "$py: beginning check"
 
     local reqs="$reqs_dir/requirements-$pyname.txt"
-    if [[ ! -r "$reqs" ]]; then
+    if [[ ! -r $reqs ]]; then
         fatal "$py missing requirements. Expected: $reqs"
     fi
 
     local venv="$workdir/$pyname-venv"
     python3 -m venv "$venv"
 
-    check_requirements "$venv" "$reqs"
-
-    # Because we mask the return value of the subshell,
-    # we don't need to use set +e/-e.
-    # shellcheck disable=SC2155
-    local py_err=$(
+    (
         # shellcheck source=/dev/null
         source "$venv/bin/activate"
-        python "$py" 2>&1
+
+        check_requirements "$reqs"
+
+        python - "$py" "$pyname" <<EOF
+import sys
+from importlib.machinery import SourceFileLoader
+py, pyname = sys.argv[1:]
+SourceFileLoader(pyname, py).load_module()
+EOF
     )
 
-    # shellcheck disable=SC2181
-    if grep -Fe 'ModuleNotFoundError' <<< "$py_err"; then
-        fatal "$py: some imports not declared in $reqs"
-    fi
+    rm -rf -- "$venv"
+
     info "$py: imports OK"
 }
@@ -156,25 +130,37 @@ readonly ignore_eq_eq='check_requirements: ignore "=="'
 for req in "$reqs_dir"/*; do
     # Check that all sub-requirements are added to top-level requirements.txt
-    if ! grep -qFe "$req" ./requirements.txt; then
-        fatal "$req needs to be added to ./requirements.txt"
+    if ! grep -qF "$req" requirements.txt; then
+        fatal "$req needs to be added to requirements.txt"
     fi
 
     # Make sure exact release versions aren't being pinned in the requirements
     # Filters out the ignore string
-    req_no_ignore_eq_eq="$(grep -vF "$ignore_eq_eq" "$req")"
-    if grep -Fe '==' <<< "$req_no_ignore_eq_eq" ; then
-        fatal "Avoid pinning exact package versions. Use '~=' instead.\nYou can suppress this error by appending the following to the line: \n\t# $ignore_eq_eq"
+    if grep -vF "$ignore_eq_eq" "$req" | grep -q '=='; then
+        tab=$'\t'
+        cat >&2 <<EOF
+FATAL: Avoid pinning exact package versions. Use '~=' instead.
+You can suppress this error by appending the following to the line:
+$tab# $ignore_eq_eq
+EOF
+        exit 1
     fi
 done
 
 all_venv="$workdir/all-venv"
 python3 -m venv "$all_venv"
-check_requirements "$all_venv" './requirements.txt'
 
-check_convert_script './convert.py'
-for py in ./convert-*.py;do
+(
+    # shellcheck source=/dev/null
+    source "$all_venv/bin/activate"
+    check_requirements requirements.txt
+)
+
+rm -rf -- "$all_venv"
+
+check_convert_script convert.py
+for py in convert-*.py; do
     check_convert_script "$py"
 done
 
-info "Done! No issues found."
+info 'Done! No issues found.'


@@ -238,7 +238,7 @@ class Model:
         tokens: list[bytearray] = []
         toktypes: list[int] = []
 
-        from transformers import AutoTokenizer  # type: ignore[attr-defined]
+        from transformers import AutoTokenizer
         tokenizer = AutoTokenizer.from_pretrained(dir_model)
         vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
         assert max(tokenizer.vocab.values()) < vocab_size
@@ -848,7 +848,7 @@ class StableLMModel(Model):
         hparams = self.hparams
         block_count = hparams["num_hidden_layers"]
 
-        self.gguf_writer.add_name(dir_model.name)
+        self.gguf_writer.add_name(self.dir_model.name)
         self.gguf_writer.add_context_length(hparams["max_position_embeddings"])
         self.gguf_writer.add_embedding_length(hparams["hidden_size"])
         self.gguf_writer.add_block_count(block_count)
@@ -894,7 +894,7 @@ class QwenModel(Model):
         tokens: list[bytearray] = []
         toktypes: list[int] = []
 
-        from transformers import AutoTokenizer  # type: ignore[attr-defined]
+        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
         vocab_size = hparams["vocab_size"]
         assert max(tokenizer.get_vocab().values()) < vocab_size
@@ -1112,43 +1112,48 @@ def parse_args() -> argparse.Namespace:
     return parser.parse_args()
 
 
-args = parse_args()
+def main() -> None:
+    args = parse_args()
 
-dir_model = args.model
-if not dir_model.is_dir():
-    print(f'Error: {args.model} is not a directory', file=sys.stderr)
-    sys.exit(1)
+    dir_model = args.model
+    if not dir_model.is_dir():
+        print(f'Error: {args.model} is not a directory', file=sys.stderr)
+        sys.exit(1)
 
-ftype_map = {
-    "f32": gguf.GGMLQuantizationType.F32,
-    "f16": gguf.GGMLQuantizationType.F16,
-}
+    ftype_map = {
+        "f32": gguf.GGMLQuantizationType.F32,
+        "f16": gguf.GGMLQuantizationType.F16,
+    }
 
-if args.outfile is not None:
-    fname_out = args.outfile
-else:
-    # output in the same directory as the model by default
-    fname_out = dir_model / f'ggml-model-{args.outtype}.gguf'
+    if args.outfile is not None:
+        fname_out = args.outfile
+    else:
+        # output in the same directory as the model by default
+        fname_out = dir_model / f'ggml-model-{args.outtype}.gguf'
 
-print(f"Loading model: {dir_model.name}")
+    print(f"Loading model: {dir_model.name}")
 
-hparams = Model.load_hparams(dir_model)
+    hparams = Model.load_hparams(dir_model)
 
-with torch.inference_mode():
-    model_class = Model.from_model_architecture(hparams["architectures"][0])
-    model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian)
+    with torch.inference_mode():
+        model_class = Model.from_model_architecture(hparams["architectures"][0])
+        model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian)
 
-    print("Set model parameters")
-    model_instance.set_gguf_parameters()
+        print("Set model parameters")
+        model_instance.set_gguf_parameters()
 
-    print("Set model tokenizer")
-    model_instance.set_vocab()
+        print("Set model tokenizer")
+        model_instance.set_vocab()
 
-    if args.vocab_only:
-        print(f"Exporting model vocab to '{fname_out}'")
-        model_instance.write_vocab()
-    else:
-        print(f"Exporting model to '{fname_out}'")
-        model_instance.write()
+        if args.vocab_only:
+            print(f"Exporting model vocab to '{fname_out}'")
+            model_instance.write_vocab()
+        else:
+            print(f"Exporting model to '{fname_out}'")
+            model_instance.write()
 
-    print(f"Model successfully exported to '{fname_out}'")
+        print(f"Model successfully exported to '{fname_out}'")
+
+
+if __name__ == '__main__':
+    main()
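
Moving the module-level code into main() behind an `if __name__ == '__main__':` guard is what makes the import-only check in check-requirements.sh possible: importing the script now evaluates only definitions, and the conversion runs only when the file is executed directly. A small sketch of that behavior (file and function names here are hypothetical, not from the commit):

# guarded_convert.py -- hypothetical file illustrating the pattern
def main() -> None:
    print("running the conversion")

if __name__ == '__main__':
    main()  # runs only for `python guarded_convert.py`, never on import

# In another process, `import guarded_convert` (or loading it via
# SourceFileLoader, as check-requirements.sh does) performs no conversion;
# only a missing top-level dependency would raise ImportError.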


@@ -47,95 +47,96 @@ def write_tensor_header(fout: BinaryIO, name: str, shape: Sequence[int], data_ty
     fout.seek((fout.tell() + 31) & -32)
 
-if len(sys.argv) < 2:
-    print(f"Usage: python {sys.argv[0]} <path> [arch]")
-    print(
-        "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
-    )
-    print(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
-    sys.exit(1)
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print(f"Usage: python {sys.argv[0]} <path> [arch]")
+        print(
+            "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
+        )
+        print(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
+        sys.exit(1)
 
-input_json = os.path.join(sys.argv[1], "adapter_config.json")
-input_model = os.path.join(sys.argv[1], "adapter_model.bin")
-output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
+    input_json = os.path.join(sys.argv[1], "adapter_config.json")
+    input_model = os.path.join(sys.argv[1], "adapter_model.bin")
+    output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
 
-model = torch.load(input_model, map_location="cpu")
-arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
+    model = torch.load(input_model, map_location="cpu")
+    arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
 
-if arch_name not in gguf.MODEL_ARCH_NAMES.values():
-    print(f"Error: unsupported architecture {arch_name}")
-    sys.exit(1)
+    if arch_name not in gguf.MODEL_ARCH_NAMES.values():
+        print(f"Error: unsupported architecture {arch_name}")
+        sys.exit(1)
 
-arch = list(gguf.MODEL_ARCH_NAMES.keys())[list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)]
-name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone
+    arch = list(gguf.MODEL_ARCH_NAMES.keys())[list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)]
+    name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone
 
-with open(input_json, "r") as f:
-    params = json.load(f)
+    with open(input_json, "r") as f:
+        params = json.load(f)
 
-if params["peft_type"] != "LORA":
-    print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
-    sys.exit(1)
+    if params["peft_type"] != "LORA":
+        print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
+        sys.exit(1)
 
-if params["fan_in_fan_out"] is True:
-    print("Error: param fan_in_fan_out is not supported")
-    sys.exit(1)
+    if params["fan_in_fan_out"] is True:
+        print("Error: param fan_in_fan_out is not supported")
+        sys.exit(1)
 
-if params["bias"] is not None and params["bias"] != "none":
-    print("Error: param bias is not supported")
-    sys.exit(1)
+    if params["bias"] is not None and params["bias"] != "none":
+        print("Error: param bias is not supported")
+        sys.exit(1)
 
-# TODO: these seem to be layers that have been trained but without lora.
-# doesn't seem widely used but eventually should be supported
-if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0:
-    print("Error: param modules_to_save is not supported")
-    sys.exit(1)
+    # TODO: these seem to be layers that have been trained but without lora.
+    # doesn't seem widely used but eventually should be supported
+    if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0:
+        print("Error: param modules_to_save is not supported")
+        sys.exit(1)
 
-with open(output_path, "wb") as fout:
-    fout.truncate()
+    with open(output_path, "wb") as fout:
+        fout.truncate()
 
-    write_file_header(fout, params)
-    for k, v in model.items():
-        orig_k = k
-        if k.endswith(".default.weight"):
-            k = k.replace(".default.weight", ".weight")
-        if k in ["llama_proj.weight", "llama_proj.bias"]:
-            continue
-        if k.endswith("lora_A.weight"):
-            if v.dtype != torch.float16 and v.dtype != torch.float32:
-                v = v.float()
-            v = v.T
-        else:
-            v = v.float()
+        write_file_header(fout, params)
+        for k, v in model.items():
+            orig_k = k
+            if k.endswith(".default.weight"):
+                k = k.replace(".default.weight", ".weight")
+            if k in ["llama_proj.weight", "llama_proj.bias"]:
+                continue
+            if k.endswith("lora_A.weight"):
+                if v.dtype != torch.float16 and v.dtype != torch.float32:
+                    v = v.float()
+                v = v.T
+            else:
+                v = v.float()
 
-        t = v.detach().numpy()
+            t = v.detach().numpy()
 
-        prefix = "base_model.model."
-        if k.startswith(prefix):
-            k = k[len(prefix) :]
+            prefix = "base_model.model."
+            if k.startswith(prefix):
+                k = k[len(prefix) :]
 
-        lora_suffixes = (".lora_A.weight", ".lora_B.weight")
-        if k.endswith(lora_suffixes):
-            suffix = k[-len(lora_suffixes[0]):]
-            k = k[: -len(lora_suffixes[0])]
-        else:
-            print(f"Error: unrecognized tensor name {orig_k}")
-            sys.exit(1)
+            lora_suffixes = (".lora_A.weight", ".lora_B.weight")
+            if k.endswith(lora_suffixes):
+                suffix = k[-len(lora_suffixes[0]):]
+                k = k[: -len(lora_suffixes[0])]
+            else:
+                print(f"Error: unrecognized tensor name {orig_k}")
+                sys.exit(1)
 
-        tname = name_map.get_name(k)
-        if tname is None:
-            print(f"Error: could not map tensor name {orig_k}")
-            print(" Note: the arch parameter must be specified if the model is not llama")
-            sys.exit(1)
+            tname = name_map.get_name(k)
+            if tname is None:
+                print(f"Error: could not map tensor name {orig_k}")
+                print(" Note: the arch parameter must be specified if the model is not llama")
+                sys.exit(1)
 
-        if suffix == ".lora_A.weight":
-            tname += ".weight.loraA"
-        elif suffix == ".lora_B.weight":
-            tname += ".weight.loraB"
-        else:
-            assert False
+            if suffix == ".lora_A.weight":
+                tname += ".weight.loraA"
+            elif suffix == ".lora_B.weight":
+                tname += ".weight.loraB"
+            else:
+                assert False
 
-        print(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
-        write_tensor_header(fout, tname, t.shape, t.dtype)
-        t.tofile(fout)
+            print(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
+            write_tensor_header(fout, tname, t.shape, t.dtype)
+            t.tofile(fout)
 
-    print(f"Converted {input_json} and {input_model} to {output_path}")
+        print(f"Converted {input_json} and {input_model} to {output_path}")