Merge remote-tracking branch 'origin/master' into concedo

# Conflicts:
#	.github/workflows/docker.yml
#	CMakeLists.txt
#	flake.nix
#	main.cpp
Concedo 2023-03-21 20:48:56 +08:00
commit 91e2b43575
7 changed files with 26 additions and 11 deletions

convert-pth-to-ggml.py

@@ -17,6 +17,7 @@
 # and vocabulary.
 #
 import argparse
+import os
 import sys
 import json
 import struct
@@ -44,8 +45,14 @@ def get_n_parts(dim):
 def load_hparams_and_tokenizer(dir_model):
 
+    # `dir_model` is something like `models/7B` or `models/7B/`.
+    # "tokenizer.model" is expected under model's parent dir.
+    # When `dir_model` is a symlink, f"{dir_model}/../tokenizer.model" would not be found.
+    # Let's use the model's parent dir directly.
+    model_parent_dir = os.path.dirname(os.path.normpath(dir_model))
+
     fname_hparams = f"{dir_model}/params.json"
-    fname_tokenizer = f"{dir_model}/../tokenizer.model"
+    fname_tokenizer = f"{model_parent_dir}/tokenizer.model"
 
     with open(fname_hparams, "r") as f:
         hparams = json.load(f)
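
The point of the change above: the old expression hands `models/7B/../tokenizer.model` to the OS, which resolves the `7B` symlink before applying `..`, so the lookup lands next to the symlink's target rather than next to the symlink itself. The new expression computes the parent purely as a string. A minimal sketch of the difference, not from the commit, with made-up paths:

    import os

    dir_model = "models/7B"   # suppose this is a symlink to /data/llama/7B

    # Old: resolved by the kernel at open() time; the symlink is followed
    # first, so ".." climbs out of /data/llama/7B and the script looks for
    # /data/llama/tokenizer.model, which usually does not exist.
    old_path = f"{dir_model}/../tokenizer.model"

    # New: normpath() strips any trailing slash, dirname() takes the parent
    # lexically, so the symlink is never followed.
    parent = os.path.dirname(os.path.normpath(dir_model))  # "models"
    new_path = f"{parent}/tokenizer.model"                 # "models/tokenizer.model"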

Binary file not shown.

main.cpp

@@ -107,14 +107,14 @@ int llama_model_load(const std::string & fname, llama_model & model, gpt_vocab &
     {
         uint32_t magic;
         fin.read((char *) &magic, sizeof(magic));
-        if (magic == 0x67676d6c) {
+        if (magic == FILE_MAGIC_UNVERSIONED) {
             fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
                     __func__, fname.c_str());
             legacy_file_format = true;
         }
         else
         {
-            if (magic != 0x67676d66) {
+            if (magic != FILE_MAGIC) {
                 fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
                 return false;
             }
@@ -122,9 +122,9 @@ int llama_model_load(const std::string & fname, llama_model & model, gpt_vocab &
             uint32_t format_version;
             fin.read((char *) &format_version, sizeof(format_version));
 
-            if (format_version != 1) {
-                fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ")\n",
-                        __func__, fname.c_str(), format_version);
+            if (format_version != FILE_VERSION) {
+                fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
+                        __func__, fname.c_str(), format_version, FILE_VERSION);
                 return false;
             }
         }
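
For reference, the same two-field header check as a rough Python sketch: the constant values mirror the new defines (shown in the last hunk of this commit), the `check_header` helper is hypothetical, and a little-endian uint32 layout at the start of the file is assumed:

    import struct

    FILE_MAGIC_UNVERSIONED = 0x67676d6c  # "ggml": pre-versioned files
    FILE_MAGIC             = 0x67676d66  # "ggmf": versioned files
    FILE_VERSION           = 1

    def check_header(path):
        # Returns True for a legacy (unversioned) file, mirroring the
        # legacy_file_format flag above; raises on anything unreadable.
        with open(path, "rb") as f:
            (magic,) = struct.unpack("<I", f.read(4))
            if magic == FILE_MAGIC_UNVERSIONED:
                return True  # too old, but still loadable as legacy
            if magic != FILE_MAGIC:
                raise ValueError("bad magic")
            (version,) = struct.unpack("<I", f.read(4))
            if version != FILE_VERSION:
                raise ValueError(f"unsupported format version {version}, "
                                 f"expected {FILE_VERSION}")
            return False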

BIN main.exe
Binary file not shown.

quantize.cpp

@@ -64,12 +64,12 @@ bool llama_model_quantize(const std::string & fname_inp, const std::string & fna
     {
         uint32_t magic;
         finp.read((char *) &magic, sizeof(magic));
-        if (magic == 0x67676d6c) {
+        if (magic == FILE_MAGIC_UNVERSIONED) {
             fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
                     __func__, fname_inp.c_str());
             return false;
         }
-        if (magic != 0x67676d66) {
+        if (magic != FILE_MAGIC) {
             fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str());
             return false;
         }
@@ -79,9 +79,9 @@ bool llama_model_quantize(const std::string & fname_inp, const std::string & fna
         uint32_t format_version;
         finp.read((char *) &format_version, sizeof(format_version));
 
-        if (format_version != 1) {
-            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ")\n",
-                    __func__, fname_inp.c_str(), format_version);
+        if (format_version != FILE_VERSION) {
+            fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
+                    __func__, fname_inp.c_str(), format_version, FILE_VERSION);
             return false;
         }

Binary file not shown.

utils.h

@@ -48,6 +48,14 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
 std::string gpt_random_prompt(std::mt19937 & rng);
 
+//
+// Model file parsing
+//
+
+#define FILE_MAGIC_UNVERSIONED 0x67676d6c // pre-versioned files
+#define FILE_MAGIC 0x67676d66 // 'ggmf' in hex
+#define FILE_VERSION 1
+
 //
 // Vocab utils
 //
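
The two magics are just four ASCII bytes packed into a uint32; splitting the hex back into bytes makes the tags visible:

    >>> bytes.fromhex("67676d6c")
    b'ggml'
    >>> bytes.fromhex("67676d66")
    b'ggmf'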