Various enhancements and integration of pygmalion.cpp

parent 3f4967b827
commit 8dd8ab1659
20 changed files with 2362 additions and 526 deletions
Makefile (8 changes)

@@ -31,8 +31,8 @@ endif
 #
 # keep standard at C11 and C++11
-CFLAGS = -I. -O3 -DNDEBUG -std=c11 -fPIC
+CFLAGS = -I. -Ofast -DNDEBUG -std=c11 -fPIC
-CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
+CXXFLAGS = -I. -I./examples -Ofast -DNDEBUG -std=c++11 -fPIC
 LDFLAGS =

 #lets try enabling everything

@@ -152,7 +152,7 @@ ggml_blas.o: ggml.c ggml.h
 	$(CC) $(CFLAGS) -DGGML_USE_OPENBLAS -c ggml.c -o ggml_blas.o

 ggml_v1.o: otherarch/ggml_v1.c otherarch/ggml_v1.h
 	$(CC) $(CFLAGS) -c otherarch/ggml_v1.c -o ggml_v1.o

 llama.o: llama.cpp llama.h
 	$(CXX) $(CXXFLAGS) -c llama.cpp -o llama.o

@@ -193,6 +193,8 @@ perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o common.o
 embedding: examples/embedding/embedding.cpp ggml.o llama.o common.o
 	$(CXX) $(CXXFLAGS) examples/embedding/embedding.cpp ggml.o llama.o common.o -o embedding $(LDFLAGS)

+gptj: ggml_v1.o
+	$(CXX) $(CXXFLAGS) otherarch/gptj_v1_main.cpp otherarch/utils.cpp ggml_v1.o -o gptj $(LDFLAGS)
 #
 # Tests
 #
expose.cpp (31 changes)

@@ -24,17 +24,38 @@ extern "C"
 {

 	//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
-	static FileFormat file_format = FAIL;
+	static FileFormat file_format = FileFormat::BADFORMAT;

 	bool load_model(const load_model_inputs inputs)
 	{
 		std::string model = inputs.model_filename;
 		file_format = check_file_format(model.c_str());

-		if(file_format==GPTJ1 || file_format==GPTJ2)
+		if(file_format==FileFormat::GPTJ1 || file_format==FileFormat::GPTJ2 || file_format==FileFormat::GPTJ3)
 		{
 			printf("\n---\nIdentified as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-			return gptj_load_model(inputs, file_format);
+			ModelLoadResult lr = gptj_load_model(inputs, file_format);
+			if (lr == ModelLoadResult::RETRY_LOAD)
+			{
+				file_format = FileFormat::GPTJ2;
+				printf("\n---\nRetrying as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
+				lr = gptj_load_model(inputs, file_format);
+			}
+			if (lr == ModelLoadResult::RETRY_LOAD)
+			{
+				file_format = FileFormat::GPTJ3;
+				printf("\n---\nRetrying as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
+				lr = gptj_load_model(inputs, file_format);
+			}
+			if (lr == ModelLoadResult::FAIL || lr == ModelLoadResult::RETRY_LOAD)
+			{
+				return false;
+			}
+			else
+			{
+				return true;
+			}
 		}
 		else
 		{

@@ -45,7 +66,7 @@ extern "C"

 	generation_outputs generate(const generation_inputs inputs, generation_outputs &output)
 	{
-		if (file_format == GPTJ1 || file_format == GPTJ2)
+		if (file_format == FileFormat::GPTJ1 || file_format == FileFormat::GPTJ2 || file_format==FileFormat::GPTJ3)
 		{
 			return gptj_generate(inputs, output);
 		}
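The load path above boils down to a small retry cascade: attempt the detected format, and on a RETRY_LOAD result step through the newer GPT-J formats before giving up. Below is a minimal, self-contained sketch of that pattern; the stub loader merely stands in for gptj_load_model, and only the enum names and values are taken from this commit.

#include <cstdio>
#include <initializer_list>

enum FileFormat { BADFORMAT = 0, GPTJ1 = 100, GPTJ2 = 101, GPTJ3 = 102 };
enum ModelLoadResult { FAIL = 0, SUCCESS = 1, RETRY_LOAD = 2 };

// Stand-in for gptj_load_model(): pretend the file on disk is really a GPTJ3 model.
static ModelLoadResult stub_load(FileFormat fmt) {
    return (fmt == GPTJ3) ? SUCCESS : RETRY_LOAD;
}

// Same shape as the new load_model() logic: on RETRY_LOAD, retry with the next newer format.
static bool load_with_retries(FileFormat detected) {
    ModelLoadResult lr = stub_load(detected);
    for (FileFormat next : {GPTJ2, GPTJ3}) {
        if (lr != RETRY_LOAD) break;
        printf("Retrying as GPT-J model (ver %d)\n", (int)next);
        lr = stub_load(next);
    }
    return lr == SUCCESS;
}

int main() {
    printf("loaded: %s\n", load_with_retries(GPTJ1) ? "yes" : "no");
    return 0;
}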
@@ -17,7 +17,7 @@
 #include "otherarch/gptj_v2.cpp"

 //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
-static FileFormat file_format = FileFormat::FAIL;
+static FileFormat file_format = FileFormat::BADFORMAT;
 static gpt_vocab vocab;
 static gptj_model_v1 model_v1;
 static gptj_model model_v2;

@@ -30,9 +30,8 @@ static std::vector<gpt_vocab::id> current_context_tokens;
 static size_t mem_per_token = 0;
 static std::vector<float> logits;

-bool gptj_load_model(const load_model_inputs inputs, FileFormat in_file_format)
+ModelLoadResult gptj_load_model(const load_model_inputs inputs, FileFormat in_file_format)
 {
 	ggml_time_init();

 	file_format = in_file_format;

@@ -40,20 +39,42 @@ bool gptj_load_model(const load_model_inputs inputs, FileFormat in_file_format)
 	n_batch = params.n_batch = inputs.batch_size;
 	modelname = params.model = inputs.model_filename;

-	if (!legacy_gptj_model_load(params.model, model_v1, vocab)) {
-		fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
-		return false;
+	if (file_format == FileFormat::GPTJ1 || file_format == FileFormat::GPTJ2)
+	{
+		ModelLoadResult res = legacy_gptj_model_load(params.model, model_v1, vocab, file_format);
+		if(res==ModelLoadResult::FAIL)
+		{
+			fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
+			return res;
+		}
+		else if(res==ModelLoadResult::RETRY_LOAD)
+		{
+			printf("\nTensor Transposition Detected! Retrying GPT-J model loading...");
+			return res;
+		}
+		// determine the required inference memory per token:
+		legacy_gptj_eval(model_v1, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format);
+		return ModelLoadResult::SUCCESS;
 	}
-	if (file_format != FileFormat::GPTJ2)
+	else
 	{
-		printf("\n---\nWarning: Your model has an INVALID or OUTDATED format (ver %d). Please reconvert it for better results!\n---\n", file_format);
+		ModelLoadResult loadresult = gptj_model_load(params.model, model_v2, vocab);
+		if (loadresult == ModelLoadResult::FAIL)
+		{
+			fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
+			return loadresult;
+		}
+		else if (loadresult == ModelLoadResult::RETRY_LOAD)
+		{
+			printf("\nTensor Transposition Detected! Retrying GPT-J model loading...");
+			return loadresult;
+		}
+
+		// determine the required inference memory per token:
+		gptj_eval(model_v2, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
+		return ModelLoadResult::SUCCESS;
 	}
-
-	// determine the required inference memory per token:
-	legacy_gptj_eval(model_v1, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
-
-	return true;
 }

@@ -82,9 +103,10 @@ generation_outputs gptj_generate(const generation_inputs inputs, generation_outp
 	std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize(vocab, params.prompt);

 	//truncate to front of the prompt if its too long
-	if (embd_inp.size() + params.n_predict > model_v1.hparams.n_ctx)
+	auto nctx = ( (file_format == FileFormat::GPTJ1||file_format == FileFormat::GPTJ2)? model_v1.hparams.n_ctx:model_v2.hparams.n_ctx);
+	if (embd_inp.size() + params.n_predict > nctx)
 	{
-		int offset = embd_inp.size() - model_v1.hparams.n_ctx + params.n_predict;
+		int offset = embd_inp.size() - nctx + params.n_predict;
 		embd_inp = std::vector<llama_token>(embd_inp.begin() + offset, embd_inp.end());
 	}

@@ -114,7 +136,7 @@ generation_outputs gptj_generate(const generation_inputs inputs, generation_outp
 	embd_inp.erase(embd_inp.begin(), embd_inp.begin() + n_past);

 	//if using BLAS and prompt is big enough, switch to single thread and use a huge batch
-	bool blasmode = false;// (embd_inp.size() >= 32 && ggml_cpu_has_blas());
+	bool blasmode = false; //(embd_inp.size() >= 32 && ggml_cpu_has_blas());
 	int original_batch = params.n_batch;
 	int original_threads = params.n_threads;
 	if (blasmode)

@@ -135,7 +157,7 @@ generation_outputs gptj_generate(const generation_inputs inputs, generation_outp
 	timer_start();
 	double time1 = 0, time2 = 0;
 	unsigned int embd_inp_size = embd_inp.size();
-	const int n_vocab = model_v1.hparams.n_vocab;
+	const int n_vocab = ((file_format == FileFormat::GPTJ1||file_format == FileFormat::GPTJ2)? model_v1.hparams.n_vocab:model_v2.hparams.n_vocab);

 	printf("\n");

@@ -156,7 +178,15 @@ generation_outputs gptj_generate(const generation_inputs inputs, generation_outp
 		printf("\rGenerating (%d / %d tokens)", (1 + params.n_predict - remaining_tokens), params.n_predict);
 	}

-	if (!legacy_gptj_eval(model_v1, params.n_threads, n_past, embd, logits, mem_per_token))
+	bool evalres = false;
+	if(file_format==FileFormat::GPTJ1 || file_format==FileFormat::GPTJ2)
+	{
+		evalres = legacy_gptj_eval(model_v1, params.n_threads, n_past, embd, logits, mem_per_token, file_format);
+	}else
+	{
+		evalres = gptj_eval(model_v2, params.n_threads, n_past, embd, logits, mem_per_token);
+	}
+	if (!evalres)
 	{
 		fprintf(stderr, "Failed to predict\n");
 		snprintf(output.text, sizeof(output.text), "%s", "");

@@ -16,7 +16,7 @@
 #include "llamaextra.cpp"

 //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
-static FileFormat file_format = FileFormat::FAIL;
+static FileFormat file_format = FileFormat::BADFORMAT;
 static llama_context_params ctx_params;
 static gpt_params params;
 static int n_past = 0;
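One detail worth pulling out of gptj_generate is the prompt truncation step: when the prompt plus the requested number of new tokens would overflow the context window, only the tail of the prompt is kept. A self-contained sketch of just that step, with token ids reduced to int for brevity; nctx and n_predict play the same roles as in the diff.

#include <cstdio>
#include <vector>

// Keep only the tail of the prompt so that prompt tokens + tokens still to be generated
// fit inside the context window, mirroring the truncation in gptj_generate().
static std::vector<int> truncate_prompt(std::vector<int> embd_inp, int nctx, int n_predict) {
    if ((int)embd_inp.size() + n_predict > nctx) {
        int offset = (int)embd_inp.size() - nctx + n_predict;
        embd_inp = std::vector<int>(embd_inp.begin() + offset, embd_inp.end());
    }
    return embd_inp;
}

int main() {
    std::vector<int> prompt(100);                    // 100 prompt tokens
    std::vector<int> kept = truncate_prompt(prompt, 64, 16);
    printf("kept %zu of 100 tokens\n", kept.size()); // keeps the last 48 (64 - 16)
    return 0;
}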
BIN llamacpp.dll (binary file not shown)
@@ -37,12 +37,17 @@ use_blas = False # if true, uses OpenBLAS for acceleration. libopenblas.dll must

 def init_library():
     global handle, use_blas
-    dir_path = os.path.dirname(os.path.realpath(__file__))
+    libname = ""
     if use_blas:
-        #OpenBLAS should provide about a 2x speedup on prompt ingestion if compatible.
-        handle = ctypes.CDLL(os.path.join(dir_path, "llamacpp_blas.dll"))
+        libname = "llamacpp_blas.dll"
     else:
-        handle = ctypes.CDLL(os.path.join(dir_path, "llamacpp.dll"))
+        libname = "llamacpp.dll"
+
+    print("Initializing dynamic library: " + libname)
+    dir_path = os.path.dirname(os.path.realpath(__file__))
+
+    #OpenBLAS should provide about a 2x speedup on prompt ingestion if compatible.
+    handle = ctypes.CDLL(os.path.join(dir_path, libname ))

     handle.load_model.argtypes = [load_model_inputs]
     handle.load_model.restype = ctypes.c_bool
BIN main.exe (binary file not shown)
@@ -49,10 +49,10 @@ void print_tok_vec(std::vector<int> &embd)
 	fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
 	if (!fin) {
 		fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
-		return FileFormat::FAIL;
+		return FileFormat::BADFORMAT;
 	}

-	FileFormat fileformat = FileFormat::FAIL;
+	FileFormat fileformat = FileFormat::BADFORMAT;
 	uint32_t magic;
 	fin.read((char *) &magic, sizeof(magic));
 	if (magic == 0x67676d6c) { //v1 format ggml, alpaca, old gptj and gpt2 models
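The detection above keys on the 4-byte magic at the start of the file: 0x67676d6c is the character sequence "ggml", and it marks the v1 container shared by the original ggml, alpaca, old GPT-J and GPT-2 exports. A minimal sketch of such a sniffing routine follows; only the magic value and the BADFORMAT fallback come from the commit, the rest is illustrative.

#include <cstdint>
#include <cstdio>
#include <fstream>

enum FileFormat { BADFORMAT = 0, GGML = 1 };

// Read the leading 4-byte magic and classify the container, in the spirit of check_file_format().
static FileFormat sniff_format(const char *fname) {
    std::ifstream fin(fname, std::ios::binary);
    if (!fin) return BADFORMAT;
    uint32_t magic = 0;
    fin.read(reinterpret_cast<char *>(&magic), sizeof(magic));
    if (!fin) return BADFORMAT;
    return (magic == 0x67676d6c) ? GGML : BADFORMAT;  // "ggml" magic => v1 container
}

int main(int argc, char **argv) {
    if (argc < 2) { printf("usage: %s model.bin\n", argv[0]); return 1; }
    printf("format id: %d\n", (int)sniff_format(argv[1]));
    return 0;
}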
@@ -13,23 +13,30 @@

 #include "expose.h"

-//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
 enum FileFormat
 {
-	FAIL=0,
-	GGML=1,
-	GGHF=2,
-	GGJT=3,
+	BADFORMAT=0, //unknown, uninit, or failed to load
+	GGML=1, // 1=(original llama ggml, alpaca, GPT4ALL, GPTJ header)
+	GGHF=2, // 2=(llama ggmf)
+	GGJT=3, // 3=(llama ggjt)

-	GPTJ1=100,
-	GPTJ2=101,
+	GPTJ1=100, //the very first super old GPTJ format
+	GPTJ2=101, //pygmalion, uses old ggml lib
+	GPTJ3=102, //uses new ggml lib

 	GPT2=200,
 };

+enum ModelLoadResult
+{
+	FAIL = 0,
+	SUCCESS = 1,
+	RETRY_LOAD = 2, //used if it's suspected that the model is an older format
+};

 bool llama_load_model(const load_model_inputs inputs, FileFormat file_format);
 generation_outputs llama_generate(const generation_inputs inputs, generation_outputs &output);
-bool gptj_load_model(const load_model_inputs inputs, FileFormat in_file_format);
+ModelLoadResult gptj_load_model(const load_model_inputs inputs, FileFormat in_file_format);
 generation_outputs gptj_generate(const generation_inputs inputs, generation_outputs &output);
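A plausible reason the FileFormat member was renamed from FAIL to BADFORMAT: both enums here are unscoped, so their enumerators land in the same surrounding scope, and adding ModelLoadResult::FAIL while FileFormat still defined FAIL would be a redefinition. A small sketch of the now-valid combination (enum class would be the other way out, at the cost of touching every use site):

#include <cstdio>

// Two unscoped enums in one scope may not both define FAIL; after the rename they coexist.
enum FileFormat      { BADFORMAT = 0, GGML = 1, GGHF = 2, GGJT = 3, GPTJ1 = 100, GPTJ2 = 101, GPTJ3 = 102, GPT2 = 200 };
enum ModelLoadResult { FAIL = 0, SUCCESS = 1, RETRY_LOAD = 2 };

int main() {
    FileFormat ff = BADFORMAT;                    // enumerators are visible unqualified...
    ModelLoadResult r = ModelLoadResult::FAIL;    // ...and (since C++11) also via the enum name
    printf("%d %d\n", (int)ff, (int)r);
    return 0;
}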
otherarch/ggml_v1.c (1796 changes): file diff suppressed because it is too large
@@ -198,6 +198,8 @@ struct ggml_v1_object;
 struct ggml_v1_context;

 enum ggml_v1_type {
+    GGML_V1_TYPE_Q4_0,
+    GGML_V1_TYPE_Q4_1,
     GGML_V1_TYPE_I8,
     GGML_V1_TYPE_I16,
     GGML_V1_TYPE_I32,

@@ -326,7 +328,10 @@ void ggml_v1_print_objects(const struct ggml_v1_context * ctx);
 int    ggml_v1_nelements(const struct ggml_v1_tensor * tensor);
 size_t ggml_v1_nbytes   (const struct ggml_v1_tensor * tensor);

-size_t ggml_v1_type_size (enum ggml_v1_type type);
+int    ggml_v1_blck_size (enum ggml_v1_type type);
+size_t ggml_v1_type_size (enum ggml_v1_type type); // size in bytes for all elements in a block
+float  ggml_v1_type_sizef(enum ggml_v1_type type); // ggml_v1_type_size()/ggml_v1_blck_size() as float

 size_t ggml_v1_element_size(const struct ggml_v1_tensor * tensor);

 struct ggml_v1_context * ggml_v1_init(struct ggml_v1_init_params params);
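The new blck_size/type_sizef pair exists because block-quantized types no longer cost a whole number of bytes per element: type_size() is the byte size of one block, blck_size() the number of elements per block, and type_sizef() their ratio, so size accounting can remain a single multiply. Below is a sketch of that accounting with made-up block parameters; the figures are not asserted to match the real GGML_V1_TYPE_Q4_0 layout.

#include <cstdio>
#include <cstddef>

// Hypothetical block parameters, purely for illustration.
static const int    kBlckSize = 32;  // elements per block
static const size_t kTypeSize = 20;  // bytes per block

static float type_sizef() { return (float)kTypeSize / (float)kBlckSize; }  // bytes per element

// Bytes needed for an n-element tensor; n must be a multiple of the block size.
static size_t tensor_bytes(size_t nelements) {
    return (nelements / kBlckSize) * kTypeSize;
}

int main() {
    size_t n = 4096u * 4096u;
    printf("per element: %.3f bytes, tensor: %zu bytes (%.0f via sizef)\n",
           type_sizef(), tensor_bytes(n), n * type_sizef());
    return 0;
}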
@@ -17,13 +17,15 @@

 // load the model's weights from a file
-bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gpt_vocab & vocab) {
+ModelLoadResult legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gpt_vocab & vocab, FileFormat file_format) {
     printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());

+    bool super_old_format = (file_format==FileFormat::GPTJ1);
+
     auto fin = std::ifstream(fname, std::ios::binary);
     if (!fin) {
         fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
-        return false;
+        return ModelLoadResult::FAIL;
     }

     // verify magic

@@ -32,7 +34,7 @@ bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gp
         fin.read((char *) &magic, sizeof(magic));
         if (magic != 0x67676d6c) {
             fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
-            return false;
+            return ModelLoadResult::FAIL;
         }
     }

@@ -65,7 +67,7 @@ bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gp
         if (n_vocab != model.hparams.n_vocab) {
             fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
                     __func__, fname.c_str(), n_vocab, model.hparams.n_vocab);
-            return false;
+            return ModelLoadResult::FAIL;
         }

         std::string word;

@@ -81,9 +83,23 @@ bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gp
         }
     }

-    // for the big tensors, we have the option to store the data in 16-bit floats
+    // for the big tensors, we have the option to store the data in 16-bit floats or quantized
     // in order to save memory and also to speed up the computation
-    const ggml_v1_type wtype = model.hparams.f16 ? GGML_V1_TYPE_F16 : GGML_V1_TYPE_F32;
+    ggml_v1_type wtype = GGML_V1_TYPE_COUNT;
+    switch (model.hparams.f16) {
+        case 0: wtype = GGML_V1_TYPE_F32;  break;
+        case 1: wtype = GGML_V1_TYPE_F16;  break;
+        case 2: wtype = GGML_V1_TYPE_Q4_0; break;
+        case 3: wtype = GGML_V1_TYPE_Q4_1; break;
+        default:
+        {
+            fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
+                    __func__, fname.c_str(), model.hparams.f16);
+            return ModelLoadResult::FAIL;
+        }
+    }
+
+    const ggml_v1_type wtype2 = GGML_V1_TYPE_F32;

     auto & ctx = model.ctx;

@@ -97,31 +113,31 @@ bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gp
         const int n_ctx   = hparams.n_ctx;
         const int n_vocab = hparams.n_vocab;

-        ctx_size += n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // ln_f_g
-        ctx_size += n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // ln_f_b
+        ctx_size += n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // ln_f_g
+        ctx_size += n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // ln_f_b

-        ctx_size += n_embd*n_vocab*ggml_v1_type_size(wtype); // wte
+        ctx_size += n_embd*n_vocab*ggml_v1_type_sizef(wtype); // wte

-        ctx_size += n_embd*n_vocab*ggml_v1_type_size(wtype); // lmh_g
-        ctx_size += n_vocab*ggml_v1_type_size(GGML_V1_TYPE_F32); // lmh_b
+        ctx_size += n_embd*n_vocab*ggml_v1_type_sizef(wtype); // lmh_g
+        ctx_size += n_vocab*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // lmh_b

-        ctx_size += n_layer*(n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // ln_1_g
-        ctx_size += n_layer*(n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // ln_1_b
+        ctx_size += n_layer*(n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32)); // ln_1_g
+        ctx_size += n_layer*(n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32)); // ln_1_b

-        ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_size(wtype)); // c_attn_q_proj_w
-        ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_size(wtype)); // c_attn_k_proj_w
-        ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_size(wtype)); // c_attn_v_proj_w
+        ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_attn_q_proj_w
+        ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_attn_k_proj_w
+        ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_attn_v_proj_w

-        ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_size(wtype)); // c_attn_proj_w
+        ctx_size += n_layer*(n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_attn_proj_w

-        ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_size(wtype)); // c_mlp_fc_w
-        ctx_size += n_layer*(       4*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // c_mlp_fc_b
+        ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_mlp_fc_w
+        ctx_size += n_layer*(       4*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32)); // c_mlp_fc_b

-        ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_size(wtype)); // c_mlp_proj_w_trans
-        ctx_size += n_layer*(         n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32)); // c_mlp_proj_b
+        ctx_size += n_layer*(4*n_embd*n_embd*ggml_v1_type_sizef(wtype)); // c_mlp_proj_w_trans
+        ctx_size += n_layer*(         n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32)); // c_mlp_proj_b

-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_size(GGML_V1_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_v1_type_sizef(GGML_V1_TYPE_F32); // memory_v

         ctx_size += (5 + 10*n_layer)*256; // object overhead

@@ -138,7 +154,7 @@ bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gp
         model.ctx = ggml_v1_init(params);
         if (!model.ctx) {
             fprintf(stderr, "%s: ggml_v1_init() failed\n", __func__);
-            return false;
+            return ModelLoadResult::FAIL;
         }
     }

@@ -181,8 +197,15 @@ bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gp
             layer.c_attn_v_proj_w = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, n_embd);

             layer.c_attn_proj_w   = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, n_embd);

-            layer.c_mlp_fc_w = ggml_v1_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
+            if(super_old_format)
+            {
+                layer.c_mlp_fc_w = ggml_v1_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
+            }
+            else
+            {
+                layer.c_mlp_fc_w = ggml_v1_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
+            }
             layer.c_mlp_fc_b = ggml_v1_new_tensor_1d(ctx, GGML_V1_TYPE_F32, 4*n_embd);

             layer.c_mlp_proj_w_trans = ggml_v1_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);

@@ -257,27 +280,55 @@ bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gp

         if (model.tensors.find(name.data()) == model.tensors.end()) {
             fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
-            return false;
+            return ModelLoadResult::FAIL;
         }

         auto tensor = model.tensors[name.data()];
         if (ggml_v1_nelements(tensor) != nelements) {
             fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
-            return false;
+            return ModelLoadResult::FAIL;
         }

-        if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
-            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
-                    __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
-            return false;
+        if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1])
+        {
+            //test for transposition and retry older loader
+            if(tensor->ne[0]==ne[1] && tensor->ne[1]==ne[0] && should_transpose_layer(name))
+            {
+                printf("\nFound a transposed tensor. This could be an older model. Retrying load...");
+                ggml_v1_free(ctx);
+                return ModelLoadResult::RETRY_LOAD;
+            }
+            else
+            {
+                fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+                        __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
+                return ModelLoadResult::FAIL;
+            }
         }

-        const size_t bpe = tensor->type == GGML_V1_TYPE_I8 ? 1 : (ftype == 0) ? sizeof(float) : sizeof(ggml_v1_fp16_t);
+        if (0) {
+            static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
+            printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_v1_nbytes(tensor)/1024.0/1024.0, ggml_v1_nbytes(tensor));
+        }

-        if (nelements*bpe != ggml_v1_nbytes(tensor)) {
+        size_t bpe = 0;
+
+        switch (ftype) {
+            case 0: bpe = ggml_v1_type_size(GGML_V1_TYPE_F32);  break;
+            case 1: bpe = ggml_v1_type_size(GGML_V1_TYPE_F16);  break;
+            case 2: bpe = ggml_v1_type_size(GGML_V1_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
+            case 3: bpe = ggml_v1_type_size(GGML_V1_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
+            default:
+            {
+                fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
+                return ModelLoadResult::FAIL;
+            }
+        };
+
+        if ((nelements*bpe)/ggml_v1_blck_size(tensor->type) != ggml_v1_nbytes(tensor)) {
             fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                     __func__, name.data(), ggml_v1_nbytes(tensor), nelements*bpe);
-            return false;
+            return ModelLoadResult::FAIL;
         }

         fin.read(reinterpret_cast<char *>(tensor->data), ggml_v1_nbytes(tensor));

@@ -297,7 +348,7 @@ bool legacy_gptj_model_load(const std::string & fname, gptj_model_v1 & model, gp

     fin.close();

-    return true;
+    return ModelLoadResult::SUCCESS;
 }

 // evaluate the transformer

@@ -316,7 +367,10 @@ bool legacy_gptj_eval(
         const int n_past,
         const std::vector<gpt_vocab::id> & embd_inp,
               std::vector<float>         & embd_w,
-        size_t                           & mem_per_token) {
+        size_t                           & mem_per_token,
+        FileFormat file_format) {
+
+    bool super_old_format = (file_format==FileFormat::GPTJ1);
     const int N = embd_inp.size();

     const auto & hparams = model.hparams;

@@ -379,9 +433,21 @@ bool legacy_gptj_eval(

         // self-attention
         {
-            struct ggml_v1_tensor * Qcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_q_proj_w), cur);
-            struct ggml_v1_tensor * Kcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_k_proj_w), cur);
-            struct ggml_v1_tensor * Vcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_v_proj_w), cur);
+            struct ggml_v1_tensor * Qcur;
+            struct ggml_v1_tensor * Kcur;
+            struct ggml_v1_tensor * Vcur;
+            if(super_old_format)
+            {
+                Qcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_q_proj_w), cur);
+                Kcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_k_proj_w), cur);
+                Vcur = ggml_v1_mul_mat(ctx0, ggml_v1_transpose(ctx0, model.layers[il].c_attn_v_proj_w), cur);
+            }
+            else
+            {
+                Qcur = ggml_v1_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur);
+                Kcur = ggml_v1_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur);
+                Vcur = ggml_v1_mul_mat(ctx0, model.layers[il].c_attn_v_proj_w, cur);
+            }

             // store key and value to memory
             if (N >= 1) {

@@ -448,9 +514,18 @@ bool legacy_gptj_eval(
                     ggml_v1_new_tensor_2d(ctx0, GGML_V1_TYPE_F32, n_embd, N));

             // projection (no bias)
-            cur = ggml_v1_mul_mat(ctx0,
-                    ggml_v1_transpose(ctx0, model.layers[il].c_attn_proj_w),
-                    cur);
+            if(super_old_format)
+            {
+                cur = ggml_v1_mul_mat(ctx0,
+                        ggml_v1_transpose(ctx0, model.layers[il].c_attn_proj_w),
+                        cur);
+            }
+            else
+            {
+                cur = ggml_v1_mul_mat(ctx0,
+                        model.layers[il].c_attn_proj_w,
+                        cur);
+            }
         }

         struct ggml_v1_tensor * inpFF = cur;

@@ -459,9 +534,16 @@ bool legacy_gptj_eval(
         // this is independent of the self-attention result, so it could be done in parallel to the self-attention
         {
             // note here we pass inpSA instead of cur
-            cur = ggml_v1_mul_mat(ctx0,
-                    ggml_v1_transpose(ctx0, model.layers[il].c_mlp_fc_w),
-                    inpSA);
+            if(super_old_format)
+            {
+                cur = ggml_v1_mul_mat(ctx0,
+                        ggml_v1_transpose(ctx0, model.layers[il].c_mlp_fc_w),
+                        inpSA);
+            }else{
+                cur = ggml_v1_mul_mat(ctx0,
+                        model.layers[il].c_mlp_fc_w,
+                        inpSA);
+            }

             cur = ggml_v1_add(ctx0,
                     ggml_v1_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),

@@ -538,145 +620,3 @@ bool legacy_gptj_eval(
     return true;
 }

(the rest of this hunk deletes the commented-out standalone main() driver, which reappears uncommented as otherarch/gptj_v1_main.cpp below)
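The RETRY_LOAD path above fires when a tensor's two dimensions are exactly swapped relative to what the loader expects and the tensor is one of the projection or MLP weights that the oldest GPT-J exporter wrote transposed. A self-contained sketch of that test follows; should_transpose_layer reuses the name list that appears elsewhere in this commit, and the example tensor name and shapes are illustrative only.

#include <cstdio>
#include <string>

// Weights that the very old GPT-J export stored transposed (same name list the commit uses).
static bool should_transpose_layer(const std::string &name) {
    return name.find(".mlp.fc_in.weight")     != std::string::npos ||
           name.find(".attn.out_proj.weight") != std::string::npos ||
           name.find(".attn.q_proj.weight")   != std::string::npos ||
           name.find(".attn.k_proj.weight")   != std::string::npos ||
           name.find(".attn.v_proj.weight")   != std::string::npos;
}

// True when the on-disk shape is the expected shape with its axes swapped, i.e. the case
// where the loaders return ModelLoadResult::RETRY_LOAD instead of failing outright.
static bool looks_transposed(const std::string &name, int got0, int got1, int exp0, int exp1) {
    return got0 == exp1 && got1 == exp0 && should_transpose_layer(name);
}

int main() {
    // Expected [4096, 16384] but the file holds [16384, 4096]: an older, transposed export.
    bool retry = looks_transposed("transformer.h.0.mlp.fc_in.weight", 16384, 4096, 4096, 16384);
    printf("retry with legacy loader: %s\n", retry ? "yes" : "no");
    return 0;
}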
otherarch/gptj_v1_main.cpp (new file, 145 lines)

@@ -0,0 +1,145 @@
#include "gptj_v1.cpp"

int main(int argc, char ** argv) {
    ggml_v1_time_init();
    const int64_t t_main_start_us = ggml_v1_time_us();

    gpt_params params;
    params.model = "models/gpt-j-6B/ggml-model.bin";

    if (utils_gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    printf("%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.prompt.empty()) {
        if( !isatty(STDIN_FILENO) ){
            std::string line;
            while( std::getline(std::cin, line) ){
                params.prompt = params.prompt + "\n" + line;
            }
        } else {
            params.prompt = utils_gpt_random_prompt(rng);
        }
    }

    int64_t t_load_us = 0;

    gpt_vocab vocab;
    gptj_model_v1 model;
    FileFormat file_format = FileFormat::GPTJ2;

    // load the model
    {
        const int64_t t_start_us = ggml_v1_time_us();

        if (legacy_gptj_model_load(params.model, model, vocab, file_format)!=ModelLoadResult::SUCCESS) {
            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
            return 1;
        }

        t_load_us = ggml_v1_time_us() - t_start_us;
    }

    int n_past = 0;

    int64_t t_sample_us  = 0;
    int64_t t_predict_us = 0;

    std::vector<float> logits;

    // tokenize the prompt
    std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize(vocab, params.prompt);

    params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());

    printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
    printf("\n");

    std::vector<gpt_vocab::id> embd;

    // determine the required inference memory per token:
    size_t mem_per_token = 0;
    legacy_gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format);

    for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) {
        // predict
        if (embd.size() > 0) {
            const int64_t t_start_us = ggml_v1_time_us();

            if (!legacy_gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token,file_format)) {
                printf("Failed to predict\n");
                return 1;
            }

            t_predict_us += ggml_v1_time_us() - t_start_us;
        }

        n_past += embd.size();
        embd.clear();

        if (i >= embd_inp.size()) {
            // sample next token
            const int   top_k = params.top_k;
            const float top_p = params.top_p;
            const float temp  = params.temp;

            const int n_vocab = model.hparams.n_vocab;

            gpt_vocab::id id = 0;

            {
                const int64_t t_start_sample_us = ggml_v1_time_us();

                id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);

                t_sample_us += ggml_v1_time_us() - t_start_sample_us;
            }

            // add it to the context
            embd.push_back(id);
        } else {
            // if here, it means we are still processing the input prompt
            for (int k = i; k < embd_inp.size(); k++) {
                embd.push_back(embd_inp[k]);
                if (embd.size() > params.n_batch) {
                    break;
                }
            }
            i += embd.size() - 1;
        }

        // display text
        for (auto id : embd) {
            printf("%s", vocab.id_to_token[id].c_str());
        }
        fflush(stdout);

        // end of text token
        if (embd.back() == 50256) {
            break;
        }
    }

    // report timing
    {
        const int64_t t_main_end_us = ggml_v1_time_us();

        printf("\n\n");
        printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
        printf("%s:     load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
        printf("%s:   sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
        printf("%s:  predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
    }

    ggml_v1_free(model.ctx);

    return 0;
}
@@ -14,28 +14,18 @@
 #include <iostream>
 #include <unistd.h>

-bool should_transpose_layer(std::string name)
-{
-    if(name.find(".mlp.fc_in.weight")!=std::string::npos ||
-       name.find(".attn.out_proj.weight")!=std::string::npos ||
-       name.find(".attn.q_proj.weight")!=std::string::npos ||
-       name.find(".attn.k_proj.weight")!=std::string::npos ||
-       name.find(".attn.v_proj.weight")!=std::string::npos)
-    {
-        return true;
-    }
-    return false;
-}
+#include "model_adapter.h"

 // load the model's weights from a file
-bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab & vocab) {
+ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab & vocab) {
     printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());

     auto fin = std::ifstream(fname, std::ios::binary);
     if (!fin) {
         fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
-        return false;
+        return ModelLoadResult::FAIL;
     }

     // verify magic

@@ -44,7 +34,7 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
         fin.read((char *) &magic, sizeof(magic));
         if (magic != 0x67676d6c) {
             fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
-            return false;
+            return ModelLoadResult::FAIL;
         }
     }

@@ -77,7 +67,7 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
         if (n_vocab != model.hparams.n_vocab) {
             fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
                     __func__, fname.c_str(), n_vocab, model.hparams.n_vocab);
-            return false;
+            return ModelLoadResult::FAIL;
         }

         std::string word;

@@ -105,7 +95,7 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
        {
            fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
                    __func__, fname.c_str(), model.hparams.f16);
-           return false;
+           return ModelLoadResult::FAIL;
        }
    }

@@ -165,7 +155,7 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
        model.ctx = ggml_init(params);
        if (!model.ctx) {
            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
-           return false;
+           return ModelLoadResult::FAIL;
        }
    }

@@ -284,20 +274,32 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &

        if (model.tensors.find(name.data()) == model.tensors.end()) {
            fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
-           return false;
+           return ModelLoadResult::FAIL;
        }

        auto tensor = model.tensors[name.data()];
        if (ggml_nelements(tensor) != nelements) {
            fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
-           return false;
+           return ModelLoadResult::FAIL;
        }

        if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
-           fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
-                   __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
-           return false;
+           //test for transposition and retry older loader
+           if(tensor->ne[0]==ne[1] && tensor->ne[1]==ne[0] && should_transpose_layer(name))
+           {
+               printf("\nFound a transposed tensor. This could be an older model. Retrying load...");
+               ggml_free(ctx);
+               return ModelLoadResult::RETRY_LOAD;
+           }
+           else
+           {
+               fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+                       __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
+               return ModelLoadResult::FAIL;
+           }
        }

        if (0) {

@@ -315,14 +317,14 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
            default:
            {
                fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
-               return false;
+               return ModelLoadResult::FAIL;
            }
        };

        if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
            fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                    __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
-           return false;
+           return ModelLoadResult::FAIL;
        }

        fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));

@@ -342,7 +344,7 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &

    fin.close();

-   return true;
+   return ModelLoadResult::SUCCESS;
 }

 // evaluate the transformer

@@ -584,146 +586,3 @@ bool gptj_eval(
     return true;
 }

(the rest of this hunk deletes the commented-out standalone main() driver, which reappears uncommented as otherarch/gptj_v2_main.cpp below)
145
otherarch/gptj_v2_main.cpp
Normal file
145
otherarch/gptj_v2_main.cpp
Normal file
|
@ -0,0 +1,145 @@
#include "gptj_v2.cpp"


int main(int argc, char ** argv) {
    ggml_time_init();
    const int64_t t_main_start_us = ggml_time_us();

    gpt_params params;
    params.model = "models/gpt-j-6B/ggml-model.bin";

    if (utils_gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    printf("%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.prompt.empty()) {
        if( !isatty(STDIN_FILENO) ){
            std::string line;
            while( std::getline(std::cin, line) ){
                params.prompt = params.prompt + "\n" + line;
            }
        } else {
            params.prompt = utils_gpt_random_prompt(rng);
        }
    }

    int64_t t_load_us = 0;

    gpt_vocab vocab;
    gptj_model model;

    // load the model
    {
        const int64_t t_start_us = ggml_time_us();

        if (gptj_model_load(params.model, model, vocab)==ModelLoadResult::FAIL) {
            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
            return 1;
        }

        t_load_us = ggml_time_us() - t_start_us;
    }

    int n_past = 0;

    int64_t t_sample_us  = 0;
    int64_t t_predict_us = 0;

    std::vector<float> logits;

    // tokenize the prompt
    std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize(vocab, params.prompt);

    params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());

    printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
    printf("\n");

    std::vector<gpt_vocab::id> embd;

    // determine the required inference memory per token:
    size_t mem_per_token = 0;
    gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);

    for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) {
        // predict
        if (embd.size() > 0) {
            const int64_t t_start_us = ggml_time_us();

            if (!gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) {
                printf("Failed to predict\n");
                return 1;
            }

            t_predict_us += ggml_time_us() - t_start_us;
        }

        n_past += embd.size();
        embd.clear();

        if (i >= embd_inp.size()) {
            // sample next token
            const int   top_k = params.top_k;
            const float top_p = params.top_p;
            const float temp  = params.temp;

            const int n_vocab = model.hparams.n_vocab;

            gpt_vocab::id id = 0;

            {
                const int64_t t_start_sample_us = ggml_time_us();

                id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);

                t_sample_us += ggml_time_us() - t_start_sample_us;
            }

            // add it to the context
            embd.push_back(id);
        } else {
            // if here, it means we are still processing the input prompt
            for (int k = i; k < embd_inp.size(); k++) {
                embd.push_back(embd_inp[k]);
                if (embd.size() > params.n_batch) {
                    break;
                }
            }
            i += embd.size() - 1;
        }

        // display text
        for (auto id : embd) {
            printf("%s", vocab.id_to_token[id].c_str());
        }
        fflush(stdout);

        // end of text token
        if (embd.back() == 50256) {
            break;
        }
    }

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        printf("\n\n");
        printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
        printf("%s:     load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
        printf("%s:   sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
        printf("%s:  predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
    }

    ggml_free(model.ctx);

    return 0;
}
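The main loop above drives both prompt ingestion and generation with a single index: while `i` still points inside the prompt it queues up a batch of prompt tokens and jumps the index past the chunk it consumed (`i += embd.size() - 1`, with the loop's own `i++` supplying the final step); once the prompt is exhausted it samples one token per iteration. A minimal standalone sketch of just that indexing, with made-up token values, `n_batch` and `n_predict` (not values from this commit):

// Standalone sketch of the prompt-batching index arithmetic used above.
// Note that the `> n_batch` check means a chunk can hold n_batch + 1 tokens,
// mirroring the original loop.
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> embd_inp = {11, 22, 33, 44, 55, 66, 77}; // pretend prompt tokens
    const size_t n_batch   = 3;   // assumed value for illustration
    const int    n_predict = 2;   // assumed value for illustration

    std::vector<int> embd;
    for (size_t i = embd.size(); i < embd_inp.size() + n_predict; i++) {
        embd.clear();
        if (i >= embd_inp.size()) {
            embd.push_back(-1); // would be a sampled token in the real loop
        } else {
            for (size_t k = i; k < embd_inp.size(); k++) {
                embd.push_back(embd_inp[k]);
                if (embd.size() > n_batch) {
                    break;
                }
            }
            i += embd.size() - 1; // skip over the prompt chunk just queued
        }
        printf("iteration consumed %zu token(s)\n", embd.size());
    }
    return 0;
}

Running this prints chunks of 4, 3, 1 and 1 tokens: two prompt chunks followed by two generation steps.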
@@ -12,7 +12,7 @@
#include <vector>

#include "utils.h"
#include "model_adapter.h"

// default hparams (GPT-J 6B)

@@ -113,7 +113,7 @@ struct gptj_model {
    std::map<std::string, struct ggml_tensor *> tensors;
};

bool legacy_gptj_model_load(const std::string &fname, gptj_model_v1 &model, gpt_vocab &vocab);
ModelLoadResult legacy_gptj_model_load(const std::string &fname, gptj_model_v1 &model, gpt_vocab &vocab, FileFormat file_format);
bool legacy_gptj_eval(const gptj_model_v1 &model, const int n_threads, const int n_past, const std::vector<gpt_vocab::id> &embd_inp, std::vector<float> &embd_w, size_t &mem_per_token);
bool legacy_gptj_eval(const gptj_model_v1 &model, const int n_threads, const int n_past, const std::vector<gpt_vocab::id> &embd_inp, std::vector<float> &embd_w, size_t &mem_per_token, FileFormat file_format);
bool gptj_model_load(const std::string &fname, gptj_model &model, gpt_vocab &vocab);
ModelLoadResult gptj_model_load(const std::string &fname, gptj_model &model, gpt_vocab &vocab);
bool gptj_eval(const gptj_model &model, const int n_threads, const int n_past, const std::vector<gpt_vocab::id> &embd_inp, std::vector<float> &embd_w, size_t &mem_per_token);
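The loaders above switch from returning `bool` to returning `ModelLoadResult`, so callers now compare against enumerators instead of truth-testing the result. A minimal caller-side sketch, assuming the declarations from this hunk; `ModelLoadResult::FAIL` is the only enumerator visible in this section, so treating everything else as success is an assumption:

// Sketch only: consuming the new ModelLoadResult return value.
// The header name is an assumption about where the enum is declared.
#include <cstdio>
#include <string>
#include "model_adapter.h" // assumed to declare ModelLoadResult and FileFormat

static bool try_load(const std::string & fname, gptj_model & model, gpt_vocab & vocab) {
    ModelLoadResult res = gptj_model_load(fname, model, vocab);
    if (res == ModelLoadResult::FAIL) {
        fprintf(stderr, "load failed for %s\n", fname.c_str());
        return false;
    }
    return true; // any non-FAIL result is treated as loaded in this sketch
}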
@@ -249,6 +249,103 @@ bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab) {
    return true;
}

void gptj_sample_top_k(std::vector<std::pair<double, gpt_vocab::id>> & logits_id, int top_k) {
    // find the top K tokens
    std::partial_sort(
            logits_id.begin(),
            logits_id.begin() + top_k, logits_id.end(),
            [](const std::pair<double, gpt_vocab::id> & a, const std::pair<double, gpt_vocab::id> & b) {
        return a.first > b.first;
    });

    logits_id.resize(top_k);
}

gpt_vocab::id gptj_sample_top_p_top_k(
        const gpt_vocab & vocab,
        const float * logits,
        std::vector<gpt_vocab::id> & last_n_tokens,
        double repeat_penalty,
        int top_k,
        double top_p,
        double temp,
        std::mt19937 & rng) {
    int n_logits = vocab.id_to_token.size();

    std::vector<std::pair<double, gpt_vocab::id>> logits_id;
    logits_id.reserve(n_logits);

    {
        const double scale = 1.0/temp;
        for (int i = 0; i < n_logits; ++i) {
            // repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
            // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main
            if (std::find(last_n_tokens.begin(), last_n_tokens.end(), i) != last_n_tokens.end()) {
                // if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
                if (logits[i] < 0.0) {
                    logits_id.push_back(std::make_pair(logits[i]*scale*repeat_penalty, i));
                } else {
                    logits_id.push_back(std::make_pair(logits[i]*scale/repeat_penalty, i));
                }
            } else {
                logits_id.push_back(std::make_pair(logits[i]*scale, i));
            }
        }
    }

    gptj_sample_top_k(logits_id, top_k);

    double maxl = -INFINITY;
    for (const auto & kv : logits_id) {
        maxl = std::max(maxl, kv.first);
    }

    // compute probs for the top K tokens
    std::vector<double> probs;
    probs.reserve(logits_id.size());

    double sum = 0.0;
    for (const auto & kv : logits_id) {
        double p = exp(kv.first - maxl);
        probs.push_back(p);
        sum += p;
    }

    // normalize the probs
    for (auto & p : probs) {
        p /= sum;
    }

    if (top_p < 1.0f) {
        double cumsum = 0.0f;
        for (int i = 0; i < (int) probs.size(); i++) {
            cumsum += probs[i];
            if (cumsum >= top_p) {
                probs.resize(i + 1);
                logits_id.resize(i + 1);
                break;
            }
        }

        cumsum = 1.0/cumsum;
        for (int i = 0; i < (int) probs.size(); i++) {
            probs[i] *= cumsum;
        }
    }

    //printf("\n");
    //for (int i = 0; i < (int) 10; i++) {
    //    printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
    //}
    //printf("\n\n");
    //exit(0);

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    int idx = dist(rng);

    return logits_id[idx].second;
}

gpt_vocab::id gpt_sample_top_k_top_p(
        const gpt_vocab & vocab,
        const float * logits,
@@ -327,4 +424,18 @@ gpt_vocab::id gpt_sample_top_k_top_p(
    int idx = dist(rng);

    return logits_id[idx].second;
}

static bool should_transpose_layer(std::string name)
{
    if(name.find(".mlp.fc_in.weight")!=std::string::npos ||
       name.find(".attn.out_proj.weight")!=std::string::npos ||
       name.find(".attn.q_proj.weight")!=std::string::npos ||
       name.find(".attn.k_proj.weight")!=std::string::npos ||
       name.find(".attn.v_proj.weight")!=std::string::npos)
    {
        return true;
    }
    return false;
}
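The new gptj_sample_top_p_top_k combines a CTRL-style repetition penalty with the usual temperature / top-k / top-p pipeline: penalized tokens have positive scores divided by the penalty and negative scores multiplied by it, so both cases move the token toward lower probability after the softmax. A small standalone sketch of just that sign-aware scaling, with made-up scores and parameter values (not taken from this commit):

// Standalone sketch of the sign-aware repetition penalty used above.
#include <cstdio>

int main() {
    const double temp = 0.8, repeat_penalty = 1.3; // assumed values
    const double scale = 1.0 / temp;
    const double logits[2] = {2.0, -2.0};          // made-up scores for two penalized tokens

    for (double l : logits) {
        double penalized = (l < 0.0) ? l * scale * repeat_penalty
                                     : l * scale / repeat_penalty;
        printf("raw %+0.2f -> penalized %+0.2f\n", l, penalized);
    }
    return 0;
}

Both outputs end up smaller than the plain temperature-scaled score would be, which is the reason the code branches on the sign instead of always dividing.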
@@ -62,6 +62,18 @@ gpt_sample_top_k_top_p(
    double temp,
    std::mt19937 & rng);

gpt_vocab::id gptj_sample_top_p_top_k(
        const gpt_vocab & vocab,
        const float * logits,
        std::vector<gpt_vocab::id> & last_n_tokens,
        double repeat_penalty,
        int top_k,
        double top_p,
        double temp,
        std::mt19937 & rng);

bool utils_gpt_params_parse(int argc, char ** argv, gpt_params & params);
void utils_gpt_print_usage(int argc, char ** argv, const gpt_params & params);
std::string utils_gpt_random_prompt(std::mt19937 & rng);

static bool should_transpose_layer(std::string name);
BIN	quantize.exe
Binary file not shown.