neox is updated
parent 90fe9096b4
commit 00da2a5f4e

5 changed files with 111 additions and 74 deletions
@@ -393,18 +393,22 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     SetQuantsUnshuffled(file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5);

     // determine the required inference memory per token:
-    gpt_neox_eval(neox_ctx, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token, file_format);
+    gpt_neox_eval(neox_ctx, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);

-    if(logits.size()>0 && (file_format==FileFormat::NEOX_2 || file_format==FileFormat::NEOX_4) && !IsNanCheck(logits[0]))
+    if(logits.size()>0 && file_format==FileFormat::NEOX_2 && !IsNanCheck(logits[0]))
     {
         //run the black magic eval to determine if it's redpajama. VERY UGLY HACK!
         std::vector<int> test_embd = ::gpt_tokenize(vocab, "1 2 3 4 5 6 7");
-        gpt_neox_eval(neox_ctx, params.n_threads, 0, test_embd, logits, mem_per_token, (file_format==FileFormat::NEOX_2?FileFormat::NEOX_3:FileFormat::NEOX_5));
+        auto orig_par_res = neox_ctx.hparams.par_res;
+        neox_ctx.hparams.par_res = 0; //test with residual false
+        gpt_neox_eval(neox_ctx, params.n_threads, 0, test_embd, logits, mem_per_token);
+        neox_ctx.hparams.par_res = orig_par_res;
        int topid = std::max_element(logits.begin(),logits.end())-logits.begin();
        std::string predicted = vocab.id_to_token[topid].c_str();
-       if(predicted.find("8") != std::string::npos)
+       auto findresult = predicted.find("8");
+       if(findresult != std::string::npos && findresult<2)
        {
-           printf("\n---\nRedPajama NeoX Detected! Switching to new format! (use_parallel_residual=False)\n");
+           printf("\n---\nOld RedPajama NeoX Detected! Switching to new format! (use_parallel_residual=False)\n");
            ggml_free(neox_ctx.ctx);
            return ModelLoadResult::RETRY_LOAD;
        }
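A note on the hack above: old NEOX_2 files do not store use_parallel_residual, so the loader probes for it empirically. It forces par_res off, runs one throwaway eval on "1 2 3 4 5 6 7", and treats a top token beginning with "8" as evidence of a RedPajama-style (non-parallel-residual) model; the new findresult<2 guard rejects tokens that merely contain an "8" somewhere later. As a minimal sketch — a hypothetical helper built from the project's own gpt_neox_eval and gpt_tokenize, assuming the repo's usual headers, not code from this commit:

    static bool probe_old_redpajama(gpt_neox_model & neox_ctx, gpt_vocab & vocab, int n_threads) {
        std::vector<float> logits;
        size_t mem_per_token = 0;
        std::vector<int> test_embd = ::gpt_tokenize(vocab, "1 2 3 4 5 6 7");

        const auto orig_par_res = neox_ctx.hparams.par_res;
        neox_ctx.hparams.par_res = 0;             // force residual false for the test
        gpt_neox_eval(neox_ctx, n_threads, 0, test_embd, logits, mem_per_token);
        neox_ctx.hparams.par_res = orig_par_res;  // restore the real setting

        const int topid = std::max_element(logits.begin(), logits.end()) - logits.begin();
        const std::string predicted = vocab.id_to_token[topid];
        const auto pos = predicted.find("8");
        return pos != std::string::npos && pos < 2; // "8" at token start => RedPajama
    }

On RETRY_LOAD the file is presumably reloaded as NEOX_3, which — per the loader change below — maps to hparams.par_res = 0.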
@@ -694,7 +698,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     }
     else if(file_format==FileFormat::NEOX_1 || file_format == FileFormat::NEOX_2 || file_format == FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5)
     {
-        evalres = gpt_neox_eval(neox_ctx, params.n_threads, n_past, embd, logits, mem_per_token, file_format);
+        evalres = gpt_neox_eval(neox_ctx, params.n_threads, n_past, embd, logits, mem_per_token);
     }
     else if(file_format==FileFormat::GPTJ_1 || file_format==FileFormat::GPTJ_2)
     {
@@ -143,19 +143,41 @@ void print_tok_vec(std::vector<float> &embd)
 {
     //anything outside the llama v1 range is assumed to be NeoX
     fileformat = FileFormat::NEOX_4;
-    uint32_t temp;
+    uint32_t temp,temp2;
     fin.read((char *)&temp, sizeof(temp)); //ctx
     fin.read((char *)&temp, sizeof(temp)); //n_embd
     fin.read((char *)&temp, sizeof(temp)); //n_head
     fin.read((char *)&temp, sizeof(temp)); //n_layer
     fin.read((char *)&temp, sizeof(temp)); //n_rot
-    fin.read((char *)&temp, sizeof(temp)); //f16
-    const int32_t qntvr = temp / 1000;
-    temp %= 1000;
-    if(qntvr==0)
-    {
+    fin.read((char *)&temp, sizeof(temp)); //either par_res or ftype (for older ver)
+
+    if(temp!=0 && temp!=1){
+        //must be ftype, means it's an older model. par_res will be undefined
         fileformat = FileFormat::NEOX_2;
     }
+    else
+    {
+        //it could be a newer model, or an old f16/f32 model
+        fin.read((char *)&temp2, sizeof(temp2)); //if previous was par_res, this is ftype. else unknown
+
+        //if it is a new ftype, it must have these properties: >= 1000, a low multiple of 1k, and a small remainder
+        bool isNewFtype = (temp2>=1000 && temp2<=9000 && temp2%1000<20);
+
+        if(!isNewFtype)
+        {
+            fileformat = FileFormat::NEOX_2;
+        }
+        else
+        {
+            const int32_t qntvr = temp2 / 1000; //for future use
+            //then temp was par_res
+            if(temp==0) //use_parallel_residual is false in RedPajama
+            {
+                fileformat = FileFormat::NEOX_5;
+            }
+        }
+    }
 }
 else if(magic == 0x67676d66) //v2 format ggmf
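The disambiguation above deserves a note: the word after n_rot is par_res (0 or 1) in newer files but the raw ftype in older ones, and an old unquantized file (raw ftype 0 or 1) looks exactly like a par_res flag, so a second word has to be inspected. A standalone illustration of the heuristic — a hypothetical helper using the same constants as the code above, not part of the commit:

    #include <cstdint>
    #include <cstdio>

    // In a new-style file the 32-bit word after n_rot is par_res (0 or 1) and the
    // next word is a versioned ftype (qntvr*1000 + ftype, e.g. 2002 = qntvr 2, q4_0).
    // In an old-style file the word after n_rot is already the raw ftype.
    static bool looks_like_new_header(uint32_t w6, uint32_t w7) {
        if (w6 != 0 && w6 != 1) return false;               // not a par_res flag: old raw ftype
        return w7 >= 1000 && w7 <= 9000 && w7 % 1000 < 20;  // next word must be a versioned ftype
    }

    int main() {
        printf("%d\n", looks_like_new_header(1, 2002)); // 1: new file, par_res=1
        printf("%d\n", looks_like_new_header(0, 2002)); // 1: new file, RedPajama (par_res=0)
        printf("%d\n", looks_like_new_header(2, 0));    // 0: old quantized file (raw ftype 2)
        printf("%d\n", looks_like_new_header(1, 0));    // 0: old f16 file; w7 is not a versioned ftype
    }

The remaining blind spot, as the code comment concedes, is an old f16/f32 model whose following bytes happen to decode to a plausible versioned ftype; the tight bounds (1000-9000, remainder below 20) keep that window small.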
@@ -38,17 +38,24 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
 // load hparams
 {
     auto & hparams = model.hparams;

+    hparams.par_res = 1; //true
     fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
     fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
     fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
     fin.read((char *) &hparams.n_head, sizeof(hparams.n_head));
     fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
     fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
+    if(file_format!=FileFormat::NEOX_1 && file_format!=FileFormat::NEOX_2 && file_format!=FileFormat::NEOX_3)
+    {
+        fin.read((char *) &hparams.par_res, sizeof(hparams.par_res));
+    }
+    if(file_format==FileFormat::NEOX_3)
+    {
+        hparams.par_res = 0;
+    }
     fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));

     const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
-    hparams.ftype %= GGML_QNT_VERSION_FACTOR;
-
     printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
     printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
@@ -56,7 +63,11 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
     printf("%s: n_head = %d\n", __func__, hparams.n_head);
     printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
     printf("%s: n_rot = %d\n", __func__, hparams.n_rot);
+    printf("%s: par_res = %d\n", __func__, hparams.par_res);
     printf("%s: ftype = %d\n", __func__, hparams.ftype);
     printf("%s: qntvr = %d\n", __func__, qntvr);
+
+    hparams.ftype %= GGML_QNT_VERSION_FACTOR;
 }

 // load vocab
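Taken together, the loader now expects the following header layout for NEOX_4/NEOX_5 files; older NEOX_1/2/3 files simply omit par_res, and NEOX_3 (old-format RedPajama detected via the retry hack) forces it to 0. An annotated sketch for reference only, not code from the commit:

    // On-disk header as read by the loader above; all fields are
    // 32-bit little-endian integers.
    struct neox_header_sketch {
        int32_t magic;    // 'ggml'
        int32_t n_vocab;
        int32_t n_ctx;
        int32_t n_embd;
        int32_t n_head;
        int32_t n_layer;
        int32_t n_rot;    // rotary_pct * (n_embd / n_head)
        int32_t par_res;  // new field: 1 = parallel residual, 0 = RedPajama-style
        int32_t ftype;    // qntvr * GGML_QNT_VERSION_FACTOR + base ftype
    };

Note that hparams.ftype %= GGML_QNT_VERSION_FACTOR now runs after the printfs, so the qntvr line prints the quantization version before ftype is reduced to its base value.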
@@ -126,7 +137,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
     ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
     ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v

-    ctx_size += (6 + 16*n_layer)*256; // object overhead
+    ctx_size += (6 + 16*n_layer)*512; // object overhead

     printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
 }
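The per-object overhead allowance doubles from 256 to 512 bytes per ggml object. This estimate only has to be a safe upper bound: the model's tensors are later carved out of this single fixed-size ggml context, and an underestimate makes tensor allocation fail partway through loading, so the extra slack is cheap insurance.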
@@ -151,7 +162,6 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &

     const int n_embd = hparams.n_embd;
     const int n_layer = hparams.n_layer;
-    const int n_ctx = hparams.n_ctx;
     const int n_vocab = hparams.n_vocab;

     model.layers.resize(n_layer);
@@ -223,15 +233,15 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
     const int n_layer = hparams.n_layer;
     const int n_ctx = hparams.n_ctx;

-    const int n_mem = n_layer*n_ctx;
-    const int n_elements = n_embd*n_mem;
+    const int64_t n_mem = n_layer*n_ctx;
+    const int64_t n_elements = n_embd*n_mem;

     model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
     model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);

     const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);

-    printf("%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
+    printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size/1024.0/1024.0, n_mem);
 }

 // load weights
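Widening n_mem and n_elements to int64_t guards the KV-cache size arithmetic against 32-bit overflow. For a hypothetical 48-layer model with n_ctx = 8192 and n_embd = 6144, n_elements = 48 × 8192 × 6144 = 2,415,919,104, which already exceeds INT32_MAX (2,147,483,647). The matching printf change uses the PRId64 macro (from <cinttypes>) to print the widened n_mem portably.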
@@ -331,6 +341,43 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
     return ModelLoadResult::SUCCESS;
 }

+
+// feed-forward network
+ggml_tensor * gpt_neox_ff(
+    const gpt_neox_layer &layer,
+    ggml_context * ctx0,
+    ggml_tensor * inp) {
+    ggml_tensor * cur = ggml_norm(ctx0, inp);
+
+    cur = ggml_add(ctx0,
+        ggml_mul(ctx0,
+            ggml_repeat(ctx0, layer.ln_2_g, cur),
+            cur),
+        ggml_repeat(ctx0, layer.ln_2_b, cur));
+
+    cur = ggml_mul_mat(ctx0,
+        layer.c_mlp_fc_w,
+        cur);
+
+    cur = ggml_add(ctx0,
+        ggml_repeat(ctx0, layer.c_mlp_fc_b, cur),
+        cur);
+
+    // GELU activation
+    cur = ggml_gelu(ctx0, cur);
+
+    // projection
+    // cur = proj_w*cur + proj_b
+    cur = ggml_mul_mat(ctx0,
+        layer.c_mlp_proj_w,
+        cur);
+
+    cur = ggml_add(ctx0,
+        ggml_repeat(ctx0, layer.c_mlp_proj_b, cur),
+        cur);
+    return cur;
+}
+
 // evaluate the transformer
 //
 // - model: the model
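For reference, the factored-out helper computes the standard GPT-NeoX MLP block with its post-attention layer norm included: cur = c_mlp_proj_w · GELU(c_mlp_fc_w · LN(inp)) plus the corresponding bias adds, where LN uses the layer's ln_2_g/ln_2_b scale and shift. Pulling it out of the eval loop lets both residual layouts below share one implementation.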
@@ -345,8 +392,7 @@ bool gpt_neox_eval(
         const int n_past,
         const std::vector<gpt_vocab::id> & embd_inp,
         std::vector<float> & embd_w,
-        size_t & mem_per_token,
-        FileFormat file_format) {
+        size_t & mem_per_token) {
     const int N = embd_inp.size();

     const auto & hparams = model.hparams;
@@ -496,59 +542,23 @@ bool gpt_neox_eval(
             }
         }

-        if(file_format==FileFormat::NEOX_3||file_format==FileFormat::NEOX_5)
-        {
-            // layer input + Attn
-            cur = ggml_add(ctx0, cur, inpL);
-        }
-
-        struct ggml_tensor * inpFF = cur;
-
-        // feed-forward network
-        // this is independent of the self-attention result, so it could be done in parallel to the self-attention
-        {
-            // post attention layer norm
-            // note here we pass inpL instead of cur
-            {
-                cur = ggml_norm(ctx0, ((file_format==FileFormat::NEOX_3||file_format==FileFormat::NEOX_5)?cur:inpL));
-
-                cur = ggml_add(ctx0,
-                    ggml_mul(ctx0,
-                        ggml_repeat(ctx0, model.layers[il].ln_2_g, cur),
-                        cur),
-                    ggml_repeat(ctx0, model.layers[il].ln_2_b, cur));
-            }
-
-            cur = ggml_mul_mat(ctx0,
-                model.layers[il].c_mlp_fc_w,
-                cur);
-
-            cur = ggml_add(ctx0,
-                ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
-                cur);
-
-            // GELU activation
-            cur = ggml_gelu(ctx0, cur);
-
-            // projection
-            // cur = proj_w*cur + proj_b
-            cur = ggml_mul_mat(ctx0,
-                model.layers[il].c_mlp_proj_w,
-                cur);
-
-            cur = ggml_add(ctx0,
-                ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
-                cur);
-        }
-
-        if (file_format==FileFormat::NEOX_3||file_format==FileFormat::NEOX_5)
-        {
-            // layer input + FF
-            // input for next layer
-            inpL = ggml_add(ctx0, cur, inpFF);
-        }
-        else
-        {
-            cur = ggml_add(ctx0, cur, inpFF);
-
-            // input for next layer
-            inpL = ggml_add(ctx0, cur, inpL);
-        }
+        if (hparams.par_res == 0) {
+            struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpL);
+
+            cur = gpt_neox_ff(model.layers[il], ctx0, inpFF);
+
+            // input for next layer
+            inpL = ggml_add(ctx0, cur, inpFF);
+        } else {
+            struct ggml_tensor * inpFF = cur;
+
+            // this is independent of the self-attention result, so it could be done in parallel to the self-attention
+            // note here we pass inpL instead of cur
+            cur = gpt_neox_ff(model.layers[il], ctx0, inpL);
+
+            // layer input + FF
+            cur = ggml_add(ctx0, cur, inpFF);
+
+            // input for next layer
+            inpL = ggml_add(ctx0, cur, inpL);
+        }
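With par_res stored in hparams, the eval no longer needs FileFormat plumbing, and both layouts share gpt_neox_ff. Writing x for the layer input, the two branches compute:

par_res == 1 (classic GPT-NeoX, parallel residual):
    x_next = x + Attn(LN_1(x)) + FF(LN_2(x))

par_res == 0 (RedPajama-style, sequential residual):
    h = x + Attn(LN_1(x))
    x_next = h + FF(LN_2(h))

In the parallel branch the FF reads inpL (the raw layer input), so it is independent of the attention result; in the sequential branch it reads inpFF, the attention output already added back onto the input.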
@@ -602,4 +612,4 @@ bool gpt_neox_eval(
     ggml_free(ctx0);

     return true;
 }
@@ -221,6 +221,7 @@ struct gpt_neox_hparams {
     int32_t n_head = 32;
     int32_t n_layer = 16;
     int32_t n_rot = 32; // rotary_pct * (n_embd / n_head)
+    int32_t par_res = 1; // 1 = true, 0 = false
     int32_t ftype = 1;
 };
|
@ -58,7 +58,7 @@ fout.write(struct.pack("i", hparams["hidden_size"]))
|
|||
fout.write(struct.pack("i", hparams["num_attention_heads"]))
|
||||
fout.write(struct.pack("i", hparams["num_hidden_layers"]))
|
||||
fout.write(struct.pack("i", int(hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"]))))
|
||||
fout.write(struct.pack("i", hparams["use_parallel_residual"]))
|
||||
fout.write(struct.pack("i", hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True))
|
||||
fout.write(struct.pack("i", ftype))
|
||||
|
||||
# TODO: temporary hack to not deal with implementing the tokenizer
|
||||
|
|
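The converter change is purely defensive: a config that lacks the use_parallel_residual key now falls back to True, mirroring the hparams.par_res = 1 default on the loader side. Because bool subclasses int in Python, struct.pack("i", True) emits the 32-bit integer 1, so older configs still produce a header the new loader reads correctly.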