make gptneox and gptj work with extended context too

This commit is contained in:
Concedo 2023-07-02 18:28:09 +08:00
parent d6b47e6a5b
commit 3d2907d208
2 changed files with 8 additions and 4 deletions

View file

@@ -68,6 +68,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: ftype = %d\n", __func__, hparams.ftype);
printf("%s: qntvr = %d\n", __func__, qntvr); printf("%s: qntvr = %d\n", __func__, qntvr);
hparams.n_ctx = std::max(origmaxctx,hparams.n_ctx);
hparams.ftype %= GGML_QNT_VERSION_FACTOR; hparams.ftype %= GGML_QNT_VERSION_FACTOR;
} }
@@ -474,8 +476,8 @@ bool gptj_eval(
// self-attention // self-attention
{ {
struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0); struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, n_ctx);
struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0); struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, n_ctx);
// store key and value to memory // store key and value to memory
{ {

View file

@@ -68,6 +68,8 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
printf("%s: ftype = %d\n", __func__, hparams.ftype); printf("%s: ftype = %d\n", __func__, hparams.ftype);
printf("%s: qntvr = %d\n", __func__, qntvr); printf("%s: qntvr = %d\n", __func__, qntvr);
hparams.n_ctx = std::max(origmaxctx,hparams.n_ctx);
hparams.ftype %= GGML_QNT_VERSION_FACTOR; hparams.ftype %= GGML_QNT_VERSION_FACTOR;
} }
@@ -502,8 +504,8 @@ bool gpt_neox_eval(
struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head)); struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
// using mode = 2 for GPT-NeoX mode // using mode = 2 for GPT-NeoX mode
Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2, 0); Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2, n_ctx);
Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2, 0); Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2, n_ctx);
// store key and value to memory // store key and value to memory
{ {