Adapt baichuan (not tested yet)

Add layer, layers, graph building methods
This commit is contained in:
KerfuffleV2 2023-10-12 14:18:04 -06:00
parent 06c278895f
commit 5a06711f64

543
llama.cpp
View file

@ -3079,6 +3079,17 @@ struct llm_build_llama_ctx : llm_build_ctx {
: llm_build_ctx(lctx, batch) : llm_build_ctx(lctx, batch)
{} {}
struct ggml_tensor * build_kq_scale() {
    // Build the 1-element F32 tensor holding the attention scale
    // 1/sqrt(n_embd_head), applied to K*Q before the softmax.
    ggml_tensor * scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
    ggml_set_name(scale, "1/sqrt(n_embd_head)");
    ggml_allocr_alloc(alloc, scale);
    // During the allocator's measure pass the tensor has no real
    // backing buffer, so only write the value on a real allocation.
    if (!alloc_measure) {
        ggml_set_f32(scale, 1.0f/sqrtf(float(n_embd_head)));
    }
    return scale;
}
struct ggml_tensor * build_pre_repeating() { struct ggml_tensor * build_pre_repeating() {
struct ggml_tensor * inpL; struct ggml_tensor * inpL;
@ -3123,13 +3134,7 @@ struct llm_build_llama_ctx : llm_build_ctx {
} }
#endif // GGML_USE_CUBLAS #endif // GGML_USE_CUBLAS
// KQ_scale KQ_scale = build_kq_scale();
KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
ggml_allocr_alloc(alloc, KQ_scale);
if (!alloc_measure) {
ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd_head)));
}
// KQ_mask (mask for 1 head, it will be broadcasted to all heads) // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
@ -3218,6 +3223,62 @@ struct llm_build_llama_ctx : llm_build_ctx {
return gf; return gf;
} }
struct ggml_tensor * build_attn_block_kcur(ggml_tensor * tmpk) {
    // Reshape the raw K projection to [n_embd_head, n_head_kv, n_tokens]
    // and apply rotary position embeddings at the positions in KQ_pos.
    struct ggml_tensor * Kcur = ggml_rope_custom(ctx0,
        ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens),
        KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale);
    offload_func_kq(Kcur);
    ggml_set_name(Kcur, "Kcur");
    return Kcur;
}
struct ggml_tensor * build_attn_block_qcur(ggml_tensor * tmpq) {
    // Reshape the raw Q projection to [n_embd_head, n_head, n_tokens]
    // (n_head, not n_head_kv: queries are not grouped) and apply RoPE.
    struct ggml_tensor * Qcur = ggml_rope_custom(ctx0,
        ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens),
        KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale);
    offload_func_kq(Qcur);
    ggml_set_name(Qcur, "Qcur");
    return Qcur;
}
std::tuple<ggml_tensor *, ggml_tensor *> build_attn_block_kcur_qcur(
const llama_layer & layer,
ggml_tensor * cur) {
// compute Q and K and RoPE them
struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, layer.wk, cur);
offload_func_kq(tmpk);
ggml_set_name(tmpk, "tmpk");
struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, layer.wq, cur);
offload_func_kq(tmpq);
ggml_set_name(tmpq, "tmpq");
struct ggml_tensor * Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale);
offload_func_kq(Kcur);
ggml_set_name(Kcur, "Kcur");
struct ggml_tensor * Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale);
offload_func_kq(Qcur);
ggml_set_name(Qcur, "Qcur");
return std::make_tuple(Kcur, Qcur);
}
ggml_tensor * build_attn_block_kq_masked(ggml_tensor * KQ) {
    // KQ_scaled = KQ / sqrt(n_embd_head)
    // KQ_scaled shape [n_kv, n_tokens, n_head, 1]
    struct ggml_tensor * scaled = ggml_scale(ctx0, KQ, KQ_scale);
    offload_func_kq(scaled);
    ggml_set_name(scaled, "KQ_scaled");

    // KQ_masked = mask_past(KQ_scaled): adding the (broadcast) KQ_mask
    // sets masked-out positions to -inf ahead of the softmax.
    struct ggml_tensor * masked = ggml_add(ctx0, scaled, KQ_mask);
    offload_func_kq(masked);
    ggml_set_name(masked, "KQ_masked");
    return masked;
}
struct ggml_tensor * build_attn_block( struct ggml_tensor * build_attn_block(
const int32_t il, const int32_t il,
ggml_tensor * input) { ggml_tensor * input) {
@ -3238,25 +3299,8 @@ struct llm_build_llama_ctx : llm_build_ctx {
// self-attention // self-attention
{ {
// compute Q and K and RoPE them // compute Q and K and RoPE them
struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, layer.wk, cur); struct ggml_tensor * Kcur, * Qcur;
offload_func_kq(tmpk); std::tie(Kcur, Qcur) = build_attn_block_kcur_qcur(layer, cur);
ggml_set_name(tmpk, "tmpk");
struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, layer.wq, cur);
offload_func_kq(tmpq);
ggml_set_name(tmpq, "tmpq");
struct ggml_tensor * Kcur = ggml_rope_custom(ctx0,
ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens),
KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale);
offload_func_kq(Kcur);
ggml_set_name(Kcur, "Kcur");
struct ggml_tensor * Qcur = ggml_rope_custom(ctx0,
ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens),
KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale);
offload_func_kq(Qcur);
ggml_set_name(Qcur, "Qcur");
// store key and value to memory // store key and value to memory
{ {
@ -3305,14 +3349,7 @@ struct llm_build_llama_ctx : llm_build_ctx {
// KQ_scaled = KQ / sqrt(n_embd_head) // KQ_scaled = KQ / sqrt(n_embd_head)
// KQ_scaled shape [n_kv, n_tokens, n_head, 1] // KQ_scaled shape [n_kv, n_tokens, n_head, 1]
struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale); struct ggml_tensor * KQ_masked = build_attn_block_kq_masked(KQ);
offload_func_kq(KQ_scaled);
ggml_set_name(KQ_scaled, "KQ_scaled");
// KQ_masked = mask_past(KQ_scaled)
struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled, KQ_mask);
offload_func_kq(KQ_masked);
ggml_set_name(KQ_masked, "KQ_masked");
// KQ = soft_max(KQ_masked) // KQ = soft_max(KQ_masked)
struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
@ -3396,230 +3433,16 @@ struct llm_build_llama_ctx : llm_build_ctx {
ggml_set_name(cur, "result_w2"); ggml_set_name(cur, "result_w2");
return cur; return cur;
} }
};
static struct ggml_cgraph * llm_build_llama(
llama_context & lctx,
const llama_batch & batch) {
llm_build_llama_ctx bctx(lctx, batch);
struct ggml_tensor * build_layer(const int32_t il, ggml_tensor * inpL) {
struct ggml_tensor * cur = nullptr; struct ggml_tensor * cur = nullptr;
struct ggml_tensor * inpL = bctx.build_pre_repeating();
const int i_gpu_start = bctx.n_layer - bctx.n_gpu_layers;
(void) i_gpu_start;
for (int il = 0; il < bctx.n_layer; ++il) {
ggml_format_name(inpL, "layer_inp_%d", il);
bctx.offload_func = llama_nop;
#ifdef GGML_USE_CUBLAS
if (il >= i_gpu_start) {
bctx.offload_func = ggml_cuda_assign_buffers_no_alloc;
}
#endif // GGML_USE_CUBLAS
struct ggml_tensor * inpSA = inpL;
// norm
cur = ggml_rms_norm(bctx.ctx0, inpL, bctx.norm_rms_eps);
bctx.offload_func(cur);
ggml_set_name(cur, "rms_norm_0");
cur = bctx.build_attn_block(il, cur);
struct ggml_tensor * inpFF = ggml_add(bctx.ctx0, cur, inpSA);
bctx.offload_func(inpFF);
ggml_set_name(inpFF, "inpFF");
// norm
cur = ggml_rms_norm(bctx.ctx0, inpFF, bctx.norm_rms_eps);
bctx.offload_func(cur);
ggml_set_name(cur, "rms_norm_1");
cur = bctx.build_ffn_block(il, cur);
cur = ggml_add(bctx.ctx0, cur, inpFF);
bctx.offload_func(cur);
ggml_set_name(cur, "inpFF_+_result_w2");
// input for next layer
inpL = cur;
}
ggml_cgraph * gf = bctx.build_post_repeating(inpL);
return gf;
}
static struct ggml_cgraph * llm_build_baichaun(
llama_context & lctx,
const llama_batch & batch) {
const auto & model = lctx.model;
const auto & hparams = model.hparams;
const auto & cparams = lctx.cparams;
const auto & kv_self = lctx.kv_self;
GGML_ASSERT(!!kv_self.ctx);
const int64_t n_embd = hparams.n_embd;
const int64_t n_layer = hparams.n_layer;
const int64_t n_ctx = cparams.n_ctx;
const int64_t n_head = hparams.n_head;
const int64_t n_head_kv = hparams.n_head_kv;
const int64_t n_embd_head = hparams.n_embd_head();
const int64_t n_embd_gqa = hparams.n_embd_gqa();
GGML_ASSERT(n_embd_head == hparams.n_rot);
const float freq_base = cparams.rope_freq_base;
const float freq_scale = cparams.rope_freq_scale;
const float norm_rms_eps = hparams.f_norm_rms_eps;
const int n_gpu_layers = model.n_gpu_layers;
const int32_t n_tokens = batch.n_tokens;
const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n;
const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head;
const bool do_rope_shift = ggml_allocr_is_measure(lctx.alloc) || kv_self.has_shift;
auto & buf_compute = lctx.buf_compute;
struct ggml_init_params params = {
/*.mem_size =*/ buf_compute.size,
/*.mem_buffer =*/ buf_compute.data,
/*.no_alloc =*/ true,
};
struct ggml_context * ctx0 = ggml_init(params);
ggml_cgraph * gf = ggml_new_graph(ctx0);
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
if (batch.token) {
struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
ggml_allocr_alloc(lctx.alloc, inp_tokens);
if (!ggml_allocr_is_measure(lctx.alloc)) {
memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens));
}
ggml_set_name(inp_tokens, "inp_tokens");
inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
} else {
#ifdef GGML_USE_MPI
GGML_ASSERT(false && "not implemented");
#endif
inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens);
ggml_allocr_alloc(lctx.alloc, inpL);
if (!ggml_allocr_is_measure(lctx.alloc)) {
memcpy(inpL->data, batch.embd, n_tokens * n_embd * ggml_element_size(inpL));
}
}
const int i_gpu_start = n_layer - n_gpu_layers; const int i_gpu_start = n_layer - n_gpu_layers;
(void) i_gpu_start; (void) i_gpu_start;
// offload functions set the tensor output backend to GPU
// tensors are GPU-accelerated if any input or the output has been offloaded
offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
offload_func_t offload_func_kq = llama_nop;
offload_func_t offload_func_v = llama_nop;
#ifdef GGML_USE_CUBLAS
if (n_gpu_layers > n_layer) {
offload_func_nr = ggml_cuda_assign_buffers_no_alloc;
}
if (n_gpu_layers > n_layer + 1) {
offload_func_v = ggml_cuda_assign_buffers_no_alloc;
}
if (n_gpu_layers > n_layer + 2) {
offload_func_kq = ggml_cuda_assign_buffers_no_alloc;
}
#endif // GGML_USE_CUBLAS
// KQ_scale
struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
ggml_allocr_alloc(lctx.alloc, KQ_scale);
if (!ggml_allocr_is_measure(lctx.alloc)) {
ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
}
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
offload_func_kq(KQ_mask);
ggml_set_name(KQ_mask, "KQ_mask");
ggml_allocr_alloc(lctx.alloc, KQ_mask);
if (!ggml_allocr_is_measure(lctx.alloc)) {
float * data = (float *) KQ_mask->data;
memset(data, 0, ggml_nbytes(KQ_mask));
for (int h = 0; h < 1; ++h) {
for (int j = 0; j < n_tokens; ++j) {
const llama_pos pos = batch.pos[j];
const llama_seq_id seq_id = batch.seq_id[j];
for (int i = 0; i < n_kv; ++i) {
if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) {
data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY;
}
}
}
}
}
// KQ_pos - contains the positions
struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
offload_func_kq(KQ_pos);
ggml_set_name(KQ_pos, "KQ_pos");
ggml_allocr_alloc(lctx.alloc, KQ_pos);
if (!ggml_allocr_is_measure(lctx.alloc)) {
int * data = (int *) KQ_pos->data;
for (int i = 0; i < n_tokens; ++i) {
data[i] = batch.pos[i];
}
}
// shift the entire K-cache if needed
if (do_rope_shift) {
struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx);
offload_func_kq(K_shift);
ggml_set_name(K_shift, "K_shift");
ggml_allocr_alloc(lctx.alloc, K_shift);
if (!ggml_allocr_is_measure(lctx.alloc)) {
int * data = (int *) K_shift->data;
for (int i = 0; i < n_ctx; ++i) {
data[i] = kv_self.cells[i].delta;
}
}
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * tmp =
ggml_rope_custom_inplace(ctx0,
ggml_view_3d(ctx0, kv_self.k,
n_embd_head, n_head_kv, n_ctx,
ggml_element_size(kv_self.k)*n_embd_head,
ggml_element_size(kv_self.k)*n_embd_gqa,
ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il),
K_shift, n_embd_head, 0, 0, freq_base, freq_scale);
offload_func_kq(tmp);
ggml_build_forward_expand(gf, tmp);
}
}
for (int il = 0; il < n_layer; ++il) {
ggml_format_name(inpL, "layer_inp_%d", il); ggml_format_name(inpL, "layer_inp_%d", il);
offload_func_t offload_func = llama_nop; offload_func = llama_nop;
#ifdef GGML_USE_CUBLAS #ifdef GGML_USE_CUBLAS
if (il >= i_gpu_start) { if (il >= i_gpu_start) {
@ -3630,25 +3453,64 @@ static struct ggml_cgraph * llm_build_baichaun(
struct ggml_tensor * inpSA = inpL; struct ggml_tensor * inpSA = inpL;
// norm // norm
{
cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps); cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps);
offload_func(cur); offload_func(cur);
ggml_set_name(cur, "rms_norm_0"); ggml_set_name(cur, "rms_norm_0");
// cur = cur*attn_norm(broadcasted) cur = build_attn_block(il, cur);
cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm);
struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
offload_func(inpFF);
ggml_set_name(inpFF, "inpFF");
// norm
cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps);
offload_func(cur); offload_func(cur);
ggml_set_name(cur, "attention_norm_0"); ggml_set_name(cur, "rms_norm_1");
cur = build_ffn_block(il, cur);
cur = ggml_add(ctx0, cur, inpFF);
offload_func(cur);
ggml_set_name(cur, "inpFF_+_result_w2");
// input for next layer
return cur;
} }
// self-attention struct ggml_tensor * build_layers(ggml_tensor * inpL) {
{ const int i_gpu_start = n_layer - n_gpu_layers;
(void) i_gpu_start;
for (int il = 0; il < n_layer; ++il) {
inpL = build_layer(il, inpL);
}
return inpL;
}
ggml_cgraph * build() {
    // Assemble the whole compute graph: non-repeating input setup,
    // then every transformer layer, then the final norm + lm_head.
    ggml_tensor * inpL = build_pre_repeating();
    inpL = build_layers(inpL);
    return build_post_repeating(inpL);
}
};
struct llm_build_baichuan_ctx : llm_build_llama_ctx {
// Graph-builder context for baichuan models: reuses the llama builder,
// with the K/Q rope and KQ masking steps redefined below.
// NOTE(review): the redefined methods are not virtual in the visible
// base-class code — confirm they are actually dispatched.
llm_build_baichuan_ctx(llama_context & lctx, const llama_batch & batch)
: llm_build_llama_ctx(lctx, batch)
{}
std::tuple<ggml_tensor *, ggml_tensor *> build_attn_block_kcur_qcur(
const llama_layer & layer,
ggml_tensor * cur) {
// compute Q and K and RoPE them // compute Q and K and RoPE them
struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur); struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, layer.wk, cur);
offload_func_kq(tmpk); offload_func_kq(tmpk);
ggml_set_name(tmpk, "tmpk"); ggml_set_name(tmpk, "tmpk");
struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur); struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, layer.wq, cur);
offload_func_kq(tmpq); offload_func_kq(tmpq);
ggml_set_name(tmpq, "tmpq"); ggml_set_name(tmpq, "tmpq");
@ -3673,51 +3535,10 @@ static struct ggml_cgraph * llm_build_baichaun(
offload_func_kq(Qcur); offload_func_kq(Qcur);
ggml_set_name(Qcur, "Qcur"); ggml_set_name(Qcur, "Qcur");
// store key and value to memory return std::make_tuple(Kcur, Qcur);
{
// compute the transposed [n_tokens, n_embd] V matrix
struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
offload_func_v(tmpv);
ggml_set_name(tmpv, "tmpv");
struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, n_tokens));
offload_func_v(Vcur);
ggml_set_name(Vcur, "Vcur");
struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head));
offload_func_kq(k);
ggml_set_name(k, "k");
struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa,
( n_ctx)*ggml_element_size(kv_self.v),
(il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v));
offload_func_v(v);
ggml_set_name(v, "v");
// important: storing RoPE-ed version of K in the KV cache!
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
} }
struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); ggml_tensor * build_attn_block_kq_masked(ggml_tensor * KQ) {
offload_func_kq(Q);
ggml_set_name(Q, "Q");
struct ggml_tensor * K =
ggml_view_3d(ctx0, kv_self.k,
n_embd_head, n_kv, n_head_kv,
ggml_element_size(kv_self.k)*n_embd_gqa,
ggml_element_size(kv_self.k)*n_embd_head,
ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
offload_func_kq(K);
ggml_set_name(K, "K");
// K * Q
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
offload_func_kq(KQ);
ggml_set_name(KQ, "KQ");
// KQ_scaled = KQ / sqrt(n_embd_head) // KQ_scaled = KQ / sqrt(n_embd_head)
// KQ_scaled shape [n_past + n_tokens, n_tokens, n_head, 1] // KQ_scaled shape [n_past + n_tokens, n_tokens, n_head, 1]
struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale); struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale);
@ -3741,120 +3562,26 @@ static struct ggml_cgraph * llm_build_baichaun(
GGML_ASSERT(false); GGML_ASSERT(false);
} }
// KQ = soft_max(KQ_masked) return KQ_masked;
struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); }
offload_func_v(KQ_soft_max); };
ggml_set_name(KQ_soft_max, "KQ_soft_max");
// split cached V into n_head heads
struct ggml_tensor * V =
ggml_view_3d(ctx0, kv_self.v,
n_kv, n_embd_head, n_head_kv,
ggml_element_size(kv_self.v)*n_ctx,
ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
offload_func_v(V);
ggml_set_name(V, "V");
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); static struct ggml_cgraph * llm_build_llama(
offload_func_v(KQV); llama_context & lctx,
ggml_set_name(KQV, "KQV"); const llama_batch & batch) {
// KQV_merged = KQV.permute(0, 2, 1, 3) llm_build_llama_ctx bctx(lctx, batch);
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); return bctx.build();
offload_func_v(KQV_merged);
ggml_set_name(KQV_merged, "KQV_merged");
// cur = KQV_merged.contiguous().view(n_embd, n_tokens)
cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens);
offload_func_v(cur);
ggml_set_name(cur, "KQV_merged_contiguous");
// projection (no bias)
cur = ggml_mul_mat(ctx0,
model.layers[il].wo,
cur);
offload_func(cur);
ggml_set_name(cur, "result_wo");
} }
struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA); static struct ggml_cgraph * llm_build_baichaun(
offload_func(inpFF); llama_context & lctx,
ggml_set_name(inpFF, "inpFF"); const llama_batch & batch) {
// feed-forward network llm_build_baichuan_ctx bctx(lctx, batch);
{ return bctx.build();
// norm
{
cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps);
offload_func(cur);
ggml_set_name(cur, "rms_norm_1");
// cur = cur*ffn_norm(broadcasted)
cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
offload_func(cur);
ggml_set_name(cur, "ffn_norm");
}
struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
model.layers[il].w3,
cur);
offload_func(tmp);
ggml_set_name(tmp, "result_w3");
cur = ggml_mul_mat(ctx0,
model.layers[il].w1,
cur);
offload_func(cur);
ggml_set_name(cur, "result_w1");
// SILU activation
cur = ggml_silu(ctx0, cur);
offload_func(cur);
ggml_set_name(cur, "silu");
cur = ggml_mul(ctx0, cur, tmp);
offload_func(cur);
ggml_set_name(cur, "silu_x_result_w3");
cur = ggml_mul_mat(ctx0,
model.layers[il].w2,
cur);
offload_func(cur);
ggml_set_name(cur, "result_w2");
}
cur = ggml_add(ctx0, cur, inpFF);
offload_func(cur);
ggml_set_name(cur, "inpFF_+_result_w2");
// input for next layer
inpL = cur;
}
cur = inpL;
// norm
{
cur = ggml_rms_norm(ctx0, cur, norm_rms_eps);
offload_func_nr(cur);
ggml_set_name(cur, "rms_norm_2");
// cur = cur*norm(broadcasted)
cur = ggml_mul(ctx0, cur, model.output_norm);
// offload_func_nr(cur); // TODO CPU + GPU mirrored backend
ggml_set_name(cur, "result_norm");
}
// lm_head
cur = ggml_mul_mat(ctx0, model.output, cur);
ggml_set_name(cur, "result_output");
ggml_build_forward_expand(gf, cur);
ggml_free(ctx0);
return gf;
} }
static struct ggml_cgraph * llm_build_refact( static struct ggml_cgraph * llm_build_refact(