llama : use all experts during warmup

commit 83a473a001
parent 5bbc7362cb
Author: Stanisław Szymczyk
Date:   2025-02-01 10:32:06 +01:00


@@ -1092,7 +1092,8 @@ struct llm_build_context {
     llama_context & lctx,
     const llama_ubatch & ubatch,
     const llm_build_cb & cb,
-    bool worst_case) :
+    bool worst_case,
+    bool warmup) :
     model (lctx.model),
     lctx (lctx),
     hparams (model.hparams),
@@ -1110,7 +1111,7 @@ struct llm_build_context {
     n_embd_head_v (hparams.n_embd_head_v),
     n_embd_v_gqa (hparams.n_embd_v_gqa()),
     n_expert (hparams.n_expert),
-    n_expert_used (hparams.n_expert_used),
+    n_expert_used (warmup ? hparams.n_expert : hparams.n_expert_used),
     freq_base (cparams.rope_freq_base),
     freq_scale (cparams.rope_freq_scale),
     ext_factor (cparams.yarn_ext_factor),
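
In the MoE feed-forward graph, n_expert_used is the k handed to the top-k expert selection, so overriding it with n_expert during warmup routes every token through every expert and touches all expert weights once (with mmap, this pages the full model in before the first real decode). A minimal sketch of that routing step, loosely based on llm_build_moe_ffn; the tensor names here are illustrative assumptions, not the verbatim source:

    // router scores for each token, one logit per expert: [n_expert, n_tokens]
    struct ggml_tensor * logits = ggml_mul_mat(ctx0, ffn_gate_inp, cur);
    struct ggml_tensor * probs  = ggml_soft_max(ctx0, logits);
    // with warmup == true, n_expert_used == n_expert, so top-k keeps all experts
    struct ggml_tensor * selected_experts = ggml_top_k(ctx0, probs, n_expert_used);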
@@ -8103,7 +8104,7 @@ static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const
 
     llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
 
-    struct llm_build_context llm(lctx, dummy, cb, false);
+    struct llm_build_context llm(lctx, dummy, cb, false, false);
 
     llm.init();
@@ -8120,7 +8121,7 @@ static struct ggml_cgraph * llama_build_graph_k_shift(llama_context & lctx) {
 
     llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
 
-    struct llm_build_context llm(lctx, dummy, cb, false);
+    struct llm_build_context llm(lctx, dummy, cb, false, false);
 
     llm.init();
@@ -8171,7 +8172,11 @@ static struct ggml_cgraph * llama_build_graph(
 
     struct ggml_cgraph * result = NULL;
 
-    struct llm_build_context llm(lctx, ubatch, cb, worst_case);
+    const llama_vocab * vocab = llama_model_get_vocab(&model);
+    llama_token bos = llama_vocab_bos(vocab);
+    llama_token eos = llama_vocab_eos(vocab);
+    bool is_warming_up = (ubatch.n_tokens == 2 && ubatch.token[0] == bos && ubatch.token[1] == eos);
+    struct llm_build_context llm(lctx, ubatch, cb, worst_case, is_warming_up);
 
     llm.init();
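
The is_warming_up heuristic mirrors the warmup batch that common_init_from_params submits right after loading a model; a hedged sketch of that caller side, simplified from common.cpp of the same period (exact code may differ):

    // warmup: decode a minimal { bos, eos } batch once, then reset the KV cache
    std::vector<llama_token> tmp;
    llama_token bos = llama_vocab_bos(vocab);
    llama_token eos = llama_vocab_eos(vocab);
    if (bos != LLAMA_TOKEN_NULL) { tmp.push_back(bos); } // some models (e.g. T5) have no BOS
    if (eos != LLAMA_TOKEN_NULL) { tmp.push_back(eos); }
    // this produces exactly the two-token ubatch the heuristic above matches
    llama_decode(lctx, llama_batch_get_one(tmp.data(), (int32_t) tmp.size()));
    llama_kv_cache_clear(lctx);

Note the trade-off: a genuine user request consisting of exactly BOS followed by EOS would also be classified as warmup and built with all experts, which wastes some compute and can alter that batch's logits, though such a degenerate request is unlikely in practice.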