commit 036eabc647
Author: fairydreaming (committed by GitHub)
Date:   2025-02-10 18:56:23 +08:00
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)

2 changed files with 11 additions and 6 deletions
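
Summary (inferred from the diff below): during warmup the graph build context now activates all experts of a mixture-of-experts model rather than only n_expert_used, so every expert tensor participates in the warmup pass; the graph node floor is raised from 8192 to 65536 to accommodate the larger graph. Warmup is detected from the two-token [BOS, EOS] warmup batch.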

@@ -3574,7 +3574,7 @@ size_t llama_model::size() const {
 }
 
 size_t llama_model::max_nodes() const {
-    return std::max<size_t>(8192, tensors_by_name.size()*5);
+    return std::max<size_t>(65536, tensors_by_name.size()*5);
 }
 
 size_t llama_model::n_devices() const {
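
The value returned by max_nodes() bounds how many nodes a compute graph may hold, and warming up with every expert active inflates the graph well past the old 8192 floor. A minimal sketch of where such a cap typically lands, assuming a wrapper around the real ggml_new_graph_custom API (the wrapper itself is illustrative, not the exact llama.cpp call site):

    // Sketch: how a node cap like llama_model::max_nodes() is consumed
    // when allocating a ggml compute graph. The wrapper function is
    // hypothetical; ggml_new_graph_custom(ctx, size, grads) is real ggml API.
    #include "ggml.h"

    static struct ggml_cgraph * alloc_build_graph(struct ggml_context * ctx, size_t max_nodes) {
        // Graph node metadata is sized up front from `max_nodes`; growing a
        // graph past this limit is a hard failure, which is why the
        // all-experts warmup graph needs the larger 65536 floor.
        return ggml_new_graph_custom(ctx, max_nodes, /*grads=*/false);
    }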

@@ -1092,7 +1092,8 @@ struct llm_build_context {
     llama_context & lctx,
     const llama_ubatch & ubatch,
     const llm_build_cb & cb,
-    bool worst_case) :
+    bool worst_case,
+    bool warmup) :
     model   (lctx.model),
     lctx    (lctx),
     hparams (model.hparams),
@@ -1110,7 +1111,7 @@ struct llm_build_context {
     n_embd_head_v (hparams.n_embd_head_v),
     n_embd_v_gqa  (hparams.n_embd_v_gqa()),
     n_expert      (hparams.n_expert),
-    n_expert_used (hparams.n_expert_used),
+    n_expert_used (warmup ? hparams.n_expert : hparams.n_expert_used),
     freq_base     (cparams.rope_freq_base),
     freq_scale    (cparams.rope_freq_scale),
     ext_factor    (cparams.yarn_ext_factor),
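
Overriding n_expert_used with n_expert during warmup makes the MoE router's top-k selection cover every expert, so each expert's weights are actually read (and, for example, mmap'd pages get faulted in). A minimal sketch of the selection step this field feeds, assuming the usual llm_build_moe_ffn-style routing (tensor names here are illustrative):

    // probs: router output of shape [n_expert, n_tokens] (illustrative name).
    // ggml_top_k is the real ggml API; with warmup forcing
    // n_expert_used == n_expert, the selection spans all experts.
    struct ggml_tensor * selected_experts =
        ggml_top_k(ctx0, probs, n_expert_used); // [n_expert_used, n_tokens]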
@@ -8118,7 +8119,7 @@ static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const
     llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
 
-    struct llm_build_context llm(lctx, dummy, cb, false);
+    struct llm_build_context llm(lctx, dummy, cb, false, false);
 
     llm.init();
@@ -8135,7 +8136,7 @@ static struct ggml_cgraph * llama_build_graph_k_shift(llama_context & lctx) {
     llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };
 
-    struct llm_build_context llm(lctx, dummy, cb, false);
+    struct llm_build_context llm(lctx, dummy, cb, false, false);
 
     llm.init();
@@ -8186,7 +8187,11 @@ static struct ggml_cgraph * llama_build_graph(
     struct ggml_cgraph * result = NULL;
 
-    struct llm_build_context llm(lctx, ubatch, cb, worst_case);
+    const llama_vocab * vocab = llama_model_get_vocab(&model);
+    llama_token bos = llama_vocab_bos(vocab);
+    llama_token eos = llama_vocab_eos(vocab);
+    bool is_warming_up = (ubatch.n_tokens == 2 && ubatch.token[0] == bos && ubatch.token[1] == eos);
+    struct llm_build_context llm(lctx, ubatch, cb, worst_case, is_warming_up);
 
     llm.init();
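
The is_warming_up heuristic recognizes the batch that the common warmup path submits: a single decode of [BOS, EOS]. Roughly, that warmup looks like the following (simplified sketch of the common.cpp warmup of this era; treat names and error handling as approximate, not verbatim upstream code):

    // Simplified sketch of the warmup decode that the ubatch check above
    // is meant to recognize.
    std::vector<llama_token> tmp;
    llama_token bos = llama_vocab_bos(vocab);
    llama_token eos = llama_vocab_eos(vocab);
    if (bos != LLAMA_TOKEN_NULL) { tmp.push_back(bos); }
    if (eos != LLAMA_TOKEN_NULL) { tmp.push_back(eos); }

    llama_decode(ctx, llama_batch_get_one(tmp.data(), (int32_t) tmp.size()));
    llama_kv_cache_clear(ctx);   // API name as of early 2025
    llama_synchronize(ctx);

Note the heuristic is deliberately cheap: any real two-token [BOS, EOS] batch would also take the all-experts path, which is harmless for correctness but slower, and the dummy graphs built for defrag and K-shift above always pass warmup = false.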