From 90e93db548cc43fb3174516141786c1881c00e30 Mon Sep 17 00:00:00 2001
From: luoyu-intel
Date: Thu, 18 Jul 2024 15:04:41 +0800
Subject: [PATCH] add warm-up also for prompt_len=32, warm up both gemm and
 gemv

---
 common/common.cpp | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/common/common.cpp b/common/common.cpp
index dbb724fbb..de68fdf94 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2126,6 +2126,25 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
         llama_kv_cache_clear(lctx);
         llama_synchronize(lctx);
         llama_reset_timings(lctx);
+        tmp.clear();
+        for (int i = 0; i < 32; i++)
+        {
+            tmp.push_back(bos);
+        }
+        tmp.push_back(eos);
+        if (llama_model_has_encoder(model)) {
+            llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
+            llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
+            if (decoder_start_token_id == -1) {
+                decoder_start_token_id = bos;
+            }
+            tmp.clear();
+            tmp.push_back(decoder_start_token_id);
+        }
+        llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
+        llama_kv_cache_clear(lctx);
+        llama_synchronize(lctx);
+        llama_reset_timings(lctx);
     }
 
     return std::make_tuple(model, lctx);
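Why a second warm-up decode: the pre-existing warm-up submits a batch of at
most two tokens (BOS + EOS), which exercises the single-token, GEMV-style
matmul path used during token generation, while the 33-token batch added here
(32 x BOS followed by EOS) also exercises the batched GEMM path used during
prompt processing. Backends typically compile or auto-tune a kernel the first
time a given batch shape runs, so warming up only one shape leaves the other
cold for the first real request. Below is a minimal, self-contained sketch of
that dispatch distinction; pick_matmul_kernel and its cutoff are hypothetical,
for illustration only, not llama.cpp API.

// Illustrative sketch only, not llama.cpp code: a backend-style kernel
// dispatch that picks a different matmul kernel per batch shape. Each
// kernel would pay a one-time compile/tune cost on first use, which is
// why both shapes are decoded once during warm-up.
#include <cstdio>
#include <cstddef>

// Hypothetical dispatch rule; the real cutoff is backend-specific.
static const char * pick_matmul_kernel(std::size_t n_tokens) {
    return n_tokens <= 2 ? "GEMV-like kernel (token generation path)"
                         : "GEMM kernel (prompt processing path)";
}

int main() {
    // Pre-existing warm-up: a batch of at most 2 tokens (BOS + EOS).
    std::printf("2-token warm-up  -> %s\n", pick_matmul_kernel(2));
    // This patch: a 33-token batch (32 x BOS + EOS) warms the GEMM path too.
    std::printf("33-token warm-up -> %s\n", pick_matmul_kernel(33));
    return 0;
}

With both passes run before the final llama_reset_timings(), neither batch
shape pays its one-time kernel setup cost inside the timed region.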