From 24bea1549b3a0cc09bf896962b873a37dca3c984 Mon Sep 17 00:00:00 2001
From: Jia Liu
Date: Thu, 19 Sep 2024 11:06:47 +0800
Subject: [PATCH] add llama_model_reset_time API

---
 examples/llama-bench/llama-bench.cpp | 2 ++
 include/llama.h                      | 3 +++
 src/llama.cpp                        | 7 +++++++
 3 files changed, 12 insertions(+)

diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index fb1d387b2..31a9213ea 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -1557,6 +1557,8 @@ int main(int argc, char ** argv) {
                 return 1;
             }
             prev_inst = &inst;
+        } else {
+            llama_model_reset_time(lmodel);
         }
 
         llama_context * ctx = llama_new_context_with_model(lmodel, inst.to_llama_cparams());
diff --git a/include/llama.h b/include/llama.h
index f316a87ba..c8ce495af 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -414,6 +414,9 @@ extern "C" {
             const char * path_model,
             struct llama_model_params params);
 
+    // reset the model's load-time statistics; call when reusing an already-loaded model
+    LLAMA_API void llama_model_reset_time(struct llama_model * model);
+
     LLAMA_API void llama_free_model(struct llama_model * model);
 
     // TODO: rename to llama_init_from_model
diff --git a/src/llama.cpp b/src/llama.cpp
index af8afd845..1dda8210a 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8809,6 +8809,13 @@ static bool llm_load_tensors(
     return true;
 }
 
+// Reset the load-time counters of a model that is being reused, so that
+// timings reported for a new context do not include the original load time.
+void llama_model_reset_time(llama_model * model) {
+    model->t_start_us = ggml_time_us();
+    model->t_load_us  = 0; // model is already loaded: no (re)load cost
+}
+
 // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
 static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
     model.t_start_us = ggml_time_us();