From e696addb4ed263659cb47041a08ed5b2df462079 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Tue, 7 Jan 2025 17:22:07 +0200
Subject: [PATCH] test

---
 src/llama-model.cpp | 3 +--
 src/llama-model.h   | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index e2fba5ad7..7d0849c34 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -334,7 +334,6 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, enum llama_split_m
     return buft_list;
 }
 
-
 struct llama_model::impl {
     impl() {}
     ~impl() {}
@@ -374,7 +373,7 @@ struct llama_model::impl {
 llama_model::llama_model(const struct llama_model_params & params) : params(params), pimpl(std::make_unique<impl>()) {
 }
 
-llama_model::~llama_model() = default;
+llama_model::~llama_model() {}
 
 void llama_model::load_stats(llama_model_loader & ml) {
     pimpl->n_elements = ml.n_elements;
diff --git a/src/llama-model.h b/src/llama-model.h
index 7eeb4f1dd..ec51eb3b7 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -330,7 +330,7 @@ struct llama_model {
     int64_t t_load_us = 0;
     int64_t t_start_us = 0;
 
-    llama_model(const struct llama_model_params & params);
+    explicit llama_model(const struct llama_model_params & params);
     ~llama_model();
 
     void load_stats (llama_model_loader & ml);
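
For context, the two substantive changes above exercise two common C++ idioms: the destructor of a pimpl-holding class must be defined in the .cpp file, where the nested impl type is complete (otherwise std::unique_ptr<impl>'s deleter cannot be instantiated), and a single-argument constructor marked explicit forbids implicit conversions from its parameter type. A minimal standalone sketch of both, using hypothetical widget names rather than llama.cpp code:

// widget.h (hypothetical example, not part of llama.cpp)
#include <memory>

struct widget_params { int n = 0; };

struct widget {
    // explicit: disallows `widget w = some_params;`
    explicit widget(const widget_params & params);
    // declared here, defined in the .cpp where impl is complete
    ~widget();

private:
    struct impl;                 // incomplete at this point
    std::unique_ptr<impl> pimpl; // OK: impl only needs to be complete
                                 // where the destructor is defined
};

// widget.cpp
struct widget::impl { int value = 0; };

widget::widget(const widget_params & params) : pimpl(std::make_unique<impl>()) {
    pimpl->value = params.n;
}

// impl is complete here, so unique_ptr<impl> can be destroyed;
// `= default` and `{}` are equivalent at this point, but the
// definition must live in this translation unit.
widget::~widget() {}

int main() {
    widget w(widget_params{42});
    // widget w2 = widget_params{};  // ill-formed: constructor is explicit
    return 0;
}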