diff --git a/common/common.cpp b/common/common.cpp
index ba1ecf0e5..3faa7c459 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2669,14 +2669,14 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     }
 
     fprintf(stream, "lora:\n");
-    for (std::tuple<std::string, float> la : params.lora_adapter) {
+    for (const std::tuple<std::string, float> & la : params.lora_adapter) {
         if (std::get<1>(la) != 1.0f) {
             continue;
         }
         fprintf(stream, "  - %s\n", std::get<0>(la).c_str());
     }
     fprintf(stream, "lora_scaled:\n");
-    for (std::tuple<std::string, float> la : params.lora_adapter) {
+    for (const std::tuple<std::string, float> & la : params.lora_adapter) {
         if (std::get<1>(la) == 1.0f) {
             continue;
         }
diff --git a/common/ngram-cache.cpp b/common/ngram-cache.cpp
index 3ca112ef1..29d5b344a 100644
--- a/common/ngram-cache.cpp
+++ b/common/ngram-cache.cpp
@@ -195,7 +195,7 @@ void llama_ngram_cache_draft(
 
 void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename) {
     std::ofstream file_out(filename, std::ios::binary);
-    for (std::pair<llama_ngram, llama_ngram_cache_part> item : ngram_cache) {
+    for (const std::pair<llama_ngram, llama_ngram_cache_part> & item : ngram_cache) {
         const llama_ngram ngram = item.first;
         llama_ngram_cache_part token_counts = item.second;
         GGML_ASSERT(!token_counts.empty());
@@ -255,7 +255,7 @@ llama_ngram_cache llama_ngram_cache_load(std::string & filename) {
 }
 
 void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add) {
-    for (std::pair<llama_ngram, llama_ngram_cache_part> ngram_part : ngram_cache_add) {
+    for (const std::pair<llama_ngram, llama_ngram_cache_part> & ngram_part : ngram_cache_add) {
         const llama_ngram ngram = ngram_part.first;
         llama_ngram_cache_part part = ngram_part.second;
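
Note (not part of the patch above): every hunk applies the same pattern, binding the range-for loop variable by const reference so each iteration no longer copy-constructs the element. Below is a minimal standalone sketch of the difference; the `std::map<std::string, int>` container is a hypothetical stand-in, not the actual `llama_ngram_cache` or `gpt_params` types.

```cpp
// Sketch: by-value vs. const-reference loop variables over a map.
// The container here is a stand-in chosen for brevity.
#include <cstdio>
#include <map>
#include <string>

int main() {
    std::map<std::string, int> cache = {{"a", 1}, {"b", 2}};

    // By value: each iteration copy-constructs a fresh pair from the
    // stored element (value_type is std::pair<const std::string, int>).
    for (std::pair<const std::string, int> item : cache) {
        std::printf("%s=%d\n", item.first.c_str(), item.second);
    }

    // By const reference: binds directly to the stored element, no copy.
    for (const std::pair<const std::string, int> & item : cache) {
        std::printf("%s=%d\n", item.first.c_str(), item.second);
    }
}
```

One subtlety for the map-like cases: the stored `value_type` of a map has a const key, so the referenced pair type must spell the key `const` (or use `const auto &`) for the reference to bind to the element itself rather than to a materialized temporary copy. For the `std::vector<std::tuple<std::string, float>>` case in common.cpp, the element type matches exactly, so the const reference avoids the copy outright.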