Apply suggestions from code review

fix style

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Author: Didzis Gosko, 2023-06-19 11:47:05 +03:00, committed by GitHub
parent df57fcb9c8
commit 7e14cb901c


@@ -233,7 +233,7 @@ struct llama_model {
 };

 struct llama_context {
-    llama_context(const llama_model& model, const llama_vocab& vocab) : model(model), vocab(vocab), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
+    llama_context(const llama_model & model, const llama_vocab & vocab) : model(model), vocab(vocab), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}

     std::mt19937 rng;

@@ -247,8 +247,8 @@ struct llama_context {
     int32_t n_eval = 0; // number of eval calls
     int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)

-    const llama_model& model;
-    const llama_vocab& vocab;
+    const llama_model & model;
+    const llama_vocab & vocab;

     bool model_owner = false;

@@ -2536,7 +2536,7 @@ struct llama_model * llama_load_model_from_file(
         struct llama_context_params params) {
     ggml_time_init();

-    llama_model *model = new llama_model;
+    llama_model * model = new llama_model;

     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;

@@ -2558,7 +2558,7 @@ struct llama_context * llama_new_context_with_model(
         struct llama_model * model,
         struct llama_context_params params) {

-    if(!model) {
+    if (!model) {
         return nullptr;
     }

@@ -2690,7 +2690,7 @@ int llama_model_quantize(
     }
 }

-int llama_apply_lora_from_file_internal(const struct llama_model& model, const char * path_lora, const char * path_base_model, int n_threads) {
+int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
     fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);

     const int64_t t_start_lora_us = ggml_time_us();