diff --git a/examples/embd-input/embd-input-lib.cpp b/examples/embd-input/embd-input-lib.cpp
index 570e273fc..a313902f8 100644
--- a/examples/embd-input/embd-input-lib.cpp
+++ b/examples/embd-input/embd-input-lib.cpp
@@ -23,7 +23,7 @@ extern "C" {
 
 struct MyModel* create_mymodel(int argc, char ** argv) {
     gpt_params params;
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return nullptr;
     }
 
diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp
index c50eeb343..3662c2d82 100644
--- a/examples/train-text-from-scratch/train-text-from-scratch.cpp
+++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -2119,7 +2119,7 @@ struct llama_file {
         return (size_t) ret;
     }
 
-    void seek(size_t offset, int whence) {
+    void seek(size_t offset, int whence) const {
 #ifdef _WIN32
         int ret = _fseeki64(fp, (__int64) offset, whence);
 #else
@@ -2128,7 +2128,7 @@ struct llama_file {
         GGML_ASSERT(ret == 0); // same
     }
 
-    void read_raw(void * ptr, size_t size) {
+    void read_raw(void * ptr, size_t size) const {
         if (size == 0) {
             return;
         }
@@ -2142,19 +2142,19 @@ struct llama_file {
         }
     }
 
-    std::uint32_t read_u32() {
+    std::uint32_t read_u32() const {
         std::uint32_t ret;
         read_raw(&ret, sizeof(ret));
         return ret;
     }
 
-    std::string read_string(std::uint32_t len) {
+    std::string read_string(std::uint32_t len) const {
         std::vector<char> chars(len);
         read_raw(chars.data(), len);
         return std::string(chars.data(), len);
     }
 
-    void write_raw(const void * ptr, size_t size) {
+    void write_raw(const void * ptr, size_t size) const {
         if (size == 0) {
             return;
         }
@@ -2165,7 +2165,7 @@ struct llama_file {
         }
     }
 
-    void write_u32(std::uint32_t val) {
+    void write_u32(std::uint32_t val) const {
         write_raw(&val, sizeof(val));
     }
 
diff --git a/llama.cpp b/llama.cpp
index a869bbac8..6000f847f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -734,7 +734,7 @@ struct llama_model_loader {
         }
     }
 
-    void load_data_for(llama_load_tensor & lt) {
+    void load_data_for(llama_load_tensor & lt) const {
        if (use_mmap) {
            lt.data = (uint8_t *) mapping->addr + lt.file_off;
        } else {