fix some unidiomatic conditions (clang-tidy)

These are recommended by the 'readability-container-size-empty' and
'readability-simplify-boolean-expr' checks.
This commit is contained in:
Cebtenzzre 2023-09-05 18:38:30 -04:00
parent 688aae46e5
commit aae2be0f08
11 changed files with 21 additions and 21 deletions

View file

@@ -57,7 +57,7 @@ int32_t get_num_physical_cores() {
siblings.insert(line); siblings.insert(line);
} }
} }
if (siblings.size() > 0) { if (!siblings.empty()) {
return static_cast<int32_t>(siblings.size()); return static_cast<int32_t>(siblings.size());
} }
#elif defined(__APPLE__) && defined(__MACH__) #elif defined(__APPLE__) && defined(__MACH__)

View file

@@ -23,7 +23,7 @@ extern "C" {
struct MyModel* create_mymodel(int argc, char ** argv) { struct MyModel* create_mymodel(int argc, char ** argv) {
gpt_params params; gpt_params params;
if (gpt_params_parse(argc, argv, params) == false) { if (!gpt_params_parse(argc, argv, params)) {
return nullptr; return nullptr;
} }

View file

@@ -11,7 +11,7 @@
int main(int argc, char ** argv) { int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
if (gpt_params_parse(argc, argv, params) == false) { if (!gpt_params_parse(argc, argv, params)) {
return 1; return 1;
} }

View file

@@ -953,7 +953,7 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
if (gpt_params_parse(argc, argv, params) == false) { if (!gpt_params_parse(argc, argv, params)) {
return 1; return 1;
} }

View file

@@ -925,7 +925,7 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
if (gpt_params_parse(argc, argv, params) == false) { if (!gpt_params_parse(argc, argv, params)) {
return 1; return 1;
} }

View file

@@ -109,7 +109,7 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
g_params = &params; g_params = &params;
if (gpt_params_parse(argc, argv, params) == false) { if (!gpt_params_parse(argc, argv, params)) {
return 1; return 1;
} }
@@ -303,7 +303,7 @@ int main(int argc, char ** argv) {
// debug message about similarity of saved session, if applicable // debug message about similarity of saved session, if applicable
size_t n_matching_session_tokens = 0; size_t n_matching_session_tokens = 0;
if (session_tokens.size() > 0) { if (!session_tokens.empty()) {
for (llama_token id : session_tokens) { for (llama_token id : session_tokens) {
if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) { if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
break; break;
@@ -401,7 +401,7 @@ int main(int argc, char ** argv) {
LOG_TEE("%s: interactive mode on.\n", __func__); LOG_TEE("%s: interactive mode on.\n", __func__);
if (params.antiprompt.size()) { if (!params.antiprompt.empty()) {
for (const auto & antiprompt : params.antiprompt) { for (const auto & antiprompt : params.antiprompt) {
LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str()); LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
} }
@@ -499,7 +499,7 @@ int main(int argc, char ** argv) {
while ((n_remain != 0 && !is_antiprompt) || params.interactive) { while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
// predict // predict
if (embd.size() > 0) { if (!embd.empty()) {
// Note: n_ctx - 4 here is to match the logic for commandline prompt handling via // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
// --prompt or --file which uses the same value. // --prompt or --file which uses the same value.
int max_embd_size = n_ctx - 4; int max_embd_size = n_ctx - 4;
@@ -624,7 +624,7 @@ int main(int argc, char ** argv) {
LOG("n_past = %d\n", n_past); LOG("n_past = %d\n", n_past);
} }
if (embd.size() > 0 && !path_session.empty()) { if (!embd.empty() && !path_session.empty()) {
session_tokens.insert(session_tokens.end(), embd.begin(), embd.end()); session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
n_session_consumed = session_tokens.size(); n_session_consumed = session_tokens.size();
} }
@@ -695,7 +695,7 @@ int main(int argc, char ** argv) {
// if not currently processing queued inputs; // if not currently processing queued inputs;
if ((int) embd_inp.size() <= n_consumed) { if ((int) embd_inp.size() <= n_consumed) {
// check for reverse prompt // check for reverse prompt
if (params.antiprompt.size()) { if (!params.antiprompt.empty()) {
std::string last_output; std::string last_output;
for (auto id : last_tokens) { for (auto id : last_tokens) {
last_output += llama_token_to_piece(ctx, id); last_output += llama_token_to_piece(ctx, id);
@@ -732,7 +732,7 @@ int main(int argc, char ** argv) {
LOG("found EOS token\n"); LOG("found EOS token\n");
if (params.interactive) { if (params.interactive) {
if (params.antiprompt.size() != 0) { if (!params.antiprompt.empty()) {
// tokenize and inject first reverse prompt // tokenize and inject first reverse prompt
const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false); const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end()); embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());

View file

@@ -655,7 +655,7 @@ int main(int argc, char ** argv) {
gpt_params params; gpt_params params;
params.n_batch = 512; params.n_batch = 512;
if (gpt_params_parse(argc, argv, params) == false) { if (!gpt_params_parse(argc, argv, params)) {
return 1; return 1;
} }

View file

@@ -13,7 +13,7 @@ int main(int argc, char ** argv) {
params.repeat_last_n = 64; params.repeat_last_n = 64;
params.prompt = "The quick brown fox"; params.prompt = "The quick brown fox";
if (gpt_params_parse(argc, argv, params) == false) { if (!gpt_params_parse(argc, argv, params)) {
return 1; return 1;
} }

View file

@@ -138,7 +138,7 @@ static bool ggml_allocr_is_own(struct ggml_allocr * alloc, const struct ggml_ten
void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
#ifdef GGML_ALLOCATOR_DEBUG #ifdef GGML_ALLOCATOR_DEBUG
GGML_ASSERT(ggml_is_view(tensor) == false); // views generally get data pointer from one of their sources GGML_ASSERT(!ggml_is_view(tensor)); // views generally get data pointer from one of their sources
GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
#endif #endif
size_t size = ggml_allocr_get_alloc_size(alloc, tensor); size_t size = ggml_allocr_get_alloc_size(alloc, tensor);

10
ggml.c
View file

@@ -4768,7 +4768,7 @@ static struct ggml_tensor * ggml_new_tensor_impl(
size_t obj_alloc_size = 0; size_t obj_alloc_size = 0;
if (view_src == NULL && ctx->no_alloc == false) { if (view_src == NULL && !ctx->no_alloc) {
if (ctx->scratch.data != NULL) { if (ctx->scratch.data != NULL) {
// allocate tensor data in the scratch buffer // allocate tensor data in the scratch buffer
if (ctx->scratch.offs + data_size > ctx->scratch.size) { if (ctx->scratch.offs + data_size > ctx->scratch.size) {
@@ -5469,7 +5469,7 @@ static struct ggml_tensor * ggml_mul_impl(
} }
if (inplace) { if (inplace) {
GGML_ASSERT(is_node == false); GGML_ASSERT(!is_node);
} }
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
@@ -5512,7 +5512,7 @@ static struct ggml_tensor * ggml_div_impl(
} }
if (inplace) { if (inplace) {
GGML_ASSERT(is_node == false); GGML_ASSERT(!is_node);
} }
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
@@ -19957,7 +19957,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
struct ggml_tensor * data = NULL; struct ggml_tensor * data = NULL;
if (params.no_alloc == false) { if (!params.no_alloc) {
data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size); data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
ok = ok && data != NULL; ok = ok && data != NULL;
@@ -19998,7 +19998,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
} }
// point the data member to the appropriate location in the binary blob using the tensor infos // point the data member to the appropriate location in the binary blob using the tensor infos
if (params.no_alloc == false) { if (!params.no_alloc) {
//cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data
} }

View file

@@ -5926,7 +5926,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
rng_ss.str(std::string(&rng_buf[0], rng_size)); rng_ss.str(std::string(&rng_buf[0], rng_size));
rng_ss >> ctx->rng; rng_ss >> ctx->rng;
GGML_ASSERT(rng_ss.fail() == false); GGML_ASSERT(!rng_ss.fail());
} }
// set logits // set logits