build : enable more non-default compiler warnings (#3200)

This commit is contained in:
Cebtenzzre 2023-09-28 17:41:44 -04:00 committed by GitHub
parent 0ccfc62a96
commit bc39553c90
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
16 changed files with 285 additions and 267 deletions

View file

@@ -449,7 +449,7 @@ struct LLM_TN {
//
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
{ \
do { \
const std::string skey(key); \
const int kid = gguf_find_key(ctx, skey.c_str()); \
if (kid >= 0) { \
@@ -461,7 +461,7 @@ struct LLM_TN {
} else if (req) { \
throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
} \
}
} while (0)
//
// ggml helpers
@@ -1913,7 +1913,7 @@ static void llm_load_hparams(
}
} break;
default: (void)0;
};
}
model.ftype = ml.ftype;
}
@@ -2438,7 +2438,7 @@ static void llm_load_tensors(
} break;
default:
throw std::runtime_error("unknown architecture");
};
}
}
ml.done_getting_tensors();
@@ -3981,7 +3981,7 @@ static struct ggml_cgraph * llama_build_graph(
} break;
default:
GGML_ASSERT(false);
};
}
return result;
}
@@ -4626,7 +4626,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
llm_tokenizer_bpe tokenizer(vocab);
tokenizer.tokenize(raw_text, output);
} break;
};
}
return output;
}
@@ -7520,7 +7520,7 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
buf[2] = '\x85';
return 3;
} else if (llama_is_control_token(model->vocab, token)) {
;
// do nothing
} else if (llama_is_byte_token(model->vocab, token)) {
if (length < 1) {
return -1;