cmake : re-enable GCC -Wshadow

ggml-ci
Georgi Gerganov 2025-01-12 15:29:33 +02:00
parent 34889bf810
commit 439e68c1e5
16 changed files with 73 additions and 65 deletions

@@ -66,7 +66,7 @@ struct file_input {
     float alpha;
     float scale;
-    file_input(std::string & fname, float scale): f_in(fname, std::ios::binary), scale(scale) {
+    file_input(std::string & fname, float scale_): f_in(fname, std::ios::binary), scale(scale_) {
         if (!f_in.is_open()) {
             throw std::runtime_error("failed to open input gguf from " + fname);
         }
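
The rename is the mechanical fix for GCC's -Wshadow, which warns whenever a constructor parameter has the same name as the member it initializes, even though the member(param) initializer itself is unambiguous. A minimal standalone sketch of the before/after, with an illustrative type and file name that are not from this repo:

    // g++ -Wshadow -c shadow.cpp
    struct file_input_like {
        float scale;

        // warns: declaration of 'scale' shadows a member of 'file_input_like'
        // file_input_like(float scale) : scale(scale) {}

        // the trailing-underscore convention used throughout this commit:
        file_input_like(float scale_) : scale(scale_) {}
    };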
@@ -131,7 +131,7 @@ struct lora_merge_ctx {
             std::string & base_fname,
             std::vector<common_adapter_lora_info> & lora_files,
             std::string & outfile,
-            int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
+            int n_threads_) : base_model(base_fname, 0), n_threads(n_threads_), fout(outfile, std::ios::binary) {
         fout.exceptions(std::ofstream::failbit); // fail fast on write errors
         if (gguf_find_key(base_model.ctx_gguf, LLM_KV_SPLIT_COUNT) >= 0) {
@@ -157,7 +157,7 @@ struct lora_merge_ctx {
         allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
     }

-    void check_metadata_lora(file_input * adapter) {
+    void check_metadata_lora(const file_input * adapter) const {
         auto general_type = get_kv_str(adapter->ctx_gguf, "general.type");
         if (general_type != "adapter") {
             throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
@@ -175,7 +175,7 @@ struct lora_merge_ctx {
         }
     }

-    ggml_type get_out_tensor_type(struct ggml_tensor * t) {
+    static ggml_type get_out_tensor_type(struct ggml_tensor * t) {
         if (t->type == GGML_TYPE_F32) {
             return GGML_TYPE_F32;
         } else {
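
Two qualifier changes ride along with the renames: check_metadata_lora now takes a pointer-to-const and is itself const, since it only inspects the adapter, and get_out_tensor_type becomes static because it reads nothing through `this`. A short sketch of what each qualifier buys, with hypothetical names:

    struct merge_ctx_like {
        int n_threads = 1;

        // const member function: may read members, but the compiler rejects writes to them
        int threads() const { return n_threads; }

        // static member function: has no `this`, callable as merge_ctx_like::is_f32(...)
        static bool is_f32(int type) { return type == 0; }
    };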

@@ -204,14 +204,14 @@ struct split_strategy {
     // temporary buffer for reading in tensor data
     std::vector<uint8_t> read_buf;

-    split_strategy(const split_params & params,
-            std::ifstream & f_input,
-            struct gguf_context * ctx_gguf,
-            struct ggml_context * ctx_meta) :
-        params(params),
-        f_input(f_input),
-        ctx_gguf(ctx_gguf),
-        ctx_meta(ctx_meta),
+    split_strategy(const split_params & params_,
+            std::ifstream & f_input_,
+            struct gguf_context * ctx_gguf_,
+            struct ggml_context * ctx_meta_) :
+        params(params_),
+        f_input(f_input_),
+        ctx_gguf(ctx_gguf_),
+        ctx_meta(ctx_meta_),
         n_tensors(gguf_get_n_tensors(ctx_gguf)) {
         // because we need to know list of tensors for each file in advance, we will build all the ctx_out for all output splits

@@ -4,6 +4,11 @@ install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_17)

-if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
-    target_compile_options(${TARGET} PRIVATE -Wno-shadow) # TMP
+# TMP
+if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    target_compile_options(${TARGET} PRIVATE -Wno-shadow)
+
+    if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+        target_compile_options(${TARGET} PRIVATE -Wno-shadow-field-in-constructor)
+    endif()
 endif()
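
The GNU/Clang split in this guard reflects how the two compilers partition shadow diagnostics: GCC folds the constructor-parameter-shadows-field case into plain -Wshadow, while Clang keeps the general case behind the separate -Wshadow-field-in-constructor flag (Clang's plain -Wshadow only flags such a parameter if it is later modified), so the temporary opt-out needs both options under Clang. A minimal repro, assuming g++/clang++ on PATH and a hypothetical repro.cpp:

    // g++     -Wshadow                      -c repro.cpp   -> warns
    // clang++ -Wshadow                      -c repro.cpp   -> silent (parameter never modified)
    // clang++ -Wshadow-field-in-constructor -c repro.cpp   -> warns
    struct task_like {
        int type;
        task_like(int type) : type(type) {}
    };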

@@ -200,7 +200,7 @@ struct server_task {
     // used by SERVER_TASK_TYPE_SET_LORA
     std::vector<common_adapter_lora_info> set_lora;

-    server_task(server_task_type type) : type(type) {}
+    server_task(server_task_type type_) : type(type_) {}

     static slot_params params_from_json_cmpl(
             const llama_context * ctx,