quantize : fix missing 'noreturn' (-Wmissing-noreturn)

This commit is contained in:
Cebtenzzre 2023-09-14 15:12:56 -04:00
parent a80cb4cf1b
commit 80926572f7
3 changed files with 3 additions and 1 deletion

View file

@@ -431,6 +431,7 @@ if (LLAMA_ALL_WARNINGS)
         set(cxx_flags
             ${warning_flags}
             -Wmissing-declarations
+            -Wmissing-noreturn
         )
         if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") # clang++ only
             set(cxx_flags ${cxx_flags} -Wmissing-prototypes)

View file

@@ -176,7 +176,7 @@ endif # LLAMA_DISABLE_LOGS
 WARN_FLAGS = -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
 MK_CFLAGS += $(WARN_FLAGS) -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes \
              -Werror=implicit-int
-MK_CXXFLAGS += $(WARN_FLAGS) -Wmissing-declarations
+MK_CXXFLAGS += $(WARN_FLAGS) -Wmissing-declarations -Wmissing-noreturn
 # TODO(cebtenzzre): remove this once PR #2632 gets merged
 TTFS_CXXFLAGS = $(CXXFLAGS) -Wno-missing-declarations

View file

@@ -71,6 +71,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 // usage:
 //  ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads]
 //
+[[noreturn]]
 static void usage(const char * executable) {
     printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");