fix more -Wextra-semi-stmt warnings

Cebtenzzre 2023-09-14 17:49:24 -04:00
parent df080fe7e8
commit 54e28be107
8 changed files with 65 additions and 65 deletions
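For context, here is a minimal sketch of what clang's -Wextra-semi-stmt flags and why this commit rewrites brace-block macros with the do { ... } while (0) idiom. The macro names LOG_BAD and LOG_GOOD are hypothetical illustrations, not code from this repository:

#include <cstdio>

// Brace-block macro: a call like LOG_BAD("hi\n"); expands to { ... };
// and the trailing semicolon is an empty statement, which
// -Wextra-semi-stmt flags. It also breaks "if (c) LOG_BAD(...); else"
// because the stray semicolon orphans the else.
#define LOG_BAD(...) { fprintf(stderr, __VA_ARGS__); }

// The do/while(0) idiom consumes exactly one trailing semicolon, so a
// call site can (and must) end with ; like an ordinary function call.
#define LOG_GOOD(...) do { fprintf(stderr, __VA_ARGS__); } while (0)

int main() {
    LOG_GOOD("hello %d\n", 42); // this ; terminates the do/while
    if (42 > 0)
        LOG_GOOD("safe in an unbraced if\n");
    else
        LOG_GOOD("and the else still binds correctly\n");
    return 0;
}

The same warning accounts for the other changes below: stray semicolons after the closing brace of a switch or if/else ("};" becoming "}"), a doubled ";;", and stub macro definitions that baked a trailing semicolon into their expansion.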


@@ -226,22 +226,22 @@ enum LogTriState
//
#ifndef _MSC_VER
#define LOG_IMPL(str, ...) \
-{ \
+do { \
if (LOG_TARGET != nullptr) \
{ \
fprintf(LOG_TARGET, LOG_TIMESTAMP_FMT LOG_FLF_FMT str "%s" LOG_TIMESTAMP_VAL LOG_FLF_VAL, __VA_ARGS__); \
fflush(LOG_TARGET); \
} \
-}
+} while (0)
#else
#define LOG_IMPL(str, ...) \
-{ \
+do { \
if (LOG_TARGET != nullptr) \
{ \
fprintf(LOG_TARGET, LOG_TIMESTAMP_FMT LOG_FLF_FMT str "%s" LOG_TIMESTAMP_VAL LOG_FLF_VAL "", ##__VA_ARGS__); \
fflush(LOG_TARGET); \
} \
-}
+} while (0)
#endif
// INTERNAL, DO NOT USE
@@ -249,7 +249,7 @@ enum LogTriState
//
#ifndef _MSC_VER
#define LOG_TEE_IMPL(str, ...) \
-{ \
+do { \
if (LOG_TARGET != nullptr) \
{ \
fprintf(LOG_TARGET, LOG_TIMESTAMP_FMT LOG_FLF_FMT str "%s" LOG_TIMESTAMP_VAL LOG_FLF_VAL, __VA_ARGS__); \
@@ -260,10 +260,10 @@ enum LogTriState
fprintf(LOG_TEE_TARGET, LOG_TEE_TIMESTAMP_FMT LOG_TEE_FLF_FMT str "%s" LOG_TEE_TIMESTAMP_VAL LOG_TEE_FLF_VAL, __VA_ARGS__); \
fflush(LOG_TEE_TARGET); \
} \
-}
+} while (0)
#else
#define LOG_TEE_IMPL(str, ...) \
-{ \
+do { \
if (LOG_TARGET != nullptr) \
{ \
fprintf(LOG_TARGET, LOG_TIMESTAMP_FMT LOG_FLF_FMT str "%s" LOG_TIMESTAMP_VAL LOG_FLF_VAL "", ##__VA_ARGS__); \
@@ -274,7 +274,7 @@ enum LogTriState
fprintf(LOG_TEE_TARGET, LOG_TEE_TIMESTAMP_FMT LOG_TEE_FLF_FMT str "%s" LOG_TEE_TIMESTAMP_VAL LOG_TEE_FLF_VAL "", ##__VA_ARGS__); \
fflush(LOG_TEE_TARGET); \
} \
-}
+} while (0)
#endif
// The '\0' as a last argument, is a trick to bypass the silly
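Aside: the truncated comment above refers to a trick partly visible in these hunks. The format strings carry an extra "%s", matched by an empty "" argument appended where the user-facing macros forward their arguments, so LOG(...) can be called with zero variadic arguments without tripping clang's "ISO C++11 requires at least one argument for the ... in a variadic macro" pedantry. A minimal sketch under that assumption, with hypothetical names MY_LOG_IMPL and MY_LOG rather than the file's actual definitions:

#include <cstdio>

#define MY_LOG_IMPL(str, ...) fprintf(stderr, str "%s", __VA_ARGS__)
#define MY_LOG(...) MY_LOG_IMPL(__VA_ARGS__, "")

int main() {
    MY_LOG("plain\n");      // expands to fprintf(stderr, "plain\n" "%s", "")
    MY_LOG("x = %d\n", 42); // expands to fprintf(stderr, "x = %d\n" "%s", 42, "")
    return 0;
}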
@@ -435,41 +435,41 @@ inline FILE *log_handler() { return log_handler1_impl(); }
inline void log_test()
{
log_disable();
-LOG("01 Hello World to nobody, because logs are disabled!\n")
+LOG("01 Hello World to nobody, because logs are disabled!\n");
log_enable();
-LOG("02 Hello World to default output, which is \"%s\" ( Yaaay, arguments! )!\n", LOG_STRINGIZE(LOG_TARGET))
-LOG_TEE("03 Hello World to **both** default output and " LOG_TEE_TARGET_STRING "!\n")
+LOG("02 Hello World to default output, which is \"%s\" ( Yaaay, arguments! )!\n", LOG_STRINGIZE(LOG_TARGET));
+LOG_TEE("03 Hello World to **both** default output and " LOG_TEE_TARGET_STRING "!\n");
log_set_target(stderr);
-LOG("04 Hello World to stderr!\n")
-LOG_TEE("05 Hello World TEE with double printing to stderr prevented!\n")
+LOG("04 Hello World to stderr!\n");
+LOG_TEE("05 Hello World TEE with double printing to stderr prevented!\n");
log_set_target(LOG_DEFAULT_FILE_NAME);
-LOG("06 Hello World to default log file!\n")
+LOG("06 Hello World to default log file!\n");
log_set_target(stdout);
-LOG("07 Hello World to stdout!\n")
+LOG("07 Hello World to stdout!\n");
log_set_target(LOG_DEFAULT_FILE_NAME);
-LOG("08 Hello World to default log file again!\n")
+LOG("08 Hello World to default log file again!\n");
log_disable();
-LOG("09 Hello World _1_ into the void!\n")
+LOG("09 Hello World _1_ into the void!\n");
log_enable();
-LOG("10 Hello World back from the void ( you should not see _1_ in the log or the output )!\n")
+LOG("10 Hello World back from the void ( you should not see _1_ in the log or the output )!\n");
log_disable();
log_set_target("llama.anotherlog.log");
-LOG("11 Hello World _2_ to nobody, new target was selected but logs are still disabled!\n")
+LOG("11 Hello World _2_ to nobody, new target was selected but logs are still disabled!\n");
log_enable();
-LOG("12 Hello World this time in a new file ( you should not see _2_ in the log or the output )?\n")
+LOG("12 Hello World this time in a new file ( you should not see _2_ in the log or the output )?\n");
log_set_target("llama.yetanotherlog.log");
-LOG("13 Hello World this time in yet new file?\n")
+LOG("13 Hello World this time in yet new file?\n");
log_set_target(log_filename_generator("llama_autonamed", "log"));
-LOG("14 Hello World in log with generated filename!\n")
+LOG("14 Hello World in log with generated filename!\n");
#ifdef _MSC_VER
-LOG_TEE("15 Hello msvc TEE without arguments\n")
-LOG_TEE("16 Hello msvc TEE with (%d)(%s) arguments\n", 1, "test")
-LOG_TEELN("17 Hello msvc TEELN without arguments\n")
-LOG_TEELN("18 Hello msvc TEELN with (%d)(%s) arguments\n", 1, "test")
-LOG("19 Hello msvc LOG without arguments\n")
-LOG("20 Hello msvc LOG with (%d)(%s) arguments\n", 1, "test")
-LOGLN("21 Hello msvc LOGLN without arguments\n")
-LOGLN("22 Hello msvc LOGLN with (%d)(%s) arguments\n", 1, "test")
+LOG_TEE("15 Hello msvc TEE without arguments\n");
+LOG_TEE("16 Hello msvc TEE with (%d)(%s) arguments\n", 1, "test");
+LOG_TEELN("17 Hello msvc TEELN without arguments\n");
+LOG_TEELN("18 Hello msvc TEELN with (%d)(%s) arguments\n", 1, "test");
+LOG("19 Hello msvc LOG without arguments\n");
+LOG("20 Hello msvc LOG with (%d)(%s) arguments\n", 1, "test");
+LOGLN("21 Hello msvc LOGLN without arguments\n");
+LOGLN("22 Hello msvc LOGLN with (%d)(%s) arguments\n", 1, "test");
#endif
}
@@ -542,7 +542,7 @@ inline void log_dump_cmdline_impl(int argc, char **argv)
buf << " " << argv[i];
}
}
-LOGLN("Cmd:%s", buf.str().c_str())
+LOGLN("Cmd:%s", buf.str().c_str());
}
#define log_tostr(var) log_var_to_string_impl(var).c_str()
@@ -620,10 +620,10 @@ inline std::string log_var_to_string_impl(const std::vector<int> & var)
#define LOGLN(...) // dummy stub
#undef LOG_TEE
-#define LOG_TEE(...) fprintf(stderr, __VA_ARGS__); // convert to normal fprintf
+#define LOG_TEE(...) fprintf(stderr, __VA_ARGS__) // convert to normal fprintf
#undef LOG_TEELN
-#define LOG_TEELN(...) fprintf(stderr, __VA_ARGS__); // convert to normal fprintf
+#define LOG_TEELN(...) fprintf(stderr, __VA_ARGS__) // convert to normal fprintf
#undef LOG_DISABLE
#define LOG_DISABLE() // dummy stub


@@ -88,7 +88,7 @@ static struct ggml_tensor * randomize_tensor(
break;
default:
assert(false);
-};
+}
return tensor;
}
@@ -136,7 +136,7 @@ static struct ggml_tensor * randomize_tensor_normal(
break;
default:
assert(false);
-};
+}
return tensor;
}


@@ -855,7 +855,7 @@ int main(int argc, char ** argv) {
llama_backend_free();
#ifndef LOG_DISABLE_LOGS
-LOG_TEE("Log end\n")
+LOG_TEE("Log end\n");
#endif // LOG_DISABLE_LOGS
return 0;


@@ -107,7 +107,7 @@ struct ggml_tensor * randomize_tensor_normal(struct ggml_tensor * tensor, struct
break;
default:
assert(false);
-};
+}
return tensor;
}
@@ -151,7 +151,7 @@ struct ggml_tensor * randomize_tensor_uniform(struct ggml_tensor * tensor, struc
break;
default:
assert(false);
-};
+}
return tensor;
}
@@ -1015,7 +1015,7 @@ void shuffle_ints(int * begin, int * end) {
}
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
-{ \
+do { \
const std::string skey(key); \
const int kid = gguf_find_key(ctx, skey.c_str()); \
if (kid >= 0) { \
@@ -1027,7 +1027,7 @@ void shuffle_ints(int * begin, int * end) {
} else if (req) { \
die_fmt("key not found in model: %s", skey.c_str()); \
} \
-}
+} while (0)
bool are_same_layout(struct ggml_tensor * a, struct ggml_tensor * b) {

ggml.c

@@ -1863,7 +1863,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
#define GGML_F16x8_ADD vaddq_f16
#define GGML_F16x8_MUL vmulq_f16
#define GGML_F16x8_REDUCE(res, x) \
-{ \
+do { \
int offset = GGML_F16_ARR >> 1; \
for (int i = 0; i < offset; ++i) { \
x[i] = vaddq_f16(x[i], x[offset+i]); \
@@ -1879,7 +1879,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
-}
+} while (0)
#define GGML_F16_VEC GGML_F16x8
#define GGML_F16_VEC_ZERO GGML_F16x8_ZERO
@@ -1940,7 +1940,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
#define GGML_F32x8_ADD _mm256_add_ps
#define GGML_F32x8_MUL _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x) \
-{ \
+do { \
int offset = GGML_F32_ARR >> 1; \
for (int i = 0; i < offset; ++i) { \
x[i] = _mm256_add_ps(x[i], x[offset+i]); \
@@ -1957,7 +1957,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
_mm256_extractf128_ps(x[0], 1)); \
const __m128 t1 = _mm_hadd_ps(t0, t0); \
res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
-}
+} while (0)
// TODO: is this optimal ?
#define GGML_F32_VEC GGML_F32x8
@@ -13562,7 +13562,7 @@ static void ggml_compute_forward_conv_1d(
ggml_compute_forward_conv_1d_s2_ph(params, src0, src1, dst);
} else {
GGML_ASSERT(false); // only stride 1 and 2 supported
-};
+}
}
// ggml_compute_forward_conv_2d
@@ -19876,10 +19876,10 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
} break;
case GGUF_TYPE_ARRAY:
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
-};
+}
} break;
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
-};
+}
if (!ok) {
break;
@@ -20591,10 +20591,10 @@ static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf *
} break;
case GGUF_TYPE_ARRAY:
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
-};
+}
} break;
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
-};
+}
}
// write tensor infos


@@ -448,7 +448,7 @@ struct LLM_TN {
//
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
-{ \
+do { \
const std::string skey(key); \
const int kid = gguf_find_key(ctx, skey.c_str()); \
if (kid >= 0) { \
@@ -460,7 +460,7 @@ struct LLM_TN {
} else if (req) { \
throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
} \
-}
+} while (0)
//
// ggml helpers
@@ -1760,7 +1760,7 @@ static void llm_load_hparams(
}
} break;
default: (void)0;
-};
+}
model.ftype = ml.ftype;
@@ -2298,7 +2298,7 @@ static void llm_load_tensors(
} break;
default:
throw std::runtime_error("unknown architecture");
-};
+}
}
ml.done_getting_tensors();
@@ -3693,7 +3693,7 @@ static struct ggml_cgraph * llama_build_graph(
} break;
default:
GGML_ASSERT(false);
-};
+}
return result;
}
@@ -4274,7 +4274,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
llm_tokenizer_bpe tokenizer(vocab);
tokenizer.tokenize(raw_text, output);
} break;
-};
+}
return output;
}
@@ -7094,7 +7094,7 @@ int llama_token_to_piece_with_model(const struct llama_model * model, llama_toke
buf[2] = '\x85';
return 3;
} else if (llama_is_control_token(model->vocab, token)) {
-;
+// do nothing
} else if (llama_is_byte_token(model->vocab, token)) {
if (length < 1) {
return -1;


@@ -107,7 +107,7 @@ static struct ggml_tensor * get_random_tensor_f32(
break;
default:
assert(false);
-};
+}
return result;
}
@@ -155,7 +155,7 @@ static struct ggml_tensor * get_random_tensor_f16(
break;
default:
assert(false);
-};
+}
return result;
}
@@ -203,7 +203,7 @@ static struct ggml_tensor * get_random_tensor_i32(
break;
default:
assert(false);
-};
+}
return result;
}


@@ -101,7 +101,7 @@ static struct ggml_tensor * get_random_tensor(
break;
default:
assert(false);
-};
+}
return result;
}
@@ -124,7 +124,7 @@ int main(void) {
struct ggml_context * ctx = ggml_init(params);
int64_t ne1[4] = {4, 128, 1, 1};
-int64_t ne2[4] = {4, 256, 1, 1};;
+int64_t ne2[4] = {4, 256, 1, 1};
int64_t ne3[4] = {128, 256, 1, 1};
struct ggml_tensor * a = get_random_tensor(ctx, 2, ne1, -1, +1);