Update preprocessor directives according to guidelines

Aleksei Nikiforov 2025-01-15 11:59:30 +01:00
parent cfb2cd1ee9
commit 3c22daa66e
5 changed files with 15 additions and 15 deletions
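
The guideline being applied is the convention that non-trivial preprocessor blocks echo their controlling condition as a trailing comment on #else and #endif, so a reader at the bottom of a long block can tell which #if it closes. A minimal sketch of that convention on a byte-order check similar to the ones below (load_le32 is a hypothetical helper, not code from this commit):

#include <cstdint>
#include <cstring>

// Hypothetical example only: read a 32-bit little-endian value from a byte
// buffer. The trailing comments on #else/#endif repeat the last condition
// tested, which is the style this commit applies across the tree.
static uint32_t load_le32(const uint8_t * p) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    uint32_t v;
    std::memcpy(&v, p, sizeof(v)); // host order already matches the on-disk order
    return v;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
           ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
#else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#error Unexpected or undefined __BYTE_ORDER__
#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
}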

View file

@@ -1151,9 +1151,9 @@ struct gguf_writer {
 buf.push_back(reinterpret_cast<const int8_t *>(&val)[i]);
 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 buf.push_back(reinterpret_cast<const int8_t *>(&val)[sizeof(val) - i - 1]);
-#else
+#else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #error Unexpected or undefined __BYTE_ORDER__
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 }
 }
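
For context, this writer serializes each value byte by byte: on little-endian hosts the bytes are copied in native order, while on big-endian hosts they are pushed from the highest index down, so the buffer always ends up in GGUF's little-endian on-disk order. A self-contained sketch of the same pattern (write_le is an illustrative name, not the writer's actual interface):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative sketch, not the gguf_writer code itself: append a fixed-size
// value to a byte buffer in little-endian order regardless of the host.
template <typename T>
static void write_le(std::vector<int8_t> & buf, const T & val) {
    for (size_t i = 0; i < sizeof(val); ++i) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        buf.push_back(reinterpret_cast<const int8_t *>(&val)[i]);
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        // the last byte in memory is the least significant one, so emit it first
        buf.push_back(reinterpret_cast<const int8_t *>(&val)[sizeof(val) - i - 1]);
#else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#error Unexpected or undefined __BYTE_ORDER__
#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    }
}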
@@ -1266,9 +1266,9 @@ struct gguf_writer {
 write(kv.get_val<double>(i));
 }
 } break;
-#else
+#else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #error Unexpected or undefined __BYTE_ORDER__
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 case GGUF_TYPE_BOOL: {
 for (size_t i = 0; i < ne; ++i) {
 write(kv.get_val<bool>(i));
@@ -1324,7 +1324,7 @@ struct gguf_writer {
 if (byteswap != nullptr) {
 byteswap(buf.data() + offset, ggml_nelements(&(info.t)) / ggml_blck_size(info.t.type));
 }
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 pad(alignment);
 }

View file

@@ -444,9 +444,9 @@ void llama_mmap::unmap_fragment(size_t first, size_t last) { pimpl->unmap_fragme
 // disable mmap on s390x while it usually loads little-endian models
 #if (defined(_POSIX_MEMLOCK_RANGE) && !defined(__s390x__)) || defined(_WIN32)
 const bool llama_mmap::SUPPORTED = true;
-#else
+#else // (defined(_POSIX_MEMLOCK_RANGE) && !defined(__s390x__)) || defined(_WIN32)
 const bool llama_mmap::SUPPORTED = false;
-#endif
+#endif // (defined(_POSIX_MEMLOCK_RANGE) && !defined(__s390x__)) || defined(_WIN32)
 // llama_mlock
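
SUPPORTED is defined in both branches so callers can test a single compile-time constant instead of repeating the #if; on s390x it is now false, steering loading away from mmap of little-endian model files. A hypothetical caller-side sketch (the control flow shown is illustrative, not the loader's actual code):

#include <cstdio>

// Illustrative only: with mmap unsupported, a caller would take the
// buffered-read path, which is where the byteswap hooks get a chance to run.
static void load_model_data(bool prefer_mmap) {
    if (prefer_mmap && llama_mmap::SUPPORTED) {
        std::puts("mapping the model file directly");
    } else {
        std::puts("falling back to buffered reads (byteswapping if needed)");
    }
}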

View file

@@ -1030,7 +1030,7 @@ bool llama_model_loader::load_all_data(
 if (byteswap != nullptr) {
 byteswap(cur->data, ggml_nelements(cur) / ggml_blck_size(cur->type));
 }
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 if (check_tensors) {
 validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
@@ -1066,7 +1066,7 @@ bool llama_model_loader::load_all_data(
 if (byteswap != nullptr) {
 byteswap(read_buf.data(), read_buf.size() / ggml_blck_size(cur->type));
 }
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
 if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
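
The byteswap pointer in these hunks is a per-tensor-type routine that converts the freshly read data in place; the count passed to it is the number of tensor elements divided by the type's block size. A hedged sketch of what such a routine could look like for a plain 32-bit element type (the real per-type implementations in ggml may differ, and quantized block types need their fields swapped individually):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical example: reverse the byte order of every 32-bit element in a
// buffer, in place. This mirrors the shape of the byteswap(ptr, nelems) calls
// above but is not taken from ggml.
static void byteswap_u32_elems(void * data, size_t nelems) {
    uint32_t * p = static_cast<uint32_t *>(data);
    for (size_t i = 0; i < nelems; ++i) {
        uint32_t v;
        std::memcpy(&v, &p[i], sizeof(v));
        v = ((v & 0x000000FFu) << 24) |
            ((v & 0x0000FF00u) <<  8) |
            ((v & 0x00FF0000u) >>  8) |
            ((v & 0xFF000000u) >> 24);
        std::memcpy(&p[i], &v, sizeof(v));
    }
}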

View file

@@ -53,9 +53,9 @@ struct unicode_cpt_flags {
 is_lowercase = (flags & LOWERCASE) ? 1 : 0;
 is_uppercase = (flags & UPPERCASE) ? 1 : 0;
 is_nfd = (flags & NFD) ? 1 : 0;
-#else
+#else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #error Unexpected or undefined __BYTE_ORDER__
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 }
 inline uint16_t as_uint() const {
@@ -78,9 +78,9 @@ struct unicode_cpt_flags {
 ;
 return result;
-#else
+#else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #error Unexpected or undefined __BYTE_ORDER__
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 }
 inline uint16_t category_flag() const {
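
The endianness split in unicode_cpt_flags exists because bitfield packing order follows the target ABI: reinterpreting the struct as a uint16_t yields different bit positions on little- and big-endian hosts, so the big-endian branch assembles the value explicitly. A hedged sketch of the issue with made-up field names (not the real unicode_cpt_flags layout):

#include <cstdint>
#include <cstring>

// Illustrative struct: on common little-endian ABIs bitfields are packed from
// the least significant bit, so a raw 2-byte copy already gives the intended
// encoding; on big-endian ABIs the packing differs, so the value is built by
// hand to keep the numeric encoding identical across platforms.
struct flags_demo {
    uint16_t is_letter : 1;
    uint16_t is_digit  : 1;
    uint16_t unused    : 14;

    uint16_t as_uint() const {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        uint16_t result;
        std::memcpy(&result, this, sizeof(result));
        return result;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return (uint16_t) ((is_letter << 0) | (is_digit << 1));
#else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#error Unexpected or undefined __BYTE_ORDER__
#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    }
};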

View file

@@ -561,7 +561,7 @@ static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned i
 }
 break;
 }
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) {
 ok = false;
@@ -597,7 +597,7 @@ static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned i
 ggml_convert_to_le((uint64_t*)(data8));
 break;
 }
-#endif
+#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) {
 ok = false;
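
In this test hunk, ggml_convert_to_le brings the data8 side of the comparison to little-endian byte order before the byte-wise std::equal check, so both sides use the same layout on big-endian hosts. A hedged sketch of a helper in that spirit (the actual ggml_convert_to_le may be implemented differently):

#include <cstdint>

// Hypothetical example: a convert-to-little-endian step is a no-op on
// little-endian hosts and a byte reversal on big-endian ones. Uses the
// GCC/Clang builtin; the real helper may differ.
static inline void convert_to_le_u64(uint64_t * v) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    (void) v; // memory layout is already little-endian
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    *v = __builtin_bswap64(*v);
#else // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#error Unexpected or undefined __BYTE_ORDER__
#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
}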