Merge branch 'master' into compilade/cuda-tq2_0

Francis Couture-Harpin 2025-01-09 13:02:09 -05:00
commit 983aa09b5c
217 changed files with 19585 additions and 15473 deletions

View file

@@ -13,10 +13,10 @@ int main(int argc, char ** argv) {
std::thread([&model_path]() {
llama_backend_init();
-auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
+auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
llama_free(ctx);
-llama_free_model(model);
+llama_model_free(model);
llama_backend_free();
}).join();
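
This hunk is just the model-lifecycle rename: llama_load_model_from_file/llama_free_model become llama_model_load_from_file/llama_model_free. For reference, a minimal sketch of the new call pattern outside the test ("model.gguf" is a placeholder path; error handling kept to the bare minimum):

#include "llama.h"

int main() {
    llama_backend_init();
    // renamed from llama_load_model_from_file()
    llama_model * model = llama_model_load_from_file("model.gguf", llama_model_default_params());
    if (model != nullptr) {
        llama_context * ctx = llama_new_context_with_model(model, llama_context_default_params());
        llama_free(ctx);
        llama_model_free(model); // renamed from llama_free_model()
    }
    llama_backend_free();
    return 0;
}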

View file

@@ -3940,7 +3940,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() {
test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1}));
test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32000, 512, 1, 1}));
-for (int bs : {1, 512}) {
+for (int bs : {1, 2, 3, 4, 5, 8, 512}) {
for (ggml_type type_a : all_types) {
for (ggml_type type_b : {GGML_TYPE_F32}) {
test_cases.emplace_back(new test_mul_mat(type_a, type_b, 4096, bs, 14336, {1, 1}, {1, 1}));
@@ -3948,6 +3948,18 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() {
}
}
+for (int K : {3, 5}) {
+for (int IC : {256, 2560}) {
+for (int IW_IH : {32, 64, 256}) {
+if (IC == 2560 && IW_IH == 256) {
+// too big
+continue;
+}
+test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {IW_IH, IW_IH, IC, 1}, {K, K, IC, 1}, 1, 1, 1, 1, 1, 1, true));
+}
+}
+}
return test_cases;
}
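
The IC == 2560 with IW_IH == 256 combination is skipped because the im2col destination alone would dwarf the other perf cases: the F16 result holds K*K*IC values per output pixel. A rough standalone estimate:

#include <cstdint>
#include <cstdio>

int main() {
    // worst skipped case: 5x5 kernel, 2560 channels, 256x256 input;
    // with stride 1, pad 1, dilation 1 the output is roughly input-sized
    const int64_t K = 5, IC = 2560, IW = 256, IH = 256;
    const int64_t elems = K * K * IC * IW * IH; // ~4.2e9 F16 values
    printf("im2col dst alone: ~%.1f GiB\n", elems * 2.0 / (1 << 30));
    return 0;
}

At roughly 7.8 GiB for a single destination tensor, the skip is justified even on large-memory devices.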

View file

@@ -77,6 +77,10 @@ int main(void) {
"{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + '[/INST]' }}{% elif message['role'] == 'system' %}{{ '[SYSTEM_PROMPT] ' + message['content'] + '[/SYSTEM_PROMPT]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + eos_token }}{% else %}{{ raise_exception('Only user, system and assistant roles are supported!') }}{% endif %}{% endfor %}",
// ai-sage/GigaChat-20B-A3B-instruct
"{% if messages[0]['role'] == 'system' -%}\n {%- set loop_messages = messages[1:] -%}\n {%- set system_message = bos_token + messages[0]['content'] + additional_special_tokens[1] -%}\n{%- else -%}\n {%- set loop_messages = messages -%}\n {%- set system_message = bos_token + '' -%}\n{%- endif -%}\n{%- for message in loop_messages %}\n {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {% endif %}\n \n {%- if loop.index0 == 0 -%}\n {{ system_message -}}\n {%- endif -%}\n {%- if message['role'] == 'user' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {{ 'available functions' + additional_special_tokens[0] + additional_special_tokens[2] + additional_special_tokens[3] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if message['role'] == 'assistant' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if loop.last and add_generation_prompt -%}\n {{ 'assistant' + additional_special_tokens[0] -}}\n {%- endif -%}\n{%- endfor %}",
+// Infinigence/Megrez-3B-Instruct
+u8"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}",
+// phi-4
+"{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|im_start|>system<|im_sep|>' + message['content'] + '<|im_end|>'}}{% elif (message['role'] == 'user') %}{{'<|im_start|>user<|im_sep|>' + message['content'] + '<|im_end|><|im_start|>assistant<|im_sep|>'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>'}}{% endif %}{% endfor %}",
};
std::vector<std::string> expected_output = {
// teknium/OpenHermes-2.5-Mistral-7B
@@ -133,6 +137,10 @@ int main(void) {
"[SYSTEM_PROMPT] You are a helpful assistant[/SYSTEM_PROMPT][INST] Hello[/INST] Hi there</s>[INST] Who are you[/INST] I am an assistant </s>[INST] Another question[/INST]",
// ai-sage/GigaChat-20B-A3B-instruct
"<s>You are a helpful assistant<|message_sep|>user<|role_sep|>Hello<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>Hi there<|message_sep|>user<|role_sep|>Who are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|> I am an assistant <|message_sep|>user<|role_sep|>Another question<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>",
+// Infinigence/Megrez-3B-Instruct
+"<|role_start|>system<|role_end|>You are a helpful assistant<|turn_end|><|role_start|>user<|role_end|>Hello<|turn_end|><|role_start|>assistant<|role_end|>Hi there<|turn_end|><|role_start|>user<|role_end|>Who are you<|turn_end|><|role_start|>assistant<|role_end|> I am an assistant <|turn_end|><|role_start|>user<|role_end|>Another question<|turn_end|><|role_start|>assistant<|role_end|>",
+// phi-4
+"<|im_start|>system<|im_sep|>You are a helpful assistant<|im_end|><|im_start|>user<|im_sep|>Hello<|im_end|><|im_start|>assistant<|im_sep|>Hi there<|im_end|><|im_start|>user<|im_sep|>Who are you<|im_end|><|im_start|>assistant<|im_sep|> I am an assistant <|im_end|><|im_start|>user<|im_sep|>Another question<|im_end|><|im_start|>assistant<|im_sep|>",
};
std::vector<char> formatted_chat(1024);
int32_t res;
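
The two new template/output pairs (Megrez-3B-Instruct and phi-4) are matched positionally between the two vectors. The test drives each template through llama_chat_apply_template; a condensed sketch of that call (signature as of this commit, where a null model pointer is allowed because the template string is passed explicitly):

#include "llama.h"
#include <cstdio>
#include <vector>

int main() {
    const llama_chat_message conversation[] = {
        {"system",    "You are a helpful assistant"},
        {"user",      "Hello"},
        {"assistant", "Hi there"},
    };
    const char * tmpl = "..."; // one of the Jinja-style template strings above
    std::vector<char> buf(1024);
    const int32_t res = llama_chat_apply_template(nullptr, tmpl, conversation, 3,
                                                  /*add_ass =*/ true, buf.data(), (int32_t) buf.size());
    if (res >= 0 && res <= (int32_t) buf.size()) {
        printf("%.*s\n", res, buf.data()); // compare against expected_output[i]
    }
    return 0;
}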

View file

@@ -15,66 +15,71 @@ constexpr int offset_has_tensors = 2000;
constexpr int offset_has_data = 3000;
enum handcrafted_file_type {
HANDCRAFTED_HEADER_BAD_MAGIC = 10,
HANDCRAFTED_HEADER_BAD_VERSION_1 = 20,
HANDCRAFTED_HEADER_BAD_VERSION_FUTURE = 30,
HANDCRAFTED_HEADER_BAD_N_TENSORS = 40,
HANDCRAFTED_HEADER_BAD_N_KV = 50,
HANDCRAFTED_HEADER_EMPTY = 800,
-HANDCRAFTED_KV_BAD_KEY_SIZE = 10 + offset_has_kv,
-HANDCRAFTED_KV_BAD_TYPE = 20 + offset_has_kv,
-HANDCRAFTED_KV_BAD_VALUE_SIZE = 30 + offset_has_kv,
-HANDCRAFTED_KV_DUPLICATE_KEY = 40 + offset_has_kv,
-HANDCRAFTED_KV_SUCCESS = 800 + offset_has_kv,
+HANDCRAFTED_KV_BAD_KEY_SIZE = 10 + offset_has_kv,
+HANDCRAFTED_KV_BAD_TYPE = 20 + offset_has_kv,
+// HANDCRAFTED_KV_BAD_VALUE_SIZE = 30 + offset_has_kv, // removed because it can result in allocations > 1 TB (default sanitizer limit)
+HANDCRAFTED_KV_DUPLICATE_KEY = 40 + offset_has_kv,
+HANDCRAFTED_KV_BAD_ALIGN = 50 + offset_has_kv,
+HANDCRAFTED_KV_SUCCESS = 800 + offset_has_kv,
-HANDCRAFTED_TENSORS_BAD_NAME_SIZE = 10 + offset_has_tensors,
-HANDCRAFTED_TENSORS_BAD_N_DIMS = 20 + offset_has_tensors,
-HANDCRAFTED_TENSORS_BAD_SHAPE = 30 + offset_has_tensors,
-HANDCRAFTED_TENSORS_NE_TOO_BIG = 40 + offset_has_tensors,
-HANDCRAFTED_TENSORS_BAD_TYPE = 50 + offset_has_tensors,
-HANDCRAFTED_TENSORS_BAD_OFFSET = 60 + offset_has_tensors,
-HANDCRAFTED_TENSORS_DUPLICATE_NAME = 70 + offset_has_tensors,
-HANDCRAFTED_TENSORS_BAD_ALIGNMENT = 80 + offset_has_tensors,
-HANDCRAFTED_TENSORS_SUCCESS = 800 + offset_has_tensors,
-HANDCRAFTED_TENSORS_CUSTOM_ALIGN = 810 + offset_has_tensors,
+HANDCRAFTED_TENSORS_BAD_NAME_SIZE = 10 + offset_has_tensors,
+HANDCRAFTED_TENSORS_BAD_N_DIMS = 20 + offset_has_tensors,
+HANDCRAFTED_TENSORS_BAD_SHAPE = 30 + offset_has_tensors,
+HANDCRAFTED_TENSORS_NE_TOO_BIG = 40 + offset_has_tensors,
+HANDCRAFTED_TENSORS_BAD_TYPE = 50 + offset_has_tensors,
+HANDCRAFTED_TENSORS_BAD_OFFSET = 60 + offset_has_tensors,
+HANDCRAFTED_TENSORS_DUPLICATE_NAME = 70 + offset_has_tensors,
+HANDCRAFTED_TENSORS_BAD_ALIGN = 75 + offset_has_tensors,
+HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN = 80 + offset_has_tensors,
+HANDCRAFTED_TENSORS_SUCCESS = 800 + offset_has_tensors,
+HANDCRAFTED_TENSORS_CUSTOM_ALIGN = 810 + offset_has_tensors,
-HANDCRAFTED_DATA_NOT_ENOUGH_DATA = 10 + offset_has_data,
-HANDCRAFTED_DATA_BAD_ALIGNMENT = 20 + offset_has_data,
-HANDCRAFTED_DATA_SUCCESS = 800 + offset_has_data,
-HANDCRAFTED_DATA_CUSTOM_ALIGN = 810 + offset_has_data,
+HANDCRAFTED_DATA_NOT_ENOUGH_DATA = 10 + offset_has_data,
+HANDCRAFTED_DATA_BAD_ALIGN = 15 + offset_has_data,
+HANDCRAFTED_DATA_INCONSISTENT_ALIGN = 20 + offset_has_data,
+HANDCRAFTED_DATA_SUCCESS = 800 + offset_has_data,
+HANDCRAFTED_DATA_CUSTOM_ALIGN = 810 + offset_has_data,
};
std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
switch (hft) {
case HANDCRAFTED_HEADER_BAD_MAGIC: return "HEADER_BAD_MAGIC";
case HANDCRAFTED_HEADER_BAD_VERSION_1: return "HEADER_BAD_VERSION_1";
case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE: return "HEADER_BAD_VERSION_FUTURE";
case HANDCRAFTED_HEADER_BAD_N_KV: return "HEADER_BAD_N_KV";
case HANDCRAFTED_HEADER_BAD_N_TENSORS: return "HEADER_BAD_N_TENSORS";
case HANDCRAFTED_HEADER_EMPTY: return "HEADER_EMPTY";
-case HANDCRAFTED_KV_BAD_KEY_SIZE: return "KV_BAD_KEY_SIZE";
-case HANDCRAFTED_KV_BAD_TYPE: return "KV_BAD_TYPE";
-case HANDCRAFTED_KV_BAD_VALUE_SIZE: return "KV_BAD_VALUE_SIZE";
-case HANDCRAFTED_KV_DUPLICATE_KEY: return "KV_DUPLICATE_KEY";
-case HANDCRAFTED_KV_SUCCESS: return "KV_RANDOM_KV";
+case HANDCRAFTED_KV_BAD_KEY_SIZE: return "KV_BAD_KEY_SIZE";
+case HANDCRAFTED_KV_BAD_TYPE: return "KV_BAD_TYPE";
+case HANDCRAFTED_KV_DUPLICATE_KEY: return "KV_DUPLICATE_KEY";
+case HANDCRAFTED_KV_BAD_ALIGN: return "KV_BAD_ALIGN";
+case HANDCRAFTED_KV_SUCCESS: return "KV_RANDOM_KV";
-case HANDCRAFTED_TENSORS_BAD_NAME_SIZE: return "TENSORS_BAD_NAME_SIZE";
-case HANDCRAFTED_TENSORS_BAD_N_DIMS: return "TENSORS_BAD_N_DIMS";
-case HANDCRAFTED_TENSORS_BAD_SHAPE: return "TENSORS_BAD_SHAPE";
-case HANDCRAFTED_TENSORS_NE_TOO_BIG: return "TENSORS_NE_TOO_BIG";
-case HANDCRAFTED_TENSORS_BAD_TYPE: return "TENSORS_BAD_TYPE";
-case HANDCRAFTED_TENSORS_BAD_OFFSET: return "TENSORS_BAD_OFFSET";
-case HANDCRAFTED_TENSORS_DUPLICATE_NAME: return "TENSORS_DUPLICATE_NAME";
-case HANDCRAFTED_TENSORS_BAD_ALIGNMENT: return "TENSORS_BAD_ALIGNMENT";
-case HANDCRAFTED_TENSORS_SUCCESS: return "TENSORS_SUCCESS";
-case HANDCRAFTED_TENSORS_CUSTOM_ALIGN: return "TENSORS_CUSTOM_ALIGN";
+case HANDCRAFTED_TENSORS_BAD_NAME_SIZE: return "TENSORS_BAD_NAME_SIZE";
+case HANDCRAFTED_TENSORS_BAD_N_DIMS: return "TENSORS_BAD_N_DIMS";
+case HANDCRAFTED_TENSORS_BAD_SHAPE: return "TENSORS_BAD_SHAPE";
+case HANDCRAFTED_TENSORS_NE_TOO_BIG: return "TENSORS_NE_TOO_BIG";
+case HANDCRAFTED_TENSORS_BAD_TYPE: return "TENSORS_BAD_TYPE";
+case HANDCRAFTED_TENSORS_BAD_OFFSET: return "TENSORS_BAD_OFFSET";
+case HANDCRAFTED_TENSORS_DUPLICATE_NAME: return "TENSORS_DUPLICATE_NAME";
+case HANDCRAFTED_TENSORS_BAD_ALIGN: return "TENSORS_BAD_ALIGN";
+case HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN: return "TENSORS_INCONSISTENT_ALIGN";
+case HANDCRAFTED_TENSORS_SUCCESS: return "TENSORS_SUCCESS";
+case HANDCRAFTED_TENSORS_CUSTOM_ALIGN: return "TENSORS_CUSTOM_ALIGN";
-case HANDCRAFTED_DATA_NOT_ENOUGH_DATA: return "DATA_NOT_ENOUGH_DATA";
-case HANDCRAFTED_DATA_BAD_ALIGNMENT: return "DATA_BAD_ALIGNMENT";
-case HANDCRAFTED_DATA_SUCCESS: return "DATA_SUCCESS";
-case HANDCRAFTED_DATA_CUSTOM_ALIGN: return "DATA_CUSTOM_ALIGN";
+case HANDCRAFTED_DATA_NOT_ENOUGH_DATA: return "DATA_NOT_ENOUGH_DATA";
+case HANDCRAFTED_DATA_BAD_ALIGN: return "DATA_BAD_ALIGN";
+case HANDCRAFTED_DATA_INCONSISTENT_ALIGN: return "DATA_INCONSISTENT_ALIGN";
+case HANDCRAFTED_DATA_SUCCESS: return "DATA_SUCCESS";
+case HANDCRAFTED_DATA_CUSTOM_ALIGN: return "DATA_CUSTOM_ALIGN";
}
GGML_ABORT("fatal error");
}
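
The new BAD_ALIGN and INCONSISTENT_ALIGN cases split what used to be a single BAD_ALIGNMENT case: BAD_ALIGN writes an invalid general.alignment value (13, which is not a power of two), while INCONSISTENT_ALIGN writes a valid alignment that does not match how the data was actually laid out. The validity check GGUF applies is presumably the usual power-of-two test, as in this sketch:

#include <cassert>
#include <cstdint>

static bool alignment_ok(const uint32_t align) {
    return align != 0 && (align & (align - 1)) == 0; // nonzero power of two
}

int main() {
    assert( alignment_ok(1));  // the CUSTOM_ALIGN value used below: unusual but valid
    assert(!alignment_ok(13)); // the BAD_ALIGN value: must be rejected at parse time
    return 0;
}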
@@ -140,31 +145,41 @@ std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937
return kv_types;
}
-static void helper_write(const void * data, const size_t nbytes, FILE * file) {
+template <typename T>
+static void helper_write(FILE * file, const T & val) {
+GGML_ASSERT(fwrite(&val, 1, sizeof(val), file) == sizeof(val));
+}
+static void helper_write(FILE * file, const void * data, const size_t nbytes) {
GGML_ASSERT(fwrite(data, 1, nbytes, file) == nbytes);
}
static FILE * get_handcrafted_file(const unsigned int seed, const enum handcrafted_file_type hft, const int extra_bytes = 0) {
FILE * file = tmpfile();
if (!file) {
return file;
}
std::mt19937 rng(seed);
+uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;
if (hft == HANDCRAFTED_HEADER_BAD_MAGIC) {
const char bad_magic[4] = {'F', 'U', 'G', 'G'};
-helper_write(bad_magic, sizeof(bad_magic), file);
+helper_write(file, bad_magic, sizeof(bad_magic));
} else {
-helper_write(GGUF_MAGIC, 4, file);
+helper_write(file, GGUF_MAGIC, 4);
}
if (hft == HANDCRAFTED_HEADER_BAD_VERSION_1) {
const uint32_t version = 1;
-helper_write(&version, sizeof(version), file);
+helper_write(file, version);
} else if (hft == HANDCRAFTED_HEADER_BAD_VERSION_FUTURE) {
const uint32_t version = GGUF_VERSION + 1;
-helper_write(&version, sizeof(version), file);
+helper_write(file, version);
} else {
const uint32_t version = GGUF_VERSION;
-helper_write(&version, sizeof(version), file);
+helper_write(file, version);
}
std::vector<tensor_config_t> tensor_configs;
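
The helper_write rewrite above replaces the single (data, nbytes, file) signature with a typed template overload plus a raw-buffer overload, and flips the argument order so the file comes first. The template infers the byte count from the value's type, which removes a whole class of sizeof mix-ups. A standalone sketch of the two overloads in use (GGML_ASSERT replaced by plain fwrite for brevity):

#include <cstdint>
#include <cstdio>
#include <string>

template <typename T>
static void helper_write(FILE * file, const T & val) {
    fwrite(&val, 1, sizeof(val), file); // size inferred from the type
}

static void helper_write(FILE * file, const void * data, const size_t nbytes) {
    fwrite(data, 1, nbytes, file); // explicit size for raw buffers
}

int main() {
    FILE * file = tmpfile();
    if (!file) {
        return 1;
    }
    const uint64_t n_tensors = 3;
    helper_write(file, n_tensors); // writes exactly 8 bytes, no sizeof needed
    const std::string name = "tensor_0";
    helper_write(file, uint64_t(name.length()));
    helper_write(file, name.data(), name.length());
    fclose(file);
    return 0;
}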
@@ -174,10 +189,10 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
if (hft == HANDCRAFTED_HEADER_BAD_N_TENSORS) {
const uint64_t n_tensors = -1;
-helper_write(&n_tensors, sizeof(n_tensors), file);
+helper_write(file, n_tensors);
} else {
const uint64_t n_tensors = tensor_configs.size();
-helper_write(&n_tensors, sizeof(n_tensors), file);
+helper_write(file, n_tensors);
}
std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
@@ -186,41 +201,49 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
}
{
uint64_t n_kv = kv_types.size();
-if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+if (hft == HANDCRAFTED_KV_BAD_ALIGN ||
+hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
+hft == HANDCRAFTED_DATA_BAD_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
n_kv += 1;
} else if (hft == HANDCRAFTED_HEADER_BAD_N_KV) {
n_kv = -1;
}
-helper_write(&n_kv, sizeof(n_kv), file);
+helper_write(file, n_kv);
}
if (hft < offset_has_kv) {
+while (ftell(file) % alignment != 0) {
+const char pad = 0;
+helper_write(file, pad);
+}
for (int i = 0; i < extra_bytes; ++i) {
const char tmp = 0;
-helper_write(&tmp, sizeof(tmp), file);
+helper_write(file, tmp);
}
rewind(file);
return file;
}
for (int i = 0; i < int(kv_types.size()); ++i) {
-const enum gguf_type type = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].first);
-const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].second);
+const enum gguf_type type = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].first);
+const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].second);
const std::string key = "my_key_" + std::to_string((hft == HANDCRAFTED_KV_DUPLICATE_KEY ? i/2 : i));
if (hft == HANDCRAFTED_KV_BAD_KEY_SIZE) {
const uint64_t n = -1;
-helper_write(&n, sizeof(n), file);
+helper_write(file, n);
} else {
const uint64_t n = key.length();
-helper_write(&n, sizeof(n), file);
+helper_write(file, n);
}
-helper_write(key.data(), key.length(), file);
+helper_write(file, key.data(), key.length());
{
const int32_t type32 = int32_t(type);
-helper_write(&type32, sizeof(type32), file);
+helper_write(file, type32);
}
uint32_t data[16];
@@ -233,69 +256,67 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
if (type == GGUF_TYPE_STRING) {
const uint64_t n = rng() % sizeof(data);
-helper_write(&n, sizeof(n), file);
-helper_write(data, n, file);
+helper_write(file, n);
+helper_write(file, data, n);
continue;
}
if (type == GGUF_TYPE_ARRAY) {
{
const int32_t type32 = int32_t(type_arr);
-helper_write(&type32, sizeof(type32), file);
+helper_write(file, type32);
}
if (type_arr == GGUF_TYPE_STRING) {
const uint64_t nstr = rng() % (16 + 1);
-helper_write(&nstr, sizeof(nstr), file);
+helper_write(file, nstr);
for (uint64_t istr = 0; istr < nstr; ++istr) {
const uint64_t n = rng() % (sizeof(uint32_t) + 1);
-helper_write(&n, sizeof(n), file);
-helper_write(&data[istr], n, file);
+helper_write(file, n);
+helper_write(file, &data[istr], n);
}
continue;
}
const size_t type_size = gguf_type_size(type_arr);
const uint64_t n = (rng() % sizeof(data)) / type_size;
-helper_write(&n, sizeof(n), file);
-helper_write(&data, n*type_size, file);
+helper_write(file, n);
+helper_write(file, &data, n*type_size);
continue;
}
-size_t type_size = hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type);
-if (hft == HANDCRAFTED_KV_BAD_VALUE_SIZE) {
-type_size += rng() % 3;
-}
-helper_write(data, type_size, file);
+helper_write(file, data, hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type));
}
-if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
-const std::string key = "general.alignment";
-{
-const uint64_t n = key.length();
-helper_write(&n, sizeof(n), file);
-}
-helper_write(key.data(), key.length(), file);
+if (hft == HANDCRAFTED_KV_BAD_ALIGN ||
+hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
+hft == HANDCRAFTED_DATA_BAD_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+const uint64_t n = strlen(GGUF_KEY_GENERAL_ALIGNMENT);
+helper_write(file, n);
+helper_write(file, GGUF_KEY_GENERAL_ALIGNMENT, n);
const int32_t type = gguf_type(GGUF_TYPE_UINT32);
-helper_write(&type, sizeof(type), file);
+helper_write(file, type);
-const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT + 1;
-helper_write(&alignment, sizeof(alignment), file);
+alignment = expect_context_not_null(hft) ? 1 : 13;
+helper_write(file, alignment);
}
if (hft < offset_has_tensors) {
+while (ftell(file) % alignment != 0) {
+const char pad = 0;
+helper_write(file, pad);
+}
for (int i = 0; i < extra_bytes; ++i) {
const char tmp = 0;
-helper_write(&tmp, sizeof(tmp), file);
+helper_write(file, tmp);
}
rewind(file);
return file;
}
-uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;
-if (hft == HANDCRAFTED_TENSORS_BAD_ALIGNMENT || hft == HANDCRAFTED_DATA_BAD_ALIGNMENT) {
-alignment -= 1;
-} else if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
-alignment += 1;
+if (hft == HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN || hft == HANDCRAFTED_DATA_INCONSISTENT_ALIGN) {
+alignment = 1;
}
uint64_t offset = 0;
@@ -313,9 +334,9 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
}
{
const uint64_t n = name.length();
-helper_write(&n, sizeof(n), file);
+helper_write(file, n);
}
-helper_write(name.data(), name.length(), file);
+helper_write(file, name.data(), name.length());
uint32_t n_dims = hft == HANDCRAFTED_TENSORS_NE_TOO_BIG ? 2 : 1;
for (int i = GGML_MAX_DIMS-1; i >= 1; --i) {
@@ -326,35 +347,35 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
}
if (hft == HANDCRAFTED_TENSORS_BAD_N_DIMS) {
const uint32_t n_dims_bad = GGML_MAX_DIMS + 1;
-helper_write(&n_dims_bad, sizeof(n_dims_bad), file);
+helper_write(file, n_dims_bad);
} else {
-helper_write(&n_dims, sizeof(n_dims), file);
+helper_write(file, n_dims);
}
if (hft == HANDCRAFTED_TENSORS_BAD_SHAPE) {
for (uint32_t j = 0; j < n_dims; ++j) {
const int64_t bad_dim = -1;
-helper_write(&bad_dim, sizeof(bad_dim), file);
+helper_write(file, bad_dim);
}
} else if (hft == HANDCRAFTED_TENSORS_NE_TOO_BIG){
for (uint32_t j = 0; j < n_dims; ++j) {
const int64_t big_dim = 4*int64_t(INT32_MAX);
-helper_write(&big_dim, sizeof(big_dim), file);
+helper_write(file, big_dim);
}
} else {
-helper_write(shape.data(), n_dims*sizeof(int64_t), file);
+helper_write(file, shape.data(), n_dims*sizeof(int64_t));
}
{
-const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? -1 : int32_t(type);
-helper_write(&type32, sizeof(type32), file);
+const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? GGML_TYPE_COUNT : int32_t(type);
+helper_write(file, type32);
}
if (hft == HANDCRAFTED_TENSORS_BAD_OFFSET) {
const uint64_t bad_offset = -1;
-helper_write(&bad_offset, sizeof(bad_offset), file);
+helper_write(file, bad_offset);
} else {
-helper_write(&offset, sizeof(offset), file);
+helper_write(file, offset);
}
int64_t ne = shape[0];
@@ -364,12 +385,9 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
offset += GGML_PAD(ggml_row_size(type, ne), alignment);
}
-const uint32_t alignment_overshoot = ftell(file) % alignment;
-if (alignment_overshoot != 0) {
-for (size_t i = alignment_overshoot; i < alignment; ++i) {
-const char pad = 0;
-helper_write(&pad, sizeof(pad), file);
-}
+while (ftell(file) % alignment != 0) {
+const char pad = 0;
+helper_write(file, pad);
}
if (hft >= offset_has_data) {
@@ -380,13 +398,13 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
}
for (uint64_t i = 0; i < nbytes; ++i) {
const uint8_t random_byte = i % 256;
-helper_write(&random_byte, sizeof(random_byte), file);
+helper_write(file, random_byte);
}
}
for (int i = 0; i < extra_bytes; ++i) {
const char tmp = 0;
-helper_write(&tmp, sizeof(tmp), file);
+helper_write(file, tmp);
}
rewind(file);
return file;
@@ -505,6 +523,16 @@ static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned i
}
const char * data_gguf = reinterpret_cast<const char *>(gguf_get_arr_data(gguf_ctx, id));
+if (type_arr == GGUF_TYPE_BOOL) {
+for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
+if (bool(data8[arr_i]) != bool(data_gguf[arr_i])) {
+ok = false;
+}
+}
+continue;
+}
if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) {
ok = false;
}
@@ -512,12 +540,20 @@ static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned i
}
const char * data_gguf = reinterpret_cast<const char *>(gguf_get_val_data(gguf_ctx, id));
+if (type == GGUF_TYPE_BOOL) {
+if (bool(*data8) != bool(*data_gguf)) {
+ok = false;
+}
+continue;
+}
if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) {
ok = false;
}
}
-const uint32_t expected_alignment = alignment_defined ? GGUF_DEFAULT_ALIGNMENT + 1 : GGUF_DEFAULT_ALIGNMENT;
+const uint32_t expected_alignment = alignment_defined ? 1 : GGUF_DEFAULT_ALIGNMENT;
if (gguf_get_alignment(gguf_ctx) != expected_alignment) {
ok = false;
}
@@ -539,7 +575,7 @@ static bool handcrafted_check_tensors(const gguf_context * gguf_ctx, const unsig
bool ok = true;
const int id_alignment = gguf_find_key(gguf_ctx, "general.alignment");
const int id_alignment = gguf_find_key(gguf_ctx, GGUF_KEY_GENERAL_ALIGNMENT);
const uint32_t alignment = id_alignment >= 0 ? gguf_get_val_u32(gguf_ctx, id_alignment) : GGUF_DEFAULT_ALIGNMENT;
uint64_t expected_offset = 0;
@@ -607,7 +643,7 @@ static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const u
std::vector<uint8_t> data(size);
GGML_ASSERT(fseek(file, gguf_get_data_offset(gguf_ctx) + offset, SEEK_SET) == 0);
-GGML_ASSERT(fread(data.data(), 1, size, file) == size);
+GGML_ASSERT(fread(data.data(), 1, data.size(), file) == data.size());
for (size_t j = 0; j < size; ++j) {
const uint8_t expected_byte = (j + offset) % 256;
@@ -627,15 +663,15 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
const std::vector<handcrafted_file_type> hfts = {
HANDCRAFTED_HEADER_BAD_MAGIC,
HANDCRAFTED_HEADER_BAD_VERSION_1,
-// HANDCRAFTED_FILE_TYPE_BAD_VERSION_FUTURE, // FIXME
+HANDCRAFTED_HEADER_BAD_VERSION_FUTURE,
HANDCRAFTED_HEADER_BAD_N_KV,
HANDCRAFTED_HEADER_BAD_N_TENSORS,
HANDCRAFTED_HEADER_EMPTY,
HANDCRAFTED_KV_BAD_KEY_SIZE,
HANDCRAFTED_KV_BAD_TYPE,
-// HANDCRAFTED_KV_BAD_VALUE_SIZE, // FIXME sanitizer limit
-// HANDCRAFTED_FILE_TYPE_DUPLICATE_KEY, // FIXME
+HANDCRAFTED_KV_DUPLICATE_KEY,
+HANDCRAFTED_KV_BAD_ALIGN,
HANDCRAFTED_KV_SUCCESS,
HANDCRAFTED_TENSORS_BAD_NAME_SIZE,
@@ -643,14 +679,16 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
HANDCRAFTED_TENSORS_BAD_SHAPE,
HANDCRAFTED_TENSORS_NE_TOO_BIG,
HANDCRAFTED_TENSORS_BAD_TYPE,
-// HANDCRAFTED_TENSORS_BAD_OFFSET, // FIXME
+HANDCRAFTED_TENSORS_BAD_OFFSET,
HANDCRAFTED_TENSORS_DUPLICATE_NAME,
-// HANDCRAFTED_TENSORS_BAD_ALIGNMENT, // FIXME
+HANDCRAFTED_TENSORS_BAD_ALIGN,
+HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN,
HANDCRAFTED_TENSORS_SUCCESS,
HANDCRAFTED_TENSORS_CUSTOM_ALIGN,
HANDCRAFTED_DATA_NOT_ENOUGH_DATA,
-// HANDCRAFTED_DATA_BAD_ALIGNMENT, // FIXME
+HANDCRAFTED_DATA_BAD_ALIGN,
+HANDCRAFTED_DATA_INCONSISTENT_ALIGN,
HANDCRAFTED_DATA_SUCCESS,
HANDCRAFTED_DATA_CUSTOM_ALIGN,
};
@@ -674,6 +712,7 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
/*no_alloc =*/ false,
/*ctx =*/ hft >= offset_has_data ? &ctx : nullptr,
};
struct gguf_context * gguf_ctx = gguf_init_from_file_impl(file, gguf_params);
if (expect_context_not_null(hft)) {
@@ -689,7 +728,7 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
}
ntest++;
-if (false && hft >= offset_has_data && !expect_context_not_null(hft)) { // FIXME
+if (hft >= offset_has_data && !expect_context_not_null(hft)) {
printf("%s: - no_dangling_ggml_context_pointer: ", __func__);
if (ctx) {
printf("\033[1;31mFAIL\033[0m\n");
@@ -700,23 +739,6 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
ntest++;
}
-if (false && expect_context_not_null(hft)) { // FIXME
-FILE * file_eb = get_handcrafted_file(seed, hft, /*extra_bytes =*/ 1);
-struct gguf_context * gguf_ctx_eb = gguf_init_from_file_impl(file_eb, gguf_params);
-printf("%s: - context_null_with_extra_bytes: ", __func__);
-if (gguf_ctx_eb) {
-printf("\033[1;31mFAIL\033[0m\n");
-} else {
-printf("\033[1;32mOK\033[0m\n");
-npass++;
-}
-ntest++;
-gguf_free(gguf_ctx_eb);
-fclose(file_eb);
-}
const bool alignment_defined = hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN;
if (expect_context_not_null(hft)) {
@@ -763,14 +785,15 @@ static std::pair<int, int> test_handcrafted_file(const unsigned int seed) {
ntest++;
}
-fclose(file);
if (gguf_ctx) {
ggml_free(ctx);
gguf_free(gguf_ctx);
}
+fclose(file);
printf("\n");
}
return std::make_pair(npass, ntest);
}
@@ -789,10 +812,6 @@ static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t
const std::string key = "my_key_" + std::to_string(rng() % 1024);
const enum gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);
-if (type == GGUF_TYPE_STRING || type == GGUF_TYPE_ARRAY) {
-continue; // FIXME memory leak
-}
switch (type) {
case GGUF_TYPE_UINT8: gguf_set_val_u8 (gguf_ctx, key.c_str(), rng() % (1 << 7)); break;
case GGUF_TYPE_INT8: gguf_set_val_i8 (gguf_ctx, key.c_str(), rng() % (1 << 7) - (1 << 6)); break;
@@ -826,6 +845,9 @@ static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t
std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
for (size_t j = 0; j < random_data.size(); ++j) {
random_data[j] = rng();
+if (type_arr == GGUF_TYPE_BOOL) {
+random_data[j] &= 0x01010101; // the sanitizer complains if booleans are not 0 or 1
+}
}
gguf_set_arr_data(gguf_ctx, key.c_str(), type_arr, random_data.data(), ne);
} break;
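
Masking each 32-bit word with 0x01010101 keeps only bit 0 of every byte, so once the buffer is reinterpreted as a bool array each byte is exactly 0 or 1, which is the representation the sanitizer requires. A quick standalone illustration:

#include <cstdint>
#include <cstdio>

int main() {
    uint32_t word = 0xDEADBEEF;
    word &= 0x01010101; // keep only the lowest bit of each byte
    const uint8_t * bytes = reinterpret_cast<const uint8_t *>(&word);
    for (int i = 0; i < 4; ++i) {
        printf("%u ", bytes[i]); // every byte is now 0 or 1
    }
    printf("\n");
    return 0;
}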
@@ -928,6 +950,17 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
continue;
}
+if (type_arr == GGUF_TYPE_BOOL) {
+const int8_t * data = reinterpret_cast<const int8_t *>(gguf_get_arr_data(ctx, id));
+const int8_t * data_other = reinterpret_cast<const int8_t *>(gguf_get_arr_data(other, idx_other));
+for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
+if (bool(data[arr_i]) != bool(data_other[arr_i])) {
+ok = false;
+}
+}
+continue;
+}
if (type_arr == GGUF_TYPE_STRING) {
for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
const std::string str = gguf_get_arr_str(ctx, id, arr_i);
@@ -939,8 +972,8 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
continue;
}
-const char * data = reinterpret_cast<const char *>(gguf_get_arr_data(ctx, id));
-const char * data_other = reinterpret_cast<const char *>(gguf_get_arr_data(other, idx_other));
+const int8_t * data = reinterpret_cast<const int8_t *>(gguf_get_arr_data(ctx, id));
+const int8_t * data_other = reinterpret_cast<const int8_t *>(gguf_get_arr_data(other, idx_other));
if (!std::equal(data, data + arr_n*gguf_type_size(type_arr), data_other)) {
ok = false;
}
@@ -1028,21 +1061,6 @@ static bool same_tensor_data(const struct ggml_context * orig, const struct ggml
}
static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) {
-FILE * file = tmpfile();
-#ifdef _WIN32
-if (!file) {
-printf("%s: failed to create tmpfile(), needs elevated privileges on Windows");
-printf("%s: skipping tests");
-return std::make_pair(0, 0);
-}
-#else
-GGML_ASSERT(file);
-#endif // _WIN32
-if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) {
-return std::make_pair(0, 0); // FIXME
-}
ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
printf("%s: device=%s, backend=%s, only_meta=%s\n",
__func__, ggml_backend_dev_description(dev), ggml_backend_name(backend), only_meta ? "yes" : "no");
@@ -1060,10 +1078,24 @@ static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned
bbuf = result.buffer;
}
-struct gguf_buf gbuf = gguf_buf_init(16 * 1024);
-gguf_write_to_buf(gguf_ctx_0, &gbuf, only_meta);
-helper_write(gbuf.data, gbuf.offset, file);
-rewind(file);
+FILE * file = tmpfile();
+#ifdef _WIN32
+if (!file) {
+printf("%s: failed to create tmpfile(), needs elevated privileges on Windows");
+printf("%s: skipping tests");
+return std::make_pair(0, 0);
+}
+#else
+GGML_ASSERT(file);
+#endif // _WIN32
+{
+std::vector<int8_t> buf;
+gguf_write_to_buf(gguf_ctx_0, buf, only_meta);
+GGML_ASSERT(fwrite(buf.data(), 1, buf.size(), file) == buf.size());
+rewind(file);
+}
struct ggml_context * ctx_1 = nullptr;
struct gguf_init_params gguf_params = {
@@ -1151,9 +1183,8 @@ static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned
ggml_free(ctx_1);
gguf_free(gguf_ctx_0);
gguf_free(gguf_ctx_1);
-gguf_buf_free(gbuf);
ggml_backend_free(backend);
-GGML_ASSERT(fclose(file) == 0);
+fclose(file);
printf("\n");
return std::make_pair(npass, ntest);
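
The roundtrip now serializes through the new gguf_write_to_buf overload, which fills a caller-provided std::vector<int8_t> (the old gguf_buf/gguf_buf_init/gguf_buf_free trio is gone), and only then dumps the bytes into the tmpfile. Reduced to its core, the pattern looks like this sketch (gguf_write_to_buf is internal to the GGUF implementation, so the declaration below is assumed from the hunk above rather than taken from a public header):

#include <cstdio>
#include <vector>

struct gguf_context;

// assumed declaration, matching the call in the hunk above
void gguf_write_to_buf(const struct gguf_context * ctx, std::vector<int8_t> & buf, bool only_meta);

static FILE * write_gguf_to_tmpfile(const gguf_context * ctx, const bool only_meta) {
    std::vector<int8_t> buf;
    gguf_write_to_buf(ctx, buf, only_meta);

    FILE * file = tmpfile(); // may need elevated privileges on Windows
    if (file != nullptr) {
        fwrite(buf.data(), 1, buf.size(), file);
        rewind(file); // ready to be reparsed, e.g. with gguf_init_from_file_impl()
    }
    return file;
}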

View file

@@ -21,7 +21,7 @@ int main(int argc, char *argv[] ) {
(void) ctx;
return progress > 0.50;
};
-auto * model = llama_load_model_from_file(model_path, params);
+auto * model = llama_model_load_from_file(model_path, params);
llama_backend_free();
return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
}
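
For context on what this test checks: llama_model_params.progress_callback receives the load progress in [0, 1], and loading is aborted as soon as the callback returns false, in which case llama_model_load_from_file returns a null pointer. A minimal sketch of the same cancellation setup (the model path is a placeholder):

#include "llama.h"

int main() {
    llama_backend_init();
    llama_model_params params = llama_model_default_params();
    params.progress_callback = [](float progress, void * /*user_data*/) {
        return progress <= 0.50f; // report "stop" once loading passes 50%
    };
    llama_model * model = llama_model_load_from_file("model.gguf", params);
    llama_backend_free();
    return model == nullptr ? 0 : 1; // null means the load was cancelled
}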

View file

@@ -152,7 +152,7 @@ int main(int argc, char **argv) {
mparams.vocab_only = true;
-model = llama_load_model_from_file(fname.c_str(), mparams);
+model = llama_model_load_from_file(fname.c_str(), mparams);
if (model == NULL) {
fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -165,7 +165,7 @@ int main(int argc, char **argv) {
if (ctx == NULL) {
fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-llama_free_model(model);
+llama_model_free(model);
return 1;
}
}
@@ -300,7 +300,7 @@ int main(int argc, char **argv) {
fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
}
-llama_free_model(model);
+llama_model_free(model);
llama_free(ctx);
llama_backend_free();

View file

@@ -46,7 +46,7 @@ int main(int argc, char **argv) {
mparams.vocab_only = true;
-model = llama_load_model_from_file(fname.c_str(), mparams);
+model = llama_model_load_from_file(fname.c_str(), mparams);
if (model == NULL) {
fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -59,7 +59,7 @@ int main(int argc, char **argv) {
if (ctx == NULL) {
fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-llama_free_model(model);
+llama_model_free(model);
return 1;
}
}
@@ -143,7 +143,7 @@ int main(int argc, char **argv) {
}
}
-llama_free_model(model);
+llama_model_free(model);
llama_free(ctx);
llama_backend_free();

View file

@@ -34,7 +34,7 @@ int main(int argc, char ** argv) {
mparams.vocab_only = true;
-model = llama_load_model_from_file(fname.c_str(), mparams);
+model = llama_model_load_from_file(fname.c_str(), mparams);
if (model == NULL) {
fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -47,7 +47,7 @@ int main(int argc, char ** argv) {
if (ctx == NULL) {
fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-llama_free_model(model);
+llama_model_free(model);
return 1;
}
}
@@ -113,7 +113,7 @@ int main(int argc, char ** argv) {
}
}
-llama_free_model(model);
+llama_model_free(model);
llama_free(ctx);
llama_backend_free();
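
All of the tokenizer tests change in the same way: the deprecated llama_load_model_from_file/llama_free_model calls become llama_model_load_from_file/llama_model_free, around a vocab_only load that reads tokenizer metadata without the tensor weights. The shared shape of that setup, condensed (the vocab file name is a placeholder):

#include "llama.h"
#include <cstdio>

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    mparams.vocab_only = true; // tokenizer data only, no weights

    llama_model * model = llama_model_load_from_file("ggml-vocab.gguf", mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load vocab\n");
        return 1;
    }

    llama_context * ctx = llama_new_context_with_model(model, llama_context_default_params());
    // ... llama_tokenize() / llama_detokenize() round-trips go here ...
    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}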