diff --git a/.gitignore b/.gitignore
index 1dc2103d8..cb0069bfb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -89,19 +89,3 @@ examples/jeopardy/results.txt
 
 poetry.lock
 poetry.toml
-
-# Test binaries
-/tests/test-grammar-parser
-/tests/test-llama-grammar
-/tests/test-double-float
-/tests/test-grad0
-/tests/test-opt
-/tests/test-quantize-fns
-/tests/test-quantize-perf
-/tests/test-sampling
-/tests/test-tokenizer-0-llama
-/tests/test-tokenizer-0-falcon
-/tests/test-tokenizer-1-llama
-/tests/test-tokenizer-1-bpe
-/tests/test-rope
-/tests/test-backend-ops
diff --git a/tests/.gitignore b/tests/.gitignore
new file mode 100644
index 000000000..59be43b99
--- /dev/null
+++ b/tests/.gitignore
@@ -0,0 +1,2 @@
+*
+!*.*
diff --git a/tests/test-model-load-cancel.cpp b/tests/test-model-load-cancel.cpp
new file mode 100644
index 000000000..926a305da
--- /dev/null
+++ b/tests/test-model-load-cancel.cpp
@@ -0,0 +1,28 @@
+#include "llama.h"
+
+#include <cstdio>
+#include <cstdlib>
+
+int main(void) {
+    const auto * model_path = "models/7B/ggml-model-f16.gguf";
+    auto * file = fopen(model_path, "r");
+
+    if (file == nullptr) {
+        fprintf(stderr, "no model at '%s' found\n", model_path);
+        return EXIT_FAILURE;
+    }
+
+    fprintf(stderr, "using '%s'\n", model_path);
+    fclose(file);
+
+    llama_backend_init(false);
+    auto params = llama_model_params{};
+    params.use_mmap = false;
+    params.progress_callback = [](float progress, void * ctx){
+        (void) ctx;
+        return progress > 0.50;
+    };
+    auto * model = llama_load_model_from_file(model_path, params);
+    llama_backend_free();
+    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
+}
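
Context for the new test: it exercises the load-cancellation path this patch introduces. llama_load_model_from_file aborts and returns nullptr as soon as the user-supplied progress_callback returns false; the test returns false once progress passes 50% and treats the resulting nullptr as success. Below is a minimal application-side sketch of the same mechanism, assuming the post-patch callback signature (bool return, false cancels). The load_with_cancel helper and the atomic flag are illustrative, not part of this patch; llama_model_default_params() and progress_callback_user_data are existing llama.h API.

    // Sketch: cooperative cancellation of a model load from another thread.
    #include "llama.h"

    #include <atomic>

    // Set to true (e.g. from a UI thread) to request cancellation.
    static std::atomic<bool> g_cancel{false};

    // Illustrative helper, not part of this patch.
    static llama_model * load_with_cancel(const char * path) {
        auto params = llama_model_default_params();
        params.progress_callback_user_data = &g_cancel;
        params.progress_callback = [](float /*progress*/, void * ctx) {
            const auto * cancel = static_cast<const std::atomic<bool> *>(ctx);
            return !cancel->load(); // returning false aborts the load
        };
        return llama_load_model_from_file(path, params); // nullptr if cancelled
    }

Because the loader polls the callback as it loads tensors, cancellation stays cooperative: the atomic flag is the only synchronization the caller needs.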