ci : add model tests + script wrapper (#4586)
* scripts : add lib.sh and lib_test.sh
* scripts : stub out new ci-run.sh script
* scripts : switch to PascalCase for functions (this looks a little odd at first, but I find it very useful as a convention to know if a command is part of our code vs a builtin)
* scripts : add some fancy conversion from snake_case to PascalCase
* Add venv to ci/run.sh
* Revert scripts work
* scripts : add wrapper script for local use of ci/run.sh
* Simplify .gitignore for tests, clang-tidy fixes
* Label all ctest tests
* ci : ctest uses -L main
* Attempt at writing ctest_with_model
* Update test-model-load-cancel
* ci : add ctest_with_model for debug and release (ggml-ci)
* Fix gg_get_model function (ggml-ci)
* got stuck on CMake
* Add get_model.cpp to tests/CMakeLists.txt (ggml-ci)
* Fix README.md output for ctest_with_model (ggml-ci)
* workflows : use `-L main` for all ctest (ggml-ci)
* Fixes
* GG_RUN_CTEST_MODELFILE => LLAMACPP_TESTMODELFILE
* Always show warning rather than failing if model file variable is not set
* scripts : update usage text for ci-run.sh
parent 6dd3c28c9c
commit 413e7b0559
11 changed files with 199 additions and 48 deletions
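The net effect, visible in the diffs below: every ctest target now carries a label. CI workflows and ci/run.sh run the fast suite with `ctest -L main`, while the two tests that need a real GGUF model (test-model-load-cancel, test-autorelease) are labeled "model" and run in the new ctest_with_model debug/release steps, which pass the model path via the LLAMACPP_TEST_MODELFILE environment variable. When that variable is unset, the model tests warn and report success rather than failing.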
tests/.gitignore (new file, vendored, +2)

@@ -0,0 +1,2 @@
+*
+!*.*
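The simplified ignore rules work because `*` ignores everything in tests/ while `!*.*` re-includes any file whose name contains a dot: the sources stay tracked, and freshly built test binaries, which are extensionless, stay ignored without listing each target by name.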
tests/CMakeLists.txt

@@ -1,6 +1,6 @@
 function(llama_build_executable source)
     get_filename_component(TEST_TARGET ${source} NAME_WE)
-    add_executable(${TEST_TARGET} ${source})
+    add_executable(${TEST_TARGET} ${source} get-model.cpp)
     install(TARGETS ${TEST_TARGET} RUNTIME)
     target_link_libraries(${TEST_TARGET} PRIVATE common)
 endfunction()
@@ -8,14 +8,20 @@ endfunction()
 function(llama_test_executable name source)
     get_filename_component(TEST_TARGET ${source} NAME_WE)
     add_test(NAME ${name} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
+    set_property(TEST ${name} PROPERTY LABELS "main")
 endfunction()
 
 function(llama_build_and_test_executable source)
+    llama_build_and_test_executable_with_label(${source} "main")
+endfunction()
+
+function(llama_build_and_test_executable_with_label source label)
     get_filename_component(TEST_TARGET ${source} NAME_WE)
-    add_executable(${TEST_TARGET} ${source})
+    add_executable(${TEST_TARGET} ${source} get-model.cpp)
     install(TARGETS ${TEST_TARGET} RUNTIME)
     target_link_libraries(${TEST_TARGET} PRIVATE common)
     add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
+    set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${label})
 endfunction()
 
 # llama_build_and_test_executable(test-double-float.cpp) # SLOW
@@ -49,10 +55,12 @@ llama_build_and_test_executable(test-llama-grammar.cpp)
 llama_build_and_test_executable(test-grad0.cpp)
 # llama_build_and_test_executable(test-opt.cpp) # SLOW
 llama_build_and_test_executable(test-backend-ops.cpp)
-llama_build_and_test_executable(test-autorelease.cpp)
 
 llama_build_and_test_executable(test-rope.cpp)
 
+llama_build_and_test_executable_with_label(test-model-load-cancel.cpp "model")
+llama_build_and_test_executable_with_label(test-autorelease.cpp "model")
+
 # dummy executable - not installed
 get_filename_component(TEST_TARGET test-c.c NAME_WE)
 add_executable(${TEST_TARGET} test-c.c)
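`llama_build_and_test_executable` is now a thin wrapper that applies the default "main" label, so existing call sites are untouched, while `llama_build_and_test_executable_with_label` lets the two model tests opt into the "model" label. Every test executable also compiles in get-model.cpp, so `ctest -L main` selects only the fast tests and `ctest -L model` only those that need a model file.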
tests/get-model.cpp (new file, +21)

@@ -0,0 +1,21 @@
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "get-model.h"
+
+char * get_model_or_exit(int argc, char *argv[]) {
+    char * model_path;
+    if (argc > 1) {
+        model_path = argv[1];
+
+    } else {
+        model_path = getenv("LLAMACPP_TEST_MODELFILE");
+        if (!model_path || strlen(model_path) == 0) {
+            fprintf(stderr, "\033[33mWARNING: No model file provided. Skipping this test. Set LLAMACPP_TEST_MODELFILE=<gguf_model_path> to silence this warning and run this test.\n\033[0m");
+            exit(EXIT_SUCCESS);
+        }
+    }
+
+    return model_path;
+}
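A future model-gated test would follow the same pattern as the two shipped in this PR. A minimal sketch (the file name, label registration, and printed message are hypothetical, not part of this commit):

    // Hypothetical test-example.cpp, registered in tests/CMakeLists.txt with
    // llama_build_and_test_executable_with_label(test-example.cpp "model").
    #include <cstdio>

    #include "get-model.h"

    int main(int argc, char * argv[]) {
        // Resolves the model path from argv[1] or LLAMACPP_TEST_MODELFILE;
        // prints a warning and exits with success (i.e. the test is skipped
        // but still green) when neither is provided.
        char * model_path = get_model_or_exit(argc, argv);
        printf("model-dependent checks would run against '%s'\n", model_path);
        return 0;
    }

Exiting with EXIT_SUCCESS on a missing model is deliberate: ctest stays green on machines without a downloaded model, and the yellow warning is the only trace of the skip.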
tests/get-model.h (new file, +2)

@@ -0,0 +1,2 @@
+#pragma once
+char * get_model_or_exit(int, char*[]);
tests/test-autorelease.cpp

@@ -5,19 +5,15 @@
 #include <thread>
 
 #include "llama.h"
+#include "get-model.h"
 
 // This creates a new context inside a pthread and then tries to exit cleanly.
 int main(int argc, char ** argv) {
-    if (argc < 2) {
-        printf("Usage: %s model.gguf\n", argv[0]);
-        return 0; // intentionally return success
-    }
+    auto * model_path = get_model_or_exit(argc, argv);
 
-    const std::string fname = argv[1];
-
-    std::thread([&fname]() {
+    std::thread([&model_path]() {
         llama_backend_init(false);
-        auto * model = llama_load_model_from_file(fname.c_str(), llama_model_default_params());
+        auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
         auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
         llama_free(ctx);
         llama_free_model(model);
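The usage/argc check and the std::string copy both disappear: get_model_or_exit covers the argv[1] and environment-variable cases, and the raw char * goes straight to llama_load_model_from_file.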
tests/test-model-load-cancel.cpp (new file, +27)

@@ -0,0 +1,27 @@
+#include "llama.h"
+#include "get-model.h"
+
+#include <cstdlib>
+
+int main(int argc, char *argv[] ) {
+    auto * model_path = get_model_or_exit(argc, argv);
+    auto * file = fopen(model_path, "r");
+    if (file == nullptr) {
+        fprintf(stderr, "no model at '%s' found\n", model_path);
+        return EXIT_FAILURE;
+    }
+
+    fprintf(stderr, "using '%s'\n", model_path);
+    fclose(file);
+
+    llama_backend_init(false);
+    auto params = llama_model_params{};
+    params.use_mmap = false;
+    params.progress_callback = [](float progress, void * ctx){
+        (void) ctx;
+        return progress > 0.50;
+    };
+    auto * model = llama_load_model_from_file(model_path, params);
+    llama_backend_free();
+    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
+}
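The lambda returns false for every progress value at or below 0.5, and a false return from progress_callback tells the loader to abort, so llama_load_model_from_file is expected to come back nullptr; the test passes only if the cancellation actually took effect. Setting use_mmap = false presumably forces the incremental read path, giving the callback a chance to fire before the load completes.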