diff --git a/Makefile b/Makefile
index 7dbca3a85..1fcc68ffb 100644
--- a/Makefile
+++ b/Makefile
@@ -24,7 +24,7 @@ TEST_TARGETS = \
 	tests/test-tokenizer-1-bpe \
 	tests/test-tokenizer-1-spm
 
-# Code coverage output files
+# Code coverage output files;
 COV_TARGETS = *.gcno tests/*.gcno *.gcda tests/*.gcda *.gcov tests/*.gcov lcov-report gcovr-report
 
 ifndef UNAME_S
diff --git a/examples/llamacheck/llamacheck.cpp b/examples/llamacheck/llamacheck.cpp
index ec7c0cae3..5d1e706d9 100644
--- a/examples/llamacheck/llamacheck.cpp
+++ b/examples/llamacheck/llamacheck.cpp
@@ -15,8 +15,6 @@ int main(int argc, char ** argv) {
         params.model = argv[1];
     }
 
-
-
     params.prompt = "";
 
     // total length of the sequence including the prompt
@@ -32,7 +30,6 @@ int main(int argc, char ** argv) {
     llama_model_params model_params = llama_model_default_params();
 
     // model_params.n_gpu_layers = 99; // offload all layers to the GPU
-
     llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
 
     if (model == NULL) {
@@ -41,7 +38,6 @@ int main(int argc, char ** argv) {
     }
 
     // initialize the context
-
     llama_context_params ctx_params = llama_context_default_params();
 
     ctx_params.seed = 1234;
@@ -56,8 +52,6 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-
-
     // main loop
 
     std::string prompt_template = "You will see two sentences. The first is marked INCORRECT and has a plethora of spelling and grammatical issues, the second is marked CORRECT and shows the fixed version of the prior sentence. INCORRECT:";
diff --git a/llamacheck b/llamacheck
deleted file mode 100755
index 2f9659da6..000000000
Binary files a/llamacheck and /dev/null differ
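The llamacheck.cpp hunks above only remove blank lines around the example's setup code, so the surrounding logic is not fully visible in this diff. Below is a minimal sketch of how those visible pieces (model load, context init with `seed = 1234`, and the INCORRECT/CORRECT prompt template) plausibly fit together. It is not the author's actual code: reading the input sentence from stdin, the `input_sentence` variable, the `" CORRECT:"` suffix placement, and the omission of the backend init and decode loop are all assumptions.

```cpp
// Sketch only: mirrors the setup lines visible in the hunks above; the
// generation loop (tokenize + decode + sample) is omitted because it is
// not part of this diff.
#include "llama.h"

#include <cstdio>
#include <iostream>
#include <string>

int main(int argc, char ** argv) {
    // model path taken from argv[1], as in the hunk above; default is an assumption
    std::string model_path = argc > 1 ? argv[1] : "models/7B/ggml-model.gguf";

    // llama_backend_init(...) is normally called here; omitted because its
    // signature differs between llama.cpp versions.

    // load the model (same calls as in the hunk above)
    llama_model_params model_params = llama_model_default_params();
    llama_model * model = llama_load_model_from_file(model_path.c_str(), model_params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // initialize the context (seed value taken from the hunk above)
    llama_context_params ctx_params = llama_context_default_params();
    ctx_params.seed = 1234;
    llama_context * ctx = llama_new_context_with_model(model, ctx_params);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: failed to create the llama_context\n", __func__);
        return 1;
    }

    // build the few-shot prompt: template + sentence to correct
    // (stdin input and the trailing " CORRECT:" marker are assumptions)
    std::string prompt_template =
        "You will see two sentences. The first is marked INCORRECT and has a plethora of "
        "spelling and grammatical issues, the second is marked CORRECT and shows the fixed "
        "version of the prior sentence. INCORRECT:";
    std::string input_sentence;
    std::getline(std::cin, input_sentence);
    std::string prompt = prompt_template + " " + input_sentence + " CORRECT:";

    // ... tokenize `prompt` and run the usual decode loop (as in examples/simple) ...

    llama_free(ctx);
    llama_free_model(model);

    return 0;
}
```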