From cc563aaca0d16f50358963e0f211204a46afddc6 Mon Sep 17 00:00:00 2001
From: Michael Klimenko <mklimenko29@gmail.com>
Date: Sat, 27 Jan 2024 00:32:29 +0100
Subject: [PATCH] Address review comments

---
 examples/perplexity/perplexity.cpp | 1 +
 tests/get-model.cpp                | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index 2c083ee8d..8d2204969 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -813,6 +813,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
 
     llama_batch batch = llama_batch_init(n_ctx, 0, max_seq);
 
+    std::vector<float> tok_logits(n_vocab);
     std::vector<float> batch_logits(n_vocab*n_ctx);
 
     std::vector<std::pair<size_t, llama_token>> eval_pairs;
diff --git a/tests/get-model.cpp b/tests/get-model.cpp
index ed6f758fb..4edb685f0 100644
--- a/tests/get-model.cpp
+++ b/tests/get-model.cpp
@@ -11,7 +11,7 @@ char * get_model_or_exit(int argc, char *argv[]) {
     } else {
         model_path = getenv("LLAMACPP_TEST_MODELFILE");
 
-        if (!model_path || model_path[0] == '\0') {
+        if (!model_path || strlen(model_path) == 0) {
             fprintf(stderr, "\033[33mWARNING: No model file provided. Skipping this test. Set LLAMACPP_TEST_MODELFILE=<gguf_model_path> to silence this warning and run this test.\n\033[0m");
             exit(EXIT_SUCCESS);
         }