From c238b5873a1ea496db03ffcfe124c9d0d83afbc6 Mon Sep 17 00:00:00 2001
From: rankaiyx
Date: Wed, 17 May 2023 22:47:58 +0800
Subject: [PATCH 1/9] benchmark-matmul: Print the average of the test results
 (#1490)

---
 examples/benchmark/benchmark-matmult.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp
index 7d237be02..446b8e8fb 100644
--- a/examples/benchmark/benchmark-matmult.cpp
+++ b/examples/benchmark/benchmark-matmult.cpp
@@ -211,6 +211,7 @@ int main(int argc, char ** argv) {
     printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; gigaFLOPS\n");
     printf("=====================================================================================\n");
 
+    double gflops_sum = 0;
    for (int i=0;i<benchmark_params.n_iterations ;i++) {
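Only the first added line of that hunk survives above. Going by the subject line, the diffstat (5 insertions), and the surviving `gflops_sum` declaration, the change is the standard accumulate-then-average pattern; a sketch of what it plausibly looks like, with the loop body elided and the output format a placeholder, not the verbatim patch:

    double gflops_sum = 0;
    for (int i = 0; i < benchmark_params.n_iterations; i++) {
        // ... run one matmult iteration, compute its throughput into `gflops` ...
        gflops_sum += gflops;  // accumulate per-iteration gigaFLOPS
    }
    // report the mean over all iterations, after the per-iteration table
    printf("Average: %.2f gigaFLOPS\n", gflops_sum / (double) benchmark_params.n_iterations);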
Date: Wed, 17 May 2023 22:12:01 +0000
Subject: [PATCH 2/9] Remove unused n_parts parameter (#1509)

---
 examples/common.cpp                          | 8 --------
 examples/common.h                            | 1 -
 examples/quantize-stats/quantize-stats.cpp   | 1 -
 examples/save-load-state/save-load-state.cpp | 1 -
 llama.cpp                                    | 1 -
 llama.h                                      | 1 -
 6 files changed, 13 deletions(-)

diff --git a/examples/common.cpp b/examples/common.cpp
index 259880a7c..a6abc4977 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -321,12 +321,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 invalid_param = true;
                 break;
             }
-        } else if (arg == "--n-parts") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.n_parts = std::stoi(argv[i]);
         } else if (arg == "-h" || arg == "--help") {
             gpt_print_usage(argc, argv, default_params);
             exit(0);
@@ -418,7 +412,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  --no-penalize-nl      do not penalize newline token\n");
     fprintf(stderr, "  --memory-f32          use f32 instead of f16 for memory key+value\n");
     fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
-    fprintf(stderr, "  --n-parts N           number of model parts (default: -1 = determine from dimensions)\n");
     fprintf(stderr, "  -b N, --batch-size N  batch size for prompt processing (default: %d)\n", params.n_batch);
     fprintf(stderr, "  --perplexity          compute perplexity over the prompt\n");
     fprintf(stderr, "  --keep                number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
@@ -473,7 +466,6 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     auto lparams = llama_context_default_params();
 
     lparams.n_ctx        = params.n_ctx;
-    lparams.n_parts      = params.n_parts;
     lparams.n_gpu_layers = params.n_gpu_layers;
     lparams.seed         = params.seed;
     lparams.f16_kv       = params.memory_f16;
diff --git a/examples/common.h b/examples/common.h
index f4e07a252..2ad20ba50 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -24,7 +24,6 @@ struct gpt_params {
     int32_t seed      = -1;  // RNG seed
     int32_t n_threads = get_num_physical_cores();
     int32_t n_predict = -1;  // new tokens to predict
-    int32_t n_parts   = -1;  // amount of model parts (-1 = determine from model dimensions)
     int32_t n_ctx     = 512; // context size
     int32_t n_batch   = 512; // batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_keep    = 0;   // number of tokens to keep from initial prompt
diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp
index 9a2aa7c64..085fdde3c 100644
--- a/examples/quantize-stats/quantize-stats.cpp
+++ b/examples/quantize-stats/quantize-stats.cpp
@@ -321,7 +321,6 @@ int main(int argc, char ** argv) {
     auto lparams = llama_context_default_params();
 
     lparams.n_ctx     = 256;
-    lparams.n_parts   = 1;
     lparams.seed      = 1;
     lparams.f16_kv    = false;
     lparams.use_mlock = false;
diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index 355969579..91f04b6c7 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -26,7 +26,6 @@ int main(int argc, char ** argv) {
     auto lparams = llama_context_default_params();
 
     lparams.n_ctx    = params.n_ctx;
-    lparams.n_parts  = params.n_parts;
     lparams.seed     = params.seed;
     lparams.f16_kv   = params.memory_f16;
     lparams.use_mmap = params.use_mmap;
diff --git a/llama.cpp b/llama.cpp
index 98f49abd7..6e19064fc 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -812,7 +812,6 @@ static bool kv_cache_init(
 struct llama_context_params llama_context_default_params() {
     struct llama_context_params result = {
         /*.n_ctx      =*/ 512,
-        /*.n_parts    =*/ -1,
         /*.gpu_layers =*/ 0,
         /*.seed       =*/ -1,
         /*.f16_kv     =*/ false,
diff --git a/llama.h b/llama.h
index 21cba8cf6..f955fa23d 100644
--- a/llama.h
+++ b/llama.h
@@ -55,7 +55,6 @@ extern "C" {
 
     struct llama_context_params {
         int n_ctx;        // text context
-        int n_parts;      // -1 for default
         int n_gpu_layers; // number of layers to store in VRAM
         int seed;         // RNG seed, -1 for random
 
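Since `n_parts` was already unused, the removal only touches call sites; for API users the initialization simply loses one assignment. A minimal sketch of context setup after this patch (the model path is illustrative):

    struct llama_context_params lparams = llama_context_default_params();
    lparams.n_ctx = 512;  // no n_parts field anymore; the loader works it out from the model file
    struct llama_context * ctx = llama_init_from_file("models/7B/ggml-model.bin", lparams);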
From ee9654138ab0ae5f138f4abddf56ca234ea3c352 Mon Sep 17 00:00:00 2001
From: DannyDaemonic
Date: Thu, 18 May 2023 10:30:40 -0700
Subject: [PATCH 3/9] Fixes #1511 lambda issue for w64devkit (mingw) (#1513)

* Fix for w64devkit and mingw
---
 examples/main/main.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index fe1c847a7..18673ed2e 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -241,7 +241,7 @@ int main(int argc, char ** argv) {
     sigint_action.sa_flags = 0;
     sigaction(SIGINT, &sigint_action, NULL);
 #elif defined (_WIN32)
-    auto console_ctrl_handler = [](DWORD ctrl_type) -> BOOL {
+    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
         return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
     };
     SetConsoleCtrlHandler(static_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
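The one-character fix is easy to miss: a capture-less lambda has its own unnamed closure type, and the w64devkit/mingw toolchain (issue #1511) rejected the later conversion to the `PHANDLER_ROUTINE` function-pointer type. The unary `+` forces the lambda's implicit conversion to a plain function pointer at the point of definition. A standalone illustration of the idiom with generic types rather than the Win32 ones:

    #include <type_traits>

    int main() {
        auto lam =  [](int x) -> int { return x + 1; }; // unique, unnamed closure type
        auto fnp = +[](int x) -> int { return x + 1; }; // unary '+' yields int (*)(int)

        static_assert(!std::is_same<decltype(lam), int (*)(int)>::value, "a closure, not a pointer");
        static_assert( std::is_same<decltype(fnp), int (*)(int)>::value, "a real function pointer");
        return fnp(41) - lam(41); // both callable; returns 0
    }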
From 5ea43392731040b454c293123839b90e159cbb99 Mon Sep 17 00:00:00 2001
From: Erik Scholz
Date: Thu, 18 May 2023 19:31:01 +0200
Subject: [PATCH 4/9] make kv_f16 the default for api users (#1517)

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 6e19064fc..1f9d37844 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -814,7 +814,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_ctx      =*/ 512,
         /*.gpu_layers =*/ 0,
         /*.seed       =*/ -1,
-        /*.f16_kv     =*/ false,
+        /*.f16_kv     =*/ true,
         /*.logits_all =*/ false,
         /*.vocab_only =*/ false,
         /*.use_mmap   =*/ true,
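The default matters because the KV cache is one of the larger allocations an API user pays for without asking: two tensors (K and V) of n_layer x n_ctx x n_embd elements each. A back-of-envelope check, assuming 7B-sized dimensions (n_layer = 32, n_embd = 4096) and the default n_ctx = 512:

    #include <cstddef>
    #include <cstdio>

    int main() {
        const std::size_t n_layer = 32, n_embd = 4096, n_ctx = 512;  // LLaMA-7B-shaped
        const std::size_t n_elements = 2 * n_layer * n_ctx * n_embd; // K and V together
        std::printf("f16 KV cache: %zu MiB\n", n_elements * 2 / (1024 * 1024)); // 256 MiB
        std::printf("f32 KV cache: %zu MiB\n", n_elements * 4 / (1024 * 1024)); // 512 MiB
        return 0;
    }

With f16 as the default, callers of `llama_context_default_params()` now get the smaller figure that the command-line tools already used (`memory_f16` defaults to true in `gpt_params`).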
From 4b7e245adf63db675c3daab4a9bfddd451ef4097 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 19 May 2023 20:14:51 +0300
Subject: [PATCH 5/9] minor : fix compile warnings

---
 examples/common.cpp     |  4 ++--
 examples/common.h       |  6 +++---
 llama.cpp               |  2 +-
 tests/test-sampling.cpp | 10 ++++++----
 4 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/examples/common.cpp b/examples/common.cpp
index a6abc4977..a4fea4af4 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -749,7 +749,7 @@ bool console_readline(console_state & con_st, std::string & line) {
             break;
         }
 
-        if (input_char == WEOF || input_char == 0x04 /* Ctrl+D*/) {
+        if (input_char == (char32_t) WEOF || input_char == 0x04 /* Ctrl+D*/) {
             end_of_stream = true;
             break;
         }
@@ -764,7 +764,7 @@ bool console_readline(console_state & con_st, std::string & line) {
             char32_t code = getchar32();
             if (code == '[' || code == 0x1B) {
                 // Discard the rest of the escape sequence
-                while ((code = getchar32()) != WEOF) {
+                while ((code = getchar32()) != (char32_t) WEOF) {
                     if ((code >= 'A' && code <= 'Z') || (code >= 'a' && code <= 'z') || code == '~') {
                         break;
                     }
diff --git a/examples/common.h b/examples/common.h
index 2ad20ba50..2b66382a6 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -44,15 +44,15 @@ struct gpt_params {
     float   mirostat_tau = 5.00f; // target entropy
     float   mirostat_eta = 0.10f; // learning rate
 
-    std::string model  = "models/7B/ggml-model.bin"; // model path
-    std::string prompt = "";
+    std::string model             = "models/7B/ggml-model.bin"; // model path
+    std::string prompt            = "";
     std::string path_prompt_cache = "";  // path to file for saving/loading prompt eval state
     std::string input_prefix      = "";  // string to prefix user inputs with
     std::string input_suffix      = "";  // string to suffix user inputs with
     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
 
     std::string lora_adapter = ""; // lora adapter path
-    std::string lora_base = "";    // base model path for the lora adapter
+    std::string lora_base    = ""; // base model path for the lora adapter
 
     bool memory_f16    = true;  // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
diff --git a/llama.cpp b/llama.cpp
index 1f9d37844..1802d2319 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -941,7 +941,7 @@ static void llama_model_load_internal(
     size_t ctx_size;
     size_t mmapped_size;
     ml->calc_sizes(&ctx_size, &mmapped_size);
-    fprintf(stderr, "%s: ggml ctx size = %6.2f KB\n", __func__, ctx_size/1024.0);
+    fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/1024.0/1024.0);
 
     // print memory requirements
     {
diff --git a/tests/test-sampling.cpp b/tests/test-sampling.cpp
index 9174c1e37..ebfc17c18 100644
--- a/tests/test-sampling.cpp
+++ b/tests/test-sampling.cpp
@@ -1,14 +1,16 @@
-#include "llama.h"
 #include "ggml.h"
-#include
-#include
+#include "llama.h"
+
+#ifdef NDEBUG
+#undef NDEBUG
+#endif
+
 #include
 #include
 #include
 #include
 #include
-
 void dump(const llama_token_data_array * candidates) {
     for (size_t i = 0; i < candidates->size; i++) {
         printf("%d: %f (%f)\n", candidates->data[i].id, candidates->data[i].p, candidates->data[i].logit);
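One detail of the test-sampling change deserves a note: `assert()` expands to nothing whenever `NDEBUG` is defined, which is exactly what release builds do, so an optimized build of the tests would otherwise check nothing. Undefining the macro before including the assert header keeps the checks alive in every configuration. A self-contained illustration:

    // order matters: NDEBUG must be gone before assert() is defined
    #ifdef NDEBUG
    #undef NDEBUG
    #endif
    #include <cassert>

    int main() {
        assert(2 + 2 == 4); // still evaluated even when built with -DNDEBUG
        return 0;
    }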
From 79e3efb0e97b65b6cc72cd9ee970fa8189ad79a4 Mon Sep 17 00:00:00 2001
From: David Kennedy
Date: Fri, 19 May 2023 13:16:30 -0400
Subject: [PATCH 6/9] readme : adds WizardLM to the list of supported models
 (#1485)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 1d84a5e6d..6a67765aa 100644
--- a/README.md
+++ b/README.md
@@ -80,6 +80,7 @@ as the main playground for developing new features for the [ggml](https://github.com/ggerganov/ggml) library.
 - [X] [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/)
 - [X] [OpenBuddy 🐶 (Multilingual)](https://github.com/OpenBuddy/OpenBuddy)
 - [X] [Pygmalion 7B / Metharme 7B](#using-pygmalion-7b--metharme-7b)
+- [X] [WizardLM](https://github.com/nlpxucan/WizardLM)
 
 **Bindings:**

From 7694b52b9a206b93d59139c3c7c9b55da0f5aa59 Mon Sep 17 00:00:00 2001
From: Jason McCartney
Date: Fri, 19 May 2023 10:24:59 -0700
Subject: [PATCH 7/9] main : make reverse prompt option act as a stop token in
 non-interactive mode (#1032)

* Make reverse prompt option act as a stop token in non-interactive scenarios

* Making requested review changes

* Update gpt_params_parse and fix a merge error

* Revert "Update gpt_params_parse and fix a merge error"

This reverts commit 2bb2ff1748513591ad45b175a75ed1d8089d84c8.

* Update gpt_params_parse and fix a merge error take 2
---
 examples/common.cpp    |  6 +++---
 examples/main/main.cpp | 26 ++++++++++++++++++--------
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/examples/common.cpp b/examples/common.cpp
index a4fea4af4..e89df537e 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -351,7 +351,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
     }
     if (params.prompt_cache_all &&
             (params.interactive || params.interactive_first ||
-             params.instruct || params.antiprompt.size())) {
+             params.instruct)) {
         fprintf(stderr, "error: --prompt-cache-all not supported in interactive mode yet\n");
         gpt_print_usage(argc, argv, default_params);
         exit(1);
@@ -373,8 +373,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
     fprintf(stderr, "  --multiline-input     allows you to write or paste multiple lines without ending each in '\\'\n");
     fprintf(stderr, "  -r PROMPT, --reverse-prompt PROMPT\n");
-    fprintf(stderr, "                        run in interactive mode and poll user input upon seeing PROMPT (can be\n");
-    fprintf(stderr, "                        specified more than once for multiple prompts).\n");
+    fprintf(stderr, "                        halt generation at PROMPT, return control in interactive mode\n");
+    fprintf(stderr, "                        (can be specified more than once for multiple prompts).\n");
     fprintf(stderr, "  --color               colorise output to distinguish prompt and user input from generations\n");
     fprintf(stderr, "  -s SEED, --seed SEED  RNG seed (default: -1, use random seed for < 0)\n");
     fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 18673ed2e..4d886f8de 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -208,8 +208,8 @@ int main(int argc, char ** argv) {
         params.antiprompt.push_back("### Instruction:\n\n");
     }
 
-    // enable interactive mode if reverse prompt or interactive start is specified
-    if (params.antiprompt.size() != 0 || params.interactive_first) {
+    // enable interactive mode if interactive start is specified
+    if (params.interactive_first) {
         params.interactive = true;
     }
 
@@ -305,7 +305,7 @@ int main(int argc, char ** argv) {
 
     std::vector<llama_token> embd;
 
-    while (n_remain != 0 || params.interactive) {
+    while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
         if (embd.size() > 0) {
             // infinite text generation via context swapping
@@ -503,9 +503,8 @@ int main(int argc, char ** argv) {
             console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
         }
 
-        // in interactive mode, and not currently processing queued inputs;
-        // check if we should prompt the user for more
-        if (params.interactive && (int) embd_inp.size() <= n_consumed) {
+        // if not currently processing queued inputs;
+        if ((int) embd_inp.size() <= n_consumed) {
 
             // check for reverse prompt
             if (params.antiprompt.size()) {
@@ -516,10 +515,21 @@ int main(int argc, char ** argv) {
 
                 is_antiprompt = false;
                 // Check if each of the reverse prompts appears at the end of the output.
+                // If we're not running interactively, the reverse prompt might be tokenized with some following characters
+                // so we'll compensate for that by widening the search window a bit.
                 for (std::string & antiprompt : params.antiprompt) {
-                    if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) {
-                        is_interacting = true;
+                    size_t extra_padding = params.interactive ? 0 : 2;
+                    size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
+                        ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
+                        : 0;
+
+                    if (last_output.find(antiprompt.c_str(), search_start_pos) != std::string::npos) {
+                        if (params.interactive) {
+                            is_interacting = true;
+                            console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
+                        }
                         is_antiprompt = true;
+                        fflush(stdout);
                         break;
                     }
                 }
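The heart of patch 7 is the widened suffix search: in non-interactive runs the reverse prompt can come back from the model fused with a couple of trailing characters in a single token, so an exact suffix match would miss it. The same logic distilled into a standalone sketch (the sample strings are made up):

    #include <cassert>
    #include <cstddef>
    #include <string>

    // does `antiprompt` occur within the last |antiprompt| + extra chars of `output`?
    static bool hit_antiprompt(const std::string & output, const std::string & antiprompt, std::size_t extra) {
        const std::size_t window = antiprompt.length() + extra;
        const std::size_t start  = output.length() > window ? output.length() - window : 0;
        return output.find(antiprompt, start) != std::string::npos;
    }

    int main() {
        const std::string out = "blah User:\n"; // trailing '\n' arrived glued to the token
        assert(!hit_antiprompt(out, "User:", 0)); // strict suffix match misses it
        assert( hit_antiprompt(out, "User:", 2)); // widened window (the non-interactive path) catches it
        return 0;
    }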
From 943e6081cc939df7584f8f0ab7057a39c2ef3271 Mon Sep 17 00:00:00 2001
From: Evan Jones
Date: Fri, 19 May 2023 13:39:51 -0400
Subject: [PATCH 8/9] examples : add persistent chat (#1495)

* examples : add persistent chat

* examples : fix whitespace

---------

Co-authored-by: Georgi Gerganov
---
 examples/chat-persistent.sh | 151 ++++++++++++++++++++++++++++++++++++
 1 file changed, 151 insertions(+)
 create mode 100755 examples/chat-persistent.sh

diff --git a/examples/chat-persistent.sh b/examples/chat-persistent.sh
new file mode 100755
index 000000000..b32284b49
--- /dev/null
+++ b/examples/chat-persistent.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+
+set -euo pipefail
+
+cd "$(dirname "$0")/.." || exit
+
+if [[ -z "${PROMPT_CACHE_FILE+x}" || -z "${CHAT_SAVE_DIR+x}" ]]; then
+    echo >&2 "error: PROMPT_CACHE_FILE and CHAT_SAVE_DIR must be provided"
+    exit 1
+fi
+
+MODEL="${MODEL:-./models/13B/ggml-model-q4_0.bin}"
+PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat.txt}"
+USER_NAME="${USER_NAME:-User}"
+AI_NAME="${AI_NAME:-ChatLLaMa}"
+DATE_TIME="$(date +%H:%M)"
+DATE_YEAR="$(date +%Y)"
+
+LOG="${CHAT_SAVE_DIR}/main.log"
+LOG_BG="${CHAT_SAVE_DIR}/main-bg.log"
+CUR_PROMPT_FILE="${CHAT_SAVE_DIR}/current-prompt.txt"
+CUR_PROMPT_CACHE="${CHAT_SAVE_DIR}/current-cache.bin"
+NEXT_PROMPT_FILE="${CHAT_SAVE_DIR}/next-prompt.txt"
+NEXT_PROMPT_CACHE="${CHAT_SAVE_DIR}/next-cache.bin"
+
+SESSION_SIZE_MSG_PATTERN='main: session file matches \d+ / \d+'
+SAMPLE_TIME_MSG_PATTERN='sample time =\s+\d+.\d+ ms /\s+\d+'
+SED_DELETE_MESSAGES="/^(${USER_NAME}:|${AI_NAME}:|\\.\\.\\.)/,\$d"
+
+CTX_SIZE=2048
+CTX_ROTATE_POINT=$((CTX_SIZE * 3 / 5)) # REVIEW
+OPTS=(--model "$MODEL" --ctx_size "$CTX_SIZE" --repeat_last_n 256 "$@")
+
+# An unbuffered `tail -c+N`
+skip_bytes() {
+    LANG=C IFS= read -r -n "$1" -d '' c
+    while LANG=C IFS= read -r -n 1 -d '' c; do
+        printf '%s' "$c"
+    done
+}
+
+mkdir -p "$CHAT_SAVE_DIR"
+echo >"$LOG"
+trap "tail -n100 ${LOG}" EXIT
+
+if [[ ! -e "$CUR_PROMPT_FILE" ]]; then
+    sed -e "s/\[\[USER_NAME\]\]/${USER_NAME}/g" \
+        -e "s/\[\[AI_NAME\]\]/${AI_NAME}/g" \
+        -e "s/\[\[DATE_TIME\]\]/${DATE_TIME}/g" \
+        -e "s/\[\[DATE_YEAR\]\]/${DATE_YEAR}/g" \
+        "$PROMPT_TEMPLATE" >"$CUR_PROMPT_FILE"
+fi
+
+if [[ ! -e "$NEXT_PROMPT_FILE" ]]; then
+    sed -r "$SED_DELETE_MESSAGES" "$CUR_PROMPT_FILE" >"$NEXT_PROMPT_FILE"
+fi
+
+if [[ "$(tail -c4 "$NEXT_PROMPT_FILE")" != "..." ]]; then
+    echo '...' >>"$NEXT_PROMPT_FILE"
+fi
+
+if [[ ! -e "$PROMPT_CACHE_FILE" ]]; then
+    echo 'Prompt cache does not exist, building...'
+    # Default batch_size to 8 here for better user feedback during initial prompt processing
+    ./main 2>>"$LOG" \
+        --batch_size 8 \
+        "${OPTS[@]}" \
+        --prompt-cache "$PROMPT_CACHE_FILE" \
+        --file "$CUR_PROMPT_FILE" \
+        --n_predict 1
+    echo
+    echo 'Done!'
+fi
+
+if [[ ! -e "$CUR_PROMPT_CACHE" ]]; then
+    cp "$PROMPT_CACHE_FILE" "$CUR_PROMPT_CACHE"
+fi
+if [[ ! -e "$NEXT_PROMPT_CACHE" ]]; then
+    cp "$PROMPT_CACHE_FILE" "$NEXT_PROMPT_CACHE"
+fi
+
+printf '%s ' "$(< "$CUR_PROMPT_FILE")"
+n_tokens=0
+
+while read -e line; do
+    # Limit generation to remaining context, with a buffer and estimating 2 chars/token for input
+    n_predict=$((CTX_SIZE - n_tokens - ${#line} / 2 - 32))
+
+    # Swap prompts when we're about to run out of context
+    if ((n_predict <= 0)); then
+        wait # for background main (below) to finish with next prompt
+        mv "$NEXT_PROMPT_FILE" "$CUR_PROMPT_FILE"
+        mv "$NEXT_PROMPT_CACHE" "$CUR_PROMPT_CACHE"
+
+        sed -r "$SED_DELETE_MESSAGES" "$CUR_PROMPT_FILE" >"$NEXT_PROMPT_FILE"
+        echo '...' >>"$NEXT_PROMPT_FILE"
+        cp "$PROMPT_CACHE_FILE" "$NEXT_PROMPT_CACHE"
+
+        n_tokens=0
+        n_predict=$((CTX_SIZE / 2))
+    fi
+
+    echo " ${line}" >>"$CUR_PROMPT_FILE"
+    if ((n_tokens > CTX_ROTATE_POINT)); then
+        echo " ${line}" >>"$NEXT_PROMPT_FILE"
+    fi
+
+    n_prompt_len_pre=$(($(wc -c <"$CUR_PROMPT_FILE")))
+
+    printf '%s: ' "$AI_NAME" >>"$CUR_PROMPT_FILE"
+
+    ./main 2>>"$LOG" "${OPTS[@]}" \
+        --prompt-cache "$CUR_PROMPT_CACHE" \
+        --prompt-cache-all \
+        --file "$CUR_PROMPT_FILE" \
+        --reverse-prompt "${USER_NAME}:" \
+        --n_predict "$n_predict" |
+        skip_bytes 1 |                  # skip BOS token added by ./main
+        tee "$CUR_PROMPT_FILE.tmp" |    # save prompt + generation to tmp file
+        skip_bytes "$n_prompt_len_pre"  # print generation
+
+    mv "$CUR_PROMPT_FILE.tmp" "$CUR_PROMPT_FILE"
+
+    # if we hit n_predict instead of reverse-prompt, we need to add the prompt
+    if [[ "$(tail -n1 "$CUR_PROMPT_FILE")" != "${USER_NAME}:" ]]; then
+        printf '\n%s:' "$USER_NAME"
+        printf '\n%s:' "$USER_NAME" >> "$CUR_PROMPT_FILE"
+    fi
+
+    printf ' '
+
+    # HACK get num tokens from debug message
+    # TODO get both messages in one go
+    if ! session_size_msg="$(tail -n30 "$LOG" | grep -oE "$SESSION_SIZE_MSG_PATTERN")" ||
+        ! sample_time_msg="$( tail -n10 "$LOG" | grep -oE "$SAMPLE_TIME_MSG_PATTERN")"; then
+        echo >&2 "Couldn't get number of tokens from ./main output!"
+        exit 1
+    fi
+
+    n_tokens=$(($(cut -d/ -f2 <<<"$session_size_msg") + $(cut -d/ -f2 <<<"$sample_time_msg")))
+
+    if ((n_tokens > CTX_ROTATE_POINT)); then
+        tail -c+$((n_prompt_len_pre + 1)) "$CUR_PROMPT_FILE" >>"$NEXT_PROMPT_FILE"
+    fi
+
+    # Update cache for next prompt in background, ideally during user input
+    ./main >>"$LOG_BG" 2>&1 "${OPTS[@]}" \
+        --prompt-cache "$NEXT_PROMPT_CACHE" \
+        --file "$NEXT_PROMPT_FILE" \
+        --n_predict 1 &
+done

From 6986c7835adc13ba3f9d933b95671bb1f3984dc6 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 19 May 2023 21:17:28 +0300
Subject: [PATCH 9/9] tests : add missing header

---
 tests/test-sampling.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/test-sampling.cpp b/tests/test-sampling.cpp
index ebfc17c18..0e675127f 100644
--- a/tests/test-sampling.cpp
+++ b/tests/test-sampling.cpp
@@ -5,6 +5,7 @@
 #undef NDEBUG
 #endif
 
+#include
 #include
 #include
 #include
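For completeness, one way to drive the persistent-chat script from patch 8: per its own guard clause, PROMPT_CACHE_FILE and CHAT_SAVE_DIR are mandatory, while MODEL, USER_NAME, AI_NAME, and PROMPT_TEMPLATE fall back to the defaults at the top of the script. A plausible invocation from the repository root, with paths chosen purely for illustration:

    mkdir -p ./chat/default
    PROMPT_CACHE_FILE=./chat/base-prompt.bin \
    CHAT_SAVE_DIR=./chat/default \
        ./examples/chat-persistent.sh

On first run it builds the base prompt cache, then enters the read loop; subsequent runs with the same CHAT_SAVE_DIR resume the saved conversation.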