From fb2821c8c7acbbb9a2b99a414e0f5263bf6a2b93 Mon Sep 17 00:00:00 2001
From: Alexander Krivutsenko
Date: Fri, 22 Dec 2023 10:39:14 +0100
Subject: [PATCH] Fixed issues per code review

---
 CMakeLists.txt                 |   1 +
 examples/infill/infill.cpp     |   2 +-
 examples/infill/infill.cpp.bak | 765 -----------------------------
 examples/main/main.cpp         |   2 +-
 examples/main/main.cpp.bak     | 873 ---------------------------------
 ggml.h                         |   2 +-
 6 files changed, 4 insertions(+), 1641 deletions(-)
 delete mode 100644 examples/infill/infill.cpp.bak
 delete mode 100644 examples/main/main.cpp.bak

diff --git a/CMakeLists.txt b/CMakeLists.txt
index fa3104306..1f2adcdf6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -501,6 +501,7 @@ endif()
 
 if (WIN32)
     if (MSVC)
+        # extend the number of sections in object files from the 2^16 default to 2^32, needed by heavily templated C++ code
         set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
     endif()
     add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp
index 87a81e5d0..be401bb89 100644
--- a/examples/infill/infill.cpp
+++ b/examples/infill/infill.cpp
@@ -351,7 +351,7 @@ int main(int argc, char ** argv) {
         sigint_action.sa_flags = 0;
         sigaction(SIGINT, &sigint_action, NULL);
 #elif defined (_WIN32)
-    SetConsoleCtrlHandler(reinterpret_cast(ConsoleCtrlHandler), TRUE);
+    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(ConsoleCtrlHandler), TRUE);
 #endif
 
     LOG_TEE("%s: interactive mode on.\n", __func__);
diff --git a/examples/infill/infill.cpp.bak b/examples/infill/infill.cpp.bak
deleted file mode 100644
index 4a7827876..000000000
--- a/examples/infill/infill.cpp.bak
+++ /dev/null
@@ -1,765 +0,0 @@
-#include "common.h"
-
-#include "console.h"
-#include "llama.h"
-#include "grammar-parser.h"
-
-#include <cassert>
-#include <cinttypes>
-#include <cmath>
-#include <cstdio>
-#include <cstring>
-#include <ctime>
-#include <fstream>
-#include <iostream>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
-#include <signal.h>
-#include <unistd.h>
-#elif defined (_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#include <windows.h>
-#include <signal.h>
-#endif
-
-#if defined(_MSC_VER)
-#pragma warning(disable: 4244 4267) // possible loss of data
-#endif
-
-static llama_context           ** g_ctx;
-static llama_model             ** g_model;
-static gpt_params               * g_params;
-static std::vector<llama_token> * g_input_tokens;
-static std::ostringstream       * g_output_ss;
-static std::vector<llama_token> * g_output_tokens;
-
-static bool is_interacting = false;
-
-static void write_logfile(
-    const llama_context * ctx, const gpt_params & params, const llama_model * model,
-    const std::vector<llama_token> & input_tokens, const std::string & output,
-    const std::vector<llama_token> & output_tokens
-) {
-    if (params.logdir.empty()) {
-        return;
-    }
-
-    const std::string timestamp = get_sortable_timestamp();
-
-    const bool success = create_directory_with_parents(params.logdir);
-    if (!success) {
-        fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
-                __func__, params.logdir.c_str());
-        return;
-    }
-
-    const std::string logfile_path = params.logdir + timestamp + ".yml";
-    FILE * logfile = fopen(logfile_path.c_str(), "w");
-
-    if (logfile == NULL) {
-        fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
-        return;
-    }
-
-    fprintf(logfile, "binary: infill\n");
-    char model_desc[128];
-    llama_model_desc(model, model_desc, sizeof(model_desc));
-    dump_non_result_info_yaml(logfile, params, ctx, timestamp, input_tokens, model_desc);
-
-    fprintf(logfile, "\n");
-    fprintf(logfile, "######################\n");
-    fprintf(logfile, "# Generation Results #\n");
fprintf(logfile, "######################\n"); - fprintf(logfile, "\n"); - - dump_string_yaml_multiline(logfile, "output", output.c_str()); - dump_vector_int_yaml(logfile, "output_tokens", output_tokens); - - llama_dump_timing_info_yaml(logfile, ctx); - fclose(logfile); -} - -#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) -static void sigint_handler(int signo) { - if (signo == SIGINT) { - if (!is_interacting) { - is_interacting = true; - } else { - console::cleanup(); - printf("\n"); - llama_print_timings(*g_ctx); - write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens); - _exit(130); - } - } -} -#endif - -int main(int argc, char ** argv) { - gpt_params params; - llama_sampling_params & sparams = params.sparams; - g_params = ¶ms; - - if (!gpt_params_parse(argc, argv, params)) { - return 1; - } - -#ifndef LOG_DISABLE_LOGS - log_set_target(log_filename_generator("infill", "log")); - LOG_TEE("Log start\n"); - log_dump_cmdline(argc, argv); -#endif // LOG_DISABLE_LOGS - - console::init(params.simple_io, params.use_color); - atexit([]() { console::cleanup(); }); - - if (params.logits_all) { - printf("\n************\n"); - printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__); - printf("************\n\n"); - - return 0; - } - - if (params.embedding) { - printf("\n************\n"); - printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__); - printf("************\n\n"); - - return 0; - } - - if (params.n_ctx != 0 && params.n_ctx < 8) { - LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__); - params.n_ctx = 8; - } - if (params.instruct) { - printf("\n************\n"); - printf("%s: please use the 'main' tool for instruct mode\n", __func__); - printf("************\n\n"); - - return 0; - } - if (params.chatml) { - printf("\n************\n"); - printf("%s: please use the 'main' tool for chatml mode\n", __func__); - printf("************\n\n"); - - return 0; - } - if (!params.antiprompt.empty()) { - printf("\n************\n"); - printf("%s: please use the 'main' tool for antiprompt mode\n", __func__); - printf("************\n\n"); - - return 0; - } - if (!params.interactive_first && (params.input_prefix.empty() && params.input_suffix.empty())) { - printf("\n************\n"); - printf("%s: please use '--interactive_first' or specify '--in_prefix' and/or '--in_suffix'\n", __func__); - printf("************\n\n"); - - return 0; - } - if (params.random_prompt) { - printf("\n************\n"); - printf("%s: please use the 'main' tool for random prompt mode\n", __func__); - printf("************\n\n"); - - return 0; - } - if (!params.path_prompt_cache.empty()) { - printf("\n************\n"); - printf("%s: infill does not support prompt caching\n", __func__); - printf("************\n\n"); - - return 0; - } - - if (params.rope_freq_base != 0.0) { - LOG_TEE("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base); - } - - if (params.rope_freq_scale != 0.0) { - LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale); - } - - LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT); - LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); - - if (params.seed == LLAMA_DEFAULT_SEED) { - params.seed = time(NULL); - } - - LOG_TEE("%s: seed = %u\n", __func__, params.seed); - - std::mt19937 rng(params.seed); - - LOG("%s: llama backend 
init\n", __func__); - llama_backend_init(params.numa); - - llama_model * model; - llama_context * ctx; - llama_context * ctx_guidance = NULL; - g_model = &model; - g_ctx = &ctx; - - // load the model and apply lora adapter, if any - LOG("%s: load the model and apply lora adapter, if any\n", __func__); - std::tie(model, ctx) = llama_init_from_gpt_params(params); - if (sparams.cfg_scale > 1.f) { - struct llama_context_params lparams = llama_context_params_from_gpt_params(params); - ctx_guidance = llama_new_context_with_model(model, lparams); - } - - if (model == NULL) { - LOG_TEE("%s: error: unable to load model\n", __func__); - return 1; - } - - const int n_ctx_train = llama_n_ctx_train(model); - const int n_ctx = llama_n_ctx(ctx); - LOG("n_ctx: %d\n", n_ctx); - - if (n_ctx > n_ctx_train) { - LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n", - __func__, n_ctx_train, n_ctx); - } - - // print system information - { - LOG_TEE("\n"); - LOG_TEE("%s\n", get_system_info(params).c_str()); - } - const bool add_bos = llama_should_add_bos_token(model); - LOG("add_bos: %d\n", add_bos); - - bool suff_rm_leading_spc = params.escape; - if (suff_rm_leading_spc && params.input_suffix.find_first_of(" ") == 0 && params.input_suffix.size() > 1) { - params.input_suffix.erase(0, 1); - suff_rm_leading_spc = false; - } - std::vector embd_inp; - std::vector inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false); - std::vector inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false); - const int space_token = 29871; - if (suff_rm_leading_spc && inp_sfx[0] == space_token) { - inp_sfx.erase(inp_sfx.begin()); - } - inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model)); - if (add_bos) { - inp_pfx.insert(inp_pfx.begin(), llama_token_bos(model)); - } - inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model)); - embd_inp = inp_pfx; - embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end()); - embd_inp.push_back(llama_token_middle(model)); - - LOG("prefix: \"%s\"\n", log_tostr(params.input_prefix)); - LOG("suffix: \"%s\"\n", log_tostr(params.input_suffix)); - LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str()); - - // Should not run without any tokens - if (embd_inp.empty()) { - embd_inp.push_back(llama_token_bos(model)); - LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str()); - } - - // Tokenize negative prompt - std::vector guidance_inp; - int guidance_offset = 0; - int original_prompt_len = 0; - if (ctx_guidance) { - LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt)); - - guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, add_bos); - LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str()); - - std::vector original_inp = ::llama_tokenize(ctx, params.prompt, add_bos); - LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str()); - - original_prompt_len = original_inp.size(); - guidance_offset = (int)guidance_inp.size() - original_prompt_len; - LOG("original_prompt_len: %s", log_tostr(original_prompt_len)); - LOG("guidance_offset: %s", log_tostr(guidance_offset)); - } - - if ((int) embd_inp.size() > n_ctx - 4) { - LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4); - return 1; - } - - // number of tokens to keep when resetting context - if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) { - params.n_keep = 
-    }
-
-    LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx).c_str());
-    LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx).c_str());
-
-
-    // enable interactive mode if interactive start is specified
-    if (params.interactive_first) {
-        params.interactive = true;
-    }
-
-    if (params.verbose_prompt) {
-        LOG_TEE("\n");
-        LOG_TEE("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
-        LOG_TEE("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
-        for (int i = 0; i < (int) embd_inp.size(); i++) {
-            LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
-        }
-
-        if (ctx_guidance) {
-            LOG_TEE("\n");
-            LOG_TEE("%s: negative prompt: '%s'\n", __func__, sparams.cfg_negative_prompt.c_str());
-            LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
-            for (int i = 0; i < (int) guidance_inp.size(); i++) {
-                LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
-            }
-        }
-
-        if (params.n_keep > 0) {
-            LOG_TEE("%s: static prompt based on n_keep: '", __func__);
-            for (int i = 0; i < params.n_keep; i++) {
-                LOG_TEE("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
-            }
-            LOG_TEE("'\n");
-        }
-        LOG_TEE("\n");
-    }
-
-    if (params.interactive) {
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
-        struct sigaction sigint_action;
-        sigint_action.sa_handler = sigint_handler;
-        sigemptyset (&sigint_action.sa_mask);
-        sigint_action.sa_flags = 0;
-        sigaction(SIGINT, &sigint_action, NULL);
-#elif defined (_WIN32)
-        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
-            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
-        };
-        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
-#endif
-
-        LOG_TEE("%s: interactive mode on.\n", __func__);
-
-        if (params.input_prefix_bos) {
-            LOG_TEE("Input prefix with BOS\n");
-        }
-
-        if (!params.input_prefix.empty()) {
-            LOG_TEE("Input prefix: '%s'\n", params.input_prefix.c_str());
-        }
-
-        if (!params.input_suffix.empty()) {
-            LOG_TEE("Input suffix: '%s'\n", params.input_suffix.c_str());
-        }
-    }
-    LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
-    LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
-    LOG_TEE("\n\n");
-
-    LOG_TEE("\n##### Infill mode #####\n\n");
-    if (params.infill) {
-        printf("\n************\n");
-        printf("no need to specify '--infill', always running infill\n");
-        printf("************\n\n");
-    }
-    if (params.interactive) {
-        const char *control_message;
-        if (params.multiline_input) {
-            control_message = " - To return control to LLaMa, end your input with '\\'.\n"
-                              " - To return control without starting a new line, end your input with '/'.\n";
-        } else {
-            control_message = " - Press Return to return control to LLaMa.\n"
-                              " - To return control without starting a new line, end your input with '/'.\n"
-                              " - If you want to submit another line, end your input with '\\'.\n";
-        }
-        LOG_TEE("== Running in interactive mode. ==\n");
==\n"); -#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) - LOG_TEE( " - Press Ctrl+C to interject at any time.\n"); -#endif - LOG_TEE( "%s\n", control_message); - - is_interacting = params.interactive_first; - } - - bool input_echo = true; - - int n_past = 0; - int n_remain = params.n_predict; - int n_consumed = 0; - int n_past_guidance = 0; - - std::vector input_tokens; g_input_tokens = &input_tokens; - std::vector output_tokens; g_output_tokens = &output_tokens; - std::ostringstream output_ss; g_output_ss = &output_ss; - - // the first thing we will do is to output the prompt, so set color accordingly - console::set_display(console::prompt); - - std::vector embd; - std::vector embd_guidance; - - struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams); - - while (n_remain != 0 || params.interactive) { - // predict - if (!embd.empty()) { - // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via - // --prompt or --file which uses the same value. - int max_embd_size = n_ctx - 4; - - // Ensure the input doesn't exceed the context size by truncating embd if necessary. - if ((int) embd.size() > max_embd_size) { - const int skipped_tokens = (int) embd.size() - max_embd_size; - embd.resize(max_embd_size); - - console::set_display(console::error); - printf("<>", skipped_tokens, skipped_tokens != 1 ? "s" : ""); - console::set_display(console::reset); - fflush(stdout); - } - - // infinite text generation via context swapping - // if we run out of context: - // - take the n_keep first tokens from the original prompt (via n_past) - // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches - if (n_past + (int) embd.size() + std::max(0, guidance_offset) > n_ctx) { - if (params.n_predict == -2) { - LOG_TEE("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict); - break; - } - - const int n_left = n_past - params.n_keep - 1; - const int n_discard = n_left/2; - - LOG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n", - n_past, n_left, n_ctx, params.n_keep, n_discard); - - llama_kv_cache_seq_rm (ctx, 0, params.n_keep + 1 , params.n_keep + n_discard + 1); - llama_kv_cache_seq_shift(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard); - - n_past -= n_discard; - - if (ctx_guidance) { - n_past_guidance -= n_discard; - } - - LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance); - - LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str()); - - } - - // evaluate tokens in batches - // embd is typically prepared beforehand to fit within a batch, but not always - - if (ctx_guidance) { - int input_size = 0; - llama_token * input_buf = NULL; - - if (n_past_guidance < (int) guidance_inp.size()) { - // Guidance context should have the same data with these modifications: - // - // * Replace the initial prompt - // * Shift everything by guidance_offset - embd_guidance = guidance_inp; - if (embd.begin() + original_prompt_len < embd.end()) { - embd_guidance.insert( - embd_guidance.end(), - embd.begin() + original_prompt_len, - embd.end() - ); - } - - input_buf = embd_guidance.data(); - input_size = embd_guidance.size(); - - LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str()); - } else { - input_buf = embd.data(); - input_size = embd.size(); - } - - for (int i = 0; i < input_size; i += params.n_batch) { - int n_eval = std::min(input_size - i, params.n_batch); - if 
-                        LOG_TEE("%s : failed to eval\n", __func__);
-                        return 1;
-                    }
-
-                    n_past_guidance += n_eval;
-                }
-            }
-
-            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
-                int n_eval = (int) embd.size() - i;
-                if (n_eval > params.n_batch) {
-                    n_eval = params.n_batch;
-                }
-
-                LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
-
-                if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
-                    LOG_TEE("%s : failed to eval\n", __func__);
-                    return 1;
-                }
-
-                n_past += n_eval;
-
-                LOG("n_past = %d\n", n_past);
-            }
-
-        }
-
-        embd.clear();
-        embd_guidance.clear();
-
-        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
-
-            const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);
-
-            llama_sampling_accept(ctx_sampling, ctx, id, true);
-
-            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());
-
-            embd.push_back(id);
-
-            // echo this to console
-            input_echo = true;
-
-            // decrement remaining sampling budget
-            --n_remain;
-
-            LOG("n_remain: %d\n", n_remain);
-        } else {
-            // some user input remains from prompt or interaction, forward it to processing
-            LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
-            while ((int) embd_inp.size() > n_consumed) {
-                embd.push_back(embd_inp[n_consumed]);
-
-                // push the prompt in the sampling context in order to apply repetition penalties later
-                // for the prompt, we don't apply grammar rules
-                llama_sampling_accept(ctx_sampling, ctx, embd_inp[n_consumed], false);
-
-                ++n_consumed;
-                if ((int) embd.size() >= params.n_batch) {
-                    break;
-                }
-            }
-        }
-
-        // display text
-        if (input_echo) {
-            for (auto id : embd) {
-                const std::string token_str = llama_token_to_piece(ctx, id);
-                printf("%s", token_str.c_str());
-
-                if (embd.size() > 1) {
-                    input_tokens.push_back(id);
-                } else {
-                    output_tokens.push_back(id);
-                    output_ss << token_str;
-                }
-            }
-            fflush(stdout);
-        }
-        // reset color to default if there is no pending user input
-        if (input_echo && (int) embd_inp.size() == n_consumed) {
-            console::set_display(console::reset);
-        }
-
-        // if not currently processing queued inputs;
-        if ((int) embd_inp.size() <= n_consumed) {
-
-            // deal with eot token in infill mode
-            if ((llama_sampling_last(ctx_sampling) == llama_token_eot(model) || is_interacting) && params.interactive){
-                if (is_interacting && !params.interactive_first) {
-                    // print an eot token
-                    printf("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
-                }
-                fflush(stdout);
-                printf("\n");
-                console::set_display(console::user_input);
-                std::string buffer;
-                std::string line;
-                bool another_line = true;
-                // set a new prefix via stdin
-                do {
-                    another_line = console::readline(line, params.multiline_input);
-                    buffer += line;
-                } while (another_line);
-                // check if we got an empty line, if so we use the old input
-                if (!buffer.empty() && !(buffer.length() == 1 && buffer[0] == '\n')) {
-                    params.input_prefix = buffer;
-                }
-                buffer.clear();
-                // set a new suffix via stdin
-                do {
-                    another_line = console::readline(line, params.multiline_input);
-                    buffer += line;
-                } while (another_line);
-                // check if we got an empty line
-                if (!buffer.empty() && !(buffer.length() == 1 && buffer[0] == '\n')) {
-                    params.input_suffix = buffer;
-                }
-                buffer.clear();
-                // done taking input, reset color
-                console::set_display(console::reset);
-
-                if (params.escape) {
-                    // process escape sequences; for the initial prompt this is done in common.cpp when we load the params, but for the interactive mode we need to do it here
-                    process_escapes(params.input_prefix);
-                    process_escapes(params.input_suffix);
-                }
-                suff_rm_leading_spc = params.escape;
-                if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
-                    params.input_suffix.erase(0, 1);
-                    suff_rm_leading_spc = false;
-                }
-                // tokenize new prefix and suffix
-                std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
-                std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
-                if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
-                    inp_sfx.erase(inp_sfx.begin());
-                }
-                inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
-                if (add_bos) {
-                    inp_pfx.insert(inp_pfx.begin(), llama_token_bos(model));
-                }
-                inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
-                embd_inp = inp_pfx;
-                embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
-                embd_inp.push_back(llama_token_middle(model));
-                embd.clear();
-                embd_guidance.clear();
-                n_remain = params.n_predict;
-                n_past = 0;
-                n_consumed = 0;
-                // LOG_TEE("took new input\n");
-                is_interacting = false;
-            }
-            // deal with end of text token in interactive mode
-            else if (llama_sampling_last(ctx_sampling) == llama_token_eos(model)) {
-                LOG("found EOS token\n");
-
-                if (params.interactive) {
-
-                    is_interacting = true;
-                    printf("\n");
-                    console::set_display(console::user_input);
-                    fflush(stdout);
-                }
-            }
-
-            if (n_past > 0 && is_interacting && !params.interactive) {
-                LOG("waiting for user input\n");
-
-                if (params.input_prefix_bos) {
-                    LOG("adding input prefix BOS token\n");
-                    embd_inp.push_back(llama_token_bos(model));
-                }
-
-                std::string buffer;
-                if (!params.input_prefix.empty()) {
-                    LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
-                    buffer += params.input_prefix;
-                    printf("%s", buffer.c_str());
-                }
-
-                std::string line;
-                bool another_line = true;
-                do {
-                    another_line = console::readline(line, params.multiline_input);
-                    buffer += line;
-                } while (another_line);
-
-                // done taking input, reset color
-                console::set_display(console::reset);
-
-                // Add tokens to embd only if the input buffer is non-empty
-                // Entering an empty line lets the user pass control back
-                if (buffer.length() > 1) {
-                    // append input suffix if any
-                    if (!params.input_suffix.empty()) {
-                        LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
-                        buffer += params.input_suffix;
-                        printf("%s", params.input_suffix.c_str());
-                    }
-
-                    LOG("buffer: '%s'\n", buffer.c_str());
-
-                    const size_t original_size = embd_inp.size();
-
-                    const auto line_inp = ::llama_tokenize(ctx, buffer, false);
-                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
-
-                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
-
-                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
-                        const llama_token token = embd_inp[i];
-                        output_tokens.push_back(token);
-                        output_ss << llama_token_to_piece(ctx, token);
-                    }
-
-                    n_remain -= line_inp.size();
-                    LOG("n_remain: %d\n", n_remain);
-                } else {
-                    LOG("empty line, passing control back\n");
-                }
-
-                input_echo = false; // do not echo this again
-            }
-
-            if (n_past > 0) {
-                if (is_interacting) {
-                    llama_sampling_reset(ctx_sampling);
-                }
-                is_interacting = false;
-            }
-        }
-
-        // end of text token
-        if (!embd.empty() && embd.back() == llama_token_eos(model) && !params.interactive) {
-            break;
-        }
-
-        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
-        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
-        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
-            n_remain = params.n_predict;
-            is_interacting = true;
-        }
-    }
-    if (!params.interactive && n_remain <= 0) {
-        printf("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
-        fflush(stdout);
-    }
-
-    llama_print_timings(ctx);
-    write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
-
-    if (ctx_guidance) { llama_free(ctx_guidance); }
-    llama_free(ctx);
-    llama_free_model(model);
-
-    llama_sampling_free(ctx_sampling);
-    llama_backend_free();
-
-#ifndef LOG_DISABLE_LOGS
-    LOG_TEE("Log end\n");
-#endif // LOG_DISABLE_LOGS
-
-    return 0;
-}
-
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 2108ef617..339a66fdc 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -399,7 +399,7 @@ int main(int argc, char ** argv) {
         sigint_action.sa_flags = 0;
         sigaction(SIGINT, &sigint_action, NULL);
 #elif defined (_WIN32)
-    SetConsoleCtrlHandler(reinterpret_cast(ConsoleCtrlHandler), TRUE);
+    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(ConsoleCtrlHandler), TRUE);
 #endif
 
     LOG_TEE("%s: interactive mode on.\n", __func__);
diff --git a/examples/main/main.cpp.bak b/examples/main/main.cpp.bak
deleted file mode 100644
index c096f110b..000000000
--- a/examples/main/main.cpp.bak
+++ /dev/null
@@ -1,873 +0,0 @@
-#include "common.h"
-
-#include "console.h"
-#include "llama.h"
-
-#include <cassert>
-#include <cinttypes>
-#include <cmath>
-#include <cstdio>
-#include <cstring>
-#include <ctime>
-#include <fstream>
-#include <iostream>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
-#include <signal.h>
-#include <unistd.h>
-#elif defined (_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#include <windows.h>
-#include <signal.h>
-#endif
-
-#if defined(_MSC_VER)
-#pragma warning(disable: 4244 4267) // possible loss of data
-#endif
-
-static llama_context           ** g_ctx;
-static llama_model             ** g_model;
-static gpt_params               * g_params;
-static std::vector<llama_token> * g_input_tokens;
-static std::ostringstream       * g_output_ss;
-static std::vector<llama_token> * g_output_tokens;
-static bool is_interacting = false;
-
-
-static void write_logfile(
-    const llama_context * ctx, const gpt_params & params, const llama_model * model,
-    const std::vector<llama_token> & input_tokens, const std::string & output,
-    const std::vector<llama_token> & output_tokens
-) {
-    if (params.logdir.empty()) {
-        return;
-    }
-
-    const std::string timestamp = get_sortable_timestamp();
-
-    const bool success = create_directory_with_parents(params.logdir);
-    if (!success) {
-        fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
-                __func__, params.logdir.c_str());
-        return;
-    }
-
-    const std::string logfile_path = params.logdir + timestamp + ".yml";
-    FILE * logfile = fopen(logfile_path.c_str(), "w");
-
-    if (logfile == NULL) {
-        fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
-        return;
-    }
-
-    fprintf(logfile, "binary: main\n");
-    char model_desc[128];
-    llama_model_desc(model, model_desc, sizeof(model_desc));
-    dump_non_result_info_yaml(logfile, params, ctx, timestamp, input_tokens, model_desc);
-
-    fprintf(logfile, "\n");
-    fprintf(logfile, "######################\n");
-    fprintf(logfile, "# Generation Results #\n");
-    fprintf(logfile, "######################\n");
-    fprintf(logfile, "\n");
-
-    dump_string_yaml_multiline(logfile, "output", output.c_str());
-    dump_vector_int_yaml(logfile, "output_tokens", output_tokens);
-
-    llama_dump_timing_info_yaml(logfile, ctx);
-    fclose(logfile);
-}
-
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
-static void sigint_handler(int signo) {
-    if (signo == SIGINT) {
-        if (!is_interacting) {
-            is_interacting = true;
-        } else {
-            console::cleanup();
-            printf("\n");
-            llama_print_timings(*g_ctx);
-            write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
-            _exit(130);
-        }
-    }
-}
-#endif
-
-static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
-    (void) level;
-    (void) user_data;
-    LOG_TEE("%s", text);
-}
-
-int main(int argc, char ** argv) {
-    gpt_params params;
-    g_params = &params;
-
-    if (!gpt_params_parse(argc, argv, params)) {
-        return 1;
-    }
-    llama_sampling_params & sparams = params.sparams;
-
-#ifndef LOG_DISABLE_LOGS
-    log_set_target(log_filename_generator("main", "log"));
-    LOG_TEE("Log start\n");
-    log_dump_cmdline(argc, argv);
-    llama_log_set(llama_log_callback_logTee, nullptr);
-#endif // LOG_DISABLE_LOGS
-
-    // TODO: Dump params ?
-    //LOG("Params perplexity: %s\n", LOG_TOSTR(params.perplexity));
-
-    // save choice to use color for later
-    // (note for later: this is a slightly awkward choice)
-    console::init(params.simple_io, params.use_color);
-    atexit([]() { console::cleanup(); });
-
-    if (params.logits_all) {
-        printf("\n************\n");
-        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
-        printf("************\n\n");
-
-        return 0;
-    }
-
-    if (params.embedding) {
-        printf("\n************\n");
-        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
-        printf("************\n\n");
-
-        return 0;
-    }
-
-    if (params.n_ctx != 0 && params.n_ctx < 8) {
-        LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
-        params.n_ctx = 8;
-    }
-
-    if (params.rope_freq_base != 0.0) {
-        LOG_TEE("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
-    }
-
-    if (params.rope_freq_scale != 0.0) {
-        LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
-    }
-
-    LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
-    LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET);
-
-    if (params.seed == LLAMA_DEFAULT_SEED) {
-        params.seed = time(NULL);
-    }
-
-    LOG_TEE("%s: seed  = %u\n", __func__, params.seed);
-
-    std::mt19937 rng(params.seed);
-    if (params.random_prompt) {
-        params.prompt = gpt_random_prompt(rng);
-    }
-
-    LOG("%s: llama backend init\n", __func__);
-    llama_backend_init(params.numa);
-
-    llama_model * model;
-    llama_context * ctx;
-    llama_context * ctx_guidance = NULL;
-    g_model = &model;
-    g_ctx = &ctx;
-
-    // load the model and apply lora adapter, if any
-    LOG("%s: load the model and apply lora adapter, if any\n", __func__);
-    std::tie(model, ctx) = llama_init_from_gpt_params(params);
-    if (sparams.cfg_scale > 1.f) {
-        struct llama_context_params lparams = llama_context_params_from_gpt_params(params);
-        ctx_guidance = llama_new_context_with_model(model, lparams);
-    }
-
-    if (model == NULL) {
-        LOG_TEE("%s: error: unable to load model\n", __func__);
-        return 1;
-    }
-
-    const int n_ctx_train = llama_n_ctx_train(model);
-    const int n_ctx = llama_n_ctx(ctx);
-    LOG("n_ctx: %d\n", n_ctx);
-
-    if (n_ctx > n_ctx_train) {
-        LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n",
-                __func__, n_ctx_train, n_ctx);
-    }
-
-    // print system information
-    {
LOG_TEE("\n"); - LOG_TEE("%s\n", get_system_info(params).c_str()); - } - - std::string path_session = params.path_prompt_cache; - std::vector session_tokens; - - if (!path_session.empty()) { - LOG_TEE("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str()); - - // fopen to check for existing session - FILE * fp = std::fopen(path_session.c_str(), "rb"); - if (fp != NULL) { - std::fclose(fp); - - session_tokens.resize(n_ctx); - size_t n_token_count_out = 0; - if (!llama_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) { - LOG_TEE("%s: error: failed to load session file '%s'\n", __func__, path_session.c_str()); - return 1; - } - session_tokens.resize(n_token_count_out); - llama_set_rng_seed(ctx, params.seed); - - LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int) session_tokens.size()); - } else { - LOG_TEE("%s: session file does not exist, will create\n", __func__); - } - } - - const bool add_bos = llama_should_add_bos_token(model); - LOG("add_bos: %d\n", add_bos); - - std::vector embd_inp; - - if (params.interactive_first || params.instruct || params.chatml || !params.prompt.empty() || session_tokens.empty()) { - LOG("tokenize the prompt\n"); - if (params.chatml) { - params.prompt = "<|im_start|>system\n" + params.prompt + "<|im_end|>"; - } - embd_inp = ::llama_tokenize(ctx, params.prompt, add_bos, true); - } else { - LOG("use session tokens\n"); - embd_inp = session_tokens; - } - - LOG("prompt: \"%s\"\n", log_tostr(params.prompt)); - LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str()); - - // Should not run without any tokens - if (embd_inp.empty()) { - embd_inp.push_back(llama_token_bos(model)); - LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str()); - } - - // Tokenize negative prompt - std::vector guidance_inp; - int guidance_offset = 0; - int original_prompt_len = 0; - if (ctx_guidance) { - LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt)); - - guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, add_bos, true); - LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str()); - - std::vector original_inp = ::llama_tokenize(ctx, params.prompt, add_bos, true); - LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str()); - - original_prompt_len = original_inp.size(); - guidance_offset = (int)guidance_inp.size() - original_prompt_len; - LOG("original_prompt_len: %s", log_tostr(original_prompt_len)); - LOG("guidance_offset: %s", log_tostr(guidance_offset)); - } - - if ((int) embd_inp.size() > n_ctx - 4) { - LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4); - return 1; - } - - // debug message about similarity of saved session, if applicable - size_t n_matching_session_tokens = 0; - if (!session_tokens.empty()) { - for (llama_token id : session_tokens) { - if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) { - break; - } - n_matching_session_tokens++; - } - if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) { - LOG_TEE("%s: using full prompt from session file\n", __func__); - } else if (n_matching_session_tokens >= embd_inp.size()) { - LOG_TEE("%s: session file has exact match for prompt!\n", __func__); - } else if (n_matching_session_tokens < (embd_inp.size() / 
-            LOG_TEE("%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
-                    __func__, n_matching_session_tokens, embd_inp.size());
-        } else {
-            LOG_TEE("%s: session file matches %zu / %zu tokens of prompt\n",
-                    __func__, n_matching_session_tokens, embd_inp.size());
-        }
-
-        // remove any "future" tokens that we might have inherited from the previous session
-        llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1);
-    }
-
-    LOGLN(
-        "recalculate the cached logits (check): embd_inp.empty() %s, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu, embd_inp.size() %zu",
-        log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size(), embd_inp.size());
-
-    // if we will use the cache for the full prompt without reaching the end of the cache, force
-    // reevaluation of the last token to recalculate the cached logits
-    if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
-        LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);
-
-        session_tokens.resize(embd_inp.size() - 1);
-    }
-
-    // number of tokens to keep when resetting context
-    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size() || params.instruct || params.chatml) {
-        params.n_keep = (int)embd_inp.size();
-    }
-
-    // prefix & suffix for instruct mode
-    const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", add_bos, true);
-    const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n",    false,   true);
-
-    LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx).c_str());
-    LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx).c_str());
-
-    // chatml prefix & suffix
-    const auto cml_pfx = ::llama_tokenize(ctx, "\n<|im_start|>user\n", add_bos, true);
-    const auto cml_sfx = ::llama_tokenize(ctx, "<|im_end|>\n<|im_start|>assistant\n", false, true);
-
-    LOG("cml_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_pfx).c_str());
-    LOG("cml_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_sfx).c_str());
-
-    // in instruct mode, we inject a prefix and a suffix to each input by the user
-    if (params.instruct) {
-        params.interactive_first = true;
-        params.antiprompt.push_back("### Instruction:\n\n");
-    }
-    // similar for chatml mode
-    else if (params.chatml) {
-        params.interactive_first = true;
-        params.antiprompt.push_back("<|im_start|>user\n");
-    }
-
-    // enable interactive mode if interactive start is specified
-    if (params.interactive_first) {
-        params.interactive = true;
-    }
-
-    if (params.verbose_prompt) {
-        LOG_TEE("\n");
-        LOG_TEE("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
-        LOG_TEE("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
-        for (int i = 0; i < (int) embd_inp.size(); i++) {
-            LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
-        }
-
-        if (ctx_guidance) {
-            LOG_TEE("\n");
-            LOG_TEE("%s: negative prompt: '%s'\n", __func__, sparams.cfg_negative_prompt.c_str());
-            LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
-            for (int i = 0; i < (int) guidance_inp.size(); i++) {
-                LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
-            }
-        }
-
-        if (params.n_keep > 0) {
-            LOG_TEE("%s: static prompt based on n_keep: '", __func__);
-            for (int i = 0; i < params.n_keep; i++) {
-                LOG_TEE("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
-            }
-            LOG_TEE("'\n");
-        }
-        LOG_TEE("\n");
-    }
-
-    if (params.interactive) {
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
-        struct sigaction sigint_action;
-        sigint_action.sa_handler = sigint_handler;
-        sigemptyset (&sigint_action.sa_mask);
-        sigint_action.sa_flags = 0;
-        sigaction(SIGINT, &sigint_action, NULL);
-#elif defined (_WIN32)
-        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
-            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
-        };
-        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
-#endif
-
-        LOG_TEE("%s: interactive mode on.\n", __func__);
-
-        if (!params.antiprompt.empty()) {
-            for (const auto & antiprompt : params.antiprompt) {
-                LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
-                if (params.verbose_prompt) {
-                    auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
-                    for (int i = 0; i < (int) tmp.size(); i++) {
-                        LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
-                    }
-                }
-            }
-        }
-
-        if (params.input_prefix_bos) {
-            LOG_TEE("Input prefix with BOS\n");
-        }
-
-        if (!params.input_prefix.empty()) {
-            LOG_TEE("Input prefix: '%s'\n", params.input_prefix.c_str());
-            if (params.verbose_prompt) {
-                auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
-                for (int i = 0; i < (int) tmp.size(); i++) {
-                    LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
-                }
-            }
-        }
-
-        if (!params.input_suffix.empty()) {
-            LOG_TEE("Input suffix: '%s'\n", params.input_suffix.c_str());
-            if (params.verbose_prompt) {
-                auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
-                for (int i = 0; i < (int) tmp.size(); i++) {
-                    LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
-                }
-            }
-        }
-    }
-    LOG_TEE("sampling: \n%s\n", llama_sampling_print(sparams).c_str());
-    LOG_TEE("sampling order: \n%s\n", llama_sampling_order_print(sparams).c_str());
-    LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
-    LOG_TEE("\n\n");
-
-    if (params.interactive) {
-        const char *control_message;
-        if (params.multiline_input) {
-            control_message = " - To return control to LLaMa, end your input with '\\'.\n"
-                              " - To return control without starting a new line, end your input with '/'.\n";
-        } else {
-            control_message = " - Press Return to return control to LLaMa.\n"
-                              " - To return control without starting a new line, end your input with '/'.\n"
-                              " - If you want to submit another line, end your input with '\\'.\n";
-        }
-        LOG_TEE("== Running in interactive mode. ==\n");
==\n"); -#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) - LOG_TEE( " - Press Ctrl+C to interject at any time.\n"); -#endif - LOG_TEE( "%s\n", control_message); - - is_interacting = params.interactive_first; - } - - bool is_antiprompt = false; - bool input_echo = true; - bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size(); - - int n_past = 0; - int n_remain = params.n_predict; - int n_consumed = 0; - int n_session_consumed = 0; - int n_past_guidance = 0; - - std::vector input_tokens; g_input_tokens = &input_tokens; - std::vector output_tokens; g_output_tokens = &output_tokens; - std::ostringstream output_ss; g_output_ss = &output_ss; - - // the first thing we will do is to output the prompt, so set color accordingly - console::set_display(console::prompt); - - std::vector embd; - std::vector embd_guidance; - - struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams); - - while ((n_remain != 0 && !is_antiprompt) || params.interactive) { - // predict - if (!embd.empty()) { - // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via - // --prompt or --file which uses the same value. - int max_embd_size = n_ctx - 4; - - // Ensure the input doesn't exceed the context size by truncating embd if necessary. - if ((int) embd.size() > max_embd_size) { - const int skipped_tokens = (int) embd.size() - max_embd_size; - embd.resize(max_embd_size); - - console::set_display(console::error); - printf("<>", skipped_tokens, skipped_tokens != 1 ? "s" : ""); - console::set_display(console::reset); - fflush(stdout); - } - - // infinite text generation via context swapping - // if we run out of context: - // - take the n_keep first tokens from the original prompt (via n_past) - // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches - if (n_past + (int) embd.size() + std::max(0, guidance_offset) > n_ctx) { - if (params.n_predict == -2) { - LOG_TEE("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict); - break; - } - - const int n_left = n_past - params.n_keep - 1; - const int n_discard = n_left/2; - - LOG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n", - n_past, n_left, n_ctx, params.n_keep, n_discard); - - llama_kv_cache_seq_rm (ctx, 0, params.n_keep + 1 , params.n_keep + n_discard + 1); - llama_kv_cache_seq_shift(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard); - - n_past -= n_discard; - - if (ctx_guidance) { - n_past_guidance -= n_discard; - } - - LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance); - - LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str()); - - LOG("clear session path\n"); - path_session.clear(); - } - - // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past) - if (n_session_consumed < (int) session_tokens.size()) { - size_t i = 0; - for ( ; i < embd.size(); i++) { - if (embd[i] != session_tokens[n_session_consumed]) { - session_tokens.resize(n_session_consumed); - break; - } - - n_past++; - n_session_consumed++; - - if (n_session_consumed >= (int) session_tokens.size()) { - ++i; - break; - } - } - if (i > 0) { - embd.erase(embd.begin(), embd.begin() + i); - } - } - - // evaluate tokens in batches - // embd is typically prepared beforehand to fit within a batch, but not always - if (ctx_guidance) { - int input_size = 0; - llama_token * input_buf = NULL; - - if (n_past_guidance 
-                    // Guidance context should have the same data with these modifications:
-                    //
-                    // * Replace the initial prompt
-                    // * Shift everything by guidance_offset
-                    embd_guidance = guidance_inp;
-                    if (embd.begin() + original_prompt_len < embd.end()) {
-                        embd_guidance.insert(
-                            embd_guidance.end(),
-                            embd.begin() + original_prompt_len,
-                            embd.end()
-                        );
-                    }
-
-                    input_buf  = embd_guidance.data();
-                    input_size = embd_guidance.size();
-
-                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str());
-                } else {
-                    input_buf  = embd.data();
-                    input_size = embd.size();
-                }
-
-                for (int i = 0; i < input_size; i += params.n_batch) {
-                    int n_eval = std::min(input_size - i, params.n_batch);
-                    if (llama_decode(ctx_guidance, llama_batch_get_one(input_buf + i, n_eval, n_past_guidance, 0))) {
-                        LOG_TEE("%s : failed to eval\n", __func__);
-                        return 1;
-                    }
-
-                    n_past_guidance += n_eval;
-                }
-            }
-
-            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
-                int n_eval = (int) embd.size() - i;
-                if (n_eval > params.n_batch) {
-                    n_eval = params.n_batch;
-                }
-
-                LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
-
-                if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
-                    LOG_TEE("%s : failed to eval\n", __func__);
-                    return 1;
-                }
-
-                n_past += n_eval;
-
-                LOG("n_past = %d\n", n_past);
-            }
-
-            if (!embd.empty() && !path_session.empty()) {
-                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
-                n_session_consumed = session_tokens.size();
-            }
-        }
-
-        embd.clear();
-        embd_guidance.clear();
-
-        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
-            // optionally save the session on first sample (for faster prompt loading next time)
-            if (!path_session.empty() && need_to_save_session && !params.prompt_cache_ro) {
-                need_to_save_session = false;
-                llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
-
-                LOG("saved session to %s\n", path_session.c_str());
-            }
-
-            const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);
-
-            llama_sampling_accept(ctx_sampling, ctx, id, true);
-
-            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, ctx_sampling->prev).c_str());
-
-            embd.push_back(id);
-
-            // echo this to console
-            input_echo = true;
-
-            // decrement remaining sampling budget
-            --n_remain;
-
-            LOG("n_remain: %d\n", n_remain);
-        } else {
-            // some user input remains from prompt or interaction, forward it to processing
-            LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
-            while ((int) embd_inp.size() > n_consumed) {
-                embd.push_back(embd_inp[n_consumed]);
-
-                // push the prompt in the sampling context in order to apply repetition penalties later
-                // for the prompt, we don't apply grammar rules
-                llama_sampling_accept(ctx_sampling, ctx, embd_inp[n_consumed], false);
-
-                ++n_consumed;
-                if ((int) embd.size() >= params.n_batch) {
-                    break;
-                }
-            }
-        }
-
-        // display text
-        if (input_echo) {
-            for (auto id : embd) {
-                const std::string token_str = llama_token_to_piece(ctx, id);
-                printf("%s", token_str.c_str());
-
-                if (embd.size() > 1) {
-                    input_tokens.push_back(id);
-                } else {
-                    output_tokens.push_back(id);
-                    output_ss << token_str;
-                }
-            }
-            fflush(stdout);
-        }
-        // reset color to default if there is no pending user input
-        if (input_echo && (int) embd_inp.size() == n_consumed) {
-            console::set_display(console::reset);
-        }
-
-        // if not currently processing queued inputs;
-        if ((int) embd_inp.size() <= n_consumed) {
-            // check for reverse prompt in the last n_prev tokens
-            if (!params.antiprompt.empty()) {
-                const int n_prev = 32;
-                const std::string last_output = llama_sampling_prev_str(ctx_sampling, ctx, n_prev);
-
-                is_antiprompt = false;
-                // Check if each of the reverse prompts appears at the end of the output.
-                // If we're not running interactively, the reverse prompt might be tokenized with some following characters
-                // so we'll compensate for that by widening the search window a bit.
-                for (std::string & antiprompt : params.antiprompt) {
-                    size_t extra_padding = params.interactive ? 0 : 2;
-                    size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
-                        ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
-                        : 0;
-
-                    if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
-                        if (params.interactive) {
-                            is_interacting = true;
-                        }
-                        is_antiprompt = true;
-                        break;
-                    }
-                }
-
-                if (is_antiprompt) {
-                    LOG("found antiprompt: %s\n", last_output.c_str());
-                }
-            }
-
-            // deal with end of text token in interactive mode
-            if (llama_sampling_last(ctx_sampling) == llama_token_eos(model)) {
-                LOG("found EOS token\n");
-
-                if (params.interactive) {
-                    if (!params.antiprompt.empty()) {
-                        // tokenize and inject first reverse prompt
-                        const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false, true);
-                        embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
-                        is_antiprompt = true;
-                    }
-
-                    is_interacting = true;
-                    printf("\n");
-                } else if (params.instruct || params.chatml) {
-                    is_interacting = true;
-                }
-            }
-
-            if (n_past > 0 && is_interacting) {
-                LOG("waiting for user input\n");
-
-                if (params.instruct || params.chatml) {
-                    printf("\n> ");
-                }
-
-                if (params.input_prefix_bos) {
-                    LOG("adding input prefix BOS token\n");
-                    embd_inp.push_back(llama_token_bos(model));
-                }
-
-                std::string buffer;
-                if (!params.input_prefix.empty()) {
-                    LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
-                    printf("%s", params.input_prefix.c_str());
-                }
-
-                // color user input only
-                console::set_display(console::user_input);
-
-                std::string line;
-                bool another_line = true;
-                do {
-                    another_line = console::readline(line, params.multiline_input);
-                    buffer += line;
-                } while (another_line);
-
-                // done taking input, reset color
-                console::set_display(console::reset);
-
-                // Add tokens to embd only if the input buffer is non-empty
-                // Entering an empty line lets the user pass control back
-                if (buffer.length() > 1) {
-                    // append input suffix if any
-                    if (!params.input_suffix.empty()) {
-                        LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
-                        printf("%s", params.input_suffix.c_str());
-                    }
-
-                    LOG("buffer: '%s'\n", buffer.c_str());
-
-                    const size_t original_size = embd_inp.size();
-
-                    // instruct mode: insert instruction prefix
-                    if (params.instruct && !is_antiprompt) {
-                        LOG("inserting instruction prefix\n");
-                        n_consumed = embd_inp.size();
-                        embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
-                    }
-                    // chatml mode: insert user chat prefix
-                    if (params.chatml && !is_antiprompt) {
-                        LOG("inserting chatml prefix\n");
-                        n_consumed = embd_inp.size();
-                        embd_inp.insert(embd_inp.end(), cml_pfx.begin(), cml_pfx.end());
-                    }
-                    if (params.escape) {
-                        process_escapes(buffer);
-                    }
-
-                    const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
-                    const auto line_inp = ::llama_tokenize(ctx, buffer,              false, false);
-                    const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
-                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
-
-                    embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
-                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
-                    embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());
-
-                    // instruct mode: insert response suffix
-                    if (params.instruct) {
-                        LOG("inserting instruction suffix\n");
-                        embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
-                    }
-                    // chatml mode: insert assistant chat suffix
-                    if (params.chatml) {
-                        LOG("inserting chatml suffix\n");
-                        embd_inp.insert(embd_inp.end(), cml_sfx.begin(), cml_sfx.end());
-                    }
-
-                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
-                        const llama_token token = embd_inp[i];
-                        output_tokens.push_back(token);
-                        output_ss << llama_token_to_piece(ctx, token);
-                    }
-
-                    n_remain -= line_inp.size();
-                    LOG("n_remain: %d\n", n_remain);
-                } else {
-                    LOG("empty line, passing control back\n");
-                }
-
-                input_echo = false; // do not echo this again
-            }
-
-            if (n_past > 0) {
-                if (is_interacting) {
-                    llama_sampling_reset(ctx_sampling);
-                }
-                is_interacting = false;
-            }
-        }
-
-        // end of text token
-        if (!embd.empty() && embd.back() == llama_token_eos(model) && !(params.instruct || params.interactive || params.chatml)) {
-            LOG_TEE(" [end of text]\n");
-            break;
-        }
-
-        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
-        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
-        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
-            n_remain = params.n_predict;
-            is_interacting = true;
-        }
-    }
-
-    if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
-        LOG_TEE("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
-        llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
-    }
-
-    llama_print_timings(ctx);
-    write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
-
-    if (ctx_guidance) { llama_free(ctx_guidance); }
-    llama_free(ctx);
-    llama_free_model(model);
-
-    llama_sampling_free(ctx_sampling);
-    llama_backend_free();
-
-#ifndef LOG_DISABLE_LOGS
-    LOG_TEE("Log end\n");
-#endif // LOG_DISABLE_LOGS
-
-    return 0;
-}
diff --git a/ggml.h b/ggml.h
index d5010e917..73e77b494 100644
--- a/ggml.h
+++ b/ggml.h
@@ -2218,7 +2218,7 @@ extern "C" {
 //
 #if defined(__cplusplus) || defined(_MSC_VER)
-// GGML_RESTRICT not standard in C++
+// restrict not standard in C++
 #define GGML_RESTRICT
 #else
 #define GGML_RESTRICT restrict
 #endif
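
Note on the last hunk: GGML_RESTRICT exists because the `restrict` qualifier is C99-only and is rejected by C++ compilers (and by MSVC in C mode), so the macro expands to nothing there. A minimal compilable sketch of the pattern follows; the scale_f32 helper is a hypothetical illustration, not part of ggml's API.

#include <stddef.h>

/* Same dual-language guard as in ggml.h: under C++ or MSVC the macro is
   empty, under a C99 compiler it becomes the `restrict` keyword. */
#if defined(__cplusplus) || defined(_MSC_VER)
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif

/* Hypothetical example: `restrict` promises the compiler that dst and src
   never alias, which typically allows tighter vectorization in C builds,
   while the identical signature still compiles as C++. */
static void scale_f32(float * GGML_RESTRICT dst, const float * GGML_RESTRICT src,
                      float scale, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        dst[i] = src[i] * scale;
    }
}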