diff --git a/main.cpp b/main.cpp
index 9e09a6a85..c1872e1b0 100644
--- a/main.cpp
+++ b/main.cpp
@@ -55,25 +55,6 @@ void sigint_handler(int signo) {
 }
 #endif
 
-const char * llama_print_system_info(void) {
-    static std::string s;
-
-    s = "";
-    s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | ";
-    s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | ";
-    s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | ";
-    s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | ";
-    s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | ";
-    s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | ";
-    s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | ";
-    s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | ";
-    s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
-    s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | ";
-    s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | ";
-    s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
-
-    return s.c_str();
-}
 
 int main(int argc, char ** argv) {
     ggml_time_init();
@@ -107,41 +88,18 @@ int main(int argc, char ** argv) {
 
     int64_t t_load_us = 0;
 
-    gpt_vocab vocab;
-    llama_model model;
-
-    // load the model
-    {
-        const ggml_type memory_type = params.memory_f16 ? GGML_TYPE_F16 : GGML_TYPE_F32;
-        const int64_t t_start_us = ggml_time_us();
-        if (!llama_model_load(params.model, model, vocab, params.n_ctx, memory_type)) {
-            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
-            return 1;
-        }
-
-        t_load_us = ggml_time_us() - t_start_us;
-    }
+    llama_context* ctx_ptr = llama_init_from_params(params);
+    llama_context & ctx = *ctx_ptr;
+    gpt_vocab & vocab = llama_context_get_vocab(ctx);
 
     // print system information
-    {
-        fprintf(stderr, "\n");
-        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
-                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
-    }
-
-    int n_past = 0;
-
-    int64_t t_sample_us = 0;
-    int64_t t_predict_us = 0;
-
-    std::vector<float> logits;
+    llama_print_context_info(ctx);
 
     // Add a space in front of the first character to match OG llama tokenizer behavior
     params.prompt.insert(0, 1, ' ');
 
     // tokenize the prompt
-    std::vector<gpt_vocab::id> embd_inp = ::llama_tokenize(vocab, params.prompt, true);
-
-    params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());
+    std::vector<gpt_vocab::id> embd_inp = llama_tokenize_text(ctx, params.prompt);
 
     // prefix & suffix for instruct mode
     const std::vector<gpt_vocab::id> inp_pfx = ::llama_tokenize(vocab, "\n\n### Instruction:\n\n", true);
@@ -154,24 +112,8 @@ int main(int argc, char ** argv) {
     }
 
     // tokenize the reverse prompt
-    std::vector<std::vector<gpt_vocab::id>> antipromptv_inp;
-
-    for (auto antiprompt : params.antiprompt) {
-        antipromptv_inp.push_back(::llama_tokenize(vocab, antiprompt, false));
-    }
+    std::vector<gpt_vocab::id> antiprompt_inp = llama_tokenize_text(ctx, params.prompt);
 
-    // enable interactive mode if reverse prompt is specified
-    if (!antipromptv_inp.size()) {
-        params.interactive = true;
-    }
-
-    fprintf(stderr, "\n");
-    fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
-    fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
-    for (int i = 0; i < (int) embd_inp.size(); i++) {
-        fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
-    }
-    fprintf(stderr, "\n");
     if (params.interactive) {
 #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
         struct sigaction sigint_action;
@@ -200,16 +142,6 @@ int main(int argc, char ** argv) {
     fprintf(stderr, "sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
     fprintf(stderr, "\n\n");
 
-    std::vector<gpt_vocab::id> embd;
-
-    // determine the required inference memory per token:
-    size_t mem_per_token = 0;
-    llama_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
-
-    int last_n_size = params.repeat_last_n;
-    std::vector<gpt_vocab::id> last_n_tokens(last_n_size);
-    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
-
     if (params.interactive) {
         fprintf(stderr, "== Running in interactive mode. ==\n"
 #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
@@ -220,7 +152,6 @@ int main(int argc, char ** argv) {
         is_interacting = true;
     }
 
-    int input_consumed = 0;
     bool input_noecho = false;
 
     int remaining_tokens = params.n_predict;
@@ -230,75 +161,33 @@ int main(int argc, char ** argv) {
         printf(ANSI_COLOR_YELLOW);
    }
 
-    while (remaining_tokens > 0 || params.interactive) {
-        // predict
-        if (embd.size() > 0) {
-            const int64_t t_start_us = ggml_time_us();
+    if(!llama_injest_input(ctx, params.prompt))
+    {
+        fprintf(stderr, "Failed to injest prompt\n");
+        return 1;
+    };
 
-            if (!llama_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) {
-                fprintf(stderr, "Failed to predict\n");
-                return 1;
-            }
-
-            t_predict_us += ggml_time_us() - t_start_us;
-        }
-
-        n_past += embd.size();
-        embd.clear();
-
-        if (embd_inp.size() <= input_consumed) {
-            // out of user input, sample next token
-            const float top_k = params.top_k;
-            const float top_p = params.top_p;
-            const float temp = params.temp;
-            const float repeat_penalty = params.repeat_penalty;
-
-            const int n_vocab = model.hparams.n_vocab;
-
-            gpt_vocab::id id = 0;
-
-            {
-                const int64_t t_start_sample_us = ggml_time_us();
-
-                if (params.ignore_eos) {
-                    // set the logit of the eos token to zero to avoid sampling it
-                    logits[logits.size() - n_vocab + EOS_TOKEN_ID] = 0;
-                }
-
-                id = llama_sample_top_p_top_k(vocab, logits.data() + (logits.size() - n_vocab), last_n_tokens, repeat_penalty, top_k, top_p, temp, rng);
-
-                last_n_tokens.erase(last_n_tokens.begin());
-                last_n_tokens.push_back(id);
-
-                t_sample_us += ggml_time_us() - t_start_sample_us;
-            }
-
-            // add it to the context
-            embd.push_back(id);
-
-            // echo this to console
-            input_noecho = false;
-
-            // decrement remaining sampling budget
-            --remaining_tokens;
-        } else {
-            // some user input remains from prompt or interaction, forward it to processing
-            while (embd_inp.size() > input_consumed) {
-                embd.push_back(embd_inp[input_consumed]);
-                last_n_tokens.erase(last_n_tokens.begin());
-                last_n_tokens.push_back(embd_inp[input_consumed]);
-                ++input_consumed;
-                if ((int) embd.size() >= params.n_batch) {
-                    break;
-                }
-            }
-        }
-
-        // display text
-        if (!input_noecho) {
+    // display text
+    input_noecho = false;
+    const std::vector<gpt_vocab::id>& embd = llama_context_get_embd(ctx);
+    if (!input_noecho) {
         for (auto id : embd) {
-                printf("%s", vocab.id_to_token[id].c_str());
-            }
+            printf("%s", vocab.id_to_token[id].c_str());
+        }
+        fflush(stdout);
+    }
+
+    if (!input_noecho && params.use_color) {
+        printf(ANSI_COLOR_RESET);
+    }
+
+    const std::vector<gpt_vocab::id>& last_n_tokens = llama_context_get_last_n_tokens(ctx);
+
+    while (llama_context_not_finished(ctx) > 0) {
+        gpt_vocab::id model_output = 0;
+        bool response = llama_inference(ctx, model_output);
+        if (response) {
+            printf("%s", vocab.id_to_token[model_output].c_str());
            fflush(stdout);
        }
 
         // reset color to default if we there is no pending user input
@@ -306,9 +195,10 @@ int main(int argc, char ** argv) {
             printf(ANSI_COLOR_RESET);
         }
 
+        // in interactive mode, and not currently processing queued inputs;
         // check if we should prompt the user for more
-        if (params.interactive && embd_inp.size() <= input_consumed) {
+        if (params.interactive) {
             // check for reverse prompt
             for (auto antiprompt_inp : antipromptv_inp) {
                 if (antiprompt_inp.size() && std::equal(antiprompt_inp.rbegin(), antiprompt_inp.rend(), last_n_tokens.rbegin())) {
@@ -337,15 +227,8 @@ int main(int argc, char ** argv) {
                    } else {
                        line.pop_back(); // Remove the continue character
                    }
-                    buffer += line + '\n'; // Append the line to the result
-                } while (another_line);
-                if (params.use_color) printf(ANSI_COLOR_RESET);
-
-                std::vector<gpt_vocab::id> line_inp = ::llama_tokenize(vocab, buffer, false);
-                embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
-
-                if (params.instruct) {
-                    embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
+                    // Do not clear existing context in interactive mode
+                    llama_init_context_with_prompt(ctx, buf, false);
                 }
 
                 remaining_tokens -= line_inp.size();
@@ -371,24 +254,14 @@ int main(int argc, char ** argv) {
             is_interacting = true;
         }
     }
-
-#if defined (_WIN32)
-    signal(SIGINT, SIG_DFL);
-#endif
-
-    // report timing
+
+    // report timing from context
     {
         const int64_t t_main_end_us = ggml_time_us();
-
-        fprintf(stderr, "\n\n");
-        fprintf(stderr, "%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
-        fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
-        fprintf(stderr, "%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
-        fprintf(stderr, "%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
+        llama_print_end_stats(ctx);
         fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
     }
-
-    ggml_free(model.ctx);
+    llama_free_context(ctx_ptr);
 
     if (params.use_color) {
         printf(ANSI_COLOR_RESET);