diff --git a/common/common.cpp b/common/common.cpp
index e624fc7f3..4cc482a30 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -905,6 +905,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param) {
         params.interactive_specials = true;
         return true;
     }
+    if (arg == "--ctrl-token-no-out") {
+        params.ctrl_token_no_out = true;
+        return true;
+    }
+    if (arg == "--ctrl-token-fd-out") {
+        params.ctrl_token_fd_out = true;
+        return true;
+    }
     if (arg == "--embedding") {
         params.embedding = true;
         return true;
@@ -1433,7 +1441,11 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf("  --version             show version and build info\n");
     printf("  -i, --interactive     run in interactive mode\n");
     printf("  --interactive-specials allow special tokens in user text, in interactive mode\n");
     printf("  --interactive-first   run in interactive mode and wait for input right away\n");
+    printf("  --ctrl-token-no-out   control tokens output disabled\n");
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+    printf("  --ctrl-token-fd-out   control tokens sent to file descriptor 3 out of band\n");
+#endif
     printf("  -cnv, --conversation  run in conversation mode (does not print special tokens and suffix/prefix)\n");
     printf("  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
     printf("  -cml, --chatml        run in chatml mode (use with ChatML-compatible models)\n");
diff --git a/common/common.h b/common/common.h
index 566490e2f..20a776f6b 100644
--- a/common/common.h
+++ b/common/common.h
@@ -142,6 +142,8 @@ struct gpt_params {
     bool use_color            = false; // use color to distinguish generations and inputs
     bool interactive          = false; // interactive mode
     bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
+    bool ctrl_token_no_out    = false; // disable control token output
+    bool ctrl_token_fd_out    = false; // enable control token output and redirect it to file descriptor 3
     bool conversation         = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool chatml               = false; // chatml mode (used for models trained on chatml syntax)
     bool prompt_cache_all     = false; // save user input and generations to prompt cache
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 5c453a57e..8effd02a8 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -18,6 +18,7 @@
 #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
 #include <signal.h>
 #include <unistd.h>
+#define CONTROL_TOKEN_FILE_DESCRIPTOR (3)
 #elif defined (_WIN32)
 #define WIN32_LEAN_AND_MEAN
 #ifndef NOMINMAX
@@ -528,21 +529,6 @@ int main(int argc, char ** argv) {
             exit(1);
         }
 
-    // Create the pipe for special token handling
-    int stok_pipe[2] = {0};
-    if (pipe(stok_pipe) == -1) {
-        fprintf(stderr, "%s: failed to initialize special token output stream\n", __func__);
-        exit(1);
-    }
-
-    close(stok_pipe[0]); // Read Special Token Not In Use
-
-    FILE *special_token_stream_output_fd = fdopen(stok_pipe[1], "w");
-    if (special_token_stream_output_fd == NULL) {
-        fprintf(stderr, "%s: failed to open special token output stream\n", __func__);
-        exit(1);
-    }
-
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
         if (!embd.empty()) {
@@ -758,12 +744,22 @@ int main(int argc, char ** argv) {
                 const std::string token_str = llama_token_to_piece(ctx, id);
 
                 // Console/Stream Output
-                if (llama_token_is_control_token(llama_get_model(ctx), id)) {
-                    // Stream Output Token To Special Token Output
-                    fprintf(special_token_stream_output_fd, "%s", token_str.c_str());
-                } else {
+                if (!llama_token_is_control_token(llama_get_model(ctx), id)) {
                     // Stream Output Token To Standard Output
                     fprintf(stdout, "%s", token_str.c_str());
+                } else if (!params.ctrl_token_no_out) {
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+                    if (params.ctrl_token_fd_out) {
+                        // Stream Control Token To Special Token Output. Useful for debugging control token behaviour
+                        dprintf(CONTROL_TOKEN_FILE_DESCRIPTOR, "%s", token_str.c_str());
+                    }
+                    else
+#endif
+                    if (!params.conversation && sparams.grammar.empty())
+                    {
+                        // Stream Control Token To Standard Output as long as we are not in a conversation or grammar output
+                        fprintf(stdout, "%s", token_str.c_str());
+                    }
                 }
 
                 // Record Displayed Tokens To Log
@@ -983,8 +979,6 @@ int main(int argc, char ** argv) {
     llama_sampling_free(ctx_sampling);
     llama_backend_free();
 
-    fclose(special_token_stream_output_fd);
-
 #ifndef LOG_DISABLE_LOGS
     LOG_TEE("Log end\n");
 #endif // LOG_DISABLE_LOGS