common : use common_ prefix for common library functions (#9805)

* common : use common_ prefix for common library functions

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diego Devesa 2024-10-10 22:57:42 +02:00 committed by GitHub
parent 0e9f760eb1
commit 7eee341bee
45 changed files with 1284 additions and 1284 deletions
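
The diff below is a mechanical rename: every gpt_-prefixed helper in the common library becomes common_-prefixed, and the llama_tokenize / llama_token_to_piece / llama_chat_* convenience wrappers become common_tokenize / common_token_to_piece / common_chat_*. As a quick orientation, here is a minimal sketch of a caller written against the new names; it only uses helpers that appear in the hunks below, but the include paths and the print_usage stub are illustrative assumptions, not part of this commit.

    // Minimal sketch of a caller using the renamed common_ helpers (new names only).
    // The include paths and the print_usage stub are illustrative assumptions.
    #include "common.h"     // common_params, common_params_parse, common_init_from_params, common_tokenize
    #include "sampling.h"   // common_sampler_init, common_sampler_print, common_sampler_free

    #include <cstdio>
    #include <vector>

    static void print_usage(int /*argc*/, char ** /*argv*/) {
        // a real example would print its usage text here
    }

    int main(int argc, char ** argv) {
        common_params params;                                              // was: gpt_params
        if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {  // was: gpt_params_parse
            return 1;
        }
        common_init();                                                     // was: gpt_init

        common_init_result llama_init = common_init_from_params(params);  // was: llama_init_from_gpt_params
        llama_model   * model = llama_init.model;
        llama_context * ctx   = llama_init.context;
        if (model == nullptr || ctx == nullptr) {
            return 1;
        }

        // tokenization wrappers: llama_tokenize -> common_tokenize, llama_token_to_piece -> common_token_to_piece
        const std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, true, true);
        for (const llama_token t : tokens) {
            printf("%6d -> '%s'\n", t, common_token_to_piece(ctx, t).c_str());
        }

        // sampling helpers: gpt_sampler_* -> common_sampler_*
        common_sampler * smpl = common_sampler_init(model, params.sparams);
        printf("sampler chain: %s\n", common_sampler_print(smpl).c_str());
        // (a real loop would llama_decode() a batch and call common_sampler_sample(), as the diff below shows)

        common_sampler_free(smpl);                                         // was: gpt_sampler_free
        llama_free(ctx);
        llama_free_model(model);
        return 0;
    }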


@@ -33,8 +33,8 @@
 static llama_context ** g_ctx;
 static llama_model ** g_model;
-static gpt_sampler ** g_smpl;
-static gpt_params * g_params;
+static common_sampler ** g_smpl;
+static common_params * g_params;
 static std::vector<llama_token> * g_input_tokens;
 static std::ostringstream * g_output_ss;
 static std::vector<llama_token> * g_output_tokens;
@@ -63,7 +63,7 @@ static bool file_is_empty(const std::string & path) {
 }
 static void write_logfile(
-const llama_context * ctx, const gpt_params & params, const llama_model * model,
+const llama_context * ctx, const common_params & params, const llama_model * model,
 const std::vector<llama_token> & input_tokens, const std::string & output,
 const std::vector<llama_token> & output_tokens
 ) {
@@ -114,12 +114,12 @@ static void sigint_handler(int signo) {
 } else {
 console::cleanup();
 LOG("\n");
-gpt_perf_print(*g_ctx, *g_smpl);
+common_perf_print(*g_ctx, *g_smpl);
 write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
 // make sure all logs are flushed
 LOG("Interrupted by user\n");
-gpt_log_pause(gpt_log_main());
+common_log_pause(common_log_main());
 _exit(130);
 }
@@ -127,22 +127,22 @@ static void sigint_handler(int signo) {
 }
 #endif
-static std::string chat_add_and_format(struct llama_model * model, std::vector<llama_chat_msg> & chat_msgs, const std::string & role, const std::string & content) {
-llama_chat_msg new_msg{role, content};
-auto formatted = llama_chat_format_single(model, g_params->chat_template, chat_msgs, new_msg, role == "user");
+static std::string chat_add_and_format(struct llama_model * model, std::vector<common_chat_msg> & chat_msgs, const std::string & role, const std::string & content) {
+common_chat_msg new_msg{role, content};
+auto formatted = common_chat_format_single(model, g_params->chat_template, chat_msgs, new_msg, role == "user");
 chat_msgs.push_back({role, content});
 LOG_DBG("formatted: '%s'\n", formatted.c_str());
 return formatted;
 }
 int main(int argc, char ** argv) {
-gpt_params params;
+common_params params;
 g_params = &params;
-if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {
+if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MAIN, print_usage)) {
 return 1;
 }
-gpt_init();
+common_init();
 auto & sparams = params.sparams;
@@ -187,9 +187,9 @@ int main(int argc, char ** argv) {
 llama_model * model = nullptr;
 llama_context * ctx = nullptr;
-gpt_sampler * smpl = nullptr;
+common_sampler * smpl = nullptr;
-std::vector<llama_chat_msg> chat_msgs;
+std::vector<common_chat_msg> chat_msgs;
 g_model = &model;
 g_ctx = &ctx;
@@ -197,7 +197,7 @@ int main(int argc, char ** argv) {
 // load the model and apply lora adapter, if any
 LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
-llama_init_result llama_init = llama_init_from_gpt_params(params);
+common_init_result llama_init = common_init_from_params(params);
 model = llama_init.model;
 ctx = llama_init.context;
@@ -246,7 +246,7 @@ int main(int argc, char ** argv) {
 // print chat template example in conversation mode
 if (params.conversation) {
 if (params.enable_chat_template) {
-LOG_INF("%s: chat template example:\n%s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());
+LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(model, params.chat_template).c_str());
 } else {
 LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
 }
@@ -255,7 +255,7 @@ int main(int argc, char ** argv) {
 // print system information
 {
 LOG_INF("\n");
-LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
+LOG_INF("%s\n", common_params_get_system_info(params).c_str());
 LOG_INF("\n");
 }
@@ -296,7 +296,7 @@ int main(int argc, char ** argv) {
 : params.prompt;
 if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
 LOG_DBG("tokenize the prompt\n");
-embd_inp = ::llama_tokenize(ctx, prompt, true, true);
+embd_inp = common_tokenize(ctx, prompt, true, true);
 } else {
 LOG_DBG("use session tokens\n");
 embd_inp = session_tokens;
@@ -379,13 +379,13 @@ int main(int argc, char ** argv) {
 LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
 LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
 for (int i = 0; i < (int) embd_inp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", embd_inp[i], common_token_to_piece(ctx, embd_inp[i]).c_str());
 }
 if (params.n_keep > add_bos) {
 LOG_INF("%s: static prompt based on n_keep: '", __func__);
 for (int i = 0; i < params.n_keep; i++) {
-LOG_CNT("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
+LOG_CNT("%s", common_token_to_piece(ctx, embd_inp[i]).c_str());
 }
 LOG_CNT("'\n");
 }
@@ -415,9 +415,9 @@ int main(int argc, char ** argv) {
 for (const auto & antiprompt : params.antiprompt) {
 LOG_INF("Reverse prompt: '%s'\n", antiprompt.c_str());
 if (params.verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx, antiprompt, false, true);
+auto tmp = common_tokenize(ctx, antiprompt, false, true);
 for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
 }
 }
 }
@@ -430,9 +430,9 @@ int main(int argc, char ** argv) {
 if (!params.input_prefix.empty()) {
 LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
 if (params.verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx, params.input_prefix, true, true);
+auto tmp = common_tokenize(ctx, params.input_prefix, true, true);
 for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
 }
 }
 }
@@ -440,23 +440,23 @@ int main(int argc, char ** argv) {
 if (!params.input_suffix.empty()) {
 LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
 if (params.verbose_prompt) {
-auto tmp = ::llama_tokenize(ctx, params.input_suffix, false, true);
+auto tmp = common_tokenize(ctx, params.input_suffix, false, true);
 for (int i = 0; i < (int) tmp.size(); i++) {
-LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx, tmp[i]).c_str());
+LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
 }
 }
 }
 }
-smpl = gpt_sampler_init(model, sparams);
+smpl = common_sampler_init(model, sparams);
 if (!smpl) {
 LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
 return 1;
 }
-LOG_INF("sampler seed: %u\n", gpt_sampler_get_seed(smpl));
+LOG_INF("sampler seed: %u\n", common_sampler_get_seed(smpl));
 LOG_INF("sampler params: \n%s\n", sparams.print().c_str());
-LOG_INF("sampler chain: %s\n", gpt_sampler_print(smpl).c_str());
+LOG_INF("sampler chain: %s\n", common_sampler_print(smpl).c_str());
 LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
@@ -521,7 +521,7 @@ int main(int argc, char ** argv) {
 antiprompt_ids.reserve(params.antiprompt.size());
 for (const std::string & antiprompt : params.antiprompt) {
-antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
+antiprompt_ids.emplace_back(::common_tokenize(ctx, antiprompt, false, true));
 }
 if (llama_model_has_encoder(model)) {
@@ -679,9 +679,9 @@ int main(int argc, char ** argv) {
 LOG_DBG("saved session to %s\n", path_session.c_str());
 }
-const llama_token id = gpt_sampler_sample(smpl, ctx, -1);
+const llama_token id = common_sampler_sample(smpl, ctx, -1);
-gpt_sampler_accept(smpl, id, /* accept_grammar= */ true);
+common_sampler_accept(smpl, id, /* accept_grammar= */ true);
 // LOG_DBG("last: %s\n", string_from(ctx, smpl->prev.to_vector()).c_str());
@@ -702,7 +702,7 @@ int main(int argc, char ** argv) {
 // push the prompt in the sampling context in order to apply repetition penalties later
 // for the prompt, we don't apply grammar rules
-gpt_sampler_accept(smpl, embd_inp[n_consumed], /* accept_grammar= */ false);
+common_sampler_accept(smpl, embd_inp[n_consumed], /* accept_grammar= */ false);
 ++n_consumed;
 if ((int) embd.size() >= params.n_batch) {
@@ -714,7 +714,7 @@ int main(int argc, char ** argv) {
 // display text
 if (input_echo && display) {
 for (auto id : embd) {
-const std::string token_str = llama_token_to_piece(ctx, id, params.special);
+const std::string token_str = common_token_to_piece(ctx, id, params.special);
 // Console/Stream Output
 LOG("%s", token_str.c_str());
@@ -743,7 +743,7 @@ int main(int argc, char ** argv) {
 // check for reverse prompt in the last n_prev tokens
 if (!params.antiprompt.empty()) {
 const int n_prev = 32;
-const std::string last_output = gpt_sampler_prev_str(smpl, ctx, n_prev);
+const std::string last_output = common_sampler_prev_str(smpl, ctx, n_prev);
 is_antiprompt = false;
 // Check if each of the reverse prompts appears at the end of the output.
@@ -765,7 +765,7 @@ int main(int argc, char ** argv) {
 }
 // check for reverse prompt using special tokens
-llama_token last_token = gpt_sampler_last(smpl);
+llama_token last_token = common_sampler_last(smpl);
 for (std::vector<llama_token> ids : antiprompt_ids) {
 if (ids.size() == 1 && last_token == ids[0]) {
 if (params.interactive) {
@@ -782,13 +782,13 @@ int main(int argc, char ** argv) {
 }
 // deal with end of generation tokens in interactive mode
-if (llama_token_is_eog(model, gpt_sampler_last(smpl))) {
+if (llama_token_is_eog(model, common_sampler_last(smpl))) {
 LOG_DBG("found an EOG token\n");
 if (params.interactive) {
 if (!params.antiprompt.empty()) {
 // tokenize and inject first reverse prompt
-const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false, true);
+const auto first_antiprompt = common_tokenize(ctx, params.antiprompt.front(), false, true);
 embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
 is_antiprompt = true;
 }
@@ -803,8 +803,8 @@ int main(int argc, char ** argv) {
 // if current token is not EOG, we add it to current assistant message
 if (params.conversation) {
-const auto id = gpt_sampler_last(smpl);
-assistant_ss << llama_token_to_piece(ctx, id, false);
+const auto id = common_sampler_last(smpl);
+assistant_ss << common_token_to_piece(ctx, id, false);
 }
 if (n_past > 0 && is_interacting) {
@@ -862,9 +862,9 @@ int main(int argc, char ** argv) {
 ? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
 : std::move(buffer);
 // TODO: one inconvenient of current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
-const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
-const auto line_inp = ::llama_tokenize(ctx, user_inp, false, format_chat);
-const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
+const auto line_pfx = common_tokenize(ctx, params.input_prefix, false, true);
+const auto line_inp = common_tokenize(ctx, user_inp, false, format_chat);
+const auto line_sfx = common_tokenize(ctx, params.input_suffix, false, true);
 LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());
@@ -882,7 +882,7 @@ int main(int argc, char ** argv) {
 for (size_t i = original_size; i < embd_inp.size(); ++i) {
 const llama_token token = embd_inp[i];
 output_tokens.push_back(token);
-output_ss << llama_token_to_piece(ctx, token);
+output_ss << common_token_to_piece(ctx, token);
 }
 // reset assistant message
@@ -899,7 +899,7 @@ int main(int argc, char ** argv) {
 if (n_past > 0) {
 if (is_interacting) {
-gpt_sampler_reset(smpl);
+common_sampler_reset(smpl);
 }
 is_interacting = false;
 }
@@ -925,10 +925,10 @@ int main(int argc, char ** argv) {
 }
 LOG("\n\n");
-gpt_perf_print(ctx, smpl);
+common_perf_print(ctx, smpl);
 write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
-gpt_sampler_free(smpl);
+common_sampler_free(smpl);
 llama_free(ctx);
 llama_free_model(model);