changed token functions to use new model variants

Marcus Dunn 2023-10-23 09:30:00 -07:00
parent 2df3801706
commit fc5bb85545
10 changed files with 33 additions and 33 deletions
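
Every hunk in this commit applies the same mechanical change: the token-metadata getters (llama_token_bos, llama_token_eos, llama_token_eot, llama_token_prefix, llama_token_suffix, llama_token_middle, llama_token_get_text) now take a const llama_model * instead of a llama_context *. Call sites that already hold the model pass it directly; call sites that only hold a context recover the model with llama_get_model(). A minimal sketch of the pattern (eos_for is a hypothetical helper, not part of this commit):

    #include "llama.h"

    // The getter now takes the model; llama_get_model(ctx) returns the
    // context's const llama_model *, so context-only call sites still work.
    static llama_token eos_for(llama_context * ctx) {
        return llama_token_eos(llama_get_model(ctx));
    }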

View file

@@ -236,8 +236,8 @@ int64_t get_example_targets_batch(
     int64_t used_samples = 0;
     ggml_set_f32(target_probs, 0.0f);
-    llama_token bos = llama_token_bos(lctx);
-    llama_token eos = llama_token_eos(lctx);
+    llama_token bos = llama_token_bos(llama_get_model(lctx));
+    llama_token eos = llama_token_eos(llama_get_model(lctx));
     // printf("%s: example_id=%d n_batch=%d n_train_samples=%zu\n", __func__, example_id, n_batch, n_train_samples);
     for (int k=0; k<n_batch; ++k) {
         // printf("%s: batch %d\n", __func__, k);
@@ -924,7 +924,7 @@ size_t tokenize_file(
     for (llama_token token=0; token < n_vocab; ++token) {
         max_token_text_size = std::max(
                 max_token_text_size,
-                strlen(llama_token_get_text(lctx, token)));
+                strlen(llama_token_get_text(llama_get_model(lctx), token)));
     }
     // upper bound of context byte length.

View file

@@ -180,7 +180,7 @@ int main(int argc, char ** argv) {
             //const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
             // is it an end of stream? -> mark the stream as finished
-            if (new_token_id == llama_token_eos(ctx) || n_cur == n_len) {
+            if (new_token_id == llama_token_eos(model) || n_cur == n_len) {
                 i_batch[i] = -1;
                 LOG_TEE("\n");
                 if (n_parallel > 1) {

View file

@@ -47,7 +47,7 @@ struct beam_search_callback_data {
 // In this case, end-of-beam (eob) is equivalent to end-of-sentence (eos) but this need not always be the same.
 // For example, eob can be flagged due to maximum token length, stop words, etc.
 static bool is_at_eob(const beam_search_callback_data & callback_data, const llama_token * tokens, size_t n_tokens) {
-    return n_tokens && tokens[n_tokens-1] == llama_token_eos(callback_data.ctx);
+    return n_tokens && tokens[n_tokens-1] == llama_token_eos(llama_get_model(callback_data.ctx));
 }
 // Function matching type llama_beam_search_callback_fn_t.

View file

@@ -246,14 +246,14 @@ int main(int argc, char ** argv) {
         if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
             inp_sfx.erase(inp_sfx.begin());
         }
-        inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(ctx));
+        inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
         if (add_bos) {
-            inp_pfx.insert(inp_pfx.begin(), llama_token_bos(ctx));
+            inp_pfx.insert(inp_pfx.begin(), llama_token_bos(model));
         }
-        inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(ctx));
+        inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
         embd_inp = inp_pfx;
         embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
-        embd_inp.push_back(llama_token_middle(ctx));
+        embd_inp.push_back(llama_token_middle(model));
         LOG("prefix: \"%s\"\n", log_tostr(params.input_prefix));
         LOG("suffix: \"%s\"\n", log_tostr(params.input_suffix));
@@ -261,7 +261,7 @@ int main(int argc, char ** argv) {
     // Should not run without any tokens
     if (embd_inp.empty()) {
-        embd_inp.push_back(llama_token_bos(ctx));
+        embd_inp.push_back(llama_token_bos(model));
         LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
     }
@@ -577,10 +577,10 @@ int main(int argc, char ** argv) {
         if ((int) embd_inp.size() <= n_consumed) {
             // deal with eot token in infill mode
-            if ((llama_sampling_last(ctx_sampling) == llama_token_eot(ctx) || is_interacting) && params.interactive){
+            if ((llama_sampling_last(ctx_sampling) == llama_token_eot(model) || is_interacting) && params.interactive){
                 if(is_interacting && !params.interactive_first) {
                     // print an eot token
-                    printf("%s", llama_token_to_piece(ctx, llama_token_eot(ctx)).c_str());
+                    printf("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
                 }
                 fflush(stdout);
                 printf("\n");
@@ -627,14 +627,14 @@ int main(int argc, char ** argv) {
                 if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
                     inp_sfx.erase(inp_sfx.begin());
                 }
-                inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(ctx));
+                inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
                 if (add_bos) {
-                    inp_pfx.insert(inp_pfx.begin(), llama_token_bos(ctx));
+                    inp_pfx.insert(inp_pfx.begin(), llama_token_bos(model));
                 }
-                inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(ctx));
+                inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
                 embd_inp = inp_pfx;
                 embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
-                embd_inp.push_back(llama_token_middle(ctx));
+                embd_inp.push_back(llama_token_middle(model));
                 embd.clear();
                 embd_guidance.clear();
                 n_remain = params.n_predict;
@@ -644,7 +644,7 @@ int main(int argc, char ** argv) {
                 is_interacting = false;
             }
             // deal with end of text token in interactive mode
-            else if (llama_sampling_last(ctx_sampling) == llama_token_eos(ctx)) {
+            else if (llama_sampling_last(ctx_sampling) == llama_token_eos(model)) {
                 LOG("found EOS token\n");
                 if (params.interactive) {
@@ -661,7 +661,7 @@ int main(int argc, char ** argv) {
             if (params.input_prefix_bos) {
                 LOG("adding input prefix BOS token\n");
-                embd_inp.push_back(llama_token_bos(ctx));
+                embd_inp.push_back(llama_token_bos(model));
             }
             std::string buffer;
@@ -724,7 +724,7 @@ int main(int argc, char ** argv) {
         }
         // end of text token
-        if (!embd.empty() && embd.back() == llama_token_eos(ctx) && !params.interactive) {
+        if (!embd.empty() && embd.back() == llama_token_eos(model) && !params.interactive) {
             break;
         }
@@ -736,7 +736,7 @@ int main(int argc, char ** argv) {
         }
     }
     if (!params.interactive && n_remain <= 0) {
-        printf("%s", llama_token_to_piece(ctx, llama_token_eot(ctx)).c_str());
+        printf("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
         fflush(stdout);
     }
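
For reference, the infill hunks above build the fill-in-the-middle prompt in a fixed order; a sketch of the resulting token layout, inferred from the insertions shown (not code from the commit):

    // embd_inp after assembly:
    //   [BOS]  <PRE>  prefix-tokens  <SUF>  suffix-tokens  <MID>
    // BOS only when add_bos is set; <PRE>/<SUF>/<MID> come from
    // llama_token_prefix/suffix/middle(model).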

View file

@@ -933,7 +933,7 @@ struct sql_printer : public printer {
 };
 static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
-    std::vector<llama_token> tokens(n_batch, llama_token_bos(ctx));
+    std::vector<llama_token> tokens(n_batch, llama_token_bos(llama_get_model(ctx)));
     int n_processed = 0;
     llama_set_n_threads(ctx, n_threads, n_threads);
@@ -946,7 +946,7 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
 }
 static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) {
-    llama_token token = llama_token_bos(ctx);
+    llama_token token = llama_token_bos(llama_get_model(ctx));
     llama_set_n_threads(ctx, n_threads, n_threads);

View file

@@ -137,7 +137,7 @@ inline llama_token sample_id(llama_context * ctx_llama, gpt_params & params) {
 inline const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) {
     int id = sample_id(ctx_llama, params);
     static std::string ret;
-    if (id == llama_token_eos(ctx_llama)) {
+    if (id == llama_token_eos(llama_get_model(ctx_llama))) {
         ret = "</s>";
     } else {
         ret = llama_token_to_piece(ctx_llama, id);

View file

@@ -347,7 +347,7 @@ int main(int argc, char ** argv) {
                 // client.id, client.seq_id, id, client.n_decoded, client.i_batch, token_str.c_str());
                 if (client.n_decoded > 2 &&
-                    (id == llama_token_eos(ctx) ||
+                    (id == llama_token_eos(model) ||
                      (params.n_predict > 0 && client.n_decoded + client.n_prompt >= params.n_predict) ||
                      client.response.find("User:") != std::string::npos ||
                      client.response.find('\n') != std::string::npos)) {

View file

@@ -726,7 +726,7 @@ struct llama_server_context
        if (json_value(data, "ignore_eos", false))
        {
-            slot->sparams.logit_bias[llama_token_eos(ctx)] = -INFINITY;
+            slot->sparams.logit_bias[llama_token_eos(model)] = -INFINITY;
        }
        const auto &logit_bias = data.find("logit_bias");
@@ -1056,7 +1056,7 @@ struct llama_server_context
            slot.has_next_token = false;
        }
-        if (!slot.cache_tokens.empty() && result.tok == llama_token_eos(ctx))
+        if (!slot.cache_tokens.empty() && result.tok == llama_token_eos(model))
        {
            slot.stopped_eos = true;
            slot.has_next_token = false;
@@ -1130,7 +1130,7 @@ struct llama_server_context
    json get_formated_generation(llama_client_slot &slot)
    {
-        const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(ctx));
+        const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
        const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() &&
                                eos_bias->second < 0.0f && std::isinf(eos_bias->second);
        return json {
@@ -1555,11 +1555,11 @@ struct llama_server_context
                suffix_tokens.erase(suffix_tokens.begin());
            }
-            prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(ctx));
-            prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(ctx)); // always add BOS
-            prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(ctx));
+            prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
+            prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
+            prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(model));
            prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
-            prefix_tokens.push_back(llama_token_middle(ctx));
+            prefix_tokens.push_back(llama_token_middle(model));
            prompt_tokens = prefix_tokens;
        }
        else
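
A side note on the ignore_eos hunks above: setting the EOS bias to -INFINITY suppresses the token at sampling time, and detection needs both checks because std::isinf alone would also match +INFINITY. A minimal sketch, assuming logit_bias maps llama_token to float as in the slot params (eos_ignored is a hypothetical helper, not part of this commit):

    #include <cmath>
    #include <unordered_map>
    #include "llama.h"

    // EOS is treated as ignored iff a bias exists and is negative infinity.
    static bool eos_ignored(const std::unordered_map<llama_token, float> & logit_bias,
                            llama_token eos_id) {
        const auto it = logit_bias.find(eos_id);
        return it != logit_bias.end() && it->second < 0.0f && std::isinf(it->second);
    }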

View file

@@ -138,7 +138,7 @@ int main(int argc, char ** argv) {
         const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
         // is it an end of stream?
-        if (new_token_id == llama_token_eos(ctx) || n_cur == n_len) {
+        if (new_token_id == llama_token_eos(model) || n_cur == n_len) {
             LOG_TEE("\n");
             break;

View file

@@ -163,7 +163,7 @@ int main(int argc, char ** argv) {
         printf("%s", token_str.c_str());
         fflush(stdout);
-        if (id == llama_token_eos(ctx_tgt)) {
+        if (id == llama_token_eos(model_tgt)) {
             has_eos = true;
         }