formatting, remove copying iterator on delete
parent eac5f5442c
commit f7b1205a51

1 changed file with 23 additions and 89 deletions
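Editorial note on the main change: the old code erased a node from the std::forward_list by backing up the iterator's index with std::distance, calling erase_after, and then rebuilding the iterator with std::next. Since forward_list::erase_after invalidates only iterators to the erased node itself, that backup/restore is unnecessary when the surviving iterator points elsewhere, and the commit collapses each branch to a single erase_after call. A minimal standalone sketch of the pattern (illustrative only, not the llama.cpp code; the list contents and the `source` index are made up):

    #include <cstdio>
    #include <forward_list>
    #include <iterator>

    int main() {
        std::forward_list<int> buffer = {10, 20, 30, 40};
        long source = 1; // index of the node to erase (hypothetical value)

        // erase_after needs the predecessor of the doomed node:
        // before_begin() when erasing the head, otherwise the node at source-1.
        // Iterators to the remaining nodes stay valid across the erase,
        // so no std::distance backup / std::next restore is required.
        if (source == 0) buffer.erase_after(buffer.before_begin());
        else             buffer.erase_after(std::next(buffer.begin(), source - 1));

        for (int v : buffer) std::printf("%d ", v); // prints: 10 30 40
        std::printf("\n");
        return 0;
    }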
llama.cpp
@@ -2294,7 +2294,6 @@ static void llm_load_vocab(
                         is_tokenizable = true;
                         break;
                     }
-
                     i++;
                 }
                 else
@@ -2321,7 +2320,6 @@ static void llm_load_vocab(
             if (utf8_str_len > 1)
             {
                 // At this point what we have left are special tokens only
-
                 vocab.special_tokens_cache[token] = id;
 
                 // Count manually found special tokens
@@ -2337,7 +2335,7 @@ static void llm_load_vocab(
 
     if( special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type )
     {
-        fprintf(stderr, "%s: WARNING: Mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
+        fprintf(stderr, "warning: %s: Mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
             __func__,
             special_tokens_count_from_verification, vocab.id_to_token.size(),
             special_tokens_count_by_type, vocab.id_to_token.size()
@@ -6608,7 +6606,7 @@ struct fragment_buffer_variant{
     const uint64_t length;
 };
 
-#define PRETOKENIZERDEBUG
+// #define PRETOKENIZERDEBUG
 
 static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer)
 {
@@ -6619,7 +6617,6 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
         const auto & special_id = st.second;
 
         // for each text fragment
-        //for (auto & fragment: buffer)
         std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
         while (it != buffer.end())
         {
@@ -6640,22 +6637,14 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                    auto match = raw_text->find( special_token, raw_text_base_offset );
 
                    // no occurences found, stop processing this fragment for a given special token
-                   if (match == std::string::npos)
-                   {
-                       break;
-                   }
+                   if (match == std::string::npos) break;
 
                    // check if match is within bounds of offset <-> length
-                   if( match + special_token.length() > raw_text_base_offset + raw_text_base_length )
-                   {
-                       // match is out of bounds
-                       break;
-                   }
+                   if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
 
 #ifdef PRETOKENIZERDEBUG
                    fprintf(stderr,"FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
 #endif
-
                    auto source = std::distance(buffer.begin(), it);
 
                    // if match is further than base offset
@@ -6663,7 +6652,6 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                    if (match > raw_text_base_offset)
                    {
                        // left
-                       //buffer.emplace_after(it, raw_text->substr(0, match));
                        const int64_t left_reminder_offset = raw_text_base_offset + 0;
                        const int64_t left_reminder_length = match - raw_text_base_offset;
                        buffer.emplace_after(it, (*raw_text), left_reminder_offset, left_reminder_length);
@@ -6671,7 +6659,6 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
 #ifdef PRETOKENIZERDEBUG
                        fprintf(stderr,"FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
 #endif
-
                        it++;
                    }
 
@@ -6679,16 +6666,9 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                    buffer.emplace_after(it, special_id);
                    it++;
 
-
                    // right
                    if (match + special_token.length() < raw_text_base_offset + raw_text_base_length)
                    {
-                       /*
-                       |                                                                 |
-                       -------------------------------------------------------------------------
-                       .                             |ttttt|                             |
-                       */
-                       //buffer.emplace_after(it, raw_text->substr(match + special_token.length()));
                        const int64_t right_reminder_offset = match + special_token.length();
                        const int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
                        buffer.emplace_after(it, (*raw_text), right_reminder_offset, right_reminder_length);
@@ -6699,66 +6679,25 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
 
                        it++;
 
-                       if (source == 0)
-                       {
-                           // TODO? It might not be needed to store/restore the iterator like this
-                           // but this gives me the peace of mind I'm not causing some
-                           // accidental undefined behaviour.
-                           auto it_backup = std::distance( buffer.begin(), it );
-
-                           buffer.erase_after(buffer.before_begin());
-
-                           it = std::next( buffer.begin(), it_backup-1 );
-                       }
-                       else
-                       {
-                           auto it_backup = std::distance( buffer.begin(), it );
-
-                           //auto prev = std::prev( buffer.begin(), -(source-1) );
-                           auto prev = std::next( buffer.begin(), (source-1) );
-                           buffer.erase_after(prev);
-
-                           it = std::next( buffer.begin(), it_backup-1 );
-                       }
-                       //it = std::prev( it, 1 );
+                       if (source == 0) buffer.erase_after(buffer.before_begin());
+                       else buffer.erase_after(std::next(buffer.begin(), (source-1)));
 
                        // repeat for the right side
-                       raw_text_base_offset = right_reminder_offset; //match + special_token.length();
-                       raw_text_base_length = right_reminder_length; //right_reminder_length - ( ( match + special_token.length() ) - raw_text_base_offset );
-                       //raw_text = &((*it).raw_text);
-
+                       raw_text_base_offset = right_reminder_offset;
+                       raw_text_base_length = right_reminder_length;
 #ifdef PRETOKENIZERDEBUG
                        fprintf(stderr,"RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
 #endif
-
                    }
                    else
                    {
-                       if (source == 0)
-                       {
-                           auto it_backup = std::distance( buffer.begin(), it );
-
-                           buffer.erase_after(buffer.before_begin());
-
-                           it = std::next( buffer.begin(), it_backup-1 );
-                       }
-                       else
-                       {
-                           auto it_backup = std::distance( buffer.begin(), it );
-
-                           //auto prev = std::prev( buffer.begin(), -(source) );
-                           auto prev = std::next( buffer.begin(), (source-1) );
-                           buffer.erase_after(prev);
-
-                           it = std::next( buffer.begin(), it_backup-1 );
-                       }
-                       //it = std::prev( it, 1 );
-
+                       if (source == 0) buffer.erase_after(buffer.before_begin());
+                       else buffer.erase_after(std::next(buffer.begin(), (source-1)));
                        break;
                    }
                }
            }
 
            it++;
        }
    }
@@ -6781,12 +6720,9 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
    }
 
    std::forward_list<fragment_buffer_variant> fragment_buffer;
-
    fragment_buffer.emplace_front( raw_text, 0, raw_text.length() );
 
-   if (special) {
-       tokenizer_st_partition( vocab, fragment_buffer );
-   }
+   if (special) tokenizer_st_partition( vocab, fragment_buffer );
 
    switch (vocab.type) {
        case LLAMA_VOCAB_TYPE_SPM:
@@ -6806,7 +6742,6 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
 #ifdef PRETOKENIZERDEBUG
                fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
 #endif
-
                llm_tokenizer_spm tokenizer(vocab);
                llama_escape_whitespace(raw_text);
                tokenizer.tokenize(raw_text, output);
@@ -6828,7 +6763,6 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
 #ifdef PRETOKENIZERDEBUG
                fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
 #endif
-
                llm_tokenizer_bpe tokenizer(vocab);
                tokenizer.tokenize(raw_text, output);
            }