minor : comment + spacing
parent 1c28116de4
commit fc82541b1d
2 changed files with 2 additions and 1 deletion
llama.cpp
@@ -6727,7 +6727,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                 // by modifying llm_tokenizer_x to operate with string offsets like pre-tokenizer
                 // and passing 'add space prefix' as bool argument
                 //
-                auto raw_text = (special?"":" ") + fragment.raw_text.substr(fragment.offset, fragment.length);
+                auto raw_text = (special ? "" : " ") + fragment.raw_text.substr(fragment.offset, fragment.length);
 
 #ifdef PRETOKENIZERDEBUG
                 fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
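For context, the ternary touched by the spacing fix is the point where a plain-text fragment receives the SentencePiece-style leading space before it is handed to the tokenizer, while a special/control-token fragment does not. Below is a minimal, standalone sketch of just that string-building step; fragment_t is a hypothetical stand-in for the real fragment type in llama.cpp, not the actual internals.

#include <cstdio>
#include <string>

// Hypothetical stand-in for llama.cpp's internal text-fragment type.
struct fragment_t {
    std::string raw_text;
    size_t      offset;
    size_t      length;
};

int main() {
    fragment_t fragment{"Hello world", 0, 11};

    // special == true  -> special/control token fragment, no space prefix
    // special == false -> plain text, prefix a space as the SPM-style tokenizer expects
    for (bool special : {false, true}) {
        auto raw_text = (special ? "" : " ") + fragment.raw_text.substr(fragment.offset, fragment.length);
        printf("special=%d -> '%s'\n", special, raw_text.c_str());
    }
    return 0;
}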
llama.h
@@ -516,6 +516,7 @@ extern "C" {
     /// @return Returns the number of tokens on success, no more than n_max_tokens
     /// @return Returns a negative number on failure - the number of tokens that would have been returned
     /// @param special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext.
+    ///                Does not insert a leading space.
     LLAMA_API int llama_tokenize(
             const struct llama_model * model,
                           const char * text,
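The doxygen lines above describe a resize-and-retry convention: a negative return value is minus the number of tokens the call would have produced. A hedged usage sketch follows; the trailing parameters after 'text' (text_len, tokens, n_max_tokens, add_bos, special) are assumed from this revision of llama.h and should be checked against the actual declaration.

#include <string>
#include <vector>
#include "llama.h"

// Sketch of the documented convention: tokenize once, and if the result is
// negative, grow the buffer to -n and tokenize again. Parameter order past
// 'text' is an assumption about this version of the header.
static std::vector<llama_token> tokenize_text(const llama_model * model, const std::string & text, bool add_bos, bool special) {
    std::vector<llama_token> tokens(text.size() + (add_bos ? 1 : 0));
    int n = llama_tokenize(model, text.c_str(), (int) text.size(), tokens.data(), (int) tokens.size(), add_bos, special);
    if (n < 0) {
        tokens.resize(-n); // negative return == required capacity
        n = llama_tokenize(model, text.c_str(), (int) text.size(), tokens.data(), (int) tokens.size(), add_bos, special);
    }
    tokens.resize(n);
    return tokens;
}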