Merge branch 'master' into custom-attention-mask

Georgi Gerganov 2023-09-18 11:15:18 +03:00
commit 58bb5110ca
GPG key ID: 449E073F9DC10735
44 changed files with 1180 additions and 413 deletions


@@ -78,7 +78,7 @@ int32_t get_num_physical_cores() {
     return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
 }
 
-void process_escapes(std::string& input) {
+static void process_escapes(std::string& input) {
     std::size_t input_len = input.length();
     std::size_t output_idx = 0;
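The only change in this hunk is the added static keyword: process_escapes gains internal linkage, so the helper is private to its translation unit and its symbol can no longer collide with, or be referenced from, other source files.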
@@ -798,10 +798,10 @@ std::vector<llama_token> llama_tokenize(
     // upper limit for the number of tokens
     int n_tokens = text.length() + add_bos;
     std::vector<llama_token> result(n_tokens);
-    n_tokens = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
+    n_tokens = llama_tokenize(ctx, text.data(), text.length(), result.data(), result.size(), add_bos);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
+        int check = llama_tokenize(ctx, text.data(), text.length(), result.data(), result.size(), add_bos);
         GGML_ASSERT(check == -n_tokens);
     } else {
         result.resize(n_tokens);
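For context, this hunk tracks an API change in which llama_tokenize takes the input text as an explicit pointer-and-length pair instead of a NUL-terminated C string, while keeping the two-pass size-negotiation protocol: a call with a too-small output buffer returns the negative of the required token count. A minimal standalone sketch of that protocol, assuming only the signature visible in this diff (the helper name tokenize_two_pass is hypothetical):

#include <string>
#include <vector>
#include "llama.h"

static std::vector<llama_token> tokenize_two_pass(llama_context * ctx, const std::string & text, bool add_bos) {
    // First pass: one token per input byte (plus an optional BOS) is an upper
    // bound on the token count, so in practice this call should already succeed.
    std::vector<llama_token> result(text.length() + add_bos);
    int n_tokens = llama_tokenize(ctx, text.data(), text.length(), result.data(), result.size(), add_bos);
    if (n_tokens < 0) {
        // Buffer was too small: -n_tokens is the exact count, so resize and retry once.
        result.resize(-n_tokens);
        n_tokens = llama_tokenize(ctx, text.data(), text.length(), result.data(), result.size(), add_bos);
    }
    result.resize(n_tokens);  // trim the over-allocation down to the tokens actually written
    return result;
}

Passing text.data() with an explicit text.length() also means the input no longer has to be NUL-terminated, so callers can tokenize substrings or buffers containing embedded NUL bytes without making a copy first.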