llama : sync gguf-llama.cpp
commit 95d7593e4a
parent c35fc0bbb0
2 changed files with 7 additions and 8 deletions
@@ -4561,18 +4561,18 @@ int llama_token_to_str_with_model(const struct llama_model * model, llama_token
             if (length < (int) result.length()) {
                 return -result.length();
             }
-            strcpy(str, result.c_str());
+            strncpy(str, result.c_str(), result.length());
             return result.length();
         } else if (llama_is_unknown_token(model->vocab, token)) {
             if (length < 3) {
                 return -3;
             }
-            strcpy(str, "\xe2\x96\x85");
+            strncpy(str, "\xe2\x96\x85", 3);
             return 3;
         } else if (llama_is_control_token(model->vocab, token)) {
             ;
         } else if (llama_is_byte_token(model->vocab, token)) {
-            if(1 > length) {
+            if (length < 1) {
                 return -1;
             }
             str[0] = llama_byte_to_char(model->vocab, token);
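One reading of the strcpy -> strncpy changes above, based only on the guard visible in this hunk: the guard rejects length < result.length(), so a call with length == result.length() is accepted, and strcpy would then write result.length() + 1 bytes (the payload plus the terminating NUL), one byte past the capacity the caller declared. strncpy with a count of result.length() writes exactly result.length() bytes and stays inside that capacity, at the cost of not NUL-terminating the output. The standalone sketch below only illustrates the byte counts; the buffer name and size are made up for illustration.

    #include <cstring>
    #include <string>

    int main() {
        const std::string result = "abcd";               // result.length() == 4
        char str[4];                                     // caller-declared length == 4, so the guard passes
        // strcpy(str, result.c_str());                  // would write 5 bytes ("abcd" plus '\0'): one-byte overflow
        strncpy(str, result.c_str(), result.length());   // writes exactly 4 bytes, no terminating '\0'
        return 0;
    }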
@@ -4607,7 +4607,7 @@ int llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token,
         if (length < (int) result.length()) {
             return -result.length();
         }
-        strcpy(str, result.c_str());
+        strncpy(str, result.c_str(), result.length());
         return result.length();
     }
     return 0;
@@ -4618,9 +4618,8 @@ std::string llama_token_to_str_bpe(const struct llama_context * ctx, llama_token
     const int length = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
     if (length < 0) {
         result.resize(-length);
-        const int check = llama_token_to_str_bpe(ctx, token, (char*)result.data(), result.size());
+        const int check = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
         GGML_ASSERT(check == -length);
-        GGML_UNUSED(check);
     } else {
         result.resize(length);
     }
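The std::string overload in the hunk above also shows the calling convention for the C-style API: call once with whatever buffer is at hand, and if the return value is negative its magnitude is the number of bytes required. A minimal application-side sketch of the same two-pass pattern follows; it assumes the (model, token, char * str, int length) parameter order suggested by the first hunk header, assumes "llama.h" is on the include path, and builds the std::string from the returned byte count because the strncpy calls above do not append a terminating NUL.

    #include <string>
    #include <vector>

    #include "llama.h"

    // Sketch only: mirrors the two-pass pattern of the std::string overload in the last hunk.
    static std::string token_to_string(const struct llama_model * model, llama_token token) {
        std::vector<char> buf(8, 0);                     // arbitrary starting capacity
        int n = llama_token_to_str_with_model(model, token, buf.data(), (int) buf.size());
        if (n < 0) {
            buf.resize(-n);                              // negative return: -n bytes are required
            n = llama_token_to_str_with_model(model, token, buf.data(), (int) buf.size());
        }
        // construct from the byte count: the copy is not NUL-terminated by the callee
        return std::string(buf.data(), n);
    }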