llama : use LLAMA_TOKEN_NULL (#11062)

ggml-ci
This commit is contained in:
Georgi Gerganov 2025-01-06 10:52:15 +02:00 committed by GitHub
parent 5047dd3546
commit 727368c60f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 53 additions and 54 deletions

View file

@@ -494,7 +494,7 @@ int main(int argc, char ** argv) {
}
llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
-    if (decoder_start_token_id == -1) {
+    if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
decoder_start_token_id = llama_token_bos(model);
}
@@ -831,7 +831,7 @@ int main(int argc, char ** argv) {
// if user stop generation mid-way, we must add EOT to finish model's last response
if (need_insert_eot && format_chat) {
llama_token eot = llama_token_eot(model);
-        embd_inp.push_back(eot == -1 ? llama_token_eos(model) : eot);
+        embd_inp.push_back(eot == LLAMA_TOKEN_NULL ? llama_token_eos(model) : eot);
need_insert_eot = false;
}