minor : clean-up

parent d9adb8832b
commit a1278f13da

2 changed files with 2 additions and 5 deletions

@@ -8372,7 +8372,6 @@ struct llm_build_context {
             // output layer norm
             cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].layer_out_norm, model.layers[il].layer_out_norm_b, LLM_NORM, cb, il);
 
-
             // input for next layer
             inpL = cur;
         }

@@ -12806,8 +12805,6 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
         }
     }
 
-    //GGML_ASSERT(vocab.special_add_eos != 1);
-    //TODO: Check this, why this tokenizer does not add <eos> at the end, why not leaving up to the `gguf` exporter?
     if (add_special && vocab.special_add_eos == 1) {
         GGML_ASSERT(vocab.special_add_eos != -1);
         output.push_back(vocab.special_eos_id);
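
For context, a minimal, self-contained sketch of the EOS-append pattern kept by the second hunk: the EOS token id is pushed onto the output only when the caller requests special tokens and the vocab metadata explicitly enables it. This is not llama.cpp itself; toy_vocab and append_eos_if_requested are illustrative stand-ins for the real llama_vocab fields and tokenizer tail logic.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for the vocab metadata referenced in the hunk above.
    struct toy_vocab {
        int32_t special_eos_id  = 2;   // id of the EOS token
        int32_t special_add_eos = -1;  // -1 = unknown, 0 = never add, 1 = always add
    };

    // Append EOS only when special tokens were requested and the flag is explicitly 1,
    // mirroring the if / GGML_ASSERT / push_back sequence kept by this commit.
    static void append_eos_if_requested(const toy_vocab & vocab, bool add_special,
                                        std::vector<int32_t> & output) {
        if (add_special && vocab.special_add_eos == 1) {
            assert(vocab.special_add_eos != -1); // flag must be resolved, as in the GGML_ASSERT
            output.push_back(vocab.special_eos_id);
        }
    }

    int main() {
        toy_vocab vocab;
        vocab.special_add_eos = 1;

        std::vector<int32_t> output = {5, 6, 7}; // pretend token ids
        append_eos_if_requested(vocab, /*add_special=*/true, output);

        return output.back() == vocab.special_eos_id ? 0 : 1; // 0 on success
    }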