llama : always reserve n_vocab * n_batch for logits
llama_context de-serialization breaks if contexts have differing capacity for logits, and llama_decode will resize the buffer to at most n_vocab * n_batch. Reserving that maximum up front keeps the capacity consistent.
This commit is contained in:
parent
0093dea953
commit
b9c60dec98
1 changed file with 2 additions and 6 deletions
|
@@ -9795,12 +9795,8 @@ struct llama_context * llama_new_context_with_model(
             ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
     }

-    // resized during inference
-    if (params.logits_all) {
-        ctx->logits.reserve(hparams.n_vocab*cparams.n_batch);
-    } else {
-        ctx->logits.reserve(hparams.n_vocab);
-    }
+    // resized during inference, reserve maximum
+    ctx->logits.reserve(hparams.n_vocab*cparams.n_batch);

     if (params.embedding){
         ctx->embedding.resize(hparams.n_embd);
|
Loading…
Add table
Add a link
Reference in a new issue