Fixed mismatched type errors

* Cited in macOS CI tests
* Missed in original updates based on PR feedback in https://github.com/ggerganov/llama.cpp/pull/6519
parent 95bf5f7d87
commit 420cf62838

1 changed file with 4 additions and 4 deletions
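For context on the fix: format() in llama.cpp takes printf-style conversion specifiers, so each specifier must match the type of the argument passed to it. The sketch below is illustrative only, not llama.cpp code; it assumes n_outputs is a signed int32_t (as the %d specifier in the corrected lines implies) and that the macOS CI build promotes these clang warnings to errors, which is what the commit message suggests.

// Minimal sketch of the diagnostics behind the fix -- not llama.cpp code.
// Assumes n_outputs is int32_t and that warnings are treated as errors, e.g.:
//   clang++ -std=c++11 -Wall -Wextra -Wformat -Wsign-compare -Werror sketch.cpp
#include <cstdint>
#include <cstdio>

int main() {
    int32_t j         = 3;
    int32_t n_outputs = 8;

    // Old pattern: %lu expects an unsigned long, but n_outputs is int32_t, so
    // clang reports -Wformat; casting only j to size_t also makes the comparison
    // mixed-sign and triggers -Wsign-compare:
    //
    //   if ((size_t) j >= n_outputs) {
    //       printf("corrupt output buffer (j=%d, n_outputs=%lu)\n", j, n_outputs);
    //   }

    // Fixed pattern: compare like-signed types and use the matching %d specifier.
    if (j >= n_outputs) {
        printf("corrupt output buffer (j=%d, n_outputs=%d)\n", j, n_outputs);
    }
    return 0;
}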
@@ -15536,9 +15536,9 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
         if (j < 0) {
             throw std::runtime_error(format("batch.logits[%d] != true", i));
         }
-        if ((size_t) j >= ctx->n_outputs) {
+        if (j >= ctx->n_outputs) {
             // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%lu)", j, ctx->n_outputs));
+            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
         }
 
         return ctx->logits + j*ctx->model.hparams.n_vocab;
@@ -15581,9 +15581,9 @@ float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
         if (j < 0) {
             throw std::runtime_error(format("batch.logits[%d] != true", i));
         }
-        if ((size_t) j >= ctx->n_outputs) {
+        if (j >= ctx->n_outputs) {
             // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%lu)", j, ctx->n_outputs));
+            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
         }
 
         return ctx->embd + j*ctx->model.hparams.n_embd;