use common_batch_add, reuse llama_batch in loop

parent b4c9911ebe
commit 734f9e29de

2 changed files with 12 additions and 14 deletions
@@ -496,6 +496,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
         // clear the KV cache
         llama_kv_cache_clear(ctx);
 
+        llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
         for (int j = 0; j < num_batches; ++j) {
             const int batch_start = start + j * n_batch;
             const int batch_size  = std::min(end - batch_start, n_batch);
@@ -508,12 +510,9 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
                 tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
             }
 
-            llama_batch batch = llama_batch_init(batch_size, 0, 1);
+            common_batch_clear(batch);
             for (int i = 0; i < batch_size; i++) {
-                batch.token [i] = tokens[batch_start + i];
-                batch.pos   [i] = j*n_batch + i;
-                batch.logits[i] = true;
-                batch.seq_id[i][0] = 0;
+                common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
             }
 
             if (llama_decode(ctx, batch)) {
@@ -522,8 +521,6 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
                 return false;
             }
 
-            llama_batch_free(batch);
-
             // restore the original token in case it was set to BOS
             tokens[batch_start] = token_org;
 
@@ -533,6 +530,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
             }
         }
 
+        llama_batch_free(batch);
+
         const auto t_end = std::chrono::high_resolution_clock::now();
 
         if (i == 0) {
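Taken together, the four hunks change compute_imatrix from allocating and freeing a llama_batch on every inner iteration to reusing one allocation for the whole chunk. A sketch of the resulting loop, assembled from the hunks above (the BOS save/restore and logits bookkeeping between the hunks are elided):

```cpp
// One allocation per chunk, sized for the largest batch (n_batch tokens,
// 0 extra embd, 1 sequence), instead of one allocation per iteration.
llama_batch batch = llama_batch_init(n_batch, 0, 1);

for (int j = 0; j < num_batches; ++j) {
    const int batch_start = start + j * n_batch;
    const int batch_size  = std::min(end - batch_start, n_batch);

    common_batch_clear(batch); // reuse: reset the token count, keep the buffers

    for (int i = 0; i < batch_size; i++) {
        // token id, position, seq_id {0}, logits enabled -- replaces the four
        // hand-written batch.token/pos/logits/seq_id assignments
        common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
    }

    if (llama_decode(ctx, batch)) {
        return false; // eval failed
    }
}

llama_batch_free(batch); // freed once, after the loop
```

The same transformation is applied to kl_divergence: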
@@ -1800,6 +1800,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
         // clear the KV cache
         llama_kv_cache_clear(ctx);
 
+        llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
         for (int j = 0; j < num_batches; ++j) {
             const int batch_start = start + j * n_batch;
             const int batch_size  = std::min(end - batch_start, n_batch);
@@ -1812,12 +1814,9 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
                 tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
             }
 
-            llama_batch batch = llama_batch_init(batch_size, 0, 1);
+            common_batch_clear(batch);
             for (int i = 0; i < batch_size; i++) {
-                batch.token [i] = tokens[batch_start + i];
-                batch.pos   [i] = j*n_batch + i;
-                batch.logits[i] = true;
-                batch.seq_id[i][0] = 0;
+                common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
             }
 
             if (llama_decode(ctx, batch)) {
@@ -1826,8 +1825,6 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
                 return;
             }
 
-            llama_batch_free(batch);
-
             // restore the original token in case it was set to BOS
             tokens[batch_start] = token_org;
 
@@ -1837,6 +1834,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
             }
         }
 
+        llama_batch_free(batch);
+
         const auto t_end = std::chrono::high_resolution_clock::now();
 
         if (i == 0) {
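For reference, common_batch_clear and common_batch_add are helpers from llama.cpp's common library (declared in common/common.h). A paraphrased sketch of their behavior, assuming they match common/common.cpp of the same era (simplified, bounds assertions omitted) and not part of this diff:

```cpp
#include <vector>
#include "llama.h"

// Reuse the batch: buffers stay allocated, only the token count resets.
void common_batch_clear(struct llama_batch & batch) {
    batch.n_tokens = 0;
}

// Append one token: fills token, pos, seq_id, and logits slots, and keeps
// n_tokens and n_seq_id consistent as a side effect.
void common_batch_add(
        struct llama_batch & batch,
        llama_token id,
        llama_pos pos,
        const std::vector<llama_seq_id> & seq_ids,
        bool logits) {
    batch.token   [batch.n_tokens] = id;
    batch.pos     [batch.n_tokens] = pos;
    batch.n_seq_id[batch.n_tokens] = (int32_t) seq_ids.size();
    for (size_t i = 0; i < seq_ids.size(); ++i) {
        batch.seq_id[batch.n_tokens][i] = seq_ids[i];
    }
    batch.logits  [batch.n_tokens] = logits;

    batch.n_tokens++;
}
```

Because common_batch_add maintains batch.n_tokens and n_seq_id itself, reusing one batch across iterations only requires the common_batch_clear call at the top of each pass, on top of saving a llama_batch_init/llama_batch_free pair per batch.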