handle seq rm return value
commit 8ab1a17251
parent bf1d4932f8
2 changed files with 7 additions and 2 deletions
@@ -1745,7 +1745,10 @@ struct server_context {
 
                     // Erase token cache
                     const size_t n_erased = slot->cache_tokens.size();
-                    llama_kv_cache_seq_rm(ctx, slot->id + 1, -1, -1);
+                    if (!llama_kv_cache_seq_rm(ctx, slot->id + 1, -1, -1)) {
+                        send_error(task, "Failed to erase slot KV cache", ERROR_TYPE_INVALID_REQUEST);
+                        break;
+                    }
                     slot->cache_tokens.clear();
 
                     server_task_result result;
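The server-side hunk above turns a fire-and-forget call into a checked one: if the KV-cache removal for the slot's sequence fails, the server reports an error to the client and aborts the erase instead of clearing cache_tokens anyway. Below is a minimal sketch of the same pattern against the public API, assuming llama_kv_cache_seq_rm now returns bool as the diff suggests; the helper name and error handling are illustrative, not part of the commit.

#include "llama.h"   // public API used in the hunk above

// Hypothetical helper (not from the commit): remove an entire sequence from the
// KV cache and report whether the removal actually happened.
static bool clear_sequence(struct llama_context * ctx, llama_seq_id seq_id) {
    // p0 = -1 and p1 = -1 request removal of the whole sequence, as in the hunk above
    if (!llama_kv_cache_seq_rm(ctx, seq_id, -1, -1)) {
        // the removal can be refused; the caller must not assume the tokens are gone
        return false;
    }
    return true;
}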
@@ -15243,7 +15243,9 @@ size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src,
     GGML_ASSERT(!kv_self.recurrent); // not implemented
 
     // Wipe the slot
-    llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
+    if (!llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1)) {
+        return 0;
+    }
 
     const uint8_t * inp = src;
 
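The second hunk is inside llama_state_seq_set_data: if the destination sequence cannot be wiped, the function now returns 0 instead of restoring state on top of stale cells. A hedged sketch of how a caller might react to that 0 return follows; the hunk header truncates the parameter list, so the trailing dest_seq_id argument is an assumption based on the variable visible in the hunk body, and restore_sequence_state / state_buf are illustrative names.

#include <cstdint>
#include <cstdio>
#include <vector>
#include "llama.h"

// Hypothetical caller: try to restore a saved sequence state; a 0 return is taken
// to mean the destination sequence could not be wiped, so nothing was restored.
// The trailing dest_seq_id argument is assumed from the hunk body above, since the
// hunk header truncates the full parameter list.
static bool restore_sequence_state(struct llama_context * ctx,
                                   const std::vector<uint8_t> & state_buf,
                                   llama_seq_id dest_seq_id) {
    const size_t n_read = llama_state_seq_set_data(ctx, state_buf.data(), dest_seq_id);
    if (n_read == 0) {
        fprintf(stderr, "failed to restore state for sequence %d\n", (int) dest_seq_id);
        // fall back, e.g. re-evaluate the prompt for this sequence instead of reusing saved state
        return false;
    }
    return true;
}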