Removing a whole sequence never fails
This commit is contained in:
parent
b509b8b3de
commit
129b6ffea6
3 changed files with 3 additions and 7 deletions
|
@@ -1745,10 +1745,7 @@ struct server_context {
|
|||
|
||||
// Erase token cache
|
||||
const size_t n_erased = slot->cache_tokens.size();
|
||||
if (!llama_kv_cache_seq_rm(ctx, slot->id + 1, -1, -1)) {
|
||||
send_error(task, "Failed to erase slot KV cache", ERROR_TYPE_INVALID_REQUEST);
|
||||
break;
|
||||
}
|
||||
llama_kv_cache_seq_rm(ctx, slot->id + 1, -1, -1);
|
||||
slot->cache_tokens.clear();
|
||||
|
||||
server_task_result result;
|
||||
|
|
|
@@ -15243,9 +15243,7 @@ size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src,
|
|||
GGML_ASSERT(!kv_self.recurrent); // not implemented
|
||||
|
||||
// Wipe the slot
|
||||
if (!llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1)) {
|
||||
return 0;
|
||||
}
|
||||
llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
|
||||
|
||||
const uint8_t * inp = src;
|
||||
|
||||
|
|
1
llama.h
1
llama.h
|
@@ -523,6 +523,7 @@ extern "C" {
|
|||
struct llama_context * ctx);
|
||||
|
||||
// Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
|
||||
// Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
|
||||
// seq_id < 0 : match any sequence
|
||||
// p0 < 0 : [0, p1]
|
||||
// p1 < 0 : [p0, inf)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue