switch to ggml for copying k, v

Evan Jones 2023-05-02 17:37:26 -04:00
parent 970547e71a
commit 0bf20fe5f3

@@ -2496,30 +2496,31 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dest) {
         memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok);
 
         if (kv_size) {
-            {
-                // copy k: k layout is n_layer > n_ctx (tokens) > n_embd
-                const uint8_t * k_data = (uint8_t *) kv_self.k->data;
-                const size_t elt_size = ggml_element_size(kv_self.k);
-                for (int il = 0; il < n_layer; il++) {
-                    const size_t offset = il * n_ctx * n_embd * elt_size;
-                    const size_t size = kv_ntok * n_embd * elt_size;
-                    memcpy(out, k_data + offset, size); out += size;
-                }
-            }
-            {
-                // copy v: v layout is n_layer > n_embd > n_ctx (tokens)
-                const uint8_t * v_data = (uint8_t *) kv_self.v->data;
-                const size_t elt_size = ggml_element_size(kv_self.v);
-                const int n_layer_embd = n_layer * n_embd;
-                for (int ile = 0; ile < n_layer_embd; ile++) {
-                    const size_t offset = ile * n_ctx * elt_size;
-                    const size_t size = kv_ntok * elt_size;
-                    memcpy(out, v_data + offset, size); out += size;
-                }
-            }
+            const size_t elt_size = ggml_element_size(kv_self.k);
+            char buffer[4096];
+            ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+            ggml_cgraph gf{};
+            gf.n_threads = 1;
+
+            ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
+            kout3d->data = out;
+            out += ggml_nbytes(kout3d);
+
+            ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
+            vout3d->data = out;
+            out += ggml_nbytes(vout3d);
+
+            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
+                n_embd, kv_ntok, n_layer,
+                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);
+
+            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
+                kv_ntok, n_embd, n_layer,
+                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
+
+            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
+            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d));
+            ggml_graph_compute(cpy_ctx, &gf);
         }
     }
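The new path replaces the hand-rolled memcpy loops with a two-node ggml graph per cache tensor: a strided ggml_view_3d selects the first kv_ntok tokens of every layer in place, and ggml_cpy packs that view into a contiguous tensor whose data pointer is aimed directly at the output buffer. Below is a minimal standalone sketch of the same trick, written against the ggml API as it existed at this commit (gf.n_threads, ggml_graph_compute(ctx, &gf)); the stand-in tensor, toy sizes, and expected value are illustrative assumptions, not part of the diff.

    #include "ggml.h"
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        const int n_embd = 8, n_ctx = 16, n_layer = 2, kv_ntok = 3; // toy sizes

        // normally-allocated context holding a stand-in K cache: n_embd x n_ctx x n_layer
        ggml_context * ctx = ggml_init({ 16*1024*1024, NULL, /* no_alloc */ false });
        ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd, n_ctx, n_layer);
        for (int i = 0; i < n_embd*n_ctx*n_layer; i++) {
            ((float *) k->data)[i] = (float) i;
        }

        // no_alloc scratch context: only graph/tensor metadata lives in `buffer`;
        // data pointers are patched in by hand, exactly as in the commit
        char buffer[4096];
        ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
        ggml_cgraph gf{};
        gf.n_threads = 1;

        // destination: contiguous n_embd x kv_ntok x n_layer, backed by our own buffer
        ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, k->type, n_embd, kv_ntok, n_layer);
        std::vector<uint8_t> out(ggml_nbytes(kout3d));
        kout3d->data = out.data();

        // source: strided view selecting the first kv_ntok tokens of every layer
        const size_t elt_size = ggml_element_size(k);
        ggml_tensor * k3d = ggml_view_3d(cpy_ctx, k,
            n_embd, kv_ntok, n_layer,
            elt_size*n_embd, elt_size*n_embd*n_ctx, 0);

        ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
        ggml_graph_compute(cpy_ctx, &gf);

        // layer 1, token 0, dim 0 of the packed buffer == k[0, 0, 1] == n_embd*n_ctx
        printf("%.0f\n", ((float *) out.data())[n_embd*kv_ntok]); // prints 128
        ggml_free(cpy_ctx);
        ggml_free(ctx);
        return 0;
    }

The no_alloc context is the key detail: the 4 KiB stack buffer only has to hold metadata, while the payload moves straight between the cache and the caller's buffer.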
@@ -2599,31 +2600,31 @@ size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
         if (kv_size) {
             LLAMA_ASSERT(kv_self.buf.size == kv_size);
 
-            {
-                // set k data: k layout is n_layer > n_ctx (tokens) > n_embd
-                uint8_t * k_data = (uint8_t *) kv_self.k->data;
-                const size_t elt_size = ggml_element_size(kv_self.k);
-                for (int il = 0; il < n_layer; il++) {
-                    const size_t offset = il * n_ctx * n_embd * elt_size;
-                    const size_t size = kv_ntok * n_embd * elt_size;
-                    memcpy(k_data + offset, in, size); in += size;
-                }
-            }
-            {
-                // set v data: v layout is n_layer > n_embd > n_ctx (tokens)
-                uint8_t * v_data = (uint8_t *) kv_self.v->data;
-                const size_t elt_size = ggml_element_size(kv_self.v);
-                const int n_layer_embd = n_layer * n_embd;
-                for (int ile = 0; ile < n_layer_embd; ile++) {
-                    const size_t offset = ile * n_ctx * elt_size;
-                    const size_t size = kv_ntok * elt_size;
-                    memcpy(v_data + offset, in, size); in += size;
-                }
-            }
+            const size_t elt_size = ggml_element_size(kv_self.k);
+            char buffer[4096];
+            ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+            ggml_cgraph gf{};
+            gf.n_threads = 1;
+
+            ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
+            kin3d->data = (void *) in;
+            in += ggml_nbytes(kin3d);
+
+            ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
+            vin3d->data = (void *) in;
+            in += ggml_nbytes(vin3d);
+
+            ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
+                n_embd, kv_ntok, n_layer,
+                elt_size*n_embd, elt_size*n_embd*n_ctx, 0);
+
+            ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
+                kv_ntok, n_embd, n_layer,
+                elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
+
+            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
+            ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d));
+            ggml_graph_compute(cpy_ctx, &gf);
         }
 
         ctx->model.kv_self.n = kv_ntok;
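The restore side simply mirrors the graph: kin3d/vin3d describe the incoming bytes and the ggml_view_3d destinations write back into the cache. Both functions are reached through the public state API; here is a hedged usage sketch of the round trip, where the helper name and surrounding calling code are my own, while the three llama.h calls existed with these signatures at the time of this commit.

    #include "llama.h"
    #include <cstdint>
    #include <vector>

    // hypothetical helper: snapshot the context state (including the k/v
    // cache serialized by the graph above), then restore it later
    void snapshot_roundtrip(llama_context * ctx) {
        // llama_get_state_size reports an upper bound; the actual copy may be
        // smaller, since only kv_ntok of the n_ctx token slots are written
        std::vector<uint8_t> state(llama_get_state_size(ctx));

        const size_t n_written = llama_copy_state_data(ctx, state.data());
        (void) n_written;

        // ... evaluate other tokens here, clobbering the kv cache ...

        // restore: runs the mirrored copy graph, repopulating kv_self.k /
        // kv_self.v and resetting kv_self.n to the saved kv_ntok
        llama_set_state_data(ctx, state.data());
    }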