add shape annotations for llama

xaedes 2023-04-27 16:39:41 +02:00
parent 93106504fd
commit 38675e537c


@@ -1087,6 +1087,7 @@ static bool llama_eval_internal(
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
memcpy(embd->data, tokens, N*ggml_element_size(embd));
// inpL shape [n_embd,N,1,1]
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
for (int il = 0; il < n_layer; ++il) {
@@ -1098,6 +1099,7 @@ static bool llama_eval_internal(
// norm
{
// cur shape [n_embd,N,1,1]
cur = ggml_rms_norm(ctx0, inpL);
// cur = attention_norm*cur
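The two ops annotated here act row-wise on cur: ggml_rms_norm divides each n_embd-sized row by its root-mean-square, and the following multiply scales it channel-wise by attention_norm. A minimal sketch of that per-row computation in plain C, assuming the standard RMSNorm formula; the eps value and the helper name are illustrative, not taken from this diff:

    #include <math.h>

    /* normalize one row of length n_embd in place, then scale it by the
       per-channel weights w (stand-in for attention_norm) */
    static void rms_norm_row(float * x, const float * w, int n_embd, float eps) {
        float sum_sq = 0.0f;
        for (int i = 0; i < n_embd; ++i) {
            sum_sq += x[i]*x[i];
        }
        const float scale = 1.0f/sqrtf(sum_sq/n_embd + eps);
        for (int i = 0; i < n_embd; ++i) {
            x[i] = x[i]*scale*w[i];   /* cur = attention_norm * rms_norm(cur) */
        }
    }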
@@ -1109,14 +1111,24 @@ static bool llama_eval_internal(
// self-attention
{
// compute Q and K and RoPE them
// wq shape [n_embd, n_embd, 1, 1]
// wk shape [n_embd, n_embd, 1, 1]
// Qcur shape [n_embd/n_head, n_head, N, 1]
// Kcur shape [n_embd/n_head, n_head, N, 1]
struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
// store key and value to memory
{
// compute the transposed [N, n_embd] V matrix
// wv shape [n_embd, n_embd, 1, 1]
// Vcur shape [n_embd, N, 1, 1]
struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, cur), n_embd, N));
// kv_self.k shape [n_embd * n_ctx * n_layer, 1]
// kv_self.v shape [n_embd * n_ctx * n_layer, 1]
// k shape [n_embd * N, 1] == kv_self.k[:,n_past:n_past+N,il,0]
// v shape [N, n_embd, 1, 1] == kv_self.v[:,n_past:n_past+N,il,0]
struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
( n_ctx)*ggml_element_size(kv_self.v),
@@ -1127,11 +1139,15 @@ static bool llama_eval_internal(
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
}
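The view offsets above treat each cache tensor as a flat buffer of n_embd*n_ctx*n_layer elements. For K, the ggml_view_1d offset places the N new rows at element n_embd*(il*n_ctx + n_past); for V, the n_ctx row stride in ggml_view_2d implies a transposed per-layer layout in which each channel owns a contiguous run of n_ctx token slots. A small sketch of that index arithmetic in plain C (element indices, not bytes; the V formula is inferred from the stride shown here and should be read as an assumption):

    #include <stddef.h>

    /* element offset of the first cached K value for layer il at position n_past,
       matching the ggml_view_1d offset divided by the element size */
    static size_t k_cache_offset(int il, int n_past, int n_ctx, int n_embd) {
        return (size_t)n_embd*((size_t)il*n_ctx + n_past);
    }

    /* element offset of the V-cache entry (channel e, token t) for layer il,
       assuming the transposed [n_ctx, n_embd] per-layer layout implied by the
       n_ctx row stride of the ggml_view_2d above */
    static size_t v_cache_offset(int il, int e, int t, int n_ctx, int n_embd) {
        return ((size_t)il*n_ctx)*n_embd + (size_t)e*n_ctx + t;
    }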
// Qcur shape [n_embd/n_head, n_head, N, 1]
// Q shape [n_embd/n_head, N, n_head, 1]
struct ggml_tensor * Q =
ggml_permute(ctx0,
Qcur,
0, 2, 1, 3);
// kv_self.k shape [n_embd * n_ctx * n_layer, 1]
// K shape [n_embd/n_head, n_past + N, n_head, 1]
struct ggml_tensor * K =
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
@@ -1140,21 +1156,27 @@ static bool llama_eval_internal(
0, 2, 1, 3);
// K * Q
// KQ shape [n_past + N, N, n_head, 1]
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
// KQ_scaled = KQ / sqrt(n_embd/n_head)
// KQ_scaled shape [n_past + N, N, n_head, 1]
struct ggml_tensor * KQ_scaled =
ggml_scale_inplace(ctx0,
KQ,
ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
// KQ_masked = mask_past(KQ_scaled)
// KQ_masked shape [n_past + N, N, n_head, 1]
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
// KQ = soft_max(KQ_masked)
// KQ_soft_max shape [n_past + N, N, n_head, 1]
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
// split cached V into n_head heads
// V shape [n_past + N, n_embd/n_head, n_head, 1] == kv_self.v[:,:(n_past+N),il,0]
struct ggml_tensor * V =
ggml_view_3d(ctx0, kv_self.v,
n_past + N, n_embd/n_head, n_head,
@@ -1163,6 +1185,7 @@ static bool llama_eval_internal(
il*n_ctx*ggml_element_size(kv_self.v)*n_embd);
#if 1
// KQV shape [n_embd/n_head, N, n_head, 1]
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
#else
// make V contiguous in memory to speed up the matmul, however we waste time on the copy
@@ -1173,9 +1196,12 @@ static bool llama_eval_internal(
#endif
// KQV_merged = KQV.permute(0, 2, 1, 3)
// KQV_merged shape [n_embd/n_head, n_head, N, 1]
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
// cur = KQV_merged.contiguous().view(n_embd, N)
// cur shape [n_embd,N,1,1]
cur = ggml_cpy(ctx0,
KQV_merged,
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
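
Taken together, the annotations trace the shape of every intermediate in one attention block. The short, self-contained C program below replays only that shape arithmetic and checks it against the comments above; it is a sketch, not part of llama.cpp. It assumes ggml's dimension order (ne[0] is the fastest-varying dimension), that ggml_mul_mat(a, b) with matching batch dimensions yields ne = [a.ne[1], b.ne[1], b.ne[2], b.ne[3]], and that ggml_permute(ctx, t, 0, 2, 1, 3) swaps the two middle dimensions; n_embd, n_head, N and n_past are arbitrary example values.

    #include <assert.h>
    #include <stdio.h>

    typedef struct { long ne[4]; } shape;   /* ggml-style dims, ne[0] fastest-varying */

    /* ggml_mul_mat(a, b) with matching batch dims: ne = [a.ne[1], b.ne[1], b.ne[2], b.ne[3]] */
    static shape mul_mat(shape a, shape b) {
        shape r = { { a.ne[1], b.ne[1], b.ne[2], b.ne[3] } };
        return r;
    }

    /* ggml_permute(ctx, a, 0, 2, 1, 3): swap the two middle dimensions */
    static shape swap12(shape a) {
        shape r = { { a.ne[0], a.ne[2], a.ne[1], a.ne[3] } };
        return r;
    }

    static int eq(shape a, long d0, long d1, long d2, long d3) {
        return a.ne[0]==d0 && a.ne[1]==d1 && a.ne[2]==d2 && a.ne[3]==d3;
    }

    int main(void) {
        const long n_embd = 4096, n_head = 32, N = 8, n_past = 16;   /* example sizes */

        shape cur = { { n_embd, N, 1, 1 } };           /* cur shape [n_embd, N, 1, 1]  */
        shape wq  = { { n_embd, n_embd, 1, 1 } };      /* wq shape [n_embd, n_embd]    */

        shape q2d = mul_mat(wq, cur);                  /* wq*cur -> [n_embd, N, 1, 1]  */
        assert(eq(q2d, n_embd, N, 1, 1));

        /* reshaped to [n_embd/n_head, n_head, N]; RoPE does not change the shape */
        shape Qcur = { { n_embd/n_head, n_head, N, 1 } };

        shape Q = swap12(Qcur);                        /* Q shape [n_embd/n_head, N, n_head, 1] */
        assert(eq(Q, n_embd/n_head, N, n_head, 1));

        shape K = { { n_embd/n_head, n_past + N, n_head, 1 } };  /* cached keys, reshaped and permuted */
        shape KQ = mul_mat(K, Q);                      /* scale, mask and soft_max keep this shape */
        assert(eq(KQ, n_past + N, N, n_head, 1));

        shape V = { { n_past + N, n_embd/n_head, n_head, 1 } };  /* transposed V-cache view */
        shape KQV = mul_mat(V, KQ);
        assert(eq(KQV, n_embd/n_head, N, n_head, 1));

        shape KQV_merged = swap12(KQV);                /* copied into cur as [n_embd, N] */
        assert(eq(KQV_merged, n_embd/n_head, n_head, N, 1));

        printf("all annotated shapes check out\n");
        return 0;
    }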