llama : pad KV cache size (#4280)
* llama : pad KV cache size to 32
* metal : try to improve batched decoding
This commit is contained in:
parent
5a7d3125e7
commit
d7b800b8bc
2 changed files with 2 additions and 3 deletions
|
@ -1083,7 +1083,7 @@ void ggml_metal_graph_compute(
|
|||
|
||||
// find the break-even point where the matrix-matrix kernel becomes more efficient compared
|
||||
// to the matrix-vector kernel
|
||||
-        int ne11_mm_min = 1;
|
||||
+        int ne11_mm_min = src0t == GGML_TYPE_F16 ? 1 : 16;
|
||||
|
||||
#if 0
|
||||
// the numbers below are measured on M2 Ultra for 7B and 13B models
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue