llama : support quantum K cache (wip)
parent 66aaac9867
commit d04ee928a2
2 changed files with 20 additions and 12 deletions
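For context, "quantum K cache" here means storing the K half of the KV cache in a quantized ggml type instead of F16. A minimal sketch of how this surfaces at the API level, assuming the `type_k` field of `llama_context_params` that this line of work introduced; whether this exact WIP commit already exposes it is an assumption:

    #include "llama.h"

    int main(void) {
        // Sketch only: request a quantized K cache when creating a context.
        // type_k is the context-params field this K-cache work introduces;
        // treat its availability in this exact WIP commit as an assumption.
        struct llama_context_params cparams = llama_context_default_params();
        cparams.type_k = GGML_TYPE_Q8_0; // K cache stored as 8-bit quantized blocks

        // llama_new_context_with_model(model, cparams) would then allocate
        // the K cache tensors with the quantized type.
        return 0;
    }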
@@ -1114,7 +1114,7 @@ void ggml_metal_graph_compute(
                         !ggml_is_transposed(src1) &&
                         src1t == GGML_TYPE_F32 &&
                         ne00 % 32 == 0 && ne00 >= 64 &&
-                        ne11 > ne11_mm_min) {
+                        (ne11 > ne11_mm_min || ne12 > 1)) {
                         //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
                         switch (src0->type) {
                             case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f32_f32]; break;
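The change above widens the test that routes a multiplication to the Metal matrix-matrix (mul_mm) pipeline: previously src1 needed more than ne11_mm_min rows, while now any batched multiplication (ne12 > 1) also qualifies, even when each batch has few rows. A standalone restatement of the condition as a sketch, with hypothetical helper naming and under the assumption that the variables mean what their names suggest:

    #include <stdbool.h>
    #include <stdio.h>

    // Hypothetical restatement of the dispatch test in the diff above.
    // ne00        - row size (number of columns) of src0
    // ne11        - number of rows in src1
    // ne12        - batch dimension (e.g. attention heads)
    // ne11_mm_min - minimum ne11 for the mat-mat kernel to pay off
    static bool prefers_mul_mm(bool src1_transposed, bool src1_is_f32,
                               int ne00, int ne11, int ne12, int ne11_mm_min) {
        return !src1_transposed &&
                src1_is_f32     &&
                ne00 % 32 == 0 && ne00 >= 64 &&
                // before: only ne11 > ne11_mm_min
                // after:  batched cases (ne12 > 1) also take the mat-mat path
                (ne11 > ne11_mm_min || ne12 > 1);
    }

    int main(void) {
        // One row in src1 but 32 batches: rejected before the change, taken now.
        printf("%d\n", prefers_mul_mm(false, true, 128, 1, 32, 1)); // prints 1
        return 0;
    }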