diff --git a/common/common.cpp b/common/common.cpp
index 5f10718ec..85c8292e4 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -101,7 +101,7 @@ int32_t get_num_physical_cores() {
     return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
 }
 
-void process_escapes(std::string& input) {
+void process_escapes(std::string & input) {
     std::size_t input_len = input.length();
     std::size_t output_idx = 0;
 
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 5fb2cb603..7aaf63ceb 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -48,6 +48,8 @@ int main(int argc, char ** argv) {
         params.prompt = "Hello my name is";
     }
 
+    process_escapes(params.prompt);
+
     // init LLM
 
     llama_backend_init();
diff --git a/ggml-metal.m b/ggml-metal.m
index e9598ddff..109e5fe6b 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -1383,7 +1383,7 @@ static enum ggml_status ggml_metal_graph_compute(
                     !ggml_is_transposed(src0) &&
                     !ggml_is_transposed(src1) &&
                     src1t == GGML_TYPE_F32 &&
-                    ne00 % 32 == 0 && ne00 >= 128 &&
+                    ne00 % 32 == 0 && ne00 >= 64 &&
                     (ne11 > ne11_mm_min || (ggml_is_quantized(src0t) && ne12 > 1))) {
                     //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
@@ -1698,7 +1698,7 @@ static enum ggml_status ggml_metal_graph_compute(
                     // indirect matrix multiplication
                     // !!!
                     if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
-                        ne20 % 32 == 0 && ne20 >= 128 &&
+                        ne20 % 32 == 0 && ne20 >= 64 &&
                         ne11 > ne11_mm_min) {
 
                         id<MTLComputePipelineState> pipeline = nil;
diff --git a/llama.cpp b/llama.cpp
index 1a9fe0c4d..9de4a8602 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13044,6 +13044,9 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_base  = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
     cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
 
+    // this is necessary due to kv_self.n being padded later during inference
+    cparams.n_ctx = GGML_PAD(cparams.n_ctx, 32);
+
     // with causal attention, the batch size is limited by the context size
     cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
     cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
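
Note on the new `cparams.n_ctx = GGML_PAD(cparams.n_ctx, 32);` line in llama.cpp: `GGML_PAD` rounds its first argument up to the next multiple of the second, so the user-requested context size ends up aligned with the padding later applied to `kv_self.n`. Below is a minimal standalone sketch of that rounding behavior; `pad_to` is a hypothetical stand-in for the real macro in ggml.h, assuming the alignment is a power of two (as 32 is here).

```cpp
#include <cstdio>

// Hypothetical stand-in for ggml's GGML_PAD(x, n): round x up to the next
// multiple of n. Assumes n is a power of two, which holds for the 32 used
// in the diff above.
static unsigned pad_to(unsigned x, unsigned n) {
    return (x + n - 1) & ~(n - 1);
}

int main() {
    // A context size that is not already a multiple of 32 gets rounded up;
    // an already-aligned one is left unchanged.
    std::printf("4097 -> %u\n", pad_to(4097, 32)); // 4097 -> 4128
    std::printf("4096 -> %u\n", pad_to(4096, 32)); // 4096 -> 4096
    return 0;
}
```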