metal : pad n_ctx by 32 (#6177)
* metal : require ne00 >= 128 for mat-mat kernels

ggml-ci

* llama : pad n_ctx by 32

ggml-ci
This commit is contained in:
parent 59c17f02de
commit 95d576b48e

4 changed files with 14 additions and 2 deletions
llama.cpp
@@ -13044,6 +13044,9 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_base   = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
     cparams.rope_freq_scale  = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;

+    // this is necessary due to kv_self.n being padded later during inference
+    cparams.n_ctx            = GGML_PAD(cparams.n_ctx, 32);
+
     // with causal attention, the batch size is limited by the context size
     cparams.n_batch          = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
     cparams.n_ubatch         = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
|
Loading…
Add table
Add a link
Reference in a new issue