From 51b3b56c0891870ea835ae3287a1e67792ca98f9 Mon Sep 17 00:00:00 2001
From: Galunid
Date: Tue, 24 Oct 2023 15:05:58 +0200
Subject: [PATCH] Prevent offloading of more than 33 layers

---
 llama.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index b28ab609a..fc6e8c1a3 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3020,6 +3020,14 @@ static void llm_load_tensors(
                     ggml_backend_type backend_norm;
                     ggml_backend_type backend_output;
 
+                    // Do not allow offloading of more than 33 layers.
+                    // Offloading 34 layers causes the model to respond with the letter 'E' only.
+                    // Offloading 35 layers does not work because of a missing CUDA implementation for rope:
+                    // GGML_ASSERT: ggml-cuda.cu:6402: ne00 == n_dims && "ne00 != n_dims is not implemented for CUDA yet"
+                    if (n_gpu_layers > 33) {
+                        n_gpu_layers = 33;
+                    }
+
                     if (n_gpu_layers > int(n_layer)) {
                         // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
                         // on Windows however this is detrimental unless everything is on the GPU
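
Note (not part of the patch): for reviewers who want to see the guard's effect
in isolation, below is a minimal, self-contained C++ sketch of the same clamp
with a user-visible warning. clamp_gpu_layers, the warning text, and the
example value are hypothetical; they only illustrate why silently lowering a
user-supplied -ngl value may deserve a log line.

#include <cstdio>

// Hypothetical helper mirroring the patch's guard: cap the number of
// offloaded layers at 33 and warn when the cap takes effect.
static int clamp_gpu_layers(int n_gpu_layers) {
    const int max_offload = 33; // offloading 34+ layers is broken, see the patch comment
    if (n_gpu_layers > max_offload) {
        fprintf(stderr,
                "warning: n_gpu_layers reduced from %d to %d "
                "(offloading more layers is not supported here)\n",
                n_gpu_layers, max_offload);
        n_gpu_layers = max_offload;
    }
    return n_gpu_layers;
}

int main() {
    // e.g. the user passed -ngl 35 on the command line
    printf("effective n_gpu_layers: %d\n", clamp_gpu_layers(35));
    return 0;
}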