diff --git a/llama.cpp b/llama.cpp
index 9a1c11043..1a54d8d3c 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13985,7 +13985,6 @@ struct llama_model_params llama_model_default_params() {
     // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
     result.n_gpu_layers = 999;
 #endif
-
     return result;
 }
 