From 31b0d99598214254bb2314c22e754e04b1b4fa69 Mon Sep 17 00:00:00 2001
From: Minsoo Cheong
Date: Fri, 5 Apr 2024 00:37:38 +0900
Subject: [PATCH] test pr

---
 llama.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 9a1c11043..1a54d8d3c 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13985,7 +13985,6 @@ struct llama_model_params llama_model_default_params() {
     // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
     result.n_gpu_layers = 999;
 #endif
-
     return result;
 }
 