From ff87313db883fad33dca8b286b176e7c6ba9d6a5 Mon Sep 17 00:00:00 2001 From: jhen Date: Sun, 17 Dec 2023 11:08:17 +0800 Subject: [PATCH] force n_gpu_layers to 0 on simulator --- examples/llama.swiftui/llama.cpp.swift/LibLlama.swift | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index c55af1c57..3bd144c0f 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -52,8 +52,12 @@ actor LlamaContext { static func create_context(path: String) throws -> LlamaContext { llama_backend_init(false) - let model_params = llama_model_default_params() + var model_params = llama_model_default_params() +#if targetEnvironment(simulator) + model_params.n_gpu_layers = 0 + print("Running on simulator, force use n_gpu_layers = 0") +#endif let model = llama_load_model_from_file(path, model_params) guard let model else { print("Could not load model at \(path)")