From f64c975168382db2ce499edd94ea4c171c64bca6 Mon Sep 17 00:00:00 2001
From: Max Krasnyansky
Date: Sat, 24 Aug 2024 15:07:54 -0700
Subject: [PATCH] threadpool: fix swift wrapper errors due to n_threads int
 type cleanup

---
 examples/llama.swiftui/llama.cpp.swift/LibLlama.swift | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
index 58c32ca53..48b7840ae 100644
--- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
+++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
@@ -71,8 +71,8 @@ actor LlamaContext {
         var ctx_params = llama_context_default_params()
         ctx_params.seed  = 1234
         ctx_params.n_ctx = 2048
-        ctx_params.n_threads       = UInt32(n_threads)
-        ctx_params.n_threads_batch = UInt32(n_threads)
+        ctx_params.n_threads       = Int32(n_threads)
+        ctx_params.n_threads_batch = Int32(n_threads)
         let context = llama_new_context_with_model(model, ctx_params)
         guard let context else {
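
Note (not part of the patch): a minimal, hedged Swift sketch of why the conversion changes. It assumes the upstream "int type cleanup" made n_threads / n_threads_batch signed 32-bit fields; the thread-count heuristic below is hypothetical and only stands in for whatever value the wrapper computes.

    import Foundation

    // Hypothetical thread-count heuristic, for illustration only.
    let n_threads = max(1, min(8, ProcessInfo.processInfo.processorCount - 2))

    // Assuming the context params now expect signed 32-bit values, the Swift
    // side must convert with Int32(...) rather than UInt32(...).
    let n_threads_i32 = Int32(n_threads)
    print("using \(n_threads_i32) threads")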