force n_gpu_layers = 0 on the simulator
parent 6a8680204c
commit ff87313db8
1 changed file with 5 additions and 1 deletion
@@ -52,8 +52,12 @@ actor LlamaContext {
     static func create_context(path: String) throws -> LlamaContext {
         llama_backend_init(false)
-        let model_params = llama_model_default_params()
+        var model_params = llama_model_default_params()
 
+#if targetEnvironment(simulator)
+        model_params.n_gpu_layers = 0
+        print("Running on simulator, force use n_gpu_layers = 0")
+#endif
         let model = llama_load_model_from_file(path, model_params)
         guard let model else {
             print("Could not load model at \(path)")
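For context, here is a minimal sketch of the parameter setup this change produces, assuming the llama.cpp C API as exposed to Swift by the llama.swiftui example (llama_model_default_params(), the llama_model_params struct, and its n_gpu_layers field); the makeModelParams helper name is hypothetical and used only for illustration:

import llama

// Hypothetical helper: build model parameters, keeping every layer on the
// CPU when the app runs in the iOS simulator, where the Metal-based GPU
// offload used by llama.cpp is not available.
func makeModelParams() -> llama_model_params {
    var params = llama_model_default_params()
#if targetEnvironment(simulator)
    // Force CPU-only inference on the simulator.
    params.n_gpu_layers = 0
    print("Running on simulator, force use n_gpu_layers = 0")
#endif
    return params
}

The #if targetEnvironment(simulator) check is resolved at compile time, so device builds keep the default n_gpu_layers value and pay no runtime cost for the check.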