Update llama.cpp
Co-authored-by: slaren <slarengh@gmail.com>
This commit is contained in:
parent
464c75c00e
commit
243a3e4bb2
1 changed file with 9 additions and 8 deletions
17
llama.cpp
17
llama.cpp
|
@@ -16430,15 +16430,16 @@ struct llama_context * llama_new_context_with_model(
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
#if defined(GGML_USE_RPC)
|
#if defined(GGML_USE_RPC)
|
||||||
for (int i = 0; i < (int)model->rpc_servers.size(); i++) {
|
if (model->n_gpu_layers > 0) {
|
||||||
const char * endpoint = model->rpc_servers[i].c_str();
|
for (const auto & endpoint : model->rpc_servers) {
|
||||||
ggml_backend_t backend = ggml_backend_rpc_init(endpoint);
|
ggml_backend_t backend = ggml_backend_rpc_init(endpoint.c_str());
|
||||||
if (backend == nullptr) {
|
if (backend == nullptr) {
|
||||||
LLAMA_LOG_ERROR("%s: failed to initialize RPC to '%s'\n", __func__, endpoint);
|
LLAMA_LOG_ERROR("%s: failed to initialize RPC to '%s'\n", __func__, endpoint.c_str());
|
||||||
llama_free(ctx);
|
llama_free(ctx);
|
||||||
return nullptr;
|
return nullptr;
|
||||||
|
}
|
||||||
|
ctx->backends.push_back(backend);
|
||||||
}
|
}
|
||||||
ctx->backends.push_back(backend);
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
ctx->backend_cpu = ggml_backend_cpu_init();
|
ctx->backend_cpu = ggml_backend_cpu_init();
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue