llama_load_model_from_url: try to make the windows build passing

This commit is contained in:
Pierrick HYMBERT 2024-03-16 14:08:21 +01:00
parent df0d82289c
commit 80bec9890a

View file

@@ -1394,7 +1394,6 @@ struct llama_model * llama_load_model_from_url(const char * model_url, const char *
     curl_global_init(CURL_GLOBAL_DEFAULT);
     auto curl = curl_easy_init();
     if (!curl) {
         curl_global_cleanup();
         fprintf(stderr, "%s: error initializing lib curl\n", __func__);
@@ -1445,11 +1444,13 @@ struct llama_model * llama_load_model_from_url(const char * model_url, const char *
     return llama_load_model_from_file(path_model, params);
 }
 #else
-struct llama_model * llama_load_model_from_url(const char *, const char *,
-                                               struct llama_model_params) {
-    fprintf(stderr, "%s: llama.cpp built without SSL support, downloading from url not supported.\n", __func__);
+struct llama_model *llama_load_model_from_url(const char * /*model_url*/, const char * /*path_model*/,
+                                              struct llama_model_params /*params*/) {
+    fprintf(stderr, "%s: llama.cpp built without curl support, downloading from an url not supported.\n", __func__);
     return nullptr;
 }
 #endif

 std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params) {