diff --git a/common/arg.cpp b/common/arg.cpp
index 8c7dcc751..1457a360f 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -138,15 +138,12 @@ static void common_params_handle_model_default(
     // short-hand to avoid specifying --hf-file -> default it to --model
     if (hf_file.empty()) {
         if (model.empty()) {
-            try {
-                auto auto_detected = common_get_hf_file(hf_repo, hf_token);
-                hf_repo = auto_detected.first;
-                hf_file = auto_detected.second;
-                LOG_INF("%s: using hf_file = %s\n", __func__, hf_file.c_str());
-            } catch (std::exception & e) {
-                fprintf(stderr, "%s: %s\n", __func__, e.what());
-                exit(1);
+            auto auto_detected = common_get_hf_file(hf_repo, hf_token);
+            if (auto_detected.first.empty() || auto_detected.second.empty()) {
+                exit(1); // built without CURL, error message already printed
             }
+            hf_repo = auto_detected.first;
+            hf_file = auto_detected.second;
         } else {
             hf_file = model;
         }
diff --git a/common/common.cpp b/common/common.cpp
index c0a52d8d1..a6f9252b2 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1555,7 +1555,8 @@ struct llama_model * common_load_model_from_hf(
 }
 
 std::pair<std::string, std::string> common_get_hf_file(const std::string &, const std::string &) {
-    throw std::runtime_error("error: llama.cpp built without libcurl, downloading from Hugging Face not supported.");
+    LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__);
+    return std::make_pair("", "");
 }
 
 #endif // LLAMA_USE_CURL