fix complaint with noreturn

This commit is contained in:
Xuan Son Nguyen 2025-01-13 12:45:32 +01:00
parent 22927b1c0a
commit 8bd5b18ce1
2 changed files with 7 additions and 9 deletions

View file

@@ -138,15 +138,12 @@ static void common_params_handle_model_default(
     // short-hand to avoid specifying --hf-file -> default it to --model
     if (hf_file.empty()) {
         if (model.empty()) {
-            try {
-                auto auto_detected = common_get_hf_file(hf_repo, hf_token);
-                hf_repo = auto_detected.first;
-                hf_file = auto_detected.second;
-                LOG_INF("%s: using hf_file = %s\n", __func__, hf_file.c_str());
-            } catch (std::exception & e) {
-                fprintf(stderr, "%s: %s\n", __func__, e.what());
-                exit(1);
+            auto auto_detected = common_get_hf_file(hf_repo, hf_token);
+            if (auto_detected.first.empty() || auto_detected.second.empty()) {
+                exit(1); // built without CURL, error message already printed
             }
+            hf_repo = auto_detected.first;
+            hf_file = auto_detected.second;
         } else {
             hf_file = model;
         }

View file

@@ -1555,7 +1555,8 @@ struct llama_model * common_load_model_from_hf(
 }
 std::pair<std::string, std::string> common_get_hf_file(const std::string &, const std::string &) {
-    throw std::runtime_error("error: llama.cpp built without libcurl, downloading from Hugging Face not supported.");
+    LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__);
+    return std::make_pair("", "");
 }
 #endif // LLAMA_USE_CURL