llama_load_model_from_url: typo

This commit is contained in:
Pierrick HYMBERT 2024-03-16 14:26:17 +01:00
parent 2c3a00e270
commit 4135d4a505
2 changed files with 3 additions and 3 deletions

View file

@ -1390,7 +1390,7 @@ void llama_batch_add(
#ifdef LLAMA_USE_CURL
struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model,
struct llama_model_params params) {
// Initialize libcurl
// Initialize libcurl globally
curl_global_init(CURL_GLOBAL_DEFAULT);
auto curl = curl_easy_init();
@ -1400,7 +1400,7 @@ struct llama_model * llama_load_model_from_url(const char * model_url, const cha
return nullptr;
}
// Set the URL
// Set the URL, allow following HTTP redirection, and display download progress
curl_easy_setopt(curl, CURLOPT_URL, model_url);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);

View file

@ -95,7 +95,7 @@ struct gpt_params {
struct llama_sampling_params sparams;
std::string model = "models/7B/ggml-model-f16.gguf"; // model path
std::string model_url = ""; // model path
std::string model_url = ""; // model URL to download
std::string model_draft = ""; // draft model for speculative decoding
std::string model_alias = "unknown"; // model alias
std::string prompt = "";