diff --git a/common/common.cpp b/common/common.cpp
index 022bfe287..8ac557d31 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -815,6 +815,15 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.hf_file = argv[i];
         return true;
     }
+    if (arg == "-hft" || arg == "--hf-token") {
+        params.hf_token = "";
+        if (++i >= argc) {
+            invalid_param = true;
+            return true;
+        }
+        params.hf_token = argv[i];
+        return true;
+    }
     if (arg == "--lora") {
         if (++i >= argc) {
             invalid_param = true;
             return true;
         }
@@ -2280,6 +2289,11 @@ struct llama_model * llama_load_model_from_url(
         return NULL;
     }
 
+    if (!params.hf_token.empty()) {
+        struct curl_slist * headers = curl_slist_append(NULL, ("Authorization: Bearer " + params.hf_token).c_str());
+        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
+    }
+
     if (!llama_download_file(model_url, path_model)) {
         return NULL;
     }
diff --git a/common/common.h b/common/common.h
index 264504830..d4ae6f2fe 100644
--- a/common/common.h
+++ b/common/common.h
@@ -105,6 +105,7 @@ struct gpt_params {
     std::string model_url = ""; // model url to download
     std::string hf_repo = ""; // HF repo
     std::string hf_file = ""; // HF file
+    std::string hf_token = ""; // HF token
     std::string prompt = "";
     std::string prompt_file = ""; // store the external prompt file name
     std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
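
For reference, a minimal, self-contained sketch of the header handling the second hunk is aiming for, assuming a plain libcurl easy handle (the function name `fetch_with_optional_token` and the URL are illustrative, not part of the patch): `curl_slist_append` takes a `const char *`, so the concatenated `std::string` needs `.c_str()`, and the header list has to stay alive for the transfer and be freed afterwards.

```cpp
// Sketch only: attach an optional Hugging Face bearer token to a libcurl request.
#include <curl/curl.h>
#include <string>
#include <cstdio>

static bool fetch_with_optional_token(const std::string & url, const std::string & hf_token) {
    CURL * curl = curl_easy_init();
    if (!curl) {
        return false;
    }

    curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

    // curl_slist_append copies its argument, so passing the temporary string is safe
    struct curl_slist * headers = NULL;
    if (!hf_token.empty()) {
        headers = curl_slist_append(headers, ("Authorization: Bearer " + hf_token).c_str());
        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
    }

    CURLcode res = curl_easy_perform(curl);
    if (res != CURLE_OK) {
        fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
    }

    // the header list must outlive the transfer and be freed afterwards
    curl_slist_free_all(headers); // no-op when headers is NULL
    curl_easy_cleanup(curl);
    return res == CURLE_OK;
}

int main() {
    curl_global_init(CURL_GLOBAL_DEFAULT);
    bool ok = fetch_with_optional_token("https://huggingface.co", ""); // empty token: no auth header
    curl_global_cleanup();
    return ok ? 0 : 1;
}
```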