diff --git a/common/common.cpp b/common/common.cpp
index 96568d24c..a2700f135 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -190,15 +190,9 @@ int32_t cpu_get_num_math() {
 // CLI argument parsing
 //
 
-void gpt_params_handle_auth_token(gpt_params & params) {
-    if (params.hf_token.empty() && params.auth_token.empty()) {
-        hf_get_token_from_env(params);
-    }
-    if (!params.hf_token.empty() && !params.auth_token.empty()) {
-        throw std::invalid_argument("error: --hf-token and --bearer-token are mutually exclusive\n");
-    }
-    if (!params.hf_token.empty()) {
-        params.auth_token = params.hf_token;
+void gpt_params_handle_hf_token(gpt_params & params) {
+    if (params.hf_token.empty() && std::getenv("HF_TOKEN")) {
+        params.hf_token = std::getenv("HF_TOKEN");
     }
 }
 
@@ -249,7 +243,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
 
     gpt_params_handle_model_default(params);
 
-    gpt_params_handle_auth_token(params);
+    gpt_params_handle_hf_token(params);
 
     if (params.escape) {
         string_process_escapes(params.prompt);
@@ -658,15 +652,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
             params.model_url = argv[i];
             return true;
         }
-        if (arg == "-bt" || arg == "--bearer-token") {
-            if (++i >= argc) {
-                invalid_param = true;
-                return true;
-            }
-            params.auth_token = argv[i];
-            return true;
-        }
-        if (arg == "-hft" || arg == "--hr-token") {
+        if (arg == "-hft" || arg == "--hf-token") {
             if (++i >= argc) {
                 invalid_param = true;
                 return true;
@@ -1589,10 +1575,9 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
                                                                         "or --model-url if set, otherwise %s)", DEFAULT_MODEL_PATH });
     options.push_back({ "*",           "-md, --model-draft FNAME",      "draft model for speculative decoding (default: unused)" });
     options.push_back({ "*",           "-mu, --model-url MODEL_URL",    "model download url (default: unused)" });
-    options.push_back({ "*",           "-bt, --bearer-token TOKEN",     "model download bearer token (default: unused)" });
     options.push_back({ "*",           "-hfr, --hf-repo REPO",          "Hugging Face model repository (default: unused)" });
     options.push_back({ "*",           "-hff, --hf-file FILE",          "Hugging Face model file (default: unused)" });
-    options.push_back({ "*",           "-hft, --hf-token TOKEN",        "Hugging Face access token (default: unused)" });
+    options.push_back({ "*",           "-hft, --hf-token TOKEN",        "Hugging Face access token (default: value from HF_TOKEN environment variable)" });
 
     options.push_back({ "retrieval" });
     options.push_back({ "retrieval",   "    --context-file FNAME",      "file to load context from (repeat to specify multiple files)" });
@@ -2021,12 +2006,6 @@ std::string fs_get_cache_file(const std::string & filename) {
     return cache_directory + filename;
 }
 
-void hf_get_token_from_env(gpt_params & params) {
-    if (std::getenv("HF_TOKEN")) {
-        params.hf_token = std::getenv("HF_TOKEN");
-    }
-}
-
 //
 // Model utils
 //
@@ -2038,9 +2017,9 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
     llama_model * model = nullptr;
 
     if (!params.hf_repo.empty() && !params.hf_file.empty()) {
-        model = llama_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.auth_token.c_str(), mparams);
+        model = llama_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
     } else if (!params.model_url.empty()) {
-        model = llama_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.auth_token.c_str(), mparams);
+        model = llama_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
     } else {
         model = llama_load_model_from_file(params.model.c_str(), mparams);
     }
@@ -2227,7 +2206,7 @@ static bool starts_with(const std::string & str, const std::string & prefix) {
     return str.rfind(prefix, 0) == 0;
 }
 
-static bool llama_download_file(const std::string & url, const std::string & path, const std::string & auth_token) {
+static bool llama_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
 
     // Initialize libcurl
     std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> curl(curl_easy_init(), &curl_easy_cleanup);
@@ -2243,9 +2222,9 @@ static bool llama_download_file(const std::string & url, const std::string & pat
     curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);
 
     // Check if hf-token or bearer-token was specified
-    if (!auth_token.empty()) {
+    if (!hf_token.empty()) {
         std::string auth_header = "Authorization: Bearer ";
-        auth_header += auth_token.c_str();
+        auth_header += hf_token.c_str();
         struct curl_slist *http_headers = NULL;
         http_headers = curl_slist_append(http_headers, auth_header.c_str());
         curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers);
@@ -2446,7 +2425,7 @@ static bool llama_download_file(const std::string & url, const std::string & pat
 struct llama_model * llama_load_model_from_url(
         const char * model_url,
         const char * path_model,
-        const char * auth_token,
+        const char * hf_token,
         const struct llama_model_params & params) {
     // Basic validation of the model_url
     if (!model_url || strlen(model_url) == 0) {
@@ -2454,7 +2433,7 @@ struct llama_model * llama_load_model_from_url(
         return NULL;
     }
 
-    if (!llama_download_file(model_url, path_model, auth_token)) {
+    if (!llama_download_file(model_url, path_model, hf_token)) {
         return NULL;
     }
 
@@ -2502,14 +2481,14 @@ struct llama_model * llama_load_model_from_url(
         // Prepare download in parallel
         std::vector<std::future<bool>> futures_download;
         for (int idx = 1; idx < n_split; idx++) {
-            futures_download.push_back(std::async(std::launch::async, [&split_prefix, &split_url_prefix, &n_split, auth_token](int download_idx) -> bool {
+            futures_download.push_back(std::async(std::launch::async, [&split_prefix, &split_url_prefix, &n_split, hf_token](int download_idx) -> bool {
                 char split_path[PATH_MAX] = {0};
                 llama_split_path(split_path, sizeof(split_path), split_prefix, download_idx, n_split);
 
                 char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
                 llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split);
 
-                return llama_download_file(split_url, split_path, auth_token);
+                return llama_download_file(split_url, split_path, hf_token);
             }, idx));
         }
 
@@ -2528,7 +2507,7 @@ struct llama_model * llama_load_model_from_hf(
         const char * repo,
         const char * model,
         const char * path_model,
-        const char * auth_token,
+        const char * hf_token,
         const struct llama_model_params & params) {
     // construct hugging face model url:
     //
@@ -2544,7 +2523,7 @@ struct llama_model * llama_load_model_from_hf(
     model_url += "/resolve/main/";
     model_url += model;
 
-    return llama_load_model_from_url(model_url.c_str(), path_model, auth_token, params);
+    return llama_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
 }
 
 #else
@@ -2552,7 +2531,7 @@ struct llama_model * llama_load_model_from_hf(
 struct llama_model * llama_load_model_from_url(
         const char * /*model_url*/,
         const char * /*path_model*/,
-        const char * /*auth_token*/
+        const char * /*hf_token*/,
         const struct llama_model_params & /*params*/) {
     fprintf(stderr, "%s: llama.cpp built without libcurl, downloading from an url not supported.\n", __func__);
     return nullptr;
@@ -2562,7 +2541,7 @@ struct llama_model * llama_load_model_from_hf(
         const char * /*repo*/,
         const char * /*model*/,
         const char * /*path_model*/,
-        const char * /*auth_token*/,
+        const char * /*hf_token*/,
         const struct llama_model_params & /*params*/) {
     fprintf(stderr, "%s: llama.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__);
     return nullptr;
diff --git a/common/common.h b/common/common.h
index a5be1ef9b..4f9f8c1fc 100644
--- a/common/common.h
+++ b/common/common.h
@@ -107,7 +107,6 @@ struct gpt_params {
     std::string model_draft = ""; // draft model for speculative decoding
     std::string model_alias = "unknown"; // model alias
     std::string model_url   = ""; // model url to download
-    std::string auth_token  = ""; // auth bearer token
     std::string hf_token    = ""; // HF token
     std::string hf_repo     = ""; // HF repo
     std::string hf_file     = ""; // HF file
@@ -257,7 +256,7 @@ struct gpt_params {
     bool spm_infill = false; // suffix/prefix/middle pattern for infill
 };
 
-void gpt_params_handle_auth_token(gpt_params & params);
+void gpt_params_handle_hf_token(gpt_params & params);
 void gpt_params_handle_model_default(gpt_params & params);
 
 bool gpt_params_parse_ex   (int argc, char ** argv, gpt_params & params);
@@ -300,7 +299,6 @@ void string_process_escapes(std::string & input);
 
 bool fs_validate_filename(const std::string & filename);
 bool fs_create_directory_with_parents(const std::string & path);
-void hf_get_token_from_env(gpt_params & params);
 
 std::string fs_get_cache_directory();
 std::string fs_get_cache_file(const std::string & filename);
@@ -314,8 +312,8 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
 struct llama_model_params   llama_model_params_from_gpt_params  (const gpt_params & params);
 struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
 
-struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const char * auth_token, const struct llama_model_params & params);
-struct llama_model * llama_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * auth_token, const struct llama_model_params & params);
+struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params);
+struct llama_model * llama_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct llama_model_params & params);
 
 // Batch utils
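
For reviewers, a minimal standalone sketch of the token-resolution behavior this patch settles on (the gpt_params_stub type, handle_hf_token wrapper, and main() harness below are hypothetical, for illustration only; the real logic is gpt_params_handle_hf_token in the diff above): an explicit --hf-token value takes precedence, HF_TOKEN from the environment is only a fallback, and the old mutual-exclusion check disappears because --bearer-token no longer exists.

// sketch.cpp - illustration only, not part of the patch
#include <cstdio>
#include <cstdlib>
#include <string>

struct gpt_params_stub {   // hypothetical stand-in for gpt_params
    std::string hf_token;  // set by -hft / --hf-token when given
};

// mirrors gpt_params_handle_hf_token(): the env var is consulted only
// when no token was passed on the command line
static void handle_hf_token(gpt_params_stub & params) {
    if (params.hf_token.empty() && std::getenv("HF_TOKEN")) {
        params.hf_token = std::getenv("HF_TOKEN");
    }
}

int main() {
    gpt_params_stub params;          // no --hf-token given
    handle_hf_token(params);         // picks up HF_TOKEN if exported
    std::printf("hf_token = '%s'\n", params.hf_token.c_str());

    params.hf_token = "hf_explicit"; // as if --hf-token had been passed
    handle_hf_token(params);         // env var must NOT override it
    std::printf("hf_token = '%s'\n", params.hf_token.c_str());
    return 0;
}

Compiled with, say, g++ -o sketch sketch.cpp and run as HF_TOKEN=hf_env ./sketch, this should print hf_env on the first line and hf_explicit on the second, matching the flag-over-environment precedence the patch implements.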