diff --git a/examples/server/README.md b/examples/server/README.md
index 355601ff4..256a53b7a 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -20,7 +20,9 @@ The project is under active development, and we are [looking for feedback and co
 - `-tb N, --threads-batch N`: Set the number of threads to use during batch and prompt processing. If not specified, the number of threads will be set to the number of threads used for generation.
 - `--threads-http N`: number of threads in the http server pool to process requests (default: `max(std::thread::hardware_concurrency() - 1, --parallel N + 2)`)
 - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
-- `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (e.g https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf).
+- `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (default: unused).
+- `-hfr REPO, --hf-repo REPO`: Hugging Face model repository (default: unused).
+- `-hff FILE, --hf-file FILE`: Hugging Face model file (default: unused).
 - `-a ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
 - `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096.
 - `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 27bd2dd70..b02c2546e 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2208,7 +2208,11 @@ static void server_print_usage(const char * argv0, const gpt_params & params, co
     printf("  -m FNAME, --model FNAME\n");
     printf("                            model path (default: %s)\n", params.model.c_str());
     printf("  -mu MODEL_URL, --model-url MODEL_URL\n");
-    printf("                            model download url (default: %s)\n", params.model_url.c_str());
+    printf("                            model download url (default: unused)\n");
+    printf("  -hfr REPO, --hf-repo REPO\n");
+    printf("                            Hugging Face model repository (default: unused)\n");
+    printf("  -hff FILE, --hf-file FILE\n");
+    printf("                            Hugging Face model file (default: unused)\n");
     printf("  -a ALIAS, --alias ALIAS\n");
     printf("                            set an alias for the model, will be added as `model` field in completion response\n");
     printf("  --lora FNAME              apply LoRA adapter (implies --no-mmap)\n");
@@ -2337,6 +2341,18 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
                 break;
             }
             params.model_url = argv[i];
+        } else if (arg == "-hfr" || arg == "--hf-repo") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.hf_repo = argv[i];
+        } else if (arg == "-hff" || arg == "--hf-file") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.hf_file = argv[i];
         } else if (arg == "-a" || arg == "--alias") {
             if (++i >= argc) {
                 invalid_param = true;
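
For context on how the two new flags relate to the existing `--model-url` option: the removed README example shows the `resolve/main` URL layout that `--model-url` expects, and `--hf-repo`/`--hf-file` split that same location into a repository name and a file path. Below is a minimal sketch of that mapping, assuming the standard Hugging Face `resolve/main` layout; the helper name `build_hf_url` is hypothetical and not part of this patch.

```cpp
#include <string>

// Hypothetical helper (not part of this patch) illustrating how an
// --hf-repo / --hf-file pair could be combined into the same kind of URL
// that --model-url accepts, following the
// https://huggingface.co/<repo>/resolve/main/<file> layout.
static std::string build_hf_url(const std::string & hf_repo, const std::string & hf_file) {
    return "https://huggingface.co/" + hf_repo + "/resolve/main/" + hf_file;
}

// Example: build_hf_url("ggml-org/models", "phi-2/ggml-model-q4_0.gguf")
// yields https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf
```

With the options added above, the URL from the removed README line would be expressed as something like `./server -hfr ggml-org/models -hff phi-2/ggml-model-q4_0.gguf`.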