From e7e03814114bd8aa6cde50c1e1a644a2e3c6e177 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Tue, 4 Jun 2024 10:50:34 +0300
Subject: [PATCH] examples : migrate to gpt_params

ggml-ci
---
 common/common.cpp            |  9 +++--
 examples/batched/README.md   |  2 +-
 examples/batched/batched.cpp | 73 ++++++++++++------------------------
 examples/simple/README.md    |  2 +-
 examples/simple/simple.cpp   | 50 +++++++++++-------------
 5 files changed, 53 insertions(+), 83 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 912e49fad..5132e446c 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1563,7 +1563,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", " --keep N", "number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep });
     options.push_back({ "*", " --chunks N", "max number of chunks to process (default: %d, -1 = all)", params.n_chunks });
     options.push_back({ "*", "-fa, --flash-attn", "enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled" });
-    options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with (default: empty)" });
+    options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with (default: '%s')", params.prompt.c_str() });
     options.push_back({ "*", "-f, --file FNAME", "a file containing the prompt (default: none)" });
     options.push_back({ "*", "-bf, --binary-file FNAME", "binary file containing the prompt (default: none)" });
     options.push_back({ "*", "-e, --escape", "process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)" });
@@ -1626,6 +1626,10 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
                 "JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\n"
                 "For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead" });
 
+    options.push_back({ "embedding" });
+    options.push_back({ "embedding", " --pooling {none,mean,cls}",
+                "pooling type for embeddings, use model default if unspecified" });
+
     options.push_back({ "context hacking" });
     options.push_back({ "*", " --rope-scaling {none,linear,yarn}",
                 "RoPE frequency scaling method, defaults to linear unless specified by the model" });
@@ -1644,9 +1648,6 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-ctk, --cache-type-k TYPE", "KV cache data type for K (default: %s)", params.cache_type_k.c_str() });
     options.push_back({ "*", "-ctv, --cache-type-v TYPE", "KV cache data type for V (default: %s)", params.cache_type_v.c_str() });
 
-    options.push_back({ "embedding", " --pooling {none,mean,cls}",
-                "pooling type for embeddings, use model default if unspecified" });
-
     options.push_back({ "perplexity" });
     options.push_back({ "perplexity", " --all-logits", "return logits for all tokens in the batch (default: %s)", params.logits_all ? "true" : "false" });
     options.push_back({ "perplexity", " --hellaswag", "compute HellaSwag score over random tasks from datafile supplied with -f" });
diff --git a/examples/batched/README.md b/examples/batched/README.md
index 5d7303317..ed204c308 100644
--- a/examples/batched/README.md
+++ b/examples/batched/README.md
@@ -3,7 +3,7 @@
 The example demonstrates batched generation from a given prompt
 
 ```bash
-./batched ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is" 4
+./batched -m ./models/llama-7b-v2/ggml-model-f16.gguf -p "Hello my name is" -np 4
 
 ...
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 591bc6e57..62d9b144d 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -7,48 +7,31 @@
 #include <string>
 #include <vector>
 
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+    gpt_params_print_usage(argc, argv, params);
+
+    LOG_TEE("\nexample usage:\n");
+    LOG_TEE("\n %s -m model.gguf -p \"Hello my name is\" -n 32 -np 4\n", argv[0]);
+    LOG_TEE("\n");
+}
+
 int main(int argc, char ** argv) {
     gpt_params params;
 
-    if (argc == 1 || argv[1][0] == '-') {
-        printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL] [LEN] [NGL]\n" , argv[0]);
-        return 1 ;
+    params.prompt = "Hello my name is";
+    params.n_predict = 32;
+
+    if (!gpt_params_parse(argc, argv, params)) {
+        print_usage(argc, argv, params);
+        return 1;
     }
+
     // number of parallel batches
-    int n_parallel = 1;
+    int n_parallel = params.n_parallel;
 
     // total length of the sequences including the prompt
-    int n_len = 32;
-
-    // number of layers to offload to the GPU
-    int n_gpu_layers = 0;
-
-    if (argc >= 2) {
-        params.model = argv[1];
-    }
-
-    if (argc >= 3) {
-        params.prompt = argv[2];
-    }
-
-    if (argc >= 4) {
-        n_parallel = std::atoi(argv[3]);
-    }
-
-    if (argc >= 5) {
-        n_len = std::atoi(argv[4]);
-    }
-
-    if (argc >= 6) {
-        n_gpu_layers = std::atoi(argv[5]);
-    }
-
-    if (params.prompt.empty()) {
-        params.prompt = "Hello my name is";
-    }
-
-    string_process_escapes(params.prompt);
+    int n_predict = 32;
 
     // init LLM
@@ -57,9 +40,7 @@ int main(int argc, char ** argv) {
 
     // initialize the model
 
-    llama_model_params model_params = llama_model_default_params();
-
-    model_params.n_gpu_layers = n_gpu_layers;
+    llama_model_params model_params = llama_model_params_from_gpt_params(params);
 
     llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
 
@@ -73,18 +54,14 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> tokens_list;
     tokens_list = ::llama_tokenize(model, params.prompt, true);
 
-    const int n_kv_req = tokens_list.size() + (n_len - tokens_list.size())*n_parallel;
+    const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;
 
     // initialize the context
 
-    llama_context_params ctx_params = llama_context_default_params();
+    llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
 
-    ctx_params.seed = 1234;
     ctx_params.n_ctx = n_kv_req;
-    ctx_params.n_batch = std::max(n_len, n_parallel);
-    ctx_params.n_seq_max = n_parallel;
-    ctx_params.n_threads = params.n_threads;
-    ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+    ctx_params.n_batch = std::max(n_predict, n_parallel);
 
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);
 
@@ -93,9 +70,9 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    const int n_ctx = llama_n_ctx(ctx);
+    const int n_ctx = llama_n_ctx(ctx);
 
-    LOG_TEE("\n%s: n_len = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_len, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req);
+    LOG_TEE("\n%s: n_predict = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req);
 
     // make sure the KV cache is big enough to hold all the prompt and generated tokens
     if (n_kv_req > n_ctx) {
@@ -156,7 +133,7 @@ int main(int argc, char ** argv) {
 
     const auto t_main_start = ggml_time_us();
 
-    while (n_cur <= n_len) {
+    while (n_cur <= n_predict) {
         // prepare the next batch
         llama_batch_clear(batch);
 
@@ -192,7 +169,7 @@ int main(int argc, char ** argv) {
            //const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
 
            // is it an end of generation? -> mark the stream as finished
-           if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) {
+           if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
                i_batch[i] = -1;
                LOG_TEE("\n");
                if (n_parallel > 1) {
diff --git a/examples/simple/README.md b/examples/simple/README.md
index 5d24b1046..49e24501c 100644
--- a/examples/simple/README.md
+++ b/examples/simple/README.md
@@ -3,7 +3,7 @@
 The purpose of this example is to demonstrate a minimal usage of llama.cpp for generating text with a given prompt.
 
 ```bash
-./simple ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is"
+./simple -m ./models/llama-7b-v2/ggml-model-f16.gguf -p "Hello my name is"
 
 ...
diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp
index b0f8e0fdc..69a92cf7d 100644
--- a/examples/simple/simple.cpp
+++ b/examples/simple/simple.cpp
@@ -6,28 +6,27 @@
 #include <string>
 #include <vector>
 
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+    gpt_params_print_usage(argc, argv, params);
+
+    LOG_TEE("\nexample usage:\n");
+    LOG_TEE("\n %s -m model.gguf -p \"Hello my name is\" -n 32\n", argv[0]);
+    LOG_TEE("\n");
+}
+
 int main(int argc, char ** argv) {
     gpt_params params;
 
-    if (argc == 1 || argv[1][0] == '-') {
-        printf("usage: %s MODEL_PATH [PROMPT]\n" , argv[0]);
-        return 1 ;
-    }
+    params.prompt = "Hello my name is";
+    params.n_predict = 32;
 
-    if (argc >= 2) {
-        params.model = argv[1];
-    }
-
-    if (argc >= 3) {
-        params.prompt = argv[2];
-    }
-
-    if (params.prompt.empty()) {
-        params.prompt = "Hello my name is";
+    if (!gpt_params_parse(argc, argv, params)) {
+        print_usage(argc, argv, params);
+        return 1;
     }
 
     // total length of the sequence including the prompt
-    const int n_len = 32;
+    const int n_predict = params.n_predict;
 
     // init LLM
@@ -36,9 +35,7 @@ int main(int argc, char ** argv) {
 
     // initialize the model
 
-    llama_model_params model_params = llama_model_default_params();
-
-    // model_params.n_gpu_layers = 99; // offload all layers to the GPU
+    llama_model_params model_params = llama_model_params_from_gpt_params(params);
 
     llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
 
@@ -49,12 +46,7 @@ int main(int argc, char ** argv) {
 
     // initialize the context
 
-    llama_context_params ctx_params = llama_context_default_params();
-
-    ctx_params.seed = 1234;
-    ctx_params.n_ctx = 2048;
-    ctx_params.n_threads = params.n_threads;
-    ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+    llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
 
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);
 
@@ -69,14 +61,14 @@ int main(int argc, char ** argv) {
     tokens_list = ::llama_tokenize(ctx, params.prompt, true);
 
     const int n_ctx = llama_n_ctx(ctx);
-    const int n_kv_req = tokens_list.size() + (n_len - tokens_list.size());
+    const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size());
 
-    LOG_TEE("\n%s: n_len = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_len, n_ctx, n_kv_req);
+    LOG_TEE("\n%s: n_predict = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, n_kv_req);
 
     // make sure the KV cache is big enough to hold all the prompt and generated tokens
     if (n_kv_req > n_ctx) {
         LOG_TEE("%s: error: n_kv_req > n_ctx, the required KV cache size is not big enough\n", __func__);
-        LOG_TEE("%s: either reduce n_len or increase n_ctx\n", __func__);
+        LOG_TEE("%s: either reduce n_predict or increase n_ctx\n", __func__);
         return 1;
     }
@@ -115,7 +107,7 @@ int main(int argc, char ** argv) {
 
     const auto t_main_start = ggml_time_us();
 
-    while (n_cur <= n_len) {
+    while (n_cur <= n_predict) {
         // sample the next token
         {
             auto n_vocab = llama_n_vocab(model);
@@ -134,7 +126,7 @@ int main(int argc, char ** argv) {
             const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
 
             // is it an end of generation?
-            if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) {
+            if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
                 LOG_TEE("\n");
 
                 break;
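
After this change both examples share the same initialization skeleton: set example-specific defaults on `gpt_params`, let `gpt_params_parse` override them from the command line, then derive model and context parameters from the parsed struct instead of filling `llama_model_params` / `llama_context_params` by hand. The sketch below is not part of the commit; it is a condensed illustration of that skeleton using only the helpers visible in the patch (`gpt_params_parse`, `gpt_params_print_usage`, `llama_model_params_from_gpt_params`, `llama_context_params_from_gpt_params`, `::llama_tokenize`) plus the standard `llama_free` / `llama_free_model` cleanup, with error handling and the decode loop omitted.

```cpp
// Condensed sketch of the gpt_params-driven init used by simple.cpp and
// batched.cpp after the migration (illustrative only, not from the patch).
#include "common.h"
#include "llama.h"

#include <vector>

int main(int argc, char ** argv) {
    gpt_params params;

    // example-specific defaults, overridable via -m / -p / -n / -np etc.
    params.prompt    = "Hello my name is";
    params.n_predict = 32;

    if (!gpt_params_parse(argc, argv, params)) {
        gpt_params_print_usage(argc, argv, params);
        return 1;
    }

    // model and context params are derived from gpt_params instead of being
    // assembled field by field from positional arguments
    llama_model_params   model_params = llama_model_params_from_gpt_params(params);
    llama_context_params ctx_params   = llama_context_params_from_gpt_params(params);

    llama_model   * model = llama_load_model_from_file(params.model.c_str(), model_params);
    llama_context * ctx   = llama_new_context_with_model(model, ctx_params);

    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);

    // ... evaluate the prompt and sample up to params.n_predict tokens,
    //     as shown in the examples above ...

    llama_free(ctx);
    llama_free_model(model);
    return 0;
}
```

This shared skeleton is also why both READMEs switch from positional arguments to the common `-m` / `-p` style flags: the examples no longer parse `argv` themselves.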