Merge 200c39cc6b into bde7cd3cd9

commit 56894140be
4 changed files with 73 additions and 48 deletions
examples/batched-bench/README.md
@@ -10,16 +10,16 @@ There are 2 modes of operation:
 - `prompt is shared` - there is a common prompt of size `PP` used by all batches (i.e. `N_KV = PP + B*TG`)
 
 ```bash
-./batched-bench MODEL_PATH [N_KV_MAX] [N_BATCH] [N_UBATCH] [IS_PP_SHARED] [NGL] [MMQ] <PP> <TG> <PL>
+./batched-bench MODEL_PATH [N_KV_MAX] [N_BATCH] [N_UBATCH] [FATTN] [IS_PP_SHARED] [NGL] [NT] [NTB] <PP> <TG> <PL>
 
 # LLaMA 7B, F16, N_KV_MAX = 16384 (8GB), prompt not shared
-./batched-bench ./models/llama-7b/ggml-model-f16.gguf 16384 2048 512 0 99
+./batched-bench ./models/llama-7b/ggml-model-f16.gguf 16384 2048 512 0 0 99
 
 # LLaMA 7B, Q8_0, N_KV_MAX = 16384 (8GB), prompt is shared
-./batched-bench ./models/llama-7b/ggml-model-q8_0.gguf 16384 2048 512 1 99
+./batched-bench ./models/llama-7b/ggml-model-q8_0.gguf 16384 2048 512 0 1 99
 
 # custom set of batches
-./batched-bench ./models/llama-7b/ggml-model-q8_0.gguf 2048 512 512 0 999 0 128,256,512 128,256 1,2,4,8,16,32
+./batched-bench ./models/llama-7b/ggml-model-q8_0.gguf 16384 2048 512 0 0 999 8 8 128,256,512 128,256 1,2,4,8,16,32
 ```
 
 ## Sample results
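For a rough sense of the two modes above: with `PP = 512`, `TG = 128` and `B = 8`, a shared prompt needs `N_KV = 512 + 8*128 = 1536` KV cells, while unshared prompts need `B*(PP + TG) = 8*(512 + 128) = 5120`. The "(8GB)" annotation is consistent with an F16 KV cache for LLaMA 7B (32 layers, embedding size 4096, one K and one V entry per layer and token): `16384 * 32 * 4096 * 2 * 2 bytes = 8 GiB`.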
examples/batched-bench/batched-bench.cpp
@@ -32,18 +32,20 @@ int main(int argc, char ** argv) {
     gpt_params params;
 
     if (argc == 1 || argv[1][0] == '-') {
-        printf("usage: %s MODEL_PATH [N_KV_MAX] [N_BATCH] [N_UBATCH] [FATTN] [IS_PP_SHARED] [NGL] <PP> <TG> <PL>\n" , argv[0]);
+        printf("usage: %s MODEL_PATH [N_KV_MAX] [N_BATCH] [N_UBATCH] [FATTN] [IS_PP_SHARED] [NGL] [NT] [NTB] <PP> <TG> <PL>\n", argv[0]);
         printf(" <PP>, <TG> and PL are comma-separated lists of numbers without spaces\n\n");
-        printf(" example: %s ggml-model-f16.gguf 2048 2048 512 0 999 128,256,512 128,256 1,2,4,8,16,32\n\n", argv[0]);
+        printf(" example: %s ggml-model-f16.gguf 16384 2048 512 0 0 999 8 8 128,256,512 128,256 1,2,4,8,16,32\n\n", argv[0]);
         return 1 ;
     }
 
-    int n_kv_max     = 2048;
-    int n_batch      = 2048;
-    int n_ubatch     = 512;
-    bool flash_attn  = false;
-    int is_pp_shared = 0;
-    int n_gpu_layers = 0;
+    int n_kv_max        = 16384;
+    int n_batch         = 2048;
+    int n_ubatch        = 512;
+    bool flash_attn     = false;
+    int is_pp_shared    = 0;
+    int n_gpu_layers    = 0;
+    int n_threads       = cpu_get_num_math();
+    int n_threads_batch = -1;
 
     std::vector<int> n_pp = { 128, 256, 512, 1024, 2048, 3584, 7680, };
     std::vector<int> n_tg = { 128, 256, };
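With the new defaults, running the benchmark without optional arguments now targets a 16384-token KV cache; `cpu_get_num_math()` is the helper from llama.cpp's common code that picks a thread count suited to math-heavy work, and `n_threads_batch = -1` defers to `n_threads` (see the context-params hunk below).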
@@ -79,15 +81,23 @@ int main(int argc, char ** argv) {
     }
 
     if (argc >= 9) {
-        n_pp = parse_list(argv[8]);
+        n_threads = std::atoi(argv[8]);
     }
 
     if (argc >= 10) {
-        n_tg = parse_list(argv[9]);
+        n_threads_batch = std::atoi(argv[9]);
     }
 
     if (argc >= 11) {
-        n_pl = parse_list(argv[10]);
+        n_pp = parse_list(argv[10]);
     }
 
+    if (argc >= 12) {
+        n_tg = parse_list(argv[11]);
+    }
+
+    if (argc >= 13) {
+        n_pl = parse_list(argv[12]);
+    }
+
     // init LLM
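`parse_list` is defined elsewhere in the example and its body is not part of this diff. A minimal equivalent of what its call sites imply (comma-separated integers without spaces) might look like this sketch:

```cpp
#include <cstdlib>
#include <sstream>
#include <string>
#include <vector>

// Split a comma-separated argument such as "128,256,512" into integers.
// Illustration only: the real helper in the example may differ in details.
static std::vector<int> parse_list(const char * arg) {
    std::vector<int> values;
    std::stringstream ss(arg);
    for (std::string item; std::getline(ss, item, ','); ) {
        values.push_back(std::atoi(item.c_str()));
    }
    return values;
}
```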
@@ -113,14 +123,13 @@ int main(int argc, char ** argv) {
 
     llama_context_params ctx_params = llama_context_default_params();
 
-    ctx_params.seed       = 1234;
-    ctx_params.n_ctx      = n_kv_max;
-    ctx_params.n_batch    = n_batch;
-    ctx_params.n_ubatch   = n_ubatch;
-    ctx_params.flash_attn = flash_attn;
-
-    ctx_params.n_threads       = params.n_threads;
-    ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+    ctx_params.seed            = 1234;
+    ctx_params.n_ctx           = n_kv_max;
+    ctx_params.n_batch         = n_batch;
+    ctx_params.n_ubatch        = n_ubatch;
+    ctx_params.flash_attn      = flash_attn;
+    ctx_params.n_threads       = n_threads;
+    ctx_params.n_threads_batch = n_threads_batch == -1 ? n_threads : n_threads_batch;
 
     // ensure enough sequences are available
     ctx_params.n_seq_max = *std::max_element(n_pl.begin(), n_pl.end());
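The `-1` sentinel preserves the old behavior when `NTB` is not given: prompt (batch) processing reuses the generation thread count. As an isolated sketch (the helper name is illustrative, not from the example):

```cpp
// NTB == -1 means "use the generation thread count for batch processing too".
static int resolve_n_threads_batch(int n_threads, int n_threads_batch) {
    return n_threads_batch == -1 ? n_threads : n_threads_batch;
}
```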
@@ -175,7 +184,8 @@ int main(int argc, char ** argv) {
     }
 
     LOG_TEE("\n");
-    LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, is_pp_shared = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, n_batch, n_ubatch, flash_attn, is_pp_shared, n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
+    LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, is_pp_shared = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n",
+            __func__, n_kv_max, n_batch, n_ubatch, flash_attn, is_pp_shared, n_gpu_layers, n_threads, ctx_params.n_threads_batch);
     LOG_TEE("\n");
 
     LOG_TEE("|%6s | %6s | %4s | %6s | %8s | %8s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "B", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s", "T s", "S t/s");
examples/batched/README.md
@@ -3,42 +3,42 @@
 The example demonstrates batched generation from a given prompt
 
 ```bash
-./batched ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is" 4
+./batched ./models/llama-7b-v2/llama-2-7b-chat.Q8_0.gguf "Hello my name is" 4
 
 ...
 
-main: n_len = 32, n_ctx = 2048, n_parallel = 4, n_kv_req = 113
+main: n_len = 32, n_ctx = 128, n_batch = 32, n_parallel = 4, n_kv_req = 113, n_threads = 16, n_threads_batch = 16
 
-Hello my name is
+<s> Hello my name is
 
 main: generating 4 sequences ...
 
-main: stream 0 finished
-main: stream 1 finished
-main: stream 2 finished
-main: stream 3 finished
+main: stream 0 finished at n_cur = 32
+main: stream 1 finished at n_cur = 32
+main: stream 2 finished at n_cur = 32
+main: stream 3 finished at n_cur = 32
 
 sequence 0:
 
-Hello my name is Shirley. I am a 25-year-old female who has been working for over 5 years as a b
+Hello my name is [Your Name], and I am a [Your Profession] with [Number of Years] of experience in the [Your Industry
 
 sequence 1:
 
-Hello my name is Renee and I'm a 32 year old female from the United States. I'm looking for a man between
+Hello my name is Drew and I am a 31 year old man from the United States. I have been a fan of anime for as
 
 sequence 2:
 
-Hello my name is Diana. I am looking for a housekeeping job. I have experience with children and have my own transportation. I am
+Hello my name is Tiffany and I am a 30 year old female. I have been experiencing some symptoms that I am concerned about
 
 sequence 3:
 
-Hello my name is Cody. I am a 3 year old neutered male. I am a very friendly cat. I am very playful and
+Hello my name is John and I am a 26 year old man from the United States. I have been experiencing some strange symptoms over the
 
-main: decoded 108 tokens in 3.57 s, speed: 30.26 t/s
+main: decoded 108 tokens in 4.19 s, speed: 25.76 t/s
 
-llama_print_timings: load time = 587.00 ms
-llama_print_timings: sample time = 2.56 ms / 112 runs ( 0.02 ms per token, 43664.72 tokens per second)
-llama_print_timings: prompt eval time = 4089.11 ms / 118 tokens ( 34.65 ms per token, 28.86 tokens per second)
-llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
-llama_print_timings: total time = 4156.04 ms
+llama_print_timings: load time = 549.60 ms
+llama_print_timings: sample time = 4.14 ms / 112 runs ( 0.04 ms per token, 27027.03 tokens per second)
+llama_print_timings: prompt eval time = 4333.64 ms / 113 tokens ( 38.35 ms per token, 26.08 tokens per second)
+llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
+llama_print_timings: total time = 4742.24 ms / 114 tokens
 ```
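The logged numbers are mutually consistent, assuming the example's usual sizing rule (prompt tokens plus a per-sequence generation budget): the prompt `<s> Hello my name is` is 5 tokens, so `n_kv_req = 5 + (32 - 5) * 4 = 113`; generation then produces `(32 - 5) * 4 = 108` tokens, and `108 / 4.19 s ≈ 25.8 t/s` matches the reported speed.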
examples/batched/batched.cpp
@@ -11,7 +11,7 @@ int main(int argc, char ** argv) {
     gpt_params params;
 
     if (argc == 1 || argv[1][0] == '-') {
-        printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL] [LEN] [NGL]\n" , argv[0]);
+        printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL] [LEN] [NGL] [NT] [NTB]\n", argv[0]);
         return 1 ;
     }
 
@@ -24,6 +24,12 @@ int main(int argc, char ** argv) {
     // number of layers to offload to the GPU
     int n_gpu_layers = 0;
 
+    // number of threads to use for generation
+    int n_threads = cpu_get_num_math();
+
+    // number of threads to use for batch processing
+    int n_threads_batch = -1;
+
     if (argc >= 2) {
         params.model = argv[1];
     }
@@ -44,6 +50,14 @@ int main(int argc, char ** argv) {
         n_gpu_layers = std::atoi(argv[5]);
     }
 
+    if (argc >= 7) {
+        n_threads = std::atoi(argv[6]);
+    }
+
+    if (argc >= 8) {
+        n_threads_batch = std::atoi(argv[7]);
+    }
+
     if (params.prompt.empty()) {
         params.prompt = "Hello my name is";
     }
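With these two blocks the full positional order becomes `MODEL_PATH [PROMPT] [PARALLEL] [LEN] [NGL] [NT] [NTB]`, so a hypothetical invocation such as `./batched model.gguf "Hello my name is" 4 32 99 8 8` would pin 8 threads for both generation and batch processing.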
@@ -79,12 +93,12 @@ int main(int argc, char ** argv) {
 
     llama_context_params ctx_params = llama_context_default_params();
 
-    ctx_params.seed    = 1234;
-    ctx_params.n_ctx   = n_kv_req;
-    ctx_params.n_batch = std::max(n_len, n_parallel);
+    ctx_params.seed            = 1234;
+    ctx_params.n_ctx           = n_kv_req;
+    ctx_params.n_batch         = std::max(n_len, n_parallel);
     ctx_params.n_seq_max       = n_parallel;
-    ctx_params.n_threads       = params.n_threads;
-    ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+    ctx_params.n_threads       = n_threads;
+    ctx_params.n_threads_batch = n_threads_batch == -1 ? n_threads : n_threads_batch;
 
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);
 
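Here `n_batch = std::max(n_len, n_parallel)` is a safe upper bound: the prompt is submitted in a single `llama_decode` call (at most `n_len` tokens), and each subsequent generation step batches one token per parallel sequence, i.e. `n_parallel` tokens.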
@@ -95,7 +109,8 @@ int main(int argc, char ** argv) {
 
     const int n_ctx = llama_n_ctx(ctx);
 
-    LOG_TEE("\n%s: n_len = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_len, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req);
+    LOG_TEE("\n%s: n_len = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d, n_threads = %u, n_threads_batch = %u\n",
+            __func__, n_len, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req, n_threads, ctx_params.n_threads_batch);
 
     // make sure the KV cache is big enough to hold all the prompt and generated tokens
     if (n_kv_req > n_ctx) {