Merge branch 'master' into gg/flash-attn
commit 3e318e764f
21 changed files with 1316 additions and 458 deletions
|
@@ -178,25 +178,27 @@ int main(int argc, char ** argv) {
|
|||
float * out = emb + p * n_embd;
|
||||
batch_decode(ctx, batch, out, s, n_embd);
|
||||
|
||||
// print the first part of the embeddings
|
||||
// print the first part of the embeddings or for a single prompt, the full embedding
|
||||
fprintf(stdout, "\n");
|
||||
for (int j = 0; j < n_prompts; j++) {
|
||||
fprintf(stdout, "embedding %d: ", j);
|
||||
for (int i = 0; i < std::min(16, n_embd); i++) {
|
||||
for (int i = 0; i < (n_prompts > 1 ? std::min(16, n_embd) : n_embd); i++) {
|
||||
fprintf(stdout, "%9.6f ", emb[j * n_embd + i]);
|
||||
}
|
||||
fprintf(stdout, "\n");
|
||||
}
|
||||
|
||||
// print cosine similarity matrix
|
||||
fprintf(stdout, "\n");
|
||||
printf("cosine similarity matrix:\n\n");
|
||||
for (int i = 0; i < n_prompts; i++) {
|
||||
for (int j = 0; j < n_prompts; j++) {
|
||||
float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
fprintf(stdout, "%6.2f ", sim);
|
||||
}
|
||||
if (n_prompts > 1) {
|
||||
fprintf(stdout, "\n");
|
||||
printf("cosine similarity matrix:\n\n");
|
||||
for (int i = 0; i < n_prompts; i++) {
|
||||
for (int j = 0; j < n_prompts; j++) {
|
||||
float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
fprintf(stdout, "%6.2f ", sim);
|
||||
}
|
||||
fprintf(stdout, "\n");
|
||||
}
|
||||
}
|
||||
|
||||
// clean up
|
||||
|
|
|
@@ -6,7 +6,7 @@ for more information, please go to [Meituan-AutoML/MobileVLM](https://github.com
|
|||
|
||||
The implementation is based on llava, and is compatible with llava and MobileVLM. The usage is basically the same as llava.
|
||||
|
||||
Notice: The overall process of model inference for both **MobileVLM** and **MobileVLM_V2** models is the same, but the process of model conversion is a little different. Therefore, using MobiVLM as an example, the different conversion step will be shown.
|
||||
Notice: The overall process of model inference for both **MobileVLM** and **MobileVLM_V2** models is the same, but the process of model conversion is a little different. Therefore, using **MobileVLM-1.7B** as an example, the different conversion step will be shown.
|
||||
|
||||
## Usage
|
||||
Build with cmake or run `make llava-cli` to build it.
|
||||
|
@@ -36,7 +36,7 @@ git clone https://huggingface.co/openai/clip-vit-large-patch14-336
|
|||
python ./examples/llava/llava-surgery.py -m path/to/MobileVLM-1.7B
|
||||
```
|
||||
|
||||
3. Use `convert-image-encoder-to-gguf.py` with `--projector-type ldp` (for **V2** the arg is `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:
|
||||
3. Use `convert-image-encoder-to-gguf.py` with `--projector-type ldp` (for **V2** please use `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:
|
||||
|
||||
```sh
|
||||
python ./examples/llava/convert-image-encoder-to-gguf \
|
||||
|
@@ -78,7 +78,7 @@ cd examples/llava/android/build_64
|
|||
### run on Android
|
||||
Refer to `android/adb_run.sh`, and modify the resources' `name` and `path` as needed.
|
||||
|
||||
## some result on Android with `Snapdragon 888` chip
|
||||
## Some results on Android with `Snapdragon 888` chip
|
||||
### case 1
|
||||
**input**
|
||||
```sh
|
||||
|
@@ -109,7 +109,6 @@ llama_print_timings: total time = 34731.93 ms
|
|||
--image /data/local/tmp/cat.jpeg \
|
||||
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat is in the image? ASSISTANT:"
|
||||
```
|
||||
|
||||
**output**
|
||||
```sh
|
||||
encode_image_with_clip: image encoded in 21149.51 ms by CLIP ( 146.87 ms per image patch)
|
||||
|
@@ -121,12 +120,82 @@ llama_print_timings: eval time = 1279.03 ms / 18 runs ( 71.06 m
|
|||
llama_print_timings: total time = 34570.79 ms
|
||||
```
|
||||
|
||||
|
||||
## Some results on Android with `Snapdragon 778G` chip
|
||||
### MobileVLM-1.7B case
|
||||
#### llava-cli release-b2005
|
||||
**input**
|
||||
```sh
|
||||
/data/local/tmp/llava-cli \
|
||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||
-t 4 \
|
||||
--image /data/local/tmp/many_llamas.jpeg \
|
||||
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat's that? ASSISTANT:"
|
||||
```
|
||||
**output**
|
||||
```sh
|
||||
encode_image_with_clip: image encoded in 18728.52 ms by CLIP ( 130.06 ms per image patch)
|
||||
system_prompt: A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER:
|
||||
user_prompt: \nWhat's that? ASSISTANT:
|
||||
|
||||
A group of llamas are standing in a green pasture.
|
||||
|
||||
llama_print_timings: load time = 20357.33 ms
|
||||
llama_print_timings: sample time = 2.96 ms / 14 runs ( 0.21 ms per token, 4734.53 tokens per second)
|
||||
llama_print_timings: prompt eval time = 8119.49 ms / 191 tokens ( 42.51 ms per token, 23.52 tokens per second)
|
||||
llama_print_timings: eval time = 1005.75 ms / 14 runs ( 71.84 ms per token, 13.92 tokens per second)
|
||||
llama_print_timings: total time = 28038.34 ms / 205 tokens
|
||||
```
|
||||
#### llava-cli latest-version
|
||||
**input**
|
||||
|
||||
Just the same as above.
|
||||
|
||||
**output** (seems to be much slower)
|
||||
```sh
|
||||
encode_image_with_clip: image embedding created: 144 tokens
|
||||
|
||||
encode_image_with_clip: image encoded in 288268.88 ms by CLIP ( 2001.87 ms per image patch)
|
||||
system_prompt: A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER:
|
||||
user_prompt: \nWhat's that? ASSISTANT:
|
||||
|
||||
It is a group of sheep standing together in a grass field.
|
||||
|
||||
llama_print_timings: load time = 818120.91 ms
|
||||
llama_print_timings: sample time = 3.44 ms / 14 runs ( 0.25 ms per token, 4067.40 tokens per second)
|
||||
llama_print_timings: prompt eval time = 529274.69 ms / 191 tokens ( 2771.07 ms per token, 0.36 tokens per second)
|
||||
llama_print_timings: eval time = 43894.02 ms / 13 runs ( 3376.46 ms per token, 0.30 tokens per second)
|
||||
llama_print_timings: total time = 865441.76 ms / 204 tokens
|
||||
```
|
||||
### MobileVLM_V2-1.7B case
|
||||
#### llava-cli release-b2005
|
||||
**input**
|
||||
|
||||
Just the same as above.
|
||||
|
||||
**output**
|
||||
```sh
|
||||
encode_image_with_clip: image encoded in 20609.61 ms by CLIP ( 143.12 ms per image patch)
|
||||
system_prompt: A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER:
|
||||
user_prompt: \nWhat's that? ASSISTANT:
|
||||
|
||||
This image captures a lively scene of 20 llamas in motion on an expansive, grassy field. The llama is scattered across the landscape with some standing and others sitting down as if taking rest or observing their surroundings from different vantage points within this verdant setting.
|
||||
|
||||
The background offers glimpses into a picturesque town nestled amidst hills under an overcast sky, adding depth to the scene while also emphasizing that distance between these llama and human-made structures like houses or roads in which they roam freely without any barriers around them. The image is framed by text at both right angles on white backgrounds against a contrasting blue backdrop with green foliage, further drawing attention to the llamas amidst their natural habitat while also inviting viewers into this picturesque landscape within town limits of Alta Llama
|
||||
|
||||
llama_print_timings: load time = 22406.77 ms
|
||||
llama_print_timings: sample time = 49.26 ms / 186 runs ( 0.26 ms per token, 3776.27 tokens per second)
|
||||
llama_print_timings: prompt eval time = 9044.54 ms / 191 tokens ( 47.35 ms per token, 21.12 tokens per second)
|
||||
llama_print_timings: eval time = 14497.49 ms / 186 runs ( 77.94 ms per token, 12.83 tokens per second)
|
||||
llama_print_timings: total time = 44411.01 ms / 377 tokens
|
||||
```
|
||||
|
||||
## Orin compile and run
|
||||
### compile
|
||||
```sh
|
||||
make LLAMA_CUDA=1 CUDA_DOCKER_ARCH=sm_87 LLAMA_CUDA_F16=1 -j 32
|
||||
```
|
||||
|
||||
### run on Orin
|
||||
### case 1
|
||||
**input**
|
||||
|
@@ -175,8 +244,121 @@ llama_print_timings: eval time = 166.65 ms / 11 runs ( 15.15 m
|
|||
llama_print_timings: total time = 1365.47 ms / 243 tokens
|
||||
```
|
||||
|
||||
## Minor shortcomings
|
||||
The `n_patch` of the output in `ldp` is 1/4 of the input. To implement this quickly, we uniformly modified the `clip_n_patches` function to return a quarter of the patch count. As a result, when measuring time consumption, the calculated per-patch time is 4 times larger than the real cost.
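For intuition, here is a hedged sketch (not llama.cpp code) of that 4x factor; the real patch count below assumes a 336x336 input with 14x14 patches, and the encode time is taken from the i7-10750H log further down:

```python
# Hedged sketch: why the reported per-image-patch time is ~4x the real cost
# when clip_n_patches() returns a quarter of the patch count for ldp/ldpv2.
real_n_patches     = 24 * 24                # assumed: 336 / 14 = 24 patches per side
reported_n_patches = real_n_patches // 4    # 144, as printed in the logs below
total_ms           = 2730.94                # CLIP encode time from the i7-10750H log

print(total_ms / reported_n_patches)        # ~18.96 ms per patch (what the log prints)
print(total_ms / real_n_patches)            # ~4.74 ms per patch (closer to the real cost)
```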
|
||||
## Running on Intel(R) Core(TM) i7-10750H
|
||||
### Operating system
|
||||
Ubuntu 22.04
|
||||
### compile
|
||||
```sh
|
||||
make -j32
|
||||
```
|
||||
### MobileVLM-1.7B case
|
||||
**input**
|
||||
```sh
|
||||
-m /path/to/ggml-model-q4_k.gguf \
|
||||
--mmproj /path/to/mmproj-model-f16.gguf \
|
||||
--image /path/to/many_llamas.jpeg
|
||||
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat's that? ASSISTANT:" \
|
||||
```
|
||||
**output**
|
||||
```sh
|
||||
encode_image_with_clip: image embedding created: 144 tokens
|
||||
|
||||
encode_image_with_clip: image encoded in 2730.94 ms by CLIP ( 18.96 ms per image patch)
|
||||
system_prompt: A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER:
|
||||
user_prompt: \nWhat's that?ASSISTANT:
|
||||
|
||||
A group of llamas are walking together in a field.
|
||||
|
||||
llama_print_timings: load time = 5506.60 ms
|
||||
llama_print_timings: sample time = 0.44 ms / 13 runs ( 0.03 ms per token, 29545.45 tokens per second)
|
||||
llama_print_timings: prompt eval time = 2031.58 ms / 190 tokens ( 10.69 ms per token, 93.52 tokens per second)
|
||||
llama_print_timings: eval time = 438.92 ms / 12 runs ( 36.58 ms per token, 27.34 tokens per second)
|
||||
llama_print_timings: total time = 5990.25 ms / 202 tokens
|
||||
```
|
||||
|
||||
### MobileVLM_V2-1.7B case
|
||||
**input**
|
||||
|
||||
Just the same as above.
|
||||
|
||||
**output**
|
||||
```sh
|
||||
encode_image_with_clip: image embedding created: 144 tokens
|
||||
|
||||
encode_image_with_clip: image encoded in 3223.89 ms by CLIP ( 22.39 ms per image patch)
|
||||
system_prompt: A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER:
|
||||
user_prompt: \nWhat's that?ASSISTANT:
|
||||
|
||||
The image captures a tranquil scene in a park, where a group of approximately 20 llamas are gathered. The llamas, a mix of white and black, are standing in a line, their black and white patterns contrasting with the lush green grass of the park. The lamas are arranged in a line, suggesting a social order.
|
||||
|
||||
The park itself is lush and green, with trees dotting the landscape in the background. A sign reading "Llamas Tico Ana" is also visible in the image, possibly indicating the location or the breed of the llamas. The image seems to be taken from a distance, providing a wide view of the scene and the surrounding environment.
|
||||
|
||||
The llamas' positions relative to each other, the sign, and the trees create a harmonious composition. The image does not contain any discernible text. The overall scene is one of peace and natural beauty, with the llamas in their natural habitat, surrounded by the vibrant colors and lush greenery of the park.
|
||||
|
||||
llama_print_timings: load time = 6642.61 ms
|
||||
llama_print_timings: sample time = 8.15 ms / 223 runs ( 0.04 ms per token, 27358.61 tokens per second)
|
||||
llama_print_timings: prompt eval time = 2475.07 ms / 190 tokens ( 13.03 ms per token, 76.77 tokens per second)
|
||||
llama_print_timings: eval time = 8760.60 ms / 222 runs ( 39.46 ms per token, 25.34 tokens per second)
|
||||
llama_print_timings: total time = 15513.95 ms / 412 tokens
|
||||
```
|
||||
|
||||
## Running on Intel(R) Core(TM) Ultra7 115H
|
||||
### Operating system
|
||||
Windows 11
|
||||
### compile
|
||||
```sh
|
||||
make -j32
|
||||
```
|
||||
### MobileVLM-1.7B case
|
||||
**input**
|
||||
```sh
|
||||
-m /path/to/ggml-model-q4_k.gguf \
|
||||
--mmproj /path/to/tmp/mmproj-model-f16.gguf \
|
||||
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat's that? ASSISTANT:" \
|
||||
```
|
||||
**output**
|
||||
```sh
|
||||
encode_image_with_clip: image encoded in 4902.81 ms by CLIP ( 34.05 ms per image patch)
|
||||
system_prompt: A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER:
|
||||
user_prompt: \nWhat's that? ASSISTANT:
|
||||
|
||||
The image features a group of brown and white llamas standing in a grassy field.
|
||||
|
||||
llama_print_timings: load time = 7441.06 ms
|
||||
llama_print_timings: sample time = 0.72 ms / 19 runs ( 0.04 ms per token, 26279.39 tokens per second)
|
||||
llama_print_timings: prompt eval time = 2090.71 ms / 191 tokens ( 10.95 ms per token, 91.36 tokens per second)
|
||||
llama_print_timings: eval time = 512.35 ms / 18 runs ( 28.46 ms per token, 35.13 tokens per second)
|
||||
llama_print_timings: total time = 7987.23 ms / 209 tokens
|
||||
```
|
||||
|
||||
### MobileVLM_V2-1.7B case
|
||||
**input**
|
||||
|
||||
Just the same as above.
|
||||
|
||||
**output**
|
||||
```sh
|
||||
encode_image_with_clip: image encoded in 4682.44 ms by CLIP ( 32.52 ms per image patch)
|
||||
system_prompt: A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER:
|
||||
user_prompt: \nWhat's that? ASSISTANT:
|
||||
|
||||
This image captures a lively scene of a group of 14 llamas in a grassy field. The llamas, with their distinctive black and white coats, are standing and walking in a line, seemingly engaged in a social activity. One
|
||||
of them, possibly the first in the line, has its back turned, perhaps observing something in the distance.
|
||||
|
||||
The llama in the front of the line stands out due to its black and white coloring, which is quite unusual for llama patterns. The llama in the front also seems to be more aware of its surroundings, as it faces the camera, giving a sense of engagement with the viewer.
|
||||
|
||||
The image is taken from the side of the llama, providing a clear view of the llama in the front and its companions. The lameness in the llama in
|
||||
front is not visible, indicating that it might not be the main focus of the photo.
|
||||
|
||||
The background of the image features a grassy field, with a fence and a tree visible in the distance. The tree appears to be bare, suggesting that it might be during a time of year when most trees are dormant or have shed their leaves.
|
||||
|
||||
|
||||
llama_print_timings: load time = 7015.35 ms
|
||||
llama_print_timings: sample time = 10.61 ms / 256 runs ( 0.04 ms per token, 24119.09 tokens per second)
|
||||
llama_print_timings: prompt eval time = 2052.45 ms / 191 tokens ( 10.75 ms per token, 93.06 tokens per second)
|
||||
llama_print_timings: eval time = 7259.43 ms / 255 runs ( 28.47 ms per token, 35.13 tokens per second)
|
||||
llama_print_timings: total time = 14371.19 ms / 446 tokens
|
||||
```
|
||||
|
||||
## TODO
|
||||
|
||||
|
@@ -191,5 +373,5 @@ The `n_patch` of output in `ldp` is 1/4 of the input. In order to implement quic
|
|||
|
||||
## contributor
|
||||
```sh
|
||||
zhangjidong05, yangyang260, huyiming03, chenxiaotao03
|
||||
zhangjidong05, yangyang260, huyiming03, chenxiaotao03, ZiangWu-77
|
||||
```
|
||||
|
|
|
@@ -835,9 +835,10 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
|
|||
mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
|
||||
// weight ne = [3, 3, 2048, 1]
|
||||
struct ggml_tensor * peg_0 = ggml_conv_depthwise_2d(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
|
||||
peg_0 = ggml_add(ctx0, peg_0, mlp_2);
|
||||
peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
|
||||
peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
|
||||
mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
|
||||
peg_0 = ggml_add(ctx0, peg_0, mlp_2);
|
||||
peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
|
||||
embeddings = peg_0;
|
||||
}
|
||||
|
@@ -1755,7 +1756,7 @@ int clip_n_patches(const struct clip_ctx * ctx) {
|
|||
|
||||
int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
|
||||
|
||||
if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
|
||||
if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
|
||||
n_patches /= 4;
|
||||
}
|
||||
|
||||
|
|
|
@@ -296,7 +296,9 @@ These options help improve the performance and memory usage of the LLaMA models.
|
|||
|
||||
### Batch Size
|
||||
|
||||
- `-b N, --batch-size N`: Set the batch size for prompt processing (default: 512). This large batch size benefits users who have BLAS installed and enabled it during the build. If you don't have BLAS enabled ("BLAS=0"), you can use a smaller number, such as 8, to see the prompt progress as it's evaluated in some situations.
|
||||
- `-b N, --batch-size N`: Set the batch size for prompt processing (default: `2048`). This large batch size benefits users who have BLAS installed and enabled it during the build. If you don't have BLAS enabled ("BLAS=0"), you can use a smaller number, such as 8, to see the prompt progress as it's evaluated in some situations.
|
||||
|
||||
- `-ub N`, `--ubatch-size N`: physical maximum batch size. This is for pipeline parallelization. Default: `512`.
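For intuition only, a minimal sketch (not llama.cpp code) of how a logical batch bounded by `-b` could be split into physical micro-batches of at most `-ub` tokens:

```python
# Hedged sketch: split a logical batch (-b) into physical micro-batches (-ub).
def split_into_ubatches(tokens, n_batch=2048, n_ubatch=512):
    logical = tokens[:n_batch]  # at most n_batch tokens are queued per logical batch
    return [logical[i:i + n_ubatch] for i in range(0, len(logical), n_ubatch)]

# 1300 queued tokens -> physical chunks of 512, 512 and 276 tokens
print([len(u) for u in split_into_ubatches(list(range(1300)))])
```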
|
||||
|
||||
### Prompt Caching
|
||||
|
||||
|
|
303 examples/server/bench/bench.py Normal file
|
@@ -0,0 +1,303 @@
|
|||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from contextlib import closing
|
||||
from datetime import datetime
|
||||
|
||||
import matplotlib
|
||||
import matplotlib.dates
|
||||
import matplotlib.pyplot as plt
|
||||
import requests
|
||||
|
||||
|
||||
def main(args_in: list[str] | None = None) -> None:
|
||||
parser = argparse.ArgumentParser(description="Start server benchmark scenario")
|
||||
parser.add_argument("--name", type=str, help="Bench name", required=True)
|
||||
parser.add_argument("--runner-label", type=str, help="Runner label", required=True)
|
||||
parser.add_argument("--branch", type=str, help="Branch name", default="detached")
|
||||
parser.add_argument("--commit", type=str, help="Commit name", default="dirty")
|
||||
parser.add_argument("--host", type=str, help="Server listen host", default="0.0.0.0")
|
||||
parser.add_argument("--port", type=int, help="Server listen host", default="8080")
|
||||
parser.add_argument("--model-path-prefix", type=str, help="Prefix where to store the model files", default="models")
|
||||
parser.add_argument("--n-prompts", type=int,
|
||||
help="SERVER_BENCH_N_PROMPTS: total prompts to randomly select in the benchmark", required=True)
|
||||
parser.add_argument("--max-prompt-tokens", type=int,
|
||||
help="SERVER_BENCH_MAX_PROMPT_TOKENS: maximum prompt tokens to filter out in the dataset",
|
||||
required=True)
|
||||
parser.add_argument("--max-tokens", type=int,
|
||||
help="SERVER_BENCH_MAX_CONTEXT: maximum context size of the completions request to filter out in the dataset: prompt + predicted tokens",
|
||||
required=True)
|
||||
parser.add_argument("--hf-repo", type=str, help="Hugging Face model repository", required=True)
|
||||
parser.add_argument("--hf-file", type=str, help="Hugging Face model file", required=True)
|
||||
parser.add_argument("-ngl", "--n-gpu-layers", type=int, help="layers to the GPU for computation", required=True)
|
||||
parser.add_argument("--ctx-size", type=int, help="Set the size of the prompt context", required=True)
|
||||
parser.add_argument("--parallel", type=int, help="Set the number of slots for process requests", required=True)
|
||||
parser.add_argument("--batch-size", type=int, help="Set the batch size for prompt processing", required=True)
|
||||
parser.add_argument("--ubatch-size", type=int, help="physical maximum batch size", required=True)
|
||||
parser.add_argument("--scenario", type=str, help="Scenario to run", required=True)
|
||||
parser.add_argument("--duration", type=str, help="Bench scenario", required=True)
|
||||
|
||||
args = parser.parse_args(args_in)
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
# Start the server and performance scenario
|
||||
try:
|
||||
server_process = start_server(args)
|
||||
except Exception:
|
||||
print("bench: server start error :")
|
||||
traceback.print_exc(file=sys.stdout)
|
||||
sys.exit(1)
|
||||
|
||||
# start the benchmark
|
||||
try:
|
||||
start_benchmark(args)
|
||||
|
||||
iterations = 0
|
||||
with open("results.github.env", 'w') as github_env:
|
||||
# parse output
|
||||
with open('k6-results.json', 'r') as bench_results:
|
||||
# Load JSON data from file
|
||||
data = json.load(bench_results)
|
||||
for metric_name in data['metrics']:
|
||||
for metric_metric in data['metrics'][metric_name]:
|
||||
value = data['metrics'][metric_name][metric_metric]
|
||||
if isinstance(value, float) or isinstance(value, int):
|
||||
value = round(value, 2)
|
||||
data['metrics'][metric_name][metric_metric]=value
|
||||
github_env.write(
|
||||
f"{escape_metric_name(metric_name)}_{escape_metric_name(metric_metric)}={value}\n")
|
||||
token_seconds = data['metrics']['llamacpp_tokens_second']['avg']
|
||||
iterations = data['root_group']['checks']['success completion']['passes']
|
||||
|
||||
except Exception:
|
||||
print("bench: error :")
|
||||
traceback.print_exc(file=sys.stdout)
|
||||
|
||||
# Stop the server
|
||||
if server_process:
|
||||
try:
|
||||
print(f"bench: shutting down server pid={server_process.pid} ...")
|
||||
if os.name == 'nt':
|
||||
interrupt = signal.CTRL_C_EVENT
|
||||
else:
|
||||
interrupt = signal.SIGINT
|
||||
server_process.send_signal(interrupt)
|
||||
server_process.wait(0.5)
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
print(f"server still alive after 500ms, force-killing pid={server_process.pid} ...")
|
||||
server_process.kill() # SIGKILL
|
||||
server_process.wait()
|
||||
|
||||
while is_server_listening(args.host, args.port):
|
||||
time.sleep(0.1)
|
||||
|
||||
title = (f"llama.cpp {args.name} on {args.runner_label}\n "
|
||||
f"duration={args.duration} {iterations} iterations")
|
||||
xlabel = (f"{args.hf_repo}/{args.hf_file}\n"
|
||||
f"parallel={args.parallel} ctx-size={args.ctx_size} ngl={args.n_gpu_layers} batch-size={args.batch_size} ubatch-size={args.ubatch_size} pp={args.max_prompt_tokens} pp+tg={args.max_tokens}\n"
|
||||
f"branch={args.branch} commit={args.commit}")
|
||||
|
||||
# Prometheus
|
||||
end_time = time.time()
|
||||
if is_server_listening("0.0.0.0", 9090):
|
||||
metrics = ['prompt_tokens_seconds', 'predicted_tokens_seconds',
|
||||
'kv_cache_usage_ratio', 'requests_processing', 'requests_deferred']
|
||||
|
||||
for metric in metrics:
|
||||
resp = requests.get(f"http://localhost:9090/api/v1/query_range",
|
||||
params={'query': 'llamacpp:' + metric, 'start': start_time, 'end': end_time, 'step': 2})
|
||||
|
||||
with open(f"{metric}.json", 'w') as metric_json:
|
||||
metric_json.write(resp.text)
|
||||
|
||||
if resp.status_code != 200:
|
||||
print(f"bench: unable to extract prometheus metric {metric}: {resp.text}")
|
||||
else:
|
||||
metric_data = resp.json()
|
||||
values = metric_data['data']['result'][0]['values']
|
||||
timestamps, metric_values = zip(*values)
|
||||
metric_values = [float(value) for value in metric_values]
|
||||
timestamps_dt = [datetime.fromtimestamp(int(ts)) for ts in timestamps]
|
||||
plt.figure(figsize=(16, 10), dpi=80)
|
||||
plt.plot(timestamps_dt, metric_values, label=metric)
|
||||
plt.xticks(rotation=0, fontsize=14, horizontalalignment='center', alpha=.7)
|
||||
plt.yticks(fontsize=12, alpha=.7)
|
||||
|
||||
ylabel = f"llamacpp:{metric}"
|
||||
plt.title(title,
|
||||
fontsize=14, wrap=True)
|
||||
plt.grid(axis='both', alpha=.3)
|
||||
plt.ylabel(ylabel, fontsize=22)
|
||||
plt.xlabel(xlabel, fontsize=14, wrap=True)
|
||||
plt.gca().xaxis.set_major_locator(matplotlib.dates.MinuteLocator())
|
||||
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%Y-%m-%d %H:%M:%S"))
|
||||
plt.gcf().autofmt_xdate()
|
||||
|
||||
# Remove borders
|
||||
plt.gca().spines["top"].set_alpha(0.0)
|
||||
plt.gca().spines["bottom"].set_alpha(0.3)
|
||||
plt.gca().spines["right"].set_alpha(0.0)
|
||||
plt.gca().spines["left"].set_alpha(0.3)
|
||||
|
||||
# Save the plot as a jpg image
|
||||
plt.savefig(f'{metric}.jpg', dpi=60)
|
||||
plt.close()
|
||||
|
||||
# Mermaid format in case images upload failed
|
||||
with (open(f"{metric}.mermaid", 'w') as mermaid_f):
|
||||
mermaid = (
|
||||
f"""---
|
||||
config:
|
||||
xyChart:
|
||||
titleFontSize: 12
|
||||
width: 900
|
||||
height: 600
|
||||
themeVariables:
|
||||
xyChart:
|
||||
titleColor: "#000000"
|
||||
---
|
||||
xychart-beta
|
||||
title "{title}"
|
||||
y-axis "llamacpp:{metric}"
|
||||
x-axis "llamacpp:{metric}" {int(min(timestamps))} --> {int(max(timestamps))}
|
||||
line [{', '.join([str(round(float(value), 2)) for value in metric_values])}]
|
||||
""")
|
||||
mermaid_f.write(mermaid)
|
||||
|
||||
# 140 chars max for commit status description
|
||||
bench_results = {
|
||||
"req": {
|
||||
"p90": data['metrics']["http_req_duration"]["p(90)"],
|
||||
"avg": data['metrics']["http_req_duration"]["avg"],
|
||||
},
|
||||
"pp": {
|
||||
"p90": data['metrics']["llamacpp_prompt_tokens"]["p(90)"],
|
||||
"avg": data['metrics']["llamacpp_prompt_tokens"]["avg"],
|
||||
},
|
||||
"tg": {
|
||||
"p90": data['metrics']["llamacpp_tokens_second"]["p(90)"],
|
||||
"avg": data['metrics']["llamacpp_tokens_second"]["avg"],
|
||||
},
|
||||
}
|
||||
with open("results.github.env", 'a') as github_env:
|
||||
github_env.write(f"BENCH_RESULTS={json.dumps(bench_results, indent=None, separators=(',', ':') )}\n")
|
||||
github_env.write(f"BENCH_ITERATIONS={iterations}\n")
|
||||
|
||||
title = title.replace('\n', ' ')
|
||||
xlabel = xlabel.replace('\n', ' ')
|
||||
github_env.write(f"BENCH_GRAPH_TITLE={title}\n")
|
||||
github_env.write(f"BENCH_GRAPH_XLABEL={xlabel}\n")
|
||||
|
||||
|
||||
def start_benchmark(args):
|
||||
k6_path = 'k6'
|
||||
if 'BENCH_K6_BIN_PATH' in os.environ:
|
||||
k6_path = os.environ['BENCH_K6_BIN_PATH']
|
||||
k6_args = [
|
||||
'run', args.scenario,
|
||||
'--no-color',
|
||||
]
|
||||
k6_args.extend(['--duration', args.duration])
|
||||
k6_args.extend(['--iterations', args.n_prompts])
|
||||
k6_args.extend(['--vus', args.parallel])
|
||||
k6_args.extend(['--summary-export', 'k6-results.json'])
|
||||
args = f"SERVER_BENCH_N_PROMPTS={args.n_prompts} SERVER_BENCH_MAX_PROMPT_TOKENS={args.max_prompt_tokens} SERVER_BENCH_MAX_CONTEXT={args.max_tokens} "
|
||||
args = args + ' '.join([str(arg) for arg in [k6_path, *k6_args]])
|
||||
print(f"bench: starting k6 with: {args}")
|
||||
k6_completed = subprocess.run(args, shell=True, stdout=sys.stdout, stderr=sys.stderr)
|
||||
if k6_completed.returncode != 0:
|
||||
raise Exception("bench: unable to run k6")
|
||||
|
||||
|
||||
def start_server(args):
|
||||
server_process = start_server_background(args)
|
||||
|
||||
attempts = 0
|
||||
max_attempts = 20
|
||||
if 'GITHUB_ACTIONS' in os.environ:
|
||||
max_attempts *= 2
|
||||
|
||||
while not is_server_listening(args.host, args.port):
|
||||
attempts += 1
|
||||
if attempts > max_attempts:
|
||||
assert False, "server not started"
|
||||
print(f"bench: waiting for server to start ...")
|
||||
time.sleep(0.5)
|
||||
|
||||
print("bench: server started.")
|
||||
return server_process
|
||||
|
||||
|
||||
def start_server_background(args):
|
||||
# Start the server
|
||||
server_path = '../../../build/bin/server'
|
||||
if 'LLAMA_SERVER_BIN_PATH' in os.environ:
|
||||
server_path = os.environ['LLAMA_SERVER_BIN_PATH']
|
||||
server_args = [
|
||||
'--host', args.host,
|
||||
'--port', args.port,
|
||||
]
|
||||
model_file = args.model_path_prefix + os.path.sep + args.hf_file
|
||||
model_dir = os.path.dirname(model_file)
|
||||
if not os.path.exists(model_dir):
|
||||
os.makedirs(model_dir)
|
||||
server_args.extend(['--model', model_file])
|
||||
server_args.extend(['--hf-repo', args.hf_repo])
|
||||
server_args.extend(['--hf-file', args.hf_file])
|
||||
server_args.extend(['--n-gpu-layers', args.n_gpu_layers])
|
||||
server_args.extend(['--ctx-size', args.ctx_size])
|
||||
server_args.extend(['--parallel', args.parallel])
|
||||
server_args.extend(['--batch-size', args.batch_size])
|
||||
server_args.extend(['--ubatch-size', args.ubatch_size])
|
||||
server_args.extend(['--n-predict', args.max_tokens * 2])
|
||||
server_args.extend(['--defrag-thold', "0.1"])
|
||||
server_args.append('--cont-batching')
|
||||
server_args.append('--metrics')
|
||||
server_args.extend(['--log-format', "text"])
|
||||
args = [str(arg) for arg in [server_path, *server_args]]
|
||||
print(f"bench: starting server with: {' '.join(args)}")
|
||||
pkwargs = {
|
||||
'stdout': subprocess.PIPE,
|
||||
'stderr': subprocess.PIPE
|
||||
}
|
||||
server_process = subprocess.Popen(
|
||||
args,
|
||||
**pkwargs)
|
||||
|
||||
def server_log(in_stream, out_stream):
|
||||
for line in iter(in_stream.readline, b''):
|
||||
print(line.decode('utf-8'), end='', file=out_stream)
|
||||
|
||||
thread_stdout = threading.Thread(target=server_log, args=(server_process.stdout, sys.stdout))
|
||||
thread_stdout.start()
|
||||
thread_stderr = threading.Thread(target=server_log, args=(server_process.stderr, sys.stderr))
|
||||
thread_stderr.start()
|
||||
|
||||
return server_process
|
||||
|
||||
|
||||
def is_server_listening(server_fqdn, server_port):
|
||||
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
|
||||
result = sock.connect_ex((server_fqdn, server_port))
|
||||
_is_server_listening = result == 0
|
||||
if _is_server_listening:
|
||||
print(f"server is listening on {server_fqdn}:{server_port}...")
|
||||
return _is_server_listening
|
||||
|
||||
|
||||
def escape_metric_name(metric_name):
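# uppercase the metric name and replace every non [A-Z0-9] character with '_',
# e.g. "http_req_duration" -> "HTTP_REQ_DURATION", "p(90)" -> "P_90_"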
|
||||
return re.sub('[^A-Z0-9]', '_', metric_name.upper())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
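For reference, a hedged usage sketch of the new benchmark driver: `main()` accepts an explicit argument list, so it can also be driven from another Python script as well as from the command line. The repository, file, scenario and tuning values below are placeholders, not values taken from the CI configuration.

```python
# Hedged usage sketch for examples/server/bench/bench.py; all values are placeholders.
from bench import main  # assumes the bench/ directory is on sys.path

main([
    "--name", "local-test",
    "--runner-label", "dev-box",
    "--hf-repo", "someone/some-model-GGUF",    # placeholder Hugging Face repository
    "--hf-file", "some-model.Q4_K_M.gguf",     # placeholder GGUF file
    "--n-prompts", "100",
    "--max-prompt-tokens", "1024",
    "--max-tokens", "2048",
    "--n-gpu-layers", "33",
    "--ctx-size", "16384",
    "--parallel", "8",
    "--batch-size", "2048",
    "--ubatch-size", "256",
    "--scenario", "script.js",                 # placeholder k6 scenario file
    "--duration", "10m",
])
```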
9 examples/server/bench/prometheus.yml Normal file
|
@@ -0,0 +1,9 @@
|
|||
global:
|
||||
scrape_interval: 10s
|
||||
external_labels:
|
||||
llamacpp: 'server'
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'llama.cpp server'
|
||||
static_configs:
|
||||
- targets: ['localhost:8080']
|
2 examples/server/bench/requirements.txt Normal file
|
@@ -0,0 +1,2 @@
|
|||
matplotlib
|
||||
requests
|
|
@@ -3566,6 +3566,7 @@ int main(int argc, char ** argv) {
|
|||
sigemptyset (&sigint_action.sa_mask);
|
||||
sigint_action.sa_flags = 0;
|
||||
sigaction(SIGINT, &sigint_action, NULL);
|
||||
sigaction(SIGTERM, &sigint_action, NULL);
|
||||
#elif defined (_WIN32)
|
||||
auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
|
||||
return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
|
||||
|
|
|
@@ -1114,7 +1114,10 @@ def start_server_background(context):
|
|||
server_args.append('--verbose')
|
||||
if 'SERVER_LOG_FORMAT_JSON' not in os.environ:
|
||||
server_args.extend(['--log-format', "text"])
|
||||
print(f"starting server with: {context.server_path} {server_args}")
|
||||
|
||||
args = [str(arg) for arg in [context.server_path, *server_args]]
|
||||
print(f"bench: starting server with: {' '.join(args)}")
|
||||
|
||||
flags = 0
|
||||
if 'nt' == os.name:
|
||||
flags |= subprocess.DETACHED_PROCESS
|
||||
|
@@ -1130,16 +1133,14 @@ def start_server_background(context):
|
|||
[str(arg) for arg in [context.server_path, *server_args]],
|
||||
**pkwargs)
|
||||
|
||||
def log_stdout(process):
|
||||
for line in iter(process.stdout.readline, b''):
|
||||
print(line.decode('utf-8'), end='')
|
||||
thread_stdout = threading.Thread(target=log_stdout, args=(context.server_process,))
|
||||
def server_log(in_stream, out_stream):
|
||||
for line in iter(in_stream.readline, b''):
|
||||
print(line.decode('utf-8'), end='', file=out_stream)
|
||||
|
||||
thread_stdout = threading.Thread(target=server_log, args=(context.server_process.stdout, sys.stdout))
|
||||
thread_stdout.start()
|
||||
|
||||
def log_stderr(process):
|
||||
for line in iter(process.stderr.readline, b''):
|
||||
print(line.decode('utf-8'), end='', file=sys.stderr)
|
||||
thread_stderr = threading.Thread(target=log_stderr, args=(context.server_process,))
|
||||
thread_stderr = threading.Thread(target=server_log, args=(context.server_process.stderr, sys.stderr))
|
||||
thread_stderr.start()
|
||||
|
||||
print(f"server pid={context.server_process.pid}, behave pid={os.getpid()}")
|
||||
|
|