server : reuse cached context chunks (#9866)

ggml-ci
This commit is contained in:
Georgi Gerganov 2024-10-13 18:52:48 +03:00 committed by GitHub
parent 92be9f1216
commit c7181bd294
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 78 additions and 6 deletions

View file

@ -1788,6 +1788,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
params.n_threads_http = value;
}
).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_THREADS_HTTP"));
// Register the server-only "--cache-reuse N" CLI option (env: LLAMA_ARG_CACHE_REUSE).
// The parsed integer is stored in params.n_cache_reuse: the minimum chunk size at
// which the server will attempt to reuse already-computed KV-cache data via KV
// shifting (per the help string). NOTE(review): units are presumably tokens and a
// value of 0 presumably disables reuse — confirm against the server implementation.
add_opt(common_arg(
{"--cache-reuse"}, "N",
string_format("min chunk size to attempt reusing from the cache via KV shifting (default: %d)", params.n_cache_reuse),
[](common_params & params, int value) {
params.n_cache_reuse = value;
}
).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_REUSE"));
add_opt(common_arg(
{"--metrics"},
string_format("enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled"),