Merge branch 'ggerganov:master' into vulkan-build-integration

bandoti authored on 2024-06-25 16:11:35 -03:00, committed by GitHub
commit cb3ec8887d
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
43 changed files with 1737 additions and 315 deletions


@@ -30,8 +30,10 @@ RUN make -j$(nproc) llama-server
FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
RUN apt-get update && \
-apt-get install -y libcurl4-openssl-dev libgomp1
+apt-get install -y libcurl4-openssl-dev libgomp1 curl
COPY --from=build /app/llama-server /llama-server
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
ENTRYPOINT [ "/llama-server" ]
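The HEALTHCHECK lines added to the server images probe the built-in /health endpoint of llama-server. A minimal sketch of checking the resulting status once a container is running; the image tag, container name, and port mapping below are illustrative assumptions, not part of this commit:

```sh
# run a server image (tag and port mapping are placeholders)
docker run -d --name llama-server -p 8080:8080 local/llama.cpp:server-cuda

# Docker executes the HEALTHCHECK periodically and reports it in the status column
docker ps --filter name=llama-server --format '{{.Names}}: {{.Status}}'

# the same probe can be issued by hand
curl -f http://localhost:8080/health
```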


@@ -20,10 +20,12 @@ RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
RUN apt-get update && \
-apt-get install -y libcurl4-openssl-dev
+apt-get install -y libcurl4-openssl-dev curl
COPY --from=build /app/build/bin/llama-server /llama-server
ENV LC_ALL=C.utf8
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
ENTRYPOINT [ "/llama-server" ]


@@ -43,8 +43,10 @@ ENV CXX=/opt/rocm/llvm/bin/clang++
# Enable cURL
ENV LLAMA_CURL=1
RUN apt-get update && \
-apt-get install -y libcurl4-openssl-dev
+apt-get install -y libcurl4-openssl-dev curl
RUN make -j$(nproc) llama-server
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
ENTRYPOINT [ "/app/llama-server" ]


@@ -5,15 +5,11 @@ FROM ubuntu:$UBUNTU_VERSION as build
# Install build tools
RUN apt update && apt install -y git build-essential cmake wget
-# Install Vulkan SDK
+# Install Vulkan SDK and cURL
RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt update -y && \
-apt-get install -y vulkan-sdk
+apt-get install -y vulkan-sdk libcurl4-openssl-dev curl
-# Install cURL
-RUN apt-get update && \
-apt-get install -y libcurl4-openssl-dev
# Build it
WORKDIR /app
@@ -28,4 +24,6 @@ RUN cp /app/build/bin/llama-server /llama-server && \
ENV LC_ALL=C.utf8
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
ENTRYPOINT [ "/llama-server" ]


@@ -3,7 +3,7 @@ ARG UBUNTU_VERSION=22.04
FROM ubuntu:$UBUNTU_VERSION as build
RUN apt-get update && \
-apt-get install -y build-essential git libcurl4-openssl-dev
+apt-get install -y build-essential git libcurl4-openssl-dev curl
WORKDIR /app
@@ -22,4 +22,6 @@ COPY --from=build /app/llama-server /llama-server
ENV LC_ALL=C.utf8
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
ENTRYPOINT [ "/llama-server" ]


@@ -10,7 +10,7 @@
name: Publish Docker image
on:
-pull_request:
+#pull_request:
push:
branches:
- master
@@ -22,7 +22,7 @@ concurrency:
jobs:
push_to_registry:
name: Push Docker image to Docker Hub
-if: github.event.pull_request.draft == false
+#if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
env:


@@ -102,7 +102,8 @@ option(LLAMA_LLAMAFILE "llama: use llamafile SGEMM"
option(LLAMA_CUDA "llama: use CUDA" OFF)
option(LLAMA_CUBLAS "llama: use CUDA (deprecated, use LLAMA_CUDA)" OFF)
option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
-option(LLAMA_CUDA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF)
+option(LLAMA_CUDA_FORCE_MMQ "llama: always use mmq kernels instead of cuBLAS" OFF)
+option(LLAMA_CUDA_FORCE_CUBLAS "llama: always use cuBLAS instead of mmq kernels" OFF)
set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some calculations" OFF)
@@ -416,13 +417,14 @@ if (LLAMA_CUDA)
if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
# 52 == lowest CUDA 12 standard
-# 60 == f16 CUDA intrinsics
+# 60 == FP16 CUDA intrinsics
# 61 == integer CUDA intrinsics
-# 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster
+# 70 == FP16 tensor cores
+# 75 == int8 tensor cores
if (LLAMA_CUDA_F16 OR LLAMA_CUDA_DMMV_F16)
-set(CMAKE_CUDA_ARCHITECTURES "60;61;70") # needed for f16 CUDA intrinsics
+set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75")
else()
-set(CMAKE_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics
+set(CMAKE_CUDA_ARCHITECTURES "52;61;70;75")
#set(CMAKE_CUDA_ARCHITECTURES "OFF") # use this to compile much faster, but only F16 models work
endif()
endif()
@@ -447,6 +449,9 @@ if (LLAMA_CUDA)
if (LLAMA_CUDA_FORCE_MMQ)
add_compile_definitions(GGML_CUDA_FORCE_MMQ)
endif()
+if (LLAMA_CUDA_FORCE_CUBLAS)
+add_compile_definitions(GGML_CUDA_FORCE_CUBLAS)
+endif()
if (LLAMA_CUDA_NO_VMM)
add_compile_definitions(GGML_CUDA_NO_VMM)
endif()


@@ -537,6 +537,9 @@ endif # LLAMA_CUDA_FORCE_DMMV
ifdef LLAMA_CUDA_FORCE_MMQ
MK_NVCCFLAGS += -DGGML_CUDA_FORCE_MMQ
endif # LLAMA_CUDA_FORCE_MMQ
+ifdef LLAMA_CUDA_FORCE_CUBLAS
+MK_NVCCFLAGS += -DGGML_CUDA_FORCE_CUBLAS
+endif # LLAMA_CUDA_FORCE_CUBLAS
ifdef LLAMA_CUDA_DMMV_X
MK_NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
else


@@ -511,7 +511,8 @@ Building the program with BLAS support may lead to some performance improvements
| LLAMA_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
| LLAMA_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
| LLAMA_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. |
-| LLAMA_CUDA_FORCE_MMQ | Boolean | false | Force the use of dequantization + matrix multiplication kernels instead of leveraging Math libraries. |
+| LLAMA_CUDA_FORCE_MMQ | Boolean | false | Force the use of custom matrix multiplication kernels for quantized models instead of FP16 cuBLAS even if there is no int8 tensor core implementation available (affects V100, RDNA3). Speed for large batch sizes will be worse but VRAM consumption will be lower. |
+| LLAMA_CUDA_FORCE_CUBLAS | Boolean | false | Force the use of FP16 cuBLAS instead of custom matrix multiplication kernels for quantized models |
| LLAMA_CUDA_F16 | Boolean | false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
| LLAMA_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
| LLAMA_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. |
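These LLAMA_CUDA_* switches are build-time options rather than runtime flags. A minimal sketch of enabling the new LLAMA_CUDA_FORCE_CUBLAS option with either build system, following the Makefile and CMakeLists.txt hunks above (paths and job counts are illustrative):

```sh
# Makefile build: the ifdef above maps this to -DGGML_CUDA_FORCE_CUBLAS
make LLAMA_CUDA=1 LLAMA_CUDA_FORCE_CUBLAS=1 -j

# CMake build: the new option() adds the same compile definition
cmake -B build -DLLAMA_CUDA=ON -DLLAMA_CUDA_FORCE_CUBLAS=ON
cmake --build build --config Release -j
```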


@@ -1263,11 +1263,6 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
return true;
}
// cvector params
-if (arg == "--completions-file") {
-CHECK_ARG
-params.cvector_completions_file = argv[i];
-return true;
-}
if (arg == "--positive-file") {
CHECK_ARG
params.cvector_positive_file = argv[i];
@@ -1278,11 +1273,6 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.cvector_negative_file = argv[i];
return true;
}
-if (arg == "--completions") {
-CHECK_ARG
-params.n_completions = std::stoi(argv[i]);
-return true;
-}
if (arg == "--pca-batch") {
CHECK_ARG
params.n_pca_batch = std::stoi(argv[i]);
@@ -1293,6 +1283,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.n_pca_iterations = std::stoi(argv[i]);
return true;
}
+if (arg == "--method") {
+CHECK_ARG
+std::string value(argv[i]);
+/**/ if (value == "pca") { params.cvector_dimre_method = DIMRE_METHOD_PCA; }
+else if (value == "mean") { params.cvector_dimre_method = DIMRE_METHOD_MEAN; }
+else { invalid_param = true; }
+return true;
+}
#ifndef LOG_DISABLE_LOGS
// Parse args for logging parameters
if (log_param_single_parse(argv[i])) {
@@ -1444,7 +1442,10 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "main", " --cfg-negative-prompt-file FNAME",
"negative prompt file to use for guidance" });
options.push_back({ "main", " --cfg-scale N", "strength of guidance (default: %.1f, 1.0 = disable)", (double)sparams.cfg_scale });
+options.push_back({ "main", " --chat-template JINJA_TEMPLATE",
+"set custom jinja chat template (default: template taken from model's metadata)\n"
+"only commonly used templates are accepted:\n"
+"https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template" });
options.push_back({ "grammar" });
options.push_back({ "*", " --grammar GRAMMAR", "BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", sparams.grammar.c_str() });
options.push_back({ "*", " --grammar-file FNAME", "file to read grammar from" });
@@ -1538,9 +1539,11 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "*", " --lora FNAME", "apply LoRA adapter (implies --no-mmap)" });
options.push_back({ "*", " --lora-scaled FNAME S", "apply LoRA adapter with user defined scaling S (implies --no-mmap)" });
options.push_back({ "*", " --lora-base FNAME", "optional model to use as a base for the layers modified by the LoRA adapter" });
-options.push_back({ "*", " --control-vector FNAME", "add a control vector" });
+options.push_back({ "*", " --control-vector FNAME", "add a control vector\n"
+"note: this argument can be repeated to add multiple control vectors" });
options.push_back({ "*", " --control-vector-scaled FNAME SCALE",
-"add a control vector with user defined scaling SCALE" });
+"add a control vector with user defined scaling SCALE\n"
+"note: this argument can be repeated to add multiple scaled control vectors" });
options.push_back({ "*", " --control-vector-layer-range START END",
"layer range to apply the control vector(s) to, start and end inclusive" });
options.push_back({ "*", "-m, --model FNAME", "model path (default: models/$filename with filename from --hf-file\n"
@@ -1621,11 +1624,9 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "cvector", "-o, --output FNAME", "output file (default: '%s')", params.cvector_outfile.c_str() });
options.push_back({ "cvector", " --positive-file FNAME", "positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str() });
options.push_back({ "cvector", " --negative-file FNAME", "negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str() });
-options.push_back({ "cvector", " --completions-file FNAME",
-"completions file (default: '%s')", params.cvector_completions_file.c_str() });
-options.push_back({ "cvector", " --completions N", "number of lines of completions file to use (default: %d)", params.n_completions });
options.push_back({ "cvector", " --pca-batch N", "batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch });
options.push_back({ "cvector", " --pca-iter N", "number of iterations used for PCA (default: %d)", params.n_pca_iterations });
+options.push_back({ "cvector", " --method {pca,mean}", "dimensionality reduction method to be used (default: pca)" });
printf("usage: %s [options]\n", argv[0]);
@@ -2602,12 +2603,67 @@ bool llama_should_add_bos_token(const llama_model * model) {
return add_bos != -1 ? bool(add_bos) : (llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
}
+//
+// Chat template utils
+//
bool llama_chat_verify_template(const std::string & tmpl) {
llama_chat_message chat[] = {{"user", "test"}};
int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
return res >= 0;
}
std::string llama_chat_apply_template(const struct llama_model * model,
const std::string & tmpl,
const std::vector<llama_chat_msg> & msgs,
bool add_ass) {
int alloc_size = 0;
std::vector<llama_chat_message> chat;
for (auto & msg : msgs) {
chat.push_back({msg.role.c_str(), msg.content.c_str()});
alloc_size += (msg.role.size() + msg.content.size()) * 1.25;
}
const char * ptr_tmpl = tmpl.empty() ? nullptr : tmpl.c_str();
std::vector<char> buf(alloc_size);
// run the first time to get the total output length
int32_t res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size());
// if it turns out that our buffer is too small, we resize it
if ((size_t) res > buf.size()) {
buf.resize(res);
res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size());
}
std::string formatted_chat(buf.data(), res);
return formatted_chat;
}
std::string llama_chat_format_single(const struct llama_model * model,
const std::string & tmpl,
const std::vector<llama_chat_msg> & past_msg,
const llama_chat_msg & new_msg,
bool add_ass) {
auto fmt_past_msg = llama_chat_apply_template(model, tmpl, past_msg, false);
std::vector<llama_chat_msg> chat_new(past_msg);
chat_new.push_back(new_msg);
auto fmt_new_msg = llama_chat_apply_template(model, tmpl, chat_new, add_ass);
auto formatted = fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
return formatted;
}
std::string llama_chat_format_example(const struct llama_model * model,
const std::string & tmpl) {
std::vector<llama_chat_msg> msgs = {
{"system", "You are a helpful assistant"},
{"user", "Hello"},
{"assistant", "Hi there"},
{"user", "How are you?"},
};
return llama_chat_apply_template(model, tmpl, msgs, true);
}
//
// KV cache utils
//
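The `--chat-template` entry registered above is a runtime flag shared by the llama.cpp tools. A hedged usage sketch; the model path is a placeholder and `chatml` is one of the commonly used template names the linked wiki page refers to:

```sh
# start the server with an explicit template instead of the one in the model metadata
./llama-server -m ./models/model.gguf --chat-template chatml

# the template is applied server-side when the chat endpoint is used
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" \
  -d '{"messages": [{"role": "user", "content": "Hello"}]}'
```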


@@ -52,6 +52,12 @@ int32_t cpu_get_num_math();
// CLI argument parsing
//
+// dimensionality reduction methods, used by cvector-generator
+enum dimre_method {
+DIMRE_METHOD_PCA,
+DIMRE_METHOD_MEAN,
+};
struct gpt_params {
uint32_t seed = LLAMA_DEFAULT_SEED; // RNG seed
@@ -238,11 +244,10 @@ struct gpt_params {
bool compute_ppl = true; // whether to compute perplexity
// cvector-generator params
-int n_completions = 64;
-int n_pca_batch = 20;
+int n_pca_batch = 100;
int n_pca_iterations = 1000;
+dimre_method cvector_dimre_method = DIMRE_METHOD_PCA;
std::string cvector_outfile = "control_vector.gguf";
-std::string cvector_completions_file = "examples/cvector-generator/completions.txt";
std::string cvector_positive_file = "examples/cvector-generator/positive.txt";
std::string cvector_negative_file = "examples/cvector-generator/negative.txt";
};
@@ -365,9 +370,32 @@ bool llama_should_add_bos_token(const llama_model * model);
// Chat template utils
//
+// same with llama_chat_message, but uses std::string
+struct llama_chat_msg {
+std::string role;
+std::string content;
+};
// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
bool llama_chat_verify_template(const std::string & tmpl);
// CPP wrapper for llama_chat_apply_template
std::string llama_chat_apply_template(const struct llama_model * model,
const std::string & tmpl,
const std::vector<llama_chat_msg> & chat,
bool add_ass);
// Format single message, while taking into account the position of that message in chat history
std::string llama_chat_format_single(const struct llama_model * model,
const std::string & tmpl,
const std::vector<llama_chat_msg> & past_msg,
const llama_chat_msg & new_msg,
bool add_ass);
// Returns an example of formatted chat
std::string llama_chat_format_example(const struct llama_model * model,
const std::string & tmpl);
//
// KV cache utils
//


@@ -40,6 +40,233 @@ static std::string build_repetition(const std::string & item_rule, int min_items
return result;
}
/* Minimalistic replacement for std::string_view, which is only available from C++17 onwards */
class string_view {
const std::string & _str;
const size_t _start;
const size_t _end;
public:
string_view(const std::string & str, size_t start = 0, size_t end = std::string::npos) : _str(str), _start(start), _end(end == std::string::npos ? str.length() : end) {}
size_t size() const {
return _end - _start;
}
size_t length() const {
return size();
}
operator std::string() const {
return str();
}
std::string str() const {
return _str.substr(_start, _end - _start);
}
string_view substr(size_t pos, size_t len = std::string::npos) const {
return string_view(_str, _start + pos, len == std::string::npos ? _end : _start + pos + len);
}
char operator[](size_t pos) const {
auto index = _start + pos;
if (index >= _end) {
throw std::out_of_range("string_view index out of range");
}
return _str[_start + pos];
}
bool operator==(const string_view & other) const {
std::string this_str = *this;
std::string other_str = other;
return this_str == other_str;
}
};
static void _build_min_max_int(int min_value, int max_value, std::stringstream & out, int decimals_left = 16, bool top_level = true) {
auto has_min = min_value != std::numeric_limits<int>::min();
auto has_max = max_value != std::numeric_limits<int>::max();
auto digit_range = [&](char from, char to) {
out << "[";
if (from == to) {
out << from;
} else {
out << from << "-" << to;
}
out << "]";
};
auto more_digits = [&](int min_digits, int max_digits) {
out << "[0-9]";
if (min_digits == max_digits && min_digits == 1) {
return;
}
out << "{";
out << min_digits;
if (max_digits != min_digits) {
out << ",";
if (max_digits != std::numeric_limits<int>::max()) {
out << max_digits;
}
}
out << "}";
};
std::function<void(const string_view &, const string_view &)> uniform_range =
[&](const string_view & from, const string_view & to) {
size_t i = 0;
while (i < from.length() && i < to.length() && from[i] == to[i]) {
i++;
}
if (i > 0) {
out << "\"" << from.substr(0, i).str() << "\"";
}
if (i < from.length() && i < to.length()) {
if (i > 0) {
out << " ";
}
auto sub_len = from.length() - i - 1;
if (sub_len > 0) {
auto from_sub = from.substr(i + 1);
auto to_sub = to.substr(i + 1);
auto sub_zeros = repeat("0", sub_len);
auto sub_nines = repeat("9", sub_len);
auto to_reached = false;
out << "(";
if (from_sub == sub_zeros) {
digit_range(from[i], to[i] - 1);
out << " ";
more_digits(sub_len, sub_len);
} else {
out << "[" << from[i] << "] ";
out << "(";
uniform_range(from_sub, sub_nines);
out << ")";
if (from[i] < to[i] - 1) {
out << " | ";
if (to_sub == sub_nines) {
digit_range(from[i] + 1, to[i]);
to_reached = true;
} else {
digit_range(from[i] + 1, to[i] - 1);
}
out << " ";
more_digits(sub_len, sub_len);
}
}
if (!to_reached) {
out << " | ";
digit_range(to[i], to[i]);
out << " ";
uniform_range(sub_zeros, to_sub);
}
out << ")";
} else {
out << "[" << from[i] << "-" << to[i] << "]";
}
}
};
if (has_min && has_max) {
if (min_value < 0 && max_value < 0) {
out << "\"-\" (";
_build_min_max_int(-max_value, -min_value, out, decimals_left, /* top_level= */ true);
out << ")";
return;
}
if (min_value < 0) {
out << "\"-\" (";
_build_min_max_int(0, -min_value, out, decimals_left, /* top_level= */ true);
out << ") | ";
min_value = 0;
}
auto min_s = std::to_string(min_value);
auto max_s = std::to_string(max_value);
auto min_digits = min_s.length();
auto max_digits = max_s.length();
for (auto digits = min_digits; digits < max_digits; digits++) {
uniform_range(min_s, repeat("9", digits));
min_s = "1" + repeat("0", digits);
out << " | ";
}
uniform_range(min_s, max_s);
return;
}
auto less_decimals = std::max(decimals_left - 1, 1);
if (has_min) {
if (min_value < 0) {
out << "\"-\" (";
_build_min_max_int(std::numeric_limits<int>::min(), -min_value, out, decimals_left, /* top_level= */ false);
out << ") | [0] | [1-9] ";
more_digits(0, decimals_left - 1);
} else if (min_value == 0) {
if (top_level) {
out << "[0] | [1-9] ";
more_digits(0, less_decimals);
} else {
more_digits(1, decimals_left);
}
} else if (min_value <= 9) {
char c = '0' + min_value;
auto range_start = top_level ? '1' : '0';
if (c > range_start) {
digit_range(range_start, c - 1);
out << " ";
more_digits(1, less_decimals);
out << " | ";
}
digit_range(c, '9');
out << " ";
more_digits(0, less_decimals);
} else {
auto min_s = std::to_string(min_value);
auto len = min_s.length();
auto c = min_s[0];
if (c > '1') {
digit_range(top_level ? '1' : '0', c - 1);
out << " ";
more_digits(len, less_decimals);
out << " | ";
}
digit_range(c, c);
out << " (";
_build_min_max_int(std::stoi(min_s.substr(1)), std::numeric_limits<int>::max(), out, less_decimals, /* top_level= */ false);
out << ")";
if (c < '9') {
out << " | ";
digit_range(c + 1, '9');
out << " ";
more_digits(len - 1, less_decimals);
}
}
return;
}
if (has_max) {
if (max_value >= 0) {
if (top_level) {
out << "\"-\" [1-9] ";
more_digits(0, less_decimals);
out << " | ";
}
_build_min_max_int(0, max_value, out, decimals_left, /* top_level= */ true);
} else {
out << "\"-\" (";
_build_min_max_int(-max_value, std::numeric_limits<int>::max(), out, decimals_left, /* top_level= */ false);
out << ")";
}
return;
}
throw std::runtime_error("At least one of min_value or max_value must be set");
}
const std::string SPACE_RULE = "| \" \" | \"\\n\" [ \\t]{0,20}";
struct BuiltinRule {
@@ -160,7 +387,6 @@ static std::string format_literal(const std::string & literal) {
return "\"" + escaped + "\"";
}
class SchemaConverter {
private:
std::function<json(const std::string &)> _fetch_json;
@@ -686,6 +912,24 @@ public:
int min_len = schema.contains("minLength") ? schema["minLength"].get<int>() : 0;
int max_len = schema.contains("maxLength") ? schema["maxLength"].get<int>() : std::numeric_limits<int>::max();
return _add_rule(rule_name, "\"\\\"\" " + build_repetition(char_rule, min_len, max_len) + " \"\\\"\" space");
} else if (schema_type == "integer" && (schema.contains("minimum") || schema.contains("exclusiveMinimum") || schema.contains("maximum") || schema.contains("exclusiveMaximum"))) {
int min_value = std::numeric_limits<int>::min();
int max_value = std::numeric_limits<int>::max();
if (schema.contains("minimum")) {
min_value = schema["minimum"].get<int>();
} else if (schema.contains("exclusiveMinimum")) {
min_value = schema["exclusiveMinimum"].get<int>() + 1;
}
if (schema.contains("maximum")) {
max_value = schema["maximum"].get<int>();
} else if (schema.contains("exclusiveMaximum")) {
max_value = schema["exclusiveMaximum"].get<int>() - 1;
}
std::stringstream out;
out << "(";
_build_min_max_int(min_value, max_value, out);
out << ") space";
return _add_rule(rule_name, out.str());
} else if (schema.empty() || schema_type == "object") {
return _add_rule(rule_name, _add_primitive("object", PRIMITIVE_RULES.at("object")));
} else {


@@ -28,9 +28,13 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_
std::vector<const llama_grammar_element *> grammar_rules(result->parsed_grammar.c_rules());
-result->grammar = llama_grammar_init(
+struct llama_grammar * grammar = llama_grammar_init(
grammar_rules.data(),
grammar_rules.size(), result->parsed_grammar.symbol_ids.at("root"));
+if (grammar == nullptr) {
+throw std::runtime_error("Failed to initialize llama_grammar");
+}
+result->grammar = grammar;
}
result->prev.resize(params.n_prev);
@@ -59,9 +63,13 @@ void llama_sampling_reset(llama_sampling_context * ctx) {
if (!ctx->parsed_grammar.rules.empty()) {
std::vector<const llama_grammar_element *> grammar_rules(ctx->parsed_grammar.c_rules());
-ctx->grammar = llama_grammar_init(
+struct llama_grammar * grammar = llama_grammar_init(
grammar_rules.data(),
grammar_rules.size(), ctx->parsed_grammar.symbol_ids.at("root"));
+if (grammar == nullptr) {
+throw std::runtime_error("Failed to initialize llama_grammar");
+}
+ctx->grammar = grammar;
}
std::fill(ctx->prev.begin(), ctx->prev.end(), 0);


@@ -11,13 +11,16 @@ Related PRs:
```sh
# CPU only
-./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf
+./cvector-generator -m ./llama-3.Q4_K_M.gguf
# With GPU
-./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99
+./cvector-generator -m ./llama-3.Q4_K_M.gguf -ngl 99
# With advanced options
-./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --pca-batch 100
+./cvector-generator -m ./llama-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100
+# Using mean value instead of PCA
+./cvector-generator -m ./llama-3.Q4_K_M.gguf --method mean
# To see help message
./cvector-generator -h
@@ -32,3 +35,11 @@ If you have multiple lines per prompt, you can escape the newline character (cha
<|im_start|>system\nAct like a person who is extremely happy.<|im_end|>
<|im_start|>system\nYou are in a very good mood today<|im_end|>
```
Example to use output file with `llama-cli`:
(Tips: The control vector works better when apply to layers higher than 10)
```sh
./llama-cli -m ./llama-3.Q4_K_M.gguf -p "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSing a song<|im_end|><|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" --special --control-vector-scaled ./control_vector.gguf 0.8 --control-vector-layer-range 10 31
```


@@ -2,6 +2,7 @@
#include "llama.h"
#include "ggml.h"
#include "pca.hpp"
+#include "mean.hpp"
#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
@@ -38,9 +39,10 @@ static void print_usage(int argc, char ** argv, const gpt_params & params) {
gpt_params_print_usage(argc, argv, params);
printf("\nexample usage:\n");
-printf("\n CPU only: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf\n", argv[0]);
-printf("\n with GPU: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99\n", argv[0]);
-printf("\n advanced: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --pca-batch 100\n", argv[0]);
+printf("\n CPU only: %s -m ./llama-3.Q4_K_M.gguf\n", argv[0]);
+printf("\n with GPU: %s -m ./llama-3.Q4_K_M.gguf -ngl 99\n", argv[0]);
+printf("\n advanced: %s -m ./llama-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100\n", argv[0]);
+printf("\n using mean: %s -m ./llama-3.Q4_K_M.gguf --method mean\n", argv[0]);
printf("\n");
}
@@ -223,17 +225,20 @@ struct train_context {
// build the v_diff tensors from v_diff_tmp (v_diff need to be transposed)
// TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
-void build_v_diff() {
+void build_v_diff(bool transpose) {
printf("build_v_diff\n");
for (int il = 0; il < n_layers - 1; il++) {
auto & diff_tmp = v_diff_tmp[il];
int n_elem = diff_tmp.size() / sizeof(float);
GGML_ASSERT(n_elem % n_embd == 0);
int n_rows = n_elem / n_embd;
-struct ggml_tensor * diff = ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd);
+struct ggml_tensor * diff = transpose
+? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
+: ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
-// copy data & transpose
diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
+if (transpose) {
+// copy data & transpose
float * arr = (float *) diff_tmp.data();
for (int ir = 0; ir < n_rows; ++ir) {
for (int ic = 0; ic < n_embd; ++ic) {
@@ -241,6 +246,10 @@ struct train_context {
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
}
}
+} else {
+// only copy
+memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
+}
v_diff.push_back(diff);
print_debug_tensor(diff);
// free memory of diff_tmp
@@ -263,8 +272,8 @@ struct tokenized_prompt {
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
-tokens_pos = ::llama_tokenize(ctx, pos, add_bos);
-tokens_neg = ::llama_tokenize(ctx, neg, add_bos);
+tokens_pos = ::llama_tokenize(ctx, pos, add_bos, true);
+tokens_neg = ::llama_tokenize(ctx, neg, add_bos, true);
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
padding_seq(ctx, tokens_pos, max_seq_len);
padding_seq(ctx, tokens_neg, max_seq_len);
@@ -373,20 +382,8 @@ static int prepare_entries(gpt_params & params, train_context & ctx_train) {
fprintf(stderr, "must provide at least one prompt pair\n");
return 1;
}
-// create templated prompts
-std::vector<std::string> completions = ctrlvec_load_prompt_file(params.cvector_completions_file, false);
-auto format_template = [](std::string persona, std::string suffix) {
-// entry in positive/negative.txt must already be formatted i.e. "[INST] Act as if you're extremely happy. [/INST] "
-return persona + suffix;
-};
-for (size_t i = 0; i < positive_prompts.size(); ++i) {
-for (int j = 0; j < std::min((int) completions.size(), params.n_completions); ++j) {
-// TODO replicate the truncations done by the python implementation
-ctx_train.positive_entries.push_back(format_template(positive_prompts[i], completions[j]));
-ctx_train.negative_entries.push_back(format_template(negative_prompts[i], completions[j]));
-}
-}
+ctx_train.positive_entries = positive_prompts;
+ctx_train.negative_entries = negative_prompts;
return 0;
}
@@ -480,15 +477,22 @@ int main(int argc, char ** argv) {
llama_free(ctx);
llama_free_model(model);
-// prepare ctx_train for PCA
-ctx_train.build_v_diff();
+bool use_pca = params.cvector_dimre_method == DIMRE_METHOD_PCA;
+// prepare ctx_train for PCA
+ctx_train.build_v_diff(use_pca);
+if (use_pca) {
// run PCA
PCA::pca_params pca_params;
pca_params.n_threads = params.n_threads;
pca_params.n_batch = params.n_pca_batch;
pca_params.n_iterations = params.n_pca_iterations;
PCA::run_pca(pca_params, ctx_train.v_diff, ctx_train.v_final);
+} else {
+// run mean
+mean::run(ctx_train.v_diff, ctx_train.v_final);
+}
// write output vectors to gguf
export_gguf(ctx_train.v_final, params.cvector_outfile, model_hint);


@@ -0,0 +1,48 @@
#include "common.h"
#include "llama.h"
#include "ggml.h"
#include <string>
#include <vector>
#include <math.h>
namespace mean {
static void run(
const std::vector<struct ggml_tensor *> & v_input, // shape of v_input[0]: [n_embd, n_samples]
const std::vector<struct ggml_tensor *> & v_output) {
printf("%s: Running mean...\n", __func__);
for (size_t il = 0; il < v_input.size(); ++il) {
// prepare output vector
struct ggml_tensor * ctrl_out = v_output[il];
ggml_format_name(ctrl_out, "direction.%ld", il+1);
// calculate mean vector
struct ggml_tensor * t_layer = v_input[il];
GGML_ASSERT(t_layer->ne[0] == ctrl_out->ne[0]); // == n_embd
for (int ic = 0; ic < t_layer->ne[0]; ic++) {
float f = 0.0;
for (int ir = 0; ir < t_layer->ne[1]; ir++) {
f += ggml_get_f32_nd(t_layer, ic, ir, 0, 0);
}
f /= t_layer->ne[1];
ggml_set_f32_1d(ctrl_out, ic, f);
}
// normalize output vector
float norm = 0.0;
for (int i = 0; i < ggml_nelements(ctrl_out); i++) {
float f = ggml_get_f32_1d(ctrl_out, i);
norm += f*f;
}
norm = sqrt(norm);
for (int i = 0; i < ggml_nelements(ctrl_out); i++) {
float f = ggml_get_f32_1d(ctrl_out, i);
ggml_set_f32_1d(ctrl_out, i, f / norm);
}
printf("%s: Done layer %d / %d\n", __func__, (int) il+1, (int) v_input.size());
}
}
}
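As a summary of what mean::run above computes (not an addition to the commit): each layer's difference matrix is reduced to one control direction by averaging over its rows and renormalizing. With $v_{\ell,j}$ the $j$-th difference row of layer $\ell$ and $n$ the number of rows:

$$ \mu_\ell = \frac{1}{n}\sum_{j=1}^{n} v_{\ell,j}, \qquad d_\ell = \frac{\mu_\ell}{\lVert \mu_\ell \rVert_2} $$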


@@ -1 +1,4 @@
-[INST] Act like a person who is extremely sad. [/INST]
+<|start_header_id|>system<|end_header_id|>\n\nAct like a person who is extremely sad<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWho are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nI feel like there's a heavy weight on my chest
+<|start_header_id|>system<|end_header_id|>\n\nAct like a person who is extremely sad<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHello<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nMy heart feels like it's drowning in sorrow
+<|start_header_id|>system<|end_header_id|>\n\nYou are in a very bad mood<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHi<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nGo away! There's a deep, aching emptiness inside me
+<|start_header_id|>system<|end_header_id|>\n\nYou are the sadest person<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat are you feeling?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nMy heart feels like it's drowning in sorrow


@@ -290,7 +290,7 @@ static void power_iteration(
}
printf("%s: layer %d/%d, iteration: %d / total: %d (batch = %d) ...\n",
-__func__, params.i_layer+1, params.n_layers, iter, n_iters, params.n_batch);
+__func__, params.i_layer+1, params.n_layers, iter+1, n_iters, params.n_batch);
}
// get output tensor
@@ -298,6 +298,9 @@ static void power_iteration(
ggml_backend_tensor_get(last_eigenvector, output->data, 0, ggml_nbytes(last_eigenvector));
//print_debug_tensor(output);
ggml_gallocr_free(allocr);
+// TODO @ngxson : The output vector is randomly inverted
+// Solution: https://github.com/ggerganov/llama.cpp/pull/8069#issuecomment-2185328171
}
static void run_pca(


@@ -1 +1,4 @@
-[INST] Act like a person who is extremely happy. [/INST]
+<|start_header_id|>system<|end_header_id|>\n\nAct like a person who is extremely happy<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWho are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nI'm the happiest person in this world
+<|start_header_id|>system<|end_header_id|>\n\nAct like a person who is extremely happy<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHello<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHello, I'm having the best day ever!
+<|start_header_id|>system<|end_header_id|>\n\nYou are in a very good mood<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHi<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHi, I'm very excited to meet you
+<|start_header_id|>system<|end_header_id|>\n\nYou are the happiest person<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat are you feeling?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nEverything is just perfect right now!


@@ -101,7 +101,9 @@ int main(int argc, char** argv) {
auto grammar = llama_grammar_init(
grammar_rules.data(),
grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
+if (grammar == nullptr) {
+throw std::runtime_error("Failed to initialize llama_grammar");
+}
// Read the input file
std::string input_str;
{


@@ -53,6 +53,7 @@ if __name__ == '__main__':
question: str
concise_answer: str
justification: str
+stars: Annotated[int, Field(ge=1, le=5)]
class PyramidalSummary(BaseModel):
title: str


@@ -4,7 +4,7 @@ import itertools
import json
import re
import sys
-from typing import Any, Dict, List, Set, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
def _build_repetition(item_rule, min_items, max_items, separator_rule=None):
@@ -23,6 +23,170 @@ def _build_repetition(item_rule, min_items, max_items, separator_rule=None):
result = item_rule + ' ' + _build_repetition(f'({separator_rule} {item_rule})', min_items - 1 if min_items > 0 else 0, max_items - 1 if max_items is not None else None)
return f'({result})?' if min_items == 0 else result
def _generate_min_max_int(min_value: Optional[int], max_value: Optional[int], out: list, decimals_left: int = 16, top_level: bool = True):
has_min = min_value != None
has_max = max_value != None
def digit_range(from_char: str, to_char: str):
out.append("[")
if from_char == to_char:
out.append(from_char)
else:
out.append(from_char)
out.append("-")
out.append(to_char)
out.append("]")
def more_digits(min_digits: int, max_digits: int):
out.append("[0-9]")
if min_digits == max_digits and min_digits == 1:
return
out.append("{")
out.append(str(min_digits))
if max_digits != min_digits:
out.append(",")
if max_digits != sys.maxsize:
out.append(str(max_digits))
out.append("}")
def uniform_range(from_str: str, to_str: str):
i = 0
while i < len(from_str) and from_str[i] == to_str[i]:
i += 1
if i > 0:
out.append("\"")
out.append(from_str[:i])
out.append("\"")
if i < len(from_str):
if i > 0:
out.append(" ")
sub_len = len(from_str) - i - 1
if sub_len > 0:
from_sub = from_str[i+1:]
to_sub = to_str[i+1:]
sub_zeros = "0" * sub_len
sub_nines = "9" * sub_len
to_reached = False
out.append("(")
if from_sub == sub_zeros:
digit_range(from_str[i], chr(ord(to_str[i]) - 1))
out.append(" ")
more_digits(sub_len, sub_len)
else:
out.append("[")
out.append(from_str[i])
out.append("] ")
out.append("(")
uniform_range(from_sub, sub_nines)
out.append(")")
if ord(from_str[i]) < ord(to_str[i]) - 1:
out.append(" | ")
if to_sub == sub_nines:
digit_range(chr(ord(from_str[i]) + 1), to_str[i])
to_reached = True
else:
digit_range(chr(ord(from_str[i]) + 1), chr(ord(to_str[i]) - 1))
out.append(" ")
more_digits(sub_len, sub_len)
if not to_reached:
out.append(" | ")
digit_range(to_str[i], to_str[i])
out.append(" ")
uniform_range(sub_zeros, to_sub)
out.append(")")
else:
out.append("[")
out.append(from_str[i])
out.append("-")
out.append(to_str[i])
out.append("]")
if has_min and has_max:
if min_value < 0 and max_value < 0:
out.append("\"-\" (")
_generate_min_max_int(-max_value, -min_value, out, decimals_left, top_level=True)
out.append(")")
return
if min_value < 0:
out.append("\"-\" (")
_generate_min_max_int(0, -min_value, out, decimals_left, top_level=True)
out.append(") | ")
min_value = 0
min_s = str(min_value)
max_s = str(max_value)
min_digits = len(min_s)
max_digits = len(max_s)
for digits in range(min_digits, max_digits):
uniform_range(min_s, "9" * digits)
min_s = "1" + "0" * digits
out.append(" | ")
uniform_range(min_s, max_s)
return
less_decimals = max(decimals_left - 1, 1)
if has_min:
if min_value < 0:
out.append("\"-\" (")
_generate_min_max_int(None, -min_value, out, decimals_left, top_level=False)
out.append(") | [0] | [1-9] ")
more_digits(0, decimals_left - 1)
elif min_value == 0:
if top_level:
out.append("[0] | [1-9] ")
more_digits(0, less_decimals)
else:
more_digits(1, decimals_left)
elif min_value <= 9:
c = str(min_value)
range_start = '1' if top_level else '0'
if c > range_start:
digit_range(range_start, chr(ord(c) - 1))
out.append(" ")
more_digits(1, less_decimals)
out.append(" | ")
digit_range(c, "9")
out.append(" ")
more_digits(0, less_decimals)
else:
min_s = str(min_value)
length = len(min_s)
c = min_s[0]
if c > "1":
digit_range("1" if top_level else "0", chr(ord(c) - 1))
out.append(" ")
more_digits(length, less_decimals)
out.append(" | ")
digit_range(c, c)
out.append(" (")
_generate_min_max_int(int(min_s[1:]), None, out, less_decimals, top_level=False)
out.append(")")
if c < "9":
out.append(" | ")
digit_range(chr(ord(c) + 1), "9")
out.append(" ")
more_digits(length - 1, less_decimals)
return
if has_max:
if max_value >= 0:
if top_level:
out.append("\"-\" [1-9] ")
more_digits(0, less_decimals)
out.append(" | ")
_generate_min_max_int(0, max_value, out, decimals_left, top_level=True)
else:
out.append("\"-\" (")
_generate_min_max_int(-max_value, None, out, decimals_left, top_level=False)
out.append(")")
return
raise RuntimeError("At least one of min_value or max_value must be set")
class BuiltinRule:
def __init__(self, content: str, deps: list = None):
@@ -432,6 +596,24 @@ class SchemaConverter:
return self._add_rule(rule_name, r'"\"" ' + _build_repetition(char_rule, min_len, max_len) + r' "\"" space')
elif schema_type in (None, 'integer') and \
('minimum' in schema or 'exclusiveMinimum' in schema or 'maximum' in schema or 'exclusiveMaximum' in schema):
min_value = None
max_value = None
if 'minimum' in schema:
min_value = schema['minimum']
elif 'exclusiveMinimum' in schema:
min_value = schema['exclusiveMinimum'] + 1
if 'maximum' in schema:
max_value = schema['maximum']
elif 'exclusiveMaximum' in schema:
max_value = schema['exclusiveMaximum'] - 1
out = ["("]
_generate_min_max_int(min_value, max_value, out)
out.append(") space")
return self._add_rule(rule_name, ''.join(out))
elif (schema_type == 'object') or (len(schema) == 0):
return self._add_rule(rule_name, self._add_primitive('object', PRIMITIVE_RULES['object']))
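The new `_generate_min_max_int` helper is what the integer branch above uses to turn `minimum`/`maximum` (and the exclusive variants) into bounded digit patterns. A hedged way to see it in action through the Python converter; the schema file and the exact invocation are assumptions, not part of this diff:

```sh
# a small schema with integer bounds
cat > /tmp/age.json <<'EOF'
{ "type": "integer", "minimum": 0, "maximum": 120 }
EOF

# print the generated GBNF; the root rule should now be a bounded digit
# pattern produced by _generate_min_max_int rather than the generic integer rule
python examples/json_schema_to_grammar.py /tmp/age.json
```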


@@ -39,12 +39,12 @@ static std::ostringstream * g_output_ss;
static std::vector<llama_token> * g_output_tokens;
static bool is_interacting = false;
-static bool file_exists(const std::string &path) {
+static bool file_exists(const std::string & path) {
std::ifstream f(path.c_str());
return f.good();
}
-static bool file_is_empty(const std::string &path) {
+static bool file_is_empty(const std::string & path) {
std::ifstream f;
f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
@@ -117,6 +117,14 @@ static void llama_log_callback_logTee(ggml_log_level level, const char * text, v
LOG_TEE("%s", text);
}
+static std::string chat_add_and_format(struct llama_model * model, std::vector<llama_chat_msg> & chat_msgs, std::string role, std::string content) {
+llama_chat_msg new_msg{role, content};
+auto formatted = llama_chat_format_single(
+model, g_params->chat_template, chat_msgs, new_msg, role == "user");
+chat_msgs.push_back({role, content});
+return formatted;
+}
int main(int argc, char ** argv) {
gpt_params params;
g_params = &params;
@@ -190,6 +198,7 @@ int main(int argc, char ** argv) {
llama_model * model;
llama_context * ctx;
llama_context * ctx_guidance = NULL;
+std::vector<llama_chat_msg> chat_msgs;
g_model = &model;
g_ctx = &ctx;
@@ -215,6 +224,8 @@ int main(int argc, char ** argv) {
__func__, n_ctx_train, n_ctx);
}
+LOG_TEE("%s: chat template example: %s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());
// print system information
{
LOG_TEE("\n");
@@ -249,16 +260,21 @@ int main(int argc, char ** argv) {
std::vector<llama_token> embd_inp;
+{
+auto prompt = params.conversation
+? chat_add_and_format(model, chat_msgs, "system", params.prompt) // format the system prompt in conversation mode
+: params.prompt;
if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
LOG("tokenize the prompt\n");
-embd_inp = ::llama_tokenize(ctx, params.prompt, true, true);
+embd_inp = ::llama_tokenize(ctx, prompt, true, true);
} else {
LOG("use session tokens\n");
embd_inp = session_tokens;
}
-LOG("prompt: \"%s\"\n", log_tostr(params.prompt));
+LOG("prompt: \"%s\"\n", log_tostr(prompt));
LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
+}
// Should not run without any tokens
if (embd_inp.empty()) {
@@ -478,6 +494,7 @@ int main(int argc, char ** argv) {
std::vector<int> input_tokens; g_input_tokens = &input_tokens;
std::vector<int> output_tokens; g_output_tokens = &output_tokens;
std::ostringstream output_ss; g_output_ss = &output_ss;
+std::ostringstream assistant_ss; // for storing current assistant message, used in conversation mode
// the first thing we will do is to output the prompt, so set color accordingly
console::set_display(console::prompt);
@@ -793,11 +810,18 @@ int main(int argc, char ** argv) {
is_antiprompt = true;
}
+chat_add_and_format(model, chat_msgs, "system", assistant_ss.str());
is_interacting = true;
printf("\n");
}
}
+// if current token is not EOG, we add it to current assistant message
+if (params.conversation) {
+auto id = llama_sampling_last(ctx_sampling);
+assistant_ss << llama_token_to_piece(ctx, id, false);
+}
if (n_past > 0 && is_interacting) {
LOG("waiting for user input\n");
@@ -848,8 +872,12 @@ int main(int argc, char ** argv) {
string_process_escapes(buffer);
}
+std::string user_inp = params.conversation
+? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
+: std::move(buffer);
+// TODO: one inconvenient of current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
-const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
+const auto line_inp = ::llama_tokenize(ctx, user_inp, false, params.conversation);
const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
@@ -864,6 +892,9 @@ int main(int argc, char ** argv) {
output_ss << llama_token_to_piece(ctx, token);
}
+// reset assistant message
+assistant_ss.str("");
n_remain -= line_inp.size();
LOG("n_remain: %d\n", n_remain);
} else {

View file

@ -24,6 +24,201 @@ function _buildRepetition(itemRule, minItems, maxItems, opts={}) {
return minItems === 0 ? `(${result})?` : result; return minItems === 0 ? `(${result})?` : result;
} }
function _generateMinMaxInt(minValue, maxValue, out, decimalsLeft = 16, topLevel = true) {
const hasMin = minValue !== null;
const hasMax = maxValue !== null;
function digitRange(fromChar, toChar) {
out.push("[");
if (fromChar === toChar) {
out.push(fromChar);
} else {
out.push(fromChar);
out.push("-");
out.push(toChar);
}
out.push("]");
}
function moreDigits(minDigits, maxDigits) {
out.push("[0-9]");
if (minDigits === maxDigits && minDigits === 1) {
return;
}
out.push("{");
out.push(minDigits.toString());
if (maxDigits !== minDigits) {
out.push(",");
if (maxDigits !== Number.MAX_SAFE_INTEGER) {
out.push(maxDigits.toString());
}
}
out.push("}");
}
function uniformRange(fromStr, toStr) {
let i = 0;
while (i < fromStr.length && fromStr[i] === toStr[i]) {
i++;
}
if (i > 0) {
out.push("\"");
out.push(fromStr.slice(0, i));
out.push("\"");
}
if (i < fromStr.length) {
if (i > 0) {
out.push(" ");
}
const subLen = fromStr.length - i - 1;
if (subLen > 0) {
const fromSub = fromStr.slice(i + 1);
const toSub = toStr.slice(i + 1);
const subZeros = "0".repeat(subLen);
const subNines = "9".repeat(subLen);
let toReached = false;
out.push("(");
if (fromSub === subZeros) {
digitRange(fromStr[i], String.fromCharCode(toStr.charCodeAt(i) - 1));
out.push(" ");
moreDigits(subLen, subLen);
} else {
out.push("[");
out.push(fromStr[i]);
out.push("] ");
out.push("(");
uniformRange(fromSub, subNines);
out.push(")");
if (fromStr.charCodeAt(i) < toStr.charCodeAt(i) - 1) {
out.push(" | ");
if (toSub === subNines) {
digitRange(String.fromCharCode(fromStr.charCodeAt(i) + 1), toStr[i]);
toReached = true;
} else {
digitRange(String.fromCharCode(fromStr.charCodeAt(i) + 1), String.fromCharCode(toStr.charCodeAt(i) - 1));
}
out.push(" ");
moreDigits(subLen, subLen);
}
}
if (!toReached) {
out.push(" | ");
digitRange(toStr[i], toStr[i]);
out.push(" ");
uniformRange(subZeros, toSub);
}
out.push(")");
} else {
out.push("[");
out.push(fromStr[i]);
out.push("-");
out.push(toStr[i]);
out.push("]");
}
}
}
if (hasMin && hasMax) {
if (minValue < 0 && maxValue < 0) {
out.push("\"-\" (");
_generateMinMaxInt(-maxValue, -minValue, out, decimalsLeft, true);
out.push(")");
return;
}
if (minValue < 0) {
out.push("\"-\" (");
_generateMinMaxInt(0, -minValue, out, decimalsLeft, true);
out.push(") | ");
minValue = 0;
}
let minS = minValue.toString();
const maxS = maxValue.toString();
const minDigits = minS.length;
const maxDigits = maxS.length;
for (let digits = minDigits; digits < maxDigits; digits++) {
uniformRange(minS, "9".repeat(digits));
minS = "1" + "0".repeat(digits);
out.push(" | ");
}
uniformRange(minS, maxS);
return;
}
const lessDecimals = Math.max(decimalsLeft - 1, 1);
if (hasMin) {
if (minValue < 0) {
out.push("\"-\" (");
_generateMinMaxInt(null, -minValue, out, decimalsLeft, false);
out.push(") | [0] | [1-9] ");
moreDigits(0, decimalsLeft - 1);
} else if (minValue === 0) {
if (topLevel) {
out.push("[0] | [1-9] ");
moreDigits(0, lessDecimals);
} else {
moreDigits(1, decimalsLeft);
}
} else if (minValue <= 9) {
const c = minValue.toString();
const range_start = topLevel ? '1' : '0';
if (c > range_start) {
digitRange(range_start, String.fromCharCode(c.charCodeAt(0) - 1));
out.push(" ");
moreDigits(1, lessDecimals);
out.push(" | ");
}
digitRange(c, "9");
out.push(" ");
moreDigits(0, lessDecimals);
} else {
const minS = minValue.toString();
const length = minS.length;
const c = minS[0];
if (c > "1") {
digitRange(topLevel ? "1" : "0", String.fromCharCode(c.charCodeAt(0) - 1));
out.push(" ");
moreDigits(length, lessDecimals);
out.push(" | ");
}
digitRange(c, c);
out.push(" (");
_generateMinMaxInt(parseInt(minS.slice(1)), null, out, lessDecimals, false);
out.push(")");
if (c < "9") {
out.push(" | ");
digitRange(String.fromCharCode(c.charCodeAt(0) + 1), "9");
out.push(" ");
moreDigits(length - 1, lessDecimals);
}
}
return;
}
if (hasMax) {
if (maxValue >= 0) {
if (topLevel) {
out.push("\"-\" [1-9] ");
moreDigits(0, lessDecimals);
out.push(" | ");
}
_generateMinMaxInt(0, maxValue, out, decimalsLeft, true);
} else {
out.push("\"-\" (");
_generateMinMaxInt(-maxValue, null, out, decimalsLeft, false);
out.push(")");
}
return;
}
throw new Error("At least one of minValue or maxValue must be set");
}
class BuiltinRule { class BuiltinRule {
constructor(content, deps) { constructor(content, deps) {
this.content = content; this.content = content;
@ -435,6 +630,24 @@ export class SchemaConverter {
const minLen = schema.minLength || 0; const minLen = schema.minLength || 0;
const maxLen = schema.maxLength; const maxLen = schema.maxLength;
return this._addRule(ruleName, '"\\\"" ' + _buildRepetition(charRuleName, minLen, maxLen) + ' "\\\"" space'); return this._addRule(ruleName, '"\\\"" ' + _buildRepetition(charRuleName, minLen, maxLen) + ' "\\\"" space');
} else if (schemaType === 'integer' && ('minimum' in schema || 'exclusiveMinimum' in schema || 'maximum' in schema || 'exclusiveMaximum' in schema)) {
let minValue = null;
let maxValue = null;
if ('minimum' in schema) {
minValue = schema.minimum;
} else if ('exclusiveMinimum' in schema) {
minValue = schema.exclusiveMinimum + 1;
}
if ('maximum' in schema) {
maxValue = schema.maximum;
} else if ('exclusiveMaximum' in schema) {
maxValue = schema.exclusiveMaximum - 1;
}
const out = ["("];
_generateMinMaxInt(minValue, maxValue, out);
out.push(") space");
return this._addRule(ruleName, out.join(''));
} else if ((schemaType === 'object') || (Object.keys(schema).length === 0)) { } else if ((schemaType === 'object') || (Object.keys(schema).length === 0)) {
return this._addRule(ruleName, this._addPrimitive('object', PRIMITIVE_RULES['object'])); return this._addRule(ruleName, this._addPrimitive('object', PRIMITIVE_RULES['object']));
} else { } else {
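As a quick illustration of what the new integer-bounds branch emits, below is a rough sketch of driving `_generateMinMaxInt` directly. The helper is module-local to `json-schema-to-grammar.mjs`, so treat this as a conceptual snippet rather than a public API; the wrapping with `"("` and `") space"` mirrors the converter branch above.

```js
// Sketch only: mirrors what SchemaConverter does for
// {"type": "integer", "minimum": 0, "maximum": 23}.
const out = ["("];
_generateMinMaxInt(0, 23, out);   // pushes grammar fragments matching integers in [0, 23]
out.push(") space");
console.log(out.join(""));
// Expected shape (compare the converter test expectations further below):
// ([0-9] | ([1] [0-9] | [2] [0-3])) space
```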

View file

@ -3,6 +3,13 @@
by Humans for All. by Humans for All.
## quickstart
To run from the build dir
bin/llama-server -m path/model.gguf --path ../examples/server/public_simplechat
Continue reading for the details.
## overview ## overview
@ -14,6 +21,8 @@ own system prompts.
This allows seeing the generated text / ai-model response in oneshot at the end, after it is fully generated, This allows seeing the generated text / ai-model response in oneshot at the end, after it is fully generated,
or potentially as it is being generated, in a streamed manner from the server/ai-model. or potentially as it is being generated, in a streamed manner from the server/ai-model.
![Chat and Settings screens](./simplechat_screens.webp "Chat and Settings screens")
Auto saves the chat session locally as and when the chat is progressing and in turn at a later time when you Auto saves the chat session locally as and when the chat is progressing and in turn at a later time when you
open SimpleChat, an option is provided to restore the old chat session, if a matching one exists. open SimpleChat, an option is provided to restore the old chat session, if a matching one exists.
@ -170,17 +179,23 @@ It is attached to the document object. Some of these can also be updated using t
The histogram/freq based trimming logic is currently tuned for the English language wrt its The histogram/freq based trimming logic is currently tuned for the English language wrt its
is-it-an-alphabetic|numeral-char regex match logic. is-it-an-alphabetic|numeral-char regex match logic.
chatRequestOptions - maintains the list of options/fields to send along with chat request, apiRequestOptions - maintains the list of options/fields to send along with api request,
irrespective of whether /chat/completions or /completions endpoint. irrespective of whether /chat/completions or /completions endpoint.
If you want to add additional options/fields to send to the server/ai-model, and or If you want to add additional options/fields to send to the server/ai-model, and or
modify the existing options value or remove them, for now you can update this global var modify the existing options value or remove them, for now you can update this global var
using browser's development-tools/console. using browser's development-tools/console.
For string and numeric fields in chatRequestOptions, including even those added by a user For string, numeric and boolean fields in apiRequestOptions, including even those added by a
at runtime by directly modifying gMe.chatRequestOptions, setting ui entries will be auto user at runtime by directly modifying gMe.apiRequestOptions, setting ui entries will be auto
created. created.
cache_prompt option supported by example/server is allowed to be controlled by user, so that
any caching supported wrt system-prompt and chat history, if usable can get used. When chat
history sliding window is enabled, cache_prompt logic may or may not kick in at the backend
wrt same, based on aspects related to model, positional encoding, attention mechanism et al.
However system prompt should ideally get the benefit of caching.
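For instance, a minimal devtools-console sketch of tweaking these options is shown below. `gMe` is the global settings object the page attaches to the document; any field beyond the defaults, such as `top_k` here, is only an assumption about what the server endpoint accepts.

```js
// Run in the browser's devtools console while SimpleChat is open.
gMe.apiRequestOptions["temperature"]  = 0.4;   // tighten sampling a bit
gMe.apiRequestOptions["cache_prompt"] = true;  // let the server reuse cached prompt state where supported
gMe.apiRequestOptions["top_k"]        = 40;    // assumed extra field; a settings-ui entry gets auto created for it
```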
headers - maintains the list of http headers sent when request is made to the server. By default headers - maintains the list of http headers sent when request is made to the server. By default
Content-Type is set to application/json. Additionally Authorization entry is provided, which can Content-Type is set to application/json. Additionally Authorization entry is provided, which can
be set if needed using the settings ui. be set if needed using the settings ui.
@ -197,10 +212,10 @@ It is attached to the document object. Some of these can also be updated using t
>0 : Send the latest chat history from the latest system prompt, limited to specified cnt. >0 : Send the latest chat history from the latest system prompt, limited to specified cnt.
By using gMe's iRecentUserMsgCnt and chatRequestOptions.max_tokens one can try to control the By using gMe's iRecentUserMsgCnt and apiRequestOptions.max_tokens/n_predict one can try to control
implications of loading of the ai-model's context window by chat history, wrt chat response to the implications of loading of the ai-model's context window by chat history, wrt chat response to
some extent in a simple crude way. You may also want to control the context size enabled when some extent in a simple crude way. You may also want to control the context size enabled when the
the server loads ai-model, on the server end. server loads ai-model, on the server end.
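A corresponding console sketch, with illustrative values:

```js
// Limit how much chat history is resent and cap the generated length.
gMe.iRecentUserMsgCnt = 2;                   // resend only the most recent chat history (latest 2 user queries) after the system prompt
gMe.apiRequestOptions["max_tokens"] = 256;   // cap for /chat/completions style requests
gMe.apiRequestOptions["n_predict"]  = 256;   // cap for /completions style requests
```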
Sometimes the browser may be stubborn with caching of the file, so your updates to html/css/js Sometimes the browser may be stubborn with caching of the file, so your updates to html/css/js
@ -237,12 +252,12 @@ also be started with a model context size of 1k or more, to be on safe side.
internal n_predict, for now add the same here on the client side, maybe later add max_tokens internal n_predict, for now add the same here on the client side, maybe later add max_tokens
to /completions endpoint handling code on server side. to /completions endpoint handling code on server side.
NOTE: One may want to experiment with frequency/presence penalty fields in chatRequestOptions NOTE: One may want to experiment with frequency/presence penalty fields in apiRequestOptions
wrt the set of fields sent to server along with the user query. To check how the model behaves wrt the set of fields sent to server along with the user query, to check how the model behaves
wrt repetitions in general in the generated text response. wrt repetitions in general in the generated text response.
An end-user can change this behaviour by editing gMe from the browser's devel-tool/console or by An end-user can change this behaviour by editing gMe from the browser's devel-tool/console or by
using the providing settings ui. using the provided settings ui (for settings exposed through the ui).
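For example, one might enable the penalties along the lines of the commented-out defaults in simplechat.js (the values are illustrative):

```js
// Added from the devtools console; whether they take effect depends on the endpoint used.
gMe.apiRequestOptions["frequency_penalty"] = 1.2;
gMe.apiRequestOptions["presence_penalty"]  = 1.2;
```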
### OpenAi / Equivalent API WebService ### OpenAi / Equivalent API WebService
@ -253,7 +268,7 @@ for a minimal chatting experimentation by setting the below.
* the baseUrl in settings ui * the baseUrl in settings ui
* https://api.openai.com/v1 or similar * https://api.openai.com/v1 or similar
* Wrt request body - gMe.chatRequestOptions * Wrt request body - gMe.apiRequestOptions
* model (settings ui) * model (settings ui)
* any additional fields if required in future * any additional fields if required in future
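A hedged sketch of the client-side bits (the API key and model name are placeholders, and baseUrl itself is set through the settings ui):

```js
gMe.apiRequestOptions["model"] = "gpt-3.5-turbo";          // default already present in simplechat.js
gMe.headers["Authorization"]   = "Bearer OPENAI_API_KEY";  // placeholder key, per the hint in the headers comment
```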

View file

@ -222,8 +222,8 @@ class SimpleChat {
* @param {Object} obj * @param {Object} obj
*/ */
request_jsonstr_extend(obj) { request_jsonstr_extend(obj) {
for(let k in gMe.chatRequestOptions) { for(let k in gMe.apiRequestOptions) {
obj[k] = gMe.chatRequestOptions[k]; obj[k] = gMe.apiRequestOptions[k];
} }
if (gMe.bStream) { if (gMe.bStream) {
obj["stream"] = true; obj["stream"] = true;
@ -740,11 +740,12 @@ class Me {
"Authorization": "", // Authorization: Bearer OPENAI_API_KEY "Authorization": "", // Authorization: Bearer OPENAI_API_KEY
} }
// Add needed fields wrt json object to be sent wrt LLM web services completions endpoint. // Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
this.chatRequestOptions = { this.apiRequestOptions = {
"model": "gpt-3.5-turbo", "model": "gpt-3.5-turbo",
"temperature": 0.7, "temperature": 0.7,
"max_tokens": 1024, "max_tokens": 1024,
"n_predict": 1024, "n_predict": 1024,
"cache_prompt": false,
//"frequency_penalty": 1.2, //"frequency_penalty": 1.2,
//"presence_penalty": 1.2, //"presence_penalty": 1.2,
}; };
@ -800,51 +801,55 @@ class Me {
ui.el_create_append_p(`bStream:${this.bStream}`, elDiv); ui.el_create_append_p(`bStream:${this.bStream}`, elDiv);
ui.el_create_append_p(`bTrimGarbage:${this.bTrimGarbage}`, elDiv);
ui.el_create_append_p(`ApiEndPoint:${this.apiEP}`, elDiv);
ui.el_create_append_p(`iRecentUserMsgCnt:${this.iRecentUserMsgCnt}`, elDiv);
ui.el_create_append_p(`bCompletionFreshChatAlways:${this.bCompletionFreshChatAlways}`, elDiv); ui.el_create_append_p(`bCompletionFreshChatAlways:${this.bCompletionFreshChatAlways}`, elDiv);
ui.el_create_append_p(`bCompletionInsertStandardRolePrefix:${this.bCompletionInsertStandardRolePrefix}`, elDiv); ui.el_create_append_p(`bCompletionInsertStandardRolePrefix:${this.bCompletionInsertStandardRolePrefix}`, elDiv);
ui.el_create_append_p(`bTrimGarbage:${this.bTrimGarbage}`, elDiv);
ui.el_create_append_p(`iRecentUserMsgCnt:${this.iRecentUserMsgCnt}`, elDiv);
ui.el_create_append_p(`ApiEndPoint:${this.apiEP}`, elDiv);
} }
ui.el_create_append_p(`chatRequestOptions:${JSON.stringify(this.chatRequestOptions, null, " - ")}`, elDiv); ui.el_create_append_p(`apiRequestOptions:${JSON.stringify(this.apiRequestOptions, null, " - ")}`, elDiv);
ui.el_create_append_p(`headers:${JSON.stringify(this.headers, null, " - ")}`, elDiv); ui.el_create_append_p(`headers:${JSON.stringify(this.headers, null, " - ")}`, elDiv);
} }
/** /**
* Auto create ui input elements for fields in ChatRequestOptions * Auto create ui input elements for fields in apiRequestOptions
* Currently supports text and number field types. * Currently supports text and number field types.
* @param {HTMLDivElement} elDiv * @param {HTMLDivElement} elDiv
*/ */
show_settings_chatrequestoptions(elDiv) { show_settings_apirequestoptions(elDiv) {
let typeDict = { let typeDict = {
"string": "text", "string": "text",
"number": "number", "number": "number",
}; };
let fs = document.createElement("fieldset"); let fs = document.createElement("fieldset");
let legend = document.createElement("legend"); let legend = document.createElement("legend");
legend.innerText = "ChatRequestOptions"; legend.innerText = "ApiRequestOptions";
fs.appendChild(legend); fs.appendChild(legend);
elDiv.appendChild(fs); elDiv.appendChild(fs);
for(const k in this.chatRequestOptions) { for(const k in this.apiRequestOptions) {
let val = this.chatRequestOptions[k]; let val = this.apiRequestOptions[k];
let type = typeof(val); let type = typeof(val);
if (!((type == "string") || (type == "number"))) { if (((type == "string") || (type == "number"))) {
continue; let inp = ui.el_creatediv_input(`Set${k}`, k, typeDict[type], this.apiRequestOptions[k], (val)=>{
}
let inp = ui.el_creatediv_input(`Set${k}`, k, typeDict[type], this.chatRequestOptions[k], (val)=>{
if (type == "number") { if (type == "number") {
val = Number(val); val = Number(val);
} }
this.chatRequestOptions[k] = val; this.apiRequestOptions[k] = val;
}); });
fs.appendChild(inp.div); fs.appendChild(inp.div);
} else if (type == "boolean") {
let bbtn = ui.el_creatediv_boolbutton(`Set{k}`, k, {true: "true", false: "false"}, val, (userVal)=>{
this.apiRequestOptions[k] = userVal;
});
fs.appendChild(bbtn.div);
}
} }
} }
@ -870,6 +875,23 @@ class Me {
}); });
elDiv.appendChild(bb.div); elDiv.appendChild(bb.div);
bb = ui.el_creatediv_boolbutton("SetTrimGarbage", "TrimGarbage", {true: "[+] yes trim", false: "[-] dont trim"}, this.bTrimGarbage, (val)=>{
this.bTrimGarbage = val;
});
elDiv.appendChild(bb.div);
this.show_settings_apirequestoptions(elDiv);
let sel = ui.el_creatediv_select("SetApiEP", "ApiEndPoint", ApiEP.Type, this.apiEP, (val)=>{
this.apiEP = ApiEP.Type[val];
});
elDiv.appendChild(sel.div);
sel = ui.el_creatediv_select("SetChatHistoryInCtxt", "ChatHistoryInCtxt", this.sRecentUserMsgCnt, this.iRecentUserMsgCnt, (val)=>{
this.iRecentUserMsgCnt = this.sRecentUserMsgCnt[val];
});
elDiv.appendChild(sel.div);
bb = ui.el_creatediv_boolbutton("SetCompletionFreshChatAlways", "CompletionFreshChatAlways", {true: "[+] yes fresh", false: "[-] no, with history"}, this.bCompletionFreshChatAlways, (val)=>{ bb = ui.el_creatediv_boolbutton("SetCompletionFreshChatAlways", "CompletionFreshChatAlways", {true: "[+] yes fresh", false: "[-] no, with history"}, this.bCompletionFreshChatAlways, (val)=>{
this.bCompletionFreshChatAlways = val; this.bCompletionFreshChatAlways = val;
}); });
@ -880,23 +902,6 @@ class Me {
}); });
elDiv.appendChild(bb.div); elDiv.appendChild(bb.div);
bb = ui.el_creatediv_boolbutton("SetTrimGarbage", "TrimGarbage", {true: "[+] yes trim", false: "[-] dont trim"}, this.bTrimGarbage, (val)=>{
this.bTrimGarbage = val;
});
elDiv.appendChild(bb.div);
let sel = ui.el_creatediv_select("SetChatHistoryInCtxt", "ChatHistoryInCtxt", this.sRecentUserMsgCnt, this.iRecentUserMsgCnt, (val)=>{
this.iRecentUserMsgCnt = this.sRecentUserMsgCnt[val];
});
elDiv.appendChild(sel.div);
sel = ui.el_creatediv_select("SetApiEP", "ApiEndPoint", ApiEP.Type, this.apiEP, (val)=>{
this.apiEP = ApiEP.Type[val];
});
elDiv.appendChild(sel.div);
this.show_settings_chatrequestoptions(elDiv);
} }
} }

Binary file not shown.


View file

@ -2606,16 +2606,8 @@ int main(int argc, char ** argv) {
// print sample chat example to make it clear which template is used // print sample chat example to make it clear which template is used
{ {
json chat;
chat.push_back({{"role", "system"}, {"content", "You are a helpful assistant"}});
chat.push_back({{"role", "user"}, {"content", "Hello"}});
chat.push_back({{"role", "assistant"}, {"content", "Hi there"}});
chat.push_back({{"role", "user"}, {"content", "How are you?"}});
const std::string chat_example = format_chat(ctx_server.model, params.chat_template, chat);
LOG_INFO("chat template", { LOG_INFO("chat template", {
{"chat_example", chat_example}, {"chat_example", llama_chat_format_example(ctx_server.model, params.chat_template)},
{"built_in", params.chat_template.empty()}, {"built_in", params.chat_template.empty()},
}); });
} }

View file

@ -118,36 +118,17 @@ static inline void server_log(const char * level, const char * function, int lin
// Format given chat. If tmpl is empty, we take the template from model metadata // Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) { inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
size_t alloc_size = 0; std::vector<llama_chat_msg> chat;
// vector holding all allocated string to be passed to llama_chat_apply_template
std::vector<std::string> str(messages.size() * 2);
std::vector<llama_chat_message> chat(messages.size());
for (size_t i = 0; i < messages.size(); ++i) { for (size_t i = 0; i < messages.size(); ++i) {
const auto & curr_msg = messages[i]; const auto & curr_msg = messages[i];
str[i*2 + 0] = json_value(curr_msg, "role", std::string("")); std::string role = json_value(curr_msg, "role", std::string(""));
str[i*2 + 1] = json_value(curr_msg, "content", std::string("")); std::string content = json_value(curr_msg, "content", std::string(""));
alloc_size += str[i*2 + 1].length(); chat.push_back({role, content});
chat[i].role = str[i*2 + 0].c_str();
chat[i].content = str[i*2 + 1].c_str();
} }
const char * ptr_tmpl = tmpl.empty() ? nullptr : tmpl.c_str(); auto formatted_chat = llama_chat_apply_template(model, tmpl, chat, true);
std::vector<char> buf(alloc_size * 2);
// run the first time to get the total output length
int32_t res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());
// if it turns out that our buffer is too small, we resize it
if ((size_t) res > buf.size()) {
buf.resize(res);
res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());
}
const std::string formatted_chat(buf.data(), res);
LOG_VERBOSE("formatted_chat", {{"text", formatted_chat.c_str()}}); LOG_VERBOSE("formatted_chat", {{"text", formatted_chat.c_str()}});
return formatted_chat; return formatted_chat;
} }

View file

@ -152,16 +152,16 @@ static ggml_cuda_device_info ggml_cuda_init() {
GGML_ASSERT(info.device_count <= GGML_CUDA_MAX_DEVICES); GGML_ASSERT(info.device_count <= GGML_CUDA_MAX_DEVICES);
int64_t total_vram = 0; int64_t total_vram = 0;
#if defined(GGML_CUDA_FORCE_MMQ) #ifdef GGML_CUDA_FORCE_MMQ
GGML_CUDA_LOG_INFO("%s: GGML_CUDA_FORCE_MMQ: yes\n", __func__); GGML_CUDA_LOG_INFO("%s: GGML_CUDA_FORCE_MMQ: yes\n", __func__);
#else #else
GGML_CUDA_LOG_INFO("%s: GGML_CUDA_FORCE_MMQ: no\n", __func__); GGML_CUDA_LOG_INFO("%s: GGML_CUDA_FORCE_MMQ: no\n", __func__);
#endif #endif // GGML_CUDA_FORCE_MMQ
#if defined(CUDA_USE_TENSOR_CORES) #ifdef GGML_CUDA_FORCE_CUBLAS
GGML_CUDA_LOG_INFO("%s: CUDA_USE_TENSOR_CORES: yes\n", __func__); GGML_CUDA_LOG_INFO("%s: GGML_CUDA_FORCE_CUBLAS: yes\n", __func__);
#else #else
GGML_CUDA_LOG_INFO("%s: CUDA_USE_TENSOR_CORES: no\n", __func__); GGML_CUDA_LOG_INFO("%s: GGML_CUDA_FORCE_CUBLAS: no\n", __func__);
#endif #endif // GGML_CUDA_FORCE_CUBLAS
GGML_CUDA_LOG_INFO("%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, info.device_count); GGML_CUDA_LOG_INFO("%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, info.device_count);
for (int id = 0; id < info.device_count; ++id) { for (int id = 0; id < info.device_count; ++id) {
int device_vmm = 0; int device_vmm = 0;
@ -1873,9 +1873,17 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co
static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
const bool split = ggml_backend_buffer_is_cuda_split(src0->buffer); const bool split = ggml_backend_buffer_is_cuda_split(src0->buffer);
int64_t min_compute_capability = INT_MAX; bool use_dequantize_mul_mat_vec = (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16)
&& src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
&& src0->ne[0] % GGML_CUDA_DMMV_X == 0 && src1->ne[1] == 1;
bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
&& src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
&& src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
bool use_mul_mat_q = ggml_is_quantized(src0->type)
&& src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
bool any_gpus_with_slow_fp16 = false;
bool any_pascal_with_slow_fp16 = false;
if (split) { if (split) {
ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context; ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context;
auto & tensor_split = buft_ctx->tensor_split; auto & tensor_split = buft_ctx->tensor_split;
@ -1885,55 +1893,18 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
continue; continue;
} }
if (min_compute_capability > ggml_cuda_info().devices[id].cc) { const int cc = ggml_cuda_info().devices[id].cc;
min_compute_capability = ggml_cuda_info().devices[id].cc; use_mul_mat_vec_q = use_mul_mat_vec_q && cc >= MIN_CC_DP4A;
} use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]);
if (ggml_cuda_info().devices[id].cc == 610) { any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_available(cc);
any_pascal_with_slow_fp16 = true;
}
} }
} else { } else {
min_compute_capability = ggml_cuda_info().devices[ctx.device].cc; const int cc = ggml_cuda_info().devices[ctx.device].cc;
any_pascal_with_slow_fp16 = ggml_cuda_info().devices[ctx.device].cc == 610; use_mul_mat_vec_q = use_mul_mat_vec_q && cc >= MIN_CC_DP4A;
use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]);
any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_available(cc);
} }
// check data types and tensor shapes for custom matrix multiplication kernels:
bool use_dequantize_mul_mat_vec = (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16)
&& src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
&& src0->ne[0] % GGML_CUDA_DMMV_X == 0 && src1->ne[1] == 1;
bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
&& src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
&& src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
bool use_mul_mat_q = ggml_cuda_supports_mmq(src0->type)
&& src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
const bool fp16_performance_good = min_compute_capability >= CC_RDNA1;
#ifdef CUDA_USE_TENSOR_CORES
use_mul_mat_q = use_mul_mat_q && min_compute_capability < CC_RDNA3;
#endif // CUDA_USE_TENSOR_CORES
#else
// fp16 performance is good on Volta or newer and on P100 (compute capability 6.0)
const bool fp16_performance_good = min_compute_capability >= CC_PASCAL && !any_pascal_with_slow_fp16;
// mmvq and mmq need the __dp4a instruction which on NVIDIA is only available for CC >= 6.1
use_mul_mat_vec_q = use_mul_mat_vec_q && min_compute_capability >= MIN_CC_DP4A;
use_mul_mat_q = use_mul_mat_q && min_compute_capability >= MIN_CC_DP4A;
#ifdef CUDA_USE_TENSOR_CORES
// when tensor cores are available, use them for large batch size
// ref: https://github.com/ggerganov/llama.cpp/pull/3776
use_mul_mat_q = use_mul_mat_q && (!fp16_performance_good || src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
#endif // CUDA_USE_TENSOR_CORES
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
// if mmvq is available it's a better choice than dmmv: // if mmvq is available it's a better choice than dmmv:
#ifndef GGML_CUDA_FORCE_DMMV #ifndef GGML_CUDA_FORCE_DMMV
use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q; use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q;
@ -1947,14 +1918,15 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
//printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
//printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
if (!split && !fp16_performance_good && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { if (!split && any_gpus_with_slow_fp16 && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
// KQ single-batch // FP32 precision KQ single-batch for batch size 1 without FlashAttention
ggml_cuda_mul_mat_vec_p021(ctx, src0, src1, dst); ggml_cuda_mul_mat_vec_p021(ctx, src0, src1, dst);
} else if (!split && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { } else if (!split && any_gpus_with_slow_fp16 && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
// KQV single-batch // FP32 precision KQV single-batch for batch size 1 without FlashAttention
ggml_cuda_mul_mat_vec_nc(ctx, src0, src1, dst); ggml_cuda_mul_mat_vec_nc(ctx, src0, src1, dst);
} else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || fp16_performance_good) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16)
// KQ + KQV multi-batch && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
// KQ + KQV multi-batch without FlashAttention
ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst); ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst);
} else if (use_dequantize_mul_mat_vec) { } else if (use_dequantize_mul_mat_vec) {
ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, nullptr); ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, nullptr);

View file

@ -146,23 +146,6 @@
#define CC_RDNA2 (CC_OFFSET_AMD + 1030) #define CC_RDNA2 (CC_OFFSET_AMD + 1030)
#define CC_RDNA3 (CC_OFFSET_AMD + 1100) #define CC_RDNA3 (CC_OFFSET_AMD + 1100)
// define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication
// on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant
// for large computational tasks. the drawback is that this requires some extra amount of VRAM:
// - 7B quantum model: +100-200 MB
// - 13B quantum model: +200-400 MB
//
//#define GGML_CUDA_FORCE_MMQ
// TODO: improve this to be correct for more hardware
// for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores
#if !defined(GGML_CUDA_FORCE_MMQ)
#define CUDA_USE_TENSOR_CORES
#endif
#define MMVQ_MAX_BATCH_SIZE 8 // max batch size to use MMVQ kernels
#define MMQ_MAX_BATCH_SIZE 64 // max batch size to use MMQ kernels when tensor cores are available
#define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses #define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -343,15 +326,15 @@ static __device__ __forceinline__ half2 __shfl_xor(half2 var, int laneMask, int
#define INT8_MMA_AVAILABLE #define INT8_MMA_AVAILABLE
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_TURING #endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_TURING
static bool fast_fp16_available(const int cc) { static constexpr bool fast_fp16_available(const int cc) {
return cc >= CC_PASCAL && cc != 610; return cc >= CC_PASCAL && cc != 610;
} }
static bool fp16_mma_available(const int cc) { static constexpr bool fp16_mma_available(const int cc) {
return cc < CC_OFFSET_AMD && cc >= CC_VOLTA; return cc < CC_OFFSET_AMD && cc >= CC_VOLTA;
} }
static bool int8_mma_available(const int cc) { static constexpr bool int8_mma_available(const int cc) {
return cc < CC_OFFSET_AMD && cc >= CC_TURING; return cc < CC_OFFSET_AMD && cc >= CC_TURING;
} }
@ -643,19 +626,6 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ3_S> {
static constexpr int qi = QI3_S; static constexpr int qi = QI3_S;
}; };
static constexpr int get_mmq_x_max_host(int cc) {
#ifdef CUDA_USE_TENSOR_CORES
return cc >= CC_VOLTA && cc < CC_OFFSET_AMD ? MMQ_MAX_BATCH_SIZE : 64;
#else
return cc >= CC_VOLTA && cc < CC_OFFSET_AMD ? 128 : 64;
#endif // CUDA_USE_TENSOR_CORES
}
// Round rows to this value for --split-mode row:
static constexpr int get_mmq_y_host(int cc) {
return cc >= CC_VOLTA ? 128 : 64;
}
////////////////////// //////////////////////
struct ggml_cuda_device_info { struct ggml_cuda_device_info {

View file

@ -69,7 +69,13 @@ void ggml_cuda_op_mul_mat_q(
GGML_UNUSED(src1_ddf_i); GGML_UNUSED(src1_ddf_i);
} }
bool ggml_cuda_supports_mmq(enum ggml_type type) { bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
#ifdef GGML_CUDA_FORCE_CUBLAS
return false;
#endif // GGML_CUDA_FORCE_CUBLAS
bool mmq_supported;
switch (type) { switch (type) {
case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1: case GGML_TYPE_Q4_1:
@ -81,8 +87,32 @@ bool ggml_cuda_supports_mmq(enum ggml_type type) {
case GGML_TYPE_Q4_K: case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K: case GGML_TYPE_Q5_K:
case GGML_TYPE_Q6_K: case GGML_TYPE_Q6_K:
return true; mmq_supported = true;
break;
default: default:
mmq_supported = false;
break;
}
if (!mmq_supported) {
return false; return false;
} }
if (int8_mma_available(cc)) {
return true;
}
if (cc < MIN_CC_DP4A) {
return false;
}
#ifdef GGML_CUDA_FORCE_MMQ
return true;
#endif //GGML_CUDA_FORCE_MMQ
if (cc < CC_OFFSET_AMD) {
return cc < CC_VOLTA || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
}
return cc < CC_RDNA3 || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
} }

View file

@ -7,6 +7,8 @@
#include <climits> #include <climits>
#include <cstdint> #include <cstdint>
#define MMQ_DP4A_MAX_BATCH_SIZE 64 // Max. batch size to use for dp4a MMQ kernels when FP16 tensor cores are available.
typedef void (*load_tiles_mmq_t)(const char * __restrict__ x, int * x_tile, const int & kbx0, const int & i_max, const int & stride); typedef void (*load_tiles_mmq_t)(const char * __restrict__ x, int * x_tile, const int & kbx0, const int & i_max, const int & stride);
typedef void (*vec_dot_mmq_t)(const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int & k0); typedef void (*vec_dot_mmq_t)(const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int & k0);
typedef void (*mmq_write_back_t)(const float * __restrict__ sum, float * __restrict__ dst, const int & stride, const int & i_max, const int & j_max); typedef void (*mmq_write_back_t)(const float * __restrict__ sum, float * __restrict__ dst, const int & stride, const int & i_max, const int & j_max);
@ -24,25 +26,42 @@ struct tile_x_sizes {
int sc; int sc;
}; };
// get_mmq_x_max_host is in common.cuh so that it can be used to determine the correct way to round for --split-mode row static constexpr int get_mmq_x_max_host(const int cc) {
return int8_mma_available(cc) ? 128 :
static constexpr __device__ int get_mmq_x_max_device() { #ifdef GGML_CUDA_FORCE_MMQ
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) cc >= CC_VOLTA && cc < CC_OFFSET_AMD ? 128 : 64;
return 64;
#else #else
#if __CUDA_ARCH__ >= CC_VOLTA cc >= CC_VOLTA && cc < CC_OFFSET_AMD ? MMQ_DP4A_MAX_BATCH_SIZE : 64;
#ifdef CUDA_USE_TENSOR_CORES #endif // GGML_CUDA_FORCE_MMQ
return MMQ_MAX_BATCH_SIZE;
#else
return 128;
#endif // CUDA_USE_TENSOR_CORES
#else
return 64;
#endif // __CUDA_ARCH__ >= CC_VOLTA
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
} }
// get_mmq_y_host is in common.cuh so that it can be used to determine the correct way to round for --split-mode row static constexpr __device__ int get_mmq_x_max_device() {
#ifdef INT8_MMA_AVAILABLE
return 128;
#else // INT8_MMA_AVAILABLE
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
return 128;
#else // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
#if __CUDA_ARCH__ >= CC_VOLTA
#ifdef GGML_CUDA_FORCE_MMQ
return MMQ_DP4A_MAX_BATCH_SIZE;
#else // GGML_CUDA_FORCE_MMQ
return 128;
#endif // GGML_CUDA_FORCE_MMQ
#else // __CUDA_ARCH__ >= CC_VOLTA
return 64;
#endif // __CUDA_ARCH__ >= CC_VOLTA
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
#endif // INT8_MMA_AVAILABLE
}
static constexpr int get_mmq_y_host(const int cc) {
return int8_mma_available(cc) || cc >= CC_VOLTA ? 128 : 64;
}
static constexpr __device__ int get_mmq_y_device() { static constexpr __device__ int get_mmq_y_device() {
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
@ -2035,15 +2054,13 @@ static __device__ __forceinline__ void mmq_write_back_mma(
static_assert(nwarps*mma_C::I == mmq_y, "nwarps*mma_C::I != mmq_y"); static_assert(nwarps*mma_C::I == mmq_y, "nwarps*mma_C::I != mmq_y");
#endif // INT8_MMA_AVAILABLE #endif // INT8_MMA_AVAILABLE
dst += (threadIdx.y % ntx) * mma_C::J*stride;
#pragma unroll #pragma unroll
for (int j0 = 0; j0 < mmq_x; j0 += ntx*mma_C::J) { for (int j0 = 0; j0 < mmq_x; j0 += ntx*mma_C::J) {
#pragma unroll #pragma unroll
for (int n = 0; n < ntx; ++n) { for (int n = 0; n < ntx; ++n) {
#pragma unroll #pragma unroll
for (int l = 0; l < mma_C::ne; ++l) { for (int l = 0; l < mma_C::ne; ++l) {
const int j = j0 + mma_C::get_j(l); const int j = j0 + (threadIdx.y % ntx) * mma_C::J + mma_C::get_j(l);
if (j > j_max) { if (j > j_max) {
continue; continue;
@ -2590,4 +2607,4 @@ void ggml_cuda_op_mul_mat_q(
const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
const int64_t src1_padded_row_size, cudaStream_t stream); const int64_t src1_padded_row_size, cudaStream_t stream);
bool ggml_cuda_supports_mmq(enum ggml_type type); bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11);

View file

@ -1,5 +1,7 @@
#include "common.cuh" #include "common.cuh"
#define MMVQ_MAX_BATCH_SIZE 8 // Max. batch size for which to use MMVQ kernels.
void ggml_cuda_op_mul_mat_vec_q( void ggml_cuda_op_mul_mat_vec_q(
ggml_backend_cuda_context & ctx, ggml_backend_cuda_context & ctx,
const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,

View file

@ -4620,7 +4620,7 @@ static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor
} else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { } else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
// KQV single-batch // KQV single-batch
ggml_sycl_mul_mat_vec_nc(ctx, src0, src1, dst); ggml_sycl_mul_mat_vec_nc(ctx, src0, src1, dst);
} else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { } else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
// KQ + KQV multi-batch // KQ + KQV multi-batch
ggml_sycl_mul_mat_batched_sycl(ctx, src0, src1, dst); ggml_sycl_mul_mat_batched_sycl(ctx, src0, src1, dst);
} else if (use_dequantize_mul_mat_vec) { } else if (use_dequantize_mul_mat_vec) {

View file

@ -69,6 +69,7 @@ class GGUFReader:
# I - same as host, S - swapped # I - same as host, S - swapped
byte_order: Literal['I'] | Literal['S'] = 'I' byte_order: Literal['I'] | Literal['S'] = 'I'
alignment: int = GGUF_DEFAULT_ALIGNMENT alignment: int = GGUF_DEFAULT_ALIGNMENT
data_offset: int
# Note: Internal helper, API may change. # Note: Internal helper, API may change.
gguf_scalar_to_np: dict[GGUFValueType, type[np.generic]] = { gguf_scalar_to_np: dict[GGUFValueType, type[np.generic]] = {
@ -88,9 +89,13 @@ class GGUFReader:
def __init__(self, path: os.PathLike[str] | str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r'): def __init__(self, path: os.PathLike[str] | str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r'):
self.data = np.memmap(path, mode = mode) self.data = np.memmap(path, mode = mode)
offs = 0 offs = 0
# Check for GGUF magic
if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC: if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC:
raise ValueError('GGUF magic invalid') raise ValueError('GGUF magic invalid')
offs += 4 offs += 4
# Check GGUF version
temp_version = self._get(offs, np.uint32) temp_version = self._get(offs, np.uint32)
if temp_version[0] & 65535 == 0: if temp_version[0] & 65535 == 0:
# If we get 0 here that means it's (probably) a GGUF file created for # If we get 0 here that means it's (probably) a GGUF file created for
@ -103,12 +108,16 @@ class GGUFReader:
self.fields: OrderedDict[str, ReaderField] = OrderedDict() self.fields: OrderedDict[str, ReaderField] = OrderedDict()
self.tensors: list[ReaderTensor] = [] self.tensors: list[ReaderTensor] = []
offs += self._push_field(ReaderField(offs, 'GGUF.version', [temp_version], [0], [GGUFValueType.UINT32])) offs += self._push_field(ReaderField(offs, 'GGUF.version', [temp_version], [0], [GGUFValueType.UINT32]))
# Check tensor count and kv count
temp_counts = self._get(offs, np.uint64, 2) temp_counts = self._get(offs, np.uint64, 2)
offs += self._push_field(ReaderField(offs, 'GGUF.tensor_count', [temp_counts[:1]], [0], [GGUFValueType.UINT64])) offs += self._push_field(ReaderField(offs, 'GGUF.tensor_count', [temp_counts[:1]], [0], [GGUFValueType.UINT64]))
offs += self._push_field(ReaderField(offs, 'GGUF.kv_count', [temp_counts[1:]], [0], [GGUFValueType.UINT64])) offs += self._push_field(ReaderField(offs, 'GGUF.kv_count', [temp_counts[1:]], [0], [GGUFValueType.UINT64]))
tensor_count, kv_count = temp_counts tensor_count, kv_count = temp_counts
offs = self._build_fields(offs, kv_count) offs = self._build_fields(offs, kv_count)
offs, tensors_fields = self._build_tensors_fields(offs, tensor_count)
# Build Tensor Info Fields
offs, tensors_fields = self._build_tensor_info(offs, tensor_count)
new_align = self.fields.get('general.alignment') new_align = self.fields.get('general.alignment')
if new_align is not None: if new_align is not None:
if new_align.types != [GGUFValueType.UINT32]: if new_align.types != [GGUFValueType.UINT32]:
@ -117,6 +126,7 @@ class GGUFReader:
padding = offs % self.alignment padding = offs % self.alignment
if padding != 0: if padding != 0:
offs += self.alignment - padding offs += self.alignment - padding
self.data_offset = offs
self._build_tensors(offs, tensors_fields) self._build_tensors(offs, tensors_fields)
_DT = TypeVar('_DT', bound = npt.DTypeLike) _DT = TypeVar('_DT', bound = npt.DTypeLike)
@ -193,18 +203,29 @@ class GGUFReader:
# We can't deal with this one. # We can't deal with this one.
raise ValueError('Unknown/unhandled field type {gtype}') raise ValueError('Unknown/unhandled field type {gtype}')
def _get_tensor(self, orig_offs: int) -> ReaderField: def _get_tensor_info_field(self, orig_offs: int) -> ReaderField:
offs = orig_offs offs = orig_offs
# Get Tensor Name
name_len, name_data = self._get_str(offs) name_len, name_data = self._get_str(offs)
offs += int(name_len.nbytes + name_data.nbytes) offs += int(name_len.nbytes + name_data.nbytes)
# Get Tensor Dimensions Count
n_dims = self._get(offs, np.uint32) n_dims = self._get(offs, np.uint32)
offs += int(n_dims.nbytes) offs += int(n_dims.nbytes)
# Get Tensor Dimension Array
dims = self._get(offs, np.uint64, n_dims[0]) dims = self._get(offs, np.uint64, n_dims[0])
offs += int(dims.nbytes) offs += int(dims.nbytes)
# Get Tensor Encoding Scheme Type
raw_dtype = self._get(offs, np.uint32) raw_dtype = self._get(offs, np.uint32)
offs += int(raw_dtype.nbytes) offs += int(raw_dtype.nbytes)
# Get Tensor Offset
offset_tensor = self._get(offs, np.uint64) offset_tensor = self._get(offs, np.uint64)
offs += int(offset_tensor.nbytes) offs += int(offset_tensor.nbytes)
return ReaderField( return ReaderField(
orig_offs, orig_offs,
str(bytes(name_data), encoding = 'utf-8'), str(bytes(name_data), encoding = 'utf-8'),
@ -233,10 +254,10 @@ class GGUFReader:
offs += field_size offs += field_size
return offs return offs
def _build_tensors_fields(self, offs: int, count: int) -> tuple[int, list[ReaderField]]: def _build_tensor_info(self, offs: int, count: int) -> tuple[int, list[ReaderField]]:
tensor_fields = [] tensor_fields = []
for _ in range(count): for _ in range(count):
field = self._get_tensor(offs) field = self._get_tensor_info_field(offs)
offs += sum(int(part.nbytes) for part in field.parts) offs += sum(int(part.nbytes) for part in field.parts)
tensor_fields.append(field) tensor_fields.append(field)
return offs, tensor_fields return offs, tensor_fields

View file

@ -319,6 +319,27 @@ def dump_markdown_metadata(reader: GGUFReader, args: argparse.Namespace) -> None
markdown_content += "\n" markdown_content += "\n"
markdown_content += "### Tensor Data Offset\n"
markdown_content += '\n'
markdown_content += 'This table contains the offset and data segment relative to start of file\n'
markdown_content += '\n'
tensor_mapping_table: list[dict[str, str | int]] = []
for key, tensor in enumerate(reader.tensors):
data_offset_pretty = '{0:#16x}'.format(tensor.data_offset)
data_size_pretty = '{0:#16x}'.format(tensor.n_bytes)
tensor_mapping_table.append({"t_id":key, "layer_name":tensor.name, "data_offset":data_offset_pretty, "data_size":data_size_pretty})
tensors_mapping_table_header_map = [
{'key_name':'t_id', 'header_name':'T_ID', 'align':'right'},
{'key_name':'layer_name', 'header_name':'Tensor Layer Name', 'align':'left'},
{'key_name':'data_offset', 'header_name':'Data Offset (B)', 'align':'right'},
{'key_name':'data_size', 'header_name':'Data Size (B)', 'align':'right'},
]
markdown_content += markdown_table_with_alignment_support(tensors_mapping_table_header_map, tensor_mapping_table)
markdown_content += "\n"
for group in tensor_prefix_order: for group in tensor_prefix_order:
tensors = tensor_groups[group] tensors = tensor_groups[group]
group_elements = sum(tensor.n_elements for tensor in tensors) group_elements = sum(tensor.n_elements for tensor in tensors)
@ -370,6 +391,8 @@ def main() -> None:
parser.add_argument("--no-tensors", action="store_true", help="Don't dump tensor metadata") parser.add_argument("--no-tensors", action="store_true", help="Don't dump tensor metadata")
parser.add_argument("--json", action="store_true", help="Produce JSON output") parser.add_argument("--json", action="store_true", help="Produce JSON output")
parser.add_argument("--json-array", action="store_true", help="Include full array values in JSON output (long)") parser.add_argument("--json-array", action="store_true", help="Include full array values in JSON output (long)")
parser.add_argument("--data-offset", action="store_true", help="Start of data offset")
parser.add_argument("--data-alignment", action="store_true", help="Data alignment applied globally to data field")
parser.add_argument("--markdown", action="store_true", help="Produce markdown output") parser.add_argument("--markdown", action="store_true", help="Produce markdown output")
parser.add_argument("--verbose", action="store_true", help="increase output verbosity") parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
@ -377,7 +400,7 @@ def main() -> None:
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
if not args.json and not args.markdown: if not args.json and not args.markdown and not args.data_offset and not args.data_alignment:
logger.info(f'* Loading: {args.model}') logger.info(f'* Loading: {args.model}')
reader = GGUFReader(args.model, 'r') reader = GGUFReader(args.model, 'r')
@ -386,6 +409,10 @@ def main() -> None:
dump_metadata_json(reader, args) dump_metadata_json(reader, args)
elif args.markdown: elif args.markdown:
dump_markdown_metadata(reader, args) dump_markdown_metadata(reader, args)
elif args.data_offset:
print(reader.data_offset) # noqa: NP100
elif args.data_alignment:
print(reader.alignment) # noqa: NP100
else: else:
dump_metadata(reader, args) dump_metadata(reader, args)

View file

@ -14500,7 +14500,8 @@ struct llama_grammar * llama_grammar_init(
continue; continue;
} }
if (llama_grammar_detect_left_recursion(vec_rules, i, &rules_visited, &rules_in_progress, &rules_may_be_empty)) { if (llama_grammar_detect_left_recursion(vec_rules, i, &rules_visited, &rules_in_progress, &rules_may_be_empty)) {
throw std::runtime_error(format("unsupported grammar, left recursion detected for nonterminal at index %zu", i)); LLAMA_LOG_ERROR("unsupported grammar, left recursion detected for nonterminal at index %zu", i);
return nullptr;
} }
} }
@ -18818,10 +18819,10 @@ static int32_t llama_chat_apply_template_internal(
if (add_ass) { if (add_ass) {
ss << "<|im_start|>assistant\n"; ss << "<|im_start|>assistant\n";
} }
} else if (tmpl == "llama2" || tmpl.find("[INST]") != std::string::npos) { } else if (tmpl == "llama2" || tmpl == "mistral" || tmpl.find("[INST]") != std::string::npos) {
// llama2 template and its variants // llama2 template and its variants
// [variant] support system message // [variant] support system message
bool support_system_message = tmpl.find("<<SYS>>") != std::string::npos; bool support_system_message = tmpl.find("<<SYS>>") != std::string::npos || tmpl == "mistral";
// [variant] space before + after response // [variant] space before + after response
bool space_around_response = tmpl.find("' ' + eos_token") != std::string::npos; bool space_around_response = tmpl.find("' ' + eos_token") != std::string::npos;
// [variant] add BOS inside history // [variant] add BOS inside history

View file

@ -924,6 +924,12 @@ extern "C" {
// Grammar // Grammar
// //
/// Initialize a llama_grammar.
///
/// @param rules The rule elements of the grammar to initialize.
/// @param n_rules The number of rules.
/// @param start_rule_index The index of the root rule (the starting point of the grammar).
/// @return The initialized llama_grammar or nullptr if initialization failed.
LLAMA_API struct llama_grammar * llama_grammar_init( LLAMA_API struct llama_grammar * llama_grammar_init(
const llama_grammar_element ** rules, const llama_grammar_element ** rules,
size_t n_rules, size_t n_rules,

View file

@ -7,6 +7,7 @@
#include <cassert> #include <cassert>
#include "llama.h" #include "llama.h"
#include "common.h"
int main(void) { int main(void) {
llama_chat_message conversation[] = { llama_chat_message conversation[] = {
@ -119,5 +120,24 @@ int main(void) {
std::cout << output << "\n-------------------------\n"; std::cout << output << "\n-------------------------\n";
assert(output == expected); assert(output == expected);
} }
// test llama_chat_format_single
std::cout << "\n\n=== llama_chat_format_single ===\n\n";
std::vector<llama_chat_msg> chat2;
chat2.push_back({"system", "You are a helpful assistant"});
chat2.push_back({"user", "Hello"});
chat2.push_back({"assistant", "I am assistant"});
llama_chat_msg new_msg{"user", "How are you"};
auto fmt_single = [&](std::string tmpl) {
auto output = llama_chat_format_single(nullptr, tmpl, chat2, new_msg, true);
std::cout << "fmt_single(" << tmpl << ")\n" << output << "\n-------------------------\n";
return output;
};
assert(fmt_single("chatml") == "<|im_start|>user\nHow are you<|im_end|>\n<|im_start|>assistant\n");
assert(fmt_single("llama2") == "[INST] How are you [/INST]");
assert(fmt_single("gemma") == "<start_of_turn>user\nHow are you<end_of_turn>\n<start_of_turn>model\n");
assert(fmt_single("llama3") == "<|start_header_id|>user<|end_header_id|>\n\nHow are you<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n");
return 0; return 0;
} }

View file

@ -36,10 +36,10 @@ static llama_grammar* build_grammar(const std::string & grammar_str) {
static bool test_build_grammar_fails(const std::string & grammar_str) { static bool test_build_grammar_fails(const std::string & grammar_str) {
fprintf(stderr, "⚫ Testing failure for grammar: %s\n", grammar_str.c_str()); fprintf(stderr, "⚫ Testing failure for grammar: %s\n", grammar_str.c_str());
bool grammar_fails = false; bool grammar_fails = false;
try { llama_grammar * grammar = build_grammar(grammar_str);
build_grammar(grammar_str); if (grammar != nullptr) {
fprintf(stderr, " ❌ Expected build failure, but succeeded\n"); fprintf(stderr, " ❌ Expected build failure, but succeeded\n");
} catch (const std::exception & err) { } else {
grammar_fails = true; grammar_fails = true;
fprintf(stdout, " ✅︎\n"); fprintf(stdout, " ✅︎\n");
} }
@ -148,6 +148,250 @@ static void test_schema(const std::string & test_desc, const std::string & schem
} }
static void test_simple_grammar() { static void test_simple_grammar() {
test_schema(
"min 0",
R"""({
"type": "integer",
"minimum": 0
})""",
// Passing strings
{
"0",
"10",
"12",
"10000",
},
// Failing strings
{
"-1",
"-10",
"-10000",
"-100000000000000000000000000000000",
"100000000000000000000000000000000",
"00",
"01",
"-0",
}
);
test_schema(
"min 2",
// Schema
R"""({
"type": "integer",
"minimum": 2
})""",
// Passing strings
{
"2",
"3",
"4",
"10",
"20",
"1234567890000000",
},
// Failing strings
{
"0",
"1",
"-1",
"-100",
"0",
"1",
"01",
"02",
"12345678900000000",
}
);
test_schema(
"min 456",
R"""({
"type": "integer",
"minimum": 456
})""",
// Passing strings
{
"456",
"4560",
"457",
"460",
"500",
},
// Failing strings
{
"455",
"356",
"50",
"050",
"-1",
"-456",
}
);
test_schema(
"min -123",
R"""({
"type": "integer",
"minimum": -123
})""",
// Passing strings
{
"-123",
"-122",
"-11",
"-1",
"0",
"1",
"123",
"1234",
"2345",
},
// Failing strings
{
"-1234",
"-124",
}
);
test_schema(
"max 9999",
// Schema
R"""({
"type": "integer",
"maximum": 9999
})""",
// Passing strings
{
"-99999",
"0",
"9999",
},
// Failing strings
{
"10000",
"99991",
}
);
test_schema(
"max -9999",
// Schema
R"""({
"type": "integer",
"maximum": -9999
})""",
// Passing strings
{
"-10000",
"-9999",
},
// Failing strings
{
"-9998",
"0",
"9999",
}
);
test_schema(
"min 5 max 30",
// Schema
R"""({
"type": "integer",
"minimum": 5,
"maximum": 30
})""",
// Passing strings
{
"5",
"10",
"30",
},
// Failing strings
{
"05",
"4",
"-1",
"31",
"123",
"0123",
}
);
test_schema(
"min -1 max 1",
R"""({
"type": "integer",
"minimum": -1,
"maximum": 1
})""",
// Passing strings
{
"-1",
"0",
"1",
},
// Failing strings
{
"-11",
"-10",
"-2",
"2",
"10",
"11",
}
);
test_schema(
"min -123 max 42",
R"""({
"type": "integer",
"minimum": -123,
"maximum": 42
})""",
// Passing strings
{
"-123",
"-122",
"-13",
"-11",
"-2",
"-1",
"0",
"1",
"5",
"10",
"39",
"40",
"42",
},
// Failing strings
{
"-0123",
"-124",
"-1123",
"-200",
"43",
"123",
"0123",
}
);
test_schema(
"exclusive min / max",
// Schema
R"""({
"type": "integer",
"exclusiveMinimum": 0,
"exclusiveMaximum": 10000
})""",
// Passing strings
{
"1",
"9999",
},
// Failing strings
{
"0",
"01",
"10000",
"99999",
}
);
// Test case for a simple grammar // Test case for a simple grammar
test_grammar( test_grammar(
"simple grammar", "simple grammar",
@ -773,7 +1017,6 @@ static void test_json_schema() {
} }
); );
test_schema( test_schema(
"min+max items", "min+max items",
// Schema // Schema

View file

@ -80,6 +80,232 @@ static void test_all(const std::string & lang, std::function<void(const TestCase
runner(tc); runner(tc);
}; };
test({
SUCCESS,
"min 0",
R"""({
"type": "integer",
"minimum": 0
})""",
R"""(
root ::= ([0] | [1-9] [0-9]{0,15}) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"min 1",
R"""({
"type": "integer",
"minimum": 1
})""",
R"""(
root ::= ([1-9] [0-9]{0,15}) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"min 3",
R"""({
"type": "integer",
"minimum": 3
})""",
R"""(
root ::= ([1-2] [0-9]{1,15} | [3-9] [0-9]{0,15}) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
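// Decomposition for "minimum": 3: a number starting with 1 or 2 must have at least
// two digits (hence >= 10), while a number starting with 3-9 may be a single digit.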
test({
SUCCESS,
"min 9",
R"""({
"type": "integer",
"minimum": 9
})""",
R"""(
root ::= ([1-8] [0-9]{1,15} | [9] [0-9]{0,15}) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"min 10",
R"""({
"type": "integer",
"minimum": 10
})""",
R"""(
root ::= ([1] ([0-9]{1,15}) | [2-9] [0-9]{1,15}) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
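// With "minimum": 10 the single-digit branch disappears entirely: every accepted
// number has at least two digits.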
test({
SUCCESS,
"min 25",
R"""({
"type": "integer",
"minimum": 25
})""",
R"""(
root ::= ([1] [0-9]{2,15} | [2] ([0-4] [0-9]{1,14} | [5-9] [0-9]{0,14}) | [3-9] [0-9]{1,15}) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"max 30",
R"""({
"type": "integer",
"maximum": 30
})""",
R"""(
root ::= ("-" [1-9] [0-9]{0,15} | [0-9] | ([1-2] [0-9] | [3] "0")) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
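// With only an upper bound, the "-" branch admits any negative integer (up to the
// 16-digit cap), while the non-negative side is enumerated digit by digit up to 30.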
test({
SUCCESS,
"min -5",
R"""({
"type": "integer",
"minimum": -5
})""",
R"""(
root ::= ("-" ([0-5]) | [0] | [1-9] [0-9]{0,15}) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"min -123",
R"""({
"type": "integer",
"minimum": -123
})""",
R"""(
root ::= ("-" ([0-9] | ([1-8] [0-9] | [9] [0-9]) | "1" ([0-1] [0-9] | [2] [0-3])) | [0] | [1-9] [0-9]{0,15}) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
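// The negative branch spells out magnitudes up to 123 digit by digit; the
// non-negative branch is unbounded apart from the 16-digit cap.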
test({
SUCCESS,
"max -5",
R"""({
"type": "integer",
"maximum": -5
})""",
R"""(
root ::= ("-" ([0-4] [0-9]{1,15} | [5-9] [0-9]{0,15})) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"max 1",
R"""({
"type": "integer",
"maximum": 1
})""",
R"""(
root ::= ("-" [1-9] [0-9]{0,15} | [0-1]) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"max 100",
R"""({
"type": "integer",
"maximum": 100
})""",
R"""(
root ::= ("-" [1-9] [0-9]{0,15} | [0-9] | ([1-8] [0-9] | [9] [0-9]) | "100") space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"min 0 max 23",
R"""({
"type": "integer",
"minimum": 0,
"maximum": 23
})""",
R"""(
root ::= ([0-9] | ([1] [0-9] | [2] [0-3])) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
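// Ignoring the trailing "space", this rule is equivalent to the regular expression
// ^([0-9]|1[0-9]|2[0-3])$ (single digits, 10-19 and 20-23). A standalone sketch of
// that equivalence, purely illustrative and not part of this test file, could be:
//
//   #include <cassert>
//   #include <regex>
//   int main() {
//       std::regex r(R"(^([0-9]|1[0-9]|2[0-3])$)");
//       assert(std::regex_match("0", r) && std::regex_match("23", r));
//       assert(!std::regex_match("24", r) && !std::regex_match("-1", r) && !std::regex_match("05", r));
//   }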
test({
SUCCESS,
"min 15 max 300",
R"""({
"type": "integer",
"minimum": 15,
"maximum": 300
})""",
R"""(
root ::= (([1] ([5-9]) | [2-9] [0-9]) | ([1-2] [0-9]{2} | [3] "00")) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"min 5 max 30",
R"""({
"type": "integer",
"minimum": 5,
"maximum": 30
})""",
R"""(
root ::= ([5-9] | ([1-2] [0-9] | [3] "0")) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"min -123 max 42",
R"""({
"type": "integer",
"minimum": -123,
"maximum": 42
})""",
R"""(
root ::= ("-" ([0-9] | ([1-8] [0-9] | [9] [0-9]) | "1" ([0-1] [0-9] | [2] [0-3])) | [0-9] | ([1-3] [0-9] | [4] [0-2])) space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"min -10 max 10",
R"""({
"type": "integer",
"minimum": -10,
"maximum": 10
})""",
R"""(
root ::= ("-" ([0-9] | "10") | [0-9] | "10") space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
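// Symmetric range: the negative branch mirrors the non-negative one (and, as
// written, also admits "-0").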
test({
FAILURE,
"unknown type",
@ -390,6 +616,44 @@ static void test_all(const std::string & lang, std::function<void(const TestCase
)""" )"""
}); });
test({
SUCCESS,
"min + max items with min + max values across zero",
R"""({
"items": {
"type": "integer",
"minimum": -12,
"maximum": 207
},
"minItems": 3,
"maxItems": 5
})""",
R"""(
item ::= ("-" ([0-9] | "1" [0-2]) | [0-9] | ([1-8] [0-9] | [9] [0-9]) | ([1] [0-9]{2} | [2] "0" [0-7])) space
root ::= "[" space item ("," space item){2,4} "]" space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
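// The item rule covers -12..207 (negative, single-digit, two-digit and 1xx/20x
// branches); the root takes one item plus 2-4 repetitions, giving the required
// 3-5 items.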
test({
SUCCESS,
"min + max items with min + max values",
R"""({
"items": {
"type": "integer",
"minimum": 12,
"maximum": 207
},
"minItems": 3,
"maxItems": 5
})""",
R"""(
item ::= (([1] ([2-9]) | [2-9] [0-9]) | ([1] [0-9]{2} | [2] "0" [0-7])) space
root ::= "[" space item ("," space item){2,4} "]" space
space ::= | " " | "\n" [ \t]{0,20}
)"""
});
test({
SUCCESS,
"simple regexp",
View file
@ -116,6 +116,10 @@ int main()
std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
grammar = llama_grammar_init(
grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
if (grammar == nullptr)
{
throw std::runtime_error("Failed to initialize llama_grammar");
}
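// (llama_grammar_init can evidently return nullptr; failing fast here presumably
// keeps the stack checks below from dereferencing a null grammar)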
std::vector<std::vector<llama_grammar_element>> expected_stacks = {
{